index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
2,700 | 06ea697989f8f9ac539559690dcfd7aa73151e0f | # -*- coding: utf-8 -*-
"""
@author: chris
Modified from THOMAS MCTAVISH (2010-11-04).
mpiexec -f ~/machinefile -enable-x -n 96 python Population.py --noplot
"""
# Module setup: pick the parallelization backend. When NEURON launches this
# script itself (argv contains "-python"), use NEURON's ParallelContext;
# otherwise fall back to mpi4py for rank information.
from __future__ import with_statement
from __future__ import division
import sys
sys.path.append('../NET/sheff/weasel/')
sys.path.append('../NET/sheffprk/template/')
import os
#use_pc = True
import sys
argv = sys.argv
if "-python" in argv:
    # launched via "nrniv -python" (or mpiexec ... -python): ParallelContext available
    use_pc = True
else:
    use_pc = False
if use_pc == True:
    from neuron import h
    pc = h.ParallelContext()
    rank = int(pc.id())
    nhost = pc.nhost()
else:
    from mpi4py import MPI
    from neuron import h
    rank = MPI.COMM_WORLD.rank
#print sys.version
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-o', action='store', dest='opt')
parser.add_argument('--noplot', action='store_true')
parser.add_argument('--norun', action='store_true')
parser.add_argument('--noconst', action='store_true')
parser.add_argument('--noqual', action='store_true')
pars, unknown = parser.parse_known_args(['-o','--noplot','--norun','--noconst','--noqual'])
if __name__ == '__main__':
import matplotlib
if rank == 0:
matplotlib.use('Tkagg', warn=True)
else:
matplotlib.use('Agg', warn=True)
if __name__ == '__main__':
do_plot = 1
if results.noplot: # do not plot to windows
matplotlib.use('Agg', warn=True)
if rank == 0: print "- No plotting"
do_plot = 0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import random as rnd
import neuronpy.util.spiketrain
#set_printoptions(threshold='nan')
from Stimulation import *
from Stimhelp import *
from units import *
from cells.PassiveCell import *
from itertools import izip
try:
import cPickle as pickle
except:
import pickle
import gzip
import h5py
from templates.synapse.synapse import Synapse
from synapsepfpurk import Synapse as Synapse2
if use_pc is False: import mdp
import time as ttime
from scipy.optimize import fmin, leastsq
from NeuroTools import stgen, signals
import md5
#from guppy import hpy
#hpy = hpy()
class Population:
"""
A population of N cells
"""
def __init__(self, cellimport = [], celltype = None, N = [10], temperature = 6.3, cell_exe = 0, ihold = [0*nA], ihold_sigma = [0*nA], amp = [0*nA], amod = [None], anoise = [None], give_freq = False, do_run = 1, pickle_prefix = "default", istart = 0, istop = 0.07, di = 0.001, dt = 0.025*ms, use_mpi = True, use_pc = False):
    """
    A population of N cells, possibly of several cell types.

    :param cellimport: import statement string(s) that bring each cell class into
        scope; built automatically from `celltype` when given as [].
        NOTE(review): this mutable default is shared between calls and appended to
        below -- constructing several Populations with the default would accumulate
        entries; confirm callers always pass their own list or only build one.
    :param celltype: cell class name or list of names, one per cell type.
    :param N: number of cells, scalar or list (one entry per cell type).
    :param temperature: simulation temperature handed to Stimulation in get_i().
    :param cell_exe: constructor statement string(s) exec'd in create_cells().
    :param ihold: holding current (or frequency if give_freq) per cell type.
    :param ihold_sigma: relative spread of ihold across cells.
    :param amp: stimulus amplitude per cell type.
    :param amod: relative modulation depth; when set, amp is derived in set_i().
    :param anoise: relative noise amplitude; used by set_i() to fill fluct_s.
    :param give_freq: if True, ihold/amp are frequencies (Hz) converted to
        currents via the cached i/f curve (get_i()).
    :param do_run: if falsy, most construction/stimulation methods are no-ops.
    :param pickle_prefix: prefix for result files.
    :param istart, istop, di: current range scanned when estimating the i/f curve.
    :param dt: simulation time step.
    :param use_mpi, use_pc: parallel backend flags (mpi4py vs NEURON ParallelContext).
    """
    self.use_pc = use_pc
    if type(celltype) is not list: celltype = [celltype] #convert to list if it is not given as one
    self.celltype = celltype
    if type(cell_exe) is not list: cell_exe = [cell_exe] #convert to list if it is not given as one
    self.cell_exe = cell_exe
    if cellimport is not None:
        if cellimport == []:
            # default: import every cell class from the cells package
            for n in range(len(celltype)):
                cellimport.append("from cells." + self.celltype[n] + " import *")
        self.cellimport = cellimport
    if type(N) is not list: N = [N]
    self.N = N # Total number of cells in the net
    self.n_celltypes = len(self.N)
    self.a_celltype = [0] # celltype to analyse
    self.factor_celltype = [1]*self.n_celltypes
    self.set_init(ihold, ihold_sigma, amp, amod)
    self.CF_var = False
    self.inh_hold_sigma = [0]
    self.intr_hold_sigma = [0]
    #self.sigma_inh_hold = 0
    #self.sigma_ihold = 0
    # normalize anoise to one entry per cell type
    if type(anoise) is not list: anoise = [anoise]*self.n_celltypes
    if len(anoise) < self.n_celltypes: anoise = [anoise[0]]*self.n_celltypes
    self.anoise = anoise # RUN self.set_i()
    self.give_freq = give_freq # RUN self.set_i()
    self.temperature = temperature
    self.gid_count = 0
    self.gidlist = [] # List of global identifiers on this host
    self.global_gidlist = [] # List of global identifiers
    self.cells = [] # Cells on this host
    self.t_vec = []
    self.id_vec = []
    self.rec_v = []
    # per-celltype spike-time / id recording vectors
    for n in range(self.n_celltypes):
        if use_mpi:
            self.t_vec.append(h.Vector()) # np.array([0])
            self.id_vec.append(h.Vector()) # np.array([-1], dtype=int)
        else:
            self.t_vec.append([])
        self.rec_v.append(h.Vector())
    #self.t_vec = h.Vector(np.array([0])) # Spike time of all cells on this host
    #self.id_vec = h.Vector(np.array([-1])) # Ids of spike times on this host
    self.flucts = [] # Fluctuating inputs on this host
    self.fluct_m = 0 # [nA]
    self.fluct_s = [0] # [nA]
    self.fluct_tau = 0*ms # [ms]
    self.noises = [] # Random number generators on this host
    self.plays = [] # Play inputs on this host
    self.rec_is = []
    self.trains = []
    self.vecstim = []
    self.nc_vecstim = []
    self.spike_vec = []
    self.syn_tau1 = 5*ms # Synapse of virtual target neuron
    self.syn_tau2 = 5*ms # Synapse of virtual target neuron
    self.tmax = 10*sec # maximum length of plot that should be plotted!!
    self.nc_delay = 0 #500*ms # only important if syn_output is used, not used currently
    self.dt = dt
    self.bin_width = dt
    self.jitter = 0*ms
    self.delta_t = 0*ms
    self.istart = istart
    self.istop = istop
    self.di = di
    # bookkeeping lists for stimulation objects created later
    self.ic_holds = []
    self.i_holdrs = []
    self.i_holds = []
    self.ic_starts = []
    self.vc_starts = []
    self.ic_steps = []
    self.rec_step = []
    self.tvecs = []
    self.ivecs = []
    self.noises = []
    self.record_syn = []
    self.id_all_vec_input = []
    self.t_all_vec_input = []
    # sanity check: all per-celltype parameter lists must align
    if len(self.N) == len(self.cell_exe) == len(self.celltype):
        pass
    else:
        raise ValueError('N, cell_exe, celltype do NOT have equal length!')
    self.use_mpi = use_mpi
    self.use_pc = use_pc
    if self.use_mpi:
        #### Make a new ParallelContext object
        self.pc = h.ParallelContext()
        self.id = self.pc.id()
        self.nhost = int(self.pc.nhost())
        if self.use_pc == False:
            s = "mpi4py thinks I am %d of %d on %s, NEURON thinks I am %d of %d\n"
            processorname = MPI.Get_processor_name()
            self.comm = MPI.COMM_WORLD
            if self.id == 0:
                print s % (self.comm.rank, self.comm.size, processorname, self.id, self.nhost)
        else:
            s = "NEURON thinks I am %d of %d\n"
            if self.id == 0:
                print s % (self.id, self.nhost)
        self.barrier()
    else:
        self.id = 0
        self.nhost = 1
    self.do_run = do_run
    self.first_run = True
    self.set_numcells() # Build the portion of cells on this host.
    self.pickle_prefix = pickle_prefix
    # plot options
    self.ymax = 0
    self.ax = None
    self.linewidth = 1.5
    self.color_vec = None
    self.alpha = 0.8
    self.method_interpol = np.array(['bin','syn'])
    self.dumpsave = 1
    self.called_syn_out_all = False
    self.no_fmean=False
    # default synaptic parameters, one entry per cell type
    self.tau1_ex=[0*ms]*self.n_celltypes
    self.tau2_ex=[10*ms]*self.n_celltypes
    self.tau1_inh=[0*ms]*self.n_celltypes
    self.tau2_inh=[100*ms]*self.n_celltypes
    self.n_syn_ex = [0]*self.n_celltypes
    self.g_syn_ex = [1]*self.n_celltypes
    self.g_syn_ex_s = [0]*self.n_celltypes
    self.mglufac_ex = [1,0]
    self.noise_syn = [0]*self.n_celltypes
    self.noise_syn_tau = [0*ms]*self.n_celltypes
    self.noise_syn_inh = [0]*self.n_celltypes
    self.noise_syn_tau_inh = [0*ms]*self.n_celltypes
    self.noise_a = [1e9]*self.n_celltypes
    self.noise_a_inh = [1e9]*self.n_celltypes
    self.inh_hold = [0]*self.n_celltypes
    self.n_syn_inh = [0]*self.n_celltypes
    self.g_syn_inh = [1]*self.n_celltypes
    self.g_syn_inh_s = [0]*self.n_celltypes
    self.intr_hold = [0]*self.n_celltypes
    self.n_syn_intr = [0]*self.n_celltypes
    self.g_syn_intr = [0]*self.n_celltypes
    self.syn_max_mf = [1]*self.n_celltypes # possible mossy fibres per synapse
    self.syn_max_inh = [1]*self.n_celltypes # possible Golgi cells per synapse
    self.syn_max_intr = [1]*self.n_celltypes # possible Intruding cells per synapse
    self.seed = 50
    self.force_run = False
    self.give_psd = False
    self.do_if = True
    # fluctuating-conductance defaults (see connect_gfluct)
    self.fluct_g_e0 = []
    self.fluct_g_i0 = []
    self.fluct_std_e = []
    self.fluct_std_i = []
    self.fluct_tau_e = []
    self.fluct_tau_i = []
    self.adjinh = True # adjust inhibition to get CFo instead of g_ex
    self.adjfinh = True # adjust frequnecy of inhibition to get CFo instead of g_ex
    self.syn_ex_dist = []
    self.syn_inh_dist = []
    self.stdp_used = False
    self.xmax = 20
    self.use_multisplit = False
    self.use_local_dt = False
    self.simstep = 0
    self.plot_train = True
    self.inh_delay = 0 # in ms
    self.plot_input = True
    self.delay_baseline = 8
    self.tstop_if = 1
    self.gsyn_in_fac = []
    self.netcons = [] # keeping track of!
    self.nclist = []
    self.ST_stims = []
    self.PF_stims = []
    self.data_dir = "./data"
    self.minimal_dir = False
def set_init(self, ihold, ihold_sigma, amp, amod):
    """Store baseline current, spread, amplitude and modulation settings.

    Scalars are wrapped into lists so that every attribute holds one entry
    per cell type; a too-short amp list is padded by repeating its first entry.
    """
    # important for all methods:
    if type(ihold) is not list:
        ihold = [ihold]
    self.ihold = self.ihold_orig = ihold
    if type(amp) is not list:
        amp = [amp]
    if len(amp) < self.n_celltypes:
        amp = [amp[0]] * self.n_celltypes
    self.amp = amp
    if type(amod) is not list:
        amod = [amod] * self.n_celltypes
    self.amod = amod # RUN self.set_i()
    self.ihold_sigma = ihold_sigma
def barrier(self):
    """Block until every rank reaches this point; serial runs fall straight through."""
    if not self.use_mpi:
        return
    if self.use_pc == True:
        self.pc.barrier()
    else:
        self.comm.Barrier()
def broadcast(self, vec, root = 0, fast = False):
    """
    Broadcast *vec* from rank *root* to all ranks and return the received value.

    :param vec: value (array-like / picklable) to distribute; returned
        unchanged when running serially.
    :param root: rank that provides the data.
    :param fast: with the ParallelContext backend, use pc.broadcast on an
        h.Vector (numeric data only) instead of the generic py_alltoall exchange.
    :return: the broadcast value as seen on this rank.
    """
    if self.use_mpi:
        if self.use_pc:
            if fast:
                # numeric fast path: NEURON fills hvec in place on every rank
                hvec = h.Vector(vec)
                self.pc.broadcast(hvec, root)
                vec = np.array(hvec)
            else:
                # generic path: root sends a copy of vec to every rank
                sendlist = [None]*self.nhost
                if self.id == root:
                    for i in range(self.nhost):
                        sendlist[i] = vec
                getlist = self.pc.py_alltoall(sendlist)
                vec = getlist[root]
        else:
            # BUG FIX: was hard-coded to root=0, silently ignoring the *root*
            # argument in the mpi4py code path.
            vec = self.comm.bcast(vec, root=root)
    return vec
def set_numcells(self, N = []):
    """
    Create and lay out the cells of this network on this host.

    Assigns gids round-robin across hosts (set_gids) and then instantiates
    the local share of cells (create_cells). Wiring is done separately.
    """
    self.set_gids(N)
    self.create_cells()
    #self.syn_output() # generate synaptic "output" in neuron
    #self.connect_cells()
def set_gids(self, N = []):
    """Set the gidlist on this host.
    Round-robin counting. Each host as an id from 0 to pc.nhost()-1.
    Example:
    if N = 5 cells and nhost() = 3
    node id() = 0 will get cells [0, 3]
    node id() = 1 will get cells [1, 4]
    node id() = 2 will get cells [2]

    :param N: per-celltype cell counts; defaults to self.N when empty.
    """
    self.gidlist = []
    if N == []:
        N = self.N
    # borders where another celltype begins
    self.global_gidlist = []
    self.n_borders = [0]
    for l in range(1,self.n_celltypes+1):
        self.n_borders.append(sum(N[0:l]))
        # all gids belonging to celltype l-1
        self.global_gidlist.append(range(self.n_borders[-2], self.n_borders[-1]))
    for n in range(self.n_celltypes): # create list in list
        self.gidlist.append([])
    for i in range(int(self.id), sum(N), int(self.nhost)): # loop over all cells
        n = np.where((np.array(self.n_borders)-i)>0)[0][0]-1 # find out what cell type this is
        self.gidlist[n].append(i) # put in specific gidlist for that celltype
    self.gid_count = self.gid_count + sum(N)
    if self.id == 0: print "nodeid:" , self.id , ", gidlist:" , self.gidlist , ", total gids:" , len(self.global_gidlist) , ", sum(N):" , sum(N) # check gids of node
def del_cells(self):
if self.cells != []:
for n in range(self.n_celltypes):
for m in self.cells[n]:
print "deleting cell", m
del m
del self.cells
self.cells = []
if self.use_mpi: self.pc.gid_clear()
def create_cells(self):
    """
    Create cell objects on this host.

    For each cell type, exec's the corresponding cellimport statement and then,
    per local gid, exec's the constructor string in self.cell_exe (which must
    bind a local name `cell`). With MPI, each cell is registered with the
    ParallelContext for spike exchange and recording.
    """
    if self.do_run:
        self.del_cells()
        if self.id == 0: print "creating cells"
        for n in range(self.n_celltypes):
            self.cells.append([]) # create list in list
            #print self.cellimport[n]
            exec self.cellimport[n]
            #print self.gidlist
            for i in self.gidlist[n]:
                #if "sigma" not in self.cell_exe[n]:
                #    exec self.cell_exe[n]
                #    cell.gid = i # tell cell it's gid!
                #    print i
                #else:
                if (self.celltype[n] == "IfCell") or (self.celltype[n] == "Grc"):
                    # these constructors accept gid directly: splice "gid=i" into the call string
                    if self.cell_exe[n][-2] == "(":
                        # add gid to cell and execute!
                        exec self.cell_exe[n][0:-1] + "gid=" + str(i) + ")"
                    else:
                        exec self.cell_exe[n][0:-1] + ", gid=" + str(i) + ")"
                else:
                    exec self.cell_exe[n]
                    cell.gid = i
                self.cells[n].append(cell) # add to (local) list
                if self.use_mpi:
                    #### Tell this host it has this gid
                    #### gids can be any integer, they just need to be unique.
                    #### In this simple case, we set the gid to i.
                    self.pc.set_gid2node(i, int(self.id))
                    self.pc.cell(i, cell.nc_spike) # Associate the cell with this host and gid
                    ## NOT NECESSARY ANYMORE ##
                    #### Means to tell the ParallelContext that this cell is a source.
                    #nc = cell.connect_target(None)
                    #self.ncs[n].append(nc)
                    #### Record spikes of this cell
                    self.pc.spike_record(i, self.t_vec[n], self.id_vec[n])
                    #print n, self.cells[n][-1].nc_spike.thresh
                else:
                    # serial: one spike-time vector per cell
                    self.t_vec[n].append(h.Vector())
                    cell.nc_spike.record(self.t_vec[n][-1])
def connect_cells(self, conntype=[], stdp=[], tend=1e9):
    """
    Connect cells as specified.

    :param conntype: list of projection dicts with keys 'type' (e.g. 'gogr',
        'grgo', 'grgom', 'grstl', 'e2inh', 'e2ex'), 'conv' (number of source
        cells converging onto each target), 'src'/'tgt' (cell-type indices),
        'w' (weight, or weight array), 'var' (relative weight spread),
        'tau1'/'tau2' (synaptic time constants) and optionally
        'mgr2'/'mgr2_var' (mGluR factor) and 'e_inh'/'e_ex' (reversal
        potentials, defaults -65 / 0).
    :param stdp: list of dicts ('wmax','taupre','taupost','apre','apost')
        parallel to conntype; empty list disables plasticity.
    :param tend: end time handed to plastic (STDP) synapses.
    """
    if self.do_run:
        # copy so the caller's lists are not modified below
        stdp = stdp[:]
        conntype = conntype[:]
        if len(stdp) == 0:
            # no plasticity: one all-zero stdp entry per projection
            for i in conntype:
                stdp.append({'wmax':0, 'taupre':0, 'taupost':0, 'apre':0, 'apost':0})
        else:
            self.stdp_used = True
        for i, conn in enumerate(conntype):
            typ = conn['type']
            conv = conn['conv']
            src = conn['src']
            tgt = conn['tgt']
            w0 = conn['w']
            var = conn['var']
            tau1 = conn['tau1']
            tau2 = conn['tau2']
            if 'mgr2' in conn.keys():
                mgr2 = conn['mgr2']
                mgr2_var = conn['mgr2_var']
            else:
                mgr2 = 0
                mgr2_var = 0
            if 'e_inh' in conn.keys():
                e_inh = conn['e_inh']
            else:
                e_inh = -65
            if 'e_ex' in conn.keys():
                e_ex = conn['e_ex']
            else:
                e_ex = 0
            wmax = stdp[i]['wmax']
            taupre = stdp[i]['taupre']
            taupost = stdp[i]['taupost']
            apre = stdp[i]['apre']
            apost = stdp[i]['apost']
            # Connect conv cells of celltype src to every cell of celltype tgt
            for ni, i in enumerate(self.cells[tgt]):
                # deterministic per-target choice of presynaptic gids
                rnd.seed(i.gid*10*self.seed)
                if conv >= len(self.global_gidlist[src]):
                    gids = self.global_gidlist[src]
                    if self.id == 0: print "more or equal conv to len(self.global_gidlist[src])"
                else:
                    gids = rnd.sample(self.global_gidlist[src],conv)
                if self.id == 0: print conn['type'], ":", ni, ":", gids[0], "\n"
                for ng, g in enumerate(gids):
                    # deterministic per-source weight jitter
                    np.random.seed(g*12)
                    #np.random.seed(int(g%10+1)*12)
                    if len(shape(w0))>0: # array is given
                        print "w array is given"
                        if len(w0[ng]) == self.N[0]:
                            w = w0[ng][ni]
                    elif (var > 0) and (w0>0):
                        w = np.random.normal(w0, w0*var, 1).clip(min=0)
                    else:
                        w = w0
                    if (mgr2_var > 0) and (mgr2>0):
                        mg = np.random.normal(mgr2, mgr2*mgr2_var, 1).clip(min=0)
                    else:
                        mg = mgr2
                    #print conn['type'], ":", i.gid, ":", g, ", w:", w, "\n"
                    if self.celltype[tgt] == 'IfCell':
                        # integrate-and-fire target: use template Synapse objects
                        if typ == 'gogr':
                            i.whatami = "grc"
                            i.synlist_inh.append(Synapse('goc', i, i.soma, nrel=0, record_all=0, weight_gmax=w))
                            i0 = int(len(i.synlist_inh)-1)
                            i.nc_inh.append(self.pc.gid_connect(g, i.synlist_inh[i0].input))
                            i.nc_inh[-1].delay = 1
                            i.nc_inh[-1].weight[0] = 1
                        if typ == 'grgo':
                            i.whatami = "goc"
                            i.synlist.append(Synapse('grc', i, i.soma, syntype = 'D', nrel=0, record_all=0, weight_gmax=w))
                            e0 = int(len(i.synlist)-1)
                            i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))
                            i.nc[-1].delay = 1
                            i.nc[-1].weight[0] = 1
                        if typ == 'grgom':
                            i.whatami = "goc"
                            i.synlist.append(Synapse('grc', i, i.soma, syntype = 'DM', nrel=0, record_all=0, weight_gmax=w, mglufac = mg))
                            e0 = int(len(i.synlist)-1)
                            i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))
                            i.nc[-1].delay = 1
                            i.nc[-1].weight[0] = 1
                        if typ == 'e2inh':
                            # plain/plastic inhibitory Exp2 synapse on the IfCell
                            i.create_synapses(n_inh=1, tau1_inh=tau1, tau2_inh=tau2, e_inh=e_inh, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)
                            i0 = len(i.synlist_inh)-1
                            if self.use_mpi:
                                if wmax == 0:
                                    i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=w, delay=1)
                                else:
                                    # plastic synapse: weight lives in the synapse, NetCon weight is 1
                                    i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=1, delay=1)
                            else:
                                if wmax == 0:
                                    i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=w, delay=1))
                                else:
                                    i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=1, delay=1))
                        if typ == 'e2ex':
                            i.create_synapses(n_ex = 1, tau1 = tau1, tau2 = tau2, e_ex=e_ex, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)
                            e0 = len(i.synlist)-1
                            if self.use_mpi:
                                if wmax == 0:
                                    i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=w, delay=1)
                                else:
                                    i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=1, delay=1)
                            else:
                                if wmax == 0:
                                    i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=w, delay=1))
                                else:
                                    i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=1, delay=1))
                    else: # No IfCell
                        if typ == 'gogr':
                            i.createsyn(ngoc = 1, weight_gmax=w) # multiplication factor
                            i0 = len(i.GOC_L)-1 # get number of current synapse!
                            i.pconnect(self.pc,g,i0,'goc')
                        if typ == 'grgo':
                            i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor
                            i0 = len(i.GRC_L)-1 # get number of current synapse!
                            i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
                        if typ == 'grgom':
                            #print w, mg
                            i.createsyn(ngrcm = 1, weight_gmax=w, mglufac = mg) # multiplication factor
                            i0 = len(i.GRC_L)-1 # get number of current synapse!
                            i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
                        if typ == 'grstl':
                            i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor
                            i0 = len(i.GRC_L)-1 # get number of current synapse!
                            i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
                        if 'e2' in typ:
                            # generic Exp/Exp2/stdp point process on the soma
                            if 'inh' in typ:
                                Erev = -65
                            elif 'ex' in typ:
                                Erev = 0
                            if tau1 == 0:
                                # single-exponential synapse
                                syn = h.ExpSyn(i.soma(0.5))
                                syn.tau = tau2/ms
                            else:
                                if wmax == 0:
                                    syn = h.Exp2Syn(i.soma(0.5))
                                    syn.tau1 = tau1/ms
                                    syn.tau2 = tau2/ms
                                else: # STDP
                                    syn = h.stdpE2S(i.soma(0.5))
                                    syn.tau1 = tau1/ms
                                    syn.tau2 = tau2/ms
                                    syn.on = 1
                                    syn.thresh = -20
                                    syn.wmax = wmax
                                    syn.w = w
                                    syn.taupre = taupre/ms
                                    syn.taupost = taupost/ms
                                    syn.apre = apre
                                    syn.apost = apost
                            syn.e = Erev/mV
                            if self.celltype[tgt] == 'Grc':
                                i.GOC_L.append(syn)
                                i0 = int(len(i.GOC_L)-1) # get number of current synapse!
                                i.gocncpc.append(self.pc.gid_connect(g, i.GOC_L[i0]))
                                i.gocncpc[-1].delay = 1
                                if wmax == 0:
                                    i.gocncpc[-1].weight[0] = w
                                else:
                                    # plastic synapse manages its own weight
                                    i.gocncpc[-1].weight[0] = 1
                            elif self.celltype[tgt] == 'Goc':
                                i.GRC_L.append(syn)
                                e0 = int(len(i.GRC_L)-1) # get number of current synapse!
                                i.pfncpc.append(self.pc.gid_connect(g, i.GRC_L[e0]))
                                i.pfncpc[-1].delay = 1
                                i.pfncpc[-1].weight[0] = w
                                if wmax == 0:
                                    i.pfncpc[-1].weight[0] = w
                                else:
                                    i.pfncpc[-1].weight[0] = 1
    #self.rec_s1 = h.Vector()
    #self.rec_s1.record(self.cells[0][0].synlist_inh[0]._ref_g)
    #self.rec_s2 = h.Vector()
    #self.rec_s2.record(self.cells[1][0].synlist_inh[0]._ref_g)
def syn_output(self):
    """
    Connect cell n to target cell sum(self.N) + 100.

    Creates one PassiveCell on rank 0 with an (alpha-like) Exp2 synapse and
    wires every cell of the analysed cell type into it, recording the summed
    synaptic conductance in self.rec_g.
    """
    if self.id == 0: # create target cell
        tgt_gid = self.gid_count
        self.gid_count = self.gid_count + 1
        # Synaptic integrated response
        self.rec_g = h.Vector()
        self.passive_target = PassiveCell()
        if self.use_mpi: self.pc.set_gid2node(tgt_gid, 0) # Tell this host it has this gid
        syn = self.passive_target.create_synapses(tau1 = self.syn_tau1, tau2 = self.syn_tau2) # if tau1=tau2: alpha synapse!
        for i in range(self.n_borders[self.a_celltype[0]],self.n_borders[self.a_celltype[0]+1]): # take all cells, corresponding to self.a_celltype, not just the ones in self.gidlist:
            src_gid = i
            if self.use_mpi:
                nc = self.pc.gid_connect(src_gid, syn)
                nc.weight[0] = 1
                nc.delay = self.nc_delay/ms #0.05 # MUST be larger than dt!!!
            else:
                nc = self.cells[self.a_celltype[0]][src_gid].connect_target(target=syn, weight=1, delay=self.nc_delay/ms)
            self.nclist.append(nc)
        self.rec_g.record(syn._ref_g)
def syn_out_all(self, tau1 = 1*ms, tau2 = 30*ms):
    """Attach a synaptic output recorder (start_record) to every cell on this host."""
    if not self.do_run:
        return
    for ct in range(self.n_celltypes):
        for idx, gid in enumerate(self.gidlist[ct]):
            self.cells[ct][idx].start_record(tau1 = tau1/ms, tau2 = tau2/ms)
    self.called_syn_out_all = True
def get_i(self, a, n, do_plot = True):
    """
    Convert target frequencies *a* (Hz) into holding currents (nA) for cell
    type *n* using the cell's i/f curve.

    The curve is estimated once with Stimulation.get_if and cached on disk
    under an md5 hash of the (sigma-stripped) constructor string; later calls
    load the cached pickle. Rank 0 does the file I/O and interpolation
    (if_extrap) and broadcasts the result to all ranks.
    """
    import md5
    m = md5.new()
    # drop any ", sigma..." arguments so noise settings don't change the cache key
    if ", sigma" in self.cell_exe[n]:
        cell_exe_new = self.cell_exe[n].split(", sigma")[0] + ")"
    else:
        cell_exe_new = self.cell_exe[n]
    m.update(cell_exe_new)
    filename = self.data_dir + '/if_' + self.celltype[n] + '_' + m.hexdigest() + '.p'
    #print filename
    # only rank 0 touches the filesystem; result is broadcast
    if self.id == 0:
        is_there = os.path.isfile(filename)
    else:
        is_there = None
    is_there = self.broadcast(is_there)
    if (is_there is not True) or (self.force_run is True): # run i/f estimation
        if self.id == 0: print '- running i/f estimation for ', self.celltype[n], ' id: ' , m.hexdigest()
        exec self.cellimport[n]
        exec cell_exe_new
        sim = Stimulation(cell, temperature = self.temperature, use_multisplit = self.use_multisplit)
        sim.spikes_from_neuron = False
        sim.celltype = self.celltype[n]
        current_vector, freq_vector, freq_onset_vector = sim.get_if(istart = self.istart, istop = self.istop, di = self.di, tstop = self.tstop_if)
        # release NEURON objects before pickling/plotting
        sim = None
        cell = None
        if self.id == 0:
            if do_plot:
                plt.figure(99)
                plt.plot(current_vector, freq_vector, 'r*-')
                plt.plot(current_vector, freq_onset_vector, 'b*-')
                plt.savefig("./figs/dump/latest_if_" + self.celltype[n] + ".pdf", dpi = 300) # save it
                plt.clf()
                #plt.show()
            ifv = {'i':current_vector,'f':freq_vector}
            print ifv
            pickle.dump(ifv, gzip.GzipFile(filename, "wb" ))
        self.barrier()
    else:
        if self.id == 0:
            ifv = pickle.load(gzip.GzipFile(filename, "rb" ))
            #print ifv
        self.barrier()
    if self.id == 0:
        f = ifv.get('f')
        i = ifv.get('i')
        # drop NaN frequency entries before interpolating
        i = i[~isnan(f)]
        f = f[~isnan(f)]
        iin = if_extrap(a, f, i)
    else:
        iin = [0]
    iin = self.broadcast(iin, root=0, fast = True)
    self.barrier()
    return iin
def set_i(self, ihold = [0]):
    """
    Derive per-celltype holding currents, modulation amplitudes (self.amp)
    and noise amplitudes (self.fluct_s) from *ihold*.

    If self.give_freq is set, ihold entries are frequencies (Hz) and are
    converted to currents through the i/f curve (get_i); amod/anoise are then
    applied in the frequency domain. Returns the (possibly converted) ihold.
    """
    ihold = list(ihold)
    self.ihold_orig = list(ihold)
    self.barrier() # wait for other nodes
    # Ihold given as frequency, convert to current
    if ((self.give_freq)):
        ihold0 = [[] for _ in range(self.n_celltypes)]
        for n in range(self.n_celltypes):
            a = np.array([ihold[n]])
            #print "a:", a
            iin = self.get_i(a, n)
            #print "iin:", iin
            ihold0[n] = iin[0]
        if self.id == 0: print '- ihold: ', ihold, 'Hz, => ihold: ', ihold0, 'nA'
    # Modulation depth given, not always applied to current!
    for n in range(self.n_celltypes):
        if self.amod[n] is not None:
            if self.give_freq:
                # Apply to amplitude:
                a = np.array([ihold[n]]) + self.amod[n]*np.array([ihold[n]])
                self.amp[n] = self.get_i(a, n) - ihold0[n]
                if self.id == 0:
                    print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA (' #, self.get_i(a, n), ')'
            elif self.n_syn_ex[n] > 0:
                # amplitude handled later by the spike generators
                if self.id == 0:
                    print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp will be set for each spike generator'
            else:
                self.amp[n] = self.amod[n] * ihold[n]
                if self.id == 0:
                    print '- amp: ihold: ', ihold[n], 'nA , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA'
        # Noise depth given, not always applied to current!
        if self.anoise[n] is not None:
            if (self.give_freq is True) or (self.n_syn_ex[n] > 0):
                # Apply to amplitude:
                a = np.array([ihold[n]]) + self.anoise[n]*np.array([ihold[n]])
                self.fluct_s[n] = ((self.get_i(a, n) - ihold0[n]))/2. # adjust with /2 so that noise = +-2*std
                if self.id == 0:
                    print '- noise: ihold: ', ihold[n], 'Hz , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'
            else:
                self.fluct_s[n] = self.anoise[n] * ihold[n]
                if self.id == 0:
                    print '- noise: ihold: ', ihold[n], 'nA , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'
    if self.give_freq is True:
        ihold = ihold0
    return ihold
def calc_fmean(self, t_vec, t_startstop):
    """
    Firing statistics of one spike train within [t_startstop[0], t_startstop[1]].

    :param t_vec: spike times in seconds (array-like).
    :param t_startstop: two-element window [start, stop] in seconds.
    :return: (mean rate, CV of ISI, std of ISI); (0, nan, nan) when there are
        not at least two spikes inside the window.
    """
    #t_startstop[0] = 1
    #t_startstop[1] = 5
    f_cells_mean = 0
    f_cells_cv = np.nan
    f_cells_std = np.nan
    if len(t_vec) > 0:
        f_start_in = mlab.find(t_vec >= t_startstop[0]) # 1
        f_stop_in = mlab.find(t_vec <= t_startstop[1]) # 5
        if (len(f_start_in) > 0) & (len(f_stop_in) > 0):
            f_start = f_start_in[0]
            f_stop = f_stop_in[-1]+1
            # NeuroTools SpikeTrain expects times in ms
            use_spikes = t_vec[f_start:f_stop]*1e3
            if len(use_spikes) > 1:
                s1 = signals.SpikeTrain(use_spikes)
                isi = s1.isi()
                f_cells_mean = s1.mean_rate() # use mean of single cells
                f_cells_cv = np.std(isi)/np.mean(isi)
                f_cells_std = np.std(isi)
    #f_start_in = mlab.find(t_vec >= 1)
    #f_stop_in = mlab.find(t_vec <= 2)
    #if (len(f_start_in) > 0) & (len(f_stop_in) > 0):
    #    f_start = f_start_in[0]
    #    f_stop = f_stop_in[-1]+1
    #    use_spikes = t_vec[f_start:f_stop]*1e3
    #    if len(use_spikes) > 1:
    #        s1 = signals.SpikeTrain(use_spikes)
    #        isi = s1.isi()
    #        f_cells_cv = np.std(isi)/np.mean(isi)
    return f_cells_mean, f_cells_cv, f_cells_std
def get_fmean(self, t_all_vec_vecn, id_all_vec_vecn, t_startstop, gidlist, facborder = 3): # 1e9
    """
    Per-cell and population firing statistics from pooled spike data.

    Splits the pooled (time, gid) spike arrays by cell, computes each cell's
    mean rate / CV / ISI-std via calc_fmean, gathers across ranks, and on rank
    0 reduces them to population values. Cells firing above
    fmean + facborder * fmstd are collected in gid_del for later removal.

    :return: (fmean, fmax, fmstd, fcvm, fstdm, gid_del, per-cell means,
        per-cell CVs, per-cell ISI stds, fbase, per-cell baseline rates);
        population values are only meaningful on rank 0.
    """
    f_cells_mean = zeros(len(gidlist))
    f_cells_base = zeros(len(gidlist))
    f_cells_std = nans(len(gidlist))
    f_cells_cv = nans(len(gidlist))
    f_cells_gid = nans(len(gidlist))
    fbase = np.nan
    fmean = np.nan
    fmax = np.nan
    fmstd = np.nan
    fcvm = np.nan
    fstdm = np.nan
    f_cells_mean_all = []
    f_cells_base_all = []
    f_cells_cv_all = []
    f_cells_std_all = []
    gid_del = np.array([])
    if self.no_fmean == False:
        if self.id == 0: print "- sorting for fmean"
        for i, l in enumerate(gidlist):
            # spikes of cell l only
            t_0_vec = t_all_vec_vecn[where(id_all_vec_vecn==l)]
            f_cells_mean[i], f_cells_cv[i], f_cells_std[i] = self.calc_fmean(t_0_vec, t_startstop)
            # baseline window: 4 s up to delay_baseline
            f_cells_base[i], _, _ = self.calc_fmean(t_0_vec, [self.delay_baseline-4,self.delay_baseline])
            f_cells_gid[i] = l
        if self.id == 0: print "- gather fmean"
        f_cells_mean_all = self.do_gather(f_cells_mean)
        f_cells_base_all = self.do_gather(f_cells_base)
        f_cells_std_all = self.do_gather(f_cells_std)
        f_cells_cv_all = self.do_gather(f_cells_cv)
        f_cells_gid_all = self.do_gather(f_cells_gid)
        if self.id == 0:
            #print f_cells_mean_all
            f_cells_mean_all = np.nan_to_num(f_cells_mean_all)
            fmean = mean(f_cells_mean_all) # compute mean of mean rate for all cells
            fmstd = std(f_cells_mean_all)
            fmax = max(f_cells_mean_all)
            f_cells_base_all = np.nan_to_num(f_cells_base_all)
            fbase = mean(f_cells_base_all) # compute mean of mean rate for all cells
            f_cells_cv_all = f_cells_cv_all[~np.isnan(f_cells_cv_all)]
            f_cells_std_all = f_cells_std_all[~np.isnan(f_cells_std_all)]
            fcvm = mean(f_cells_cv_all)
            fstdm = mean(f_cells_std_all)
            print "- get_fmean, fmean: ",fmean, "fmax: ",fmax, "Hz", "fmstd: ",fmstd, "Hz", "fcvm: ",fcvm, "fstdm: ",fstdm, "Hz" ,"fbase: ", fbase, "Hz"
            if facborder < 1e9:
                # flag outlier cells (rate above fmean + facborder*std)
                fborder = fmean + facborder*fmstd
                i = mlab.find(f_cells_mean_all > fborder)
                gid_del = f_cells_gid_all[i]
                # f_cells_mean_all[i] = 0
                # f_cells_cv_all[i] = np.nan
                # f_cells_std_all[i] = np.nan
                # fmean2 = mean(np.nan_to_num(f_cells_mean_all)) # compute mean of mean rate for all cells
                # fmstd2 = std(np.nan_to_num(f_cells_mean_all))
                # fmax2 = max(np.nan_to_num(f_cells_mean_all))
                # fcvm2 = mean(f_cells_cv_all[~np.isnan(f_cells_cv_all)])
                # fstdm2 = mean(f_cells_std_all[~np.isnan(f_cells_std_all)])
                # print "- after facborder: get_fmean, fmean: ",fmean2, "fmax: ",fmax2, "Hz", "fmstd: ",fmstd2, "Hz", "fcvm: ",fcvm2, "fstdm: ",fstdm2, "Hz, gid_del: ", gid_del
    return fmean, fmax, fmstd, fcvm, fstdm, gid_del, f_cells_mean_all, f_cells_cv_all, f_cells_std_all, fbase, f_cells_base_all
def connect_fluct(self):
    """
    Create fluctuating input onto every cell.

    Attaches one Ifluct2 current source per local cell, driven by its own
    NEURON Random stream seeded from the cell's gid for reproducibility.
    Amplitude parameters come from self.fluct_m / fluct_s / fluct_tau.
    """
    if self.do_run:
        # drop any previously created sources and their RNGs
        for m in self.flucts:
            del m
        del self.flucts
        for m in self.noises:
            del m
        del self.noises
        self.flucts = []
        self.noises = []
        for n in range(self.n_celltypes):
            for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
                #h.mcell_ran4_init(gid)
                noiseRandObj = h.Random() # provides NOISE with random stream
                self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!!
                # print str(gid) + ": " + str(noiseRandObj.normal(0,1))
                fluct = h.Ifluct2(self.cells[n][i].soma(0.5))
                fluct.m = self.fluct_m/nA # [nA]
                fluct.s = self.fluct_s[n]/nA # [nA]
                fluct.tau = self.fluct_tau/ms # [ms]
                self.flucts.append(fluct) # add to list
                self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!
                self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0
                self.noises[-1].normal(0,1)
def connect_gfluct(self, E_e=0, E_i=-65):
    """
    Create fluctuating conductance input onto every cell.

    Attaches one Gfluct3 point process per local cell (Destexhe-style
    excitatory/inhibitory conductance noise), each with its own gid-seeded
    Random stream. g_i0 may be given per cell (array sized like the global
    gid list of that type) or as a single value.

    :param E_e: excitatory reversal potential [mV].
    :param E_i: inhibitory reversal potential [mV].
    """
    if self.do_run:
        # drop any previously created sources and their RNGs
        for m in self.flucts:
            del m
        del self.flucts
        for m in self.noises:
            del m
        del self.noises
        self.flucts = []
        self.noises = []
        for n in range(self.n_celltypes):
            fluct_g_i0_n = self.fluct_g_i0[n]
            if type(fluct_g_i0_n) is not ndarray: fluct_g_i0_n = np.array([fluct_g_i0_n])
            if len(fluct_g_i0_n) == len(self.global_gidlist[n]):
                pass
            else:
                # scalar given: replicate for every cell of this type
                fluct_g_i0_n = np.ones(int(len(self.global_gidlist[n])))*fluct_g_i0_n[0]
                if self.id == 0: print "- single value in fluct_g_i0_n"
            #print fluct_g_i0_n
            for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
                #h.mcell_ran4_init(gid)
                noiseRandObj = h.Random() # provides NOISE with random stream
                self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!!
                # print str(gid) + ": " + str(noiseRandObj.normal(0,1))
                fluct = h.Gfluct3(self.cells[n][i].soma(0.5))
                fluct.E_e = E_e/mV # [mV]
                fluct.E_i = E_i/mV # [mV]
                fluct.g_e0 = self.fluct_g_e0[n]/uS # [uS]
                # NOTE(review): indexes the globally-sized array with the host-local
                # index i, not the cell's position in global_gidlist -- with more
                # than one MPI rank these need not match; confirm intended.
                fluct.g_i0 = fluct_g_i0_n[i]/uS # [uS]
                fluct.std_e = self.fluct_std_e[n]/uS # [uS]
                fluct.std_i = self.fluct_std_i[n]/uS # [uS]
                fluct.tau_e = self.fluct_tau_e/ms #tau_e/ms # [ms]
                fluct.tau_i = self.fluct_tau_i/ms #tau_i/ms # [ms]
                self.flucts.append(fluct) # add to list
                self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!
                self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0
                self.noises[-1].normal(0,1)
def connect_synfluct(self, PF_BG_rate=6, PF_BG_cv=1, STL_BG_rate=20, STL_BG_cv=1):
    """
    Create fluctuating synaptic input onto every cell.

    Adds background parallel-fibre (PF) and stellate (ST) synapses to each
    local cell via the cell's createsyn_PF / createsyn_ST helpers and drives
    them with NetStims at the given rates and CVs.

    :param PF_BG_rate: PF background rate [Hz].
    :param PF_BG_cv: PF interval CV (1 = Poisson).
    :param STL_BG_rate: stellate background rate [Hz].
    :param STL_BG_cv: stellate interval CV.
    """
    if self.do_run:
        # drop previously created background synapses
        for m in self.ST_stims:
            del m
        del self.ST_stims
        for m in self.PF_stims:
            del m
        del self.PF_stims
        self.ST_stims = []
        self.PF_stims = []
        for n in range(self.n_celltypes):
            for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
                PF_syn_list = self.cells[n][i].createsyn_PF()
                for d in PF_syn_list:
                    d.input.newnetstim.number = 1e9
                    d.input.newnetstim.noise = PF_BG_cv
                    d.input.newnetstim.interval = 1000.0 / PF_BG_rate
                    d.input.newnetstim.start = 0
                self.PF_stims.append(PF_syn_list)
                ST_stim_list = self.cells[n][i].createsyn_ST(record_all=0)
                for d in ST_stim_list:
                    d.newnetstim.number = 1e9
                    d.newnetstim.noise = STL_BG_cv
                    d.newnetstim.interval = 1000.0 / STL_BG_rate
                    d.newnetstim.start = 0
                self.ST_stims.append(ST_stim_list)
        if self.id == 0: print "- PF and ST stimulation added."
def set_IStim(self, ihold = None, ihold_sigma = None, random_start = True, tstart_offset = 0):
    """
    Add (random) ihold for each cell and offset!

    Installs one holding IClamp per local cell (amplitude optionally jittered
    per cell via ihold_sigma or resampled via self.CF_var) and, when
    random_start is set, a brief SEClamp to -80 mV with a random duration so
    cells do not start in phase.
    """
    if self.do_run:
        # if not given, use the one in self
        if ihold == None:
            ihold = self.ihold
        if ihold_sigma == None:
            ihold_sigma = self.ihold_sigma
        if ihold[self.a_celltype[0]] != 0:
            # converts frequency to current if give_freq is set
            ihold = self.set_i(ihold)
        # drop any previously installed clamps
        for m in self.ic_holds:
            #m.destroy()
            del m
        del self.ic_holds
        for m in self.ic_starts:
            #m.destroy()
            del m
        del self.ic_starts
        for m in self.vc_starts:
            #m.destroy()
            del m
        del self.vc_starts
        self.ic_holds = []
        self.ic_starts = []
        self.vc_starts = []
        self.i_holdrs = []
        self.i_holds = ihold
        for n in range(self.n_celltypes):
            self.i_holdrs.append([])
            for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
                # gid-based seed keeps the per-cell randomization reproducible
                np.random.seed(gid*20)
                tis = 1
                if random_start == True:
                    # random start time
                    tstart = np.random.uniform(tstart_offset+0, tstart_offset+0.5)
                    #if self.id == 0: print "tstart:", tstart
                    vc_start = h.SEClamp(self.cells[n][i].soma(0.5))
                    vc_start.dur1 = tstart/ms
                    vc_start.amp1 = -80
                    self.vc_starts.append(vc_start)
                    tis = 0
                else:
                    tis = 0
                if ihold_sigma[n] != 0:
                    #print ihold_sigma[n], ihold[n]
                    ihold_r = np.random.normal(ihold[n], ihold[n]*ihold_sigma[n], 1).clip(min=0)
                    #ihold_r = np.random.uniform(ihold[n]*ihold_sigma[n], ihold[n])
                elif self.CF_var is not False: # CF gets not adapted to current but final frequnecy!
                    # resample target frequency within [CF_var[n][0], CF_var[n][2]]
                    r_ok = False
                    while r_ok == False:
                        r_temp = np.random.normal(self.ihold_orig[n], self.CF_var[n][1], 1)
                        if (r_temp <= self.CF_var[n][2]) and (r_temp >= self.CF_var[n][0]): # check borders!
                            r_ok = True
                    #print r_temp
                    ihold_r = self.get_i(r_temp, n)
                    #print ihold_r
                    #if self.id == 0:
                    # NOTE(review): prints on every rank; the rank-0 guard above is commented out
                    print "set self.CF_var", r_temp, ihold_r
                else: # same ihold for all cells!
                    ihold_r = ihold[n]
                self.i_holdrs[n].append(ihold_r)
                if ihold_r != 0:
                    if hasattr(self.cells[n][i], 'input_vec'):
                        # distribute the holding current over the cell's input sections
                        ic_hold = []
                        for vec in self.cells[n][i].input_vec:
                            for inv in vec:
                                #print ihold_r
                                ic_hold.append(h.IClamp(inv(0.5)))
                                ic_hold[-1].amp = self.cells[n][i].ifac * ihold_r / self.cells[n][i].n_input_spiny / nA
                                ic_hold[-1].delay = tis/ms
                                ic_hold[-1].dur = 1e9
                    else:
                        # holding current
                        ic_hold = h.IClamp(self.cells[n][i].soma(0.5))
                        ic_hold.delay = tis/ms
                        ic_hold.dur = 1e9
                        ic_hold.amp = ihold_r/nA
                    self.ic_holds.append(ic_hold)
        if self.id == 0: print "set_IStim finished. ihold: ", ihold, ", ihold_sigma: ", ihold_sigma
def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):
    """
    Add istep for each cell and offset!

    Installs one step IClamp per local cell. When a holding current was set
    before (self.i_holdrs non-empty), istep is interpreted as the absolute
    target and the clamp amplitude becomes the delta on top of the hold;
    istep == 0 then cancels the hold. Negative istep values are handled by
    converting their magnitude and restoring the sign afterwards.

    :param istep: per-celltype step amplitude (Hz if give_freq, else nA).
    :param istep_sigma: relative spread of the step across cells.
    :param tstep: step onset time [s].
    :param tdur: step duration [s].
    :param give_freq: convert istep from Hz to nA via the i/f curve.
    """
    if self.do_run:
        #for m in self.ic_steps:
        #    m.destroy()
        #    del m
        #del self.ic_steps
        #self.ic_steps = []
        istep = list(istep)
        neg = False
        for n in range(self.n_celltypes):
            if istep[n] < 0:
                neg = True
                istep[n] = abs(istep[n]) # make positive again
            if istep[n] != 0:
                if give_freq is True:
                    # frequency -> current via cached i/f curve
                    a = np.array([istep[n]])
                    iin = self.get_i(a, n)[0]
                    if self.id == 0: print "celltype: ", n, " istep: ", istep[n], "Hz => ", iin, " nA"
                    istep[n] = iin
        for n in range(self.n_celltypes):
            for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
                # gid-based seed keeps the per-cell jitter reproducible
                np.random.seed(gid*30)
                if self.i_holdrs == []:
                    if istep_sigma[n] != 0:
                        istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)
                    else: # same ihold for all cells!
                        istep_r = istep[n]
                else: # ihold has been set!
                    if istep_sigma[n] != 0:
                        istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! put on top of hold!
                    else: # same ihold for all cells!
                        istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!
                if neg:
                    istep_r = -1*istep_r
                if istep[n] == 0:
                    # zero target: cancel this cell's individual holding current
                    istep_r = -1*self.i_holdrs[n][i]
                    #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])
                if istep_r != 0:
                    # step current
                    ic_step = h.IClamp(self.cells[n][i].soma(0.5))
                    ic_step.delay = tstep/ms
                    ic_step.dur = tdur/ms
                    ic_step.amp = istep_r/nA
                    self.ic_steps.append(ic_step)
        if self.id == 0: print "set_IStep finished. istep: ", istep, ", istep_sigma: ", istep_sigma
def set_IPlay(self, stimulus, t):
"""
Initializes values for current clamp to play a signal.
"""
if self.do_run:
for m in self.tvecs:
#m.destroy()
del m
del self.tvecs
for m in self.ivecs:
#m.destroy()
del m
del self.ivecs
for m in self.plays:
#m.destroy()
del m
del self.plays
self.tvecs = []
self.ivecs = []
self.plays = []
for i, gid in enumerate(self.gidlist[self.a_celltype[0]]): # for every cell in the gidlist
tvec = h.Vector(t/ms)
ivec = h.Vector(stimulus/nA)
play = h.IClamp(self.cells[self.a_celltype[0]][i].soma(0.5))
play.delay = 0
play.dur = 1e9
ivec.play(play._ref_amp, tvec, 1)
self.plays.append(play) # add to list
self.tvecs.append(tvec) # add to list
self.ivecs.append(ivec) # add to list
if self.id == 0: print "set_IPlay finished."
    def set_IPlay2(self, stimulus, t):
        """
        Initializes values for current clamp to play a signal.

        Like set_IPlay, but accepts several stimulus channels and serves all
        celltypes listed in self.a_celltype.  For cells exposing an input_vec
        (distributed dendritic inputs) each input location is driven by the
        channel selected via self.syn_ex_dist, scaled by the cell's ifac and
        divided by its number of spiny inputs; otherwise the first channel is
        injected at the soma.

        stimulus -- list of stimulus waveforms (A), one per channel
        t        -- common time vector (s)
        """
        if self.do_run:
            for m in self.tvecs:
                #m.destroy()
                del m
            del self.tvecs
            for m in self.ivecs:
                #m.destroy()
                del m
            del self.ivecs
            for m in self.plays:
                #m.destroy()
                del m
            del self.plays
            self.tvecs = []
            self.ivecs = []
            self.plays = []
            for j in self.a_celltype:
                tvec = h.Vector(t/ms)
                ivec = []
                for s in stimulus:
                    if hasattr(self.cells[j][0], 'input_vec'):
                        # scale for distributed injection over the spiny inputs
                        ivec.append(h.Vector(self.factor_celltype[j] * self.cells[j][0].ifac * s / self.cells[j][0].n_input_spiny / nA))
                    else:
                        ivec.append(h.Vector(self.factor_celltype[j]*s/nA))
                self.tvecs.append(tvec) # add to list
                self.ivecs.append(ivec) # add to list
                for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist
                    if hasattr(self.cells[j][i], 'input_vec'):
                        play = []
                        for iloc, vec in enumerate(self.cells[j][i].input_vec):
                            # channel index for this input location (syn_ex_dist is 1-based)
                            isig = self.syn_ex_dist[j][iloc]-1
                            #print isig
                            for inv in vec:
                                play.append(h.IClamp(inv(0.5)))
                                play[-1].delay = 0
                                play[-1].dur = 1e9
                                ivec[isig].play(play[-1]._ref_amp, tvec, 1)
                    else:
                        #fluctuating current
                        play = h.IClamp(self.cells[j][i].soma(0.5))
                        play.delay = 0
                        play.dur = 1e9
                        ivec[0].play(play._ref_amp, tvec, 1)
                    self.plays.append(play) # add to list
            if self.id == 0: print "set_IPlay2 finished."
    def set_IPlay3(self, stimulus, t, amp = None):
        """
        Initializes values for current clamp to play a signal.

        Single-channel variant: injects stimulus*amp at the soma of every cell
        of the celltypes in self.a_celltype.  When self.factor_celltype[j] is a
        (mean, sigma, invert) tuple, each cell's scale factor is drawn randomly
        (seeded per gid for reproducibility) and optionally sign-flipped with
        50% probability.

        stimulus -- normalized stimulus waveform
        t        -- time vector (s)
        amp      -- per-celltype amplitude scaling; None means 0 (no signal)
        """
        if self.do_run:
            for m in self.tvecs:
                #m.destroy()
                del m
            del self.tvecs
            for m in self.ivecs:
                #m.destroy()
                del m
            del self.ivecs
            for m in self.plays:
                #m.destroy()
                del m
            del self.plays
            self.tvecs = []
            self.ivecs = []
            self.plays = []
            for j in self.a_celltype:
                if amp is None:
                    amp0 = 0
                else:
                    amp0 = amp[j]
                tvec = h.Vector(t/ms)
                self.tvecs.append(tvec) # add to list
                for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist
                    if isinstance(self.factor_celltype[j], ( int, long ) ):
                        # fixed scale factor for the whole celltype
                        ivec = h.Vector(self.factor_celltype[j]*(stimulus*amp0)/nA)
                    else:
                        # randomized per-cell scale factor; seed on gid so the draw
                        # is reproducible and rank-independent
                        np.random.seed(gid*40)
                        rnd.seed(gid*40)
                        if self.factor_celltype[j][1] > 0:
                            f = np.random.normal(self.factor_celltype[j][0], self.factor_celltype[j][1], 1).clip(min=0)
                        else:
                            f = self.factor_celltype[j][0]
                        if self.factor_celltype[j][2] > 0: # add inverted input with 50% probability, in future versions this will indicate the propability for -1 and 1
                            f = rnd.sample([-1,1],1)[0] * f
                            if self.id == 0: print "- inverted input with 50% probability:", f
                        if self.id == 0: print "- randomize play stimulus height"
                        ivec = h.Vector(f*(stimulus*amp0)/nA)
                    self.ivecs.append(ivec) # add to list
                    #fluctuating current
                    play = h.IClamp(self.cells[j][i].soma(0.5))
                    play.delay = 0
                    play.dur = 1e9
                    ivec.play(play._ref_amp, tvec, 1)
                    self.plays.append(play) # add to list
            if self.id == 0: print "set_IPlay3 finished."
def set_PulseStim(self, start_time=[100*ms], dur=[1500*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[500*ms], pulse_len=[500*ms], weight0=1, tau01=[1*ms], tau02=[20*ms], weight1=1, tau11=[0*ms], tau12=[1*ms], noise = 1):
if self.do_run:
modulation_vec = []
for n in range(self.n_celltypes):
t_input = np.arange(0, dur[n], self.dt) # create stimulus time vector has to be in ms!!
mod = np.concatenate(([np.zeros(round(start_time[n]/self.dt)), steadyf[n]*np.ones(round((pulse_start[n]-start_time[n])/self.dt)), pulsef[n]*np.ones(round(pulse_len[n]/self.dt)),steadyf[n]*np.ones(round((dur[n]-pulse_start[n]-pulse_len[n])/self.dt)) ]))
modulation = (t_input, mod)
#print shape(t_input), shape(mod), shape(modulation)
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
if dur[n] > 0:
if self.celltype[n] == 'Grc':
nmf = 4
for j in range(nmf):
self.cells[n][i].createsyn(nmf = 1, ngoc = 0, weight = weight0)
e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
elif self.celltype[n] == 'Goc':
nmf = 53
for j in range(nmf):
self.cells[n][i].createsyn(nmf = 1, weight = weight1)
e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
elif self.celltype[n] == 'Goc_noloop':
ngrc = 100
for j in range(ngrc):
self.cells[n][i].createsyn(ngrc = 1, weight = weight0)
e0 = len(self.cells[n][i].GRC_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed=pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'grc')
else:
pulse_gid = int(self.gid_count + gid*1000 + 100)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.trains.append(train)
setup_Play_train(train = train, input_gid = pulse_gid)
# NMDA
self.cells[n][i].create_synapses(n_ex=1, tau1=tau01[n], tau2=tau02[n])
e0 = len(self.cells[n][i].synlist)-1
weight=weight0[n]
np.random.seed(gid*60)
#weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)
self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)
# AMPA
self.cells[n][i].create_synapses(n_ex=1, tau1=tau11[n], tau2=tau12[n])
e0 = len(self.cells[n][i].synlist)-1
weight=weight1[n]
np.random.seed(gid*60)
#weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)
self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)
modulation = (t_input, mod) # mack to s!
modulation_vec.append(modulation)
return modulation_vec
    def connect_Synapse(self, pulse_gid, nt, i, n, gid, j, syntype = "ex", nsyn=0):
        """
        Connect one spike-train generator (pulse_gid) to one local cell through
        a newly created synapse.

        pulse_gid -- global id of the presynaptic spike generator
        nt        -- celltype index
        i         -- local (per-rank) index of the target cell
        n         -- index of the cell within its population (0..N[nt])
        gid       -- global id of the target cell (used for RNG seeding)
        j         -- synapse index on that cell; j == 0 resets the cell's
                     synapse/NetCon lists from any previous wiring
        syntype   -- "ex" (excitatory), "inh" (inhibitory) or "intr" (intruder)
        nsyn      -- number of synapses (used for Prk parallel-fibre placement)

        The synapse model and its modifications are selected by substring tags
        in self.tau1_ex / self.tau1_inh (e.g. "fit", "stoch", "ampa", "nmda",
        "nopre", "nostd*", "nomggr", "prk", "gaba").  When 'gsyn_in' is in
        self.method_interpol the synaptic conductance is recorded into
        self.record_syn with per-celltype scale factor f.
        """
        if self.do_run:
            if 'gsyn_in' in self.method_interpol:
                # scale factor applied to recorded conductances of this celltype
                if isinstance(self.factor_celltype[nt], ( int, long ) ):
                    f = self.factor_celltype[nt]
                else:
                    f = self.factor_celltype[nt][0]
            if syntype == "ex":
                # each cell can receive different g_syn_ex !
                if type(self.g_syn_ex[nt]) is ndarray:
                    if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):
                        w = self.g_syn_ex[nt][n]
                    else:
                        w = self.g_syn_ex[nt]
                else:
                    w = self.g_syn_ex[nt]
                # deterministic per (cell, synapse) seed for the weight draw
                seed = int(10000 + 10*gid + j)
                np.random.seed(seed*41)
                if self.g_syn_ex_s[nt] > 0:
                    w = np.random.normal(w, w*self.g_syn_ex_s[nt], 1).clip(min=0) # self.g_syn_ex_s[nt]
                if self.celltype[nt] == 'Grc':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].MF_L = []
                        self.cells[nt][i].mfncpc = []
                    if "gr" not in str(self.tau1_ex[nt]):
                        # simple point-process synapses (fitted or exponential)
                        if "amfit" in str(self.tau1_ex[nt]):
                            # fitted AMPA-only kinetics
                            syn = h.ExpZSyn(self.cells[nt][i].soma(0.5))
                            syn.tau1_ampa = 0.254
                            syn.tau2_ampa = 0.254
                            syn.tau3_ampa = 0.363
                            syn.tau4_ampa = 6.523
                            syn.f1_ampa = 8.8376e-05
                            syn.f2_ampa = 5.5257e-05
                            syn.f1_nmda = 0
                        elif "nmfit" in str(self.tau1_ex[nt]):
                            # fitted NMDA-only kinetics
                            syn = h.ExpYSyn(self.cells[nt][i].soma(0.5))
                            syn.f1_ampa = 0
                            syn.f2_ampa = 0
                            syn.tau1_nmda = 1.902
                            syn.tau2_nmda = 82.032
                            syn.f1_nmda = 7.853857483005277e-05
                        elif "fit" in str(self.tau1_ex[nt]):
                            # fitted AMPA + NMDA kinetics
                            syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
                            syn.tau1_ampa = 0.254
                            syn.tau2_ampa = 0.254
                            syn.tau3_ampa = 0.363
                            syn.tau4_ampa = 6.523
                            syn.f1_ampa = 8.8376e-05
                            syn.f2_ampa = 5.5257e-05
                            syn.tau1_nmda = 1.902
                            syn.tau2_nmda = 82.032
                            syn.f1_nmda = 7.853857483005277e-05
                        else:
                            # plain single/double exponential synapse
                            tau1 = self.tau1_ex[nt]
                            tau2 = self.tau2_ex[nt]
                            if tau1 == 0:
                                syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
                                syn.tau = tau2/ms
                            else:
                                syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
                                syn.tau1 = tau1/ms
                                syn.tau2 = tau2/ms
                            syn.e = 0/mV
                        self.cells[nt][i].MF_L.append(syn)
                        e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
                        syn_idx = int(e0)
                        source = int(pulse_gid)
                        self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))
                        self.cells[nt][i].mfncpc[-1].delay = 1
                        self.cells[nt][i].mfncpc[-1].weight[0] = w
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)
                            self.gsyn_in_fac.append(f)
                    else:
                        # full granule-cell mossy-fibre synapse model
                        nrel = 0
                        if "stoch" in str(self.tau1_ex[nt]):
                            nrel = 4
                        self.cells[nt][i].createsyn(nmf = 1, ngoc = 0, weight_gmax = w, nrel=nrel)
                        if "ampa" in str(self.tau1_ex[nt]):
                            # AMPA only: silence the NMDA component
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0
                            if "nopre" in str(self.tau1_ex[nt]):
                                print "- no pre"
                                self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9
                                self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9
                                self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0
                        if "nostdampa" in str(self.tau1_ex[nt]):
                            # no short-term dynamics on AMPA, NMDA silenced
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX = 0
                        if "nostdnmda" in str(self.tau1_ex[nt]):
                            # no short-term dynamics on NMDA, AMPA silenced
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0
                        if "nmda" in str(self.tau1_ex[nt]):
                            # NMDA only: silence the AMPA component
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0
                            if "nopre" in str(self.tau1_ex[nt]):
                                self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9
                                self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9
                                self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0
                        if "nostdgr" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3
                            print "- no std"
                        if "nomggr" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].v0_block = -1e9
                            print "- no mg block"
                        e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
                        self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
                        if 'gsyn_in' in self.method_interpol:
                            # record both conductance components
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)
                            self.gsyn_in_fac.append(f)
                            self.gsyn_in_fac.append(f)
                elif self.celltype[nt] == 'Goc':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].MF_L = []
                        self.cells[nt][i].mfncpc = []
                    if "go" not in str(self.tau1_ex[nt]):
                        # plain exponential synapse at the soma
                        tau1 = self.tau1_ex[nt]
                        tau2 = self.tau2_ex[nt]
                        if tau1 == 0:
                            syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
                            syn.tau = tau2/ms
                        else:
                            syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
                            syn.tau1 = tau1/ms
                            syn.tau2 = tau2/ms
                        syn.e = 0/mV
                        self.cells[nt][i].MF_L.append(syn)
                        e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
                        syn_idx = int(e0)
                        source = int(pulse_gid)
                        self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))
                        self.cells[nt][i].mfncpc[-1].delay = 1
                        self.cells[nt][i].mfncpc[-1].weight[0] = w
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)
                            self.gsyn_in_fac.append(f)
                    else:
                        # full Golgi-cell mossy-fibre synapse model
                        nrel = 0
                        mg = self.mglufac_ex[0]
                        if self.mglufac_ex[1] > 0:
                            # randomized mGluR factor
                            mg = np.random.normal(self.mglufac_ex[0], self.mglufac_ex[1]*self.mglufac_ex[0], 1).clip(min=0) # self.g_syn_ex_s[nt]
                        if "stoch" in str(self.tau1_ex[nt]):
                            nrel = 4
                        self.cells[nt][i].createsyn(nmf = 1, weight_gmax = w, nrel=nrel, mglufac = mg)
                        e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
                        self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)
                            self.gsyn_in_fac.append(f)
                            self.gsyn_in_fac.append(f)
                elif self.celltype[nt] == 'IfCell':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].synlist = []
                        self.cells[nt][i].nc = []
                    if "gr" in str(self.tau1_ex[nt]):
                        # attach the detailed granule-cell glomerular synapse to the IF cell
                        self.cells[nt][i].whatami = "grc"
                        nrel = 0
                        if "stoch" in str(self.tau1_ex[nt]):
                            nrel = 4
                        self.cells[nt][i].MF_L = self.cells[nt][i].synlist
                        self.cells[nt][i].synlist.append(Synapse('glom', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))
                        if "ampa" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].gmax_factor = 0
                            if "nopre" in str(self.tau1_ex[nt]):
                                print "- no pre"
                                self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9
                                self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9
                                self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0
                        if "nmda" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].gmax_factor = 0
                            if "nopre" in str(self.tau1_ex[nt]):
                                self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9
                                self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9
                                self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0
                        if "nostdampa" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
                        if "nostdnmda" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0
                        if "nostdgr" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3
                            print "- no std"
                        if "nomggr" in str(self.tau1_ex[nt]):
                            self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].v0_block = -1e9 #.k_block = 1e-9
                            print "- no mg block"
                        e0 = len(self.cells[nt][i].synlist)-1
                        syn_idx = int(e0)
                        source = int(pulse_gid)
                        self.cells[nt][i].nc.append(self.pc.gid_connect(source, self.cells[nt][i].synlist[syn_idx].input))
                        self.cells[nt][i].nc[-1].delay = 1
                        self.cells[nt][i].nc[-1].weight[0] = 1
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['AMPA'][0]._ref_g)
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['NMDA'][0]._ref_g)
                            self.gsyn_in_fac.append(f)
                            self.gsyn_in_fac.append(f)
                    else:
                        # NOTE: this branch tests str(self.tau1_ex) (the whole list),
                        # not str(self.tau1_ex[nt]) as elsewhere
                        if "amfit" in str(self.tau1_ex):
                            syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
                            syn.tau1_ampa = 0.254
                            syn.tau2_ampa = 0.254
                            syn.tau3_ampa = 0.363
                            syn.tau4_ampa = 6.523
                            syn.f1_ampa = 8.8376e-05
                            syn.f2_ampa = 5.5257e-05
                            syn.f1_nmda = 0
                            self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
                        elif "nmfit" in str(self.tau1_ex):
                            syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
                            syn.f1_ampa = 0
                            syn.f2_ampa = 0
                            syn.tau1_nmda = 1.902
                            syn.tau2_nmda = 82.032
                            syn.f1_nmda = 7.853857483005277e-05
                            self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
                        elif "fit" in str(self.tau1_ex):
                            syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
                            syn.tau1_ampa = 0.254
                            syn.tau2_ampa = 0.254
                            syn.tau3_ampa = 0.363
                            syn.tau4_ampa = 6.523
                            syn.f1_ampa = 8.8376e-05
                            syn.f2_ampa = 5.5257e-05
                            syn.tau1_nmda = 1.902
                            syn.tau2_nmda = 82.032
                            syn.f1_nmda = 7.853857483005277e-05
                            self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
                        else:
                            self.cells[nt][i].create_synapses(n_ex=1, tau1=self.tau1_ex[nt], tau2=self.tau2_ex[nt])
                        e0 = len(self.cells[nt][i].synlist)-1
                        syn_idx = int(e0)
                        self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(e0), syntype='ex', weight=w, delay=1)
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx]._ref_g)
                            self.gsyn_in_fac.append(f)
                elif self.celltype[nt] == 'Prk':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].PF_Lsync = []
                        self.cells[nt][i].spk_nc_pfsync = []
                        self.cells[nt][i].pfrand = []
                        # draw nsyn distinct dendritic locations (partial
                        # Fisher-Yates shuffle, seeded per cell)
                        m = len(self.cells[nt][i].dendrange)
                        seed = int(4*gid)
                        np.random.seed(seed)
                        for k in xrange(nsyn):
                            m -= 1
                            mi = np.random.randint(0, m)
                            self.cells[nt][i].dendrange[mi], self.cells[nt][i].dendrange[m] = self.cells[nt][i].dendrange[m], self.cells[nt][i].dendrange[mi]
                            self.cells[nt][i].pfrand.append(self.cells[nt][i].dendrange[m])
                        #print self.cells[nt][i].pfrand
                    if "prk" not in str(self.tau1_ex[nt]):
                        pass
                    else:
                        # parallel-fibre synapse at the j-th drawn location
                        self.cells[nt][i].PF_Lsync.append(Synapse2('pf',self.cells[nt][i],self.cells[nt][i].pfrand[j],record_all=0))
                        e0 = len(self.cells[nt][i].PF_Lsync)-1 # get number of current synapse!
                        syn_idx = int(e0)
                        self.cells[nt][i].spk_nc_pfsync.append(self.pc.gid_connect(pulse_gid, self.cells[nt][i].PF_Lsync[syn_idx].input.newnetstim))
                        self.cells[nt][i].spk_nc_pfsync[-1].delay = 1
                        self.cells[nt][i].spk_nc_pfsync[-1].weight[0] = 1
                        if 'gsyn_in' in self.method_interpol:
                            self.record_syn.append(h.Vector())
                            self.record_syn[-1].record(self.cells[nt][i].PF_Lsync[-1].postsyns['AMPA'][0]._ref_g)
                            self.gsyn_in_fac.append(f)
            elif syntype == "inh":
                w = self.g_syn_inh[nt]
                # separate seed stream from the excitatory draw
                seed = int(10000 + 10*gid + j)
                np.random.seed(seed*42)
                if self.g_syn_inh_s[nt] > 0:
                    w = np.random.normal(w, w*self.g_syn_inh_s[nt], 1).clip(min=w*0.1) # self.g_syn_inh_s[nt]
                if self.celltype[nt] == 'Grc':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].GOC_L = []
                        self.cells[nt][i].gocncpc = []
                    if "gr" not in str(self.tau1_inh[nt]):
                        # plain inhibitory exponential synapse
                        tau1 = self.tau1_inh[nt]
                        tau2 = self.tau2_inh[nt]
                        if tau1 == 0:
                            syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
                            syn.tau = tau2/ms
                        else:
                            syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
                            syn.tau1 = tau1/ms
                            syn.tau2 = tau2/ms
                        syn.e = -65
                        self.cells[nt][i].GOC_L.append(syn)
                        i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!
                        syn_idx = int(i0)
                        source = int(pulse_gid)
                        self.cells[nt][i].gocncpc.append(self.pc.gid_connect(source, self.cells[nt][i].GOC_L[syn_idx]))
                        self.cells[nt][i].gocncpc[-1].delay = 1
                        self.cells[nt][i].gocncpc[-1].weight[0] = w
                    else:
                        # full Golgi->granule synapse model
                        self.cells[nt][i].createsyn(nmf = 0, ngoc = 1, weight_gmax = w)
                        i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!
                        self.cells[nt][i].pconnect(self.pc,pulse_gid,int(i0),'goc')
                if self.celltype[nt] == 'IfCell':
                    # delete old
                    if j == 0:
                        self.cells[nt][i].synlist_inh = []
                        self.cells[nt][i].nc_inh = []
                    if "gr" in str(self.tau1_inh[nt]):
                        # detailed GABAergic Golgi synapse on the IF cell
                        nrel = 0
                        if "stoch" in str(self.tau1_ex[nt]):
                            nrel = 4
                        self.cells[nt][i].GOC_L = self.cells[nt][i].synlist
                        self.cells[nt][i].whatami = "grc"
                        self.cells[nt][i].synlist_inh.append(Synapse('goc', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))
                        i0 = len(self.cells[nt][i].synlist_inh)-1
                        syn_idx = int(i0)
                        source = int(pulse_gid)
                        self.cells[nt][i].nc_inh.append(self.pc.gid_connect(source, self.cells[nt][i].synlist_inh[syn_idx].input))
                        self.cells[nt][i].nc_inh[-1].delay = 1
                        self.cells[nt][i].nc_inh[-1].weight[0] = 1
                        if "gaba" in str(self.tau1_ex[nt]):
                            if 'gsyn_in' in self.method_interpol:
                                if "nostdgaba" in str(self.tau1_ex[nt]):
                                    # disable all short-term dynamics of the GABA synapse
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_rec = 1e-9
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_facil = 1e-9
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_1 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3_a6 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2_a6 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1_a6 = 0
                                    self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2_a6 = 0
                                self.record_syn.append(h.Vector())
                                self.record_syn[-1].record(self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0]._ref_g)
                                self.gsyn_in_fac.append(f)
                    else:
                        # plain inhibitory synapse via the cell's helper
                        self.cells[nt][i].create_synapses(n_inh=1, tau1_inh=self.tau1_inh[nt], tau2_inh=self.tau2_inh[nt], e_inh=-65)
                        i0 = len(self.cells[nt][i].synlist_inh)-1
                        syn_idx = int(i0)
                        self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(i0), syntype='inh', weight=w, delay=1)
            elif syntype == "intr":
                # intruder synapses: not implemented for Prk yet
                if self.celltype[nt] == 'Prk':
                    pass
def set_SynPlay(self, farray, tarray, N = [], t_startstop = [], amode = 1):
if self.do_run:
delay = 1
if (self.use_pc is False):
delay = 0.1
if N == []:
N = self.N
self.pulse_list = []
self.global_pulse_list = []
self.global_pulse_list_inh = []
self.global_pulse_list_intr = []
f_cells_mean_local = []
f_cells_cv_local = []
f_cells_std_local = []
for nt in range(self.n_celltypes): # loop over all cells
if (self.n_syn_ex[nt] > 0) or (self.n_syn_inh[nt] > 0) or (self.n_syn_intr[nt] > 0):
local_gid_count = 0
local_gid_count_type = []
# EXCITATION
if str(type(self.g_syn_ex[nt] )) is not ndarray: self.g_syn_ex[nt] = np.array([self.g_syn_ex[nt] ]) # each cell can receive different g_syn_ex !
if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):
pass
else:
self.g_syn_ex[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_ex[nt][0]
#print "- single value in g_syn_ex, cells:", len(self.global_gidlist[nt])
self.global_pulse_list.append([])
for ns in range(self.n_syn_ex[nt]): # loop over all excitatory synapses!
self.global_pulse_list[-1].append([])
for n in range(self.syn_max_mf[nt]): # number of cells of this celltype
self.global_pulse_list[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('ex')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
# INHIBITION
if np.array(self.inh_hold[nt]).size <= 1:
self.inh_hold[nt] = np.ones(len(self.global_gidlist[nt]))*self.inh_hold[nt]
#print "- single value in inh_hold", self.inh_hold[nt]
self.global_pulse_list_inh.append([])
for ns in range(self.n_syn_inh[nt]): # loop over all inhibitory synapses!
self.global_pulse_list_inh[-1].append([])
for n in range(self.syn_max_inh[nt]): # number of cells of this celltype
self.global_pulse_list_inh[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('inh')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
# INTRUDER SYNAPSE
if str(type(self.g_syn_intr[nt] )) is not ndarray: self.g_syn_intr[nt] = np.array([self.g_syn_intr[nt] ]) # each cell can receive different g_syn_intr !
if len(self.g_syn_intr[nt]) == len(self.global_gidlist[nt]):
pass
else:
self.g_syn_intr[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_intr[nt][0]
#print "- single value in g_syn_intr, cells:", len(self.global_gidlist[nt])
self.global_pulse_list_intr.append([])
for ns in range(self.n_syn_intr[nt]): # loop over all intruding synapses!
self.global_pulse_list_intr[-1].append([])
for n in range(self.syn_max_intr[nt]): # number of generators for this celltype
self.global_pulse_list_intr[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('intr')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
t_vec_input = np.array([]) # input trains
id_vec_input = np.array([]) # input trains id
fs = 1 / self.dt
ih_use_v = []
for i in range(int(self.id), local_gid_count, int(self.nhost)): # loop over all train generators and generate them
self.pulse_list.append(i+self.gid_count)
pulse_gid = self.pulse_list[-1]
gid = local_gid_count_type[i][1] # should correspond to this gid when multiple values inserted
if local_gid_count_type[i][0] == 'ex':
seed = int(10001 + pulse_gid) # unique gid for generators!
np.random.seed(seed*423)
if self.ihold_sigma[nt] > 0:
ih_use = np.random.normal(self.ihold[nt], self.ihold[nt]*self.ihold_sigma[nt], 1).clip(min=0) # self.ihold[nt]*self.ihold_sigma[nt]
elif self.ihold_sigma[nt] < 0:
ih_use = np.random.uniform(0.1, self.ihold[nt])
else:
ih_use = self.ihold[nt]
ih_use_v.append(ih_use)
if ih_use > 0:
# train has to be contructed here, to insert different train into each "dendrite"
## different ihold has to be implemented here!!
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
if isinstance(self.syn_ex_dist[nt], ( tuple ) ): # distribution of amplitude, only one noise source!
np.random.seed(pulse_gid*40)
if self.syn_ex_dist[nt][1] > 0:
f = np.random.normal(self.syn_ex_dist[nt][0], self.syn_ex_dist[nt][1], 1).clip(min=0)
else:
f = self.syn_ex_dist[nt][0]
f2 = f
rnd.seed(pulse_gid*40) # use gid so type 1, 2 is identical for each cell
#rnd.seed(gid*40) # use gid so type 1, 2 is identical for each cell
if self.syn_ex_dist[nt][2] > 0: # add inverted input with 50% probability, in future versions this will indicate the propability for -1 and 1
f2 = rnd.sample([-1,1],1)[0] * f
#f2 = f
if amode == 1:
inamp = (f2 * self.amod[nt] * ih_use)
elif amode == 2:
inamp = (f2 * self.amod[nt] * self.ihold[nt])
modulation = (tarray, inamp * farray[0] + iholdvec)
#if self.id == 0: print "- randomize play stimulus height, pulse_gid=", pulse_gid, " gid=", gid ," f=", f
if (gid==0): print "- randomize play stimulus height, pulse_gid=", pulse_gid, " gid=", gid ," f2=", f2,"inamp=",inamp
#rnd.seed(local_gid_count_type[i][1]*300) # pick seed based on number of cell
#nj = rnd.sample(range(len(farray)),1)[0]
nj = 1
else: # different noise sources can be used at different synapses, linear combination test in openloop
nj = self.syn_ex_dist[nt][local_gid_count_type[i][2]]
if nj == 0:
modulation = (tarray, iholdvec)
else:
if amode == 1:
inamp = (self.factor_celltype[nt] * self.amod[nt] * ih_use)
elif amode == 2:
inamp = (self.factor_celltype[nt] * self.amod[nt] * self.ihold[nt])
modulation = (tarray, inamp * farray[nj-1] + iholdvec)
if self.id == 0: print "ex farray number:", nj-1, "ih_use:", ih_use, "self.amod[nt]:", self.amod[nt], "inamp: ", inamp
# will be done n_syn_ex * number of cells!
if self.noise_syn_tau[nt] < 0: # variable threshold
no = self.noise_syn[nt]
else:
no = self.noise_syn[nt]*ih_use
train, self.n_train_ex = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau[nt], noise_a = self.noise_a[nt])
#plt.figure("input")
#plt.plot(train, train*0, '|')
#plt.show()
t_vec_input = np.append(t_vec_input, train*ms).flatten() # use ms to save!!
id_vec_input = np.append(id_vec_input, np.ones(len(train))*pulse_gid).flatten()
f_cells_mean_local0, f_cells_cv_local0, f_cells_std_local0 = self.calc_fmean(train*ms, t_startstop)
f_cells_mean_local.append(f_cells_mean_local0); f_cells_cv_local.append(f_cells_cv_local0); f_cells_std_local.append(f_cells_std_local0)
if self.id == 0: print "TRAIN: requ. mean:", ih_use ,"eff. mean:", f_cells_mean_local0, "cv: " , f_cells_cv_local0, "std:" , f_cells_std_local0
else:
train = []
self.n_train_ex = []
elif local_gid_count_type[i][0] == 'intr':
# train has to be contructed here, to insert different train into each "dendrite"
nj = 0
seed = int(10001 + pulse_gid)
np.random.seed(seed*4411)
if self.intr_hold_sigma[nt] > 0:
ih_use = np.random.normal(self.intr_hold[nt], self.intr_hold[nt]*self.intr_hold_sigma[nt], 1).clip(min=0)
else:
ih_use = self.intr_hold[nt]
ih_use_v.append(ih_use)
if ih_use > 0:
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
modulation = (tarray, iholdvec)
# will be done n_syn_in * number of cells!
if self.noise_syn_tau_intr[nt] < 0: # variable threshold
no = self.noise_syn_intr[nt]
else:
no = self.noise_syn_intr[nt]*ih_use
if self.noise_syn_tau_intr[nt] >= -1:
train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_intr[nt], noise_a = self.noise_a_intr[nt]) # train in ms
else:
train = oscill_spike_train(sor = 4, spike_prob = 1/4, noise_fraction = 4, end_time = tarray[-1]/ms, seed = seed)
elif local_gid_count_type[i][0] == 'inh':
# train has to be contructed here, to insert different train into each "dendrite"
seed = int(10001 + pulse_gid)
np.random.seed(seed*44)
if self.inh_hold_sigma[nt] > 0:
ih_use = np.random.normal(self.inh_hold[nt][gid], self.inh_hold[nt][gid]*self.inh_hold_sigma[nt], 1).clip(min=0)
else:
ih_use = self.inh_hold[nt][gid]
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
nj = self.syn_inh_dist[nt][local_gid_count_type[i][2]]
if nj == 0:
modulation = (tarray, iholdvec)
else:
inamp = (self.amod[nt] * ih_use)
modulation = (tarray, inamp * farray[nj-1] + iholdvec)
#print "inh farray number:", nj-1, "ih_use:", ih_use, "amp: ", inamp #old: nj-1+nemax
# will be done n_syn_in * number of cells!
if self.noise_syn_tau_inh[nt] < 0: # variable threshold
no = self.noise_syn_inh[nt]
else:
no = self.noise_syn_inh[nt]*ih_use
train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_inh[nt], noise_a = self.noise_a_inh[nt]) # train in ms
#print train
#print train
if len(train) > 0:
if self.id == 0:
print "-", pulse_gid, local_gid_count_type[i], "seed: ", seed, "ih_use:", ih_use, no, nj #, "first spike: ", train[0]
self.setup_Play_train(train = train+self.inh_delay, input_gid = pulse_gid, delay = delay) # train in ms
self.gid_count += local_gid_count # increase gid count
self.barrier()
for i, gid in enumerate(self.gidlist[nt]): # for all input cells
rnd.seed(gid*200)
n = self.global_gidlist[nt].index(gid) # index of cell within their population 0..N[nt]
# i is index on this node only!
self.record_syn = []
for j in range(self.n_syn_ex[nt]):
if N[nt] == len(self.global_pulse_list[nt][j]):
pulse_gid = self.global_pulse_list[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- gid:", gid ," n:", n ," one ex train for each synapse:", pulse_gid, "self.g_syn_ex[nt][n]:", self.g_syn_ex[nt][n]
else:
pulse_gid = rnd.sample(self.global_pulse_list[nt][j],1)[0] # not enough, just pick one at random, for inh/f search only one synapse available!
if self.id == 0: print "- gid:", gid ," n:", n ," one ex train from", len(self.global_pulse_list[nt][j]), ":", pulse_gid, "self.g_syn_ex[nt][n]:", self.g_syn_ex[nt][n]
if "gaba" in str(self.tau1_ex[nt]):
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "inh")
else:
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "ex", nsyn = self.n_syn_ex[nt])
if self.n_syn_inh[nt] > 0:
for j in range(self.n_syn_inh[nt]):
if N[nt] == len(self.global_pulse_list_inh[nt][j]):
pulse_gid = self.global_pulse_list_inh[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- one inh train for each synapse:", pulse_gid
else:
pulse_gid = rnd.sample(self.global_pulse_list_inh[nt][j],1)[0] # not enough, just pick one at random
if self.id == 0: print "- one inh train from", len(self.global_pulse_list_inh[nt][j]), ":", pulse_gid
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "inh")
if self.n_syn_intr[nt] > 0:
for j in range(self.n_syn_intr[nt]):
if N[nt] == len(self.global_pulse_list_intr[nt][j]):
pulse_gid = self.global_pulse_list_intr[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- one intruding train for each synapse:", pulse_gid
else:
pulse_gid = rnd.sample(self.global_pulse_list_intr[nt][j],1)[0] # not enough, just pick one at random
if self.id == 0: print "- one intruding train from", len(self.global_pulse_list_intr[nt][j]), ":", pulse_gid
if (self.use_pc is False):
if self.celltype[nt] == 'Prk': self.cells[nt][i].delrerun()
(msg,CF_input) = self.cells[nt][i].createsyn_CF(record_all=0,factor=self.g_syn_intr[nt][0],cf_setup_select='old')
CF_input.number = 3 # three bursts
CF_input.start = -0.3 # See synapsepfpurk.py
CF_input.interval = 3 # 3 ms interval between bursts
self.cells[nt][i].input_to_CF_nc.append(h.NetCon(self.vecstim[j], CF_input, 0, 0.1, 1))
self.netcons.append(self.cells[nt][i].input_to_CF_nc[-1])
else:
print "NOT IMPLEMENTED"
if self.id == 0: print "trains connected"
if local_gid_count_type[i][0] == 'intr':
pass
else:
self.id_all_vec_input.append(self.do_gather(id_vec_input, dtype = 'i'))
self.t_all_vec_input.append(self.do_gather(t_vec_input))
f_cells_mean = self.do_gather(f_cells_mean_local)
f_cells_cv = self.do_gather(f_cells_cv_local)
f_cells_std = self.do_gather(f_cells_std_local)
self.fmean_input = np.nan
self.fmax_input = np.nan
self.fmstd_input = np.nan
self.fcvm_input = np.nan
self.fstdm_input = np.nan
ih_use_v_all = self.do_gather(ih_use_v)
if self.id == 0 and local_gid_count_type[i][0] != 'intr':
self.fmean_input = mean(np.nan_to_num(f_cells_mean)) # compute mean of mean rate for all cells
self.fmstd_input = std(np.nan_to_num(f_cells_mean))
self.fmax_input = max(np.nan_to_num(f_cells_mean))
self.fcvm_input = mean(f_cells_cv[~np.isnan(f_cells_cv)])
self.fstdm_input = mean(f_cells_std[~np.isnan(f_cells_std)])
self.ih_use_max = max(ih_use_v_all)
print "- trains, fmean: ",self.fmean_input, "fmax: ",self.fmax_input, "Hz", "fmstd: ",self.fmstd_input, "Hz", "fcvm: ",self.fcvm_input, "fstdm: ",self.fstdm_input, "Hz, ih_use_max:", self.ih_use_max
else:
self.global_pulse_list.append([])
self.global_pulse_list_inh.append([])
def do_gather(self, v_local, dtype = 'd'):
    """Gather a per-rank array onto the root node (id 0).

    Flattens ``v_local`` and, when MPI is active, concatenates the
    contributions of all ranks on node 0. Without MPI the local data
    is simply flattened and returned.

    v_local -- node-local values to collect (any array-like).
    dtype   -- numpy dtype code used for the transfer: 'd' or 'i'.

    Returns the concatenated array on rank 0; on the other ranks the
    mpi4py path returns None.
    """
    if not self.use_mpi:
        # Serial run: nothing to exchange between ranks.
        return np.hstack(v_local)

    self.barrier()
    local_flat = np.array(v_local, dtype=dtype).flatten()

    if not self.use_pc:
        # mpi4py path: first gather the per-rank lengths, then collect
        # the variable-sized payloads with Gatherv onto rank 0.
        gathered = None
        n_local = np.array(len(local_flat), dtype='i')
        counts = 0
        if self.id == 0:
            counts = np.empty(self.nhost, dtype='i')
        self.comm.Gather(sendbuf=[n_local, MPI.INT], recvbuf=[counts, MPI.INT], root=0)
        if self.id == 0:
            gathered = np.empty(sum(counts), dtype=dtype)
        if dtype == 'd':
            self.comm.Gatherv(sendbuf=[local_flat, MPI.DOUBLE], recvbuf=[gathered, (counts, None), MPI.DOUBLE], root=0)
        elif dtype == 'i':
            self.comm.Gatherv(sendbuf=[local_flat, MPI.INT], recvbuf=[gathered, (counts, None), MPI.INT], root=0)
        return gathered

    # NEURON ParallelContext path: every rank ships its data to rank 0
    # via py_alltoall; rank 0 stacks the received pieces.
    sendlist = [None] * self.nhost
    sendlist[0] = local_flat
    getlist = self.pc.py_alltoall(sendlist)
    return np.hstack(getlist)
def setup_Play_train(self, train = None, input_gid = 0, delay = 1):
    """Attach a precomputed spike train as a playable source.

    Creates a NEURON VecStim that replays ``train`` (spike times in ms)
    through a NetCon with the given ``delay``. Under MPI the source is
    registered with the ParallelContext under ``input_gid`` so cells on
    any node can connect to it.

    train     -- list/array of spike times in ms; defaults to an empty
                 train. Changed from a mutable default ([]) to a None
                 sentinel to avoid the shared-default-list pitfall;
                 behavior for callers is unchanged.
    input_gid -- global id to register this source under (MPI only).
    delay     -- NetCon delay in ms.
    """
    if train is None:
        train = []
    self.trains.append(train)
    # possibility to play spikes into the cells!
    self.vecstim.append(h.VecStim(.5))
    self.nc_vecstim.append(h.NetCon(self.vecstim[-1],None))
    self.nc_vecstim[-1].delay = delay
    self.spike_vec.append(h.Vector(self.trains[-1]))
    self.vecstim[-1].play(self.spike_vec[-1])
    if (self.use_mpi):
        self.pc.set_gid2node(input_gid, self.id) # associate gid with this host
        self.pc.cell(input_gid,self.nc_vecstim[-1]) # associate gid with spike detector
def record(self):
    """
    Initializes recording vectors. Internal function.
    Sets up NEURON h.Vector recordings for: one soma voltage per
    celltype (on whichever node owns that cell), and -- on the root
    node only -- the first cell's voltage, the stimulus/holding/step
    currents, the noise signal and the simulation time.
    """
    if self.n_celltypes > 1:
        #print "self.n_borders:",self.n_borders
        # Record the soma voltage of the first cell of each celltype on
        # the node that actually owns that gid.
        for n in range(self.n_celltypes):
            if self.n_borders[n] in self.gidlist[n]:
                #print "np.shape(self.rec_v):",np.shape(self.rec_v)
                #print "np.shape(self.cells):",np.shape(self.cells)
                self.rec_v[n].record(self.cells[n][0].soma(0.5)._ref_v)
    if self.id == 0: # only for first node and first cell
        # Voltage
        self.rec_v[0].record(self.cells[self.a_celltype[0]][0].soma(0.5)._ref_v)
        # Stimuli
        self.rec_i = h.Vector()
        if (self.plays != []):
            # self.plays[0] may be a single IClamp-like object or a list of them
            if (isinstance(self.plays[0], list) is False):
                self.rec_i.record(self.plays[0]._ref_i)
            else:
                self.rec_i.record(self.plays[0][0]._ref_i)
        # Holding current (only recordable when it is a single object)
        self.rec_ich = h.Vector()
        if self.ic_holds != [] and (isinstance(self.ic_holds[0], list) is False):
            self.rec_ich.record(self.ic_holds[0]._ref_i)
        # Start-transient current
        self.rec_ics = h.Vector()
        if self.ic_starts != []:
            self.rec_ics.record(self.ic_starts[0]._ref_i)
        self.rec_n = h.Vector()
        if self.fluct_s[0] > 0:
            # Fluctuating input
            self.rec_n.record(self.flucts[0]._ref_i)
            print "recording noise"
        elif (len(self.flucts) > 0) and (len(self.fluct_g_i0)>0):
            # Conductance noise instead of current noise
            self.rec_n.record(self.flucts[0]._ref_g_i)
            print "recording g noise"
        else:
            print "nonoise"
        # Tonic GABA leak noise, if the cell model provides it
        if hasattr(self.cells[self.a_celltype[0]][0], 'lkg2_noise'):
            if self.cells[self.a_celltype[0]][0].lkg2_noise > 0:
                self.rec_n.record(self.cells[self.a_celltype[0]][0].fluct._ref_il)
                print "recording tonic gaba noise"
        # Step current
        self.rec_step = h.Vector()
        if self.ic_steps != []:
            self.rec_step.record(self.ic_steps[0]._ref_i)
        # Time
        self.rec_t = h.Vector()
        self.rec_t.record(h._ref_t)
def run(self, tstop = 10*s, do_loadstate = True):
    """
    Starts the stimulation.
    Performs one-time NEURON setup on the first call (multisplit or
    stdrun, dt/temperature, v_init, optional saved-state loading) and
    then integrates to ``tstop``, either in a single psolve/run call
    (simstep == 0) or in chunks of ``self.simstep`` with progress
    output.
    tstop        -- simulation end time in seconds (converted with /ms).
    do_loadstate -- load previously saved cell states if the cell model
                    supports it.
    """
    self.record()
    if self.first_run:
        # One-time setup, skipped on subsequent calls.
        if self.use_mpi: self.pc.set_maxstep(100)
        #self.pc.spike_compress(1) #test
        if self.use_multisplit:
            import multiprocessing
            # Disable variable-step integration; multisplit needs fixed step.
            Hines = h.CVode()
            Hines.active(0)
            h.load_file("parcom.hoc")
            p = h.ParallelComputeTool()
            if self.use_mpi:
                cpus = multiprocessing.cpu_count() #32 #self.pc.nhost()
            else:
                cpus = multiprocessing.cpu_count() #32
            p.change_nthread(cpus,1)
            p.multisplit(1)
            print "Using multisplit, cpus:", cpus
        else:
            h.load_file("stdrun.hoc")
        if self.use_local_dt:
            h.cvode.active(1)
            h.cvode.use_local_dt(1)
        h.celsius = self.temperature
        h.dt = self.dt/ms # Fixed dt
        h.steps_per_ms = 1 / (self.dt/ms)
        if self.cells[self.a_celltype[0]] != []:
            if hasattr(self.cells[self.a_celltype[0]][0], 'v_init'):
                h.v_init = self.cells[self.a_celltype[0]][0].v_init # v_init is supplied by cell itself!
            else:
                h.v_init = -60
        h.stdinit()
        h.finitialize()
        if hasattr(self.cells[self.a_celltype[0]][0], 'load_states') and do_loadstate:
            # State file name is keyed on an md5 hash of the cell setup code.
            # NOTE(review): uses the legacy Python 2 `md5` module, which is
            # not imported in the visible file header -- presumably imported
            # elsewhere; verify (hashlib.md5 is the modern equivalent).
            m = md5.new()
            cell_exe_new = self.cell_exe[0]
            m.update(cell_exe_new)
            filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'
            self.cells[self.a_celltype[0]][0].load_states(filename)
        else:
            pass
    if self.id == 0:
        import time
        t0 = time.time()
    if self.simstep == 0:
        # Single uninterrupted integration to tstop.
        if self.id == 0: print "Running without steps",
        if self.use_mpi:
            self.pc.psolve(tstop/ms)
        else:
            h.init()
            h.tstop = tstop/ms
            h.run()
    else:
        # Chunked integration: advance simstep at a time, printing progress.
        h.finitialize()
        cnt = 1
        #if self.id == 50:
        #    print len(self.cells[1][0].nc), self.cells[1][0].nc[0].weight[0]
        #    print len(self.cells[0][0].nc_inh), self.cells[0][0].nc_inh[0].weight[0]
        h.t = 0
        while h.t < tstop/ms:
            if self.id == 0:
                print "Running...",
                if self.use_mpi:
                    past_time = self.pc.time()
            h.continuerun(cnt*self.simstep/ms)
            if self.use_mpi: self.pc.barrier()
            if self.id == 0:
                if self.use_mpi:
                    print "Simulated time =",h.t*ms, "s, Real time = ", (self.pc.time()-past_time), 's'
                else:
                    print "Simulated time =",h.t*ms, "s"
            #if self.id == 0:
            #    print hpy.heap().byrcs
            cnt += 1
    if self.id == 0: print "psolve took ", time.time() - t0, "seconds"
    self.first_run = False
    self.barrier() # wait for other nodes
    self.tstop = tstop
def get(self, t_startstop=[], i_startstop=[], N = []):
    """
    Gets the recordings.
    Gathers spike times, voltage, current and conductance traces from
    all nodes onto the root node (id 0), computes per-celltype and
    population rate statistics, and returns everything as a dict.
    On non-root nodes most returned entries remain empty.
    t_startstop -- [t_start, t_stop] analysis window in s; defaults to
                   [2, self.tstop].
    i_startstop -- unused here, kept for interface compatibility.
    N           -- cell counts per celltype; defaults to self.N.
    NOTE(review): the mutable default arguments are only read, never
    mutated in place, so they are safe in this method.
    """
    if N == []:
        N = self.N
    if t_startstop == []:
        t_startstop = np.array([2, self.tstop])
    # Per-celltype accumulators for spikes and rate statistics.
    t_all_vec = []
    id_all_vec = []
    fmean = []
    fbase = []
    fmax = []
    fmstd = []
    fcvm = []
    fstdm = []
    gid_del = []
    f_cells_mean_all = []
    f_cells_base_all = []
    f_cells_cv_all = []
    f_cells_std_all = []
    # "A" suffix = aggregated over ALL celltypes (root only).
    fmeanA = []
    fmstdA = []
    fmaxA = []
    fcvmA = []
    fstdmA = []
    fbaseA = []
    fbstdA = []
    if self.id == 0: print "start gathering spikes"
    for n in range(self.n_celltypes):
        # Collect this node's spike times/ids for celltype n, then gather
        # them onto the root.
        if self.use_mpi:
            self.barrier() # wait for other node
            t_vec = np.array(self.t_vec[n]).flatten()*ms - 1*ms # shift time because of output delay
            id_vec = np.array(self.id_vec[n]).flatten()
        else:
            # Serial: one spike vector per gid, concatenate them.
            t_vec = np.array([])
            id_vec = np.array([])
            print np.shape(self.t_vec)
            for i in self.gidlist[n]:
                t_vec0 = np.array(self.t_vec[n][i]).flatten()*ms
                t_vec = np.append(t_vec, t_vec0).flatten()
                id_vec = np.append(id_vec, np.ones(len(t_vec0))*i).flatten()
        # Node-local rate statistics for this celltype.
        fmean0, fmax0, fmstd0, fcvm0, fstdm0, gid_del0, f_cells_mean_all0, f_cells_cv_all0, f_cells_std_all0, fbase0, f_cells_base_all0 = self.get_fmean(t_vec, id_vec, t_startstop = t_startstop, gidlist = self.gidlist[n])
        fmean.append(fmean0); fmax.append(fmax0), fmstd.append(fmstd0), fcvm.append(fcvm0), fstdm.append(fstdm0), gid_del.append(gid_del0), f_cells_mean_all.append(f_cells_mean_all0), f_cells_cv_all.append(f_cells_cv_all0), f_cells_std_all.append(f_cells_std_all0)
        fbase.append(fbase0); f_cells_base_all.append(f_cells_base_all0)
        t_all_vec.append(self.do_gather(t_vec))
        id_all_vec.append(self.do_gather(id_vec))
    if (self.id == 0) and (self.no_fmean == False):
        # Aggregate rate statistics over all celltypes on the root.
        f_cells_mean_all = np.array(f_cells_mean_all).flatten()
        fmeanA = mean(np.nan_to_num(f_cells_mean_all)) # compute mean of mean rate for all cells
        fmstdA = std(np.nan_to_num(f_cells_mean_all))
        fmaxA = max(np.nan_to_num(f_cells_mean_all))
        f_cells_base_all = np.array(f_cells_base_all).flatten()
        fbaseA = mean(f_cells_base_all) # compute mean of mean rate for all cells
        fbstdA = std(f_cells_base_all)
        f_cells_cv_all = np.concatenate((np.array(f_cells_cv_all)))
        f_cells_std_all = np.concatenate((np.array(f_cells_std_all)))
        fcvmA = mean(f_cells_cv_all)
        fstdmA = mean(f_cells_std_all)
        print "- ALL, fmean: ",fmeanA, "fmax: ",fmaxA, "Hz", "fmstd: ",fmstdA, "Hz", "fcvm: ",fcvmA, "fstdm: ",fstdmA, "Hz", "fbase: ",fbaseA, "Hz", "fbstd: ", fbstdA, "Hz"
    if self.id == 0: print "all spikes have been gathered"
    self.barrier()
    # do this here to have something to return
    voltage = []
    current = []
    time = []
    freq_times = []
    spike_freq = []
    gsyn = []
    if self.id == 0: # only for first node
        time = np.array(self.rec_t)*ms
        # use self.bin_width as bin width!
        freq_times = arange(0, time[-1], self.bin_width)
        voltage.append(np.array(self.rec_v[0])*mV)
        # Sum all recorded current sources into a single trace.
        current = np.zeros(len(time))
        if len(np.array(self.rec_ics)) > 0:
            current = current + np.array(self.rec_ics)
        if len(np.array(self.rec_ich)) > 0:
            current = current + np.array(self.rec_ich)
        if len(np.array(self.rec_i)) > 0:
            current = current + np.array(self.rec_i)
        if len(np.array(self.rec_n)) > 0:
            current = current + np.array(self.rec_n)
            print np.array(self.rec_n)
        if len(np.array(self.rec_step)) > 0:
            current = current + np.array(self.rec_step)
    else:
        time = [0]
    self.barrier()
    # All nodes need the time vector below.
    time = self.broadcast(time, fast = True)
    # Summed recorded input conductance (only when requested).
    gsyn_in = []
    gsyn_in0 = []
    if 'gsyn_in' in self.method_interpol:
        gsyn_in = None
        if self.id == 0: print "- collecting gsyn_in"
        gsyn_in0 = np.zeros(len(time), dtype='d')
        # NOTE(review): `is not []` is always True (identity, not equality);
        # presumably `!= []` was intended, but the loop body is a no-op for
        # an empty record_syn anyway.
        if self.record_syn is not []:
            for i, j in enumerate(self.record_syn):
                gsyn_in0 = gsyn_in0 + self.gsyn_in_fac[i] * np.array(j, dtype='d')
        if self.use_mpi:
            count = len(time)
            #if self.id == 0: gsyn_in = np.empty(count*self.nhost, dtype='d')
            #self.comm.Gatherv(sendbuf=[gsyn_in0, MPI.DOUBLE], recvbuf=[gsyn_in, MPI.DOUBLE], root=0)
            gsyn_in = self.do_gather(gsyn_in0)
            if self.id == 0:
                # Sum the per-node conductance traces.
                gsyn_in = np.reshape(gsyn_in, (self.nhost,count))
                gsyn_in = sum(gsyn_in,0)
        else:
            gsyn_in = gsyn_in0
    self.barrier() # wait for other nodes
    if self.n_celltypes > 1:
        if self.id == 0: print "more than one celltype send voltage of first other cell to root"
        for n in range(1, self.n_celltypes):
            if self.use_pc == True:
                # ParallelContext transfer: owner puts its trace into slot 0.
                srclist = [None]*self.nhost
                if (self.n_borders[n] in self.gidlist[n]):
                    srclist[0] = np.array(self.rec_v[n])*mV
                destlist = self.pc.py_alltoall(srclist)
                if self.id == 0:
                    idx = [i for i, x in enumerate(destlist) if x is not None]
                    if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')
                    voltage.append(np.array(destlist[idx[0]]))
            else:
                # mpi4py transfer with explicit Recv/Ssend.
                if self.id == 0:
                    if (self.n_borders[n] in self.gidlist[n]): # first node has it, do not wait to receive it!
                        v_temp = np.array(self.rec_v[n])*mV
                    else:
                        v_temp = np.zeros(len(voltage[0]))
                        self.comm.Recv([v_temp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(sum(N)+33))
                    voltage.append(v_temp)
                else:
                    if self.n_borders[n] in self.gidlist[n]:
                        voltage = np.array(self.rec_v[n])*mV
                        self.comm.Ssend([voltage, MPI.DOUBLE], dest=0, tag=int(sum(N)+33))
    self.barrier() # wait for other nodes
    # Resample synaptic output conductances to a 1 ms grid and gather
    # them per gid onto the root.
    times = arange(0, time[-1], 1*ms)
    gsyns = []
    if self.called_syn_out_all == True:
        for n in range(self.n_celltypes):
            gsyns.append([])
            if self.use_pc == True:
                for i, gid in enumerate(self.global_gidlist[n]):
                    srclist = [None]*self.nhost
                    if gid in self.gidlist[n]: #only one node does this
                        a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
                        # Pad by 1 ms then drop the first 1 ms to undo the recording delay.
                        c = np.zeros(int((1*ms)/self.dt))
                        temp = np.append(a, c).flatten()
                        temp = temp[int((1*ms)/self.dt):len(temp)+1]
                        gtemp = interp(times,time,temp)
                        srclist[0] = gtemp # send to root only
                    destlist = self.pc.py_alltoall(srclist)
                    if self.id == 0:
                        idx = [i for i, x in enumerate(destlist) if x is not None]
                        if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')
                        gsyns[n].append(np.array(destlist[idx[0]]))
            else:
                for i, gid in enumerate(self.global_gidlist[n]):
                    if self.id == 0:
                        if gid in self.gidlist[n]:
                            a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
                            c = np.zeros(int((1*ms)/self.dt))
                            temp = np.append(a, c).flatten()
                            temp = temp[int((1*ms)/self.dt):len(temp)+1]
                            gtemp = interp(times,time,temp)
                        else:
                            gtemp = np.zeros(len(times))
                            self.comm.Recv([gtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
                        gsyns[n].append(np.array(gtemp))
                    else:
                        if gid in self.gidlist[n]:
                            a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
                            c = np.zeros(int((1*ms)/self.dt))
                            temp = np.append(a, c).flatten()
                            temp = temp[int((1*ms)/self.dt):len(temp)+1]
                            gtemp = interp(times,time,temp)
                            #np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
                            self.comm.Ssend([gtemp, MPI.DOUBLE], dest=0, tag=int(gid))
        if self.id == 0: print "root gathered synaptic output conductance"
    self.barrier() # wait for other nodes
    # Gather STDP weight traces (excitatory and inhibitory), resampled
    # to a 10 ms grid, per gid onto the root.
    times = arange(0, time[-1], 10*ms)
    w_mat = []
    winh_mat = []
    if self.stdp_used == True:
        for n in range(self.n_celltypes):
            w_mat.append([])
            for i, gid in enumerate(self.global_gidlist[n]):
                if self.id == 0:
                    wall = []
                    if gid in self.gidlist[n]:
                        walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w']
                        if len(walltemp) > 0:
                            for l in range(len(walltemp)):
                                wtemp = np.array(walltemp[l])
                                wtemp = interp(times,time,wtemp)
                                wall.append(wtemp)
                    else:
                        # Receive traces until the -1 sentinel vector arrives.
                        while 1:
                            wtemp = np.zeros(len(times))
                            self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
                            if wtemp[0] == -1:
                                break
                            else:
                                wall.append(wtemp)
                    w_mat[n].append(wall)
                else:
                    if gid in self.gidlist[n]:
                        walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w']
                        if len(walltemp) > 0:
                            for l in range(len(walltemp)):
                                wtemp = np.array(walltemp[l])
                                wtemp = interp(times,time,wtemp)
                                self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
                        # Terminate this gid's stream with a -1 sentinel.
                        wtemp = np.ones(len(times))*-1
                        self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
        if self.id == 0:
            print "root gathered synaptic input conductance"
        self.barrier() # wait for other nodes
        # Same protocol for the inhibitory weights ('w_inh').
        for n in range(self.n_celltypes):
            winh_mat.append([])
            for i, gid in enumerate(self.global_gidlist[n]):
                if self.id == 0:
                    wall = []
                    if gid in self.gidlist[n]:
                        walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh']
                        if len(walltemp) > 0:
                            for l in range(len(walltemp)):
                                wtemp = np.array(walltemp[l])
                                wtemp = interp(times,time,wtemp)
                                wall.append(wtemp)
                    else:
                        while 1:
                            wtemp = np.zeros(len(times))
                            self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
                            if wtemp[0] == -1:
                                break
                            else:
                                wall.append(wtemp)
                    winh_mat[n].append(wall)
                else:
                    if gid in self.gidlist[n]:
                        walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh']
                        if len(walltemp) > 0:
                            for l in range(len(walltemp)):
                                wtemp = np.array(walltemp[l])
                                wtemp = interp(times,time,wtemp)
                                self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
                        wtemp = np.ones(len(times))*-1
                        self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
        if self.id == 0:
            print "root gathered synaptic input conductance"
    self.barrier() # wait for other nodes
    # Root-only post-processing: sort spikes, optionally jitter/shift
    # them, and build the binned population rate (and gsyn surrogate).
    t_all_vec_vec = []
    id_all_vec_vec = []
    f_cells_mean = []
    if self.id == 0: # only for first node
        for n in range(self.n_celltypes):
            ie = argsort(t_all_vec[n])
            t_all_vec_vec.append( t_all_vec[n][ie] )
            id_all_vec_vec.append( id_all_vec[n][ie].astype(int) ) #
        print "all spikes have been sorted"
        if self.jitter > 0: # add jitter!
            np.random.seed(40)
            x = np.random.normal(0, self.jitter, len(t_all_vec_vec[self.a_celltype[0]]))
            t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + x
        if self.delta_t > 0:
            t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + self.delta_t
        gsyn = zeros(len(freq_times))
        if 'gsyn_in' in self.method_interpol:
            pass
        else:
            bvec = ["syn" in st for st in self.method_interpol]
            if np.any(bvec):
                # Build gsyn by convolving the spike histogram with a
                # synaptic kernel, unless a passive target already did it.
                if (not hasattr(self, 'passive_target')) | (self.jitter > 0): # if not already done in neuron via artificial cell
                    [resp, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[self.a_celltype[0]], bins = freq_times)
                    resp = np.concatenate((zeros(1),resp))
                    Ksyn = syn_kernel(arange(0,10*self.syn_tau2,self.bin_width), self.syn_tau1, self.syn_tau2)
                    Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))
                    gsyn = np.convolve(Ksyn, resp, mode='same')
                    print "Generated gsyn by convolution with Ksyn"
                    self.nc_delay = 0
                else:
                    gsyn = interp(freq_times,time,np.array(self.rec_g))
        # Weighted, binned population firing rate over the analysed celltypes.
        spike_freq = np.zeros(len(freq_times))
        for j in self.a_celltype:
            #plt.figure('results_voltage')
            #ax99 = plt.subplot(2,1,1)
            #ax99.plot(time,voltage[j])
            #plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
            #plt.savefig("./figs/Pub/Voltage_" + str(self.pickle_prefix) + "_cell" + str(j) + "_N" + str(self.N[j]) + ".pdf", dpi = 300, transparent=True) # save it
            #plt.show()
            #plt.clf()
            [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)
            if isinstance(self.factor_celltype[j], ( int, long ) ):
                f = self.factor_celltype[j]
            else:
                f = self.factor_celltype[j][0]
            spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width
    self.barrier() # wait for other nodes
    #figure('1')
    #plot(time,np.array(self.rec_s1),'b', time,np.array(self.rec_s2),'r')
    #plt.show()
    return {'time':time, 'voltage':voltage, 'current':current, 'fmean':fmean, 'f_cells_mean':f_cells_mean,
            'gsyn':gsyn, 'freq_times':freq_times, 'spike_freq':spike_freq, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fstdmA':fstdmA, 'fbstdA':fbstdA,
            't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec, 'gsyns':gsyns, 'w_mat':w_mat, 'winh_mat':winh_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'fbaseA':fbaseA, 'fbase':fbase}
def clean(self):
    """
    Shut down the NEURON ParallelContext.
    runworker()/done() end the master/worker phase so the program can
    exit cleanly; call once after all simulations are finished.
    """
    self.pc.runworker()
    self.pc.done()
def compute_Transfer(self, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor=[1]):
    """
    Estimate the transfer function between the (mixed) input signal
    and the recorded population response.
    Builds the effective stimulus as a weighted sum of the per-channel
    noise signals (+ for excitatory, - for inhibitory distributions,
    weighted by inh_factor), normalizes it, and delegates to the
    module-level compute_Transfer() helper (star-imported; this method
    intentionally shadows it by name).
    inh_factor -- per-channel signal weights; default [1]. The default
                  list is never mutated (only rebound), so it is safe.
    Returns the result dict of the module-level compute_Transfer().
    """
    stimulus0 = np.zeros(len(stimulus[0]))
    for a in self.a_celltype:
        # sum input to produce linear input that should be reconstructed!
        # NOTE(review): `any(...) > 0` compares a bool to 0; works, but
        # `any(...)` alone was presumably intended.
        if (any(self.syn_inh_dist) > 0) and (any(self.syn_ex_dist) > 0):
            if max(self.syn_inh_dist) == max(self.syn_ex_dist): # same signal through ex and inh
                print "inh_factor = [0,1]"
                inh_factor = [0,1]
        # Excitatory channels add their signal, inhibitory channels
        # subtract theirs; channel 0 means "no signal".
        for ni in self.syn_ex_dist[a]:
            if ni != 0:
                stimulus0 += inh_factor[ni-1] * stimulus[ni-1]
                print "+ex:", ni-1
        for ni in self.syn_inh_dist[a]:
            if ni != 0:
                stimulus0 -= inh_factor[ni-1] * stimulus[ni-1] #old: +nemax
                print "-inh:", ni-1 #old: +nemax
        # No synaptic input at all means the stimulus was injected as current.
        if (max(self.n_syn_ex) == 0) and (max(self.n_syn_inh) == 0):
            stimulus0 += stimulus[0]
            print "current"
    #if self.n_syn_ex[self.celltype_syn[0]] == 0:
    #    stimulus0 += stimulus[0]
    # amplitude should not matter since filter amplitude is simply adjusted
    #stimulus = stimulus0 #/len(self.syn_ex_dist)
    stimulus0 = stimulus0 / std(stimulus0) / 2
    # linear interpolation inside compute_Transfer !!!
    print "max(stimulus0):",max(stimulus0)
    results = compute_Transfer(spike_freq = spike_freq, freq_times = freq_times,
                               stimulus = stimulus0, t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, do_csd = do_csd, t_kernel = 1*s,
                               method_interpol = self.method_interpol, nc_delay = self.nc_delay, w_length = 3, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, give_psd = self.give_psd) # freq_wp not defined, use all frequencies
    # TEST:
    #VAF = results.get('VAFf_mat')
    #freq_used = results.get('freq_used')
    #iend = mlab.find(freq_used >= self.xmax)[0]
    #err = 1-mean(VAF[1][0,1:iend-1])
    #print "err: ", err
    return results
def residuals_compute_Transfer(self, p, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor):
inh_factor_in = inh_factor[:]
ip = 0
for i, inhf in enumerate(inh_factor_in):
if inhf < 0:
inh_factor_in[i] = p[ip]
ip += 1
results = self.compute_Transfer(stimulus = stimulus, spike_freq = spike_freq, freq_times = freq_times,
t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in,
do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor = inh_factor_in)
VAF = results.get('VAFf_mat')
freq_used = results.get('freq_used')
iend = mlab.find(freq_used >= self.xmax)[0]
err = 1-mean(VAF[1][0,0:iend])
print "inh_factor:", inh_factor_in, "err: ", err
return err
#@profile
def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):
"""
Stimulate cell with colored noise
sexp = spectral exponent: Power ~ 1/freq^sexp
cutf = frequency cutoff: Power flat (white) for freq <~ cutf
do_csd = 1: use cross spectral density function for computation
"""
self.barrier() # wait for other nodes
filename = str(self.pickle_prefix) + "_results_pop_cnoise.p"
filepath = self.data_dir + "/" + filename
if self.id == 0: print "- filepath:", filepath
if self.do_run or (os.path.isfile(filepath) is False):
tstart = 0;
fs = 1 / self.dt # sampling rate
fmax = fs / 2 # maximum frequency (nyquist)
t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!
#print self.syn_ex_dist
#print self.syn_inh_dist
#exit()
if (self.syn_ex_dist == []):
for nt in range(self.n_celltypes): # loop over all cells
#print "nt", nt
if hasattr(self.cells[nt][0], 'input_vec'):
self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!
else:
self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!
#print self.syn_ex_dist
if (self.syn_ex_dist[0] == []):
nemax = 1
else:
nemax = max([item for sublist in self.syn_ex_dist for item in sublist])
if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)
for nt in range(self.n_celltypes): # loop over all cells
self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!
#print self.syn_inh_dist
#exit()
if (self.syn_inh_dist[0] == []):
nimax = 0
else:
nimax = max([item for sublist in self.syn_inh_dist for item in sublist])
#print "self.syn_inh_dist, self.syn_ex_dist", self.syn_inh_dist, self.syn_ex_dist
n_noise = max([nemax,nimax]) # number of noise sources
#print n_noise,nemax,nimax
# create reproduceable input
noise_data = []
for nj in range(n_noise):
if self.id == 0: # make sure all have the same signal !!!
if len(freq_used) == 0:
noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)
else:
noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal
else:
noise_data0 = np.empty(len(t_noise), dtype=np.float64)
noise_data0 = self.broadcast(noise_data0, fast = True)
noise_data.append(noise_data0)
noise_data0 = []
noise_data_points = len(noise_data[0])
# Create signal weight vector inh_factor if it is not fully given
if len(noise_data) > len(inh_factor):
inh_factor = [inh_factor[0]] * len(noise_data)
print "inh_factor:", inh_factor
#if equi:
#pass
# tstop = t_stim
if max(self.n_syn_ex) == 0: # this means current input
self.set_IStim() # sets amp
if self.fluct_s != []:
if self.fluct_s[self.a_celltype[0]] > 0:
if self.id == 0: print "- adding i fluct"
self.connect_fluct()
for i, m in enumerate(self.method_interpol):
if "syn" in m: self.method_interpol[i] = "syn " + str(self.syn_tau1/ms) + "/" + str(self.syn_tau2/ms) + "ms"
if "bin" in m: self.method_interpol[i] = "bin " + str(self.bin_width/ms) + "ms"
stimulus = []
for nj in range(len(noise_data)):
stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0
stimulus.append(stimulus0)
tstop = t[-1]
self.set_IPlay2(stimulus, t)
if self.id == 0: print "- starting colored noise transfer function estimation! with amp = " + str(np.round(self.amp[self.a_celltype[0]],4)) + ", ihold = " + str(np.round(self.ihold[self.a_celltype[0]],4)) + ", ihold_sigma = " + str(np.round(self.ihold_sigma,4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
else:
self.give_freq = False
ihold = self.set_i(self.ihold) # just sets amp, ihold should not change!
if 'gsyn_in' not in self.method_interpol:
pass
else:
self.g_syn_ex = [1]*len(self.N)
if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):
if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):
if self.id == 0: print "- adding g fluct"
self.connect_gfluct(E_i=-65)
stimulus = []
for nj in range(len(noise_data)):
stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp
stimulus.append(stimulus0)
noise_data = []
tstop = t[-1]
if self.N[self.a_celltype[0]] > 1:
self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)
if self.id == 0: print "- add random start"
#print "Enter Synplay()"
self.set_SynPlay(stimulus, t, t_startstop = t_startstop)
#print "Exit Synplay()"
if self.id == 0: print "- starting colored noise transfer function estimation with synaptic input! with amp = " + str(np.round(self.amp,4)) + ", ihold = " + str(np.round(self.ihold,4)) + ", ihold_sigma = " + str(np.round(self.ihold_sigma,4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
amp_vec = []
mag_vec = []
pha_vec = []
freq_used = []
ca = []
SNR_mat = []
VAFf_mat = []
Qual_mat = []
CF_mat = []
VAF_mat = []
stim = []
stim_re_mat = []
resp_mat = []
current_re = []
ihold1 = []
tk = []
K_mat = []
gsyn_in = []
fmean = []
fmax = []
fmstd = []
fcvm = []
fmeanA = []
fmaxA = []
fmstdA = []
fcvmA = []
t_all_vec_input_sorted = []
id_all_vec_input_sorted = []
if (self.id == 0) and (max(self.n_syn_ex) > 0):
print range(self.n_celltypes), np.shape(self.t_all_vec_input)
for l in range(self.n_celltypes):
ie = argsort(self.t_all_vec_input[l])
t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )
id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )
#if (self.id == 0):
# print self.g_syn_ex
# print np.array(self.g_syn_ex)>= 0
#print "g_syn_ex:",self.g_syn_ex
if np.array(np.array(self.g_syn_ex)>= 0).any():
if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:
print "- Equilibrate!"
self.run(tstop, do_loadstate = False)
m = md5.new()
cell_exe_new = self.cell_exe[0]
m.update(cell_exe_new)
filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'
self.cells[self.a_celltype[0]][0].get_states(filename)
else:
self.run(tstop, do_loadstate = False)
i_startstop = []
results = self.get(t_startstop, i_startstop)
time = results.get('time')
current = results.get('current')
voltage = results.get('voltage')
fmean = results.get('fmean')
gsyn = results.get('gsyn')
freq_times = results.get('freq_times')
spike_freq = results.get('spike_freq')
t_all_vec_vec = results.get('t_all_vec_vec')
id_all_vec_vec = results.get('id_all_vec_vec')
gsyns = results.get('gsyns')
gsyn_in = results.get('gsyn_in')
fmax = results.get('fmax')
fmstd = results.get('fmstd')
fcvm = results.get('fcvm')
fmeanA = results.get('fmeanA')
fmaxA = results.get('fmaxA')
fmstdA = results.get('fmstdA')
fcvmA = results.get('fcvmA')
fbaseA = results.get('fbaseA')
fbase = results.get('fbase')
fbstdA = results.get('fbstdA')
else: # do not run, analyse input!!!
time = t
voltage = []
for l in range(self.n_celltypes):
voltage.append(np.zeros(len(t)))
current = []
freq_times = []
spike_freq = []
gsyn = []
gsyn_in = []
t_all_vec_vec = []
id_all_vec_vec = []
fmean = []
fmax = []
fmstd = []
fcvm = []
fstdm = []
fmeanA = []
fmaxA = []
fmstdA = []
fcvmA = []
fbaseA = []
fbase = []
fbstdA = []
if self.id == 0:
current = self.n_train_ex
#t_all_vec = self.t_all_vec_input
#id_all_vec = self.id_all_vec_input
#ie = argsort(t_all_vec)
#t_all_vec_vec.append( t_all_vec[ie] )
#id_all_vec_vec.append( id_all_vec[ie].astype(int) )
t_all_vec_vec = t_all_vec_input_sorted
id_all_vec_vec = id_all_vec_input_sorted
freq_times = arange(0, tstop, self.bin_width)
spike_freq = np.zeros(len(freq_times))
for j in self.a_celltype:
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)
if self.tau2_ex[0] > 0:
spike_freq = np.concatenate((zeros(1),num_spikes))
print "NOSYN TEST: start convolution with Ksyn"
Ksyn = syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0])
Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))
spike_freq = np.convolve(Ksyn, spike_freq, mode='same')
print "NOSYN TEST: convolution finished"
else:
if isinstance(self.factor_celltype[j], ( int, long ) ):
f = self.factor_celltype[j]
else:
f = self.factor_celltype[j][0]
spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width
fmean.append(self.fmean_input)
fmax.append(self.fmax_input)
fmstd.append(self.fmstd_input)
fcvm.append(self.fcvm_input)
fstdm.append(self.fstdm_input)
if self.no_fmean == True:
fmean.append(ihold)
#plt.figure('spike_freq')
#plt.plot(freq_times, spike_freq)
#plt.savefig("./figs/Pub/Spike_freq_" + str(self.pickle_prefix) + ".pdf", dpi = 300, transparent=True) # save it
#plt.clf()
fmeanA = fmean[0]
fmaxA = fmax[0]
fmstdA = fmstd [0]
fcvmA = fcvm[0]
fstdmA = fstdm[0]
if self.id == 0:
if any([i<0 for i in inh_factor]):
p0 = []
inhf_idx = []
for i, inhf in enumerate(inh_factor):
if inhf < 0:
p0.append(0)
inhf_idx.append(i)
plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))
p = plsq
ip = 0
for i in inhf_idx:
inh_factor[i] = p[ip]
ip += 1
print "Final inh_factor: ", inh_factor
results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times,
t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in,
do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)
mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean')
SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat')
stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat')
self.barrier() # wait for other nodes
if self.id == 0:
if t_qual > 0:
#print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt
current_re = current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]
current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]
if len(self.i_holdrs) > 0:
ihold1 = self.i_holdrs[self.a_celltype[0]][0]
else:
ihold1 = []
for l in range(len(self.method_interpol)): # unwrap
pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase
# only return fraction of actual signal, it is too long!!!
if time[-1] > self.tmax:
imax = -1*int(self.tmax/self.dt)
time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]
for n in range(self.n_celltypes):
voltage[n] = voltage[n][imax:]
if freq_times != []:
if freq_times[-1] > self.tmax:
imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency
freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2]
bvec = ["_syn" in st for st in self.method_interpol]
if np.any(bvec):
# normalize synaptic integration with others
mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0]
if self.id == 0: print "start pickle"
results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,
'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,
'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec}
if self.id == 0:
if self.dumpsave == 1:
pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )
print "pickle done"
if self.plot_train:
for a in self.a_celltype:
#i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]
#i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]
#t_all_cut = t_all_vec_vec[a][i_start:i_stop]
#id_all_cut = id_all_vec_vec[a][i_start:i_stop]
t_all_cut = t_all_vec_vec[a]
id_all_cut = id_all_vec_vec[a]
f_start_in = mlab.find(t_all_cut >= 0)
f_stop_in = mlab.find(t_all_cut <= 10)
f_start = f_start_in[0]
f_stop = f_stop_in[-1]+1
use_spikes = t_all_cut[f_start:f_stop]
use_id = id_all_cut[f_start:f_stop]
plt.figure('results_train')
ax99 = plt.subplot(1,1,1)
ax99.plot(use_spikes,use_id,'|', ms=2)
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Train_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
if len(t_all_cut) > 0:
tbin = 100*ms
tb = np.arange(0,t[-1],tbin)
[all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)
all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin
plt.figure('results_train2')
plt.plot(tb,all_rate)
plt.savefig("./figs/Pub/PSTH_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
plt.figure('results_noise')
plt.plot(time,current)
plt.savefig("./figs/Pub/Noise_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
if self.plot_input:
if len(t_all_vec_input_sorted[0]) > 0:
i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]
i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]
t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]
id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]
plt.figure('results_input')
ax99 = plt.subplot(1,1,1)
ax99.plot(t_all_cut,id_all_cut,'|', ms=2)
plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Input_" + str(self.pickle_prefix) + "_N" + str(self.N[self.a_celltype[0]]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
else:
if self.id == 0:
results = pickle.load( gzip.GzipFile( filepath, "rb" ) )
#print results
#print {key:np.shape(value) for key,value in results.iteritems()}
if self.minimal_dir: # save only info needed for plot
print {key:np.shape(value) for key,value in results.iteritems()}
if "Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p" in filename:
results['ca'] = []
results['resp_mat'] = []
results['stim'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['stim_re_mat'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
elif ("Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
elif ("Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
results['freq_times'] = []
results['spike_freq'] = []
elif ("Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p" in filename) \
or ("Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
results['freq_times'] = []
results['spike_freq'] = []
elif ("Fig2_pop_transfer_" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['t1'] = []
results['voltage'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
else:
results['ca'] = []
results['resp_mat'] = []
results['stim'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['t1'] = []
results['voltage'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['stim_re_mat'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
print {key:np.shape(value) for key,value in results.iteritems()}
pickle.dump( results, gzip.GzipFile( self.minimal_dir + "/" + filename, "wb" ) )
else:
results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],
'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],
'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]}
if self.id == 0:
if self.plot_train:
for a in self.a_celltype:
t1 = results.get('t1')
voltage = results.get('voltage')
fmean = results.get('fmean')
fmax = results.get('fmax')
fmstd = results.get('fmstd')
if results.has_key('t_all_vec_vec'):
if len(results['t_all_vec_vec']) > 0:
t_all_vec_vec = results.get('t_all_vec_vec')
id_all_vec_vec = results.get('id_all_vec_vec')
t_all_cut = t_all_vec_vec[a]
id_all_cut = id_all_vec_vec[a]
f_start_in = mlab.find(t_all_cut >= 0)
f_stop_in = mlab.find(t_all_cut <= 10)
f_start = f_start_in[0]
f_stop = f_stop_in[-1]+1
use_spikes = t_all_cut[f_start:f_stop]
use_id = id_all_cut[f_start:f_stop]
plt.figure('results_train')
ax97 = plt.subplot(1,1,1)
ax97.plot(use_spikes,use_id,'|', ms=6)
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Train_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.figure('results_voltage')
ax99 = plt.subplot(2,1,1)
ax99.plot(t1,voltage[a])
t_noise = arange(0, t_stim, self.dt)
noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)
stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline)
ax98 = plt.subplot(2,1,2)
ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Voltage_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.show()
plt.clf()
if (self.id == 0) and (do_csd == 1):
Qual = results.get('Qual')
for i, ii in enumerate(self.method_interpol):
print "\n[QUAL:] Interpol:", ii, "SNR0:", Qual[i,0,0], "SNR_cutff:", Qual[i,0,1], "SNR_mean:", Qual[i,0,2], "\n VAF0:", Qual[i,1,0], "VAF_cutff:", Qual[i,1,1], "VAF_mean:", Qual[i,1,2], "\n CF(subtracted):", Qual[i,2,0], "VAF(subtracted):", Qual[i,2,1]
VAF = results.get('VAF')
freq_used = results.get('freq_used')
iend = mlab.find(freq_used >= self.xmax)[0]
print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend]))
self.barrier() # wait for other nodes
return results
# def fun_ssine_Stim(self, freq_used = np.array([1, 10, 100, 1000])*Hz):
# """
# Compute impedance and/or transfer function using Single sine stimulation
# Only compute transfer function if there is a steady state (resting) firing rate!
# """
# self.barrier() # wait for other nodes
#
# filepath = "./data/" + str(self.pickle_prefix) + "_results_pop_ssine.p"
#
# if self.do_run or (os.path.isfile(filepath) is False):
#
# fs = 1 / self.dt # sampling rate
# fmax = fs / 2 # maximum frequency (nyquist)
#
# if self.id == 0: print "- starting single sine transfer function estimation! with amp = " + str(np.round(self.amp[a_celltype[0]],4)) + ", ihold = " + str(np.round(self.ihold[self.a_celltype[0]],4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
#
# if max(self.n_syn_ex) == 0:
# self.set_IStim()
#
# if self.fluct_s != []:
# if self.fluct_s[self.a_celltype[0]] > 0:
# if self.id == 0: print "- adding i fluct"
# self.connect_fluct()
#
# for i, m in enumerate(self.method_interpol):
# if "syn" in m: self.method_interpol[i] = "syn " + str(self.syn_tau1/ms) + "/" + str(self.syn_tau2/ms) + "ms"
# if "bin" in m: self.method_interpol[i] = "bin " + str(self.bin_width/ms) + "ms"
#
# else:
# self.give_freq = False
# ihold = self.set_i(self.ihold) # just sets amp, ihold should not change!
#
# if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):
# if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):
# if self.id == 0: print "- adding g fluct"
# self.connect_gfluct(E_i=-65)
#
# #if ((self.fluct_std_e[self.a_celltype[0]] != []) or (self.fluct_std_i[self.a_celltype[0]] != [])):
# # if ((self.fluct_std_e[self.a_celltype[0]] > 0) or (self.fluct_std_i[self.a_celltype[0]] > 0)):
# # if self.id == 0: print "- adding g fluct"
# # self.connect_gfluct(E_i=-65)
#
# if 'gsyn_in' not in self.method_interpol:
# pass
# else:
# self.g_syn_ex = 1
#
#
# for i, fu in enumerate(freq_used):
#
# if self.id == 0: print "- single sine processing frequency = " + str(fu)
#
# t, stimulus, i_startstop, t_startstop = create_singlesine(fu = fu, amp = self.amp[a_celltype[0]], ihold = 0, dt = self.dt, periods = 20, minlength = 2*s, t_prestim = 1*s)
# tstop = t[-1]
#
# if i == 0: t_startstop_plot = t_startstop
#
# if max(self.n_syn_ex) == 0:
# self.set_IPlay(stimulus, t)
# else:
# self.set_SynPlay(stimulus, t)
#
# if self.g_syn_ex >= 0: # should also be true for current input!!!
#
# self.run(tstop)
#
# if i == 0: # do this here to have something to return
#
# # select first sinusoidal to plot, later
# voltage_plot = []
# current_plot = []
# time_plot = []
# freq_times_plot = []
# spike_freq_plot = []
# gsyn_plot = []
#
# # construct vectors
# amp_vec = zeros(len(freq_used)) # amplitude vector
# fmean_all = zeros(len(freq_used)) # mean firing frequency (all cells combined)
# fmean = zeros(len(freq_used)) # mean firing frequency (one cell)
# ca = zeros(len(freq_used), dtype=complex)
#
# # create matrix to hold all different interpolation methods:
# mag_vec = zeros((len(self.method_interpol),len(freq_used))) # magnitude vector
# pha_vec = zeros((len(self.method_interpol),len(freq_used))) # phase vector
# NI_vec = zeros((len(self.method_interpol),len(freq_used))) # NI vector
# VAF_vec = zeros((len(self.method_interpol),len(freq_used))) # VAF vector
#
# results = self.get(t_startstop, i_startstop) # t1 should be equal to t!!!
# time, voltage, current, fmean0, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
# freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')
#
# else:
#
# time = t
# voltage = []
# voltage.append(np.zeros(len(t)))
# current = stimulus
#
# freq_times = []
# spike_freq = []
# fmean0 = ihold
# gsyn = []
# gsyn_in = []
#
# t_all_vec_vec = []
# id_all_vec_vec = []
#
#
# if self.id == 0:
#
# t_all_vec = []
# t_all_vec.append([])
# t_all_vec[0] = np.concatenate(self.t_all_vec_input)
#
# id_all_vec = []
# id_all_vec.append([])
# id_all_vec[0] = np.concatenate(self.id_all_vec_input)
#
# ie = argsort(t_all_vec[0])
# t_all_vec_vec.append( t_all_vec[0][ie] )
# id_all_vec_vec.append( id_all_vec[0][ie].astype(int) ) #
#
#
# freq_times = arange(0, tstop, self.bin_width)
# [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)
# spike_freq = np.concatenate((zeros(1),num_spikes)) / self.bin_width
#
#
# if self.id == 0:
#
# fmean[i] = fmean0[0]
#
# if i == 0:
#
# # select first sinusoidal to plot
# voltage_plot = voltage
# current_plot = current
# time_plot = time
# freq_times_plot = freq_times
# spike_freq_plot = spike_freq
# gsyn_plot = gsyn
#
#
# for l in range(len(self.method_interpol)):
#
# if "bin" in self.method_interpol[l]:
#
# # binning and linear interpolation
# stimulus_signal = stimulus[i_startstop[0]:i_startstop[1]] # cut out relevant signal
# t_input_signal = t[i_startstop[0]:i_startstop[1]] - t[i_startstop[0]]
#
# spike_freq_interp = interp(t, freq_times, spike_freq, left=0, right=0) # interpolate to be eqivalent with input, set zero at beginning and end!
# freq_out_signal_interp = spike_freq_interp[i_startstop[0]:i_startstop[1]] # cut out relevant signal
# vamp, mag_vec[l,i], pha_vec[l,i], fmean_all[i], _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_interp, t_input_signal, method = "fft", f = fu)
#
# results = est_quality(t_input_signal, fu, freq_out_signal_interp, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fmean_all[i])
# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')
# print "-[bin] NI: " + str(NI_vec[l,i]) + ", VAF: " + str(VAF_vec[l,i])
#
# if "syn" in self.method_interpol[l]:
#
# # synaptic integration
# dt_out = t_input_signal[2] - t_input_signal[1]
# shift = self.nc_delay/dt_out # shift response by the nc delay to remove offset
# freq_out_signal_syn = gsyn[i_startstop[0]+shift:i_startstop[1]+shift] # cut out relevant signal
#
# vamp, mag_vec[l,i], pha_vec[l,i], fm, _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_syn, t_input_signal, method = "fft", f = fu)
#
# results = est_quality(t_input_signal, fu, freq_out_signal_syn, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fm)
# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')
# print "-[syn] NI: " + str(NI_vec[l,i]) + ", VAF: " + str(VAF_vec[l,i])
#
#
# self.barrier() # wait for other nodes
#
# #print "rest: " + str(vrest) + " freq_used:" + str(freq_used) + " amp_vec:" + str(amp_vec) + " mag_vec:" + str(mag_vec) + " pha_vec:" + str(pha_vec)
#
# if self.id == 0:
#
# for l in range(len(self.method_interpol)): # unwrap
# pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase
#
# # only return fraction of actual signal, it is too long!!!
# if time_plot[-1] > self.tmax:
# imax = where(time_plot > self.tmax)[0][0] # for voltage, current and time
# time_plot = time_plot[0:imax]; current_plot = current_plot[0:imax]; gsyn_plot = gsyn_plot[0:imax]
# for n in range(self.n_celltypes):
# voltage_plot[n] = voltage_plot[n][0:imax]
#
# if freq_times_plot != []:
# if freq_times_plot[-1] > self.tmax:
# imax2 = where(freq_times_plot > self.tmax)[0][0] # for spike frequency
# freq_times_plot = freq_times_plot[0:imax2]; spike_freq_plot = spike_freq_plot[0:imax2]
#
# # normalize synaptic integration with with first magnitude, may by syn itself!
# bvec = ["syn" in st for st in self.method_interpol]
# if np.any(bvec):
# k = where(bvec)
# mag_vec[k,:]= mag_vec[0,0]*mag_vec[k,:]/mag_vec[k,0]
#
# NI_vec = (freq_used, NI_vec)
# VAF_vec = (freq_used, VAF_vec)
# results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage_plot, 't_startstop':t_startstop_plot,
# 'current':current_plot,'t1':time_plot,'freq_times':freq_times_plot,'spike_freq':spike_freq_plot,
# 'fmean':mean(fmean),'method_interpol':self.method_interpol, 'NI':NI_vec, 'VAF':VAF_vec}
#
# if self.id == 0:
# pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )
#
# else:
#
# if self.id == 0:
# results = pickle.load( gzip.GzipFile( filepath, "rb" ) )
# else:
# results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 't_startstop':[],
# 'current':[],'t1':[],'freq_times':[],'spike_freq':[],
# 'fmean':[],'method_interpol':self.method_interpol,'NI':[],'VAF':[]}
#
# return results
def get_RC(self, opt_plot):
    """Return membrane and integrate-and-fire parameters for analytical overlays.

    Parameters:
        opt_plot: collection of option strings; "analytical" triggers an RC
            estimate from a freshly constructed cell, "if" additionally reads
            IF threshold/reset voltages from that cell.

    Returns:
        (rm, cm, taum, Vreset, Vth, Vrest) -- membrane resistance,
        capacitance, time constant, and IF reset/threshold/rest voltages.
        Non-zero values are only computed on rank 0; all other ranks (and
        options not requested) fall back to zeros / 0,1,0 mV defaults.
    """
    if self.id == 0:

        if "analytical" in opt_plot: # simplest case, only uses rm and tau, scaling necessary
            # Build the cell from its stored constructor string; the exec is
            # expected to bind a local name `cell`.
            exec self.cell_exe[self.a_celltype[0]]
            sim = Stimulation(cell, temperature = self.temperature)
            rm, cm, taum = sim.get_RCtau()
        else:
            rm = cm = taum = 0

        if "if" in opt_plot:
            # NOTE(review): `cell` is only bound by the exec in the
            # "analytical" branch above -- "if" without "analytical" would
            # raise NameError here; confirm callers always pair the options.
            Vrest = cell.soma(0.5).pas.e*mV
            Vth = cell.spkout.thresh*mV
            Vreset = cell.spkout.vrefrac*mV
        else:
            Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV

        # Drop references so NEURON can reclaim the temporary cell.
        sim = None
        cell = None

    else:
        # Non-root ranks return placeholder values only.
        rm = cm = taum = 0
        Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV

    return rm, cm, taum, Vreset, Vth, Vrest
def fun_plot(self, currlabel="control", dowhat="cnoise", freq_used=np.array([]), cutf=10, sexp=0, t_stim=100*s, ymax=0, ax=None, SNR=None, VAF=None, t_qual=0, opt_plot=np.array([]), method_interpol_plot=[], do_csd = 1):
SNR_switch = SNR
VAF_switch = VAF
rm, cm, taum, Vreset, Vth, Vrest = self.get_RC(opt_plot)
if dowhat == "cnoise":
if do_csd == 0:
t_qual = 0; SNR_switch = 0; VAF_switch = 0
results = self.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = t_qual, freq_used = freq_used, do_csd = do_csd)
freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual')
stim, stim_re_mat, current_re, tk, K_mat_old = results.get('stim'), results.get('stim_re_mat'), results.get('current_re'), results.get('tk'), results.get('K_mat')
elif dowhat == "ssine":
results = self.fun_ssine_Stim(freq_used = freq_used0)
freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
freq_times, spike_freq, fmean, method_interpol, VAF = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('VAF')
tk = []
K_mat_old = []
# analyse
if self.id == 0:
print "Mean rate: " + str(fmean)
# Turn it off if set to zero
if SNR_switch == 0: SNR = None
if VAF_switch == 0: VAF = None
if t_qual > 0:
plt.figure("Reconstruct")
ax1 = subplot(2,1,1)
ax1.plot(np.arange(len(stim))*dt-1, current_re*1e3, 'b', linewidth=1)
ax1.plot(np.arange(len(stim))*dt-1, (stim)*1e3, 'k-', linewidth=1)
ax1.plot(np.arange(len(stim))*dt-1, (stim_re_mat[0,:])*1e3, 'r', linewidth=1, alpha=1)
#adjust_spines(ax1, ['left','bottom'], d_out = 10)
#ax1.axis(xmin=0, xmax=1)
#ax1.axis(ymin=8.3, ymax=10.7)
#ax1.yaxis.set_ticks(array([8.5,9,9.5,10,10.5]))
#ax1.set_title("Reconstruction")
#ax1.set_xlabel("s")
#ax1.set_ylabel("pA")
#ax1.text(0.15, 10.7, "Input current", color=color3, fontsize = 8)
#ax1.text(0.8, 10.7, "Signal", color="#000000", fontsize = 8)
#ax1.text(0.0, 8.2, "Reconstruction", color=color2, fontsize = 8)
ax2 = subplot(2,1,2)
ax2.plot(tk, K_mat_old[0], 'k', linewidth=1)
self.save_plot(directory = "./figs/dump/", prefix = "reconstruct")
plt.figure("Transfer")
currtitle = currlabel + " pop " + dowhat + ", " + self.celltype[self.a_celltype[0]]
ax = plot_transfer(currtitle, freq_used, mag, pha, t1, current, voltage[self.a_celltype[0]], freq_times, spike_freq, taum, fmean, self.ihold, rm, Vreset, Vth, Vrest, method_interpol, method_interpol_plot, SNR = SNR, VAF = VAF, ymax = self.ymax, ax = self.ax, linewidth = self.linewidth, color_vec = self.color_vec, alpha = self.alpha, opt_plot = opt_plot)
suptitle("Population transfer function of " + str(self.N[self.a_celltype[0]]) + " " + self.celltype[self.a_celltype[0]] + ", amp: " + str(np.round(self.amp[self.a_celltype[0]],4)) + ", amod: " + str(self.amod) + ", ih: " + str(np.round(self.ihold,4)) + ", ih_s: " + str(np.round(self.ihold_sigma,4)) + ", fm: " + str(np.round(fmean,2)) + ", fl_s: " + str(self.fluct_s))
return VAF, SNR, ax, tk, K_mat_old
def save_plot(self, directory = "./figs/dump/", prefix = " "):
    """Save the current matplotlib figure as a time-stamped PDF.

    Parameters:
        directory: output directory (must exist; no directories are created).
        prefix: short tag embedded in the file name.

    Only the root node writes the file; the name encodes cell type,
    population size, holding current and stimulus amplitude.
    """
    # BUG FIX: was `if pop.id == 0:` -- `pop` is a global that only exists
    # in the __main__ test script, so the method failed for any other
    # caller; use the instance's own rank instead.
    if self.id == 0:
        from datetime import datetime
        idate = datetime.now().strftime('%Y%m%d_%H%M')  # %S
        savefig(directory + idate + "-pop_transfer_" + prefix + "_" + self.celltype[self.a_celltype[0]] + "_N" + str(self.N[self.a_celltype[0]]) + "_ihold" + str(np.round(self.ihold,4)) + "_amp" + str(np.round(self.amp[self.a_celltype[0]],4)) + ".pdf", dpi = 300) # save it
def do_pca_ica(self, t_analysis_delay=0, t_analysis_stop=1, time=0, signals=0, output_dim=10, n_processes=32, n_chunks=32, do_ica=1, n_celltype = 0):
    """Run PCA (and optionally ICA) on recorded population signals.

    Parameters:
        t_analysis_delay, t_analysis_stop: analysis window in the units of
            `time` (start offset / end); the leading transient is discarded.
        time: time vector of the recording (assumed uniformly sampled --
            dt is taken from time[2]-time[1]).
        signals: per-celltype list of signal arrays; signals[n_celltype]
            is transposed to (samples x channels) before PCA.
        output_dim: number of principal components to keep.
        n_processes, n_chunks: parallel MDP scheduler settings (MPI path only).
        do_ica: if truthy, additionally run FastICA on the PCA output.
        n_celltype: which cell type's signals to analyse.

    Returns:
        dict with 't', 'pca' (projected components), 'pca_var'
        (normalized variance per component), 'pca_var_expl' (total
        explained variance) and 'ica' (ICA sources, or [] if do_ica is off).
    """
    if self.use_mpi:
        # In MPI mode results are cached on disk and only recomputed when
        # self.do_run is set or the pickle is missing.
        filepath = self.data_dir + "/" + str(self.pickle_prefix) + "_results_pop_pca_ica.p"

        if self.do_run or (os.path.isfile(filepath) is False):
            # PCA
            # remove beginning (discard the transient before t_analysis_delay)
            dt = time[2]-time[1]
            t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)]
            pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]

            node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)

            # pad with zeros to be able to split into chunks!
            n_add = n_chunks-np.remainder(np.shape(pca_mat)[0],n_chunks)
            mat_add = np.zeros((n_add, np.shape(pca_mat)[1]))
            pca_mat_add = np.concatenate((pca_mat, mat_add))
            pca_mat_iter = np.split(pca_mat_add, n_chunks)

            # train the PCA node in parallel over the chunks
            flow = mdp.parallel.ParallelFlow([node])
            start_time = ttime.time()
            with mdp.parallel.ProcessScheduler(n_processes=n_processes, verbose=True) as scheduler:
                flow.train([pca_mat_iter], scheduler=scheduler) # input has to be list, why??
            process_time = ttime.time() - start_time

            s = np.array(flow.execute(pca_mat_iter))
            s = s[0:len(t),:] # resize to length of t! (drop the zero padding)

            #print "node.d: ",node.d
            var_vec = node.d/sum(node.d)

            print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)
            print 'Variance (' , 0, ') : ', var_vec
            print 'Time to run (' , 0, ') : ', process_time

            s2 = []
            if do_ica:
                # ICA
                #s2 = mdp.fastica(s)
                ica = mdp.nodes.FastICANode() #CuBICANode()
                ica.train(s)
                s2 = ica(s)

            results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}

            if self.id == 0:
                if self.dumpsave == 1:
                    pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )

        else:
            # load cached results on the root node
            # NOTE(review): non-root ranks fall through without binding
            # `results` here -- presumably only rank 0 uses the return
            # value in this branch; confirm against callers.
            if self.id == 0:
                results = pickle.load( gzip.GzipFile( filepath, "rb" ) )

    else:
        # Serial (non-MPI) path: same analysis without caching or chunking.
        # remove beginning
        dt = time[2]-time[1]
        t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)]
        pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]

        node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)

        start_time = ttime.time()
        node.train(pca_mat)
        s = node(pca_mat)
        process_time = ttime.time() - start_time

        #print "node.d: ",node.d
        var_vec = node.d/sum(node.d)

        print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)
        print 'Variance (' , 0, ') : ', var_vec
        print 'Time to run (' , 0, ') : ', process_time

        s2 = []
        if do_ica:
            # ICA
            #s2 = mdp.fastica(s)
            ica = mdp.nodes.FastICANode() #CuBICANode()
            ica.train(s)
            s2 = ica(s)

        results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}

    return results
def net_run(self, tstop, simprop = "default", t_analysis_delay=0, t_analysis_stop=1, stim_start=0):
    """Run the network simulation and cache results to an HDF5 file.

    Runs the simulation to `tstop` (or loads a previous run from disk when
    self.do_run is unset and the file exists) and returns the recorded
    traces and spike trains.

    Parameters:
        tstop: simulation end time.
        simprop: label for the simulation (only used by the commented-out
            .mat export below).
        t_analysis_delay, t_analysis_stop, stim_start: likewise only used
            by the commented-out export code.

    Returns:
        (time, voltage, current, t_all_vec_vec, id_all_vec_vec, gsyns,
        freq_times, w_mat, winh_mat).  On non-root ranks, and for w_mat /
        winh_mat when loading from file, these stay the empty defaults.
    """
    freq_times = []
    t_all_vec_vec = []
    id_all_vec_vec = []
    gsyns = []
    w_mat = []
    winh_mat = []
    time = []
    voltage = []
    current = []

    filepath = self.data_dir + "/" + str(self.pickle_prefix) + "_results_pop_randomnet.hdf5"

    if self.do_run or (os.path.isfile(filepath) is False):

        self.run(tstop)
        self.no_fmean = True

        results = self.get()
        time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
        freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns, w_mat, winh_mat = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns'), results.get('w_mat'), results.get('winh_mat')

        if self.id == 0:
            if self.dumpsave == 1:
                # Save to HDF5 (gzip+shuffle) instead of pickle -- one
                # dataset per global trace, one group per cell type.
                #pickle.dump( results, open( filepath, "wb" ) ) # gzip.GzipFile
                print "- Saving", filepath

                f = h5py.File(filepath, 'w')
                f.create_dataset('time', data=time, compression='gzip', shuffle=True)
                f.create_dataset('voltage', data=np.array(voltage), compression='gzip', shuffle=True)
                f.create_dataset('current', data=current, compression='gzip', shuffle=True)
                f.create_dataset('freq_times', data=freq_times, compression='gzip', shuffle=True)
                #f.create_dataset('t_all_vec_vec', data=np.array(t_all_vec_vec), compression='lzf', shuffle=True)
                #f.create_dataset('id_all_vec_vec', data=np.array(id_all_vec_vec), compression='lzf', shuffle=True)
                #f.create_dataset('gsyns', data=np.array(gsyns), compression='lzf', shuffle=True)

                for i in range(len(self.N)):
                    # per-celltype group: spike times, spike ids, conductances
                    subgroup = f.create_group("cell" + str(i))
                    subgroup.create_dataset('t_all_vec_vec', data=t_all_vec_vec[i], compression='gzip', shuffle=True)
                    subgroup.create_dataset('id_all_vec_vec', data=id_all_vec_vec[i], compression='gzip', shuffle=True)
                    subgroup.create_dataset('g', data=gsyns[i], compression='gzip', shuffle=True)
                    #for j in range(len(gsyns[i])):
                    #    subsubgroup = subgroup.create_group("gsyn" + str(j))
                    #    subsubgroup.create_dataset('g', data=gsyns[i][j], compression='lzf', shuffle=True)

                f.close()
                print "- Save finished"

                #filename = slugify(simprop)
                #syn_grc = np.array(gsyns[0])
                #import scipy
                #from scipy import io
                #print "Saving .mat"
                #data = {}
                #data['syn_grc'] = syn_grc[:,int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]
                #data['time'] = freq_times[int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]-stim_start
                #scipy.io.savemat('./figs/' + filename + '.mat',data)

    else:

        if self.id == 0:
            # Load a cached run; w_mat/winh_mat are not stored in the HDF5
            # file and remain empty.
            #results = pickle.load( open( filepath, "rb" ) ) #gzip.GzipFile
            f = h5py.File(filepath, 'r')
            time = np.array(f['time'])
            voltage = np.array(f['voltage'])
            current = np.array(f['current'])
            freq_times = np.array(f['freq_times'])

            for i in range(len(self.N)):
                t_all_vec_vec.append(np.array(f['/cell' + str(i) + '/t_all_vec_vec']))
                id_all_vec_vec.append(np.array(f['/cell' + str(i) + '/id_all_vec_vec']))
                gsyns.append(np.array(f['/cell' + str(i) + '/g']))
                #gsyns.append([])
                #for j in range(self.N[i]):
                #    gsyns[i].append(np.array(f['/cell' + str(i) + '/gsyn' + str(j) + '/g' ]))

            f.close()

    return time, voltage, current, t_all_vec_vec, id_all_vec_vec, gsyns, freq_times, w_mat, winh_mat
    def delall(self):
        """Tear down the whole network: clear MPI gids and destroy every cell.

        NOTE(review): original indentation was lost in this dump; nesting was
        reconstructed from context -- verify against the source repository.
        """
        if self.use_mpi:
            # Release every gid registered with the ParallelContext so a new
            # network can be built on this host without gid collisions.
            self.pc.gid_clear()
            print "- clearing gids"
        else:
            pass

        #h.topology()
        #for sec in h.allsec():
        #    print "- deleting section:", sec.name()
        #    #h("%s{delete_section()}"%sec.name())
        #    sec.push()
        #    h.delete_section()
        #h.topology()

        # Destroy each cell explicitly (frees its NEURON sections), then drop
        # the Python containers that keep netcon/cell references alive.
        for n in range(self.n_celltypes):
            for m in self.cells[n]:
                m.destroy()
                del m

        del self.cells
        del self.nc_vecstim
        del self.netcons
        del self.nclist

        # Print whatever sections are still alive, for leak debugging.
        print h.topology()
    def delrerun(self):
        """Reset all stimulation and recording state so the same network can
        be re-run with fresh inputs, without rebuilding the cells.

        NOTE(review): original indentation was lost in this dump; the nesting
        of the per-cell spike-recording loop (serial branch only) was
        reconstructed from context -- verify against the source repository.
        """
        # Drop the old stimulation objects ...
        del self.nc_vecstim
        del self.netcons
        del self.nclist
        del self.vecstim
        del self.spike_vec
        del self.ST_stims
        del self.PF_stims

        # ... and start over with empty containers.
        self.netcons = []
        self.nclist = []
        self.nc_vecstim = []
        self.vecstim = []
        self.spike_vec = []
        self.ST_stims = []
        self.PF_stims = []

        self.t_vec = []
        self.id_vec = []
        self.rec_v = []

        for n in range(self.n_celltypes):
            if self.use_mpi:
                # Under MPI, spikes are pooled into one (time, gid) vector pair.
                self.t_vec.append(h.Vector())  # np.array([0])
                self.id_vec.append(h.Vector())  # np.array([-1], dtype=int)
            else:
                # Serial run: one spike-time vector per cell, recorded via the
                # cell's spike NetCon.
                self.t_vec.append([])

                self.rec_v.append(h.Vector())

                for cell in self.cells[n]:
                    self.t_vec[n].append(h.Vector())
                    cell.nc_spike.record(self.t_vec[n][-1])

        # Clear per-run input bookkeeping.
        self.flucts = []  # Fluctuating inputs on this host
        self.noises = []  # Random number generators on this host
        self.plays = []  # Play inputs on this host
        self.rec_is = []
        self.trains = []

        self.ic_holds = []
        self.i_holdrs = []
        self.i_holds = []
        self.ic_starts = []
        self.vc_starts = []
        self.ic_steps = []
        self.tvecs = []
        self.ivecs = []

        self.noises = []
        self.record_syn = []
        self.id_all_vec_input = []
        self.t_all_vec_input = []
        self.syn_ex_dist = []
        self.syn_inh_dist = []
# test code
if __name__ == '__main__':
# mpiexec -f ~/machinefile -enable-x -n 96 python Population.py --noplot
from Stimulation import *
from Plotter import *
from Stimhelp import *
from cells.IfCell import *
import scipy
from scipy import io
dt = 0.1*ms
dt = 0.025*ms
do_run = 1
if results.norun: # do not run again use pickled files!
print "- Not running, using saved files"
do_run = 0
do = np.array(["transfer"])
opts = np.array(["if_cnoise", "grc_cnoise"]) #ssine
#opts = np.array(["if_cnoise"]) #ssine
#opts = np.array(["if_recon"]) #ssine
opts = np.array(["if_syn_CFvec"])
#opts = np.array(["prk_cnoise"])
opts = np.array(["if_cnoise", "if_ssine"]) #ssine
opts = np.array(["if_ssine"]) #ssine
opts = np.array(["grc_cnoise_addn_cn_", "grc_cnoise_cn_", "grc_cnoise_addn_cn_a01"])
opts = np.array(["grc_cnoise_addn100_cn_", "grc_cnoise_addn_cn_", "grc_cnoise_cn_"])
opts = np.array(["grc_cnoise_addn100_cn_"])
opts = np.array(["grc_cnoise_addn100_"])
opts = np.array(["grc_cnoise_addn_cn_"])
#opts = np.array(["grc_cnoise"])
#opts = np.array(["grc_cnoise_cn", "grc_cnoise_addn_cn"])
#opts = np.array(["if_cnoise_addn", "if_cnoise"])
do = np.array(["timeconst"])
#do = np.array(["transfer"])
#opts = np.array(["grc_cnoise_syn"])
#opts = np.array(["grc_recon_syn"])
#do = np.array(["prk_test"])
    # Single-cell Purkinje sanity test: run one multisplit-threaded cell for
    # 500 ms and plot its voltage trace plus instantaneous firing rates.
    # NOTE(review): indentation reconstructed -- verify against the repo.
    if "prk_test" in do:
        import multiprocessing
        from Purkinje import Purkinje

        cell = Purkinje()

        # set up recording
        # Time
        rec_t = h.Vector()
        rec_t.record(h._ref_t)

        # Voltage
        rec_v = h.Vector()
        rec_v.record(cell.soma(0.5)._ref_v)

        tstop = 500
        v_init = -60

        # NOTE(review): amp is 0, so this clamp injects nothing; the `/nA`
        # looks odd (dividing by a unit constant) -- confirm intent.
        stim = h.IClamp(cell.soma(0.5))
        stim.amp = 0.0/nA
        stim.delay = 1
        stim.dur = 1000

        # Split the single cell over all local cores via NEURON multisplit.
        cpu = multiprocessing.cpu_count()
        h.load_file("parcom.hoc")
        p = h.ParallelComputeTool()
        p.change_nthread(cpu,1)
        p.multisplit(1)
        print 'cpus:', cpu

        h.load_file("stdrun.hoc")
        h.celsius = 37
        h.init()
        h.tstop = tstop
        dt = 0.025 # ms
        h.dt = dt
        h.steps_per_ms = 1 / dt
        h.v_init = v_init
        h.finitialize()
        h.run()

        t1 = np.array(rec_t)
        voltage = np.array(rec_v)
        # Instantaneous rate from inter-spike intervals (times are in ms).
        s, spike_times = get_spikes(voltage, -20, t1)
        print 1000/diff( spike_times)

        plt.figure()
        plt.subplot(2,1,1)
        plt.plot(t1, voltage)
        plt.show()
    # Transfer-function experiments: for each configuration string in `opts`,
    # build a population, drive it with a single sine ("ssine"), colored
    # noise ("cnoise") or reconstruction ("recon") stimulus and plot the
    # population transfer function.
    # NOTE(review): indentation reconstructed from context (this dump lost
    # all leading whitespace) -- verify nesting against the repository.
    if "transfer" in do:

        # SET DEFAULT VALUES FOR THIS PLOT
        fig_size = [11.7, 8.3]
        params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,
                  'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right'
                  'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}
        rcParams.update(params)

        # Frequencies probed by the single-sine protocol (second assignment wins).
        freq_used0 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 35, 40, 45, 50, 55, 60, 65, 70, 80, 100, 1000])*Hz
        #freq_used0 = np.concatenate((arange(0.1, 1, 0.1), arange(1, 501, 1) ))
        freq_used0 = np.array([1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 200, 400, 600, 800, 1000])

        SNR = None
        NI = None
        VAF = None

        t_stim = 1000*s # only for cnoise

        opt_plot = np.array(["only_mag","normalize", "dB"]) #
        #opt_plot = np.array(["normalize", "dB"]) #

        color_vec = (np.array(["Red", "Blue", "HotPink", "Indigo"]), np.array(["Blue", "Orange", "HotPink", "Indigo"]))
        #color=cm.jet(1.*i/x)

        method_interpol = np.array(['bin','syn'])
        method_interpol = np.array(['bin'])

        for i, o in enumerate(opts):

            # Per-option defaults; the `if ... in o` blocks below override them.
            dt = 0.025*ms
            bin_width = 5*ms
            bin_width = dt
            jitter = 0*ms

            n_syn_ex = [0]
            g_syn_ex = [1]
            noise_syn = 0
            inh_hold = 0
            n_syn_inh = [0]
            g_syn_inh = [1]
            tau1_ex = 0
            tau2_ex = 10*ms
            tau1_inh = 0
            tau2_inh = 100*ms

            cutf = 20
            sexp = -1
            cutf = 0
            sexp = 0

            ihold = [10]
            amod = 0.1 # relative value
            give_freq = True

            anoise = [0]
            fluct_tau = 0*ms

            N = [100]
            amp = 0 # absolute value
            fluct_s = [0] # absolute value 0.0008
            ihold_sigma = [0] # 0.01 absolute value

            CF_var = [[5,10,20]]
            CF_var = False

            syn_tau1 = 5*ms
            syn_tau2 = 5*ms

            do_csd = 1

            # Integrate-and-fire population (also matches "resif"/"if_syn").
            if "if" in o:

                do_csd = 1

                color_vec = (np.array(["Blue"]), np.array(["Blue"]))
                #color_vec = (np.array(["Red"]), np.array(["Red"]))

                cellimport = []
                celltype = ["IfCell"]
                #cell_exe = ["cell = IfCell()"]
                #cell_exe = ["cell = IfCell(e = -70*mV, thresh = -69*mV, vrefrac = -70*mV)"]
                #cell_exe = ["cell = IfCell(e = 0*mV, thresh = 1*mV, vrefrac = 0*mV)"]

                # Brunel
                #cell_exe = ["cell = IfCell(C = 0.0005 *uF, R = 40*MOhm, e = -70*mV, thresh = -50*mV, vrefrac = -56*mV); cell.add_resonance(tau_r = 100*ms, gr = 0.025*uS)"]

                #cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm, sigma_C = 0.2, sigma_R = 0.2)"]
                #cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm)"] # tau = 4 ms
                #cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm, s_reset_noise = 0*mV)"] # tau = 4 ms

                #GrC resting: 737 MOhm, 2.985e-06 uF tau: 0.0022 s
                #GrC transfer fit: tau: 0.027 s => with 2.985e-06 uF, R = 0.027/2.985e-12 = 9045 MOhm
                #cell_exe = ["cell = IfCell(C = 2.985e-06*uF, R = 9045*MOhm)"]

                # IF cell tuned to approximate the granule cell's passive
                # properties (tau_passive = 3e-06*5227 = 15.7 ms).
                thresh = -41.8
                R = 5227*MOhm
                #tau_passive = 3e-06*5227 = 15.7ms
                cell_exe = ["cell = IfCell(C = 3.0e-06*uF, R = " + str(R) + ", e = -71.5*mV, thresh =" + str(thresh) + ", vrefrac = -71.5*mV)"]

                prefix = "if_tf"

                istart = 0
                istop = 0.01
                di = 0.00001

                syn_tau1 = 10*ms
                syn_tau2 = 10*ms

                # Indirect
                give_freq = True
                ihold = [40]
                amod = 1 # relative value

                anoise = [0]
                fluct_tau = 0*ms

                #anoise = 0.1
                #fluct_tau = 100*ms

                # # Direct
                # give_freq = False
                # ihold = [0.00569223341176]
                # amod = None
                # amp = 7.31353725e-06
                #
                # anoise = None
                # fluct_s = [3.65676863e-06]
                # fluct_tau = 0*ms
                #

                # # Low CF, No low noise
                # N = [10000]
                # give_freq = False
                # ihold = [0.004]
                # ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
                # amod = None
                # amp = 0.0021
                #
                # anoise = None
                # fluct_s = [0.00] # .005
                # fluct_tau = 0*ms

                # # Low CF, With low noise
                # N = [10000]
                # give_freq = False
                # ihold = [0.002]
                # ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
                # amod = None
                # amp = 0.001
                #
                # anoise = None
                # fluct_s = [0.002] # .005
                # fluct_tau = 100*ms

            # Resonant IF variant (note: also runs the plain "if" block above).
            if "resif" in o:

                do_csd = 1

                color_vec = (np.array(["Blue"]), np.array(["Blue"]))
                #color_vec = (np.array(["Red"]), np.array(["Red"]))

                cellimport = []
                celltype = ["IfCell"]

                gr = 5.56e-05*uS
                tau_r = 19.6*ms
                R = 5227*MOhm
                delta_t = 4.85*ms
                thresh = (0.00568*nA * R) - 71.5*mV #
                thresh = -41.8

                cellimport = []
                celltype = "IfCell"
                cell_exe = "cell = IfCell(C = 3e-06*uF, R = " + str(R) + ", e = -71.5*mV, thresh =" + str(thresh) + ", vrefrac = -71.5*mV, dgk =" + str(gr) + ", egk = -71.5*mV, ctau =" + str(tau_r) + ")"

                prefix = "resif_tf"

                istart = 0
                istop = 0.01
                di = 0.00001

                syn_tau1 = 10*ms
                syn_tau2 = 10*ms

                # Indirect
                give_freq = True
                ihold = [40]
                amod = 1 # relative value

                anoise = [0]
                fluct_tau = 0*ms

                dt = 0.1*ms

            # Single IF cell driven through a synapse.
            if "if_syn" in o:
                N = [1]
                ihold = [40]
                amod = 1 # relative value

                prefix = "if_syntf"

                n_syn_ex = 1
                g_syn_ex = 0
                noise_syn = 0
                fluct_tau = 0*ms

                freq_used = np.array([])

                tau1_ex=0*ms
                tau2_ex=10*ms

                anoise = [0]

            # Granule-cell population.
            if "grc" in o:

                color_vec = (np.array(["Blue"]), np.array(["Blue"]))

                cellimport = ["from GRANULE_Cell import Grc"]
                celltype = ["Grc"]
                cell_exe = ["cell = Grc(np.array([0.,0.,0.]))"]

                prefix = "grc_tf"

                istart = 0
                istop = 0.1
                di = 0.01

                syn_tau1 = 10*ms
                syn_tau2 = 10*ms

                # Indirect
                give_freq = True
                ihold = [40]
                amod = 1 # relative value

                anoise = [0]
                fluct_tau = 0*ms

                #anoise = 0.1
                #fluct_tau = 100*ms

                # # Direct
                # give_freq = False
                # ihold = [0.0058021085712642992]
                # amod = None
                # amp = 7.31353725e-06
                #
                # anoise = None
                # fluct_s = [3.65676863e-06]
                # fluct_tau = 0*ms
                #

                # # Low CF, No low noise
                # N = [50]
                # give_freq = False
                # ihold = [0.0049]
                # ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
                # amod = None
                # amp = 0.0021
                #
                # anoise = None
                # fluct_s = [0.00] # .005
                # fluct_tau = 0*ms
                #
                #
                # # Low CF, With low noise
                # N = [10000]
                # give_freq = False
                # ihold = [0.003]
                # ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
                # amod = None
                # amp = 0.001
                #
                # anoise = None
                # fluct_s = [0.002] # .005
                # fluct_tau = 100*ms

            # Run-mode defaults; the "prk" block below overrides them.
            use_multisplit = False
            use_mpi = True
            simstep = 1*s

            # Purkinje cell: single cell, multisplit, no MPI.
            if "prk" in o:
                N = [1]
                ihold = [60]

                color_vec = (np.array(["Blue"]), np.array(["Blue"]))

                cellimport = ["from Purkinje import Purkinje"]
                celltype = ["Prk"]
                cell_exe = ["cell = Purkinje()"]

                prefix = "prk_tf"
                temperature = 37

                istart = 0
                istop = 0.1
                di = 0.005

                use_multisplit = True
                use_mpi = False

                t_stim = 5*s # only for cnoise
                simstep = 1*s

            # Granule cell with synaptic drive (also runs the "grc" block).
            if "grc_syn" in o:

                N = [1]
                ihold = [125]
                amod = 1 # relative value

                prefix = "grc_syntf"

                cutf = 20
                sexp = -1

                cutf = 0
                sexp = 0

                n_syn_ex = 1
                g_syn_ex = -1
                noise_syn = 1

                n_syn_inh = -1
                inh_hold = 0
                g_syn_inh = 0

                fluct_tau = 0*ms

                freq_used = np.array([])

                anoise = 0

            # Additive current-noise variants (suffix matching on `o`).
            if "_addn" in o:
                anoise = [6] # RESPONSIBLE FOR FILTERING EFFECT!!!
                fluct_tau = 1*ms
                prefix = prefix + "_addn"
                color_vec = (np.array(["Red"]), np.array(["Red"]))

            # "_addn100" also matches "_addn" above, so this further
            # overrides to slow (100 ms) noise.
            if "_addn100" in o:
                anoise = [2] # RESPONSIBLE FOR FILTERING EFFECT!!!
                fluct_tau = 100*ms
                prefix = prefix + "100"
                color_vec = (np.array(["Green"]), np.array(["Green"]))

            if "_cn_" in o:
                cutf = 20
                sexp = -1
                prefix = prefix + "_cn"

            if "_a01" in o:
                amod=0.1
                prefix = prefix + "_a01"

            plt.figure(i)

            pickle_prefix = "Population.py_" + prefix

            #comm = MPI.COMM_WORLD
            #comm.Barrier() # wait for other nodes

            # Build the population and copy the per-option settings onto it.
            pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, amp = amp, amod = amod, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)

            pop.bin_width = bin_width
            pop.jitter = jitter
            pop.anoise = anoise
            pop.fluct_s = fluct_s
            pop.fluct_tau = fluct_tau
            pop.method_interpol = method_interpol
            pop.no_fmean = False
            pop.CF_var = CF_var

            pop.tau1_ex=tau1_ex
            pop.tau2_ex=tau2_ex
            pop.tau1_inh=tau1_inh
            pop.tau2_inh=tau2_inh

            pop.n_syn_ex = n_syn_ex
            pop.g_syn_ex = g_syn_ex
            pop.noise_syn = noise_syn
            pop.inh_hold = inh_hold
            pop.n_syn_inh = n_syn_inh
            pop.g_syn_inh = g_syn_inh

            pop.force_run = False

            pop.use_multisplit = use_multisplit
            pop.use_mpi = use_mpi
            pop.simstep = simstep
            pop.use_local_dt = False

            pop.syn_tau1 = syn_tau1
            pop.syn_tau2 = syn_tau2

            pop.plot_input = False

            # n_syn_inh == -1 means tonic inhibition via a fluctuating
            # conductance instead of explicit inhibitory synapses.
            if n_syn_inh == -1:
                pop.connect_gfluct(g_i0=g_syn_inh)

            #pop.test_mod(n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, do_plot = True)

            if "ssine" in o:
                pop.color_vec = color_vec
                #pop.color_vec = (np.array(["Red", "Orange", "HotPink", "Indigo"]), np.array(["Red", "Orange", "HotPink", "Indigo"]))
                pop.fun_plot(currlabel = "control", dowhat = "ssine", freq_used = freq_used0, opt_plot = opt_plot)
                pop.save_plot(directory = "./figs/dump/")

            if "cnoise" in o:
                freq_used = np.array([])
                pop.color_vec = color_vec
                #pop.color_vec = (np.array(["Blue", "Green", "DimGray", "DarkGoldenRod"]), np.array(["Blue", "Green", "DimGray", "DarkGoldenRod"]))
                pop.fun_plot(currlabel = "control", dowhat = "cnoise", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, freq_used = freq_used, do_csd = do_csd)
                pop.save_plot(directory = "./figs/dump/")

            if "recon" in o:
                pop.color_vec = color_vec
                #VAF, SNR, ax, tk, K_mat_old = pop.fun_plot(currlabel = "control", dowhat = "cnoise", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, SNR=0, freq_used = freq_used)

                # RECONSTRUCT!
                freq_used = np.array([9, 47, 111, 1000])*Hz
                t_stim = 10*s

                # Optional Gaussian reconstruction kernel ...
                tk = arange(0,0.8192*2,pop.dt)
                K_mat_old = zeros((len(method_interpol),len(tk)), dtype=complex)
                if pop.id == 0:
                    sigma = 0.1e-3
                    a=0.1
                    t0 = tk[floor(len(tk)/2)]
                    K_mat_old[0] = gauss_func(tk, a, t0, sigma)

                # ... which is immediately discarded here, so the kernel is
                # estimated from the data instead.
                K_mat_old = np.array([])

                results = pop.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 5, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, freq_used = freq_used, K_mat_old = K_mat_old, seed = 311)
                freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
                freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual')
                stim, resp_mat, stim_re_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat')

                if pop.id == 0:

                    # Response, stimulus, reconstruction and kernel panels.
                    plt.figure('Reconstruct')
                    axR0 = plt.subplot(4,1,1)
                    axR1 = plt.subplot(4,1,2)
                    axR2 = plt.subplot(4,1,3)
                    axR3 = plt.subplot(4,1,4)

                    axR0.plot(np.arange(len(stim))*pop.dt, resp_mat[0,:])
                    axR0.axis(xmin=0.9, xmax=1)
                    #axR0.plot(t1, voltage[0])

                    axR1.plot(np.arange(len(stim))*pop.dt, stim, 'b')
                    axR1.axis(xmin=0.9, xmax=1)

                    axR2.plot(np.arange(len(stim))*pop.dt, stim_re_mat[0,:], 'r')
                    axR2.axis(xmin=0.9, xmax=1)

                    axR3.plot(tk, K_mat_old[0])

                    plt.savefig("./figs/dump/Reconstruct.pdf", dpi = 300, transparent=True) # save it

            # Drop the population before the next option is configured.
            pop = None
            plt.show()
    # Time-constant experiments: drive a population with a rate step and fit
    # a synaptic kernel (lmfit) to the population response to extract the
    # effective time constant.  One `stimtype` block below is selected.
    # NOTE(review): indentation reconstructed from context (this dump lost
    # all leading whitespace) -- verify nesting against the repository.
    if "timeconst" in do:

        from lmfit import minimize, Parameters

        # SET DEFAULT VALUES FOR THIS PLOT
        fig_size = [11.7, 8.3]
        params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,
                  'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right'
                  'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}
        rcParams.update(params)

        dt = 0.025*ms

        prefix = "timeconst"
        pickle_prefix = "Population.py_" + prefix

        stimtype = "inh_50ms_20ms"

        # Excitatory step onto IF cells through a 20 ms synapse.
        if stimtype == "ex_20ms":
            trun = 2.9
            tstart = 1.8
            tstop = 2.7

            celltype = ["IfCell"]
            cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 200*MOhm)"]
            N = [5000]

            pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[3000*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[2000*ms], pulse_len=[500*ms], weight0=[1*nS], tau01=[0*ms], tau02=[20*ms], weight1=[0*nS], tau11=[0*ms], tau12=[1*ms])

            params = Parameters()
            params.add('amp', value=0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)

        # Excitatory step onto granule cells.
        if stimtype == "ex_gr":
            trun = 6.9
            tstart = 4.8
            tstop = 6.5

            cellimport = ["from GRANULE_Cell import Grc"]
            celltype = ["Grc"]
            cell_exe = ["cell = Grc(np.array([0.,0.,0.]))"]
            N = [4096*10]

            pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[7000*ms], steadyf=[20*Hz], pulsef=[30*Hz], pulse_start=[5000*ms], pulse_len=[500*ms])

            params = Parameters()
            params.add('amp', value=0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)

        # Two IF populations, the second inhibiting the first (50 ms synapse),
        # with a rate step on the inhibitory population.
        if stimtype == "inh_50ms_20ms":
            trun = 2.9
            tstart = 1.8
            tstop = 2.7

            celltype = ["IfCell", "IfCell"]
            cell_exe = ["cell = IfCell()", "cell = IfCell()"]
            N = [10000,10000]

            pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[3000*ms,3000*ms], steadyf=[100*Hz,50*Hz], pulsef=[100*Hz,80*Hz], pulse_start=[2000*ms,2000*ms], pulse_len=[500*ms,500*ms], weight0=[1*nS,1*nS], tau01=[1*ms,1*ms], tau02=[20*ms,20*ms], weight1=[0,0], tau11=[0*ms,0*ms], tau12=[1*ms,1*ms])

            pop.connect_cells(conntype='inh', weight=0.001, tau=50)

            params = Parameters()
            params.add('amp', value=-0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)

        # Granule cells inhibited by Golgi cells, Golgi rate step.
        if stimtype == "inh_gr":
            trun = 9.9
            tstart = 4.8
            tstop = 8

            cellimport = ["from GRANULE_Cell import Grc", "from templates.golgi.Golgi_template import Goc"]
            celltype = ["Grc","Goc_noloop"]
            cell_exe = ["cell = Grc(np.array([0.,0.,0.]))","cell = Goc(np.array([0.,0.,0.]))"]
            N = [100,4]
            #N = [4096, 27]
            #N = [4096*5, 27*5]

            pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[9800*ms,9800*ms], steadyf=[60*Hz,10*Hz], pulsef=[60*Hz,22*Hz], pulse_start=[5000*ms,5000*ms], pulse_len=[1500*ms,1500*ms])

            pop.connect_cells(conntype='inh_gr', weight = 0.3)

            params = Parameters()
            params.add('amp', value=-0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)

        # As inh_50ms_20ms, but the step is a current step, not a rate step.
        if stimtype == "inh_50ms_curr":
            trun = 2.9
            tstart = 1.8
            tstop = 2.8

            celltype = ["IfCell", "IfCell"]
            cell_exe = ["cell = IfCell()", "cell = IfCell()"]
            N = [1000,1000]

            give_freq = True
            istart = 0
            istop = 0.2
            di = 0.01

            ihold = [100, 50]
            ihold_sigma = [0.01, 0.01] # relative sigma

            pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            tstep = 2
            tdur = 0.5
            istep = [100,100]
            # Reference copy of the injected step, for the fit target below.
            current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ]))

            pop.set_IStim()
            pop.set_IStep(istep = istep, istep_sigma = [0.01,0.01], tstep = tstep, tdur = tdur)

            pop.connect_cells(conntype='inh', weight=0.0003, tau=50)

            pop.fluct_s = [0.02,0.05]
            pop.connect_fluct()

            params = Parameters()
            params.add('amp', value=-0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)

        # Granule/Golgi network with a current step onto the Golgi cells.
        if stimtype == "inh_gr_curr":
            trun = 9.9
            tstart = 4.8
            tstop = 8

            cellimport = ["from GRANULE_Cell import Grc", "from templates.golgi.Golgi_template import Goc"]
            celltype = ["Grc","Goc_noloop"]
            cell_exe = ["cell = Grc(np.array([0.,0.,0.]))","cell = Goc(np.array([0.,0.,0.]))"]
            N = [100,4]
            N = [4096, 27]
            N = [4096*10, 27*10]

            give_freq = True

            # GRC
            #istart = 0
            #istop = 0.1
            #di = 0.01

            #GOC
            istart = 0
            istop = 0.5
            di = 0.02

            ihold = [100, 10]
            ihold_sigma = [0, 0] # relative sigma

            pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)
            pop.method_interpol = np.array(["bin", "syn"])
            pop.method_interpol = np.array(["bin"])

            tstep = 5
            tdur = 2
            istep = [100,50]
            current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ]))

            pop.set_IStim()
            pop.set_IStep(istep = istep, istep_sigma = [0,0], tstep = tstep, tdur = tdur)

            pop.connect_cells(conntype='inh_gr', weight = 0.4)

            pop.fluct_s = [0.05,2]
            pop.connect_fluct()

            params = Parameters()
            params.add('amp', value=-0.1)
            params.add('shift', value=10)
            params.add('tau1', value=1, vary=False) # alpha!
            params.add('tau2', value=20*ms)
pop.run_steps(trun)
self.no_fmean = True
results = pop.get()
time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')
if pop.id == 0:
bin_width = 1*ms
freq_times = arange(0, time[-1], bin_width)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)
spike_freq = np.concatenate((zeros(1),num_spikes)) / bin_width / N[0]
if "inh" in stimtype: # generate input current, to complicated to get it out
if "curr" in stimtype:
time1 = np.arange(0, trun, pop.dt)
r_mod = interp(freq_times, time1, current1, left=0, right=0)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)
spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]
else:
r_mod = interp(freq_times, modulation_vec[1][0], modulation_vec[1][1], left=0, right=0)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)
spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]
elif "ex" in stimtype:
r_mod = interp(freq_times, modulation_vec[0][0], modulation_vec[0][1], left=0, right=0)
def modelfun(amp, shift, tau1, tau2, bin_width, r_mod):
tau1 = tau1
tau2 = tau2
t1 = np.arange(0,10*tau2,bin_width)
K = amp*syn_kernel(t1, tau1, tau2)
K = np.concatenate((np.zeros(len(K)-1),K))
t2 = np.arange(0,len(K)*bin_width,bin_width)
model = np.convolve(K, r_mod, mode='same') + shift
return model
def residual(params, r_mod, data=None, bin_width=1*ms, tstart=0, tstop=3):
amp = params['amp'].value
shift = params['shift'].value
tau1 = params['tau1'].value
tau2 = params['tau2'].value
model = modelfun(amp, shift, tau1, tau2, bin_width, r_mod)
return (data[int(tstart/bin_width):int(tstop/bin_width)]-model[int(tstart/bin_width):int(tstop/bin_width)])
result = minimize(residual, params, args=(r_mod, spike_freq, bin_width, tstart, tstop))
print "chisqr: ", result.chisqr
print 'Best-Fit Values:'
for name, par in params.items():
print ' %s = %.4f +/- %.4f ' % (name, par.value, par.stderr)
amp = params['amp'].value
shift = params['shift'].value
tau1 = params['tau1'].value
tau2 = params['tau2'].value
model = modelfun(amp, shift, tau1, tau2, bin_width = bin_width, r_mod = r_mod)
if "ex" in stimtype:
plt.figure(0)
plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])
plt.figure(1)
plt.plot(time, voltage[0]), freq_times, r_mod, time, current
#plt.figure(100)
#plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')
#plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_spikes.pdf", dpi = 300) # save it
else:
plt.figure(0)
plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq1[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])
plt.figure(1)
plt.plot(time, voltage[0], time, voltage[1], freq_times, r_mod, time, current)
plt.figure(100)
#plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')
#plt.plot(t_all_vec_vec[1],id_all_vec_vec[1],'b|')
#plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_spikes.pdf", dpi = 300) # save it
plt.figure(0)
plt.title('Fit: ' + str(stimtype) + ', tau1=' + str(tau1) + ' tau2=' + str(tau2))
plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_rate.png", dpi = 300) # save it
plt.figure(1)
plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_voltage.png", dpi = 300) # save it
plt.show()
|
2,701 | 18a17c7326a6ae96f74c843d1a902074b377a6d2 | import os
import sys
import pandas as pd
import pickle as pkl
from src.utils import image as im
if __name__ == '__main__':
    # NOTE(review): dropped the unused `pickled` / `create_sets` / `normed`
    # flags -- nothing in this script read them.

    # BUG FIX: the original test was `len(sys.argv) > 2`, which required a
    # *second* (ignored) argument before the first one was used; a single
    # CLI argument is enough to override the default input path.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = os.path.join(os.path.pardir, os.path.pardir, 'data', 'final_transp_directpkl.pkl')

    if os.path.splitext(filename)[1] == '.txt':
        # Tab-separated dumps can be large; stream them in chunks.
        iter_csv = pd.read_csv(filename, sep='\t', index_col=0, chunksize=20000)
        df = pd.concat(iter_csv)
    else:
        # BUG FIX: close the pickle file handle (was a bare open() leak).
        with open(filename, 'rb') as handle:
            df = pkl.load(handle)

    # Plot a random sample of 1000 genes and save the figure alongside the
    # input file.
    fig = im.plot_genes(df.sample(1000))
    fig.savefig(os.path.splitext(filename)[0] + '.png')
|
2,702 | 784159dfb2e85ca4634adf790e68129834155e4d | # -*- coding: utf-8 -*-
from pathlib import Path
from ruamel.yaml import YAML
from .screen import color2sgr
def _get(d, *paths):
""" Query into configuration dictionary, return None on any error
usag:
_get(d, 'k1.2.k3.k4', 2, 'name')
"""
if d is None:
return None
if paths is None:
return None
for path in paths:
if path is None:
return None
path = path.split('.')
for key in path:
try:
i = int(key)
if i in d:
return d[i]
else:
return None
except BaseException:
d = d.get(key, None)
if d is None:
return None
return d
class _Settings:
    """jaclog configuration: packaged defaults overridden per-entry by the
    user's ``~/.config/jaclog/jaclog.yml`` (created from the defaults on
    first run)."""

    def __init__(self):
        self._loadConfigs()
        self._loadSymbols()
        self._loadColors()

        # Scalar layout settings: the user's value wins only when it is a
        # positive integer, otherwise the packaged default is used.
        # (Refactor: the four copy-pasted validate-or-fallback blocks were
        # folded into _positiveIntAt; behavior is unchanged.)
        self.margin = self._positiveIntAt('margin')
        self.symbolWidth = self._positiveIntAt('symbols.width')
        self.sessionTimeLinePadding = self._positiveIntAt('sessionTimeLinePadding')
        self.logTimeLinePadding = self._positiveIntAt('logTimeLinePadding')

    def _positiveIntAt(self, *paths):
        """Return the user's value at *paths* if it is a positive int, else
        the packaged default at the same location."""
        v, d = self._valueAt(*paths)
        if isinstance(v, int) and v > 0:
            return v
        return d

    def _valueAt(self, *paths):
        """Look up *paths* in both configs; return (user value, default value)."""
        u = _get(self.userConfig, *paths)
        d = _get(self.defaultConfig, *paths)
        return u, d

    def _loadConfigs(self):
        """Load the packaged default YAML and the user's config file,
        creating the user file from the defaults on first run."""
        yaml = YAML()

        defaultFile = Path(__file__).parent / 'resources' / 'jaclog.yml'
        self.defaultConfig = yaml.load(defaultFile)

        userFile = Path('~/.config/jaclog/jaclog.yml').expanduser()
        userFile.parent.mkdir(parents=True, exist_ok=True)
        if not userFile.exists():
            userFile.write_text(defaultFile.read_text())
        self.userConfig = yaml.load(userFile)

    def _loadSymbols(self):
        """Resolve the symbol scheme selected by ``symbols.use``; each entry
        falls back to the default scheme when the user has no string value."""
        use = _get(self.userConfig, 'symbols.use')
        scheme = _get(self.userConfig, 'symbols.schemes', use)
        default = _get(self.defaultConfig, 'symbols.schemes.default')

        symbols = {}
        for name in default:
            v = _get(scheme, name)
            # Only the first character of a user-supplied symbol is kept so
            # the rendered columns stay aligned.
            if isinstance(v, str):
                symbols[name] = v[0]
            else:
                symbols[name] = default[name]
        self.symbols = symbols

    def _loadColors(self):
        """Resolve the color scheme selected by ``colors.use`` into SGR
        sequences, falling back to the default scheme entry by entry."""
        use = _get(self.userConfig, 'colors.use')
        scheme = _get(self.userConfig, 'colors.schemes', use)
        default = _get(self.defaultConfig, 'colors.schemes.default')

        colors = {}
        for name in default:
            colors[name] = color2sgr(_get(scheme, name)) \
                or color2sgr(default[name])
        self.colors = colors


# Module-level singleton; importing this module reads (and may create) the
# user's configuration file.
settings = _Settings()
|
2,703 | a3299a2945a638c74c2d16bc28079ed692718fbd | from collections import defaultdict
# Map each perfect square below 2000^2 to its integer root, for O(1)
# hypotenuse lookup.
squares = {root * root: root for root in range(2000)}

# Count, for every right triangle with integer legs a < b <= 1000, how many
# triangles share each perimeter a + b + c.
perims = defaultdict(int)
for leg_a in range(1, 1001):
    for leg_b in range(leg_a + 1, 1001):
        hyp_sq = leg_a * leg_a + leg_b * leg_b
        hyp = squares.get(hyp_sq)
        if hyp is not None:
            perims[leg_a + leg_b + hyp] += 1

# Report perimeters up to 1000 realized by more than one triangle, ordered
# by how many triangles share them (ascending).
for perimeter, count in sorted(perims.items(), key=lambda item: item[1]):
    if count > 1 and perimeter <= 1000:
        print(perimeter, count)
2,704 | c75c69b006734e476352de1913fd4a58021bffd6 | import datetime
from datetime import datetime, timedelta
import time
import json
import base64
import requests
from bson.objectid import ObjectId
import urllib
isinpackage = not __name__ in ['google_api', '__main__']
if isinpackage:
from .settings import settings
from . import util
from .util import Just
from .db import get_collection
from .import certificate
else:
from settings import settings
# import util
from util import Just
from db import get_collection
# import certificate
users_db = get_collection('users')
client_id = settings.google.client_id()
redirect_uri = f'{settings.url_prefix()}/api/v1/oauth/google/redirect'
scope = urllib.parse.quote(settings.google.scope(), safe='')
access_type = settings.google.access_type()
prompt = settings.google.prompt()
response_type = settings.google.response_type()
def get_certs_keys(kid):
    """Fetch Google's JWKS cert list and return the entry whose 'kid'
    matches, or None if no key has that id.

    BUG FIX: ``filter`` was called without its iterable argument
    (``filter(lambda e: ...)``), which raised TypeError on every call.
    """
    url = 'https://www.googleapis.com/oauth2/v3/certs'
    data = requests.get(url).json()['keys']
    return next((e for e in data if e['kid'] == kid), None)
def get_redirect_link(realid=None):
    """Build the Google OAuth consent-screen URL.

    Registers a fresh anti-CSRF `state` (carrying the optional internal user
    id `realid`) before embedding it in the query string.
    """
    state = util.generate_id(50)
    certificate.register_state(state, "google_oauth", {"realid": realid})

    query_parts = [
        f"client_id={client_id}",
        f"include_granted_scopes={'true'}",
        f"redirect_uri={redirect_uri}",
        f"scope={scope}",
        f"access_type={access_type}",
        f"state={state}",
        f"prompt={prompt}",
        f"response_type={response_type}",
    ]
    return 'https://accounts.google.com/o/oauth2/v2/auth?' + '&'.join(query_parts)
def code_to_refresh_token(code):
    """Exchange an OAuth authorization code for tokens.

    Posts the code to Google's token endpoint and returns
    ``(profile, tokens)``: `profile` is the decoded id_token payload
    (the JWT claims) and `tokens` is the raw token response dict
    (access_token, refresh_token, id_token, ...).
    """
    endpoint = 'https://oauth2.googleapis.com/token'
    tokens = requests.post(endpoint, {
        'code': code,
        'client_id': client_id,
        'client_secret': settings.google.google_client_secret(),
        'redirect_uri': redirect_uri,
        'grant_type': 'authorization_code'
    }).json()
    # The JWT header is decoded too but discarded; the signature is NOT
    # verified here (see decode_id_token).
    header, profile = decode_id_token(tokens['id_token'])
    return profile, tokens
def decode_base64_padding(s):
    """Decode a urlsafe base64 string, restoring any stripped '=' padding."""
    padding = '=' * (-len(s) % 4)
    decoded_bytes = base64.urlsafe_b64decode(s + padding)
    return decoded_bytes.decode()
def decode_id_token(id_token):
    """Split a JWT id_token into its decoded (header, payload) dicts.

    The signature segment is ignored and NOT verified.
    """
    segments = id_token.split('.')
    header = json.loads(decode_base64_padding(segments[0]))
    payload = json.loads(decode_base64_padding(segments[1]))
    # key = get_certs_keys(header['kid'])
    return header, payload
def register(profile, tokens, realid=None):
    """Persist a Google profile and its tokens on a user document.

    With `realid`: attach/refresh the google connection on that existing
    user, bumping ``connections.length`` only when the account was not
    already linked.  Without `realid`: create a brand-new user document.
    """
    # Store the token fields alongside the profile claims in one dict.
    profile.update(tokens)

    # Pre-check whether this google account is already linked to the user.
    # NOTE(review): when realid is None, ObjectId(None) is a freshly
    # generated id, so this lookup matches nothing -- presumably intended,
    # but confirm against bson.ObjectId semantics.
    user = users_db.find_one({'_id': ObjectId(realid), 'connections.google.sub': profile['sub']})
    if realid:
        users_db.update_one({'_id': ObjectId(realid)}, {
            '$set': {
                'connections.google': profile,
            },
            '$inc': {
                # Count the connection only if it was not linked before.
                'connections.length': 0 if user else 1
            }
        })
        print('add google info')
    else:
        users_db.insert_one({
            'connections': {
                'google': profile,
                'length': 1
            }
        })
        print('connect with google')
def refresh_token(refresh_token):
    """Exchange a long-lived refresh token for a fresh token response dict."""
    payload = {
        'client_id': client_id,
        'client_secret': settings.google.google_client_secret(),
        'refresh_token': refresh_token,
        'grant_type': 'refresh_token'
    }
    response = requests.post('https://oauth2.googleapis.com/token', payload)
    return response.json()
def verify_access_token(access_token):
    """Return True iff Google's tokeninfo endpoint still accepts the token."""
    url = 'https://oauth2.googleapis.com/tokeninfo?access_token=' + access_token
    response = requests.get(url)
    return response.status_code == 200
def get_access_token(google_user_id):
    """Return a currently valid access token for the given Google 'sub' id.

    Reuses the stored access token when Google still accepts it; otherwise
    trades the stored refresh token for a new one.
    """
    data = Just(users_db.find_one({'connections.google.sub': google_user_id}))
    access_token = data.connections.google.access_token()
    _refresh_token = data.connections.google.refresh_token()
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # an explicit error when no refresh token is on file.
    assert _refresh_token
    if access_token and verify_access_token(access_token):
        return access_token
    else:
        # NOTE(review): the freshly issued token is returned but never
        # written back to the database -- confirm that is intentional.
        return Just(refresh_token(_refresh_token)).access_token()
def get_real_user_id(user_id):
    """Map a Google 'sub' id to the internal MongoDB user id, as a string."""
    record = users_db.find_one({"connections.google.sub": user_id})
    return str(record["_id"])
def get_google_user_id(real_user_id):
    """Return the Google 'sub' id linked to the given internal user id.

    Raises RuntimeError if the user does not exist or has no Google
    connection.
    """
    data = Just(users_db.find_one({"_id": ObjectId(real_user_id)}))
    # Bug fix: the original tested for a 'line' connection but then read the
    # 'google' one, so Google-only users always hit the error branch.
    if data() and ('google' in data.connections()):
        return data.connections.google.sub()
    else:
        raise RuntimeError(f'no google connection for user {real_user_id}')
def add_event(real_user_id, start, end, options=None):
    """Insert an event into the user's primary Google calendar.

    start/end are ISO datetime strings (Asia/Tokyo); *options* may carry
    extra event fields such as 'summary' and 'description'.
    Returns True on success; on failure prints the response body and
    returns False.
    """
    # Bug fix: the original used a mutable dict as the default argument;
    # use None and substitute the same defaults here instead.
    if options is None:
        options = {'summary': '', 'description': ''}
    endpoint = 'https://www.googleapis.com/calendar/v3/calendars/primary/events'
    d = {
        'end': {
            'dateTime': end,
            'timeZone': 'Asia/Tokyo'
        },
        'start': {
            'dateTime': start,
            'timeZone': 'Asia/Tokyo'
        },
    }
    d.update(options)
    res = requests.post(endpoint, json=d, headers={
        'content-type': 'application/json',
        'authorization': f'Bearer {get_access_token(get_google_user_id(real_user_id))}'
    })
    r = res.status_code == 200
    if not r:
        print(res.text)
    return r
|
2,705 | b257e36b3cb4bda28cf18e192aa95598105f5ae9 | import pandas as pd
# read the data
df = pd.read_csv("data/lottery.csv")
# keep only the six drawn numbers plus the bonus ball, as plain lists
draws = df[['1','2','3','4','5','6','bonus']].values.tolist()
# cnt_number[n] holds how many times ball n has appeared (index 0 unused)
cnt_number = [0] * 46
# tally every number of every draw
for draw in draws:
    for number in draw:
        cnt_number[int(number)] += 1
# print the appearance times
for k in range(1, 46):
    print('%5d -> %3d times'%(k, cnt_number[k]))
2,706 | c5d0b23396e084ad6ffade15b3aa3c59b6be3cc0 | from django.test import TestCase
from django.core.files import File
from ResearchManage.forms import ResearchFormMKI
from django.test import Client
from unittest import TestCase, mock
from datetime import date, timedelta
from django.core.files.uploadedfile import SimpleUploadedFile
import os
# Create your tests here.
class TestForms(TestCase):
    """Tests for ResearchFormMKI, the initial research-application form.

    NOTE(review): the file imports django.test.TestCase and then shadows it
    with unittest.TestCase; these tests therefore run without Django's DB
    isolation — confirm that is intended.
    """

    # Dates safely in the past / future relative to today.
    _PAST = date.today() - timedelta(days=2000*5)
    _FUTURE = date.today() + timedelta(days=2000*5)

    # Every file field declared on the form; each test attaches the same
    # throwaway upload to all of them.
    _FILE_FIELDS = ('another_doc', 'contract', 'advertising', 'write_objects',
                    'protocol_research', 'accept_research', 'form_inf',
                    'cast_researcher', 'list_members', 'document')

    def _make_form(self, filename='Test.txt', **overrides):
        """Return a bound ResearchFormMKI with valid defaults.

        Creates a temporary upload file *filename* in the current directory
        (bug fix: the original concatenated the path without a separator,
        writing e.g. '/home/userTest.txt' next to the cwd), uses its bytes
        as the payload of every file field, deletes it again, and applies
        *overrides* on top of the valid default data.
        """
        path = os.path.join(os.path.abspath(os.curdir), filename)
        with open(path, 'wb') as f:
            f.write(b"ABOBA")
        with open(path, 'rb') as f:
            payload = f.read()
        data = {
            'protocol_number': '224',
            'description': 'Тестовое описание',
            'main_researcher': 1,
            'ver_bio': 'Тесты тестов',
            'version': 'Тестовая',
            'cast_researcher_date': self._PAST,
            'accept_research_version': 'Тестовая версия',
            'accept_research_date': self._PAST,
            'protocol_research_version': 'Тестовая версия',
            'protocol_research_date': self._PAST,
            'contract_date': self._PAST,
            'name_another_doc': 'Тест',
            'another_doc_version': 'Тестовая',
            'another_doc_date': self._PAST,
        }
        data.update(overrides)
        files = {name: SimpleUploadedFile(name, payload)
                 for name in self._FILE_FIELDS}
        form = ResearchFormMKI(data=data, files=files)
        os.remove(path)
        return form

    def test_valid_ResearchFormMKI_form(self):
        # Fully valid initial application: the form must validate.
        form = self._make_form()
        print(form.errors)
        print("test_valid_ResearchFormMKI_form")
        self.assertTrue(form.is_valid())

    def test_wrong_data_ResearchFormMKI_form(self):
        # Document dates in the future must be rejected.
        # (Was a known-failing case when originally written.)
        form = self._make_form(
            cast_researcher_date=self._FUTURE,
            accept_research_date=self._FUTURE,
            protocol_research_date=self._FUTURE,
            contract_date=self._FUTURE,
            another_doc_date=self._FUTURE,
        )
        print(form.errors)
        print("test_wrong_data_ResearchFormMKI_form")
        self.assertFalse(form.is_valid())

    def test_wrong_file_format_ResearchFormMKI_form(self):
        # Unsupported upload file type must be rejected.
        # NOTE(review): as in the original, the uploaded *names* are the
        # field names (no extension); only the temp file on disk carries
        # the odd extension — confirm this exercises format validation.
        # TODO: extend to cover each file field separately.
        form = self._make_form(filename='Test.aboba')
        print(form.errors)
        print("test_wrong_file_format_ResearchFormMKI_form")
        self.assertFalse(form.is_valid())

    def test_empty_main_researcher_format_ResearchFormMKI_form(self):
        # A missing main researcher must invalidate the form.
        form = self._make_form(main_researcher=None)
        print(form.errors)
        print("test_empty_main_researcher_format_ResearchFormMKI_form")
        self.assertFalse(form.is_valid())

    def test_empty_char_fields_format_ResearchFormMKI_form(self):
        # Empty character fields must invalidate the form.
        # TODO: extend to cover each field separately.
        form = self._make_form(
            protocol_number=None,
            description=None,
            ver_bio=None,
            version=None,
            accept_research_version=None,
            protocol_research_version=None,
            name_another_doc=None,
            another_doc_version=None,
        )
        print(form.errors)
        print("test_empty_char_fields_format_ResearchFormMKI_form")
        self.assertFalse(form.is_valid())

    def test_empty_date_fields_ResearchFormMKI_form(self):
        # Empty date fields are currently accepted (form stays valid).
        # (Was a known-failing case when originally written.)
        form = self._make_form(
            cast_researcher_date=None,
            accept_research_date=None,
            protocol_research_date=None,
            contract_date=None,
            another_doc_date=None,
        )
        print(form.errors)
        print("test_empty_date_fields_ResearchFormMKI_form")
        self.assertTrue(form.is_valid())
2,707 | a96575d507a91472176c99d4d55e2a3bbf8111d1 | from django.contrib import admin
from .models import JobListing
from .models import Employer
# Expose the job-board models in the Django admin site.
admin.site.register(JobListing)
admin.site.register(Employer)
|
2,708 | 358fd8efd5c3823255ab64d5f8b88b343415ed0e | #Some people are standing in a queue. A selection process follows a rule where people standing on even positions are selected. Of the selected people a queue is formed and again out of these only people on even position are selected. This continues until we are left with one person. Find out the position of that person in the original queue.
#Input:
#The first line of input contains an integer T denoting the number of test cases. The first line of each test case is N, the number of people standing in the queue.
#Output:
#Print the position(original queue) of that person who is left.
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def even(n):
    """Return the original-queue position of the last person remaining
    after repeatedly keeping only the people at even positions.

    The survivor stands at the largest power of two <= n, so even(6) == 4
    and even(8) == 8. Returns None for n < 1 (empty queue).

    Bug fix: the original returned the largest power of two strictly LESS
    than n (wrong whenever n itself is a power of two) and returned None
    for n == 1.
    """
    if n < 1:
        return None
    # 2 ** floor(log2(n)) computed exactly with integer arithmetic.
    return 2 ** (n.bit_length() - 1)
# Driver: read T test cases, compute the survivor position for each,
# collect the answers, then print them space-separated.
t = int(input("Enter number of test cases:"))
arr = []
for i in range(t):
    n = int(input())
    ans = even(n)
    arr.append(ans)
for i in range(len(arr)):
    print(arr[i], end = ' ')
# --------------------------------------------------------------------------------------------------------------------
# Alternative solution: the survivor is 2**floor(log2(n)) directly.
import math
t = int(input())
for i in range(t):
    n =int(input())
    print(pow(2,int(math.log(n,2))))
2,709 | 3badf65a5301cc9cf26811e3989631aec5d31910 | from django.db import models
# Create your models here.
class Pastebin(models.Model):
    """A single pasted snippet, addressed by its auto-generated primary key."""
    name= models.CharField(max_length=30)          # display name of the paste
    textpaste = models.CharField(max_length=80)    # the pasted text itself
    pasteurl = models.AutoField(primary_key=True)  # numeric id used in the URL
    def __str__(self):
        return self.name
|
2,710 | d07a26a69ccbbccf61402632dd6011315e0d61ed | from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
# Scrape fixture rows from a user-supplied URL and print "home vs away"
# for every match that is not currently being played.
url = input('Enter - ')
html = urlopen(url).read()
soup = BeautifulSoup(html, "html.parser")
# Rows whose id starts with "nonplayingnow" hold the fixtures of interest.
tags = soup.find_all('tr', {'id': re.compile(r'nonplayingnow.*')})
for i in tags:
    casa = i.find("td", {'class': re.compile(r'team-home')}).find("a")
    visitante = i.find("td", {'class': re.compile(r'team-away')}).find("a")
    print ("Partido-> "+casa.get_text()+" vs "+visitante.get_text())
|
2,711 | 774bf2b49f6e546f16294edc17e9ac34fa8a9ba8 | class Figura:
def __init__(self):
print("Tworze obiekt klasy Figura...")
def pobierz_polozenie(self):
print("Metoda pobierz_polozenie klasy Figura.")
def nadaj_polozenie(self):
print("Metoda nadaj_polozenie klasy Figura.")
def wyswietl(self):
print("Metoda wyswietl klasy Figura.")
def wypelnij(self):
print("Metoda wypelnij klasy Figura.")
def nadaj_kolor(self):
print("Metoda nadaj_kolor klasy Figura.")
def usun(self):
print("Metoda usun klasy Figura.")
class Punkt(Figura):
    """Point shape; overrides display/fill/delete with its own trace messages."""
    def __init__(self):
        print("Tworze obiekt klasy Punkt...")
    def wyswietl(self):
        print("Metoda wyswietl klasy Punkt.")
    def wypelnij(self):
        print("Metoda wypelnij klasy Punkt.")
    def usun(self):
        print("Metoda usun klasy Punkt.")
class Linia(Figura):
    """Line shape; overrides display/fill/delete with its own trace messages."""
    def __init__(self):
        print("Tworze obiekt klasy Linia...")
    def wyswietl(self):
        print("Metoda wyswietl klasy Linia.")
    def wypelnij(self):
        print("Metoda wypelnij klasy Linia.")
    def usun(self):
        print("Metoda usun klasy Linia.")
class Kwadrat(Figura):
    """Square shape; overrides display/fill/delete with its own trace messages."""
    def __init__(self):
        print("Tworze obiekt klasy Kwadrat...")
    def wyswietl(self):
        print("Metoda wyswietl klasy Kwadrat.")
    def wypelnij(self):
        print("Metoda wypelnij klasy Kwadrat.")
    def usun(self):
        print("Metoda usun klasy Kwadrat.")
class XXOkrag:
    """Legacy circle implementation (the adaptee): same capabilities as
    Figura but with an incompatible method-name vocabulary."""
    def __init__(self):
        print("Tworze obiekt klasy XXOkrag...")
    def wyswietlaj(self):
        print("Metoda wyswietlaj klasy XXOkrag.")
    def wypelniaj(self):
        print("Metoda wypelniaj klasy XXOkrag.")
    def usuwaj(self):
        print("Metoda usuwaj klasy XXOkrag.")
    def pobierz_polozenie(self):
        print("Metoda pobierz_polozenie klasy XXOkrag.")
    def nadaj_polozenie(self):
        print("Metoda nadaj_polozenie klasy XXOkrag.")
    def ustaw_kolor(self):
        print("Metoda ustaw_kolor klasy XXOkrag.")
class Okrag(Figura):
    """Adapter: exposes the Figura interface by delegating every call to a
    wrapped XXOkrag instance."""
    def __init__(self):
        # Deliberately does not call Figura.__init__ (no base trace message).
        self.xokrag = XXOkrag()
    def pobierz_polozenie(self):
        self.xokrag.pobierz_polozenie()
    def nadaj_polozenie(self):
        self.xokrag.nadaj_polozenie()
    def wyswietl(self):
        self.xokrag.wyswietlaj()
    def wypelnij(self):
        self.xokrag.wypelniaj()
    def nadaj_kolor(self):
        self.xokrag.ustaw_kolor()
    def usun(self):
        self.xokrag.usuwaj()
# Demo: the adapter lets Okrag be used interchangeably with native shapes.
if __name__ == "__main__":
    lista_figur = [Linia(), Kwadrat(), Okrag()]
    for fig in lista_figur:
        fig.wyswietl()
|
2,712 | e95ebb2aa6526e3bf3789da17d144e71cdb49aca | from DHT_Python import dht22
from oled96 import oled
from PiBlynk import Blynk
# read data using pin 4
# read data using pin 4
instance = dht22.DHT22(pin=4)
token = "---token---"  # Blynk auth token placeholder — supply a real one
blynk = Blynk(token)
def cnct_cb():
    # Called by PiBlynk once the connection to the Blynk server is up.
    print ("Connected: ")
blynk.on_connect(cnct_cb)
def _funCb(ACT):
    # Periodic tick: poll the DHT22 and, if the reading is valid, show the
    # values on the OLED and push them to the Blynk virtual pins.
    result = instance.read()
    if result.is_valid():
        strTemp=("%.2f" % result.temperature)
        strHumi=("%.2f" % result.humidity)
        # Show temperature and humidity on OLED
        oled.yell2("Temp="+strTemp,"Humi="+strHumi)
        blynk.virtual_write(1,strTemp) # User Virtual port V1
        blynk.virtual_write(2,strHumi) # User Virtual port V2
# NOTE(review): 140 is the Ticker period in PiBlynk's units; the original
# "~2 Hz" note looks inconsistent with that value — confirm the units.
blynk.Ticker(_funCb, 140, False)
blynk.gpio_auto("button")
blynk.run()
|
2,713 | 2105619102de0d4d976c7bdfc839ee08058b7ab5 | #!/usr/bin/python
# Script to time convolution using different number of processors.
# Jason Neal
# December 2016
from __future__ import division, print_function
import datetime
from eniric.nIRanalysis import convolve_spectra
spectrum_name = "lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat"
data_rep = "../data/nIRmodels/"
results_dir = "../data/results/"
spectrum_path = data_rep + "PHOENIX-ACES/PHOENIX-ACES-AGSS-COND-2011-HiRes/" + spectrum_name
# Some test parameters
band = "K"
R = 100000
vsini = 1
epsilon = 0.6
fwhm_lim = 5
plot = False
numprocs = 0  # NOTE(review): dead assignment — immediately overwritten below
numprocs = [None, 0, 1, 2, 3, 4]
def time_diff_procs(numprocs):
    """Time the convolution once per processor-count setting.

    Returns a dict mapping each entry of *numprocs* to the wall-clock
    timedelta its convolution run took.
    """
    timings = dict()
    for proc in numprocs:
        started = datetime.datetime.now()
        convolve_spectra(spectrum_path, band, vsini, R, epsilon, fwhm_lim,
                         plot, numprocs=proc)
        timings[proc] = datetime.datetime.now() - started
    return timings
# Run the benchmark and print one row per processor configuration.
conv_times = time_diff_procs(numprocs)
print("Num Processors\t Time")
for key in numprocs:
    print("{0}\t{1}".format(key, conv_times[key]))
|
2,714 | fff70312fa7c3259cf4c3d9e7ebd8ca5b9a56887 | from sqlalchemy import Integer, String, Column
from sqlalchemy.orm import Query
from server import db
class Formation(db):
    """ORM model for a training programme (table ``formation``)."""
    __tablename__ = "formation"
    query: Query
    id_form = Column(Integer, primary_key=True)
    filiere = Column(String, nullable=False)  # branch / field of study
    lieu = Column(String, nullable=False)     # location
    niveau = Column(String, nullable=False)   # level
    @staticmethod
    def create(filiere: str, lieu: str, niveau: str):
        """Build an unsaved Formation instance from its three attributes."""
        return Formation(filiere=filiere, lieu=lieu, niveau=niveau)
    def to_json(self):
        """Serialize to the API's JSON shape (English key names)."""
        return {
            'id': self.id_form,
            'branch': self.filiere,
            'location': self.lieu,
            'level': self.niveau,
        }
|
2,715 | 016255d74ccf4ac547e4b212d33bb9a39295c830 | # Generated by Django 3.2.3 on 2021-07-02 08:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add the boolean ``xulykho`` field to ``phieunhaphang``."""
    dependencies = [
        ('khovan', '0003_nhapkho'),
    ]
    operations = [
        migrations.AddField(
            model_name='phieunhaphang',
            name='xulykho',
            field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),
            preserve_default=False,
        ),
    ]
|
2,716 | 8ecd1d6b43027153e05c771eb7183c062319eebc | #d
#b
#c
#b,c |
2,717 | 849c468e4890c19806c678089ec8668576538b12 | from flask import (Flask, g, render_template, flash, redirect, url_for)
from flask_login import (LoginManager, login_user, logout_user,
login_required, current_user)
import forms
import models
import sqlite3
DEBUG = True
app = Flask(__name__)
app.secret_key = 'auoesh.bouoastuh.43,uoausoehuoshuosth3ououea.auoub!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
    """Flask-Login hook: fetch a User by primary key, or None if absent."""
    try:
        # Bug fix: the original called models.user.get (lowercase 'user'),
        # which would raise AttributeError instead of loading the user.
        return models.User.get(models.User.id == userid)
    except models.DoesNotExist:
        return None
def initialize():
    """Create the database tables once at startup."""
    models.DATABASE.connect()
    models.DATABASE.create_tables([models.User], safe=True)
    # Bug fix: the original called .closer(), which does not exist and
    # would raise AttributeError, leaving the connection open.
    models.DATABASE.close()
@app.before_request
def before_request():
    """Connect to the database before each request."""
    g.db = models.DATABASE
    g.db.connect()
    g.user = current_user
@app.after_request
def after_request(response):
    """Close the database connection after each request."""
    g.db.close()
    return response
@app.route('/register', methods=('GET', 'POST'))
def register():
    """Sign up a new user from the registration form."""
    form = forms.RegistrationForm()
    if form.validate_on_submit():
        # NOTE(review): success is flashed before create_user runs; if
        # creation raises, the user still sees the success message.
        flash("Yay, you registered", "sucess")
        models.User.create_user(
            username=form.username.data,
            email=form.email.data,
            password=form.password.data,
            # NOTE(review): 'confrimpassword' looks like a typo for
            # 'confirmpassword' — verify against create_user's signature.
            confrimpassword=form.password.data
        )
        return redirect(url_for('index'))
    return render_template('register.html', form=form)
def check_password_hash(password, data):
    # NOTE(review): stub that always returns None (falsy), so the login
    # view below can never succeed. Replace with a real hash comparison
    # (e.g. werkzeug.security.check_password_hash) — confirm intent.
    pass
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Log a user in by email + password."""
    form = forms.LoginForm()
    if form.validate_on_submit():
        try:
            # Bug fix: 'models.User.emails' and 'models.DoesNOtExit' were
            # typos that made every login attempt crash with AttributeError
            # instead of reaching either flash branch.
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
            flash("Your email or password doesn't match !", "error")
        else:
            if check_password_hash(user.password, form.password.data):
                login_user(user)
                flash("You've been logged in:", "Sucess")
                return redirect(url_for('index'))
            else:
                flash("Your email or password doesn't match!", "error")
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    flash("You.ve been logged out! Come back soon!", "sucess")
    return redirect(url_for('index'))
@app.route('/new_post', methods=('GET', 'POST'))
@login_required #makes sures the user is logged in before been able to post
def post():
    """Create a new post authored by the current user."""
    form = forms.PostForm()
    if form.validate_on_submit():
        # _get_current_object() unwraps the werkzeug proxy around g.user.
        models.Post.create(user=g.user._get_current_object(),
                           content=form.content.data.strip())
        flash("Message Posted! Thanks!", "sucess")
        return redirect(url_for('index'))
    return render_template('post.html', form=form)
@app.route('/')
def index():
    """Placeholder landing page."""
    return 'Hey!'
"""
models.initialize()
try:
models.User.create_user(
username='Steve',
email='stephenashom40@gmail.com',
password='passsword',
admin=True
)
except ValueError:
pass
"""
# Run the development server (DEBUG enables the reloader and debugger).
if __name__ == '__main__':
    app.run(debug=DEBUG)
|
2,718 | 094f482ec6d36dfaed7e908bc445e6e015ec409d | # coding: utf-8
'''
Created on 2013-7-8
@author: huqiming
'''
import json
import re
import urllib2
'''
ๅพ่ฏดๅ
ๅฎน
'''
class ts_content:
    """One scraped picture-post: title, date and a list of paragraphs.

    (Replaces the original's garbled non-ASCII attribute docstrings with
    English documentation, and moves the mutable ``parts`` list from a
    shared class attribute to a per-instance attribute.)
    """
    # Kept as class-level defaults so attribute access stays compatible.
    title = ''   # post title
    date = ''    # post date
    def __init__(self):
        # Bug fix: 'parts' was a mutable class attribute; give every
        # instance its own list so posts never share state.
        self.parts = []
    def __str__(self):
        return 'parts: ' + str(self.parts)
'''
ๅพ่ฏดๆฎต่ฝ
'''
class ts_content_part(json.JSONEncoder):
    """A titled paragraph of a post, holding its child items.

    NOTE(review): inheriting json.JSONEncoder looks like a hook for the
    project's custom encoder (see the commented ts_json usage at the end
    of the file) — confirm before removing.
    """
    title = ''   # paragraph headline text
    items = []   # list of ts_content_part_item
    # NOTE(review): 'items' is a mutable class attribute; the parser always
    # assigns a fresh list per instance, so it is not shared in practice.
    def __str__(self):
        return 'title: ' + self.title + ' items: ' + str(self.items)
class ts_content_part_item(json.JSONEncoder):
    """A single item inside a paragraph: optional text plus optional image URL."""
    txt_info = ''
    img_url = ''
    def __init__(self, txt, img):
        # Falsy arguments (None / '') leave the class-level '' defaults.
        if txt :
            self.txt_info = txt
        if img :
            self.img_url = img
    def __str__(self):
        return 'info: ' + self.txt_info + ' img: ' + self.img_url
def parse_content(url):
    """Fetch a post page (GBK-encoded) and parse it into a ts_content."""
    # print(url)
    page = urllib2.urlopen(url)
    html = page.read()
    source = html.decode('GBK')
    parts = perform_parse_content(source)
    result = ts_content()
    result.parts = parts;
    return result
def perform_parse_content(source):
    """Split page HTML into paragraphs headed by numbered markers.

    Each '<P>\u3010n\u3011...</P>' headline starts a paragraph; the HTML
    between consecutive headlines (and after the last, up to the first
    blank '<P> </P>') is parsed into that paragraph's items.
    """
    li = re.finditer(ur'<P>\u3010\d*\u3011.*?</P>', source)
    i = 0
    index = []  # start offset of each headline within `source`
    res = []    # one ts_content_part per headline, in document order
    for m in li:
        title = m.group()
        part = ts_content_part()
        part.title = remove_tags(title)
        res.append(part)
        pos = m.start()
        index.append(pos)
        if(i > 0):
            # Parse the slice between the previous headline and this one.
            part_source = source[index[i - 1]:pos]
            res_item = parse_content_part(part_source)
            res[i - 1].items = res_item
        i += 1
    # Final paragraph: from the last headline to the first blank <P> marker.
    # NOTE(review): assumes at least one headline matched; otherwise `pos`
    # and `res[i - 1]` are undefined here — confirm inputs guarantee this.
    part_source = source[pos:source.index('<P> </P>')]
    res_item = parse_content_part(part_source)
    res[i - 1].items = res_item
    return res
def parse_content_part(source):
    """Parse one paragraph's HTML into a list of ts_content_part_item.

    Every <P>/<DIV> element yields one item carrying its stripped text and
    the src of its first image (if any).
    """
    li = re.finditer(r'<(P|DIV)>.*?</(P|DIV)>', source)
    res = []
    for m in li:
        item = m.group()
        img = parse_img_src(item)
        txt = remove_tags(item)
        res_item = ts_content_part_item(txt, img)
        # print(res_item)
        res.append(res_item)
    return res
def parse_img_src(source):
    """Return the src URL of the first <IMG> tag in *source*, or None."""
    img_match = re.search(r'<IMG.*?>', source)
    if not img_match:
        return None
    src_match = re.search(r'src=".*?"', img_match.group())
    if not src_match:
        return None
    # Strip the leading 'src="' and the trailing quote.
    return src_match.group()[5:-1]
def remove_tags(source):
    """Strip HTML tags, stray angle brackets and the blank placeholder."""
    pattern = re.compile(r"(<.*?>|</.*?>|<|/>| )")
    return pattern.sub('', source)
# res = parse('http://www.dapenti.com/blog/more.asp?name=xilei&id=79405')
# from ts_json import json_encode
# ss = json_encode().encode(res)
# print(ss)
|
2,719 | 844c9af4f0d4ca33e7c69b72f9886f58ceebefdb | from fastapi import APIRouter
from .endpoints import submissions
def get_api_router():
    """Assemble the application's API router.

    Currently mounts only the submissions endpoints under /submissions;
    the users router is scaffolded but not yet enabled.
    """
    api_router = APIRouter()
    api_router.include_router(submissions.router,
                              prefix="/submissions",
                              tags=["submissions"])
    # api_router.include_router(users.router, prefix="/users", tags=["users"])
    return api_router
|
2,720 | 20f56ff484321a7d623cead4315e5a6b3b0653a7 | # Generated by Django 3.1.2 on 2020-10-21 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax Endpoint fields — set defaults on
    frequency_in_minutes and status_code, and allow blank/null values on
    last_check, response_text and test_pattern."""
    dependencies = [
        ('monitoring', '0002_auto_20201021_0027'),
    ]
    operations = [
        migrations.AlterField(
            model_name='endpoint',
            name='frequency_in_minutes',
            field=models.FloatField(default=30),
        ),
        migrations.AlterField(
            model_name='endpoint',
            name='last_check',
            field=models.DateTimeField(blank=True, default=None, null=True),
        ),
        migrations.AlterField(
            model_name='endpoint',
            name='response_text',
            field=models.TextField(blank=True, default=None, null=True),
        ),
        migrations.AlterField(
            model_name='endpoint',
            name='status_code',
            field=models.FloatField(default=200),
        ),
        migrations.AlterField(
            model_name='endpoint',
            name='test_pattern',
            field=models.CharField(blank=True,
                                   default=None,
                                   help_text='If left blank sys will only ping',
                                   max_length=100,
                                   null=True),
        ),
    ]
|
2,721 | 67db3a66e5525d41de13df665167a0db2d81056e | from django.views import generic
from .models import GPS
# This is the view for my home page. It is a list view because it needs to display a list of all
# of the GPS units that are currently in the database.
class HomeView(generic.ListView):
    """Home page: lists every GPS unit in the database."""
    model = GPS
    template_name = 'inv_templates/home.html'
    context_object_name = 'unit'
# This is the view for my add item page.
class Add_ItemView(generic.TemplateView):
    """Static page with the form for adding a new GPS unit."""
    model = GPS
    template_name = 'inv_templates/add_item.html'
# This is the view for my remove item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Remove_ItemView(generic.ListView):
    """Lists all GPS units so one can be selected for removal."""
    model = GPS
    template_name = 'inv_templates/remove_item.html'
    context_object_name = 'unit'
# This is the view for my update item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Update_ItemView(generic.ListView):
    """Lists all GPS units so one can be selected for updating."""
    model = GPS
    template_name = 'inv_templates/update_item.html'
    context_object_name = 'unit'
# This is the view for my check out item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked in.
class Check_Out_ItemView(generic.ListView):
    """Lists currently checked-in units (status=False) available to check out."""
    model = GPS
    template_name = 'inv_templates/check_out_item.html'
    context_object_name = 'checkedin_units'
    queryset = GPS.objects.filter(status=False)
# This is the view for my check in item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked out.
class Check_In_ItemView(generic.ListView):
    """Lists currently checked-out units (status=True) awaiting check-in."""
    model = GPS
    template_name = 'inv_templates/check_in_item.html'
    context_object_name = 'checkedout_units'
    queryset = GPS.objects.filter(status=True)
|
2,722 | fdfb71595bf86fbe1763535814ec9c3cfd312d87 | """ Script to run pilon iteratively to correct genome assemblies """
import os
import argparse
import logging
import subprocess
def parse_arguments():
    """Parse the command-line arguments for the iterative-pilon run."""
    parser = argparse.ArgumentParser(description='Run pilon many times')
    # (flags, keyword-arguments) pairs keep the option table compact.
    option_table = [
        (('--draft_seq', '-d'), dict(required=True,
                                     help='Draft sequence to correct')),
        (('--forward', '-f'), dict(required=True,
                                   help='Reads to use for correction')),
        (('--reverse', '-r'), dict(help='Reverse read for correction')),
        (('--output', '-o'), dict(required=True, help='Output directory')),
        (('--iterations', '-i'), dict(required=True,
                                      help='How many times to run pilon')),
        (('--threads', '-t'), dict(required=True, help='Threads to use')),
        (('--pilon', '-p'), dict(required=True, help='Path to pilon.jar')),
    ]
    for flags, kwargs in option_table:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def run_bwa(reference_genome, forward_read, reverse_read, threads, output, i):
    """Index the reference with bwa and align the reads with bwa mem.

    Returns the path of the SAM file written for iteration *i* (1-based
    in the filename).
    """
    # Index ref genome
    print('Align reads with BWA MEM')
    bwa_index_args = ['bwa', 'index', reference_genome]
    process = subprocess.Popen(bwa_index_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    # Align reads to reference genome.
    # NOTE(review): '-x ont2d' is a Nanopore preset although the reads are
    # named forward/reverse — confirm this is intended.
    bwa_mem_args = ['bwa', 'mem', '-t', threads, '-x', 'ont2d', reference_genome, forward_read, reverse_read]
    process = subprocess.Popen(bwa_mem_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    # Write alignment to file.
    # NOTE(review): on Python 3, `out` is bytes but the file is opened in
    # text mode — this write would raise TypeError; confirm target version.
    sam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sam')
    with open(sam_file, 'w') as bwa_mem_out:
        bwa_mem_out.write(out)
    return sam_file
def run_samtools(sam_file, threads, output, i):
    """Convert the SAM to BAM, sort it, index it, and return the sorted path."""
    # Convert the SAM-file to a BAM-file
    print('Convert SAM-file to BAM-file')
    bam_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.bam')
    samtools_view_args = ['samtools', 'view', '-@', threads, '-bS', '-o',
                          bam_file, sam_file]
    process = subprocess.Popen(samtools_view_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    # Sort and return the BAM-file
    print('Sort BAM-file')
    bam_sorted_file = os.path.join(output, 'bwa_mem_' + str(i + 1) + '.sorted.bam')
    samtools_sort_args = ['samtools', 'sort', bam_file, '-o', bam_sorted_file]
    process = subprocess.Popen(samtools_sort_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    # Index sorted BAM-file (required by pilon)
    samtools_index_args = ['samtools', 'index', bam_sorted_file]
    process = subprocess.Popen(samtools_index_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    return bam_sorted_file
def run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path):
    """Run pilon.jar on the sorted alignment and log its stdout."""
    print('Run Pilon')
    pilon_args = ['java', '-Xmx16G', '-jar', pilon_path, '--genome', reference_genome,
                  '--frags', bam_sorted_file, '--threads', threads, '--output',
                  pilon_output]
    process = subprocess.Popen(pilon_args, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)
    out, err = process.communicate()
    print(out)
    # NOTE(review): `out` is bytes on Python 3; writing it to a text-mode
    # log file would raise TypeError — confirm target version.
    with open(pilon_output + '.log', 'w') as pilon_log:
        pilon_log.write(out)
def main():
    """Main Application: iteratively align, sort, and pilon-polish a draft."""
    # Get arguments
    args = parse_arguments()
    logging.basicConfig(filename='logging.log', level=logging.DEBUG)
    output = args.output
    reference_genome = args.draft_seq
    if args.reverse:
        reverse_read = args.reverse
    else:
        # NOTE(review): the empty string is still passed to `bwa mem` as a
        # positional argument, which bwa may reject — confirm behaviour
        # for single-end runs.
        reverse_read = ""
    forward_read = args.forward
    threads = args.threads
    iterations = args.iterations
    pilon_path = args.pilon
    logging.info('OUTPUT DIRECTORY:' + output)
    logging.info('READS: ' + forward_read + ', ' + reverse_read)
    logging.info('THREADS: ' + threads)
    logging.info('ITERATIONS: ' + iterations)
    # Set pilon output
    pilon_output = os.path.join(output, 'pilon_1')
    # NOTE(review): fails if the output directory already exists.
    os.mkdir(output)
    logging.info('START CORRECTION')
    for i in range(int(iterations)):
        # Log
        logging.info('ITERATION: ' + str(i + 1))
        logging.info('REFERENCE GENOME: ' + reference_genome)
        logging.info('PILON OUTPUT: ' + pilon_output)
        sam_file = run_bwa(reference_genome, forward_read, reverse_read, threads, output, i)
        bam_sorted_file = run_samtools(sam_file, threads, output, i)
        run_pilon(bam_sorted_file, reference_genome, pilon_output, threads, pilon_path)
        # Set pilon output to new reference: each iteration polishes the
        # previous iteration's fasta.
        reference_genome = os.path.join(output, 'pilon_' + str(i + 1) + '.fasta')
        pilon_output = os.path.join(output, 'pilon_' + str(i + 2))
|
2,723 | 0769003c248c099da5bcd75541d35234b01af5de | #!/usr/bin/env python
import os
import sys
from setuptools import setup
from textwrap import dedent
NAME = "docker-zabbix-script-sender"
GITHUB_ORG_URL = "https://github.com/troptop/"
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
setup(
name=NAME,
version=version,
author="Cyril Moreau",
author_email="cyril.moreauu@gmail.com",
url= GITHUB_ORG_URL + '/' + NAME,
download_url="{0}/{1}/tarball/v{2}".format(GITHUB_ORG_URL, NAME, version),
description="Push Docker containers script results to Zabbix efficiently",
long_description=dedent("""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering a container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker containers statistics and transforms them in Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""),
keywords="docker zabbix monitoring",
packages=['docker_zabbix_script_sender'],
install_requires=[
'docker-py >= 1.0.0',
],
zip_safe=False,
license="Apache license version 2.0",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
entry_points = """
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
|
2,724 | 210d1a184d338d77d4c41327d0a9e2a5a56eb2ae | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Install and activate pre-commit and its hooks into virtual environment."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
# if sys.version_info[0] > 2 or sys.version_info[1] < 7:
# print("Python 2.7 required")
# sys.exit(1)
VENV_NAME = 'VIRTUAL_ENV'
VENV = ''
try:
VENV = os.environ[VENV_NAME]
if VENV == '':
print("Environment variable '%s' is empty" % VENV_NAME)
print('Please activate your virtualenv first')
sys.exit(3)
if not os.path.isdir(VENV):
print("Virtual environment '%s' does not exist" % VENV)
print('Please activate a valid virtualenv first')
sys.exit(2)
except KeyError:
print('No virtualenv defined')
print('Please activate a virtualenv (with mkvirtualenv, workon, or pyenv)')
sys.exit(1)
if os.system('git config diff.userdata.textconv $PWD/userdata_decode.py'):
print('Problem configuring Git diff filter for userdata')
if os.system('pre-commit --version'):
os.system('pip install pre-commit')
if os.system('pre-commit install'):
print('Error setting up pre-commit hooks, try updating with '
'pip install -U pre-commit')
sys.exit(4)
if os.system('pre-commit run --all-files'):
print('Problem running pre-commit hooks, check .pre-commit-config.yaml')
sys.exit(5)
sys.exit(0)
|
2,725 | d48353caa07d3bfa003ea9354b411fe0c79591db | """
k-element subsets of the set [n]
3-element subsets of the set [6]
123
"""
result = []  # accumulates every completed k-subset (module-level, shared)


def get_subset(A, k, n):
    """Recursively extend the partial subset *A* with values up to *n*.

    Every completed k-element subset is appended (as a new list) to the
    module-level ``result``. Candidates are always larger than the current
    maximum, so subsets are generated in sorted (lexicographic) order.
    """
    a_list = [i for i in A]  # copy so the caller's list is never mutated
    if len(a_list) == k:
        result.append(a_list)
        return
    s_num = max(a_list) + 1 if a_list else 1
    for i in range(s_num, n + 1):
        a_list.append(i)
        get_subset(a_list, k, n)
        a_list.pop()  # O(1) backtrack; remove(i) re-scanned the list in O(k)
def subset_algor(n, k):
    """Fill the module-level ``result`` with every k-subset of [1..n]."""
    seed = []
    get_subset(seed, k, n)
def main():
    """Generate all 3-subsets of [7] and print each with its rank."""
    # subset_algor(int(input()), int(input()))
    subset_algor(7, 3)
    for rank, subset in enumerate(result):
        print(subset, " Rank: ", rank)
    print(len(result))
if __name__ == "__main__":
main()
|
2,726 | f3f3bbb715f16dc84221f3349aa5f26e9a6dc7c8 | from typing import Dict, List
# "pilha" is Portuguese for "stack": start with an empty list and show it.
pilha = []
print(pilha)
2,727 | eec08b3fdd4beb7d88ac0dc6d2e8776cf54fda35 | import tempfile
import unittest
from unittest.mock import mock_open, patch, MagicMock, call
import compare_apple_music_and_spotify as music_compare
class get_apple_music_data(unittest.TestCase):
    """Tests for AppleMusicDataParser: extracting artists/songs from an iTunes XML export."""
    def test_open_file(self):
        # mock_open patches builtins.open so no real file is touched
        with patch("builtins.open", mock_open(read_data="data")) as mock_file:
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            assert open("/apple_music").read() == "data"
            mock_file.assert_called_with("/apple_music")
    def test_save_one_artist_from_line(self):
        with patch("builtins.open", mock_open(read_data="""<key>Sort Artist</key><string>Drew Goddard</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual("Drew Goddard", apple_music_data_parser.one_song_and_artist.get('Artist'))
    def test_save_one_song(self):
        with patch("builtins.open",
                   mock_open(read_data="""<key>Sort Name</key><string>The Cabin In the Woods</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual("The Cabin In the Woods", apple_music_data_parser.one_song_and_artist.get('Song'))
    def test_save_one_song_and_artist(self):
        # multi-line read_data simulates consecutive lines of the XML dump
        with patch("builtins.open", mock_open(read_data="""<key>Sort Artist</key><string>Drew Goddard</string>
<key>Sort Name</key><string>The Cabin In the Woods</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual([{'Artist': "Drew Goddard", 'Song': "The Cabin In the Woods"}],
                             apple_music_data_parser.all_songs_and_artists)
    def test_save_several_songs_and_artists(self):
        with patch("builtins.open", mock_open(read_data='''<key>Sort Name</key><string>The Cabin In the Woods</string>
<key>Sort Artist</key><string>Drew Goddard</string>
<key>Sort Name</key><string>Pulp Fiction</string>
<key>Sort Artist</key><string>Quentin Tarantino</string>''')):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual([{'Artist': "Drew Goddard", 'Song': "The Cabin In the Woods"},
                              {'Artist': "Quentin Tarantino", 'Song': "Pulp Fiction"}],
                             apple_music_data_parser.all_songs_and_artists)
class spotify_data_parser(unittest.TestCase):
    """Tests for spotify_data_parser: reading a Spotify CSV export and
    collecting (Artist, Song) pairs line by line."""
    def test_open_file_and_return_formated_data_split_by_coma(self):
        with patch("builtins.open", mock_open(read_data="split,by,")):
            result = music_compare.spotify_data_parser().read_file("/test_path")
            open.assert_called_once_with("/test_path", "r", newline='')
            self.assertTrue(result, "_csv.DictReader")
    def test_no_artist_found_on_line(self):
        # is_artist keys on the "Artist Name" column of the CSV row dict
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        result= music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)
        self.assertEqual(False,result)
    def test_artist_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "Artist Name": "Avenged Sevenfold",
        }
        result= music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)
        self.assertEqual(True,result)
    def test_song_not_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "not found": "Nightmare",
        }
        result= music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)
        self.assertEqual(False,result)
    def test_song_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "Nightmare",
        }
        result= music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)
        self.assertEqual(True,result)
    def test_dont_save_if_artist_not_found(self):
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        music_compare.spotify_data_parser().save_artist(lines_csv_dict_reader_formated)
        self.assertEqual({},music_compare.spotify_data_parser().one_song_and_artist)
    def test_save_if_artist_found(self):
        lines_csv_dict_reader_formated = {
            "Artist Name": "test_artist",
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)
        self.assertEqual('test_artist', self.spotify_data_parser.one_song_and_artist.get('Artist'))
    def test_dont_save_if_song_not_found(self):
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        music_compare.spotify_data_parser().save_song(lines_csv_dict_reader_formated)
        self.assertEqual({},music_compare.spotify_data_parser().one_song_and_artist)
    def test_save_if_song_found(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "test_song",
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.assertEqual('test_song', self.spotify_data_parser .one_song_and_artist.get('Song'))
    def test_combine_song_found_and_NOT_artist(self):
        # wrong keys on purpose: combine must not emit a half-filled pair
        lines_csv_dict_reader_formated = {
            "Name": "test_song",
            "Artist": "test_artist"
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.spotify_data_parser.combine_song_and_artist()
        self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)
    def test_combine_song_and_artist_if_found(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "test_song",
            "Artist Name": "test_artist"
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)
        self.spotify_data_parser.combine_song_and_artist()
        self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],
                         self.spotify_data_parser.all_songs_and_artists)
    def test_combine_several_songs_and_artists(self):
        # realistic Spotify export: header row followed by two track rows
        with patch("builtins.open", mock_open(read_data='''Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At
"spotify:track:4UEo1b0wWrtHMC8bVqPiH8","Nightmare","Avenged Sevenfold","Nightmare","1","1","374453","spotify:user:","2010-10-17T20:18:40Z"
"spotify:track:1d5UuboIPRMD4HaU3yycKC","Somewhere I Belong","Linkin Park","Meteora (Bonus Edition)","1","3","213933","spotify:user:","2010-10-17T20:24:25Z"''')):
            self.spotify_data_parser = music_compare.spotify_data_parser()
            self.spotify_data_parser.create("/test_path")
            self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song': 'Nightmare'},
                              {'Artist': 'Linkin Park', 'Song': 'Somewhere I Belong'}],
                             self.spotify_data_parser.all_songs_and_artists)
class apple_music_and_spotify_comparer(unittest.TestCase):
    """Tests for the comparer: both parsers' create() are patched so the
    libraries can be injected, then the printed diff report is asserted."""
    def setUp(self):
        self.comparer = music_compare.apple_music_and_spotify_comparer()
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_save_data_from_spotify_and_apple_music_in_class(self, apple_music, spotify):
        test = music_compare.apple_music_and_spotify_comparer()
        spotify.return_value = [{'Artist': 'test_artist1', 'Song': 'test_song1'}]
        apple_music.return_value = [{'Artist': 'test_artist2', 'Song': 'test_song2'}]
        test.save_data_locally("/spotify", "/apple_music")
        self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}], test.spotify_lib)
        self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}], test.apple_music_lib)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_song_and_artist_when_song_not_found_in_apple_music(self, apple_music, spotify):
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'}]
        # patch print to capture the report instead of writing to stdout
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            mock_print.assert_has_calls(
                [call('following songs not found in apple_music:'),
                 call('test_song_no_match by artist test_artist_no_match')])
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_song_and_artist_when_song_not_found_in_spotify(self, apple_music, spotify):
        spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            mock_print.assert_has_calls([call('following songs not found in spotify:'),
                                         call('test_song by artist test_artist'),
                                         call()])
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(self, apple_music, spotify):
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},
                                {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},
                                {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            # header line plus one line per missing song
            self.assertEqual(3, mock_print.call_count)
            mock_print.assert_has_calls(
                [call('following songs not found in apple_music:'),
                 call('test_song_no_match by artist test_artist_no_match'),
                 call('test_song_no_match2 by artist test_artist_no_match2')],
                any_order=False)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_song_not_found_in_spotify(self, apple_music, spotify):
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},
                                    {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},
                                    {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            self.assertEqual(4, mock_print.call_count)
            mock_print.assert_has_calls(
                [call('following songs not found in spotify:'),
                 call('test_song_no_match by artist test_artist_no_match'),
                 call('test_song_no_match2 by artist test_artist_no_match2'),
                 call()],
                any_order=False)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(self, apple_music,
                                                                                                   spotify):
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_only_apple_music', 'Song': 'test_song_only_apple_music'}]
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_only_spotify', 'Song': 'test_song_only_spotify'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            self.assertEqual(5, mock_print.call_count)
            mock_print.assert_has_calls([call("following songs not found in spotify:"),
                                         call('test_song_only_apple_music by artist test_artist_only_apple_music'),
                                         call(),
                                         call("following songs not found in apple_music:"),
                                         call('test_song_only_spotify by artist test_artist_only_spotify')
                                         ])
|
2,728 | 60202758a0a42fc26dc1bca9f134a70f28967093 | import json
import pickle
import zlib
from diskcollections.interfaces import IHandler
class PickleHandler(IHandler):
    """Serialize values with pickle (no compression)."""
    # staticmethod wrappers expose the pickle callables unchanged,
    # including their optional arguments (e.g. ``protocol``).
    dumps = staticmethod(pickle.dumps)
    loads = staticmethod(pickle.loads)
class PickleZLibHandler(IHandler):
    """Pickle values, then compress the pickled bytes with zlib."""

    @staticmethod
    def dumps(
        obj,
        protocol=pickle.HIGHEST_PROTOCOL,
        level=zlib.Z_DEFAULT_COMPRESSION
    ):
        """Return *obj* pickled at *protocol* and zlib-compressed at *level*."""
        return zlib.compress(pickle.dumps(obj, protocol=protocol), level)

    @staticmethod
    def loads(compressed):
        """Inverse of :meth:`dumps`: decompress, then unpickle."""
        return pickle.loads(zlib.decompress(compressed))
class JsonHandler(IHandler):
    """Serialize values as JSON text (no compression)."""
    # Pass-throughs: keep json.dumps/json.loads signatures intact.
    dumps = staticmethod(json.dumps)
    loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
    """JSON-encode values, then zlib-compress the encoded bytes."""

    @staticmethod
    def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
        """Return *obj* as zlib-compressed UTF-8 JSON bytes."""
        return zlib.compress(json.dumps(obj).encode(), level)

    @staticmethod
    def loads(compressed):
        """Inverse of :meth:`dumps`: decompress and parse the JSON payload."""
        return json.loads(zlib.decompress(compressed).decode())
|
2,729 | 76db5955b29696ca03ab22ef14ac018e0618e9e3 | '''
Seperate a number into several, maximize their product
'''
# recursive
def solution1(n):
    """Best product reachable by splitting n into summands (recursive).

    For n <= 4 the piece is worth its own length when used as a sub-piece.
    """
    if n <= 4:
        return n
    best = 0
    for cut in range(1, n // 2 + 1):
        best = max(best, solution1(cut) * solution1(n - cut))
    return best
# dp
def solution2(n):
    """Dynamic-programming version of solution1; returns the same values.

    Fixes the original recurrence, which multiplied dp[j] by the *uncut*
    length of the second piece (i-j) and therefore missed splits such as
    3+3 for n=6 (it returned 8 instead of 9). It also raised NameError for
    n <= 2 because the loop body never ran.
    """
    if n <= 4:
        # a piece of length <= 4 is worth its own length as a sub-piece
        return n
    # dp[i] = best value obtainable from a piece of length i
    # (dp[i] = i for i <= 4, matching solution1's base case)
    dp = list(range(n + 1))
    for i in range(5, n + 1):
        dp[i] = max(dp[j] * dp[i - j] for j in range(1, i // 2 + 1))
    return dp[n]
if __name__ == '__main__':
    # Both strategies should print the same answer for the same n.
    result = solution1(8)
    print(result)
    result = solution2(8)
    print(result)
2,730 | 96d13a883590ca969e997bbb27bcdbee1b24252f | import csv as csv
import hashlib
from sets import Set
def func_hash(parameter):
    """Return the hex SHA-384 digest of *parameter* (str or bytes).

    hashlib requires bytes on Python 3, so text input is UTF-8 encoded
    first; byte input is hashed as-is (same digests as before).
    """
    if isinstance(parameter, str):
        parameter = parameter.encode('utf-8')
    hash_object = hashlib.sha384(parameter)
    table_hash = hash_object.hexdigest()
    return table_hash
def myFunk():
    """Write users.csv: username, SHA-384 hash of the username, and role."""
    roles = ['inspector', 'admin']
    rows = [['Userneme', 'hash_password', 'role'],  # header spelled as in the original output
            ['Olya', func_hash('Olya'), 'admin'],
            ['Stas', func_hash('Stas'), 'admin'],
            ['Dima', func_hash('Dima'), 'admin'],
            ['Kyrylo', func_hash('Kyrylo'), 'admin'],
            ['Lubchyk', func_hash('Lubchyk'), 'inspector'],
            ['Sashko', func_hash('Sashko'), roles],  # whole roles list lands in one cell
            ]
    with open('users.csv', 'w') as fp:
        writer = csv.writer(fp, delimiter=',')
        writer.writerows(rows)
myFunk() |
2,731 | 2dddee735e23e8cdb7df83f47f63926727cf8963 | """Stencil based grid operations in 2D."""
from .advection_flux_2d import gen_advection_flux_conservative_eno3_pyst_kernel_2d
from .advection_timestep_2d import (
gen_advection_timestep_euler_forward_conservative_eno3_pyst_kernel_2d,
)
from .brinkmann_penalise_2d import (
gen_brinkmann_penalise_pyst_kernel_2d,
gen_brinkmann_penalise_vs_fixed_val_pyst_kernel_2d,
)
from .char_func_from_level_set_2d import (
gen_char_func_from_level_set_via_sine_heaviside_pyst_kernel_2d,
)
from .diffusion_flux_2d import gen_diffusion_flux_pyst_kernel_2d
from .diffusion_timestep_2d import gen_diffusion_timestep_euler_forward_pyst_kernel_2d
from .elementwise_ops_2d import (
gen_add_fixed_val_pyst_kernel_2d,
gen_elementwise_complex_product_pyst_kernel_2d,
gen_elementwise_copy_pyst_kernel_2d,
gen_elementwise_sum_pyst_kernel_2d,
gen_set_fixed_val_at_boundaries_pyst_kernel_2d,
gen_set_fixed_val_pyst_kernel_2d,
gen_elementwise_saxpby_pyst_kernel_2d,
)
from .inplane_field_curl_2d import gen_inplane_field_curl_pyst_kernel_2d
from .outplane_field_curl_2d import gen_outplane_field_curl_pyst_kernel_2d
from .penalise_field_boundary_2d import gen_penalise_field_boundary_pyst_kernel_2d
from .update_vorticity_from_velocity_forcing_2d import (
gen_update_vorticity_from_penalised_velocity_pyst_kernel_2d,
gen_update_vorticity_from_velocity_forcing_pyst_kernel_2d,
)
|
2,732 | fa531e8b07de6ee3c22146904ee8724cefab9033 | # presentation console
# - a python interpreter for "pseudo-interative" demos
#
# usage: $ python prescons.py <filename>
#
# <filename> should be a file that contains python code as would be entered
# directly in a terminal - see example.py
#
# while running, press 'space' to move through the code
#
# github.com/inglesp/prescons
from code import InteractiveConsole
from StringIO import StringIO
import sys, termios, tty
# get character from stdin
# based on http://code.activestate.com/recipes/134892/
# *nix only, and doesn't handle arrow keys well
def getch(ch=None):
    """Block until a key is pressed; if *ch* is given, wait for that exact key.

    Puts the terminal into raw mode while waiting and always restores the
    previous settings. Ctrl-C (ASCII 3) raises KeyboardInterrupt explicitly
    because raw mode disables normal signal delivery. *nix only.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        while True:
            tty.setraw(fd)
            gotch = sys.stdin.read(1)
            if ch is None or gotch == ch:
                break
            if ord(gotch) == 3:
                raise KeyboardInterrupt
    finally:
        # restore the terminal even if interrupted
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# subclasses InteractiveConsole from code module
class PresentationConsole(InteractiveConsole):
    """InteractiveConsole that replays a script file one statement per keypress.

    Python 2 code: input comes from *path* instead of the user; pressing
    space advances through the "typed" code and its output.
    """
    def __init__(self, path):
        # the file supplies the lines that would normally be typed
        self.file = open(path)
        InteractiveConsole.__init__(self)
    def raw_input(self, prompt=''):
        """Feed the next script line; at a primary prompt wait for space first."""
        self.write(prompt)
        if prompt == sys.ps1:
            try:
                getch(' ')
            except KeyboardInterrupt:
                # Ctrl-C drops into ipdb inside the console's namespace
                print "KeyboardInterrupt"
                exec "import ipdb; ipdb.set_trace()" in self.locals
        line = self.file.readline()
        if len(line) == 0:
            # end of script: close and signal the console to stop
            self.file.close()
            raise EOFError
        self.write(line)
        return line.rstrip()
    def runcode(self, code):
        """Run the code but hold its captured output until space is pressed."""
        sys.stdout = StringIO()
        InteractiveConsole.runcode(self, code)
        output = sys.stdout.getvalue()
        sys.stdout = sys.__stdout__
        if len(output) > 0:
            getch(' ')
        self.write(output)
if __name__ == '__main__':
    # usage: python prescons.py <filename>
    path = sys.argv[1]
    console = PresentationConsole(path)
    console.interact()
2,733 | c5a7f269f579bd1960afa4f700b5c3436ac6d91a | from rest_framework.views import APIView
from .serializers import UserSerializer
from rest_framework import permissions
from .models import users
from rest_framework.response import Response
from django.http import JsonResponse
from rest_framework import viewsets
from profiles.models import profile
from profiles.serializers import ProfileSerializer
from follows.models import Follow
class GetDefaultUsers(APIView):
    """Return up to five users the requester does not already follow,
    plus the requester's own serialized record."""
    permission_classes = [
        permissions.IsAuthenticated
    ]

    def post(self, request, *args, **kwargs):
        user = self.request.user
        candidates = users.objects.all()[:5]
        users_to_pass = []
        for candidate in candidates:
            # .exists() issues a cheap EXISTS query instead of fetching every
            # row just to call len() on the queryset; the unused user_id
            # local from the original is dropped.
            already_followed = Follow.objects.filter(
                user_id=candidate.id).filter(follower_id=user.id).exists()
            if not already_followed:
                users_to_pass.append(candidate)
        serilizer_class_many = UserSerializer(users_to_pass, many=True)
        serilizer_class = UserSerializer(user)
        return Response({
            'users': serilizer_class_many.data,
            "user": serilizer_class.data
        })
class GetSpecificUser(APIView):
    """Return a single user and their profile, looked up by primary key."""
    permission_classes = [
        permissions.IsAuthenticated
    ]

    def post(self, request, id=None, *args, **kwargs):
        try:
            queryset = users.objects.get(id=id)
        # Bug fix: the exception lives on the model class `users`; the
        # original `except user.DoesNotExist` referenced an undefined name
        # and raised NameError instead of returning 400.
        except users.DoesNotExist:
            return JsonResponse({'error': "user does not exits"}, status=400)
        try:
            profile_queryset = profile.objects.get(user=queryset)
        except profile.DoesNotExist:
            return JsonResponse({'error': "user does not have a profile"}, status=400)
        serializer_class = UserSerializer(queryset)
        serializer_class_profile = ProfileSerializer(profile_queryset)
        return Response(
            {'user': serializer_class.data,
             'profile': serializer_class_profile.data
             },
            status=200)
|
2,734 | 5220ad793788927e94caf7d6a42df11292851c67 | from django.shortcuts import render
# from emaillist.models import Emaillist
from emaillist.models import Emaillist
from django.http import HttpResponseRedirect
# Create your views here.
# def index(request):
# emaillist_list = Emaillist.objects.all().order_by('-id')  # fetch every row, newest id first
# data = {'emaillist_list':emaillist_list}  # store in a dict for the template context
# return render(request, 'emaillist/index.html', data)  # render index.html with the data
def test_index(request):
    """Render the e-mail list page with all entries, newest first."""
    print("test_index ํจ์ ์คํํ์ ")  # debug trace: "test_index function executed"
    emaillist_list = Emaillist.objects.all().order_by('-id')  # all rows, newest id first
    # Fix: the original trailing comment was split onto a bare (non-comment)
    # line, which broke the module's syntax; it is merged back here.
    data = {'emaillist_list':emaillist_list}  # template context dict
    return render(request, 'emaillist/test_index.html', data)
# def form(request):
# return render(request, 'emaillist/form.html')
def test_form(request):
    """Render the e-mail entry form page."""
    print("test ํจ์ ์คํํ์ ")  # debug trace: "test function executed"
    return render(request, 'emaillist/test_form.html')
def add(request):
    """Create an Emaillist row from POSTed form fields, then redirect to the list."""
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn'] # first name from the form's 'fn' input
    emaillist.last_name = request.POST['ln'] # last name from the form's 'ln' input
    emaillist.email = request.POST['email'] # e-mail address from the form's 'email' input
    emaillist.save() # persist the new row to the database
    return HttpResponseRedirect('/emaillist') # back to the list page once saved
#
# def add2(request):
# emaillist2 = Emaillist2()
# emaillist2.first_name = request.POST['fn']
# emaillist2.last_name = request.POST['ln']
# emaillist2.email = request.POST['email']
#
# emaillist2.save()
#
# return HttpResponseRedirect('/emaillist') |
2,735 | b7a60322b4a0fcb6de16cd12be33db265a2b8746 | import pytesseract
from PIL import Image
img = Image.open("flag.png")
text = pytesseract.image_to_string(img)
def rot(*symbols):
    """Build a rotation-cipher factory over the given symbol alphabets.

    ``rot(alpha)(n)`` returns a function that rotates each character of its
    argument n positions forward within its own alphabet.
    """
    def _rot(n):
        shifted = ''.join(alphabet[n:] + alphabet[:n] for alphabet in symbols)
        table = str.maketrans(''.join(symbols), shifted)
        return lambda s: s.translate(table)
    return _rot
def rot_alpha(n):
    """Return a Caesar-cipher function shifting ASCII letters by *n* places."""
    from string import ascii_lowercase as lc, ascii_uppercase as uc
    table = str.maketrans(lc + uc, lc[n:] + lc[:n] + uc[n:] + uc[:n])

    def encode(s):
        # non-letters are left untouched by translate()
        return s.translate(table)
    return encode
def rot_encode(n):
    """Caesar-encode by *n* positions (same behaviour as rot_alpha; kept
    because the call below uses this name)."""
    from string import ascii_lowercase as lc, ascii_uppercase as uc
    rotated = lc[n:] + lc[:n] + uc[n:] + uc[:n]
    lookup = str.maketrans(lc + uc, rotated)
    return lambda s: s.translate(lookup)
# Decode the OCR'd text with a Caesar shift of 7 and print the result.
print(rot_encode(7)(text))
if __name__ == '__main__':
    pass
2,736 | f2bf4f5b057af1d2362ec8d1472aa76e774be1c7 | import art
import random
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")
number = random.randint(1,100)
turns = 0
difficulty = input("Chose a difficulty. 'easy' or 'hard'?\n")
if difficulty == 'easy':
turns +=10
else:
turns +=5
gameover = False
while not gameover:
print(f"You've got {turns} turns left!")
guess = int(input("Guess a number!\n"))
if guess > number:
print("too high!")
turns -= 1
elif guess < number:
print("too low!")
turns -= 1
elif guess == number:
print("Thats it! You Win!")
gameover = True
if turns == 0:
print("You used all your chances!")
print("GAME OVER")
gameover = True |
2,737 | 09712a397ad7915d9865b4aebf16606f85988f67 | # 30 - Faรงa um programa que receba trรชs nรบmeros e mostre - os em ordem crescentes.
# Read three integers and print them in ascending order.
n1 = int(input("Digite o primeiro nรบmero: "))
n2 = int(input("Digite o segundo nรบmero: "))
n3 = int(input("Digite o terceiro nรบmero: "))
# sorted() replaces the original six-branch comparison chain; the printed
# text is identical for every input ordering, ties included.
menor, meio, maior = sorted((n1, n2, n3))
print(f'A ordem crescente รฉ {menor}, {meio}, {maior}')
2,738 | f1396179152641abf76256dfeab346907cb1e386 | [Interactive Programming with Python - Part 1]
[Arithmetic Expressions]
# numbers - two types, an integer or a decimal number
# two corresponding data types int() and float()
print 3, -1, 3.14159, -2.8
# we can convert between data types using int() and float()
# note that int() take the "whole" part of a decimal number
# float() applied to integers is boring
print type(3), type(3.14159), type(3.0)
#=> <type 'int'><type 'float'><type 'float'>
print int(3.14159), int(-2.8)
#=> 3 -2
print float(3), float(-1)
#=> 3.0 -1.0
# floating point number have around 15 decimal digits of accuracy
# pi is 3.1415926535897932384626433832795028841971...
# square root of two is 1.4142135623730950488016887242096980785696...
# approximation of pi, Python displays 12 decimal digits
print 3.1415926535897932384626433832795028841971
#=> 3.14159265359
# appoximation of square root of two, Python displays 12 decimal digits
print 1.4142135623730950488016887242096980785696
#=> 1.41421356237
# arithmetic operators
# + plus addition
# - minus subtraction
# * times multiplication
# / divided by division
# ** power exponentiation
# If one operand is a decimal (float), the answer is decimal
print 1.0 / 3, 5.0 / 2.0, -7 / 3.0
#=> 0.333333333333 2.5 -2.33333333333
# If both operands are ints, the answer is an int (rounded down)
print 1 / 3, 5 / 2, -7 / 3
#=> 0 2 -3
# expressions - number or a binary operator applied to two expressions
# minus is also a unary operator and can be applied to a single expression
print 1 + 2 * 3, 4.0 - 5.0 / 6.0, 7 * 8 + 9 * 10
# expressions are entered as sequence of numbers and operations
# how are the number and operators grouped to form expressions?
# operator precedence - "Please Excuse My Dear Aunt Sallie" = (), **, *, /, +,-
print 1 * 2 + 3 * 4
print 2 + 12
# always manually group using parentheses when in doubt
print 1 * (2 + 3) * 4
print 1 * 5 * 4
[Variables]
# valid variable names - consists of letters, numbers, underscore (_)
# starts with letter or underscore
# case sensitive (capitalization matters)
# legal names - ninja, Ninja, n_i_n_j_a
# illegal names - 1337, 1337ninja
# Python convention - multiple words joined by _
# legal names - elite_ninja, leet_ninja, ninja_1337
# illegal name 1337_ninja
# assign to variable name using single equal sign =
# (remember that double equals == is used to test equality)
# examples
my_name = "Joe Warren"
print my_name
my_age = 51
print my_age
my_age = my_age + 1   # equivalent shorthand: my_age += 1
# the story of the magic pill
magic_pill = 30
print my_age - magic_pill
my_grand_dad = 74
print my_grand_dad - 2 * magic_pill
# Temperature examples
# convert from Fahrenheit to Celsuis
# c = 5 / 9 * (f - 32)
# use explanatory names
temp_Fahrenheit = 212
temp_Celsius = 5.0 / 9.0 * (temp_Fahrenheit - 32)
print temp_Celsius
# test it! 32 Fahrenheit is 0 Celsius, 212 Fahrenheit is 100 Celsius
# convert from Celsius to Fahrenheit
# f = 9 / 5 * c + 32
temp_Celsius = 100
temp_Fahrenheit = 9.0 / 5.0 * temp_Celsius + 32
print temp_Fahrenheit
[Functions]
# computes the area of a triangle
def triangle_area(base, height): # header - ends in colon
area = (1.0 / 2) * base * height # body - all of body is indented
return area # body - return outputs value
a1 = triangle_area(3, 8)
print a1
a2 = triangle_area(14, 2)
print a2
# converts fahrenheit to celsius
def fahrenheit2celsius(fahrenheit):
celsius = (5.0 / 9) * (fahrenheit - 32)
return celsius
# test!!!
c1 = fahrenheit2celsius(32)
c2 = fahrenheit2celsius(212)
print c1, c2
# converts fahrenheit to kelvin
def fahrenheit2kelvin(fahrenheit):
celsius = fahrenheit2celsius(fahrenheit)
kelvin = celsius + 273.15
return kelvin
# test!!!
k1 = fahrenheit2kelvin(32)
k2 = fahrenheit2kelvin(212)
print k1, k2
# prints hello, world!
def hello():
print "Hello, world!"
# test!!!
hello() # call to hello prints "Hello, world!"
h = hello() # call to hello prints "Hello, world!" a second time
print h # prints None since there was no return value
Do not forget:
- :
- return
- indentation
[More Operations]
# Remainder / % / modulo - modular arithmetic works both in negative as positive direction
# systematically restrict computation to a range
# long division - divide by a number, we get a quotient plus a remainder
# quotient is integer division //, the remainder is % (Docs)
# problem - get the ones digit of a number
num = 49
tens = num // 10 # --> 4
ones = num % 10 # --> 9
print tens, ones
print 10 * tens + ones, num
# application - 24 hour clock
# http://en.wikipedia.org/wiki/24-hour_clock
hour = 20
shift = 8
print (hour + shift) % 24
# application - screen wraparound
# Spaceship from week seven
width = 800
position = 797
move = 5
position = (position + move) % width
print position # --> 2
width = 800
position = 797
move = -5
position = (position + move) % width
print position # --> 797
# Data conversion operations
# convert an integer into string - str
# convert an hour into 24-hour format "03:00", always print leading zero
hour = 3
ones = hour % 10 # --> 3
tens = hour // 10 # --> 0
print tens, ones, ":00" # --> 0 3 :00
print str(tens), str(ones), ":00" # --> 0 3 :00
print str(tens) + str(ones) + ":00" # --> 03:00
# convert a string into numbers using int and float
# Python modules - extra functions implemented outside basic Python
import simplegui # access to drawing operations for interactive applications
import math # access to standard math functions, e.g; trig
import random # functions to generate random numbers
# look in Docs for useful functions
print math.pi
[Logic and Comparisons]
Evaluation hierarchy: NOT - AND - OR
-- Comparison Operators
# >
# <
# >=
# <=
# ==
# !=
[Conditionals]
def greet(friend, money):
    """Greet someone and return the updated amount of money.

    friend -- truthy when the other party is a friend
    money  -- current funds; a friend with money > 20 costs 20,
              a stranger earns 10, otherwise money is unchanged
    """
    if friend and (money > 20):
        print "Hi!"
        money = money - 20
    elif friend:
        print "Hello"
    else:
        print "Ha ha"
        money = money + 10
    return money
money = 15
money = greet(True, money)
print "Money:", money
print ""
money = greet(False, money)
print "Money:", money
print ""
money = greet(True, money)
print "Money:", money
print ""
[Programming Tips]
import random
def random_dice():
    """Simulate rolling two six-sided dice and return their sum (2..12)."""
    total = 0
    for _ in range(2):
        # randrange(1, 7) yields a uniform integer in 1..6
        total += random.randrange(1, 7)
    return total
def volume_sphere(radius):
    """Return the volume of a sphere with the given radius ((4/3)*pi*r^3)."""
    cubed = radius ** 3
    return 4.0 / 3.0 * math.pi * cubed
# => attribute error is a syntax error after the '.'
def area_triangle(base, height):
    """Return the area of a triangle from its base and height."""
    return base * height * 0.5
# Poor readability
def area(a, b, c):
    """Return the area of a triangle with side lengths a, b, c.

    Uses Heron's formula via the semiperimeter.
    """
    half_perimeter = (a + b + c) / 2.0
    product = half_perimeter
    # Multiply in the same left-to-right order as s*(s-a)*(s-b)*(s-c)
    for side in (a, b, c):
        product *= half_perimeter - side
    return math.sqrt(product)
# Improved readability
def area_triangle_sss(side1, side2, side3):
    """Return the area of a triangle, given the lengths of its three sides.

    Applies Heron's formula: sqrt(s * (s-a) * (s-b) * (s-c)) where s is
    the semiperimeter.
    """
    semiperim = (side1 + side2 + side3) / 2.0
    radicand = (semiperim *
                (semiperim - side1) *
                (semiperim - side2) *
                (semiperim - side3))
    return math.sqrt(radicand)
[Rock-paper-scissors-lizard-Spock]
n = 123
print n % 100 #=> 23
print n % 10 #=> 3
print n // 10 #=> 12
[Event-driven Programming]
Start --> Initialize --> Wait <---> (Event +) Handler
Events
- Input (e.g. button, text box)
- Keyboard (e.g key down, key up)
- Mouse (e.g. click, drag)
- Timer
# Example of a simple event-driven program
# CodeSkulptor GUI module
import simplegui
# Event handler
def tick():
print "tick!"
# Register handler
timer = simplegui.create_timer(1000, tick)
# Start timer
timer.start()
Event Queue
- System puts events in this (invisible) queue
[Local vs. Global Variables]
# global vs local examples
# num1 is a global variable
num1 = 1
print num1
# num2 is a local variable
def fun():
num1 = 2
num2 = num1 + 1
print num2
fun()
# the scope of global num1 is the whole program, num 1 remains defined
print num1
# the scope of the variable num2 is fun(), num2 is now undefined
# print num2 #=> error 'num2' not defined
# why use local variables?
# give a descriptive name to a quantity
# avoid computing something multiple times
def fahren_to_kelvin(fahren):
celsius = 5.0 / 9 * (fahren - 32)
zero_celsius_in_kelvin = 273.15
return celsius + zero_celsius_in_kelvin
print fahren_to_kelvin(212)
# the risk/reward of using global variables
# risk - consider the software system for an airliner
# critical piece - flight control system
# non-critical piece - in-flight entertainment system
# both systems might use a variable called "dial"
# we don't want possibility that change the volume on your audio
# causes the plane's flaps to change!
# example
num = 4
def fun1():
global num # to access global variable
num = 5
def fun2():
global num
num = 6
# note that num changes after each call with no obvious explanation
print num
fun1()
print num
fun2()
print num
# global variables are an easy way for event handlers
# to communicate game information.
# safer method - but they required more sophisticated
# object-programming techniques
[SimpleGUI]
import simplegui
message = "Welcome!"
# Handler for mouse click
def click():
global message
message = "Good job!"
# Handler to draw on canvas
def draw(canvas):
canvas.draw_text(message, [50,112], 36, "Red")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 300, 200)
frame.add_button("Click me", click)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
-- Program Structure
1 - Define globals (state)
2 - Define Helper functions
3 - Define Classes
4 - Define event handlers
5 - Create a frame
6 - Register event handlers
7 - Start the frame & timers
# SimpleGUI program template
# Import the module
import simplegui
# Define global variables (program state)
counter = 0
# Define "helper" functions
def increment():
global counter
counter = counter + 1
# Define event handler functions
def tick():
increment()
print counter
def buttonpress():
    """Reset the global counter to zero (button event handler).

    Fixes a syntax error in the original: ``global counter:`` had a
    stray trailing colon, which is invalid Python.
    """
    global counter
    counter = 0
# Create a frame
# NOTE: the original used square brackets (indexing) on create_frame and
# create_timer; they are functions and must be called with parentheses.
frame = simplegui.create_frame("SimpelGUI Test", 100, 100)
# Register event handlers
timer = simplegui.create_timer(1000, tick)
frame.add_button("Click me!", buttonpress)
# Start frame and timers
frame.start()
timer.start()
[Buttons & Input Fields]
# calculator with all buttons
import simplegui
# intialize globals
store = 0
operand = 0
# event handlers for calculator with a store and operand
def output():
    """prints contents of store and operand"""
    # Reads the module-level globals `store` and `operand`; no `global`
    # declaration is needed because they are only read, never assigned.
    print "Store = ", store
    print "Operand = ", operand
    print ""
def swap():
""" swap contents of store and operand"""
global store, operand
store, operand = operand, store
output()
def add():
""" add operand to store"""
global store
store = store + operand
output()
def sub():
""" subtract operand from store"""
global store
store = store - operand
output()
def mult():
""" multiply store by operand"""
global store
store = store * operand
output()
def div():
    """ divide store by operand"""
    # NOTE(review): raises ZeroDivisionError when operand is 0 — there is
    # no guard against a zero operand here.
    global store
    store = store / operand
    output()
def enter(t):
""" enter a new operand"""
global operand
operand = float(t)
output()
# create frame
f = simplegui.create_frame("Calculator",300,300)
# register event handlers and create control elements
f.add_button("Print", output, 100)
f.add_button("Swap", swap, 100)
f.add_button("Add", add, 100)
f.add_button("Sub", sub, 100)
f.add_button("Mult", mult, 100)
f.add_button("Div", div, 100)
f.add_input("Enter", enter, 100)
# get frame rolling
f.start()
[Programming Tips]
##############
# Example of missing "global"
n1 = 0
def increment():
n1 = n1 + 1
increment()
increment()
increment()
print n1
##############
# Example of missing "global"
n2 = 0
def assign(x):
n2 = x
assign(2)
assign(15)
assign(7)
print n2
##############
# Example of missing "return"
n3 = 0
def decrement():
global n3
n3 = n3 - 1
x = decrement()
print "x = ", x
print "n = ", n
##############
# Example of print debugging
import simplegui
x = 0
def f(n):
print "f: n,x = ", n, x
result = n ** x
print "f: result = ",result
return result
def button_handler():
global x
print "bh : x = ", x
x += 1
print "bh : x = ", x
def input_handler(text):
print "ih : text = ", text
print f(float(text))
frame = simplegui.create_frame("Example", 200, 200)
frame.add_button("Increment", button_handler)
frame.add_input("Number:", input_handler, 100)
frame.start()
##############
# Examples of simplifying conditionals
def f1(a, b):
    """Returns True exactly when a is False and b is True."""
    return (a == False) and (b == True)
def f2(a, b):
    """Returns True exactly when a is False and b is True."""
    return bool(not a and b)
def f3(a, b):
    """Returns True exactly when a is False and b is True."""
    # Short-circuit semantics preserved: a truthy `a` yields False,
    # otherwise the value of `b` itself is returned.
    if a:
        return False
    return b
def g1(a, b):
    """Returns False exactly when a and b are both True."""
    return not ((a == True) and (b == True))
def g2(a, b):
    """Returns False exactly when a and b are both True."""
    result = True
    if a and b:
        result = False
    return result
def g3(a, b):
    """Returns False exactly when a and b are both True."""
    # De Morgan: not (a and b) == (not a) or (not b)
    return (not a) or (not b)
[PEP 8 - Styleguide]
- Use 4-space indentation, and no tabs.
- 4 spaces are a good compromise between small indentation (allows greater nesting depth) and large indentation (easier to read). Tabs introduce confusion, and are best left out.
- Wrap lines so that they don't exceed 79 characters.
- This helps users with small displays and makes it possible to have several code files side-by-side on larger displays.
- Use blank lines to separate functions and classes, and larger blocks of code inside functions.
- When possible, put comments on a line of their own.
- Use docstrings.
- Use spaces around operators and after commas, but not directly inside bracketing constructs: a = f(1, 2) + g(3, 4).
- Name your classes and functions consistently; the convention is to use CamelCase for classes and lower_case_with_underscores for functions and methods. Always use self as the name for the first method argument (see A First Look at Classes for more on classes and methods).
- Don't use fancy encodings if your code is meant to be used in international environments. Plain ASCII works best in any case.
[Guess the Number - http://www.codeskulptor.org/#user40_QwCzfXhK4H_9.py]
# template for "Guess the number" mini-project
import simplegui
import random
import math
# Global Variables
num_range = 100
num_guesses = 7
secret_number = 0
# Helper Function
def new_game():
    """Start a new round: pick a secret number and reset the guess budget."""
    global secret_number, num_range, num_guesses
    # randint is inclusive at both ends, so the secret is in [0, num_range].
    secret_number = random.randint(0,num_range)
    # Count of values in the range; max/min with 0 keeps the result positive.
    calculation_n_1 = max(0,num_range) - min(0,num_range) + 1
    # ceil(log2(range size)) guesses are enough for a binary search.
    calculation_n_2 = math.ceil(math.log(calculation_n_1,2))
    num_guesses = int(calculation_n_2)
    print "New game started with range 0 - ", num_range, "!"
    print "Number of guesses left: ", num_guesses
# Event Handlers
def range100():
global num_range
num_range = 100
new_game()
def range1000():
global num_range
num_range = 1000
new_game()
def input_guess(guess):
    """Handle a guess typed into the input field.

    guess -- the text of the guess; converted to an int.  Prints a
    higher/lower hint, decrements the guess budget, and starts a new
    game on a win or when the guesses run out.
    """
    global secret_number, num_guesses
    value = int(guess)
    print "Guess was ", value
    if value > secret_number:
        num_guesses -= 1
        if num_guesses == 0:
            print "Lower & Game Over. Guesses left: ", num_guesses
            new_game()
        else:
            print "Lower, number of guesses left: ", num_guesses
    elif value < secret_number:
        num_guesses -= 1
        if num_guesses == 0:
            print "Higher & Game Over. Guesses left: ", num_guesses
            new_game()
        else:
            print "Higher, number of guesses left: ", num_guesses
    elif value == secret_number:
        num_guesses -= 1
        print "Correct!"
        new_game()
    else:
        # Unreachable for ints: one of >, <, == above always holds.
        print "Error"
# Create Frame
f = simplegui.create_frame("Guess the number", 200, 200)
# Registration Event Handlers & Start Frame
f.add_button("Range is (0, 100)", range100, 200)
f.add_button("range is (0, 1000)", range1000, 200)
f.add_input("Enter a guess", input_guess, 200)
# Starting the Game
new_game()
[Canvas and Drawing]
Event-Driven Drawing
- Computer monitor - 2D grid of pixels stored logically in a frame buffer (something which keeps track of the values of the pixels)
- Computers update the monitor based on the frame buffer at rate of around 60-72 times a second (refresh rate)
- Many applications will register a special function called a "draw handler" which will update the frame buffer.
- In CodeSkulptor we will register a simple draw handler using a simplegui command. CodeSkulptor calls the draw handler at around 60 times per second.
- The draw handler updates the canvas using a collection of draw commands that include things like draw_text, draw_line, draw_circle.
Canvas Coordinates
- Origin (0,0) is always in the upper left corner, not lower!
# first example of drawing on the canvas
import simplegui
# define draw handler
def draw(canvas):
canvas.draw_text("Hello!",[100, 100], 24, "White")
canvas.draw_circle([100, 100], 2, 2, "Red")
# create frame
frame = simplegui.create_frame("Text drawing", 300, 200)
# register draw handler
frame.set_draw_handler(draw)
# start frame
frame.start()
- You start text at the lower left of the string [X,Y].
# example of drawing operations in simplegui
# standard HMTL color such as "Red" and "Green"
# note later drawing operations overwrite earlier drawing operations
import simplegui
# Handler to draw on canvas
def draw(canvas):
canvas.draw_circle([100, 100], 50, 2, "Red", "Pink")
canvas.draw_circle([300, 300], 50, 2, "Red", "Pink")
canvas.draw_line([100, 100],[300, 300], 2, "Black")
canvas.draw_circle([100, 300], 50, 2, "Green", "Lime")
canvas.draw_circle([300, 100], 50, 2, "Green", "Lime")
canvas.draw_line([100, 300],[300, 100], 2, "Black")
canvas.draw_polygon([[150, 150], [250, 150], [250, 250], [150, 250]], 2,
"Blue", "Aqua")
canvas.draw_text("An example of drawing", [60, 385], 24, "Black")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 400, 400)
frame.set_draw_handler(draw)
frame.set_canvas_background("Yellow")
# Start the frame animation
frame.start()
[String Processing]
# String literals
s1 = "Rixner's funny"
s2 = 'Warren wears nice ties!'
s3 = " t-shirts!"
#print s1, s2
#print s3
# Combining strings
a = ' and '
s4 = "Warren" + a + "Rixner" + ' are nuts!'
print s4
# Characters and slices
print s1[3] #=> n
print s1[-1] #=> y
print s1[-2] #=> n
print len(s1)
print s1[0:6] + s2[6:] --> up to but NOT including.
print s2[:13] + s1[9:] + s3
# Converting strings
s5 = str(375)
print s5[1:]
i1 = int(s5[1:])
print i1 + 38
# Handle single quantity
def convert_units(val, name):
    """Return val and its unit name as one string, pluralized when val > 1."""
    suffix = "s" if val > 1 else ""
    return str(val) + " " + name + suffix
# convert xx.yy to xx dollars and yy cents
def convert(val):
    """Convert a float amount xx.yy into an "xx dollars and yy cents" phrase."""
    # Split the amount into whole dollars and rounded cents
    whole = int(val)
    fraction = int(round(100 * (val - whole)))
    # Render each part with the proper pluralization
    dollars_string = convert_units(whole, "dollar")
    cents_string = convert_units(fraction, "cent")
    # Assemble the phrase, dropping zero-valued parts
    if whole == 0 and fraction == 0:
        return "Broke!"
    if whole == 0:
        return cents_string
    if fraction == 0:
        return dollars_string
    return dollars_string + " and " + cents_string
# Tests
print convert(11.23)
print convert(11.20)
print convert(1.12)
print convert(12.01)
print convert(1.01)
print convert(0.01)
print convert(1.00)
print convert(0)
[Interactive Drawing]
# interactive application to convert a float in dollars and cents
import simplegui
# define global value
value = 3.12
# Handle single quantity
def convert_units(val, name):
result = str(val) + " " + name
if val > 1:
result = result + "s"
return result
# convert xx.yy to xx dollars and yy cents
def convert(val):
# Split into dollars and cents
dollars = int(val)
cents = int(round(100 * (val - dollars)))
# Convert to strings
dollars_string = convert_units(dollars, "dollar")
cents_string = convert_units(cents, "cent")
# return composite string
if dollars == 0 and cents == 0:
return "Broke!"
elif dollars == 0:
return cents_string
elif cents == 0:
return dollars_string
else:
return dollars_string + " and " + cents_string
# define draw handler
def draw(canvas):
canvas.draw_text(convert(value), [60, 110], 24, "White")
# define an input field handler
def input_handler(text):
global value
value = float(text)
# create a frame
frame = simplegui.create_frame("Converter", 400, 200)
frame.add_input("Enter value", input_handler, 100)
# register event handlers
frame.set_draw_handler(draw)
# start the frame
frame.start()
---
string = '1lll1l1l1l1ll1l111ll1l1ll1l1ll1ll111ll1ll1ll1l1ll1ll1ll1ll1lll1l1l1l1l1l1l1l1l1l1l1l1ll1lll1l111ll1l1l1l1l1'
print len(string)
ones = 0
els = 0
other = 0
for i in range(0,len(string)):
if string[i] == '1':
ones += 1
elif string[i] == 'l':
els += 1
else:
other += 1
print "Ones: ", ones
print "L's: ", els
print "Other: ", other
[Timers]
# Simple "screensaver" program.
# Import modules
import simplegui
import random
# Global state
message = "Python is Fun!"
position = [50, 50]
width = 500
height = 500
interval = 2000
# Handler for text box
def update(text):
global message
message = text
# Handler for timer
def tick():
x = random.randrange(0, width)
y = random.randrange(0, height)
position[0] = x #=> When you are changing elements of a global variable, the global declaration is optional!
position[1] = y #=> When you are changing elements of a global variable, the global declaration is optional!
# Handler to draw on canvas
def draw(canvas):
canvas.draw_text(message, position, 36, "Red")
# Create a frame
frame = simplegui.create_frame("Home", width, height)
# Register event handlers
text = frame.add_input("Message:", update, 150)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(interval, tick)
# Start the frame animation
frame.start()
timer.start()
[Programming Tips - Week 3]
#####################
# Example of event-driven code, buggy version
import simplegui
size = 10
radius = 10
# Define event handlers.
def incr_button_handler():
"""Increment the size."""
global size
size += 1
label.set_text("Value: " + str(size))
def decr_button_handler():
"""Decrement the size."""
global size
# Insert check that size > 1, to make sure it stays positive
# NOTE that this restriction has changed from the video
# since draw_circle now throws an error if radius is zero
size -= 1
label.set_text("Value: " + str(size))
def change_circle_handler():
"""Change the circle radius."""
global radius
radius = size
# Insert code to make radius label change.
def draw_handler(canvas):
"""Draw the circle."""
canvas.draw_circle((100, 100), radius, 5, "Red")
# Create a frame and assign callbacks to event handlers.
frame = simplegui.create_frame("Home", 200, 200)
label = frame.add_label("Value: " + str(size))
frame.add_button("Increase", incr_button_handler)
frame.add_button("Decrease", decr_button_handler)
frame.add_label("Radius: " + str(radius))
frame.add_button("Change circle", change_circle_handler)
frame.set_draw_handler(draw_handler)
# Start the frame animation
frame.start()
---
import simplegui
#####################
# Buggy code -- doesn't start frame
message = "Welcome!"
def click():
"""Change message on mouse click."""
global message
message = "Good job!"
def draw(canvas):
"""Draw message."""
canvas.draw_text(message, [50,112], 36, "Red")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 300, 200)
frame.add_button("Click me", click)
frame.set_draw_handler(draw)
frame.start()
#####################
# Buggy code -- doesn't start timers
def timer1_handler():
print "1"
def timer2_handler():
print "2"
timer1 = simplegui.create_timer(100, timer1_handler)
timer2 = simplegui.create_timer(300, timer2_handler)
timer1.start()
timer2.start()
Mini-Project 3 - [Stopwatch: The Game]
http://www.codeskulptor.org/#user40_6D32nD7Dqj_6.py
# template for "Stopwatch: The Game"
import simplegui
# define global variables
time = 0
X = 0
Y = 0
XY = str(X) + '/' + str(Y)
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(time):
    """Convert a time in tenths of a second into the string "A:BC.D".

    A = whole minutes, BC = zero-padded seconds, D = tenths.  The original
    returned "A:BC:D", which does not match the A:BC.D layout documented
    in the comment above (and required by the stopwatch mini-project).
    """
    A = time // 600             # whole minutes (600 tenths per minute)
    B = (time - A * 600) // 100 # tens digit of the seconds
    C = time % 100 // 10        # ones digit of the seconds
    D = time % 10               # remaining tenths of a second
    return str(A) + ':' + str(B) + str(C) + '.' + str(D)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
timer.start()
def stop():
global X, Y, XY
if timer.is_running():
Y += 1
if time % 10 == 0:
X += 1
XY = str(X) + '/' + str(Y)
timer.stop()
def reset():
global time, X, Y, XY
time = 0
X = 0
Y = 0
XY = str(X) + '/' + str(Y)
# define event handler for timer with 0.1 sec interval
def tick():
global time
time += 1
# define draw handler
def draw(canvas):
canvas.draw_text(format(time), [110, 120], 36, 'White', 'sans-serif')
canvas.draw_text(XY, [215, 35], 36, 'Green', 'sans-serif')
# create frame
frame = simplegui.create_frame("Stopwatch", 300, 200)
timer = simplegui.create_timer(100, tick)
# register event handlers
frame.add_button('Start', start)
frame.add_button('Stop', stop)
frame.add_button('Reset', reset)
frame.set_draw_handler(draw)
# start frame
frame.start()
# Please remember to review the grading rubric
- In Python, the time module can be used to determine the current time. This module includes the method time which returns the current system time in seconds since a date referred as the Epoch. The Epoch is fixed common date shared by all Python installations. Using the date of the Epoch and the current system time, an application such as a clock or calendar can compute the current time/date using basic arithmetic.
import simplegui
n = 23
def collatz_conjecture():
global n
if n == 1:
timer.stop()
elif n % 2 == 0:
n = n / 2
print n
else:
n = (n * 3) + 1
print n
timer = simplegui.create_timer(100, collatz_conjecture)
timer.start()
[Lists]
- A list is a sequence type
- lists use square brackets
- [] = empty list
- position = [x, y]
l = [1, 3, 4, -7, 62, 43]
l2 = ['milk', 'eggs', 'bread', 'butter']
l3 = [[3, 4], ['a', 'b', 'c'], []]
print len(l) #=> 6
print len(l2) #=> 4
print len(l3) #=> 3
print "first element: ", l[0] #=> 1
print "last element: ", l[-1] #=> 43
print l3[1] #=> ['a', 'b', 'c'] -- start counting at 0
print l3[0][1] #=> 4
l4 = l2[1:3] # starting at element 1 but up to (not including) 3
print l4 #=> ['eggs', 'bread']
l2[0] = 'cheese'
print l2 #=> ['cheese', 'eggs', 'bread', 'butter']
- Good programmers keep their lists monogamous (basically vectors) --> all data types of the same type, strings, numerics, objects, etc.
[Keyboard Input]
===
# Keyboard echo
import simplegui
# initialize state
current_key = ' '
# event handlers
def keydown(key):
global current_key
current_key = chr(key) # chr turns a number into a string
def keyup(key):
global current_key
current_key = ' '
def draw(c):
# NOTE draw_text now throws an error on some non-printable characters
# Since keydown event key codes do not all map directly to
# the printable character via ord(), this example now restricts
# keys to alphanumerics
if current_key in "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789":
c.draw_text(current_key, [10, 25], 20, "Red")
# create frame
f = simplegui.create_frame("Echo", 35, 35)
# register event handlers
f.set_keydown_handler(keydown)
f.set_keyup_handler(keyup)
f.set_draw_handler(draw)
# start frame
f.start()
# <18> are the actual key codes
===
# control the position of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
# define event handlers
def draw(canvas):
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
vel = 4 # velocity
if key == simplegui.KEY_MAP["left"]:
ball_pos[0] -= vel
elif key == simplegui.KEY_MAP["right"]:
ball_pos[0] += vel
elif key == simplegui.KEY_MAP["down"]:
ball_pos[1] += vel
elif key == simplegui.KEY_MAP["up"]:
ball_pos[1] -= vel
# create frame
frame = simplegui.create_frame("Positional ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
===
[Motion]
position = velocity * time [p = v * t]
# assumes velocity is constant
===
# Ball motion with an explicit timer
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
init_pos = [WIDTH / 2, HEIGHT / 2] # middle of canvas
vel = [0, 3] # pixels per tick
time = 0
# define event handlers
def tick():
global time
time = time + 1
def draw(canvas):
# create a list to hold ball position
ball_pos = [0, 0]
# calculate ball position
ball_pos[0] = init_pos[0] + time * vel[0]
ball_pos[1] = init_pos[1] + time * vel[1]
# draw ball
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Motion", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, tick)
# start frame
frame.start()
timer.start()
===
- [3,3] + vector [6,1] == [9,4]
P(0) ----> P(1) ----> P(2) ----------> P(3)
V(0) V(1) V(2)
P(t+1) = P(t) + (1 * V(t))
P[0] = P[0] + V[0]
P[1] = P[1] + V[1]
===
# Ball motion with an implicit timer
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [0, 1] # pixels per update (1/60 seconds -- implicit to the draw handler)
# define event handlers
def draw(canvas):
# Update ball position
ball_pos[0] += vel[0]
ball_pos[1] += vel[1]
# Draw ball
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Motion", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
# start frame
frame.start()
===
[Collisions and Reflections]
# Distance between two points
Point 1 == p[x,y] # end
Point 2 == q[x,y] # start
math
dist(p,q)^2 == (p[0] - q[0])^2 + (p[1] - q[1])^2 # C^2 = A^2 + B^2
Python
def dist(p, q):
    """Return the Euclidean distance between 2-D points p and q.

    Fixes two transcription errors in the original: ``P[1]`` (undefined,
    wrong case) is ``p[1]``, and the stray trailing ``a=`` is removed.
    """
    return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# Vectors and Motion
v[0] = p[0] - q[0]
v[1] = p[1] - v[1]
Moving/translate a point using a vector: p = q + v
p[0] = q[0] + v[0]
p[1] = q[1] + v[1]
# Update for Motion
Math - point at position p with velocity v
p = p + a * v # 'a' is 'some' constant multiple of the velocity
p[0] = p[0] + a * v[0]
p[1] = p[1] + a * v[1]
[Collisions]
Left wall
p[0] <= 0
Right wall
p[0] >= width - 1
Collision of ball with center p and radius r with wall
Left wall
p[0] <= r
Right wall
p[0] >= (width - 1) - r
Bottom wall
p[1] >= (height - 1) - r
Reflections - update the velocity vector v
Left wall - compute reflected velocity vector (negate it)
v[0] = -v[0] # negate
v[1] = v[1] # stays the same
===
0 == x == horizontal
1 == y == vertical
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [-40.0 / 60.0, 5.0 / 60.0]
# define event handlers
def draw(canvas):
# Update ball position
ball_pos[0] += vel[0]
ball_pos[1] += vel[1]
# collide and reflect off of left hand side of canvas
if ball_pos[0] <= BALL_RADIUS:
vel[0] = - vel[0]
# Draw ball
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Ball physics", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
# start frame
frame.start()
===
[Velocity Control]
===
# control the position of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
# define event handlers
def draw(canvas):
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
vel = 4
if key == simplegui.KEY_MAP["left"]:
ball_pos[0] -= vel
elif key == simplegui.KEY_MAP["right"]:
ball_pos[0] += vel
elif key == simplegui.KEY_MAP["down"]:
ball_pos[1] += vel
elif key == simplegui.KEY_MAP["up"]:
ball_pos[1] -= vel
print ball_pos
# create frame
frame = simplegui.create_frame("Positional ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
===
# control the velocity of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [0, 0]
# define event handlers
def draw(canvas):
# Update ball position
ball_pos[0] += vel[0]
ball_pos[1] += vel[1]
# Draw ball
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
acc = 1
if key==simplegui.KEY_MAP["left"]:
vel[0] -= acc
elif key==simplegui.KEY_MAP["right"]:
vel[0] += acc
elif key==simplegui.KEY_MAP["down"]:
vel[1] += acc
elif key==simplegui.KEY_MAP["up"]:
vel[1] -= acc
print ball_pos
# create frame
frame = simplegui.create_frame("Velocity ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
[Visualizing Lists and Mutation]
###################################
# Mutation vs. assignment
is (identity) vs. == (equality)
################
# Look alike, but different
a = [4, 5, 6]
b = [4, 5, 6]
print "Original a and b:", a, b
print "Are they same thing?", a is b #=> False
a[1] = 20
print "New a and b:", a, b
print
################
# Aliased
c = [4, 5, 6]
d = c
print "Original c and d:", c, d
print "Are they same thing?", c is d #=> True
c[1] = 20
print "New c and d:", c, d
print
################
# Copied
e = [4, 5, 6]
f = list(e)
print "Original e and f:", e, f
print "Are they same thing?", e is f
e[1] = 20
print "New e and f:", e, f
print
###################################
# Interaction with globals
a = [4, 5, 6]
def mutate_part(x):
a[1] = x #=> for item assignment (mutation) you don't need to specify global, it assumes it
def assign_whole(x):
a = x #=> here it assumes a is a local variable
def assign_whole_global(x):
global a
a = x
mutate_part(100)
print a
assign_whole(200)
print a
assign_whole_global(300)
print a
[Programming Tips]
print 1 is 1 # True
print 1.0 is 1.0 # True
print True is True # True
print "abc" is "abc" # True
print [4, 5, 6] is [4, 5, 6] # False - only type that is mutable // two different lists that happen to look-a-like
print 1 is 1.0 # False - integers are not floating type
print (4, 5, 6) is (4, 5, 6) # False - Tuple
Tuples
- Look like lists but are NOT mutable.
- Tuples and lists support the same non-mutation operations. Like lists, you can loop on tuples.
- The benefit is that sometimes you want to make sure your data is not changed, to protect you data.
# Lists (mutable) vs. tuples (immutable)
print [4, 5, 6] #=> [4, 5, 6]
print (4, 5, 6) #=> (4, 5, 6)
print type([4, 5, 6]) #=> <class 'list'>
print type((4, 5, 6)) #=> <class 'tuple'>
a = [4, 5, 6]
a[1] = 100
print a #=> [4, 100, 6]
b = (4, 5, 6)
b[1] = 100
print b #=> Error - 'tuple' does not support item assignment
[Pong]
===
# Implementation of classic arcade game Pong
import simplegui
import random
# initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
PAD_WIDTH = 8
PAD_HEIGHT = 80
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
LEFT = False
RIGHT = True
paddle1_vel = [0] # only one item since we do not move horizontally
paddle1_pos = [(WIDTH - 4.0),(HEIGHT / 2.0)]
paddle2_vel = [0] # only one item since we do not move horizontally
paddle2_pos = [(WIDTH - (PAD_WIDTH / 2.0)),(HEIGHT / 2.0)]
ball_pos = [(WIDTH/2), (HEIGHT/2)]
ball_vel = [0.0, 0.0]
acc = 4
vel_increase = 0.1
score_left = 0
score_right = 1
def spawn_ball(direction):
    """Re-center the ball and give it a random velocity toward `direction`.

    direction -- the string 'LEFT' or 'RIGHT'.  NOTE(review): the module
    also defines boolean constants LEFT/RIGHT, which are not used here —
    confirm which convention callers pass.
    """
    global ball_pos
    ball_pos = [(WIDTH/2), (HEIGHT/2)]
    if direction == 'LEFT':
        # draw handler draws 60x per second: 120/60 = 2 & 240/60 = 4
        # NOTE(review): randrange with float arguments is invalid in
        # Python 3 (and deprecated in 2) — randint(2, 3) may be intended.
        ball_vel[0] = (random.randrange(2.0, 4.0, 1) * -1)
        ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)
    elif direction == 'RIGHT':
        # draw handler draws 60x per second: 60/60 = 1 & 180/60 = 3
        ball_vel[0] = random.randrange(2.0, 4.0, 1)
        ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)
    else:
        print "Direction parameter of spawn_ball() not recognized.."
# define event handlers
def new_game():
global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel
global score_left, score_right
score_left = 0
score_right = 0
random_side = random.randint(1, 2)
if random_side == 1:
spawn_ball('LEFT')
elif random_side == 2:
spawn_ball('RIGHT')
else:
print "Error new_game() direction not recognized"
def draw(canvas):
global vel_increase, score_left, score_right
# draw mid line and gutters
canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, "White")
canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, "White")
canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, "White")
# draw ball
canvas.draw_circle([(ball_pos[0] + ball_vel[0]),(ball_pos[1] + ball_vel[1])], BALL_RADIUS, 5, "White", "White")
# Paddle 1 position + keep on screen
if paddle1_pos[1] - HALF_PAD_HEIGHT < 0:
paddle1_pos[1] = HALF_PAD_HEIGHT
elif paddle1_pos[1] + HALF_PAD_HEIGHT > HEIGHT:
paddle1_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)
else:
paddle1_pos[1] += paddle1_vel[0]
# Paddle 2 position + keep on screen
if paddle2_pos[1] - HALF_PAD_HEIGHT < 0:
paddle2_pos[1] = HALF_PAD_HEIGHT
elif paddle2_pos[1] + HALF_PAD_HEIGHT > HEIGHT:
paddle2_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)
else:
paddle2_pos[1] += paddle2_vel[0]
# Ball position + collision
if ball_pos[1] >= (HEIGHT - 1) - BALL_RADIUS:
ball_vel[1] = -ball_vel[1]
elif ball_pos[1] < BALL_RADIUS + 1:
ball_vel[1] = -ball_vel[1]
elif ball_pos[0] + BALL_RADIUS >= WIDTH - PAD_WIDTH:
if ball_pos[1] > (paddle2_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle2_pos[1] + HALF_PAD_HEIGHT):
ball_vel[0] = -ball_vel[0]
ball_vel[0] = ball_vel[0] * (1 + vel_increase)
ball_vel[1] = ball_vel[1] * (1 + vel_increase)
else:
spawn_ball('LEFT')
score_right += 1
elif ball_pos[0] - BALL_RADIUS <= PAD_WIDTH:
if ball_pos[1] > (paddle1_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle1_pos[1] + HALF_PAD_HEIGHT):
ball_vel[0] = -ball_vel[0]
ball_vel[0] = ball_vel[0] * (1 + vel_increase)
ball_vel[1] = ball_vel[1] * (1 + vel_increase)
else:
spawn_ball('RIGHT')
score_left += 1
ball_pos[0] += ball_vel[0]
ball_pos[1] += ball_vel[1]
# Draw Paddle 1
canvas.draw_line([(PAD_WIDTH / 2),(paddle1_pos[1] + HALF_PAD_HEIGHT)], [(PAD_WIDTH / 2),(paddle1_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, "White")
# Draw Paddle 2
canvas.draw_line([(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] + HALF_PAD_HEIGHT)], [(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, "White")
# draw scores
canvas.draw_text(str(score_left), (450, 30), 24, "White", "monospace")
canvas.draw_text(str(score_right), (150, 30), 24, "White", "monospace")
def keydown(key):
    """Key-press handler: accelerate a paddle (w/s = left, up/down = right)."""
    # Map each control key to (paddle velocity list, signed acceleration).
    bindings = {
        simplegui.KEY_MAP["w"]: (paddle1_vel, -acc),
        simplegui.KEY_MAP["s"]: (paddle1_vel, acc),
        simplegui.KEY_MAP["up"]: (paddle2_vel, -acc),
        simplegui.KEY_MAP["down"]: (paddle2_vel, acc),
    }
    if key in bindings:
        vel, delta = bindings[key]
        vel[0] += delta
def keyup(key):
    """Key-release handler: stop the paddle driven by the released key."""
    if key in (simplegui.KEY_MAP["w"], simplegui.KEY_MAP["s"]):
        paddle1_vel[0] = 0
    elif key in (simplegui.KEY_MAP["up"], simplegui.KEY_MAP["down"]):
        paddle2_vel[0] = 0
# create frame
# Build the CodeSkulptor GUI frame and wire the handlers defined above.
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.add_button('Restart', new_game)
# start frame
# Reset scores/ball state first so the frame opens with a fresh game.
new_game()
frame.start()
http://www.codeskulptor.org/#user40_zOy9sLlDqc_31.py
===
Splitting a list into two halves:
my_list[: len(my_list) // 2] and my_list[len(my_list) // 2 :]
or, equivalently: my_list[0 : len(my_list) // 2] and my_list[len(my_list) // 2 : len(my_list)]
import math
def dist(p, q):
    """Euclidean distance between points p and q, reduced by a fixed radius of 2."""
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    return math.hypot(dx, dy) - 2
# Example inputs for dist(); prints sqrt(8) - 2 (Python 2 print statement).
p = [4, 7]
q = [2, 9]
print dist(p,q)
===
import simplegui
# Quiz example: pressing 'w' doubles the counter on keydown and
# subtracts 3 on keyup; draw() just renders the current value.
global_var = 5
def draw(canvas):
    # 'global' is redundant for a read-only access, kept as in the original.
    global global_var
    canvas.draw_text(str(global_var), (10, 50), 24, "White", "monospace")
def keydown(key):
    global global_var
    if key == simplegui.KEY_MAP["w"]:
        global_var *= 2
def keyup(key):
    global global_var
    if key == simplegui.KEY_MAP["w"]:
        global_var -= 3
frame = simplegui.create_frame("Quiz", 100, 100)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.set_draw_handler(draw)
frame.start()
2,739 | ce3e2aa2534bb404b45202bcb76e9d07080560cb | import torch
from torch import nn
import pytorch_ssim
class Custom_Loss_for_Autoencoder(nn.Module):
    """Autoencoder reconstruction loss: MSE minus SSIM.

    SSIM is a similarity measure (higher is better), so subtracting it
    yields a single quantity suitable for minimisation.
    """

    def __init__(self, window_size=6):
        super(Custom_Loss_for_Autoencoder, self).__init__()
        self.ssim = pytorch_ssim.SSIM(window_size=window_size)
        self.mse = nn.MSELoss()

    def forward(self, reconstructed_images, images):
        mse_term = self.mse(reconstructed_images, images)
        ssim_term = self.ssim(reconstructed_images, images)
        return mse_term - ssim_term
|
2,740 | 9e8b5cebd48b3b98e421c896d9835ada5ec4166e | from django.db.models import Q, Avg
from django.http import JsonResponse
from rest_framework import permissions
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from base_backend import permissions as my_perms
from base_backend.utils import RequestDataFixer
from restaurants.models import User, Cuisine, MealType, AppVersion, RestaurantType, Restaurant, Menu, Order, OrderLine, \
Wilaya, City, Address, Phone
from restaurants.serializers import UserSerializer, SmsConfirmationSerializer, CuisineSerializer, \
RestaurantTypeSerializer, RestaurantSerializer, MenuSerializer, OrderLineSerializer, WilayaSerializer, \
CitySerializer, OrderWRestaurantSerializer, MealTypesWithMenuSerializer, MealTypeSerializer, OrderSerializer, \
AddressSerializer, PhoneSerializer
class LoginApi(ObtainAuthToken):
    """Token login endpoint: validates credentials and returns the auth token
    together with a snapshot of the user's profile fields."""

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data,
                                           context=dict(request=request))
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        # Reuse an existing token if one was already issued for this user.
        token, created = Token.objects.get_or_create(user=user)
        return Response(
            dict(
                token=token.key,
                user_id=user.pk,
                phone=user.phone,
                email=user.email,
                type=user.user_type,
                photo=user.photo.url if user.photo else None,
                address=user.address,
                city=user.lives_in_id,
                birth_date=user.birth_date,
                username=user.username,
                # is_participant=user.client.is_participant if user.client is not None else None,
                # participant_id=user.client.participant.participant_id if user.client else None,
            )
        )
class UserViewSet(ModelViewSet):
    """CRUD over active users; registration is open, everything else needs auth."""
    serializer_class = UserSerializer
    queryset = User.objects.filter(is_active=True)

    def get_permissions(self):
        # Anyone may create an account; other actions require authentication
        # (or are read-only).
        if self.action == 'create' or self.action == 'register':
            return [permissions.AllowAny()]
        else:
            return [permissions.IsAuthenticatedOrReadOnly()]

    @action(methods=['post'], detail=False, url_path='register', permission_classes=[permissions.AllowAny()])
    def register(self, request, *args, **kwargs):
        # Replace the serialized user payload with a simple status envelope.
        response = super().create(request, *args, **kwargs)
        if response:
            response.data = dict(status=True, code=4)
        return response

    def create(self, request, *args, **kwargs):
        # POST /users/ behaves exactly like the register action.
        return self.register(request, *args, **kwargs)
class OtpApi(APIView):
    """SMS OTP endpoints: GET resends a code, PUT confirms/activates it."""
    permission_classes = [permissions.AllowAny]

    def get(self, request):
        # NOTE(review): resend()/activate() are custom serializer helpers and
        # is_valid() is never called here — confirm they validate internally.
        serializer = SmsConfirmationSerializer(data=request.GET)
        result = serializer.resend()
        if result:
            response = dict(status=True, code=5)
        else:
            response = dict(status=False, code=21)
        return Response(response)

    def put(self, request):
        serializer = SmsConfirmationSerializer(data=request.data)
        result = serializer.activate()
        if result:
            response = dict(status=True, code=5)
        else:
            response = dict(status=False, code=20)
        return Response(response)
class CuisineViewSet(ModelViewSet):
    """CRUD for cuisines; write access restricted to admins."""
    serializer_class = CuisineSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
    queryset = Cuisine.objects.all()
class MealTypeViewSet(ModelViewSet):
    """Meal types, plus an endpoint listing the types that have menus at a
    given restaurant (with the menus embedded)."""
    permission_classes = [my_perms.IsAdminOrReadOnly]
    serializer_class = MealTypeSerializer
    queryset = MealType.objects.all()

    def get_serializer(self, *args, **kwargs):
        # The menu-embedding action needs a richer serializer.
        if self.action == "get_types_with_menus":
            serializer_class = MealTypesWithMenuSerializer
            kwargs['context'] = self.get_serializer_context()
            return serializer_class(*args, **kwargs)
        return super(MealTypeViewSet, self).get_serializer(*args, **kwargs)

    @action(['get'], detail=False, url_path="type-with-menus", )
    def get_types_with_menus(self, request, *args, **kwargs):
        # Expects ?restaurant=<id>; defaults to 0 (matches nothing) when absent.
        types = self.get_queryset().filter(menus__offered_by=request.query_params.get('restaurant', 0))
        types = self.get_serializer(types, many=True).data
        return Response(types)
class RestaurantTypeViewSet(ModelViewSet):
    """CRUD for restaurant types; write access restricted to admins."""
    serializer_class = RestaurantTypeSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
    queryset = RestaurantType.objects.all()
class RestaurantViewSet(ModelViewSet):
    """Restaurant listing plus aggregate endpoints (home page, offers, menus)."""
    serializer_class = RestaurantSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Restaurant.objects.all()

    # FIX: the original annotated these helpers with `-> queryset`, which made
    # the return annotation evaluate to the class-level QuerySet *instance* —
    # legal but meaningless as a type. String annotations are never evaluated.
    def _get_recommended_restaurants(self) -> "QuerySet":
        # Annotates every restaurant with its average star rating; ordering
        # and slicing are left to the callers.
        queryset = self.get_queryset()
        recommended = queryset.all().annotate(rates_avg=Avg('rates__stars'))
        return recommended

    def _get_special_restaurants(self) -> "QuerySet":
        # Restaurants with at least one discounted menu or a special day.
        # NOTE(review): the menus join may yield duplicate rows; confirm
        # whether .distinct() is wanted here.
        queryset = self.get_queryset()
        special_offers_restaurants = queryset.filter(Q(menus__discount__gt=0) | Q(on_special_day=True))
        return special_offers_restaurants

    @action(['get'], detail=False, url_path="get-home")
    def home(self, request, *args, **kwargs):
        # Random sample of five restaurants per category for the home screen.
        recommended = self._get_recommended_restaurants().order_by('?')[:5]
        special = self._get_special_restaurants().order_by('?')[:5]
        all_restaurants = self.get_queryset().order_by('?')[:5]
        recommended = self.get_serializer(recommended, many=True).data
        special = self.get_serializer(special, many=True).data
        all_restaurants = self.get_serializer(all_restaurants, many=True).data
        response = {
            'recommended': recommended,
            'special': special,
            'all': all_restaurants
        }
        return Response(response)

    @action(['get'], detail=False, url_path="special-offers")
    def special_offers(self, request, *args, **kwargs):
        serializer = self.get_serializer(self._get_special_restaurants().order_by('-created_at'), many=True)
        return Response(serializer.data)

    @action(['get'], detail=False, url_path="recommended-offers")
    def recommended_offers(self, request, *args, **kwargs):
        serializer = self.get_serializer(self._get_recommended_restaurants().order_by('-rates_avg'), many=True)
        return Response(serializer.data)

    @action(['get'], detail=True, url_path="restaurant-menus")
    def get_restaurant_menus(self, request, *args, **kwargs):
        # Custom manager method: menus grouped by meal type for one restaurant.
        categorized_menus = Menu.objects.grouped_by_meal_type_for_a_restaurant(restaurant_id=self.kwargs.get('pk'))
        return Response(categorized_menus)
class MenuViewSet(ModelViewSet):
    """Menus plus aggregate listing endpoints (home, special, recommended)."""
    serializer_class = MenuSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Menu.objects.all()

    @action(['get'], detail=False, url_path="get-home")
    def home(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        # Random sample of five discounted menus and five arbitrary menus.
        special_offers = queryset.filter(~Q(discount=0)).order_by('?')[:5]
        recommended = queryset.all().order_by('?')[:5]
        special_offers = self.get_serializer(special_offers, many=True).data
        recommended = self.get_serializer(recommended, many=True).data
        response = {
            'recommended': recommended,
            'special_offers': special_offers
        }
        return Response(data=response)

    @action(['get'], detail=False, url_path="special-offers")
    def special_offers(self, request, *args, **kwargs):
        queryset = self.get_queryset()
        special_offers = queryset.filter(~Q(discount=0)).order_by('-created_at')
        serializer = self.get_serializer(special_offers, many=True)
        return Response(serializer.data)

    @action(['get'], detail=False, url_path="recommended-offers")
    def recommended_offers(self, request, *args, **kwargs):
        # NOTE(review): "recommended" is simply newest-first here — no rating
        # signal is used, unlike RestaurantViewSet.
        queryset = self.get_queryset()
        recommended = queryset.all().order_by('-created_at')
        serializer = self.get_serializer(recommended, many=True)
        return Response(serializer.data)
class OrderViewSet(ModelViewSet):
    """Orders of the authenticated client, newest first."""
    serializer_class = OrderWRestaurantSerializer
    permission_classes = [permissions.IsAuthenticated]
    queryset = Order.objects.all().order_by('-created_at')

    def get_serializer(self, *args, **kwargs):
        # Writes use the flat OrderSerializer; reads embed the restaurant.
        if self.action == "create":
            return OrderSerializer(*args, **kwargs)
        return super(OrderViewSet, self).get_serializer(*args, **kwargs)

    def get_queryset(self):
        # Restrict to the requesting client's own orders.
        return super(OrderViewSet, self).get_queryset().filter(client=self.request.user.client)

    def create(self, request, *args, **kwargs):
        # NOTE(review): a RequestDataFixer instance is passed where DRF
        # expects a request object — presumably it proxies one; confirm.
        fixer = RequestDataFixer(request=request)
        return super(OrderViewSet, self).create(fixer, *args, **kwargs)
class OrderLineViewSet(ModelViewSet):
    """Plain CRUD over order lines."""
    serializer_class = OrderLineSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = OrderLine.objects.all()
class WilayaViewSet(ModelViewSet):
    """CRUD for wilayas (provinces); write access restricted to admins."""
    serializer_class = WilayaSerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
    queryset = Wilaya.objects.all()
class CityViewSet(ModelViewSet):
    """CRUD for cities; write access restricted to admins."""
    serializer_class = CitySerializer
    permission_classes = [my_perms.IsAdminOrReadOnly]
    queryset = City.objects.all()
def version(request):
    """App-version endpoint.

    With ``?code=<v>``: store the new version and return ``{'updated': True}``.
    Without it: return the current version as ``{'code': <v>}``.
    """
    if request.GET.get('code', None):
        code = request.GET.get('code')
        AppVersion.objects.all().update(code=code)
        return JsonResponse({'updated': True})
    else:
        # FIX: the original called .first().code unconditionally (and left a
        # debug print in), crashing with AttributeError when the table is
        # empty; return code=None in that case instead.
        current = AppVersion.objects.all().first()
        return JsonResponse({'code': current.code if current else None})
class AddressViewSet(ModelViewSet):
    """Addresses of the authenticated client."""
    serializer_class = AddressSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Address.objects.all()

    @action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
    def set_default(self, request, *args, **kwargs):
        # Mark this address as default and unset the client's other addresses.
        instance = self.get_object()
        instance.default = True
        instance.save()
        self.get_queryset().filter(~Q(pk=instance.pk), belongs_to=request.user.client).update(default=False)
        return Response(self.get_serializer(instance).data)

    @action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
    def set_main(self, request, *args, **kwargs):
        # Clears the default flag on every address of the client.
        self.get_queryset().filter(belongs_to=request.user.client).update(default=False)
        return Response({"status": True})

    def get_queryset(self):
        # Restrict to the requesting client's addresses.
        return super(AddressViewSet, self).get_queryset().filter(belongs_to=self.request.user.client)
class PhoneViewSet(ModelViewSet):
    """Phone numbers of the authenticated user."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    serializer_class = PhoneSerializer
    queryset = Phone.objects.all()

    @action(['PUT'], detail=False, url_path="set-main", url_name='set-main')
    def set_main(self, request, *args, **kwargs):
        # Clears the default flag on every phone of the user.
        self.get_queryset().filter(user=request.user).update(default=False)
        return Response({"status": True})

    @action(['PUT'], detail=True, url_path="set-default", url_name='set-default')
    def set_default(self, request, *args, **kwargs):
        # Mark this phone as default and unset the user's other phones.
        instance = self.get_object()
        instance.default = True
        instance.save()
        self.get_queryset().filter(~Q(pk=instance.pk), user=request.user).update(default=False)
        return Response(self.get_serializer(instance).data)

    def get_queryset(self):
        # BUG FIX: the original called self.get_queryset() here, recursing
        # infinitely on any request; delegate to the parent implementation
        # (mirrors AddressViewSet.get_queryset).
        return super(PhoneViewSet, self).get_queryset().filter(user=self.request.user)
|
2,741 | 9655cba5b459ae8b6812bcebc31cc46e19e52386 | # Given two binary strings, return their sum (also a binary string).
#
# For example,
# a = "11"
# b = "1"
# Return "100".
#
# Show Company Tags
# Show Tags
# Show Similar Problems
class Solution(object):
    def addBinary(self, a, b):
        """Add two binary strings and return their sum as a binary string.

        :type a: str
        :type b: str
        :rtype: str
        """
        digits = []
        i, j, carry = len(a) - 1, len(b) - 1, 0
        # Walk both strings from the least-significant end, propagating carry.
        while i >= 0 or j >= 0 or carry:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            carry, bit = divmod(total, 2)
            digits.append(str(bit))
        return ''.join(reversed(digits))
|
2,742 | 8b97c1e14adfcb09806e2d37e2f5c4f0b356c009 | #
# abc088 c
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    # NOTE(review): the test method names below are mojibake — apparently
    # Japanese text (likely '入力例', "sample input") mis-decoded, and the
    # damage split each identifier across two lines, so this class does not
    # parse as-is. Restore the original UTF-8 names to repair the file.

    def assertIO(self, input, output):
        # Swap stdio for in-memory buffers, run resolve(), and compare the
        # captured output (minus the trailing newline) against `output`.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_ๅ
ฅๅไพ_1(self):
        input = """1 0 1
2 1 2
1 0 1"""
        output = """Yes"""
        self.assertIO(input, output)

    def test_ๅ
ฅๅไพ_2(self):
        input = """2 2 2
2 1 2
2 2 2"""
        output = """No"""
        self.assertIO(input, output)

    def test_ๅ
ฅๅไพ_3(self):
        input = """0 8 8
0 8 8
0 8 8"""
        output = """Yes"""
        self.assertIO(input, output)

    def test_ๅ
ฅๅไพ_4(self):
        input = """1 8 6
2 9 7
0 7 7"""
        output = """No"""
        self.assertIO(input, output)
def resolve():
    """Read a 3x3 grid c from stdin and print "Yes" iff there exist a_i, b_j
    with c[i][j] == a_i + b_j for all cells, else "No"."""
    grid = [list(map(int, input().split())) for _ in range(3)]
    # Fix a_1 = 0; the first row then determines b_j and the first column a_i.
    b = grid[0][:]
    a = [0, grid[1][0] - b[0], grid[2][0] - b[0]]
    # Check every cell (row 0 and column 0 hold by construction).
    consistent = all(a[i] + b[j] == grid[i][j] for i in range(3) for j in range(3))
    print("Yes" if consistent else "No")
if __name__ == "__main__":
    # unittest.main()
    # Runs the solver directly; switch to unittest.main() to run the tests.
    resolve()
|
2,743 | f739fb56eae1ada2409ef7d75958bad2018f5134 | from flask import Flask
from raven.contrib.flask import Sentry
from flask.signals import got_request_exception
app = Flask(__name__)
# NOTE(review): app.config['SENTRY_DSN'] must already be set when this module
# is imported, otherwise this line raises KeyError — confirm config ordering.
sentry = Sentry(dsn=app.config['SENTRY_DSN'])
# Forward every unhandled request exception to Sentry.
@got_request_exception.connect
def log_exception_to_sentry(app, exception=None, **kwargs):
    """
    Logs an exception to sentry.
    :param app: The current application
    :param exception: The exception that occurred
    """
    sentry.captureException(exception)
|
2,744 | 05851df7ae64d792e0c1faf96e2aca5b40e86d53 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-20 11:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds FormPrintingCount: a per-(form_name, key) print counter for jasper
    # forms. NOTE(review): the verbose_name/help_text literals below are
    # mojibake-damaged Thai text; they are runtime strings and are left
    # untouched here — restore the original UTF-8 text separately.

    dependencies = [
        ('core', '0052_encounter_note'),
    ]

    operations = [
        migrations.CreateModel(
            name='FormPrintingCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('form_name', models.CharField(max_length=255, verbose_name='เธเธทเนเธญเนเธเธฅเน jasper')),
                ('key', models.CharField(help_text='เธชเธดเนเธเธเธตเนเนเธเนเธฃเธฐเธเธธเนเธญเธเธชเธฒเธฃเธเธฑเนเธเน เนเธเนเธ pk, เธงเธฑเธเธเธตเน', max_length=255)),
                ('count', models.PositiveIntegerField(default=0, verbose_name='เธเธณเธเธงเธเธเธฃเธฑเนเธเธเธตเนเธเธดเธกเธเน')),
            ],
        ),
        # One counter row per (form, key) pair.
        migrations.AlterUniqueTogether(
            name='formprintingcount',
            unique_together=set([('form_name', 'key')]),
        ),
    ]
|
2,745 | ff1db5981a0163df1dfb44869a3d4af2be03c10a | import struct
class H264Packet:
    """Thin wrapper over a raw H.264 stream packet.

    Layout: a 2-byte unsigned type header (native byte order — presumably
    little-endian on the wire; confirm 'H' vs '<H') followed by the payload.
    The parsed values are exposed as the ``type`` and ``data`` attributes.

    FIX: the original also defined ``type()`` and ``data()`` accessor methods
    with the same names as these instance attributes; the attributes shadowed
    them, so any ``pkt.type()`` call raised TypeError. The dead accessors
    have been removed — the attributes are the API.
    """

    UNKNOWN_TYPE, I_HDR, P_HDR, B_HDR, I_DATA, P_DATA, B_DATA = range(7)

    def __init__(self, packet):
        self.packet = packet
        # Parsed header type and payload; remain None for short packets.
        self.type = None
        self.data = None
        # NOTE(review): packets of exactly 3 bytes (2-byte header + 1 data
        # byte) are also rejected — the original used `> 3`; kept as-is.
        if len(packet) > 3:
            (self.type,) = struct.unpack('H', packet[0:2])
            self.data = packet[2:]

    def serialize(self):
        """Return the raw packet bytes unchanged."""
        return self.packet
2,746 | edd2b7b453d7fa33e6cca3b5dbc895f034a9e22a | import torch
import numpy as np
from torch.autograd import Variable
from util import helpers
from util.metrics import ECELoss, ece_score
import sklearn.metrics as skm
import os
import pandas as pd
import pickle
def eval(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
    """Evaluate `net` on in-distribution `testloader` and OOD `oodloader`.

    Writes each sample's max-softmax confidence to `path_in` (ID) and
    `path_out` (OOD), prints accuracy / ECE / AUROC, and optionally dumps
    embeddings and per-sample stats under `save_dir`.
    NOTE(review): shadows the builtin `eval`; requires CUDA
    (ECELoss().cuda() is unconditional even when use_cuda is False — confirm).
    """
    f1 = open(path_in, 'w')
    f2 = open(path_out, 'w')
    ece_criterion = ECELoss().cuda()
    net.eval()
    net.training = False
    correct = 0
    total = 0
    logits_list = []
    labels_list = []
    confidence_list = []
    correct_list = []
    predicted_list = []
    sne_embeddings = []
    print('| Classification confidence for ID is saved at: {}'.format(path_in))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            # Net is assumed to return (logits, hidden embedding) — confirm.
            outputs, hidden = net(inputs)
            # this is the OOD magic
            nnOutputs = helpers.softmax(outputs)
            for k in range(len(inputs)):
                f1.write("{}\n".format(np.max(nnOutputs[k])))
                confidence_list.append(np.max(nnOutputs[k]))
                sne_embeddings.append(hidden.data.cpu()[k].numpy())
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            correct_list.extend(predicted.eq(targets.data).cpu().tolist())
            predicted_list.extend(predicted.cpu().tolist())
            logits_list.append(outputs.data)
            labels_list.append(targets.data)
    logits = torch.cat(logits_list).cuda()
    labels = torch.cat(labels_list).cuda()
    ece = ece_criterion(logits, labels)
    if save_dir:
        # Per-sample dumps for later t-SNE / calibration analysis.
        with open(os.path.join(save_dir, 'mcp_sne.pkl'), 'wb') as f:
            pickle.dump(sne_embeddings, f)
        with open(os.path.join(save_dir, 'mcp_targets.txt'), 'w') as f:
            for item in labels_list:
                # NOTE(review): writes only element [0] of each batch tensor.
                f.write('{}\n'.format(item.cpu().numpy()[0]))
        with open(os.path.join(save_dir, 'mcp_pred.txt'), 'w') as f:
            for item in predicted_list:
                f.write('{}\n'.format(item))
        with open(os.path.join(save_dir, 'mcp_correct.txt'), 'w') as f:
            for item in correct_list:
                f.write('{}\n'.format(item))
        with open(os.path.join(save_dir, 'mcp_confidence.txt'), 'w') as f:
            for item in confidence_list:
                f.write('{}\n'.format(item))
    acc = 100.*correct/total
    acc_list = (sum(correct_list)/len(correct_list))
    # calculate AUROC for classifcation accuracy
    fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
    auroc_classification = skm.auc(fpr, tpr)
    print("| Test Result\tAcc@1: %.2f%%" %(acc))
    print(f'| ECE: {ece.item()}')
    # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
    print(f'| Acc list: {acc_list}')
    print(f'| AUROC classification: {auroc_classification}')
    sne_embeddings_ood = []
    print('| Classification confidence for OOD is saved at: {}'.format(path_out))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(oodloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs, hidden = net(inputs)
            # this is the OOD magic
            nnOutputs = helpers.softmax(outputs)
            for k in range(len(inputs)):
                f2.write("{}\n".format(np.max(nnOutputs[k])))
                sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
    if save_dir:
        with open(os.path.join(save_dir, 'mcp_sne_ood.pkl'), 'wb') as f:
            pickle.dump(sne_embeddings_ood, f)
def eval_cifar10(path_in, path_out, net, testloader, oodloader, use_cuda=True, save_dir=None):
    """CIFAR-10 variant of eval(): same pipeline, but labels are flattened to
    a plain list before dumping and output files carry a `_cifar10` suffix.
    NOTE(review): near-duplicate of eval(); consider consolidating the two.
    """
    f1 = open(path_in, 'w')
    f2 = open(path_out, 'w')
    ece_criterion = ECELoss().cuda()
    net.eval()
    net.training = False
    correct = 0
    total = 0
    logits_list = []
    labels_list = []
    confidence_list = []
    correct_list = []
    predicted_list = []
    sne_embeddings = []
    print('| Classification confidence for ID is saved at: {}'.format(path_in))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs, hidden = net(inputs)
            # this is the OOD magic
            nnOutputs = helpers.softmax(outputs)
            for k in range(len(inputs)):
                f1.write("{}\n".format(np.max(nnOutputs[k])))
                confidence_list.append(np.max(nnOutputs[k]))
                sne_embeddings.append(hidden.data.cpu()[k].numpy())
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
            correct_list.extend(predicted.eq(targets.data).cpu().tolist())
            predicted_list.extend(predicted.cpu().tolist())
            logits_list.append(outputs.data)
            labels_list.append(targets.data)
    logits = torch.cat(logits_list).cuda()
    labels = torch.cat(labels_list).cuda()
    # Flatten per-batch label tensors into one python list for the dumps.
    labels_list = torch.cat(labels_list).cpu().tolist()
    ece = ece_criterion(logits, labels)
    if save_dir:
        with open(os.path.join(save_dir, 'mcp_sne_cifar10.pkl'), 'wb') as f:
            pickle.dump(sne_embeddings, f)
        with open(os.path.join(save_dir, 'mcp_targets_cifar10.txt'), 'w') as f:
            for item in labels_list:
                f.write('{}\n'.format(item))
        with open(os.path.join(save_dir, 'mcp_pred_cifar10.txt'), 'w') as f:
            for item in predicted_list:
                f.write('{}\n'.format(item))
        with open(os.path.join(save_dir, 'mcp_correct_cifar10.txt'), 'w') as f:
            for item in correct_list:
                f.write('{}\n'.format(item))
        with open(os.path.join(save_dir, 'mcp_confidence_cifar10.txt'), 'w') as f:
            for item in confidence_list:
                f.write('{}\n'.format(item))
    acc = 100.*correct/total
    acc_list = (sum(correct_list)/len(correct_list))
    # calculate AUROC for classifcation accuracy
    fpr, tpr, _ = skm.roc_curve(y_true = correct_list, y_score = confidence_list, pos_label = 1) #positive class is 1; negative class is 0
    auroc_classification = skm.auc(fpr, tpr)
    print("| Test Result\tAcc@1: %.2f%%" %(acc))
    print(f'| ECE: {ece.item()}')
    # print(f'| ECE v2: {ece_score(logits.cpu(), labels.cpu())}')
    print(f'| Acc list: {acc_list}')
    print(f'| AUROC classification: {auroc_classification}')
    sne_embeddings_ood = []
    print('| Classification confidence for OOD is saved at: {}'.format(path_out))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(oodloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)
            outputs, hidden = net(inputs)
            # this is the OOD magic
            nnOutputs = helpers.softmax(outputs)
            for k in range(len(inputs)):
                f2.write("{}\n".format(np.max(nnOutputs[k])))
                sne_embeddings_ood.append(hidden.data.cpu()[k].numpy())
    if save_dir:
        with open(os.path.join(save_dir, 'mcp_sne_ood_cifar10.pkl'), 'wb') as f:
            pickle.dump(sne_embeddings_ood, f)
def train():
    # Placeholder — training is implemented elsewhere; this module only evaluates.
    pass
|
2,747 | def2721cd89501b1004d5d3f4f58df300616c1be |
import sys
with open(sys.argv[1], 'r') as test_cases:
    # Each input line: "<words> | <letters>". Print the first word that
    # contains every letter (counted with multiplicity), or False otherwise.
    # NOTE(review): indentation was lost in this copy — the nesting below is
    # a best-effort reconstruction; verify against the original file.
    # NOTE(review): `kha` is reset once per line, not per candidate word, so
    # counts leak across words — looks like a bug; confirm intent.
    for test in test_cases:
        stringe = test.strip()
        list1 = stringe.split(" | ")
        list2 = list1[0].split(" ")
        kha = 0
        for item in list2:
            for c in list1[1]:
                if c in item:
                    kha +=1
            if kha == len(list1[1]):
                print (item)
                break
        else:
            print (False)
            break
2,748 | 1c31649ac75214a6d26bcb6d6822579be91e5074 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite

# Recreate the Telegram message-log table from scratch in ./logs.db.
connection = lite.connect('./logs.db')
with connection:
    cursor = connection.cursor()
    # Drop any stale schema, then create the log table fresh.
    cursor.execute("DROP TABLE IF EXISTS log")
    cursor.execute('''CREATE TABLE log (msg_id text, u_id text, username text, first_name text, last_name text, msg text, ch_id text, day text)''')
2,749 | 25dc0395da1f1ac2ccd990151c3e5b250802b402 | from schemasheets.schemasheet_datamodel import SchemaSheet
# Column-header names shared by the parser test cases below.
RECORD = "Record"
FIELD = "Field"
METATYPE = "MetaType"
INFO = "Info"
CV = "CV"
PV = "PV"
SDO_MAPPINGS = "schema.org"
WD_MAPPINGS = "wikidata"
DATATYPE = "Datatype"
# Each case is (case_id, header_rows): the rows emulate the '>'-prefixed
# schemasheets header lines that map spreadsheet columns onto LinkML elements.
CASES = [
    (1,
     [
         {
             RECORD: "> class",
             INFO: " description",
             SDO_MAPPINGS: "exact_mappings: {curie_prefix: sdo}",
             WD_MAPPINGS: "exact_mappings"
         },
         # A second '>' row carries extra settings for the wikidata column.
         {
             RECORD: ">",
             WD_MAPPINGS: "curie_prefix: wd"
         },
     ]
     ),
    (2,
     [
         {RECORD: "> class", FIELD: " slot", INFO: " description"},
     ]
     ),
    (3,
     [
         {METATYPE: "> metatype", INFO: " description"},
     ]
     ),
    (4,
     [
         {CV: "> enum", PV: "permissible_value", INFO: " description"},
     ]
     ),
    (5,
     [
         {DATATYPE: "> type", INFO: " description"},
     ]
     ),
    # unnecessary/incompatible with the latest meta-model
    # (6,
    #  [
    #      {DATATYPE: "> metaslot.type", INFO: " description"},
    #  ]
    #  ),
]
def test_parse_header():
    """Parse every CASES header via SchemaSheet.from_dictreader and check the
    derived per-column configs (maps_to, metaslot, curie_prefix settings)."""
    print()
    for case_id, case in CASES:
        ss = SchemaSheet.from_dictreader(case)
        tc = ss.table_config
        # Every case has an Info column mapping to 'description'.
        info_cc = tc.columns[INFO]
        assert info_cc.name == INFO
        assert info_cc.maps_to == "description"
        assert info_cc.metaslot is not None
        assert info_cc.metaslot.name == "description"
        if case_id == 1 or case_id == 2:
            assert tc.metatype_column is None
            record_cc = tc.columns[RECORD]
            assert record_cc.name == RECORD
            assert record_cc.maps_to == "class"
            assert record_cc.metaslot is None
        if case_id == 2:
            field_cc = tc.columns[FIELD]
            assert field_cc.name == FIELD
            assert field_cc.maps_to == "slot"
            assert field_cc.metaslot is None
        if case_id == 1:
            # Both mapping columns resolve to exact_mappings, distinct prefixes.
            sdo_cc = tc.columns[SDO_MAPPINGS]
            assert sdo_cc.name == SDO_MAPPINGS
            assert sdo_cc.maps_to == "exact_mappings"
            assert sdo_cc.metaslot is not None
            assert sdo_cc.metaslot.name == "exact mappings" or\
                sdo_cc.metaslot.name == "exact_mappings"
            assert sdo_cc.settings.curie_prefix == "sdo"
            wd_cc = tc.columns[WD_MAPPINGS]
            assert wd_cc.name == WD_MAPPINGS
            assert wd_cc.maps_to == "exact_mappings"
            assert wd_cc.metaslot is not None
            assert wd_cc.metaslot.name == "exact mappings" or \
                wd_cc.metaslot.name == "exact_mappings"
            assert wd_cc.settings.curie_prefix == "wd"
        if case_id == 3:
            assert tc.metatype_column == METATYPE
            record_cc = tc.columns[METATYPE]
            assert record_cc.name == METATYPE
            assert record_cc.maps_to == "metatype"
            assert record_cc.metaslot is None
        if case_id == 4:
            cv_cc = tc.columns[CV]
            assert cv_cc.name == CV
            assert cv_cc.maps_to == "enum"
            assert cv_cc.metaslot is None
            pv_cc = tc.columns[PV]
            assert pv_cc.name == PV
            assert pv_cc.maps_to == "permissible_value"
            assert pv_cc.metaslot is None
        if case_id == 5:
            dt_cc = tc.columns[DATATYPE]
            #print(dt_cc)
            assert dt_cc.name == DATATYPE
            assert dt_cc.maps_to == "type"
            assert dt_cc.metaslot is None
        if case_id == 6:
            # Dead branch: case 6 is commented out in CASES above.
            # See https://github.com/linkml/schemasheets/issues/75
            dt_cc = tc.columns[DATATYPE]
            assert dt_cc.name == DATATYPE
            assert dt_cc.maps_to == "type"
            assert dt_cc.metaslot is not None
            assert dt_cc.metaslot.name == "type"
|
2,750 | 8b0e7e8f2031df217894e980758e15d7401c0981 | import sys
def read(inp):
    """Parse 'n v' then n 'value weight' lines from the text stream `inp`.

    Returns (v, items) where items is a list of (value, weight) tuples.
    """
    n, capacity = map(int, inp.readline().split())
    items = [tuple(map(int, inp.readline().split())) for _ in range(n)]
    return capacity, items
def solve(v, items):
    """Greedy fractional knapsack.

    `items` must be pre-sorted by value/weight ratio (descending); returns
    the best attainable value for capacity `v` (float once a fraction is taken).
    """
    total_value = 0
    remaining = v
    for item_value, item_weight in items:
        if remaining > item_weight:
            total_value += item_value
            remaining -= item_weight
        else:
            # Take the fraction that still fits and stop.
            total_value += item_value * (remaining / item_weight)
            break
    return total_value
if __name__ == '__main__':
    # Reads the problem from the local file '1' (swap for stdin when submitting).
    inp = open('1', 'r')
    # inp = sys.stdin
    v, items = read(inp)
    # Sort by value density (value/weight, descending) for the greedy pass.
    s_items = sorted(items, key=lambda i: i[0]/i[1], reverse=True)
    res = solve(v, s_items)
    print(res)
|
2,751 | f11e6a53d8dfc60f73f346772df7a3cab14088ce | """
* @section LICENSE
*
* @copyright
* Copyright (c) 2017 Intel Corporation
*
* @copyright
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* @copyright
* http://www.apache.org/licenses/LICENSE-2.0
*
* @copyright
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @section DESCRIPTION
"""
from re import match
from os import environ
import sys
from cts_core.commons.error import cts_error
from cts_core.commons.replay_controller import ReplayController
from cts_framework.actions.action import Action
from cts_framework.actions.execute.execute_test_scripts_action import ExecuteTestScriptsAction
from cts_framework.build_information import BuildInformation
from cts_framework.commons.color_printer import ColorPrinter
from cts_framework.commons.logging_helper import LoggingHelper
from cts_framework.db.dao.script_dao import ScriptDAO
from cts_framework.tests_managing.test_package.tests_packages_container import TestsPackagesContainer
from cts_framework.tests_managing.tests_manager import TestsManager
from cts_framework.tests_running.execution_feed import ExecutionFeed
def split_replay_id(replay_id):
    """Convert the user-supplied replay id into a script execution id.

    :type replay_id: str
    :rtype: (Boolean, int) -- (None, id) on success, (True, None) on error
    """
    candidate = replay_id.strip()
    if match(r"^(\d+)$", candidate):
        return None, int(candidate)
    cts_error("Replay id has invalid format. Expected: unsigned integer")
    return True, None
class ReplayTestRunAction(Action):
    """CLI action that re-executes a previously recorded test-script run."""
    ACTION = "replay"
    PARAM_NAME = "ACTION"

    def __init__(self, *params, **kwargs):
        Action.__init__(self, *params, **kwargs)
        self._logger = LoggingHelper(__name__)

    def fill_parser_arguments(self):
        self.parser.add_argument("replay_id", help="ID of the test script run to replay", type=str, nargs=1)

    def process_action(self, configuration):
        # Look up the recorded execution, rebuild its configuration and test
        # plan, export the recorded id for the ReplayController, then run.
        replay_id = configuration.replay_id[0]
        print "Using CTS in version %s to replay execution %s" \
            % (ColorPrinter.format_text(BuildInformation.BUILD_VERSION, bold=True), replay_id)
        error, script_execution_id = split_replay_id(replay_id)
        if error:
            # split_replay_id already reported the format error.
            return
        # TODO: warn user when he tries to replay using newer CTS
        script_execution = ScriptDAO.get_script_execution_details(script_execution_id)
        if script_execution is None:
            cts_error("Recording for script execution id={id:ignore} not found", id=script_execution_id)
            return
        script_path = script_execution.script_path
        configuration = self._configuration_from_string(script_execution.configuration)
        test_plan = self._prepare_test_plan(script_path)
        environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)
        self._execute(configuration, test_plan)

    def _configuration_from_string(self, configuration_str):
        # Parse "--key value --key2 value2" back into a {key: value} dict.
        configuration = {b[0]: b[1] for b in
                         (a.strip().split(' ', 1) for a in filter(None, configuration_str.split('--')))}
        return configuration

    def _prepare_test_plan(self, script_path):
        # Narrow the full package catalogue down to the recorded script.
        test_plan = TestsPackagesContainer()
        tests_manager = TestsManager()
        test_scripts_found = tests_manager.get_packages()
        test_scripts_found.filter(script_paths=[script_path], remove_empty=True)
        test_plan += test_scripts_found
        if not test_plan.packages:
            print "Script to execute not found in any package"
            sys.exit(0)
        return test_plan

    def _execute(self, configuration, test_plan):
        """
        :type configuration: dict
        :type test_plan: cts_framework.tests_managing.test_package.tests_packages_container.TestsPackagesContainer
        """
        message = "Executing "
        print "Executing:"
        for package in test_plan.packages:
            for suite in package.suites:
                for script in suite.scripts:
                    print "\t* %s from suite %s from package %s" % (script.name, suite.name, package.name)
                    message += "%s from suite %s from package %s, " % (script.name, suite.name, package.name)
        self._logger.log_debug(message)
        execution_feed = ExecutionFeed(test_plan, configuration)
        ExecuteTestScriptsAction.execute_configuration_group(execution_feed)
|
2,752 | ee8e117db0348aa37d6aa37e6c06255101f1cff4 | import socket
from time import time, sleep
from threading import Thread
# Define drone
class dm107s():
# Default control value
def __init__(self):
# 4 values for flight
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
# 0 - normal mode, 2 - emergency stop, 4 - gyroscope calibration
self.commands = 0
# Required for wifi control
self.onoff = 1
# Prevent multiple takeoff button presses
self._takeoff_flag = False
# Prevent multiple calibrate button presses
self._calibrate_flag = False
# Connect to UDP port
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
#self.sess.connect(('192.168.100.1', 19798))
# Initialize timer value
self._takeoff_timer = 0
self._calibrate_timer = 0
# Flag to stop thread
self._stopped = False
# Start separated thread for drone control
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
# Get command hex for drone
def get_hex(self):
# XOR is for checksum
self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)
self.command_out = hex(self.command_out)[2::]
return self.command_out
# Turn hex to byte package
def _get_packet(self):
self._hex_code = self.get_hex()
self.package = bytes.fromhex(self._hex_code)
return self.package
# Send control to drone
def send_ctrl(self):
while not self._stopped:
self._package = self._get_packet()
#self.sess.send(self._package)
self.sess.sendto(self._package, ('192.168.100.1', 19798))
self.Flag_off()
sleep(0.02)
# Close connection to drone
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
# Return to default
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
# Increment control
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
# Roll right
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
# Pitch forward
def pitch_fwd(self):
self.pitch += 20
if self.pitch > 248:
self.pitch = 248
# Increase throttle
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
# Yaw right
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
# Roll left
def roll_left(self):
self.roll -= 20
if self.roll < 18:
self.roll = 18
# Pitch backward
def pitch_bwd(self):
self.pitch -= 20
if self.pitch < 18:
self.pitch = 18
# Decrease throttle
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
# Yaw left
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
# Takeoff
def takeoff(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
# Landing
def land(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
# Flip takeoff flag
def Flag_off(self):
if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):
self.commands = 0
self._takeoff_flag = False
if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):
self.commands = 0
self.onoff = 1
self._calibrate_flag = False
# Stop IMMEDIATELY
def emergency_stop(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 2
self.onoff = 1
self._takeoff_flag = False
# Calibrate gyroscope
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
# Controller for a NAZA-based drone driven over UDP with 4-bit stick values.
class naza():
    # Default control value
    def __init__(self, ip, port):
        """Set neutral controls (8 = centre on the 0-15 scale) and open the socket.

        :param ip: drone IP address used by sendto()
        :param port: drone UDP port
        """
        # 4 values for flight
        self.roll = 8
        self.pitch = 8
        self.throttle = 8
        self.yaw = 8
        # Prevent multiple takeoff button presses
        self._takeoff_flag = False
        # Prevent multiple ignite button presses
        self._ignite_flag = False
        self._ignite_send = False
        # UDP socket used for control packets
        self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        self.ip = ip
        self.port = port
        #self.sess.connect((ip, port))
        # Initialize timer value
        self._ignite_timer = 0
        self._takeoff_timer = 0
        # Flag to stop thread
        self._stopped = False
        # BUG FIX: pre-declare the sender-thread handle so close_connection()
        # does not raise AttributeError when start() was never called.
        self._thread = None
    # Start separated thread for drone control
    def start(self):
        """Launch the background sender loop; returns self for chaining."""
        self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
        self._thread.start()
        return self
    # Get command hex for drone
    def get_hex(self):
        """Pack the four 4-bit controls into one 16-bit hex string
        (nibble order, high to low: throttle, yaw, pitch, roll)."""
        self.command_out = (self.throttle << 12 | self.yaw << 8 | self.pitch << 4 | self.roll)
        self.command_out = hex(self.command_out)[2::]
        return self.command_out
    # Send control to drone
    def send_ctrl(self):
        """Sender loop: a UDP packet every 0.05 s; sends 'st' while igniting."""
        while not self._stopped:
            if self._ignite_send == True:
                ignite_msg = 'st'
                self._package = ignite_msg.encode()
            else:
                self._package = self.get_hex().encode()
            #self.sess.send(self._package)
            self.sess.sendto(self._package, (self.ip, self.port))
            self.Flag_off()
            sleep(0.05)
    # Close connection to drone
    def close_connection(self):
        """Stop the sender loop (if running) and close the socket."""
        self._stopped = True
        # BUG FIX: guard against closing before start() was ever called.
        if self._thread is not None and self._thread.daemon == False:
            self._thread.join()
        self.sess.close()
    # Return to default
    def default(self):
        """Reset all controls and one-shot flags to the neutral state."""
        self.roll = 8
        self.pitch = 8
        self.throttle = 8
        self.yaw = 8
        self._takeoff_flag = False
        self._ignite_flag = False
    # Increment control
    def incremt(self, rl, pt, th, yw):
        """Apply signed offsets to the neutral value (8), clamped to 0..15."""
        self._value_to_change = [8, 8, 8, 8]
        self._change_val = [rl, pt, th, yw]
        for x in range(len(self._value_to_change)):
            self._value_to_change[x] += self._change_val[x]
            if self._value_to_change[x] <= 0:
                self._value_to_change[x] = 0
            if self._value_to_change[x] >= 15:
                self._value_to_change[x] = 15
        [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
    # Roll right
    def roll_right(self):
        if self.roll < 15:
            self.roll += 1
    # Pitch forward
    def pitch_fwd(self):
        if self.pitch < 15:
            self.pitch += 1
    # Increase throttle
    def throttle_up(self):
        if self.throttle < 15:
            self.throttle += 1
    # Yaw right
    def yaw_right(self):
        if self.yaw < 15:
            self.yaw += 1
    # Roll left
    def roll_left(self):
        if self.roll > 0:
            self.roll -= 1
    # Pitch backward
    def pitch_bwd(self):
        if self.pitch > 0:
            self.pitch -= 1
    # Decrease throttle
    def throttle_dwn(self):
        if self.throttle > 0:
            self.throttle -= 1
    # Yaw left
    def yaw_left(self):
        if self.yaw > 0:
            self.yaw -= 1
    # Start engine
    def ignite(self):
        """One-shot ignition: makes send_ctrl() transmit 'st' for ~1 s."""
        if self._ignite_flag == False:
            self._ignite_flag = True
            self._ignite_send = True
            self._ignite_timer = time()
    # Takeoff
    def takeoff(self):
        """Raise throttle to 12; Flag_off() restores it after 4 seconds."""
        if self._takeoff_flag == False:
            self.throttle = 12
            self._takeoff_flag = True
            self._takeoff_timer = time()
    # Flip takeoff flag
    def Flag_off(self):
        """Drive the timed ignition ramp and clear expired one-shot flags."""
        if self._ignite_flag == True:
            if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):
                self._ignite_send = False
                self.roll = 8
                self.pitch = 8
                self.yaw = 8
                self.throttle = 0
            # Warming up engine: step the throttle 0 -> 8 over ~2.5 s
            if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):
                self.throttle = 2
            if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):
                self.throttle = 4
            if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):
                self.throttle = 6
            if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):
                self.throttle = 8
            # After starting engine, takeoff after 4s
            if (time() - self._ignite_timer >= 4):
                self._ignite_flag = False
                self.takeoff()
        if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):
            self.throttle = 8
            self._takeoff_flag = False
|
2,753 | ef124e8c15ef347efd709a5e3fb104c7fd1bccde | #!/usr/bin/env python
#coding=utf-8
"""
__init__.py
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import sys
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, g, session, request, flash, redirect, jsonify, url_for
from flaskext.babel import Babel
from bg import helpers
from bg.extensions import db, mail, cache, photos, identity_changed, Identity
from bg.views import frontend,admin,post,account
from bg.models import Post
DEFAULT_MODULES = (
(frontend, ""),
(post, "/post"),
(account, "/account"),
(admin, "/admin"),)
DEFAULT_APP_NAME = 'bg'
def create_app(config=None, modules=None):
    """Application factory: build and fully configure the Flask app.

    :param config: path to a pyfile of settings (passed to ``from_pyfile``)
    :param modules: iterable of (module, url_prefix) pairs; defaults to
        DEFAULT_MODULES when None
    :return: the configured :class:`Flask` application
    """
    if modules is None:
        modules = DEFAULT_MODULES
    app = Flask(DEFAULT_APP_NAME)
    #config
    app.config.from_pyfile(config)
    # Order matters: extensions and logging first, then request hooks,
    # template helpers, signals and finally module registration.
    configure_extensions(app)
    configure_logging(app)
    configure_errorhandlers(app)
    configure_before_handlers(app)
    configure_template_filters(app)
    configure_context_processors(app)
    configure_signals(app)
    babel = Babel(app)
    # register module
    configure_modules(app, modules)
    return app
def on_identity_changed(app, identity):
    """Signal handler: store the new identity on flask.g and in the session."""
    g.identity = identity
    session['identity'] = identity
def configure_signals(app):
    """Connect the identity_changed signal for this app."""
    identity_changed.connect(on_identity_changed, app)
def configure_errorhandlers(app):
    """Register HTTP error handlers (currently only 401 -> login redirect)."""
    @app.errorhandler(401)
    def unauthorized(error):
        #if request.is_xhr:
        # return jsonfiy(error=_("Login required"))
        flash(("Please login to see this page"), "error")
        #return redirect(url_for("account.login", next=request.path))
        return redirect(url_for("account.login"))
def configure_before_handlers(app):
    """Install per-request hooks."""
    @app.before_request
    def authenticate():
        # Restore the identity saved in the session; fall back to an
        # anonymous identity (id 0) when none is present.
        try:
            g.identity = session['identity']
        except Exception:
            g.identity = Identity(0,'Login')
def configure_extensions(app):
    """Bind the shared extension singletons (db, mail, cache) to this app."""
    # configure extensions
    db.init_app(app)
    #db.app = app
    #db.create_all()
    mail.init_app(app)
    cache.init_app(app)
    #setup_themes(app)
def configure_context_processors(app):
    """Expose template context: the current user's (year, month) archives."""
    @app.context_processor
    def archives():
        archives = set()
        # Collect distinct (year, month) pairs from the user's posts;
        # stop scanning once more than 5 have been gathered.
        for dt in Post.query.from_self(Post.create_date).order_by().filter_by(author_id=g.identity.id):
            item = (dt.create_date.year, dt.create_date.month)
            archives.add(item)
            if len(archives) > 5:
                break
        archives = sorted(list(archives))
        return dict(archives=archives)
def configure_modules(app, modules):
    """Register each (module, url_prefix) pair.

    NOTE: register_module is the pre-blueprint Flask API this codebase uses.
    """
    for module, url_prefix in modules:
        app.register_module(module, url_prefix=url_prefix)
def configure_template_filters(app):
    """Register Jinja template filters, thin wrappers over bg.helpers."""
    @app.template_filter()
    def timesince(value):
        return helpers.timesince(value)
    @app.template_filter()
    def endtags(value):
        return helpers.endtags(value)
    @app.template_filter()
    def gravatar(email,size):
        return helpers.gravatar(email,size)
    @app.template_filter()
    def format_date(date,s='full'):
        return helpers.format_date(date,s)
    @app.template_filter()
    def format_datetime(time,s='full'):
        return helpers.format_datetime(time,s)
    @app.template_filter()
    def format_yearmonth(date):
        # *date* is a (year, month) tuple, e.g. from the archives context
        return '%s-%s'%date
def configure_logging(app):
    """Attach log handlers: errors by mail plus rotating debug/error files."""
    # E-mail ERROR-level records to the configured admins.
    mail_handler = \
        SMTPHandler(app.config['MAIL_SERVER'],
                    app.config['DEFAULT_MAIL_SENDER'],
                    app.config['ADMINS'],
                    'application error',
                    (
                        app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'],
                    ))
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]')
    # Rotating DEBUG log (100 kB per file, 10 backups).
    debug_log = os.path.join(app.root_path,
                             app.config['DEBUG_LOG'])
    debug_file_handler = \
        RotatingFileHandler(debug_log,
                            maxBytes=100000,
                            backupCount=10)
    debug_file_handler.setLevel(logging.DEBUG)
    debug_file_handler.setFormatter(formatter)
    app.logger.addHandler(debug_file_handler)
    # Rotating ERROR log with the same rotation policy.
    error_log = os.path.join(app.root_path,
                             app.config['ERROR_LOG'])
    error_file_handler = \
        RotatingFileHandler(error_log,
                            maxBytes=100000,
                            backupCount=10)
    error_file_handler.setLevel(logging.ERROR)
    error_file_handler.setFormatter(formatter)
    app.logger.addHandler(error_file_handler)
|
2,754 | 19c0c3156488ce99316ce40f32e84e476b7afdac | import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
#from pandas import datetime
#from pandas.tseries.t
from sklearn.preprocessing import MinMaxScaler
#from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import Series
# Load one UK-DALE power channel: rows are "<unix seconds> <watt sample>"
# recorded every 6 seconds. The column is named KWh up front but holds raw
# watt samples until the conversion below.
data = pd.read_csv(
    r'E:\Thesis Content\ukdale\house_1\channel_7.dat',
    delimiter=' ',
    header=None,
    names=['date', 'KWh'],
    dtype={'date': np.int64, 'KWh': np.float64},
    index_col='date'
) #initially KWh column contains Ws in 6 second interval, later it will be converted to KWh
# Interpret the integer index as POSIX timestamps.
data.index = pd.to_datetime((data.index.values), unit='s')
#data.head(5)
#before_process = data
after_process=data
#before_process = before_process.resample('d').sum()
#before_process['KWh'] = round(((before_process.KWh * 6) / (1000 * 3600)) , 3)
#before_process.head(5)
# Drop out-of-range samples (noise below 10 W or above 4000 W),
# then aggregate to daily totals.
after_process = after_process.drop(after_process[(after_process.KWh < 10) | (after_process.KWh > 4000) ].index)
after_process = after_process.resample('d').sum()
#after_process.head(5)
# Convert summed 6-second watt samples to kWh: W*6s / (1000*3600).
after_process['KWh'] = round(((after_process.KWh * 6) / (1000 * 3600)) , 3)
after_process.head(5)
after_process.to_csv(path_or_buf=r'E:\Thesis Content\ukdale CSV\Without Noise\Tvday.csv', sep = ',' , index_label = 'date')
#rcParams['figure.figsize'] = 16, 10
#plt.subplot(2, 1, 1)
#plt.scatter(before_process.index ,before_process['KWh'].values, s=10)
#plt.title('Before and After Pre Processing')
#plt.ylabel('KWh')
#plt.subplot(2, 1, 2)
#plt.scatter(after_process.index ,after_process['KWh'].values, s=10)
#plt.xlabel('Date')
#plt.ylabel('KWh')
#plt.show()
2,755 | 720ab0c0fcb40a50d73770e4ada6a78465e9ff96 |
# ----------------------
#
# *** WELCOME TO "HANGMAN" GAME ***
# Let's start programming
#
# ----------------------
def displayBoard(missedLetters, correctLetters, secretWord, alfabet_board, theme):
    """Render the current game state: gallows picture, theme, masked word,
    letters still available, and the list of wrong guesses."""
    print(hangnam_pics[len(missedLetters)])
    print("ะขะตะผะฐ:", theme)
    # Show the current state of the word being guessed.
    # NOTE(review): the outer index loop rebuilds dashed_word
    # len(secretWord) times; only the final pass is used.
    for index in range(len(secretWord)):
        dashed_word = ""
        for char in secretWord:
            if char in correctLetters:
                dashed_word = dashed_word + char + " "
            else:
                dashed_word += "_ "
    print("ะกะปะพะฒะพ ะฝะฐ ะดะพัะบะต: ", dashed_word)
    # Show the letters that are still available to guess
    for index in range (len(alfabet)):
        if alfabet[index] in correctLetters or alfabet[index] in missedLetters:
            alfabet_board += "_ "
        else:
            alfabet_board = alfabet_board + alfabet[index] + " "
    print("ะััะฐะฒัะธะตัั ะฑัะบะฒั: ", alfabet_board)
    # Show the list of wrongly guessed letters
    print("ะัะธะฑะพัะฝัะต ะฑัะบะฒั: ", end = "")
    if missedLetters == "":
        print(" -", end="")
    else:
        for letter in missedLetters:
            print(letter + " ", end="")
    print()
def getRandomWord(themes):
    """Pick a random theme and an upper-cased random word from that theme.

    :param themes: dict mapping theme name -> list of words
    :return: (theme_name, WORD) tuple
    """
    chosen_theme = random.choice(tuple(themes.keys()))
    chosen_word = random.choice(themes[chosen_theme]).upper()
    return chosen_theme, chosen_word
def getGuess(correctLetters, missedLetters):
    """Prompt until the player enters a single, new, valid alphabet letter;
    return it upper-cased. Relies on the module-level ``alfabet`` list."""
    while True:
        print()
        guess = input("ะะฒะตะดะธัะต ะฑัะบะฒั --> ").upper()
        if len(guess) != 1:
            print("ะะพะถะฐะปัะนััะฐ, ะฒะฒะตะดะธัะต ะพะดะฝั ะฑัะบะฒั.")
        elif guess in correctLetters or guess in missedLetters:
            print("ะั ัะถะต ะฝะฐะทัะฒะฐะปะธ ััั ะฑัะบะฒั")
        elif guess in (" _") or guess not in alfabet or type(guess) != str:
            print("ะญัะพ ะฝะต ะฑัะบะฒะฐ. ะะฒะตะดะธัะต ะะฃะะะฃ")
        else:
            break
    print()
    return guess
def gameFinish(correctLetters, missedLetters, secretWord):
    """Return True when the game is over: every distinct letter of the secret
    word was guessed (win) or six letters were missed (loss)."""
    # Distinct letters the player has to find
    unikLettersInSecretWord = set()
    for i in secretWord:
        unikLettersInSecretWord.add(i)
    if len(correctLetters) == len(unikLettersInSecretWord):
        print()
        print()
        print(f''' ะะะะะ ะะะะฏะะ!
 ะั ัะณะฐะดะฐะปะธ ัะปะพะฒะพ {secretWord} ะธ ะฒัะธะณัะฐะปะธ ะธะณัั "ะะะกะะะะฆะ"!''')
        return True
    elif len(missedLetters) == 6:
        print()
        print()
        print(f''' ะะะ ะ ะะะะะงะะะ!
 ะั ะฝะต ัะณะฐะดะฐะปะธ ัะปะพะฒะพ {secretWord} ะธ ะฟัะพะณัะฐะปะธ ะฒ ะธะณัั "ะะะกะะะะฆะ"!''')
        return True
    else:
        return False
def oneMore():
    """Ask the player whether to play again; return True for yes, False for no."""
    while True:
        print()
        answer = input("ะฅะพัะธัะต ััะณัะฐัั ะตัะต ัะฐะท? ะะฒะตะดะธัะต ะดะฐ/ะฝะตั --->").lower()
        if answer == "ะดะฐ":
            print()
            print()
            print()
            print()
            return True
        elif answer == "ะฝะตั":
            return False
        else:
            print("ะะฐั ะพัะฒะตั ะฝะต ะฟะพะฝััะตะฝ. ะะพะฟัะพะฑัะตะผ ะตัะต ัะฐะท.")
def mainGame(themes):
    """Run one full game of hangman using a random word from *themes*."""
    missedLetters = ""
    correctLetters = ""
    alfabet_board = ""
    print()
    print(
        ''' ะะพะฑัะพ ะฟะพะถะฐะปะพะฒะฐัั ะฒ ะธะณัั ะะะกะะะะฆะ!
 ะฃ ะะฐั ะตััั 6 ะฟะพะฟััะพะบ ัะณะฐะดะฐัั ัะปะพะฒะพ ะฟะพ ะทะฐะดะฐะฝะฝะพะน ัะตะผะต.
 ะะพัะปะต ะบะฐะถะดะพะน ะฝะตะฒะตัะฝะพะน ะฟะพะฟััะบะธ ะบ ัะธััะฝะบั ะฑัะดะตั ะดะพะฑะฐะฒะปััััั ัะฐััั ัะตะปะพะฒะตัะบะฐ.
 ะัะปะธ ัะปะพะฒะพ ะฑัะดะตั ัะณะฐะดะฐะฝะพ ะดะพ ัะพะณะพ, ะบะฐะบ ัะตะปะพะฒะตัะตะบ ััะฐะฝะตั ะฒะธะดะตะฝ ะฟะพะปะฝะพัััั - ะั ะฒัะธะณัะฐะปะธ!
 ะฃะดะฐัะธ!
 ''')
    print()
    input("ะะฐะถะผะธัะต ENTER ะดะปั ััะฐััะฐ.")
    # Pick the secret word
    theme, secretWord = getRandomWord(themes)
    while True:
        # Show the current game state
        displayBoard(missedLetters , correctLetters, secretWord, alfabet_board, theme)
        # Check for game over - evaluated before each new guess.
        # NOTE(review): replay re-enters mainGame recursively; when the inner
        # game returns, this loop continues with the finished game's state and
        # each replay deepens the stack. A loop-based restart would be safer.
        if gameFinish(correctLetters, missedLetters, secretWord):
            if oneMore():
                mainGame(themes)
            else:
                break
        # Ask the player for a letter and validate it
        guess = getGuess(correctLetters, missedLetters)
        # Record the guess in the matching pool
        if guess in secretWord:
            print("ะขะฐะบะฐั ะฑัะบะฒะฐ ะตััั ะฒ ัะปะพะฒะต!")
            correctLetters += guess
            time.sleep(2)
        else:
            print("ะขะฐะบะพะน ะฑัะบะฒั ะฝะตั ะฒ ัะปะพะฒะต!")
            missedLetters += guess
            time.sleep(2)
import random
import time
hangnam_pics = [
'''
+---+
|
|
|
===''',
'''
+---+
O |
|
|
===''',
'''
+---+
O |
| |
|
===''',
'''
+---+
O |
|\ |
|
===''',
'''
+---+
O |
/|\ |
|
===''',
'''
+---+
O |
/|\ |
/ |
===''',
'''
+---+
O |
/|\ |
/ \ |
==='''
]
alfabet = ["ะ","ะ","ะ","ะ","ะ","ะ","ร","ะ","ะ","ะ","ะ","ะ","ะ","ะ","ะ","ะ","ะ","ะ ","ะก","ะข","ะฃ","ะค", "ะฅ","ะง","ะฆ","ะง","ะจ","ะฉ","ะฌ","ะช","ะซ","ะญ","ะฎ","ะฏ"]
goroda = ["ะะธะตะฒ", "ะะดะตััะฐ", "ะฅะฐััะบะพะฒ", "ะัะฒะพะฒ", "ะะธะบะพะปะฐะตะฒ", "ะะธัะพะผะธั", "ะะพะปัะฐะฒะฐ", "ะงะตัะฝะธะณะพะฒ"]
zhyvotnye = ["ะฐะธัั","ะฐะบัะปะฐ","ะฑะฐะฑัะธะฝ","ะฑะฐัะฐะฝ", "ััะธัะพะฝ", "ัะตัะตะฟะฐั
ะฐ", "ััััะตะฑ", "ััะตัะธัะฐ", "ะผััะฐะฒะตะน","ะฑะฐัััะบ","ะผะตะดะฒะตะดั", "ะผะตะดะพะตะด", "ะผััะฐะฒัะตะด", "ะฟะฐะฝะดะฐ", "ะปะตะฝะธะฒะตั"]
themes = {"ะะพัะพะดะฐ ะฃะบัะฐะธะฝั": goroda, "ะะธะฒะพัะฝัะต": zhyvotnye}
mainGame(themes)
print()
print(" ะะกะะะ ะะะะ ะะะ!")
|
2,756 | 874fa927a1c0f1beeb31ca7b0de7fd2b16218ea4 | """main.py"""
import tkinter as tk
from tkinter import ttk
from ttkthemes import ThemedStyle
import wikipedia as wk
from newsapi import NewsApiClient as nac
import datetime
import random
class MainWindow:
    """Application controller object: builds the two-pane Tk UI and wires
    wikipedia search, category tracking and NewsAPI headlines together."""
    def __init__(self):
        # Currently displayed wikipedia page (None until first search).
        self.p = None
        self.main_page = tk.Tk()
        self.main_page.title("MetaWikipedia")
        self.main_page.geometry("500x500")
        self.style = ThemedStyle(self.main_page)
        self.style.set_theme("scidblue")
        self.left_pane = ttk.PanedWindow(self.main_page)
        self.right_pane = ttk.PanedWindow(self.main_page)
        # Left pane
        self.search = ttk.Button(self.left_pane, text="Search", command=self.search_wikipedia)
        self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5)
        self.randomize_but = ttk.Button(self.left_pane, text="Randomize", command=self.randomize)
        self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5)
        self.search_box = tk.Text(self.left_pane)
        self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1)
        self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
        self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1)
        extra_list_choices = ["none", "categories", "pageid", "sections", "html"]
        self.extra_list_choice = tk.StringVar()
        self.extra_list_choice.set("none")
        self.extra_list = ttk.OptionMenu(
            self.left_pane,
            self.extra_list_choice,
            *extra_list_choices,
            command=self.update_choice
        )
        self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1)
        self.other_text = tk.Text(self.left_pane)
        self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1)
        # Right pane
        self.api_key_label = ttk.Label(self.right_pane, text="API Key")
        self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4)
        self.api_key_entry = ttk.Entry(self.right_pane, text="ABC...")
        self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6)
        self.news_box = tk.Text(self.right_pane)
        self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1)
        self.top_categories_label = ttk.Label(self.right_pane, text="Top Categories")
        self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1)
        self.top_categories = tk.Text(self.right_pane)
        self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1)
        # Maps category name -> number of searched articles carrying it.
        self.category_map = {}
        self.randomize()
        self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
        self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5)
        self.main_page.mainloop()
    def search_wikipedia(self):
        """Safely browse wikipedia articles."""
        self.summary.delete('1.0', tk.END)
        possibilities = wk.search(self.search_box.get('1.0',tk.END).replace("\n",""))
        if len(possibilities) > 0:
            try:
                p = wk.page(possibilities[0])
            except wk.DisambiguationError as e:
                # Ambiguous title: fall back to the first suggested option.
                p = wk.page(e.options[0])
            self.summary.configure(state="normal")
            self.summary.delete('1.0', tk.END)
            self.summary.insert('1.0', p.summary)
            self.summary.configure(state="disabled")
            self.p = p
            self.update_category_map(p.categories)
            self.get_news()
        return None
    def update_choice(self, value):
        """Update box based on menu choice."""
        if self.p is not None:
            if value == "none":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', "")
            if value == "categories":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.categories)
            if value == "pageid":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.pageid)
            if value == "sections":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.sections)
            if value == "html":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.html())
    def randomize(self):
        """Randomize wikipedia article."""
        self.search_box.delete('1.0', tk.END)
        self.search_box.insert('1.0', wk.random())
        self.search_wikipedia()
    def update_category_map(self, category_list):
        """Update the category map after a search."""
        for category in category_list:
            # Skip wikipedia housekeeping categories.
            skip = False
            for i in ["wiki", "sources", "article", "stub",
                      "wayback", "cs1"]:
                if i in category.lower():
                    skip = True
            if skip:
                continue
            if category in self.category_map:
                self.category_map[category] += 1
            else:
                self.category_map[category] = 1
        self.update_top_categories()
    def update_top_categories(self):
        """Update the top categories text box."""
        cats = self.sorted_categories()
        display = ""
        for cat in cats:
            hit = "hits" if self.category_map[cat] > 1 else "hit"
            display += f"{cat}, {self.category_map[cat]} {hit}\n"
        self.top_categories.configure(state="normal")
        self.top_categories.delete('1.0', tk.END)
        self.top_categories.insert('1.0', display)
        self.top_categories.configure(state="disabled")
    def sorted_categories(self):
        """Sort categories by hits; return at most the top five."""
        count = lambda category: self.category_map[category]
        l = sorted(self.category_map, key=count, reverse=True)
        if len(l) > 5:
            return l[:5]
        else:
            return l
    def get_news(self):
        """Get news using News API."""
        if self.api_key_entry.get() == "":
            return None
        api = nac(api_key=self.api_key_entry.get())
        now = datetime.datetime.utcnow()
        two_weeks = (now-datetime.timedelta(days=14))
        #today = now.strftime()
        query = ""
        for cat in self.sorted_categories():
            query += f"{cat},"
        search = api.get_top_headlines(q=query,
                                       sources="bbc-news,the-verge",
                                       language="en")
        news = ""
        # BUG FIX: "articles" is a list of dicts; the old code indexed the
        # list with the dict itself (search['articles'][article]) -> TypeError.
        for article in search["articles"]:
            news += f"{article['title']}\n"
        self.news_box.delete('1.0', tk.END)
        self.news_box.insert('1.0', news)
if __name__ == "__main__":
main_window = MainWindow()
|
2,757 | 67d79a5c9eceef9f1ed69f79d6a9d1f421f3246c | import numpy as np
def calculate_distance_for_tour(tour, node_id_to_location_dict):
    """Total Euclidean length of the closed tour (includes the edge from the
    last node back to the first).

    :param tour: sequence of node ids in visiting order
    :param node_id_to_location_dict: node id -> coordinate array
    """
    length = 0
    previous = tour[-1]
    for current in tour:
        delta = node_id_to_location_dict[current] - node_id_to_location_dict[previous]
        length += np.linalg.norm(delta)
        previous = current
    return length
def aco_distance_callback(node_1, node_2):
    """Euclidean distance between two (x, y) nodes for the ACO solver."""
    import math
    # math.hypot computes sqrt(dx*dx + dy*dy) directly - clearer and
    # numerically safer than the manual abs/pow/sqrt formula.
    return math.hypot(node_1[0] - node_2[0], node_1[1] - node_2[1])
|
2,758 | a7fae2da8abba6e05b4fc90dec8826194d189853 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#allisnone 20200403
#https://github.com/urllib3/urllib3/issues/1434
#https://github.com/dopstar/requests-ntlm2
#https://github.com/requests/requests-ntlm
#base on python3
#if you request https website, you need to add ASWG CA to following file:
#/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem
#ulimit โn 2000
#pip install requests_ntlm
import argparse
import re
import os
import csv
import string,sys,time,datetime
import requests
from requests_toolbelt.adapters import source
#from requests_ntlm import HttpNtlmAuth
import random
import subprocess
#import zthreads
def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'):
if type=='ip' and max(start,end)>255:
end = 255
i = random.randint(start,end)
return prefix + str(i)
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
    """Return *num* distinct "prefix + index" strings sampled from [start, end].

    For type 'ip' the upper bound is capped at 255; *num* is clamped to the
    pool size so random.sample never over-draws.
    """
    if type == 'ip' and max(start, end) > 255:
        end = 255
    pool = [prefix + str(n) for n in range(start, end + 1)]
    return random.sample(pool, min(num, len(pool)))
def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):
    """Issue one proxied, NTLM-authenticated curl request in a subprocess.

    The request is bound to sub-interface *eth* and authenticated as *user*
    (password hard-coded) against *proxy*; *cert* is the CA used to validate
    the intercepted TLS connection. Best-effort: errors are printed only.
    """
    curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(
        cert,eth,user,proxy,url)
    subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
    try:
        subp.wait(2) # wait for completion with a 2 second timeout
    except Exception as e:
        print('curl_request_timeout, error: ',e)
        return
    if subp.poll() == 0:
        print(subp.communicate()[1])
    else:
        print("curl_request-ๅคฑ่ดฅ: ",curl_cmd)
    return
def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):
    """
    Fire one NTLM-authenticated curl header request via os.system.

    -I: header request
    -k: skip ssl
    --no-keepalive, keepalive=close

    When *debug* is false, curl output is discarded. NOTE(review): *debug*
    is forced to False below, so the caller's value is currently ignored.
    """
    curl_cmd = ''
    debug = False
    if is_http:
        basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'
        if debug:
            pass
        else:
            # BUG FIX: redirect target was misspelled '/dev/ull', which wrote
            # output to a stray file instead of discarding it.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth,user,proxy,url)
    else:
        basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
        if debug:
            pass
        else:
            # BUG FIX: same '/dev/ull' -> '/dev/null' typo as above.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)
    try:
        os_p = os.system(curl_cmd)
        print('curl_cmd=',curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))
    return
def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):
    """Load one URL per line from *from_file* and normalise each entry.

    :param from_file: text file with one record per line
    :param url_index: when >= 0, each record is split on *spliter* and the
        field at this index is taken as the URL; a ``www.`` prefix is added
        when both it and a protocol are missing
    :param spliter: field separator used when url_index >= 0
    :param pre_www: hostname prefix prepended in the url_index branch
    :return: list of URLs; entries without http/https/ftp get ``https://``
    """
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header:
            pass
        else:
            # No protocol given - default to https.
            url_list[i] = "https://" + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0, eth_num=254):
    """Map a request sequence number onto a (user index, sub-interface index) pair.

    Both indices wrap modulo their pool size, so sequences beyond the pool
    reuse users/interfaces round-robin. Sub-interfaces are numbered like
    "eth0:0" .. "eth0:253" (inet 172.18.1.1/16 .. 172.18.1.254/16).

    :return: (user_index, eth_index) tuple
    """
    user_index = user_start + sequence % user_num
    eth_index = eth_start + sequence % eth_num
    return user_index, eth_index
def callback():
    """Thread-pool completion callback; intentionally a no-op."""
    return None
def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
                   ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):
    """
    one ip/eth<--> one user

    Fires max(user_num, eth_num) curl requests, mapping request i onto user
    'userg<user_start + i % user_num>' and interface
    'eth0:<sub_eth_start + i % eth_num>'.
    NOTE(review): when is_same_url is false the url stays '' and *urls* is
    unused (the per-url loop is commented out) - confirm intended behaviour.
    """
    i = 0
    #count = max(len(urls),user_num,eth_num)
    #for url in urls:
    for i in range(max(user_num,eth_num)):
        url = ''
        if is_same_url:
            if is_http:
                url = 'http://172.16.0.1' #use the same url for request test
            else:
                url = 'https://www.baidu.com'
        user_index = i % user_num + user_start
        eth_index = i % eth_num + sub_eth_start
        #ip = get_random_ip_or_user(start=2,end=254)
        #ip = ip_prefix + str(eth_index + 1)
        #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')
        user = 'userg'+str(user_index)
        #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')
        eth = 'eth0:'+str(eth_index)
        """ For debug
        print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
        print('ip_{0}={1}'.format(i,ip))
        print('eth=',eth)
        print('user=',user)
        print("-" * 50)
        """
        #thread_pool.put(system_curl_request, (url,user,eth,), callback)
        #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
        #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')
        system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)
        #i = i + 1
    return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='่ฏฅPython3่ๆฌ็จไบASWGๅๅนถๅ่ฎค่ฏๆต่ฏใ\n 1ใไฝฟ็จๆนๆณ็คบไพ:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='่ฎค่ฏๅนถๅๆต่ฏ็ๆต่ฏๆฌกๆฐ๏ผ้ป่ฎค1่ฝฎๆต่ฏๅณๅๆญข')
parser.add_argument('-s','--starttime', type=str, default='',help='้ฆๆฌก่ฎค่ฏๅนถๅๆต่ฏ็ๆถ้ด๏ผๅฆ 16:20:60')
parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='่ฎค่ฏ็ผๅญ่ฟๆๆถ้ด๏ผ้ป่ฎค600็ง')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='ๅฎขๆท็ซฏIPๅ็ผ๏ผ้ป่ฎคๅชๆฏๆCๆฎต๏ผๅ
ถไปๆนๅผ่ช่ก้้
')
parser.add_argument('-u','--is-same-url', type=bool, default=True,help='ๆฏๅฆไฝฟ็จ็ธๅURLๆต่ฏ')
parser.add_argument('-u1','--is-http', type=bool, default=True,help='ๅฝๆๅฎไฝฟ็จ็ธๅURLๆถ๏ผๆๅฎๆฏhttp่ฟๆฏhttps่ฏทๆฑ')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urlsๆฅๆบๆไปถ')
parser.add_argument('-f1','--url-index', type=int, default=0,help='urlsๆฅๆบๆไปถไธญๅญๆฎตๅบๅท๏ผ้ป่ฎคไป0ๅผๅง')
# ---------------------------------------------------------------------------
# Command-line options (the ArgumentParser instance `parser` is created
# earlier in this file, outside the visible region).
# ---------------------------------------------------------------------------
parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth ็จๆท็ๅบๅท๏ผ้ป่ฎคไป0ๅผๅง')
parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth ็จๆทๆฐ้')
parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผๅง็ๅญ็ฝๅกๅบๅท๏ผ้ป่ฎคไป0ๅผๅง')
parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='ๅญ็ฝๅกๆฅๅฃๆฐ้๏ผๆฏไธชๆฅๅฃไธไธชIPๅฐๅ')
# NOTE(review): argparse's type=bool treats ANY non-empty string as True
# ("-d False" still yields True); action='store_true' is the usual fix.
parser.add_argument('-d','--is-debug', type=bool, default=False,help='ๆฏๅฆๅผๅฏcurl็ๆๅฐๆฅๅฟ')
args = parser.parse_args()

# Scheduling parameters: number of rounds and first "HH:MM:SS" start time.
max_round = args.round
first_schedule_time = args.starttime
now = datetime.datetime.now()
now_str = now.strftime("%H:%M:%S")
if first_schedule_time:
    # Accept only an 8-char HH:MM:SS string that is later than "now".
    if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str:
        pass
    else:
        print('-sๆ่
--starttime ๆ ผๅผไธๅฏน๏ผ่ฏท่พๅ
ฅๅคงไบๅฝๅๆถ้ดๅญ็ฌฆไธฒ๏ผๅฆ๏ผ16:20:60 ')
        sys.exit()
else:
    # No start time given: schedule the first round one minute from now.
    nexttime = now + datetime.timedelta(seconds=60)
    first_schedule_time = nexttime.strftime("%H:%M:%S")
auth_cache_timeout = args.auth_cache_timeout
proxy = args.aswg_proxy
ip_prefix = args.ip_prefix
is_same_url = args.is_same_url
# NOTE(review): the CLI value read just above is immediately discarded here;
# is_same_url is forced to True -- confirm whether this override is intended.
is_same_url = True
url_file = args.url_file
url_index = args.url_index
start_user_index = args.start_user_index
user_num = args.user_num
start_eth0_index = args.start_eth0_index
sub_eth0_num = args.sub_eth0_num
is_debug = args.is_debug
# Load the target URL list (helper defined earlier in this file).
urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.')
#print('urls=',urls)
#url = 'https://www.baidu.com'
print('urls_len=',len(urls))
#urls = urls[:300]
print('urls_len=',len(urls))
#from zthreads.threadpools.threadpools import Threadpools
#thread_pool = Threadpools(5)
i = 0
#unique_users = 1275
user_start = start_user_index
user_num = user_num
sub_eth_start = start_eth0_index
eth_num = sub_eth0_num
cert = 'rootCA.cer'
is_http = True
#first_schedule_time = "16:45:00"
#auth_cache_timeout = 60
#max_round = 2
print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout))
round_num = 0
# Main scheduling loop: poll the wall clock until it matches the scheduled
# HH:MM:SS, fire one round of requests, then compute the next time slot.
# NOTE(review): this is a busy-wait (no time.sleep in the else branch) and
# will keep one CPU core at 100% between rounds; a short sleep would fix it.
while True:
    #time_now = time.strftime("%H:%M:%S", time.localtime())
    now = datetime.datetime.now()
    time_now = now.strftime("%H:%M:%S")
    if time_now == first_schedule_time:
        print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num))
        start_time = time.time()
        urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num,
                       ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug)
        total_sending_time_seconds = time.time() - start_time
        print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num))
        round_num = round_num + 1
        if round_num >= max_round:
            print("-" * 50)
            print('Finished all test with {0} rounds!!!'.format(max_round))
            break
        else:
            print("-" * 50)
            print('Please make sure clear cache before the next schedule time!!!')
            #now = datetime.datetime.now()
            #date_str = now.strftime("%Y-%m-%d ")
            #last_schedule_time_str = date_str + first_schedule_time
            # Next round starts auth_cache_timeout seconds after the previous
            # scheduled time, plus a fixed 60 second safety margin.
            last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S')
            nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds
            first_schedule_time = nexttime.strftime("%H:%M:%S")
            print('Next_schedule_time={0}...'.format(first_schedule_time))
            #time.sleep(sleep_time)
    else:
        #print('time_now=',time_now)
        pass
#thread_pool.close()
#initial_requests_session(ip=ip,user=ntlm_user)
|
2,759 | fbd5c7fa335d6bde112e41a55d15aee31e3ebaf7 | import os, sys
sys.path.append('./Pytorch-UNet/')
import torch
from torch import optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
import wandb
from datasets import parse_dataset_args, create_dataset
from wt_utils import wt, create_filters, load_checkpoint, load_weights
from arguments import parse_args
from unet.unet_model import UNet_NTail_128_Mod
from train import train_unet256
from logger import Logger
if __name__ == "__main__":
    # Set up logger
    logger = Logger()
    # Accelerate training with benchmark true
    # (cudnn autotunes conv kernels; beneficial when input sizes are fixed)
    torch.backends.cudnn.benchmark = True
    # Parse arguments & log
    args = parse_args()
    logger.update_args(args)
    # Create output directory
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    else:
        print('WARNING: Output directory already exists and will be overwriting (if not resuming)')
    # Initialize wandb
    wandb.init(project=args.project_name)
    # Create filters for dataloader
    filters_cpu = create_filters(device='cpu')
    # Create transforms
    default_transform = transforms.Compose([
        transforms.CenterCrop(args.image_size),
        transforms.Resize(args.image_size),
        transforms.ToTensor()
    ])
    # Parsing dataset arguments
    ds_name, classes = parse_dataset_args(args.dataset)
    # Create train dataset
    train_dataset = create_dataset(ds_name, args.train_dir, transform=default_transform, classes=classes[0] if classes else None)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)
    # Create validation dataset
    # NOTE(review): the validation loader uses shuffle=True and drop_last=True,
    # which is unusual for evaluation -- confirm this is intentional.
    valid_dataset = create_dataset(ds_name, args.valid_dir, transform=default_transform, classes=classes[1] if classes else None)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)
    # Load 128 model
    print('Loading model 128 weights')
    model_128 = UNet_NTail_128_Mod(n_channels=12, n_classes=3, n_tails=12, bilinear=True).to(args.device)
    model_128 = load_weights(model_128, args.model_128_weights, args)
    # Model and optimizer
    model = UNet_NTail_128_Mod(n_channels=48, n_classes=3, n_tails=48, bilinear=True).to(args.device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Mutable training state shared with the train loop (iteration counter).
    state_dict = {'itr': 0}
    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(args.checkpoint))
        model, optimizer, logger = load_checkpoint(model, optimizer, '256', args)
        state_dict['itr'] = args.checkpoint
    for epoch in range(args.num_epochs):
        train_unet256(epoch, state_dict, model, model_128, optimizer, train_loader, valid_loader, args, logger)
2,760 | bddba2fd710829db17c6419878ce535df0aba01c | # -*- coding: utf-8 -*-
from yuancloud import models, fields, api, _
import yuancloud.addons.decimal_precision as dp
from yuancloud.exceptions import UserError
from yuancloud.osv import fields as old_fields
class event_event(models.Model):
    """Extend ``event.event`` so each event carries sellable tickets."""
    _inherit = 'event.event'

    # Tickets of this event; a default ticket is proposed when the reference
    # product shipped with event_sale exists in the database.
    event_ticket_ids = fields.One2many(
        'event.event.ticket', 'event_id', string='Event Ticket',
        default=lambda rec: rec._default_tickets(), copy=True)

    @api.model
    def _default_tickets(self):
        """Return default ticket values (one free 'Subscription' ticket based
        on the ``event_sale.product_product_event`` product), or an empty
        recordset when that reference product is missing."""
        try:
            product = self.env.ref('event_sale.product_product_event')
            return [{
                'name': _('Subscription'),
                'product_id': product.id,
                'price': 0,
            }]
        except ValueError:
            return self.env['event.event.ticket']
class event_ticket(models.Model):
    """A sellable ticket type for an event: name, backing product, price,
    sales deadline and seat accounting."""
    _name = 'event.event.ticket'
    _description = 'Event Ticket'

    name = fields.Char('Name', required=True, translate=True)
    event_id = fields.Many2one('event.event', "Event", required=True, ondelete='cascade')
    # Product used for invoicing; domain restricts it to event-type products.
    product_id = fields.Many2one(
        'product.product', 'Product',
        required=True, domain=[("event_type_id", "!=", False)],
        default=lambda self: self._default_product_id())
    registration_ids = fields.One2many('event.registration', 'event_ticket_id', 'Registrations')
    price = fields.Float('Price', digits=dp.get_precision('Product Price'))
    deadline = fields.Date("Sales End")
    is_expired = fields.Boolean('Is Expired', compute='_is_expired')

    @api.model
    def _default_product_id(self):
        """Return the id of the generic event product, or False if absent."""
        try:
            product = self.env['ir.model.data'].get_object('event_sale', 'product_product_event')
            return product.id
        except ValueError:
            return False

    @api.one
    @api.depends('deadline')
    def _is_expired(self):
        """A ticket is expired once today's date, evaluated in the event's
        timezone, is past the sales deadline. No deadline means never expired."""
        if self.deadline:
            current_date = fields.Date.context_today(self.with_context({'tz': self.event_id.date_tz}))
            self.is_expired = self.deadline < current_date
        else:
            self.is_expired = False

    # FIXME non-stored fields wont ends up in _columns (and thus _all_columns), which forbid them
    # to be used in qweb views. Waiting a fix, we create an old function field directly.
    """
    price_reduce = fields.Float("Price Reduce", compute="_get_price_reduce", store=False,
        digits=dp.get_precision('Product Price'))
    @api.one
    @api.depends('price', 'product_id.lst_price', 'product_id.price')
    def _get_price_reduce(self):
        product = self.product_id
        discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
        self.price_reduce = (1.0 - discount) * self.price
    """
    def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
        """Old-API compute for ``price_reduce``: ticket price after applying
        the product's list-price discount ratio."""
        res = dict.fromkeys(ids, 0.0)
        for ticket in self.browse(cr, uid, ids, context=context):
            product = ticket.product_id
            discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
            res[ticket.id] = (1.0 - discount) * ticket.price
        return res

    # Old-style function field, kept until the FIXME above is resolved.
    _columns = {
        'price_reduce': old_fields.function(_get_price_reduce, type='float', string='Price Reduce',
            digits_compute=dp.get_precision('Product Price')),
    }

    # seats fields
    seats_availability = fields.Selection(
        [('limited', 'Limited'), ('unlimited', 'Unlimited')],
        'Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
    seats_max = fields.Integer('Maximum Available Seats',
        help="Define the number of available tickets. If you have too much registrations you will "
             "not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
    seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
    seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
    seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
    seats_used = fields.Integer(compute='_compute_seats', store=True)

    @api.multi
    @api.depends('seats_max', 'registration_ids.state')
    def _compute_seats(self):
        """ Determine reserved, available, reserved but unconfirmed and used seats. """
        # initialize fields to 0 + compute seats availability
        for ticket in self:
            ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
            ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
        # aggregate registrations by ticket and by state
        if self.ids:
            state_field = {
                'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
                'done': 'seats_used',
            }
            # Single grouped SQL query instead of per-ticket ORM reads.
            query = """ SELECT event_ticket_id, state, count(event_id)
                        FROM event_registration
                        WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
                        GROUP BY event_ticket_id, state
                    """
            self._cr.execute(query, (tuple(self.ids),))
            for event_ticket_id, state, num in self._cr.fetchall():
                ticket = self.browse(event_ticket_id)
                ticket[state_field[state]] += num
        # compute seats_available
        for ticket in self:
            if ticket.seats_max > 0:
                ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)

    @api.one
    @api.constrains('registration_ids', 'seats_max')
    def _check_seats_limit(self):
        """Forbid saving a limited ticket once it is oversold."""
        if self.seats_max and self.seats_available < 0:
            raise UserError(_('No more available seats for the ticket'))

    @api.onchange('product_id')
    def onchange_product_id(self):
        """Default the ticket price from the selected product's list price."""
        price = self.product_id.list_price if self.product_id else 0
        return {'value': {'price': price}}
class event_registration(models.Model):
    """Extend ``event.registration`` with its link to the sale order and sale
    order line it originates from, plus the related constraints and logging."""
    _inherit = 'event.registration'

    event_ticket_id = fields.Many2one('event.event.ticket', 'Event Ticket')
    # in addition to origin generic fields, add real relational fields to correctly
    # handle attendees linked to sale orders and their lines
    # TDE FIXME: maybe add an onchange on sale_order_id + origin
    sale_order_id = fields.Many2one('sale.order', 'Source Sale Order', ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line', 'Sale Order Line', ondelete='cascade')

    @api.one
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Forbid a registration once its (limited) ticket is oversold."""
        if self.event_ticket_id.seats_max and self.event_ticket_id.seats_available < 0:
            raise UserError(_('No more available seats for this ticket'))

    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation when any linked sale order is still draft
        (the registration must wait for the order to be confirmed)."""
        res = super(event_registration, self)._check_auto_confirmation()
        if res:
            orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res

    @api.model
    def create(self, vals):
        """Create the registration and, when it originates from a sale order,
        post a chatter message referencing the event, ticket and order."""
        res = super(event_registration, self).create(vals)
        if res.origin or res.sale_order_id:
            message = _("The registration has been created for event %(event_name)s%(ticket)s from sale order %(order)s") % ({
                'event_name': '<i>%s</i>' % res.event_id.name,
                'ticket': res.event_ticket_id and _(' with ticket %s') % (('<i>%s</i>') % res.event_ticket_id.name) or '',
                'order': res.origin or res.sale_order_id.name})
            res.message_post(body=message)
        return res

    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(event_registration, self)._prepare_attendee_values(registration)
        if line_id:
            # BUG FIX: 'event_id' appeared twice in this dict literal; the
            # redundant duplicate entry has been removed (same value, so no
            # behavior change, but it hid the copy-paste error).
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
|
2,761 | 65a9f732fc8c7b9c63f6ef0d7b2172bb4138a895 | """
Copyright (C) 2019-2020 Zilliz. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import requests
original_table_name = "raw_data"
table_name = "nyctaxi"
csv_path = "/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv"
SCOPE = "nyc_taxi"
def _get_line_count(file):
with open(file, "r") as f:
return len(f.readlines())
class TestScope():
    """Ordered end-to-end tests of the Arctern REST service: create a scope,
    load the NYC-taxi CSV, run queries and each map-rendering endpoint, then
    drop the tables and remove the scope. Requires a running server at
    host:port (pytest fixtures) and depends on pytest-ordering for sequence."""

    @pytest.mark.run(order=1)
    def test_create_scope(self, host, port):
        """POST /scope and remember the returned scope id for later tests."""
        url = "http://" + host + ":" + port + "/scope"
        r = requests.post(url=url)
        print(r.text)
        assert r.status_code == 200
        global SCOPE  # pylint: disable=global-statement
        SCOPE = r.json()['scope']

    @pytest.mark.run(order=2)
    def test_load_file(self, host, port):
        """POST /loadfile to register the raw CSV as a table with its schema."""
        url = "http://" + host + ":" + port + "/loadfile"
        payload = {
            "scope": SCOPE,
            "tables": [
                {
                    "name": original_table_name,
                    "format": "csv",
                    "path": csv_path,
                    "options": {
                        "header": "True",
                        "delimiter": ","
                    },
                    "schema": [
                        {"VendorID": "string"},
                        {"tpep_pickup_datetime": "string"},
                        {"tpep_dropoff_datetime": "string"},
                        {"passenger_count": "long"},
                        {"trip_distance": "double"},
                        {"pickup_longitude": "double"},
                        {"pickup_latitude": "double"},
                        {"dropoff_longitude": "double"},
                        {"dropoff_latitude": "double"},
                        {"fare_amount": "double"},
                        {"tip_amount": "double"},
                        {"total_amount": "double"},
                        {"buildingid_pickup": "long"},
                        {"buildingid_dropoff": "long"},
                        {"buildingtext_pickup": "string"},
                        {"buildingtext_dropoff": "string"}
                    ]
                }
            ]
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    # TODO: neccessary for /savefile? not convenient for cleaning up
    @pytest.mark.run(order=3)
    def test_table_schema(self, host, port):
        """GET /table/schema and check all 16 declared columns are reported."""
        url = "http://" + host + ":" + port + "/table/schema?table={}&scope={}".format(original_table_name, SCOPE)
        r = requests.get(url=url)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['schema']) == 16

    @pytest.mark.run(order=4)
    def test_num_rows(self, host, port):
        """Row count must equal the CSV line count minus the header line."""
        url = "http://" + host + ":" + port + "/query"
        sql = "select count(*) as num_rows from {}".format(original_table_name)
        payload = {
            "scope": SCOPE,
            "sql": sql,
            "collect_result": "1"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['result']) == 1
        assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path) - 1

    @pytest.mark.run(order=5)
    def test_query(self, host, port):
        """A LIMIT query must return exactly `limit` rows."""
        url = "http://" + host + ":" + port + "/query"
        limit = 1
        sql = "select * from {} limit {}".format(original_table_name, limit)
        payload = {
            "scope": SCOPE,
            "sql": sql,
            "collect_result": "1"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['result']) == limit

    @pytest.mark.run(order=6)
    def test_create_table(self, host, port):
        """Derive a cleaned table: parse timestamps and keep only rows with
        coordinates inside valid lon/lat ranges."""
        url = "http://" + host + ":" + port + "/query"
        payload = {
            "scope": SCOPE,
            "sql": "create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))".format(table_name, original_table_name),
            "collect_result": "0"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=7)
    def test_pointmap(self, host, port):
        """Render a point map of pickups inside a Manhattan polygon."""
        url = "http://" + host + ":" + port + "/pointmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "point_color": "#2DEF4A",
                "point_size": 3,
                "opacity": 0.5
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=8)
    def test_weighted_pointmap(self, host, port):
        """Render a weighted point map (color by tip, size by fare)."""
        url = "http://" + host + ":" + port + "/weighted_pointmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "color_gradient": ["#0000FF", "#FF0000"],
                "color_bound": [0, 2],
                "size_bound": [0, 10],
                "opacity": 1.0,
                "coordinate_system": "EPSG:4326"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=9)
    def test_heatmap(self, host, port):
        """Render a heat map weighted by passenger count."""
        url = "http://" + host + ":" + port + "/heatmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "map_zoom_level": 10,
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=10)
    def test_choroplethmap(self, host, port):
        """Render a choropleth of dropoff buildings weighted by passengers."""
        url = "http://" + host + ":" + port + "/choroplethmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "color_gradient": ["#0000FF", "#FF0000"],
                "color_bound": [2.5, 5],
                "opacity": 1,
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=11)
    def test_icon_viz(self, host, port):
        """Render pickup points using the taxi.png icon next to this file."""
        url = "http://" + host + ":" + port + "/icon_viz"
        import os
        dir_path = os.path.dirname(os.path.realpath(__file__))
        png_path = dir_path + "/taxi.png"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                'width': 1024,
                'height': 896,
                'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],
                'coordinate_system': 'EPSG:4326',
                'icon_path': png_path
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=12)
    def test_fishnetmap(self, host, port):
        """Render a fishnet (grid-cell) map weighted by passenger count."""
        url = "http://" + host + ":" + port + "/fishnetmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "color_gradient": ["#0000FF", "#FF0000"],
                "cell_size": 4,
                "cell_spacing": 1,
                "opacity": 1.0,
                "coordinate_system": "EPSG:4326",
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=13)
    def test_drop_table(self, host, port):
        """Clean up: drop both the derived and the raw tables."""
        url = "http://" + host + ":" + port + '/query'
        sql1 = "drop table if exists {}".format(table_name)
        sql2 = "drop table if exists {}".format(original_table_name)
        payload1 = {
            "scope": SCOPE,
            "sql": sql1,
            "collect_result": "0"
        }
        payload2 = {
            "scope": SCOPE,
            "sql": sql2,
            "collect_result": "0"
        }
        r = requests.post(url=url, json=payload1)
        print(r.text)
        assert r.status_code == 200
        r = requests.post(url=url, json=payload2)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=14)
    def test_command(self, host, port):
        """POST arbitrary Python (a Monte-Carlo Pi estimate run on Spark) to
        the /command endpoint and expect it to execute successfully."""
        url = "http://" + host + ":" + port + '/command'
        command = """
from __future__ import print_function
import sys
from random import random
from operator import add
partitions = 2
n = 100000 * partitions
def f(_):
    x = random() * 2 - 1
    y = random() * 2 - 1
    return 1 if x ** 2 + y ** 2 <= 1 else 0
count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))
"""
        payload = {
            "scope": SCOPE,
            "command": command
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=15)
    def test_remove_scope(self, host, port):
        """DELETE the scope created by the first test."""
        scope = SCOPE
        url = "http://" + host + ":" + port + "/scope/" + scope
        r = requests.delete(url=url)
        print(r.text)
        assert r.status_code == 200
|
2,762 | 6e434ff213166768a6adadf99dc5d6d8611fa2ba | import os
import shutil
import numpy as np
import unittest
from lsst.ts.wep.Utility import FilterType, runProgram
from lsst.ts.wep.WepController import WepController
from lsst.ts.wep.ctrlIntf.RawExpData import RawExpData
from lsst.ts.aoclcSim.Utility import getModulePath
from lsst.ts.aoclcSim.WepCmpt import WepCmpt
class TestWepCmpt(unittest.TestCase):
    """ Test the WepCmpt class."""

    def setUp(self):
        """Create a temporary output/ISR directory tree and a WepCmpt with
        default survey parameters (REF filter, boresight (0,0), rotation 0)."""
        self.outputDir = os.path.join(getModulePath(), "tests", "tmp")
        self._makeDir(self.outputDir)
        isrDirName = "input"
        isrDir = os.path.join(self.outputDir, isrDirName)
        self._makeDir(isrDir)
        self.wepCmpt = WepCmpt(isrDir)
        # Set the survey paramters
        self.wepCmpt.setFilter(FilterType.REF)
        self.wepCmpt.setBoresight(0.0, 0.0)
        self.wepCmpt.setRotAng(0.0)

    def _makeDir(self, newDir):
        # Create the directory, tolerating that it already exists.
        os.makedirs(newDir, exist_ok=True)

    def tearDown(self):
        """Disconnect the component and delete the temporary directory."""
        self.wepCmpt.disconnect()
        shutil.rmtree(self.outputDir)

    def testGetWepController(self):
        """getWepController() must return a WepController instance."""
        wepCntlr = self.wepCmpt.getWepController()
        self.assertTrue(isinstance(wepCntlr, WepController))

    def testGetFilter(self):
        """The filter set in setUp() should be reported back."""
        filterType = self.wepCmpt.getFilter()
        self.assertEqual(filterType, FilterType.REF)

    def testSetFilter(self):
        """setFilter() round-trips through getFilter()."""
        filterType = FilterType.R
        self.wepCmpt.setFilter(filterType)
        self.assertEqual(self.wepCmpt.getFilter(), filterType)

    def testGetBoresight(self):
        """Default boresight is (0, 0) degrees."""
        raInDeg, decInDeg = self.wepCmpt.getBoresight()
        self.assertEqual(raInDeg, 0.0)
        self.assertEqual(decInDeg, 0.0)

    def testSetBoresight(self):
        """setBoresight() round-trips through getBoresight()."""
        raInDeg = 10.0
        decInDeg = 20.0
        self.wepCmpt.setBoresight(raInDeg, decInDeg)
        raInDegInWepCmpt, decInDegInWepCmpt = self.wepCmpt.getBoresight()
        self.assertEqual(raInDegInWepCmpt, raInDeg)
        self.assertEqual(decInDegInWepCmpt, decInDeg)

    def testGetRotAng(self):
        """Default rotation angle is 0 degrees."""
        rotAngInDeg = self.wepCmpt.getRotAng()
        self.assertEqual(rotAngInDeg, 0.0)

    def testSetRotAng(self):
        """setRotAng() round-trips through getRotAng()."""
        rotAngInDeg = 10.0
        self.wepCmpt.setRotAng(rotAngInDeg)
        self.assertEqual(self.wepCmpt.getRotAng(), rotAngInDeg)

    def testIngestCalibs(self):
        """Ingesting fake flats consumes (moves) the 6 generated files."""
        sensorNameList = ["R22_S11"]
        fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)
        numOfFile = self._getNumOfFileInFolder(fakeFlatDir)
        self.assertEqual(numOfFile, 6)
        self.wepCmpt.ingestCalibs(fakeFlatDir)
        numOfFile = self._getNumOfFileInFolder(fakeFlatDir)
        self.assertEqual(numOfFile, 0)

    def _makeCalibs(self, outputDir, sensorNameList):
        # Generate fake flat calibration files for the given sensors and
        # return the directory that holds them.
        fakeFlatDirName = "fake_flats"
        fakeFlatDir = os.path.join(self.outputDir, fakeFlatDirName)
        self._makeDir(fakeFlatDir)
        detector = " ".join(sensorNameList)
        self._genFakeFlat(fakeFlatDir, detector)
        return fakeFlatDir

    def _genFakeFlat(self, fakeFlatDir, detector):
        # makeGainImages.py writes into the current working directory, so
        # temporarily chdir into the target folder.
        currWorkDir = os.getcwd()
        os.chdir(fakeFlatDir)
        self._makeFakeFlat(detector)
        os.chdir(currWorkDir)

    def _makeFakeFlat(self, detector):
        # Invoke the external flat-generation script for the detector list.
        command = "makeGainImages.py"
        argstring = "--detector_list %s" % detector
        runProgram(command, argstring=argstring)

    def _getNumOfFileInFolder(self, folder):
        # Count regular files (not subdirectories) directly inside `folder`.
        return len([name for name in os.listdir(folder)
                    if os.path.isfile(os.path.join(folder, name))])

    def testGetSkyFile(self):
        """Sky file defaults to the empty string."""
        skyFile = self.wepCmpt.getSkyFile()
        self.assertEqual(skyFile, "")

    def testSetSkyFile(self):
        """setSkyFile() round-trips through getSkyFile()."""
        skyFile = "testSetSkyFile"
        self.wepCmpt.setSkyFile(skyFile)
        self.assertEqual(self.wepCmpt.getSkyFile(), skyFile)

    def testCalculateWavefrontErrorsComCam(self):
        """End-to-end wavefront calculation on the ComCam test data set."""
        # Make the calibration products and do the ingestion
        sensorNameList = ["R22_S11", "R22_S12"]
        fakeFlatDir = self._makeCalibs(self.outputDir, sensorNameList)
        self.wepCmpt.ingestCalibs(fakeFlatDir)
        # Set the skyFile
        repackagedDir = os.path.join(getModulePath(), "tests", "testData",
                                     "comcamRepackagedData")
        skyFilePath = os.path.join(repackagedDir, "skyComCamInfo.txt")
        self.wepCmpt.setSkyFile(skyFilePath)
        # Collect the wavefront data
        intraRawExpData = RawExpData()
        intraObsId = 9006002
        intraRawExpDir = os.path.join(repackagedDir, "intra")
        intraRawExpData.append(intraObsId, 0, intraRawExpDir)
        extraRawExpData = RawExpData()
        extraObsId = 9006001
        extraRawExpDir = os.path.join(repackagedDir, "extra")
        extraRawExpData.append(extraObsId, 0, extraRawExpDir)
        # Calculate the wavefront error
        wfErrMap = self.wepCmpt.calculateWavefrontErrorsComCam(intraRawExpData,
                                                               extraRawExpData)
        self.assertEqual(len(wfErrMap), 2)
        for wfErr in wfErrMap.values():
            self.assertEqual(wfErr.argmax(), 1)
if __name__ == "__main__":
    # Run the unit test suite when this file is executed directly.
    unittest.main()
|
2,763 | c6174fae929366cabb8da3d810df705b19895c1c | ๏ปฟ"""
Function of main.py:
config loader
hprams loader
feature extraction
Call model training and validation
Model Save and Load
Call model validation
่ฝฝๅ
ฅ่ฎญ็ปๅๆฐ
่ฝฝๅ
ฅๆๅฎๆจกๅ่ถ
ๅๆฐ
่ฐ็จ็นๅพๆๅ
่ฐ็จๆจกๅ่ฎญ็ปๅ้ช่ฏ
ๆจกๅไฟๅญไธ่ฝฝๅ
ฅ
่ฐ็จๆจกๅ้ช่ฏ
"""
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
usage: main.py [options]
options:
--data_dir=<dir> Where to get training data [default: ./datasets/MNIST/].
--base_log_dir=<dir> Where to save models [default: ./generated/logdir/].
--model Which model to use [default: autoencoder_vae].
--experiment_name Name of experiment defines the log path [default: Date-of-now].
--load_model=<dir> Where to load checkpoint, if necessary [default: None]
--total_epoch Max num of training epochs [default: by the model].
--eval_per_epoch Model eval per n epoch [default: by the model].
--save_per_epoch Model save per n epoch [default: by the model].
--batch_size Batch size [default: by the model].
-h, --help Show this help message and exit
"""
import argparse
import sys
import datetime
from tqdm import tqdm
import numpy as np
import os
import tensorflow as tf
from model.model_example import model_example
from model.deep_mnist import deep_mnist
from model.VAE.autoencoder_vae import autoencoder
from model.deep_mnist_with_Res import deep_mnist_with_Res
from preprocessing_util import autoencoder_vae_add_noise
from training_util import save,load
import params
FLAGS = None
def prepare_params(FLAGS):
    """Fill in derived settings on *FLAGS*.

    If no experiment name was given (the literal "default"), use the current
    timestamp. The log directory is then derived from the base log dir, the
    experiment name and the model name. Returns the mutated FLAGS object.
    """
    if FLAGS.experiment_name == "default":
        stamp = datetime.datetime.now()
        FLAGS.experiment_name = stamp.strftime('%Y%m%d%H%M%S')
    FLAGS.log_dir = '{0}{1}_{2}/'.format(
        FLAGS.base_log_dir, FLAGS.experiment_name, FLAGS.model)
    return FLAGS
def main():
    """Build the model selected by FLAGS.model, train it on the .npy data in
    FLAGS.data_dir, periodically evaluate and checkpoint, and write
    TensorBoard summaries under FLAGS.log_dir.

    Reads the module-level FLAGS populated in the __main__ block. Side
    effects: writes summaries and checkpoints to disk.
    """
    # Avoid tensorboard error on IPython
    tf.reset_default_graph()
    # ---- Prepare data ----
    train_data = np.load(os.path.join(FLAGS.data_dir, 'train_data.npy'))
    train_labels = np.load(os.path.join(FLAGS.data_dir, 'train_labels.npy'))
    test_data = np.load(os.path.join(FLAGS.data_dir, 'test_data.npy'))
    test_labels = np.load(os.path.join(FLAGS.data_dir, 'test_labels.npy'))
    train_set = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
    test_set = tf.data.Dataset.from_tensor_slices((test_data, test_labels))
    if FLAGS.model == "autoencoder_vae":
        # Denoising autoencoder: corrupt the inputs, keep clean targets.
        train_set = train_set.map(autoencoder_vae_add_noise)
        test_set = test_set.map(autoencoder_vae_add_noise)
    # Do reshuffle to avoid biased estimation when model reloaded
    train_set = train_set.shuffle(
        FLAGS.batch_size, reshuffle_each_iteration=True).batch(
        FLAGS.batch_size).repeat(10)
    test_set = test_set.shuffle(
        FLAGS.batch_size, reshuffle_each_iteration=True).batch(
        FLAGS.batch_size).repeat(10)
    trainIter = train_set.make_initializable_iterator()
    next_examples, next_labels = trainIter.get_next()
    testIter = test_set.make_initializable_iterator()
    test_examples, text_labels = testIter.get_next()
    # ---- Create the model (placeholders + fetch lists per model type) ----
    if FLAGS.model == "deep_mnist":
        hp = params.Deep_MNIST_model_params
        x = tf.placeholder(tf.float32, [None, hp.input_dim])
        y = tf.placeholder(tf.float32, [None, hp.output_dim])
        keep_probe = tf.placeholder(tf.float32)
        model = deep_mnist(hp, x, y, keep_probe)
        train_fetch_list = [model.train_step, model.merged]
        test_fetch_list = [model.accuracy, model.merged]
    if FLAGS.model == "deep_mnist_AdamW":
        hp = params.Deep_MNIST_model_params
        x = tf.placeholder(tf.float32, [None, hp.input_dim])
        y = tf.placeholder(tf.float32, [None, hp.output_dim])
        keep_probe = tf.placeholder(tf.float32)
        model = deep_mnist(hp, x, y, keep_probe, use_adamW=True)
        train_fetch_list = [model.train_step, model.merged]
        test_fetch_list = [model.accuracy, model.merged]
    if FLAGS.model == "deep_mnist_with_Res":
        hp = params.Deep_MNIST_model_params
        x = tf.placeholder(tf.float32, [None, hp.input_dim])
        y = tf.placeholder(tf.float32, [None, hp.output_dim])
        keep_probe = tf.placeholder(tf.float32)
        model = deep_mnist_with_Res(hp, x, y, keep_probe)
        train_fetch_list = [model.train_step, model.merged]
        test_fetch_list = [model.accuracy, model.merged]
    if FLAGS.model == "autoencoder_vae":
        hp = params.autoencoder_vae_model_params
        x = tf.placeholder(tf.float32, [None, hp.input_dim])
        x_hat = tf.placeholder(tf.float32, [None, hp.input_dim])
        keep_probe = tf.placeholder(tf.float32)
        model = autoencoder(hp, x, x_hat, keep_probe)
        # Reuse the generic feed-dict key: the VAE target is the clean input.
        y = x_hat
        train_fetch_list = [model.train_step, model.merged]
        test_fetch_list = [model.loss_mean, model.merged]
    # ---- Prepare tensorboard ----
    train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', model.train_step.graph)
    test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
    print('checkout result of this time with "tensorboard --logdir={}"'.format(FLAGS.log_dir))
    print('For result compare run "tensorboard --logdir={}"'.format(FLAGS.base_log_dir))
    session_conf = tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            allow_growth=True,
        ),
    )
    saver = tf.train.Saver()
    # ---- Start tf session ----
    with tf.Session(config=session_conf) as sess:
        # BUG FIX: epoch is referenced in the finally-block below; define it
        # up-front so a failure before the loop cannot raise NameError there.
        epoch = 0
        try:
            sess.run(tf.global_variables_initializer())
            sess.run(trainIter.initializer)
            sess.run(testIter.initializer)
            # Restore variables from disk.
            if FLAGS.load_model != None:
                load(saver, sess, FLAGS.load_model)
            for epoch in tqdm(range(FLAGS.total_epoch)):
                batch_xs, batch_ys = sess.run([next_examples, next_labels])
                train_feed_dict = {x: batch_xs,
                                   y: batch_ys,
                                   keep_probe: hp.keep_probe}
                _, summary = sess.run(train_fetch_list, feed_dict=train_feed_dict)
                if epoch % 10 == 0:
                    train_writer.add_summary(summary, epoch)
                if epoch % FLAGS.eval_per_epoch == 0:
                    batch_xs, batch_ys = sess.run([test_examples, text_labels])
                    test_feed_dict = {x: batch_xs,
                                      y: batch_ys,
                                      keep_probe: hp.keep_probe_test}
                    mertics, summary = sess.run(test_fetch_list, feed_dict=test_feed_dict)
                    test_writer.add_summary(summary, epoch)
                if epoch % FLAGS.save_per_epoch == 0:
                    save(saver, sess, FLAGS.log_dir, epoch)
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop training early; the finally block still
            # saves the latest checkpoint.
            pass
        except Exception:
            # BUG FIX: this was a bare "except: pass" that silently swallowed
            # every error (including real training failures). Report the
            # traceback so failures are visible, then still save/close below.
            import traceback
            traceback.print_exc()
        finally:
            save(saver, sess, FLAGS.log_dir, epoch)
            train_writer.close()
            test_writer.close()
if __name__ == '__main__':
    # Default hyper-parameters supply the argparse defaults below.
    default_hp = params.default_hyper_params
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default="./datasets/MNIST/")
    parser.add_argument('--experiment_name', type=str, default="deep_mnist_AdamW_wd1e4")
    parser.add_argument('--base_log_dir', type=str, default="./generated/logdir/")
    parser.add_argument('--model', type=str, default="deep_mnist_AdamW")
    parser.add_argument('--load_model', type=str, default=None)
    parser.add_argument('--total_epoch', type=int, default=default_hp.num_epochs)
    parser.add_argument('--eval_per_epoch', type=int, default=default_hp.eval_per_epoch)
    parser.add_argument('--save_per_epoch', type=int, default=default_hp.save_per_epoch)
    parser.add_argument('--batch_size', type=int, default=default_hp.batch_size)
    # parse_known_args: ignore unrecognized flags (e.g. from notebook runners).
    FLAGS, unparsed = parser.parse_known_args()
    FLAGS = prepare_params(FLAGS)
    main()
2,764 | a74653f01b62445c74c8121739bd9185ce21c85a | import urllib.request
import http.cookiejar
import requests
import re
import sys
import time
import json
from bs4 import BeautifulSoup
head = {
"Host": "www.pkuhelper.com",
"Accept": "*/*",
"Accept-Language": "zh-Hans-CN;q=1",
"Connection": "keep-alive",
"Accept-Encoding": "gzip, deflate",
"User-Agent": "PKU Helper/2.3.8 (iPhone; iOS 12.1; Scale/3.00)"
}
url = "http://162.105.205.61/services/pkuhole/api.php"
# Hole-reply crawler: fetches each reply's cid (reply id), text content, and
# poster name for a given PKU Hole post. (Original comment was mis-encoded.)
def crawler(pid):
    """Fetch all replies of one PKU Hole post.

    :arg pid: id of the post whose comments are fetched
    :return: three parallel lists ``(cids, texts, names)``; whatever was
        collected so far (possibly empty lists) if the request fails
    """
    print("hole reply start!")
    cids = []
    texts = []
    names = []
    try:
        para = {"action": "getcomment", "pid": pid, "token": "pnh3dmks5fmo00u0177qplsre44qo4fk"}
        r = requests.get(url, headers=head, params=para)
        data = json.loads(r.text)["data"]
        for t in data:
            cids.append(int(t["cid"]))
            texts.append(t["text"])
            names.append(t["name"])
        print("hole reply end!")
        return cids, texts, names
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Network failure or unexpected payload shape: degrade gracefully.
        # Previously a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        print("HOLE REPLY ERROR!!!!!!")
        return cids, texts, names
2,765 | e83b6b1f4cb12fe3b932903eddddfb0dc0e7d98d | import os, sys, datetime, csv, platform
####FUNCTIONS####
#Get Creation Time
def get_lastupdate_date(path):
    """Return the last-modification time of *path* as a Unix timestamp."""
    return os.stat(path).st_mtime
#Get Date From String
def convertIntToTimestamp(timeint):
    """Render a Unix timestamp (seconds) as a local-time string."""
    moment = datetime.datetime.fromtimestamp(timeint)
    return str(moment)
#Get Filename
def getFilename(name):
    """Return the final path component (the file name) of *name*."""
    return os.path.split(name)[1]
# Get File Creation Time
def creation_date(path):
    """
    Try to get the date that a file was created, falling back to when it was
    last modified if that isn't possible.
    See http://stackoverflow.com/a/39501288/1709587 for explanation.
    """
    if platform.system() == 'Windows':
        # On Windows st_ctime (exposed via getctime) is the creation time.
        return os.path.getctime(path)
    else:
        stat = os.stat(path)
        try:
            # macOS / BSD expose the birth time directly on the stat result.
            return stat.st_birthtime
        except AttributeError:
            # We're probably on Linux. No easy way to get creation dates here,
            # so we'll settle for when its content was last modified.
            return stat.st_mtime
#Print List
def print_list(x):
    """Print each element of *x* on its own line and return *x* unchanged."""
    for item in x:
        print(item)
    return x
#Listing Files
def fileList(source, filetype='.als'):
    """Recursively list full paths under *source* whose names end in *filetype*."""
    matches = []
    for root, _dirs, names in os.walk(source):
        matches.extend(os.path.join(root, name)
                       for name in names if name.endswith(filetype))
    return matches
def mylistdir(directory):
    """A specialized version of os.listdir() that ignores entries whose
    names start with a leading period (dot-files)."""
    visible = []
    for entry in os.listdir(directory):
        if not entry.startswith('.'):
            visible.append(entry)
    return visible
def collectElements(dir):
    """Append every '.als' file name in *dir* to the global ``thefiles`` list.

    Bug fix: the original iterated each name character-by-character
    (``for directory in dir: for filename in directory``), so the
    ``endswith(".als")`` test could never match and nothing was ever
    collected.

    :arg dir: iterable of file names (e.g. the result of ``mylistdir()``)
    :return: the global ``thefiles`` list with the matches appended
    """
    for filename in dir:
        if filename.endswith(".als"):
            thefiles.append(filename)
    return thefiles
## INPUT DIRECTORIES
subpath = []
subdirs = []
thefiles = []
thelist = []
## Examples of Directories
#/Users/blakenicholson/Documents/Personal/Projects/Music Production/Ableton Projects
#/Volumes/Samsung_T3/Old Ableton Projects/1.RELEASED/Neuromansah - DumbBlake Project
filePath = r"/Users/blakenicholson/Dropbox/Ableton Projects"
#filePath = raw_input('File path would you like to use: ')
dirs = mylistdir(filePath)
print(dirs)
print(collectElements(dirs))
# Write one "name, updated, created, path" line per .als file to a txt file.
# Bug fix: the original did `file.close` without parentheses, so the file was
# never explicitly closed (it also shadowed the builtin `file`); a `with`
# block now guarantees closing.
with open("testtext.txt", "w+") as report:
    for item in fileList(filePath):
        report.write(os.path.basename(item) + ", " + convertIntToTimestamp(get_lastupdate_date(item)) + ", " + convertIntToTimestamp(creation_date(item)) + ", " + os.path.abspath(item) + "\n")
# Same data again as a CSV with a header row.
with open('testcsv.csv', 'w+') as fp:
    a = csv.writer(fp, delimiter=',')
    a.writerow(['File Name', 'Updated Date', 'Created Date', 'Path'])
    for item in fileList(filePath):
        a.writerow([os.path.basename(item), convertIntToTimestamp(get_lastupdate_date(item)), convertIntToTimestamp(creation_date(item)), os.path.abspath(item)])
|
2,766 | 7e11a33d82926ed544640a0192e905d373f575da | # Generated by Django 3.2.3 on 2021-05-23 19:41
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``Smathphone`` model to ``Smartphone``."""

    dependencies = [
        ('main_app', '0002_notebook_smathphone'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Smathphone',
            new_name='Smartphone',
        ),
    ]
|
2,767 | 8f30de819412b03ef12009320978cb1becd85131 | #!/usr/bin/python
#Program for functions pay scale from user input
hrs = raw_input("Enter Hours:")
h = float(hrs)
rate = raw_input("Enter Rate:")
r = float(rate)
def computepay(h, r):
    """Return gross pay: straight time up to 40 hours, time-and-a-half for
    every hour beyond 40."""
    if h <= 40:
        return h * r
    overtime = h - 40
    return 40 * r + overtime * 1.5 * r
print computepay(h,r)
|
2,768 | 943db90aa7721ddad3d7f5103c4d398fbf4e143b | import sys
import utils
#import random
def findNearestPoint(points, no_used, src):
    """Find the closest not-yet-used point to ``points[src]``.

    :arg points: sequence of points accepted by ``utils.length()``
    :arg no_used: parallel list of flags, True = point still available
    :arg src: index of the reference point
    :return: ``(index, distance)`` of the nearest available point; if no
        point is available, ``(src, sys.float_info.max)``.
    """
    best_idx = src
    best_dist = sys.float_info.max
    for idx in range(len(points)):
        if not no_used[idx] or idx == src:
            continue
        dist = utils.length(points[src], points[idx])
        if dist < best_dist:
            best_idx = idx
            best_dist = dist
    return best_idx, best_dist
def solve(points):
    """Nearest-neighbour construction heuristic for a TSP tour.

    Starts at point 0, repeatedly hops to the closest unvisited point, then
    closes the cycle back to the start.

    :arg points: sequence of points compatible with ``utils.length()``
    :return: ``(total tour length, tour)`` where *tour* is a list of indices
    """
    # get an initial tour by NearestPoint method
    tour = [0 for i in range(len(points))]
    no_used = [True for i in range(len(points))]  # True = not visited yet
    totalDist = 0.0
    # (alternative random starting point, kept for reference)
    # src =int( random.random()*(len(points)-1))
    # no_used[src] = False
    # tour[0]=src
    src =0
    no_used[0] = False
    for i in range(1, len(points)):
        dest, minDist = findNearestPoint(points, no_used, src) #find Nearest Point
        tour[i] = dest
        no_used[dest] = False #have been used
        src = dest
        totalDist += minDist
    #plus distance between last point and initial point
    return totalDist + utils.length(points[tour[-1]], points[tour[0]]), tour
|
2,769 | cd5929496b13dd0d5f5ca97500c5bb3572907cc5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Use the real python3-espeak binding when available; otherwise install a
# stub mirroring the only API used below (espeak.synth) that tells the user
# how to enable speech output.
try:
    from espeak import espeak
except ImportError:
    class espeak():
        # Called as espeak.synth(text); the stub ignores the text,
        # prints an installation hint, and returns a non-zero status.
        @classmethod
        def synth(*args):
            print('Cannot generate speech. Please, install python3-espeak module.')
            return 1
def run(*args, **kwargs):
    """Speak all positional arguments as one space-separated sentence."""
    sentence = ' '.join(str(arg) for arg in args)
    espeak.synth(sentence)
|
2,770 | 6914656a2f78fa1fe74a67bf09b017585b3eac88 | """
Main class of the interface.
It setups the experimental parameters such as the :class:`.Experiment`'s and
:class:`.Sample`, geometry (:attr:`geometry <Stratagem.geometry>`), type of
:math:`\\phi(\\rho z)` model (:attr:`prz_mode <Stratagem.prz_mode>`) and
fluorescence mode (:attr:`fluorescence <Stratagem.fluorescence>`).
"""
# Standard library modules.
import os
import ctypes as c
import logging
logger = logging.getLogger(__name__)
from operator import attrgetter
import random
import string
import functools
try:
    import winreg
except ImportError:
    try:
        import _winreg as winreg
    except ImportError:
        # Non-Windows fallback: a minimal stub so the module still imports.
        # The registry lookup in Stratagem.__init__ only runs when dll_path
        # is None, which cannot succeed off Windows anyway.
        class winreg:
            HKEY_CURRENT_USER = None

            class _PyHKEY(object):
                def __enter__(self):
                    return self

                def __exit__(self, exc_type, exc_value, traceback):
                    pass

            # Bug fix: these were plain instance methods, but the caller
            # invokes them on the class (winreg.OpenKey(hkey, subkey)), which
            # raised TypeError for the implicit self and the missing res/sam
            # arguments. classmethod/staticmethod with defaults now match the
            # real winreg call signature used in __init__.
            @classmethod
            def OpenKey(cls, key, sub_key, res=0, sam=None):
                return cls._PyHKEY()

            @staticmethod
            def QueryValueEx(key, value_name):
                return None
# Third party modules.
# Local modules.
from stratagemtools.sample import Sample, CONC_UNKNOWN, CONC_DIFF
from stratagemtools.experiment import Experiment, LINE_KA
from stratagemtools.element_properties import \
atomic_mass_kg_mol, mass_density_kg_m3
# Globals and constants variables.
_REGISTRY_KEY = "Software\SAMx\Stratagem\Configuration"
_REGISTRY_VALUENAME = 'InstallOEMDirectory'
PRZMODE_XPP = 0
""":math:`\\phi(\\rho z)` from XPP"""
PRZMODE_PAP = 1
""":math:`\\phi(\\rho z)` from PAP"""
PRZMODE_GAU = 2
""":math:`\\phi(\\rho z)` *unknown*, possibly two Gaussians"""
FLUORESCENCE_NONE = 0
"""No fluorescence"""
FLUORESCENCE_LINE = 1
"""Only characteristic fluorescence"""
FLUORESCENCE_LINE_CONT = 2
"""Characteristic and Bremsstrahlung fluorescence"""
_CONCENTRATION_FLAG_KNOWN = 0
_CONCENTRATION_FLAG_UNKNOWN = 1
_CONCENTRATION_FLAG_STOICHIOMETRIC = 2
_CONCENTRATION_FLAG_TRACE = 3
_CONCENTRATION_FLAG_DIFFERENCE = 4
class StratagemError(Exception):
    """
    Exception raised for all errors related to the STRATAGem interface.

    When the DLL reports a known error, the message is taken from the DLL
    itself (see :meth:`Stratagem._raise_error`).
    """
    pass
def _check_key(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self._key is None:
raise StratagemError('Not initialize. Call init().')
return method(self, *args, **kwargs)
return wrapper
class Stratagem:
"""
Main interface establishing a connection to the STRATAGem OEM interface and
perform calculations using SAMx's STRATAGem.
It is highly recommended to use :class:`Stratagem` as a context manager
(i.e. ``with`` statement) to ensure that the connection to the DLL is
properly closed.
For instance::
>>> with Stratagem() as strata:
... strata.prz_mode = PRZMODE_XPP
Otherwise the following series of method must be called::
>>> strata = Stratagem()
>>> strata.init()
>>> strata.prz_mode = PRZMODE_XPP
>>> strata.close()
"""
def __init__(self, dll_path=None, display_error=True):
"""
:arg dll_path: complete path to the location of ``stratadllogger.dll``
(optional). If ``None``, the path is found in the Windows registry
under ``Software\SAMx\Stratagem\Configuration``. If the DLL is not
found a :class:`StratagemError` is raised.
:type dll_path: :class:`str`
:arg display_error: whether to display a message dialog on error
:type display_error: :class:`bool`
"""
if dll_path is None:
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, _REGISTRY_KEY) as key: #@UndefinedVariable
basedir = winreg.QueryValueEx(key, _REGISTRY_VALUENAME)[0] #@UndefinedVariable
dll_path = os.path.join(basedir, 'bin', 'stratadll.dll')
cwd = os.getcwd()
try:
logger.debug("dll=%s", dll_path)
self._lib = c.WinDLL(dll_path)
finally:
os.chdir(cwd) # Change back to real cwd
logger.debug("StEnableErrorDisplay(%r)", display_error)
self._lib.StEnableErrorDisplay(c.c_bool(display_error))
self._key = None
self._cwd = os.getcwd()
self._layers = {} # layer: index
self._substrate = None
self._experiments = {} # experiment: (element, line, kratio) indexes
self._tmpstandards = []
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def _stobjectnew(self, key=None, standard=False):
if key is None:
characters = string.ascii_lowercase
key = ''.join(random.choice(characters) for _ in range(8))
key = key.encode('ascii')
if not isinstance(key, c.c_byte):
key = c.create_string_buffer(key)
bnormal_ = c.c_bool(not standard)
iniflags_ = c.c_int(0)
logger.debug("StObjectNew(key, %r, %i)", not standard, 0)
if not self._lib.StObjectNew(key, bnormal_, iniflags_):
self._raise_error("Cannot create object")
return key
def _raise_error(self, alternate=''):
"""
Raises a :class:`StratagemError`.
The error code and message of known errors are retrieved from STRATAGem.
If this is not possible, *alternate* is used as the error message.
"""
errnum_ = c.c_ulong()
errtype_ = c.c_int()
self._lib.StGetLastError(c.byref(errnum_), c.byref(errtype_))
if errnum_.value != 0:
if errtype_.value == 0:
buf_ = c.create_string_buffer(256)
self._lib.StGetMsg(errnum_, buf_, 256)
raise StratagemError(buf_.value.decode('ascii'))
elif errtype_.value == 1:
raise c.WinError(errtype_.value)
else:
raise StratagemError('Error %i' % errnum_.value)
else:
raise StratagemError(alternate)
def init(self):
"""
Initializes and setups STRATAGem.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
raise RuntimeError('Already initialized. Call close() first.')
self._key = self._stobjectnew()
self._cwd = os.getcwd()
self.reset()
def close(self):
"""
Closes the connection to the STRATAGem DLL.
It does not have to be used if :class:`Stratagem` is used as a context
manager.
"""
if self._key is not None:
logger.debug('StObjectDelete(key)')
self._lib.StObjectDelete(self._key)
self._key = None
for filepath in self._tmpstandards:
os.remove(filepath)
logger.debug('Remove temporary standard: %s', filepath)
self.reset()
def reset(self):
"""
Resets all parameters to the defaults, remove all experiments and sample.
"""
if self._key:
self._lib.StObjectReset(self._key)
os.chdir(self._cwd)
self._layers.clear() # layer: index
self._substrate = None
self._experiments.clear() # analyzed experiments
self._tmpstandards.clear()
@_check_key
def set_sample(self, sample):
"""
Sets the sample, which will be used in all subsequent calculations.
Note that only one sample can be defined.
:arg sample: sample definition
:type sample: :class:`Sample`
"""
self.reset()
for layer in sample.layers:
index = self._add_layer(layer, substrate=False)
self._layers.setdefault(layer, index)
index = self._add_layer(sample.substrate, substrate=True)
self._substrate = (sample.substrate, index)
@_check_key
def get_sample(self):
"""
Returns the current sample.
It can correspond to the sample defined by :meth:`set_sample` or the
sample resulting from the computations (see :meth:`compute`).
.. note:: a new sample is returned every time this method is called
:return: current sample
:rtype: :class:`Sample`
"""
sample = Sample(self._substrate[0].composition)
for layer in self._layers:
sample.add_layer(layer.composition, layer.thickness_m,
layer.mass_thickness_kg_m2, layer.density_kg_m3)
return sample
sample = property(get_sample, set_sample, doc="Property to set/get sample")
def _add_layer(self, layer, substrate=False, key=None):
"""
Internal method to add a layer from top to bottom.
The last layer added is considered as the substrate.
:arg layer: layer
:type layer: :class:`.Layer`
:return: index of the layer
"""
if key is None:
key = self._key
logger.debug("StSdAddLayer(key)")
ilayer_ = self._lib.StSdGetNbLayers(key)
logger.debug("StSdAddLayer(key, %i)", ilayer_)
if not self._lib.StSdAddLayer(key, ilayer_):
self._raise_error("Cannot add layer")
for i, value in enumerate(layer.composition.items()):
ielt_ = c.c_int(i)
logger.debug("StSdAddElt(key, %i, %i)", ilayer_, i)
if not self._lib.StSdAddElt(key, ilayer_, ielt_):
self._raise_error("Cannot add element")
z, wf = value
nra_ = c.c_int(z)
logger.debug("StSdSetNrAtom(key, %i, %i, %i)", ilayer_, i, z)
if not self._lib.StSdSetNrAtom(key, ilayer_, ielt_, nra_):
self._raise_error("Cannot set atomic number")
if wf is None or wf == CONC_UNKNOWN:
flag = _CONCENTRATION_FLAG_UNKNOWN
elif wf == CONC_DIFF:
flag = _CONCENTRATION_FLAG_DIFFERENCE
else:
flag = _CONCENTRATION_FLAG_KNOWN
wf_ = c.c_double(wf)
logger.debug("StSdSetConc(key, %i, %i, %f)", ilayer_, i, wf)
if not self._lib.StSdSetConc(key, ilayer_, ielt_, wf_):
self._raise_error("Cannot set concentration")
logger.debug("StSdSetConcFlag(key, %i, %i, %i)", ilayer_, i, flag)
if not self._lib.StSdSetConcFlag(key, ilayer_, ielt_, c.c_int(flag)):
self._raise_error("Cannot set concentration flag")
if not substrate:
thick_known = layer.is_thickness_known()
thick_known_ = c.c_bool(thick_known)
if layer.is_density_known():
density = layer.density_kg_m3 / 1e3 # g/cm3
else:
density = 10.0
density_ = c.c_double(density)
if thick_known:
thickness = layer.thickness_m * 1e10 # Angstroms
mass_thickness = layer.mass_thickness_kg_m2 * 0.1 # g/cm2
else:
thickness = 0.0
mass_thickness = 0.0
thickness_ = c.c_double(thickness)
mass_thickness_ = c.c_double(mass_thickness)
logger.debug("StSdSetThick(key, %i, %r, %d, %d, %d)", ilayer_,
thick_known, mass_thickness, thickness, density)
if not self._lib.StSdSetThick(key, ilayer_, thick_known_,
mass_thickness_, thickness_, density_):
self._raise_error("Cannot set thickness")
return int(ilayer_)
def _create_standard(self, standard):
"""
Internal method to create a new object defining the standard
:class:`.Sample`.
"""
# Create new object
key_ = self._stobjectnew(standard=True)
# Set sample
for layer in standard.layers:
self._add_layer(layer, substrate=False, key=key_)
self._add_layer(standard.substrate, substrate=True, key=key_)
# Save
filename = key_.value.decode('ascii') + '.tfs'
filepath = os.path.join(self.get_standard_directory(), filename)
filepath_ = c.create_string_buffer(filepath.encode('ascii'))
logger.debug('StObjectWriteFile(key, %s)', filepath)
if not self._lib.StObjectWriteFile(key_, filepath_):
self._raise_error("Cannot save standard")
# Delete object
self._lib.StObjectDelete(key_)
self._tmpstandards.append(filepath)
return filepath
@_check_key
def add_experiment(self, experiment):
"""
Adds an experiment, i.e. measurements of k-ratio at different energies.
.. hint:: Use :meth:`reset` method to remove defined experiments.
:arg experiment: experiment
:type experiment: :class:`Experiment`
"""
nra_ = c.c_int(experiment.z)
klm_ = c.c_int(experiment.line)
hv_ = c.c_double(experiment.energy_eV / 1e3)
ielt_ = c.c_int()
iline_ = c.c_int()
iexpk_ = c.c_int()
logger.debug('StEdAddNrAtomLineHV(key, %i, %i)', experiment.z, experiment.line)
if not self._lib.StEdAddNrAtomLineHV(self._key, nra_, klm_, hv_,
c.byref(ielt_), c.byref(iline_), c.byref(iexpk_)):
self._raise_error("Cannot add atomic number and line")
standard = experiment.standard
if isinstance(standard, Sample):
standard = self._create_standard(standard)
standard_ = c.create_string_buffer(standard.encode('ascii'))
logger.debug('StEdSetLine(key, %i, %i, %i, %s)', ielt_.value, iline_.value, klm_.value, standard)
if not self._lib.StEdSetLine(self._key, ielt_, iline_, klm_, standard_):
self._raise_error("Cannot set standard")
analyzed = experiment.is_analyzed()
analyzed_ = c.c_bool(analyzed)
logger.debug("StEdSetAnalyzedFlag(key, %i, %r)", ielt_.value, analyzed)
if not self._lib.StEdSetAnalyzedFlag(self._key, ielt_, analyzed_):
self._raise_error("Cannot add experiment analyzed flag")
kratio_ = c.c_double(experiment.kratio)
logger.debug("StEdSetExpK(key, %i, %i, %i, %f, %f, %f, 0.0, 2)",
ielt_.value, iline_.value, iexpk_.value,
experiment.energy_eV / 1e3, experiment.energy_eV / 1e3,
experiment.kratio)
if not self._lib.StEdSetExpK(self._key, ielt_, iline_, iexpk_,
hv_, hv_, kratio_, c.c_double(0.0),
c.c_int(2)):
self._raise_error("Cannot set experiment k-ratio")
if experiment.is_analyzed():
indexes = (ielt_.value, iline_.value, iexpk_.value)
self._experiments.setdefault(experiment, indexes)
@_check_key
def add_experiments(self, *exps):
"""
Adds several experiments::
>>> strata.add_experiments(exp1, exp2, exp3)
"""
for exp in exps:
self.add_experiment(exp)
def get_experiments(self):
"""
Returns a :class:`tuple` of all defined experiments.
:rtype: :class:`tuple`
"""
return tuple(self._experiments.keys())
@_check_key
def set_geometry(self, toa, tilt, azimuth):
"""
Sets the geometry.
:arg toa: take off angle (in radians)
:arg tilt: tilt angle (in radians)
:arg azimuth: azimuthal angle (in radians)
"""
toa_ = c.c_double(toa)
tilt_ = c.c_double(tilt)
azimuth_ = c.c_double(azimuth)
logger.debug('StSetGeomParams(key, %f, %f, %f)', toa, tilt, azimuth)
if not self._lib.StSetGeomParams(self._key, toa_, tilt_, azimuth_):
self._raise_error("Cannot set geometry parameters")
@_check_key
def get_geometry(self):
"""
Returns the geometry.
:return: take off angle (in radians), tilt angle (in radians),
azimuthal angle (in radians)
"""
toa_ = c.c_double()
tilt_ = c.c_double()
azimuth_ = c.c_double()
logger.debug('StGetGeomParams(key)')
if not self._lib.StGetGeomParams(self._key, c.byref(toa_),
c.byref(tilt_), c.byref(azimuth_)):
self._raise_error("Cannot get geometry parameters")
return toa_.value, tilt_.value, azimuth_.value
geometry = property(get_geometry, doc='Property to get geometry')
@_check_key
def set_prz_mode(self, mode):
"""
Sets the type of model to use for the :math:`\\phi(\\rho z)`.
:arg mode: type of model, either
* :data:`PRZMODE_XPP`
* :data:`PRZMODE_PAP`
* :data:`PRZMODE_GAU`
:type mode: :class:`int`
"""
mode_ = c.c_int(mode)
logger.debug('StSetPrzMode(%i)', mode)
self._lib.StSetPrzMode(mode_)
@_check_key
def get_prz_mode(self):
"""
Returns the type of model to use for the :math:`\\phi(\\rho z)`.
:return: either :data:`PRZMODE_XPP`, :data:`PRZMODE_PAP` or
:data:`PRZMODE_GAU`
:rtype: :class:`int`
"""
return self._lib.StGetPrzMode()
prz_mode = property(get_prz_mode, set_prz_mode,
doc='Property to get/set prz mode')
@_check_key
def set_fluorescence(self, flag):
"""
Sets the fluorescence flag.
:arg flag: either
* :data:`FLUORESCENCE_NONE`
* :data:`FLUORESCENCE_LINE`
* :data:`FLUORESCENCE_LINE_CONT`
:type flag: :class:`int`
"""
flag_ = c.c_int(flag)
logger.debug('StSetFluorFlg(%i)', flag)
self._lib.StSetFluorFlg(flag_)
@_check_key
def get_fluorescence(self):
"""
Returns the fluorescence flag.
:return: either :data:`FLUORESCENCE_NONE`, :data:`FLUORESCENCE_LINE`
or :data:`FLUORESCENCE_LINE_CONT`
:rtype: :class:`int`
"""
return self._lib.StGetFluorFlg()
fluorescence = property(get_fluorescence, set_fluorescence,
doc='Property to get/set fluorescence')
@_check_key
def set_standard_directory(self, dirpath):
"""
Sets the directory where standard files are stored.
:arg dirpath: path to directory
:type dirpath: :class:`str`
"""
dirpath_ = c.create_string_buffer(dirpath.encode('ascii'))
self._lib.StSetDirectory(c.c_int(1), dirpath_)
@_check_key
def get_standard_directory(self):
"""
Returns the directory where standard files are stored.
:rtype: :class:`str`
"""
dirpath = (c.c_char * 256)()
self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)
return dirpath.value.decode('ascii')
standard_directory = property(get_standard_directory, set_standard_directory,
doc='Property to get/set standard directory')
@_check_key
def compute_kratio_vs_thickness(self, layer,
thickness_low_m, thickness_high_m, step):
"""
Computes the variation of the k-ratio as a function of the thickness
for a layer.
:arg layer: layer of a sample (must have been previously added)
:type layer: :class:`.Layer`
:arg thickness_low_m: lower limit of the thickness in meters
:type thickness_low_m: :class:`float`
:arg thickness_high_m: upper limit of the thickness in meters
:type thickness_high_m: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of thicknesses
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each thickness
"""
logger.debug('StSetKvsThicknessUnit(2)')
self._lib.StSetKvsThicknessUnit(2) # unit in nm
if layer not in self._layers:
raise ValueError("Unknown layer")
ilayer = self._layers[layer]
ilayer_ = c.c_int(ilayer)
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
# Compute
low_ = c.c_double(thickness_low_m * 1e9)
high_ = c.c_double(thickness_high_m * 1e9)
logger.debug('StComputeKvsThickness(key, %i, %f, %f)',
ilayer, thickness_low_m * 1e9, thickness_high_m * 1e9)
if not self._lib.StComputeKvsThickness(self._key, ilayer_, low_, high_):
self._raise_error("Cannot compute k-ratio vs thickness")
# Fetch results
thicknesses = []
kratios = {}
thick_ = c.c_double()
k_ = c.c_double()
for i in range(step + 1):
i_ = c.c_int(i)
if not self._lib.StGetKvsT_Thick(self._key, i_, c.byref(thick_)):
self._raise_error("Cannot get thickness")
thicknesses.append(thick_.value)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
iHv_ = c.c_int(indexes[2])
if not self._lib.StGetKvsT_K(self._key, i_, ielt_, iline_,
iHv_, c.byref(k_)):
self._raise_error("Cannot get k-ratio")
kratios.setdefault(experiment, []).append(k_.value)
return thicknesses, kratios
@_check_key
def compute_kratio_vs_energy(self, energy_high_eV, step):
"""
Computes the variation of the k-ratio as a function of the incident
energy.
Note that the computation also starts at 0 keV up to the specified energy.
:arg energy_high_eV: upper limit of the thickness in electronvolts
:type energy_high_eV: :class:`float`
:arg step: number of steps
:type step: :class:`int`
:return: :class:`tuple` containing
* :class:`list` of energies in electronvolts
* :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are :class:`list`
containing k-ratios for each energy
"""
step_ = c.c_int(step)
logger.debug('StSetNbComputedHV(%i)', step)
self._lib.StSetNbComputedHV(step_)
energy_ = c.c_double(energy_high_eV / 1e3)
logger.debug('StSetMaxHV(%f)' % (energy_high_eV / 1e3,))
self._lib.StSetMaxHV(energy_)
# Compute
logger.debug('StComputeKvsHV(key)')
if not self._lib.StComputeKvsHV(self._key):
self._raise_error("Cannot compute k-ratio vs energy")
# Fetch results
energies = []
kratios = {}
k_ = c.c_double()
bHV_ = c.c_bool(True)
increment = float(energy_high_eV / 1e3) / step
for i in range(step + 1):
hv = i * increment
hv_ = c.c_double(hv)
for experiment, indexes in self._experiments.items():
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
if not self._lib.StKvsHvOrRx(self._key, ielt_, iline_, hv_, bHV_, c.byref(k_)):
self._raise_error("Cannot get k-ratio")
kratios.setdefault(experiment, []).append(k_.value)
energies.append(hv)
return energies, kratios
@_check_key
def compute_kratios(self):
"""
Computes the k-ratios of the different experiments.
:return: :class:`dict` where the keys are experiments (as defined by
:meth:`.add_experiment`) and the values are k-ratios
(:class:`float`).
"""
if len(self._layers) == 0:
return self._compute_kratios_substrate()
else:
return self._compute_kratios_multilayers()
@_check_key
def _compute_kratios_multilayers(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_thickness`.
"""
for i, layer in enumerate(self._layers.keys()):
if not layer.is_thickness_known():
raise ValueError("Thickness of layer %i is unknown" % i)
# Compute
layer = list(self._layers.keys())[0]
thickness_low_m = layer.thickness_m
thickness_high_m = layer.thickness_m * 10
step = 1
_thicknesses, kratios = \
self.compute_kratio_vs_thickness(layer, thickness_low_m,
thickness_high_m, step)
# Reorganize results
output = {}
for experiment, kratio in kratios.items():
output.setdefault(experiment, kratio[0])
return output
@_check_key
def _compute_kratios_substrate(self):
"""
Internal method to compute the k-ratios using the
:meth:`compute_kratio_vs_energy`.
"""
output = {}
step = 2
for experiment in self._experiments:
energy_high_eV = experiment.energy_eV
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV, step)
kratio = kratios[experiment][-1]
if (kratio < 0): # Bug in strategem that some energy don't work
logger.warn("STRATAGem returns a negative k-ratio, re-try with energy + 1 eV")
_energies, kratios = \
self.compute_kratio_vs_energy(energy_high_eV + 1.0, step)
kratio = kratios[experiment][-1]
output.setdefault(experiment, kratio)
return output
@_check_key
def compute(self, iteration_max=50):
"""
Computes the unknown composition(s) and thickness(es) in the specified
sample.
:arg iteration_max: maximum number of iterations of the solve
(default: 50)
:type iteration_max: :class:`int`
:return: calculated sample
:rtype: :class:`.Sample`
"""
# Add missing experiments
zs = set(exp.z for exp in self._experiments.keys())
for layer in list(self._layers.keys()) + [self._substrate[0]]:
for z, wf in layer.composition.items():
if z in zs:
continue
if wf is None:
continue
logger.debug('Added dummy experiment for z=%i', z)
exp = Experiment(z, LINE_KA, 0.0, analyzed=False) # dummy
self.add_experiment(exp)
# Set iteration maximum
iteration_max_ = c.c_int(iteration_max)
logger.debug('StSetMaxNbIter(%i)', iteration_max)
self._lib.StSetMaxNbIter(iteration_max_)
# Compute
logger.debug('StComputeIterpStart(key)')
if not self._lib.StComputeIterpStart(self._key):
self._raise_error("Cannot start iteration")
continue_ = c.c_bool(True)
iteration = 0
logger.debug('Start iteration')
while True:
iteration += 1
logger.debug('Iteration #%i' % iteration)
logger.debug('StComputeIterpNext(key, %r)' % continue_.value)
if not self._lib.StComputeIterpNext(self._key, c.byref(continue_)):
break
if not continue_.value:
break
logger.debug('Iteration completed')
# Fetch results
thick_known = c.c_bool()
mass_thickness = c.c_double()
thickness = c.c_double()
density = c.c_double()
def get_layer(layer, ilayer):
ilayer_ = c.c_int(ilayer)
logger.debug('StSdGetNbElts(key, %i)' % ilayer)
nbelt = self._lib.StSdGetNbElts(self._key, ilayer_)
if nbelt == -1:
self._raise_error("Cannot get number of elements")
flag_ = (c.c_int * nbelt)()
wfs_ = (c.c_double * nbelt)()
logger.debug('StSdGetLayRawConcs(key, %i, flag, wfs)' % ilayer)
if not self._lib.StSdGetLayRawConcs(self._key, ilayer_,
flag_, wfs_):
self._raise_error("Cannot get layer concentration")
composition = {}
for z in layer.composition.keys():
nra_ = c.c_int(z)
logger.debug('StSdGetEltIdx(key, %i, %i)' % (ilayer, z))
zindex = self._lib.StSdGetEltIdx(self._key, ilayer_, nra_)
composition[z] = wfs_[zindex]
logger.debug("StSdGetThick(key, %i)", ilayer)
if not self._lib.StSdGetThick(self._key, ilayer_, c.byref(thick_known),
c.byref(mass_thickness), c.byref(thickness),
c.byref(density)):
self._raise_error("Cannot get thickness")
return (composition, thickness.value / 1e10,
mass_thickness.value * 10.0, density.value * 1e3)
sample = Sample(get_layer(*self._substrate)[0])
for layer, ilayer in self._layers.items():
sample.add_layer(*get_layer(layer, ilayer))
return sample
@_check_key
def compute_prz(self, maxdepth_m=None, bins=100):
"""
Compute :math:`\\phi(\\rho z)` of all experiments.
.. warning:: Only available for substrate (no layers).
:arg maxdepth_m: maximum depth of the :math:`\\phi(\\rho z)`
distribution in meters. If ``None``, Kanaya-Okayama electron range
is used with a safety factor of 1.5.
:type maxdepth_m: :class:`float`
:arg bins: number of bins in the :math:`\\phi(\\rho z)` distribution
:type bins: :class:`int`
:return: a :class:`dict` where the keys are the experiments and the
values are a tuple containing three lists:
* :math:`\\rho z` coordinates (in g/cm2)
* generated intensities of :math:`\\phi(\\rho z)` (no absorption)
* emitted intensites of :math:`\\phi(\\rho z)`
"""
if len(self._layers) > 0:
raise RuntimeError('PRZ can only be computed for substrate')
# Set scaling
hvs_eV = map(attrgetter('energy_eV'), self._experiments.keys())
maxhv_eV = max(hvs_eV)
maxhv_ = c.c_double(maxhv_eV / 1e3)
logger.debug('StSetScaleHV(%s)', maxhv_eV / 1e3)
self._lib.StSetScaleHV(maxhv_)
# Compute
logger.debug('StComputePrz(key)')
if not self._lib.StComputePrz(self._key):
self._raise_error('Cannot compute prz')
# Get values
przs = {}
for experiment, indexes in self._experiments.items():
# Size of each bin
if maxdepth_m is None:
# Calculate max depth using Kanaya-Okayama
maxdepth_m = 0.0
energy_keV = experiment.energy_eV / 1e3
for z, fraction in self._substrate[0].composition.items():
dr = (0.0276 * atomic_mass_kg_mol(z) * 1e3 * energy_keV ** 1.67) / \
(z ** 0.89 * mass_density_kg_m3(z) / 1e3)
maxdepth_m += fraction / (dr * 1e-6)
maxdepth_m = 1.0 / maxdepth_m
maxdepth_m *= 1.5 # safety factor
increment_kg_m2 = (maxdepth_m * self._substrate[0].density_kg_m3) / bins
# Indexes
ielt_ = c.c_int(indexes[0])
iline_ = c.c_int(indexes[1])
ihv_ = c.c_int(0)
rzs = []
ys_generated = []
ys_emitted = []
for i in range(bins):
rz_ = c.c_double(i * increment_kg_m2 * 0.1)
rzs.append(i * increment_kg_m2)
y_ = c.c_double()
bUseExp_ = c.c_bool(True)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_emitted.append(y_.value)
y_ = c.c_double()
bUseExp_ = c.c_bool(False)
self._lib.StPhiRhoZ(self._key, ielt_, iline_, ihv_, rz_,
bUseExp_, c.byref(y_))
ys_generated.append(y_.value)
przs.setdefault(experiment, (rzs, ys_generated, ys_emitted))
return przs
|
2,771 | 3a7f9bf5420b2d3587f1988c35f2f88bd2fa2b32 | #!/usr/bin/env python3
def main():
    # Kitten entry point required by the kitty protocol; nothing happens on
    # the kitten side — all work is done in handle_result() in the kitty
    # process.
    pass
def handle_result(args, result, target_window_id, boss):
    # args[1] carries the direction from the keybinding: "next"/"previous"
    # switch tabs, other values are window directions.
    if args[1] == "next":
        boss.active_tab_manager.next_tab(1)
    elif args[1] == "previous":
        boss.active_tab_manager.next_tab(-1)
    # NOTE(review): this line also runs after a tab switch above, handing
    # "next"/"previous" to neighboring_window() — looks like a missing
    # `else`; confirm intended behaviour.
    boss.active_tab.neighboring_window(args[1])


# Tell kitty not to show any UI for this kitten.
handle_result.no_ui = True
|
2,772 | 42ae3804c2d8f6a0d440e2bb6231186a868630b1 | import numpy as np
import cv2
import skimage.color
import skimage.filters
import skimage.io
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import pickle
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Conv2DTranspose, Activation,\
Concatenate
from keras.losses import sparse_categorical_crossentropy
from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam
from keras.models import load_model, Model
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
from preprocess_data import get_data
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.regularizers import l2
from keras.utils import to_categorical
import keras.metrics
from sklearn.utils import class_weight
from utils import scheduler
# ---- configuration ---------------------------------------------------------
image_size = 256  # square input edge length in pixels
method = 0        # preprocessing method id for get_data (call commented out below)
batch_size = 8

# Per-epoch metrics: raw confusion-matrix counts plus summary scores.
# NOTE(review): METRICS is defined but model.compile below only uses
# metrics=["accuracy"] -- confirm whether this list was meant to be passed.
METRICS = [
    keras.metrics.TruePositives(name='tp'),
    keras.metrics.FalsePositives(name='fp'),
    keras.metrics.TrueNegatives(name='tn'),
    keras.metrics.FalseNegatives(name='fn'),
    keras.metrics.BinaryAccuracy(name='accuracy'),
    keras.metrics.Precision(name='precision'),
    keras.metrics.Recall(name='recall'),
    keras.metrics.AUC(name='auc'),
]

#get_data(save_data=True, method=method)
# ---- load preprocessed arrays ----------------------------------------------
# File names suggest 256x256 grayscale inputs; labels are 0/1
# (benign/malignant per the printouts below) -- TODO confirm against
# preprocess_data.get_data.
X_Train = np.load('data/X_train_256_GRY.npy')
X_Val = np.load('data/X_val_256_GRY.npy')
X_Test = np.load('data/X_test_256_GRY.npy')
Y_Train = np.load('data/Y_train.npy')
Y_Val = np.load('data/Y_val.npy')
Y_Test = np.load('data/Y_test.npy')

# Class balance / shape sanity printouts.
print("Train Benign: " + str(np.count_nonzero(Y_Train == 0)))
print("Train Malignant: " + str(np.count_nonzero(Y_Train == 1)))
print("Test Benign: " + str(np.count_nonzero(Y_Test == 0)))
print("Test Malignant: " + str(np.count_nonzero(Y_Test == 1)))
print("X_Train shape: " + str(X_Train.shape))
print("Y_Train shape: " + str(Y_Train.shape))
print("X_Test shape: " + str(X_Test.shape))
print("Y_Test shape: " + str(Y_Test.shape))
print("X_Val shape: " + str(X_Val.shape))
print("Y_Val shape: " + str(Y_Val.shape))

# Derived sizes, used for logging only.
batches_per_epoch = int(X_Train.shape[0] / batch_size)
print("batches_per_epoch= " + str(batches_per_epoch))
val_batches_per_epoch = int(X_Val.shape[0] / batch_size)
print("validation batches_per_epoch= " + str(val_batches_per_epoch))
print("Steps per epoch: ", batches_per_epoch)

epoch_count = 25  # NOTE(review): unused -- model.fit below hard-codes epochs=14
# Down-weight class 0 relative to class 1 -- presumably to counter the class
# imbalance printed above; confirm the 0.5/1.0 ratio.
class_weights = {0: 0.5, 1: 1.0}
# ---- data augmentation -----------------------------------------------------
train_generator = ImageDataGenerator(
    preprocessing_function=preprocess_input,  # VGG16 input normalization
    rotation_range=180,
    shear_range=15,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='reflect')
# NOTE(review): the validation generator applies the same random
# augmentations as training; validation data is normally left unaugmented --
# confirm this is intentional.
val_generator = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=180,
    shear_range=15,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='reflect')
train_generator.fit(X_Train)
val_generator.fit(X_Val)

# Create callbacks
early_stopping = EarlyStopping(monitor='val_loss', patience=10,
                               verbose=1, mode='min')
#reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='min')
reduce_lr = LearningRateScheduler(scheduler)  # schedule comes from utils.scheduler
# Checkpoint every epoch (save_best_only=False), filename tagged with epoch
# number and validation accuracy.
filepath="checkpoints/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, mode='min')
callbacks = [reduce_lr, early_stopping, checkpointer]
# ---- model: frozen VGG16 base + small dense head ---------------------------
vgg = VGG16(weights='imagenet',
            include_top=False,
            input_shape=(image_size, image_size, 3))
model = Sequential()
model.add(vgg)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))  # single sigmoid unit: binary output

# Freeze the convolutional base
vgg.trainable = False
opt = keras.optimizers.Adam(learning_rate=0.001)

# Compile the model
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=["accuracy"])

# Train
history = model.fit(
    train_generator.flow(X_Train, Y_Train, batch_size=batch_size),
    steps_per_epoch=len(X_Train) / batch_size,
    epochs=14,
    class_weight=class_weights,
    shuffle=True,
    validation_data=val_generator.flow(X_Val, Y_Val, batch_size=batch_size),
    callbacks=callbacks,
    verbose=2
)
model.save("models/vgg.h5")
|
def ex(x, y):
    """Print the larger of x and y and return it.

    Bug fix: the original shadowed the builtin `max` with a local set to 0
    and always returned 0 regardless of the inputs.
    """
    larger = x if x > y else y
    print(larger)
    return larger
|
2,774 | 3b99cc0eb163f4a94bc47429ad3627a6ecad4818 | from sys import stdin
def get_time(d, sp, dists, i, d_old, sp_old):
    """Minimum time to cover the remaining legs dists[i:].

    At city i we may either take the fresh horse offered there
    (range d[i], speed sp[i]) or keep the current horse (remaining
    range d_old, speed sp_old). Returns None when no choice can
    cover the next leg.
    """
    if i == len(dists):
        return 0
    leg = dists[i]
    candidates = []
    # Try both options: the city's horse first, then the current one.
    for fuel, speed in ((d[i], sp[i]), (d_old, sp_old)):
        if fuel >= leg:
            rest = get_time(d, sp, dists, i + 1, fuel - leg, speed)
            if rest is not None:
                candidates.append(rest + float(leg) / speed)
    return min(candidates) if candidates else None
def get_answer():
    """Read one test case from stdin and return the minimal total travel
    time, or None when the route cannot be completed."""
    parts = [int(el) for el in stdin.readline().strip().split()]
    n = parts[0]  # number of cities (one horse offered per city)
    d = []   # d[i]: range of the horse offered at city i
    sp = []  # sp[i]: speed of the horse offered at city i
    for i in range(n):
        ps = [int(el) for el in stdin.readline().strip().split()]
        d.append(ps[0])
        sp.append(ps[1])
    dist = []  # n lines of the distance matrix (-1 marks unreachable)
    for i in range(n):
        dist.append([int(el) for el in stdin.readline().strip().split()])
    p = stdin.readline()  # trailing line, intentionally ignored
    dists = []
    # From every row except the last, take the first entry that is not -1:
    # these are the consecutive leg lengths along the route.
    for line in dist[:len(dist) - 1]:
        for i in range(len(line)):
            if line[i] != -1:
                dists.append(line[i])
                break
    res = get_time(d, sp, dists, 0, 0, 0)
    return res
def main():
    # Print one "Case #k: answer" line per test case.
    # NOTE: this file is Python 2 (print statement below).
    t = int(stdin.readline().strip())
    for i in range(t):
        print "Case #{0}: {1}".format(i + 1, get_answer())

if __name__ == "__main__":
    main()
|
# Insert a user-supplied number into a descending sorted list, keeping order.
my_list = [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 4, 4, 4, 2, 2, 1]
new_num = int(input('Enter a new number - '))
i = 0
# Count how many existing values are >= the new number: that count is the
# insertion position in a descending list.
for n in my_list:
    if new_num <= n:
        i += 1
# NOTE(review): inserted as float while the rest are ints -- confirm intent.
my_list.insert(i, float(new_num))
print(my_list)
2,776 | 561763d4d7b613446f2890ef629b631542f2f472 | from datetime import datetime
start = datetime.now()  # for the runtime report printed at the end

# Poker Hand Analyser Library for Project Euler: Problem 54
from collections import namedtuple
# import pe_lib
def character_frequency(s):
    """Return a dict mapping each element of s to its occurrence count."""
    freq = {}
    for ch in s:
        freq[ch] = freq.get(ch, 0) + 1
    return freq
# NOTE(review): str.split() splits on whitespace, so this is ['HDCS'], not
# ['H', 'D', 'C', 'S']; `suits` appears unused below -- confirm intent.
suits = "HDCS".split()
# Card faces in ascending rank order. `faces` keeps the comma-separated
# string (rank comparisons below use its .index); `face` is the list form.
faces = "2,3,4,5,6,7,8,9,T,J,Q,K,A"
face = faces.split(',')
class Card(namedtuple('Card', 'face, suit')):
    """A playing card; repr is the two-character face+suit code, e.g. 'AS'."""

    def __repr__(self):
        return self.face + self.suit
def royal_flush(hand):
    """Return ('royal-flush', high_face) for T-J-Q-K-A of one suit, else False."""
    royalface = "TJQKA"
    # sort the cards based on the face rank of each card
    # (faces.index works on the comma-separated string: position grows with rank)
    ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
    first_card = ordered[0]
    other_cards = ordered[1:]
    # check if all are of the same suit
    if all(first_card.suit == card.suit for card in other_cards):
        # check if they are in sequential order
        # compare the ordered faces substring with the face list (which is converted to string)
        if ''.join(card.face for card in ordered) in royalface:
            return 'royal-flush', ordered[-1].face
    return False
def straight_flush(hand):
    """Return ('straight-flush', high_face) for a suited run of faces, else False."""
    # sort the cards based on the face rank of each card
    ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
    first_card = ordered[0]
    other_cards = ordered[1:]
    # check if all are of the same suit
    if all(first_card.suit == card.suit for card in other_cards):
        # check if they are in sequential order
        # compare the ordered faces substring with the face list (which is converted to string)
        if ''.join(card.face for card in ordered) in ''.join(face):
            return 'straight-flush', ordered[-1].face
    return False
def four_of_a_kind(hand):
    """Return ('four-of-a-kind', face) when four cards share a face, else False."""
    allfaces = [f for f,s in hand]
    # create a unique set of ranks
    uniqueRanks = set(allfaces)
    # if there are more than 2 ranks, it's not four of a kind
    # (exactly two ranks could still be a 3+2 full house, filtered below)
    if len(uniqueRanks) != 2:
        return False
    for f in uniqueRanks:
        # if there are 4 faces, it is four of a kind
        if allfaces.count(f) == 4:
            uniqueRanks.remove(f)
            return "four-of-a-kind", f
    return False
def full_house(hand):
    """Return 'full-house' when the hand is a triple plus a pair, else False."""
    allfaces = [f for f, s in hand]
    rankFrequency = character_frequency(allfaces)
    # Bug fix: dict.values() is not indexable in Python 3, and the old
    # values()[0]==2 / values()[1]==3 check also depended on dict ordering.
    # Comparing the sorted counts is order-independent and works everywhere.
    if sorted(rankFrequency.values()) == [2, 3]:
        return 'full-house'
    return False
def flush(hand):
    """Return ('flush', faces sorted high to low) when all suits match, else False."""
    allfaces = [f for f,s in hand]
    first_card = hand[0]
    other_cards = hand[1:]
    if all(first_card.suit == card.suit for card in other_cards):
        # tie-break list: every face, highest rank first
        return 'flush', sorted(allfaces, key=lambda f: face.index(f), reverse=True)
    return False
def straight(hand):
    """Return ('straight', high_face) when the faces form a run, else False."""
    ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
    # a run of faces appears as a substring of the full rank order string
    if ''.join(card.face for card in ordered) in ''.join(face):
        return 'straight', ordered[-1].face
    return False;
def three_of_a_kind(hand):
    """Return ('three-of-a-kind', face) when exactly three cards share a face."""
    allfaces = [f for f,s in hand]
    uniqueRanks = set(allfaces)
    # three ranks means counts 3+1+1 (2+2+1 has no triple, filtered below)
    if len(uniqueRanks) != 3:
        return False
    for f in uniqueRanks:
        if allfaces.count(f) == 3:
            uniqueRanks.remove(f)
            return "three-of-a-kind", f
    return False;
def two_pair(hand):
    """Return ('two-pair', [high_pair, low_pair, kicker]) or False."""
    allfaces = [f for f,s in hand]
    allftypes = set(allfaces)
    # collect pairs
    pairs = [f for f in allftypes if allfaces.count(f) == 2]
    # if there are more than two pairs
    if len(pairs) != 2:
        return False
    p1, p2 = pairs
    # get the difference using sets
    other_cards = [(allftypes - set(pairs)).pop()]
    # the conditional orders the two pairs so the higher-ranked one comes first
    return 'two-pair', pairs + other_cards if(face.index(p1) > face.index(p2)) else pairs[::-1] + other_cards
def one_pair(hand):
    """Return ('one-pair', [pair_face, kickers high to low]) or False."""
    allfaces = [f for f,s in hand]
    allftypes = set(allfaces)
    # collect pairs
    pairs = [f for f in allftypes if allfaces.count(f) == 2]
    # if there's more than one pair
    if len(pairs) != 1:
        return False
    allftypes.remove(pairs[0])
    # pair first, then the remaining faces sorted highest rank first
    return 'one-pair', pairs + sorted(allftypes, key=lambda f: face.index(f), reverse=True)
def high_card(hand):
    """Return ('high_card', face) with the highest-ranked face in the hand."""
    # collect all faces from each card
    allfaces = [f for f, s in hand]
    # Bug fix: the sort key was allfaces.index(f) -- the card's position in
    # the hand, not its rank -- so the "high card" was just whichever card
    # appeared last. Rank order comes from the module-level `face` list.
    return "high_card", sorted(allfaces, key=lambda f: face.index(f), reverse=True)[0]
def create_hand_tuple(cards = "5D 8C 9S JS AC"):
    """Parse a space-separated card string into a list of Card tuples."""
    hand = []
    for card in cards.split():
        # last char is the suit, everything before it is the face
        # NOTE(review): this local `face` shadows the module-level rank list
        face, suit = card[:-1], card[-1]
        hand.append(Card(face, suit))
    return hand;
# Rank-test functions ordered from strongest hand to weakest; the first
# truthy result wins (see determine_rank).
handrankorder = (royal_flush,straight_flush,four_of_a_kind,full_house,
                 flush,straight,three_of_a_kind,two_pair,
                 one_pair,high_card)
def determine_rank(cards):
    """Rank the hand string, trying the strongest categories first."""
    hand = create_hand_tuple(cards)
    for ranker in handrankorder:
        rank = ranker(hand)
        if rank:
            break
    # high_card always succeeds, so `rank` is always bound here
    return rank
# Each input line holds two 5-card hands: 15 characters for player 1
# (five "Xy " groups) followed by player 2's cards.
for play in open('p054_poker.txt', 'r').readlines():
    play = play.strip()
    h1 = play[:15]  # player 1's five cards
    h2 = play[15:]  # player 2's five cards
    print(f"{determine_rank(h1)}\t\t{determine_rank(h2)}")

print(f"\n\n\nfin in {datetime.now()-start}")
|
2,777 | 14fb6776ac30802edf43c43acbee64263c6bdd7b | import numpy as np
import itertools
from scipy.linalg import eig, schur
from eigen_rootfinding.polynomial import MultiCheb, MultiPower
from eigen_rootfinding.utils import memoize
from scipy.stats import ortho_group
def indexarray(matrix_terms, which, var):
    """Compute the array mapping monomials under multiplication by x_var

    Parameters
    ----------
    matrix_terms : 2d integer ndarray
        Ordered monomials; row i holds the exponent of each variable in the
        ith multivariate monomial.
    which : slice object
        Selects the rows (monomials) to multiply by x_var.
    var : int
        Variable to multiply by: x_0, ..., x_(dim-1)

    Returns
    -------
    arr : 1d integer ndarray
        Index in matrix_terms of each selected monomial's product with x_var.
    """
    shifted = matrix_terms[which].copy()
    shifted[:, var] += 1
    # L1 distance from every shifted monomial to every basis monomial;
    # the product is the (unique) row at distance zero.
    dist = np.abs(shifted[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1)
    return np.argmin(dist, axis=1)
def indexarray_cheb(matrix_terms, which, var):
    """Compute the arrays mapping Chebyshev monomials under multiplication
    by x_var, using T_1*T_n = .5(T_(n+1) + T_(n-1)) (and T_1*T_0 = T_1).

    Parameters
    ----------
    matrix_terms : 2d integer ndarray
        Ordered monomials; row i holds the Chebyshev degree in each variable
        of the ith multivariate monomial.
    which : slice object
        Selects the rows (monomials) to multiply by x_var.
    var : int
        Variable to multiply by: x_0, ..., x_(dim-1)

    Returns
    -------
    arr1 : 1d integer ndarray
        Indices of the T_(n+1) terms.
    arr2 : 1d integer ndarray
        Indices of the T_(n-1) terms.
    """
    selected = matrix_terms[which]
    up = selected.copy()
    up[:, var] += 1
    down = selected.copy()
    down[:, var] -= 1
    # T_(-1) is identified with T_1, so a degree of -1 becomes +1.
    down[down[:, var] == -1, var] += 2

    def nearest(targets):
        # index of the basis row at L1 distance zero from each target
        return np.argmin(
            np.abs(targets[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1),
            axis=1)

    return nearest(up), nearest(down)
def ms_matrices(E, Q, matrix_terms, dim):
    """Compute the Möller-Stetter matrices in the monomial basis from a
    reduced Macaulay matrix

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis
    Q : (l, n) 2d ndarray
        Matrix whose columns give the quotient basis in terms of the monomial basis
    matrix_terms : 2d ndarray
        Array with ordered monomial basis
    dim : int
        Number of variables

    Returns
    -------
    M : (n, n, dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[..., i]
    """
    n = Q.shape[1]
    m = E.shape[0]
    M = np.empty((n, n, dim),dtype=E.dtype)
    # Row j of A expresses monomial j in the quotient basis: the first m rows
    # come from the Macaulay reduction (-E), the remaining rows from Q.
    A = np.vstack((-E, Q))
    for i in range(dim):
        # indices of the low-degree monomials after multiplication by x_i
        arr = indexarray(matrix_terms, slice(m,None), i)
        M[..., i] = Q.conj().T@A[arr]
    return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
    """Compute the Möller-Stetter matrices in the Chebyshev basis from a
    reduced Macaulay matrix

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis
    Q : (l, n) 2d ndarray
        Matrix whose columns give the quotient basis in terms of the Chebyshev basis
    matrix_terms : 2d ndarray
        Array with ordered Chebyshev basis
    dim : int
        Number of variables

    Returns
    -------
    M : (n, n, dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[..., i]
    """
    n = Q.shape[1]
    m = E.shape[0]
    M = np.empty((n, n, dim),dtype=E.dtype)
    # Row j of A expresses monomial j in the quotient basis (see ms_matrices).
    A = np.vstack((-E, Q))
    for i in range(dim):
        arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)
        # T_1*T_n = .5(T_(n+1) + T_(n-1)), hence the averaged rows
        M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])
    return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
    """Compute the Möller-Stetter matrices in the power basis from a
    reduced Macaulay matrix (QRP method)

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis
    P : (, l) ndarray
        Array of pivots returned in QR with pivoting, used to permute the columns.
    matrix_terms : 2d ndarray
        Array with ordered monomial basis
    dim : int
        Number of variables
    cut : int
        Row where the pivoted columns start; only matrix_terms[cut:] was
        permuted by the QRP step and is re-ordered here.

    Returns
    -------
    M : (n, n, dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[..., i]
    """
    r, n = E.shape
    # NOTE: mutates matrix_terms in place to apply the QRP column pivoting.
    matrix_terms[cut:] = matrix_terms[cut:][P]
    M = np.empty((n, n, dim),dtype=E.dtype)
    # Identity block: the quotient basis is the (permuted) monomials themselves.
    A = np.vstack((-E, np.eye(n)))
    for i in range(dim):
        arr = indexarray(matrix_terms, slice(r,None), i)
        M[..., i] = A[arr]
    return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
    """ Compute the Möller-Stetter matrices in the Chebyshev basis from a
    reduced Macaulay matrix (QRP method)

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis
    P : (, l) ndarray
        Array of pivots returned in QR with pivoting, used to permute the columns.
    matrix_terms : 2d ndarray
        Array with ordered Chebyshev basis
    dim : int
        Number of variables
    cut : int
        Row where the pivoted columns start; only matrix_terms[cut:] was
        permuted by the QRP step and is re-ordered here.

    Returns
    -------
    M : (n, n, dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[..., i]
    """
    r, n = E.shape
    # NOTE: mutates matrix_terms in place to apply the QRP column pivoting.
    matrix_terms[cut:] = matrix_terms[cut:][P]
    M = np.empty((n, n, dim),dtype=E.dtype)
    A = np.vstack((-E, np.eye(n)))
    for i in range(dim):
        arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)
        # T_1*T_n = .5(T_(n+1) + T_(n-1)), hence the averaged rows
        M[..., i] = .5*(A[arr1] + A[arr2])
    return M
def sort_eigs(eigs, diag):
    """Sort eigs to match the ordering of the Schur-factorization diagonal.

    Parameters
    ----------
    eigs : 1d ndarray
        Unsorted eigenvalues.
    diag : 1d complex ndarray
        Diagonal of the approximate Schur factorization.

    Returns
    -------
    w : 1d integer ndarray
        Permutation such that eigs[w] lines up with diag.
    """
    remaining = list(range(diag.shape[0]))
    matches = []
    # Greedily pair each eigenvalue with the closest still-unclaimed
    # diagonal entry.
    for value in eigs:
        idx = remaining[np.argmin(np.abs(diag[remaining] - value))]
        matches.append(idx)
        remaining.remove(idx)
    return np.argsort(matches)
@memoize
def get_rand_combos_matrix(rows, cols, normal=False):
    """ Generates a rows by cols random matrix with orthogonal rows or columns,
    depending on if rows > cols or cols > rows.

    Parameters
    ----------
    rows : int
        Number of rows
    cols : int
        Number of columns
    normal : bool
        Optional. Whether or not to create a matrix using entries drawn
        from the standard normal distribution (N(0, 1)) or not. If it's
        False, it will return an orthogonal matrix.

    Returns
    -------
    C : (rows, cols) ndarray
        Matrix with orthgonal rows or columns, depending on if rows > cols or
        cols > rows if normal is False, otherwise a matrix with
        coefficients drawn from the standard normal (N(0, 1)).
    """
    # Fixed seed (plus memoization) makes the returned matrix deterministic
    # across calls and runs.
    np.random.seed(57)
    # TODO perhaps explore different types of random matrices?
    # randn was giving me conditioning problems
    if normal:
        C = np.random.normal(loc=0, scale=1, size=(rows, cols))
        return C
    # Draw a full orthogonal matrix and slice it to the requested shape.
    size = max(rows, cols)
    C = ortho_group.rvs(size)
    return C[:rows, :cols]
@memoize
def get_Q_c(dim):
    """ Generates a once-chosen random orthogonal matrix and a random linear combination
    for use in the simultaneous eigenvalue compution.

    Parameters
    ----------
    dim : int
        Dimension of the system

    Returns
    -------
    Q : (dim, dim) ndarray
        Random orthogonal rotation
    c : (dim, ) ndarray
        Random linear combination
    """
    # Fixed seed (plus memoization) keeps Q and c stable across calls.
    np.random.seed(103)
    Q = ortho_group.rvs(dim)
    c = np.random.randn(dim)
    return Q, c
def msroots(M):
    """Computes the roots to a system via the eigenvalues of the Möller-Stetter
    matrices. Implicitly performs a random rotation of the coordinate system
    to avoid repeated eigenvalues arising from special structure in the underlying
    polynomial system. Approximates the joint eigenvalue problem using a Schur
    factorization of a linear combination of the matrices.

    Parameters
    ----------
    M : (n, n, dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[..., i]

    Returns
    -------
    roots : (n, dim) ndarray
        Array containing the approximate roots of the system, where each row
        is a root.
    """
    dim = M.shape[-1]

    # perform a random rotation with a random orthogonal Q
    Q, c = get_Q_c(dim)
    M = (Q@M[..., np.newaxis])[..., 0]

    eigs = np.empty((dim, M.shape[0]), dtype='complex')
    # Compute the matrix U that triangularizes a random linear combination
    U = schur((M*c).sum(axis=-1), output='complex')[1]
    for i in range(0, dim):
        # Triangularize M_i with the shared U, then reorder its eigenvalues
        # to match the diagonal so coordinates stay aligned across all i.
        T = (U.T.conj())@(M[..., i])@U
        w = eig(M[..., i], right=False)
        arr = sort_eigs(w, np.diag(T))
        eigs[i] = w[arr]

    # Rotate back before returning, transposing to match expected shape
    return (Q.T@eigs).T
|
2,778 | c2ff3c5e44fa361671a3fdb38060517bcc4bc82c | from django import forms
class CommentForm(forms.Form):
    """Blog comment form: commenter name, email and the comment body."""
    # NOTE(review): the label strings below appear mojibake-encoded in this
    # copy, and the last label's literal seems split across two lines --
    # verify against the original UTF-8 source before editing.
    name = forms.CharField(label='็งฐๅผ')
    email = forms.EmailField(label='้ฎ็ฎฑ')
    content = forms.CharField(label='ๅ
ๅฎน')
|
# Prompt for a password and print a confirmation when it matches an entry in
# the known list; nothing is printed when no entry matches.
# (Prompt/message strings are Thai and appear mojibake-encoded in this copy.)
password = ["123456", "1111"]
pw = input("เธฃเธซเธฑเธชเธเนเธฒเธเธเธทเธญ>>>")
for data in password:
    if data != pw:
        pass  # not this entry; keep scanning
    else:
        print("เธเธเธเนเธญเธกเธนเธฅเธฃเธซเธฑเธชเธเนเธฒเธเธเธตเน")
        print("เนเธฅเนเธงเนเธเธญเธเธฑเธเนเธซเธกเน")
|
2,780 | 9b73037e8af7d4f91261cebf895b68650182fcd5 | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the artifacts app.
urlpatterns = [
    path('', views.artifact, name="artifacts"),              # artifact list
    path('<int:artifact_id>', views.detail, name="detail"),  # single artifact
    path('register/', views.register, name="register")       # user registration
]
2,781 | d89e1d653c6db322feb6edba93cbfc622bf47aa2 | #%%
### Splitting date data
# Extract year/month/day parts from a year-month-day date column.
import pandas as pd

df = pd.read_csv('../../datasets/part5/stock-data.csv')

# Convert the string date column to pandas Timestamps (new column on df).
df['new_Date'] = pd.to_datetime(df['Date'])
print(df.head())
print()

# Use the dt accessor to split new_Date into year, month and day columns.
df['Year'] = df['new_Date'].dt.year
df['Month'] = df['new_Date'].dt.month
df['Day'] = df['new_Date'].dt.day
print(df.head())
print('------------------')

# Convert Timestamp to Period to change the date representation:
# to_period() keeps year ('A') or year-month ('M') granularity.
df['Date_yr'] = df['new_Date'].dt.to_period(freq='A')  # yearly period
df['Date_m'] = df['new_Date'].dt.to_period(freq='M')   # year-month period
print(df.head())
print('------------------')

# Use the year-month column as the index.
df.set_index('Date_m', inplace=True)
print(df.head())
# %%
|
2,782 | 0e57e25c11ba97aef5467f61d99065609e127f5b | import multiprocessing
import sys
import warnings
from pathlib import Path
import attr
import librosa
import pandas as pd
from rich.progress import BarColumn, Progress, TimeRemainingColumn
from sklearn.preprocessing import LabelEncoder
from tslearn.piecewise import SymbolicAggregateApproximation
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
import utils
# Silence warnings unless the user asked for them on the command line (-W).
if not sys.warnoptions:
    warnings.simplefilter("ignore")
@attr.s
class MusicDB(object):
    """Lazily-built dataset of resampled track waveforms.

    df   -- DataFrame of resampled waveforms, one row per track id
            (loaded from a pickle cache, or rebuilt from the mp3s)
    feat -- genre labels for df's tracks, plus a label-encoded column
    sax  -- SAX symbolic approximation of df's (standardized) rows
    """

    df = attr.ib()
    feat = attr.ib()
    sax = attr.ib()

    # start of private methods
    @feat.default
    def _feat_default(self):
        # Load track metadata and keep the top-genre column for exactly the
        # tracks present in df.
        our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False)
        miao = our_feat[[("track", "genre_top")]]
        miao = miao.loc[self.df.index]
        miao.columns = ["genre"]
        le = LabelEncoder()
        label_encoders = dict()
        # NOTE(review): ("genre") is just the string "genre", not a tuple;
        # the loop below therefore runs once.
        column2encode = [("genre")]
        for col in column2encode:
            le = LabelEncoder()
            miao["enc_genre"] = le.fit_transform(miao[col])
            label_encoders[col] = le
        return miao

    @df.default
    def _dataframe_default(self):
        # Try the pickle cache first (returns False when absent).
        pick = self._dataframe_pickleload()
        if type(pick) is not bool:
            return pick
        # if not, populate
        return self._dataframe_populate()

    @sax.default
    def _saxdf_default(self):
        segments = 130
        # Standardize each waveform (zero mean, unit variance) before SAX.
        scaler = TimeSeriesScalerMeanVariance()
        musi_scaled = pd.DataFrame(
            scaler.fit_transform(self.df.values).reshape(
                self.df.values.shape[0], self.df.values.shape[1]
            )
        )
        musi_scaled.index = self.df.index
        sax = SymbolicAggregateApproximation(n_segments=segments, alphabet_size_avg=20)
        ts_sax = sax.fit_transform(musi_scaled)
        miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0], segments))
        miaoooooo.index = self.df.index
        return miaoooooo

    def _dataframe_pickleload(self):
        # Returns the cached DataFrame, or False when no cache exists.
        path_to_pickle = Path("data/picks/small.pkl")
        try:
            pipi = pd.read_pickle(path_to_pickle)
        except FileNotFoundError:
            return False
        return pipi

    def _dataframe_populate(self):
        # estabilish number of features using the main song
        y, sr = librosa.load("data/music/000/000002.mp3", sr=None)
        # NOTE(review): the reference song is resampled at 90 here but
        # _do_one_song resamples at 120 -- the per-track length will not
        # match number_of_feat; confirm the intended rate.
        miao = librosa.resample(y, sr, 90)
        number_of_feat = len(miao)
        # make df
        print(f"Building a dataframe with {number_of_feat} features.")
        dfm = pd.DataFrame(columns=list(range(number_of_feat)))
        num_errors = 0
        # populate collection of paths of mp3s
        p = Path("data/music").glob("**/*.mp3")
        tracks = [x for x in p if x.is_file()]
        print(f"Making a Dataframe of len {len(tracks)}.")
        # make progress reporting
        progress = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "{task.completed} of {task.total}",
            "[progress.percentage]{task.percentage:>3.0f}%",
            TimeRemainingColumn(),
        )
        # populate df, extracting tracks in parallel worker processes
        with progress:
            task_id = progress.add_task("[cyan]Extracting...", total=len(tracks))
            with multiprocessing.Pool() as pool:
                for row in pool.imap_unordered(self._do_one_song, tracks):
                    # _do_one_song returns False on failure, a Series on success
                    if type(row) is not bool:
                        dfm = dfm.append(row)
                    else:
                        num_errors += 1
                    progress.advance(task_id)
        dfm = dfm.sort_index()
        # ensure the shape is the one of the main song
        dfm = dfm.loc[:, : number_of_feat - 1]
        print(f"There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.")
        print(f"There also were {num_errors} errors.")
        dfm = dfm.fillna(value=0)
        dfm.to_pickle("data/picks/small.pkl")
        return dfm

    def _do_one_song(self, song):
        # extract waveform and convert; the Series is named after the track id
        try:
            y, sr = librosa.load(str(song), sr=None)
            miao = librosa.resample(y, sr, 120)
            # fix the index
            miao = pd.Series(data=miao)
            miao.name = int(song.stem)
            return miao
        # NOTE(review): bare except also swallows unrelated errors
        # (KeyboardInterrupt included); consider narrowing.
        except:
            return False
if __name__ == "__main__":
    # Building the instance triggers the lazy attrs defaults above.
    music = MusicDB()

    # some printing just to understand how this works
    print(music.df.info())
    print(music.df.head())
|
num=5
a=5
# Print `num` rows; row r contains r numbers from a counter that starts at 5
# and decreases by one after every number printed (so it goes negative).
for row in range(num,0,-1):
    for col in range(row,0,-1):
        print(a,end="")
        a-=1
    print()  # newline after each row
2,784 | 4c54cfefbaf90c1dd0648485e62bff1f2787ccfe | from django.db import models
class IssueManager(models.Manager):
    """Manager exposing open/closed filters via the related status flag."""

    def open(self):
        """Queryset of issues whose status is not marked closed."""
        return self.filter(status__is_closed=False)

    def closed(self):
        """Queryset of issues whose status is marked closed."""
        return self.filter(status__is_closed=True)
2,785 | b92f24cddae7b392af2417b39bb4f58e3f661cc6 | from activitystreams.core import Object
class Actor(Object):
    """Describes a generic actor.

    Base class for the concrete actor types below; all behaviour comes
    from activitystreams.core.Object.
    """
    pass

class Application(Actor):
    """Describes a software application."""
    pass

class Group(Actor):
    """Represents a formal or informal collective of Actors."""
    pass

class Organization(Actor):
    """Represents an organization."""
    pass

class Person(Actor):
    """Represents an individual person."""
    pass

class Service(Actor):
    """Represents a service of any kind."""
    pass
|
2,786 | 2edbf18c90da1ff40fd9abaf25a35dbdaf733bc1 | # -*- coding: utf-8 -*-
"""Success request logging.
This logging is used by "CheckZope" to determine the amount
of work performed by Zope (in order not to bother it with monitor
probes when it is heavily active) and to detect an unreasonable
error rate.
This logging writes two files "<base>_good.<date>" and "<base>_bad.<date>".
For each request, a character is writen to either the good or
the bad logfile, depending on whether the request was successful or
unsuccessful. This means, that only the file size matters for
these logfiles.
Usually, response codes >= 500 are considered as unsuccessful requests.
You can register an "ISuccessFull" adapter, when you need
a different classification.
To activate this logging, both "successlogging.zcml" must be activated
and a "product-config" section with name "successlogging" must be defined
containing the key "filebase".
It specifies the basename of the logfiles (represented as "<base>" above).
"""
from .interfaces import IStatus
from .interfaces import ISuccessFull
from .Rotator import Rotator
from zope.processlifetime import IProcessStarting
from zope.component import adapter
from zope.component import provideHandler
from ZPublisher.interfaces import IPubFailure
from ZPublisher.interfaces import IPubSuccess
_log_good = _log_bad = None
@adapter(IProcessStarting)
def start_successlogging(unused):
    """start successlogging if configured.

    Reads the "successlogging" product-config section; when present, opens
    the two rotating marker logs and registers the publication observers.
    """
    from App.config import getConfiguration
    config = getConfiguration().product_config.get('successlogging')
    if config is None:
        return # not configured
    global _log_good, _log_bad
    # Locked rotators: one file per outcome; only their size carries meaning.
    _log_good = Rotator(config['filebase'] + '_good', lock=True)
    _log_bad = Rotator(config['filebase'] + '_bad', lock=True)
    # register publication observers
    provideHandler(handle_request_success)
    provideHandler(handle_request_failure)
@adapter(IPubSuccess)
def handle_request_success(event):
    """handle "IPubSuccess": append one marker byte to the good log."""
    _log_good.write('*')
@adapter(IPubFailure)
def handle_request_failure(event):
    """handle "IPubFailure".

    Retried requests count as successes; otherwise the response status (or
    an ISuccessFull/IStatus adapter, when registered) decides which log gets
    the marker byte. Statuses below 500 count as successful.
    """
    request = event.request
    if event.retry:
        handle_request_success(event)
    else:
        # Note: Zope forgets (at least sometimes)
        # to inform the response about the exception.
        # Work around this bug.
        # When Zope3 views are used for error handling, they no longer
        # communicate via exceptions with the ZPublisher. Instead, they seem
        # to use 'setBody' which interferes with the 'exception' call below.
        # We work around this problem by saving the response state and then
        # restore it again. Of course, this no longer works around the Zope
        # bug (forgetting to call 'exception') mentioned above.
        response = request.response
        saved = response.__dict__.copy()
        response.setStatus(event.exc_info[0])
        # Classification precedence: ISuccessFull adapter, then IStatus
        # adapter, then the raw response status code.
        ok = ISuccessFull(response, None)
        if ok is None:
            status = IStatus(response, None)
            if status is None:
                status = response.getStatus()
            else:
                status = int(status)
            ok = status < 500
        if bool(ok):
            handle_request_success(event)
        else:
            _log_bad.write('*')
        response.__dict__.update(saved) # restore response again
|
2,787 | 2f1193e3ab5e0527ab5f89141613eddb18b5f61d | from difflib import SequenceMatcher
import csv
naam = "straat"  # name to search for

def similar(a, b):
    # Similarity ratio in [0, 1] between the two strings.
    return SequenceMatcher(None, a, b).ratio()

# Compare every street name in the file against `naam` and report the
# sufficiently similar ones (> 0.7).
f = open("straten.txt", "r")
f.readline()  # skip the header line
names = f.readlines()
for name in names:
    # name[:-1] strips the trailing newline
    if similar(name[:-1].lower(),naam.lower()) > 0.7:
        sim = similar(name[:-1].lower(),naam.lower())
        print("gevonden: " + name[:-1] + " ---- " + naam + " ---- " + str(sim))

# with open('straatnamen.csv') as csvfile:
#     reader = csv.DictReader(csvfile)
#     for row in reader:
#         print(row['straatnaam'])
2,788 | 47ad08bb153801f592d90c48d62338d0f7703899 | import csv, requests
from bs4 import BeautifulSoup
# Module-level result buffers. Row layout for both lists:
# chooseKey, count, grade, keyType, mainCategory, mainKey, name, pricePerOne,
# subCategory, subKey, totalTradeCount (items also: mainLabel, subLabel,
# description).
items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,
           # name, pricePerOne, subCategory, subKey, totalTradeCount,
           # mainLabel, subLabel, description
mpItems = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,
             # name, pricePerOne, subCategory, subKey, totalTradeCount
def openCsv():
    """Return the path of the item-ID csv file."""
    return 'BDO_app/modules/priceCheck/itemID.csv'
def importAll():
    """Load every item row from the csv file, skipping the header.

    Row layout: chooseKey, count, grade, keyType, mainCategory, mainKey,
    name, pricePerOne, subCategory, subKey, totalTradeCount,
    mainLabel, subLabel, description
    """
    with open(openCsv()) as source:
        reader = csv.reader(source)
        next(reader)  # discard the header row
        return list(reader)
def priceCheck(a, b, c):
    """Read one item from the link.

    Fetches http://omegapepega.com/<a>/<b>/<c> (region/mainKey/subKey)
    and returns the values parsed from the "key: value" lines of the
    response body, in order.
    """
    mpItem = []
    checkedItem = []
    url = 'http://omegapepega.com/' + a + '/' + b + '/' + c
    # url = http://omegapepega.com/region/mainKey/subKey
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(text=True)
    splittedText = results.rsplit('\n')
    # strip surrounding whitespace from every line
    for line in splittedText:
        a = line.rstrip()  # NOTE(review): rebinds parameter `a`
        mpItem.append(a.lstrip())
    mpItem.pop(0)   # drop the opening brace/line
    mpItem.pop(-1)  # drop the closing brace/line
    for i in mpItem:
        try:
            s = i.index(':')
            k = (i[:s])  # key part (currently unused)
            if i.endswith(','):
                v = (i[s+1:-1])  # value without the trailing comma
            else: v = (i[s+1:])
            checkedItem.append(v.strip())
        except:
            # lines without ':' raise ValueError and are skipped
            # NOTE(review): bare except also hides unrelated errors
            continue
    return checkedItem
|
2,789 | f6974c0e5908710031bc3c3bb75c277be426632c | # Greedy Algorithm solves a problem by building a solution incrementally
# The algorithm is greedy because it chooses the next step that gives the most benefit
# Can save a lot of time when used correctly since they don't have to look at the entire problem space
# It's either the most optimal solution or it doesn't work at all, so you have to know for sure when to use it
# It's a short-sighted algorithm since we are only looking to optimize the input, not the entire solution
# Problem 1 JUMP GAME
# given an array of non-negative integers, we are starting at the first index of the array
# each element in the array represents our maximum jump length at that position
# determine if we can reach the last index
# this stands out as a greedy algorithm
#ex. [2,3,1,1,4]
# true since we can go from 2 to 3 to 4, or 2 to 1 to 1 to 4
class Solution:
    # Greedy: track the furthest reachable index while scanning left to right.
    # O(n) time, O(1) extra space.
    def canJump(self, nums):
        """Return True when the last index of nums is reachable from index 0,
        where nums[i] is the maximum jump length from position i."""
        reach = 0
        for idx, jump in enumerate(nums):
            # If this position lies beyond everything reachable so far,
            # the end can never be reached.
            if idx > reach:
                return False
            reach = max(reach, idx + jump)
        return True
if __name__ == "__main__":
    # quick manual check with the example input
    ok = Solution()
    ans = ok.canJump([2,3,1,1,4])
    print(ans)
|
2,790 | 52ebe80e2d520bf07b21dc668223348002eb6d42 | from django.test import TestCase
# Create your tests here.
import pymongo
# Connect to the local MongoDB and dump the url of the first crawled document.
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
db = client.NBA_china_spider
collection = db.data
# Materialize every document in the collection.
data = [title for title in collection.find()]
print(data[0]['url'])
|
2,791 | 5e8f9a222fb2c35b4720e48f0277481e410aee47 | import random
def createRandomPhoneNumber():
    """Return a random 11-digit Chinese mobile number as a string.

    The first three digits come from a list of real carrier prefixes;
    the remaining eight are uniform random digits.
    """
    prefixes = ['130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '150', '151', '152',
                '153', '158', '159', '177', '180', '181', '182', '183', '186', '188', '189']
    suffix = ''.join(str(random.randint(0, 9)) for _ in range(8))
    return random.choice(prefixes) + suffix
|
2,792 | a2593d5b89b9a35d91b0e1011f5b2a23a5a2062e | # -*- coding: utf-8 -*-
import re
import argparse

import utils

# Build the citation graph of articles: for every document in the corpus TSV,
# extract cross-references from its text and write "source_id target_id" edges
# to <corpus>_graph.adj. (Python 2 script; depends on the project's utils module.)

# Command-line arguments.
parser = argparse.ArgumentParser(description="""Gรฉnรจre le graph des articles""")
parser.add_argument('corpus', type=str, help="Le nom du corpus (sans l'extension .tsv')")
parser.add_argument('-v', '--verbose', action='store_true',
                    help="Afficher les messages d'information")
args = parser.parse_args()

corpus_file = args.corpus + '.tsv'
with open(corpus_file) as f:
    # Output adjacency-list file; one "doc_id referenced_id" pair per line.
    o = open(args.corpus + '_graph.adj', 'w')
    f.readline()  # skip the TSV header line
    for i, raw_line in enumerate(f):
        doc = utils.Document(raw_line)
        # Find [label->target] style cross-references embedded in the text.
        renvois = re.findall("\[([^][]*?([[]\w*[]][^][]*)*)->(>?)([^]]*)\]", doc.text)
        for ref in renvois:
            # Target written as an article number, optionally prefixed "art".
            if re.match("(?:art)?(\d+)", ref[3]):
                o.write(doc.id + ' ' + re.match("(?:art)?(\d+)", ref[3]).group(1) + '\n')
            # Target written as a full monde-diplomatique.fr URL; the trailing
            # path component is the article id.
            if re.match("http://(www\.)?monde-diplomatique\.fr/\d{4}/\d{2}/\w*/(\d+)", ref[3]):
                o.write(doc.id + ' ' + re.match("http://(www\.)?monde-diplomatique\.fr/\d{4}/\d{2}/\w*/(\d+)", ref[3]).group(2 ) + '\n')
        if args.verbose:
            print "Article nยฐ%d traitรฉ" % (i)
    o.close()
|
2,793 | 286a47cece7002a88f34ace3e08d013e2d14801a | #!/usr/bin/env python
# Pytest suite for the mailroom3 module. Input is stubbed by monkeypatching
# builtins.input; printed output is captured with redirect_stdout.
from io import StringIO
import sys
from contextlib import redirect_stdout

import pytest

# test input_name():
from mailroom3 import input_name

def test_1(monkeypatch):  # tests "list"
    monkeypatch.setattr('builtins.input', lambda x: "list")
    f = StringIO()
    with redirect_stdout(f):
        input_name()
    testdata = f.getvalue()
    assert testdata == "\n\nJohn Doe\nJane Doe\nJohn Smith\nJane Smith\nBilly Jo Jones\n\n\n"

def test_2(monkeypatch):  # tests ""
    monkeypatch.setattr('builtins.input', lambda x: "")
    testdata = input_name()
    assert testdata == None

def test_3(monkeypatch):  # tests name
    monkeypatch.setattr('builtins.input', lambda x: "John")
    testdata = input_name()
    assert testdata == "John"

# test input_dollars():
from mailroom3 import input_dollars

def test_4(monkeypatch):  # tests name from dict with dollars > 0
    monkeypatch.setattr('builtins.input', lambda x: "500")
    testdata = input_dollars('John Doe')
    assert testdata == 1

def test_5(monkeypatch):  # tests name from dict with value error
    monkeypatch.setattr('builtins.input', lambda x: "a")
    with pytest.raises(ValueError):
        testdata = input_dollars('John Doe')

def test_6(monkeypatch):  # tests name from dict with dollars <= 0
    monkeypatch.setattr('builtins.input', lambda x: "0")
    testdata = input_dollars('John Doe')
    assert testdata == None

def test_7(monkeypatch):  # tests new name with dollars > 0
    monkeypatch.setattr('builtins.input', lambda x: "500")
    testdata = input_dollars('JD')
    assert testdata == 1

def test_8(monkeypatch):  # tests new name with value error
    monkeypatch.setattr('builtins.input', lambda x: "a")
    with pytest.raises(ValueError):
        testdata = input_dollars('JD')

def test_9(monkeypatch):  # tests new name with dollars <= 0
    monkeypatch.setattr('builtins.input', lambda x: "0")
    testdata = input_dollars('JD')
    assert testdata == None

# test sort_by_dollars():
from mailroom3 import sort_by_dollars

def test_11():
    # sort_by_dollars sorts in place, descending by amount.
    donor_lists = [['John Doe', 873.33],['Jane Doe', 3500.04]]
    sort_by_dollars(donor_lists)
    testdata = donor_lists
    assert testdata == [['Jane Doe', 3500.04],['John Doe', 873.33]]

# test create_donor_list():
from mailroom3 import create_donor_list

def test_12():
    testdata = create_donor_list()
    assert testdata == [['Jane Doe', 6124.48], ['John Doe', 1373.33], ['JD', 500], ['John Smith', 462.53], ['Billy Jo Jones', 300.00], ['Jane Smith', 2.00]]

# test def create_gift_list():
from mailroom3 import create_gift_list

def test_13():
    # Each entry becomes [name, total, gift count, average gift].
    temp_donor_lists = create_donor_list()
    testdata = create_gift_list(temp_donor_lists)
    assert testdata == [['Jane Doe', 6124.48, 2, 3062.24], ['John Doe', 1373.33, 4, 343.33], ['JD', 500.00, 1, 500.00], ['John Smith', 462.53, 3, 154.18], ['Billy Jo Jones', 300.00, 3, 100.00], ['Jane Smith', 2.00, 1, 2.00]]

# test print_donor_report():
from mailroom3 import print_donor_report

def test_14():
    temp_data = [['Jane Doe', 6124.48, 2, 3062.24], ['John Doe', 1373.33, 4, 343.33]]
    f = StringIO()
    with redirect_stdout(f):
        print_donor_report(temp_data)
    testdata = f.getvalue()
    assert testdata == "\n\nDonor Name | Total Given | Num Gifts | Average Gift\n---------------------------------------------------------------------\nJane Doe 6124.48 2 3062.24\nJohn Doe 1373.33 4 343.33\n\n\n"

# test plural_donate():
from mailroom3 import plural_donate

def test_15():
    assert plural_donate(1) == 'donation of'

def test_16():
    assert plural_donate(2) == 'donations totaling'

# test total_donate():
from mailroom3 import total_donate

def test_17():
    assert total_donate([120.00, 353.33, 400.00]) == 873.33

def test_18():
    assert total_donate([1, 100.00]) == 101.00

# test end_program():
from mailroom3 import end_program

def test_19():
    # end_program is expected to call sys.exit().
    with pytest.raises(SystemExit):
        end_program()
|
2,794 | 386fa51b9b285d36c75d6446f9348f6713e0dbaa | import os
# Application settings, sourced from environment variables with sane defaults.
WOO_HOST = os.environ.get('WOO_HOST')  # base URL of the WooCommerce shop

# WooCommerce key credentials
WOO_CONSUMER_KEY = os.environ.get('WOO_CONSUMER_KEY')
WOO_CONSUMER_SECRET = os.environ.get('WOO_CONSUMER_SECRET')

# XML feed fields and settings
XML_FEED_FILENAME = os.environ.get('XML_FEED_FILENAME', 'feedXML')
XML_SITE_NAME = os.environ.get('XML_SITE_NAME')
XML_SITE_HOST = os.environ.get('XML_SITE_HOST')
XML_FEED_DESCRIPTION = os.environ.get('XML_FEED_DESCRIPTION', 'Feed XML autogenerated')
XML_CONFIG_FILENAME = os.environ.get('XML_CONFIG_FILENAME', 'config.json')
PRODUCTS_STATUS_CODE = os.environ.get('PRODUCTS_STATUS_CODE', 'publish')
# NOTE(review): presumably a cron hour field ("every 7 hours") — confirm usage.
CRONTAB_HOUR = os.environ.get('CRONTAB_HOUR', '*/7')
REDIS_HOST = os.environ.get('REDIS_HOST', 'redis')
SENTRY_URL = os.environ.get('SENTRY_URL')

# Optional local overrides: any name defined in local_settings.py replaces the
# env-based value above.
try:
    from local_settings import *
except ImportError:
    pass  # local_settings is optional

# Initialize Sentry error reporting only when a DSN was provided
# (possibly via local_settings).
if SENTRY_URL:
    import sentry_sdk
    sentry_sdk.init(SENTRY_URL)
|
2,795 | fe081a422db6b7f10c89179beab852c6b74ec687 | '''
vetor = ["pares de pregos ligados por uma linha"]
indice do vetor representa os pregos na vertical, e o
inteiro em cada pos, os pregos na horizontal.
i(vertical) e j(horizontal) entao:
vetor[i] = j
pregos a(vertical) e pregos b(horizontal)
se a>i and b<j or a<i and b>j
a e i (são índices), b e j (são os elementos salvos na posição)
'''
def merge(p, n, arr=None, buf=None):
    """Sort ``arr[p:p+n]`` in place (merge sort) and return the number of
    inversions in that slice, i.e. pairs i < j with arr[i] > arr[j] — the
    crossing "nail pairs" of the original problem.

    Parameters:
        p:   start index of the slice to process.
        n:   length of the slice.
        arr: the array to sort/count; defaults to the module-level ``vet``
             for backward compatibility with the original script.
        buf: scratch buffer of length >= n; defaults to the module-level
             ``aux``.

    Returns:
        The inversion count of the slice.
    """
    global vet, aux
    if arr is None:
        arr = vet
    if buf is None:
        buf = aux
    if n <= 1:
        return 0
    half = n // 2
    # Count inversions inside each half, then the cross-half ones while merging.
    count = merge(p, half, arr, buf) + merge(p + half, n - half, arr, buf)
    d, a, b = 0, 0, half
    while d < n:
        # Take from the left half when it still has elements and its head is
        # <= the right head (ties go left: equal elements are NOT inversions —
        # the original used '<', which miscounted duplicates).
        if a != half and (b == n or arr[p + a] <= arr[p + b]):
            buf[d] = arr[p + a]
            a += 1
        else:
            buf[d] = arr[p + b]
            # BUG FIX: every element still waiting in the left half forms an
            # inversion with this right-half element, i.e. (half - a) pairs.
            # The original added (n//2 + a), overcounting whenever a > 0.
            count += half - a
            b += 1
        d += 1
    # Copy the merged result back into the array slice.
    for i in range(n):
        arr[p + i] = buf[i]
    return count
# Driver: read the element count, then the array, and print the number of
# inversions (crossing nail pairs) computed by merge sort.
entrada = int(input())                    # number of elements; assumes it matches the line below — TODO confirm
vet = [int(x) for x in input().split()]   # the array; merge() reads it via module globals
aux = [0] * entrada                       # scratch buffer used by merge()
print(merge(0, entrada))
|
2,796 | a077221d91f75645172ba5d86afad8e49cb7ed2f | #!/usr/bin/python
import calendar

# Python 2 script: prompt for a year and report whether it is a leap year,
# using the Gregorian rules implemented by calendar.isleap.
a = int(raw_input("enter the year to check that year is leap year or not\n"))
cal = calendar.isleap(a)  # True iff the year is a leap year
if cal:
    print "leap year"
else:
    print "not a leap year"
print "\nthanks "
# Leftover empty docstring literal below; it is a harmless no-op expression.
'''
'''
2,797 | 1f385fda1bdc0008ff91b935998c95c8ffcbd297 | tej="votary"
# Print the first five characters of tej, one per line.
for ch in tej[:5]:
    print(ch)
|
2,798 | c77e320cee90e8210e4c13d854649b15f6e24180 | from .ctoybox import Game, State as FrameState, Input
import numpy as np
from PIL import Image
import json
from typing import Dict, Any, List, Tuple, Union, Optional
def json_str(js: "Union[Dict[str, Any], Input, str]") -> str:
    """
    Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings).

    Parameters:
        js: a dict to serialize, an Input whose ``__dict__`` is serialized,
            or an already-serialized JSON string returned unchanged.

    Returns:
        The JSON text.

    Raises:
        ValueError: if ``js`` is none of the supported types.
    """
    # isinstance is the idiomatic type test; unlike the original
    # ``type(js) is dict`` it also accepts subclasses (e.g. OrderedDict),
    # which previously fell through to the ValueError.
    if isinstance(js, dict):
        return json.dumps(js)
    if isinstance(js, Input):
        return json.dumps(js.__dict__)
    if isinstance(js, str):
        return js
    raise ValueError(
        "Unknown json type: %s (only str and dict supported)" % type(js)
    )
class Simulator(object):
    """
    The Simulator is an instance of a game configuration.
    You can call new_game on it to begin.
    """

    def __init__(self, game_name, sim=None):
        """
        Construct a new instance.

        Parameters:
            game_name: one of "breakout", "amidar", etc.
            sim: optionally a Rust pointer to an existing simulator.
        """
        if sim is None:
            sim = Game(game_name)
        self.__sim = sim
        # sim should be a pointer (Rust-side Game object)
        self.game_name = game_name

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release explicitly; the Rust-side object is reclaimed by GC.
        pass

    def set_seed(self, seed: int):
        """Configure the random number generator that spawns new game states.

        Parameters:
            seed: a parameter to reset the built-in random number generator.
        """
        self.__sim.seed(seed)

    def get_frame_size(self) -> Tuple[int, int]:
        """Get the (width, height) in pixels of the frames this game renders."""
        return self.__sim.frame_size()

    def get_frame_width(self) -> int:
        """Get the width in pixels of the frames this game renders."""
        return self.__sim.frame_size()[0]

    def get_frame_height(self) -> int:
        """Get the height in pixels of the frames this game renders."""
        return self.__sim.frame_size()[1]

    def get_simulator(self) -> Game:
        """Get access to the raw simulator pointer."""
        return self.__sim

    def new_game(self) -> "State":
        """Start a new game."""
        return State(self, self.__sim.new_game())

    def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State":
        """Generate a State from the state json and this configuration.

        Parameters:
            js: a JSON object or string containing a serialized state.
        """
        state: FrameState = self.__sim.new_state(json_str(js))
        return State(self, state=state)

    def to_json(self) -> Dict[str, Any]:
        """Get the configuration of this simulator/config as JSON"""
        return json.loads(self.__sim.to_json())

    def from_json(self, config_js: Union[Dict[str, Any], str]):
        """Mutably update this simulator/config with the replacement json."""
        self.__sim = self.__sim.from_json(json_str(config_js))

    def schema_for_state(self) -> Dict[str, Any]:
        """Get the JSON Schema for any state for this game."""
        return json.loads(self.__sim.frame_schema())

    def schema_for_config(self) -> Dict[str, Any]:
        """Get the JSON Schema for any config for this game."""
        return json.loads(self.__sim.config_schema())
class State(object):
    """
    The State object represents everything the game needs to know about any single simulated frame.
    You can rewind in time by storing and restoring these state representations.

    - Access the json: ``to_json``
    - Access the image: ``render_frame``
    """

    def __init__(self, sim: Simulator, state=None):
        """
        Construct a new State instance wrapper.

        Parameters:
            sim: The simulator responsible for this state.
            state: Optional pointer to a state to use (otherwise it will create one).
        """
        # A reference to the simulator that created this state.
        self.sim = sim
        # The raw pointer to the state itself.
        # BUG FIX: the original read ``sim.__sim`` here; inside class State
        # that name-mangles to ``sim._State__sim``, which does not exist (the
        # attribute lives on Simulator), so State(sim) with no state always
        # raised AttributeError. Use the public accessor instead. ``is None``
        # also avoids discarding a falsy-but-valid state object.
        self.__state = state if state is not None else sim.get_simulator().new_game()
        # The name of the game that created this state.
        self.game_name = sim.game_name

    def __enter__(self):
        return self

    def __del__(self):
        # Drop references so the Rust-side memory can be reclaimed.
        self.__state = None
        self.sim = None

    def __exit__(self, exc_type, exc_value, traceback):
        self.__del__()

    def clone(self) -> 'State':
        """Quickly make a copy of this state; should be more efficient than saving the JSON."""
        return State(self.sim, state=self.get_state().copy())

    def get_state(self) -> FrameState:
        """Get the raw state pointer.

        Raises:
            AssertionError: if this wrapper has already been released.
        """
        assert self.__state is not None
        return self.__state

    def lives(self) -> int:
        """How many lives are remaining in the current state?"""
        return self.__state.lives()

    def level(self) -> int:
        """How many levels have been completed in the current state?"""
        return self.__state.level()

    def score(self) -> int:
        """How many points have been earned in the current state?"""
        return self.__state.score()

    def game_over(self):
        """Determine whether the game has ended; i.e., the player has run out of lives.

        Equivalent to ``self.lives() < 0``.
        """
        return self.lives() < 0

    def query_json(
        self, query: str, args: Union[Dict[str, Any], str] = "null"
    ) -> Dict[str, Any]:
        """
        Ask a question of the Rust state; queries are currently implemented manually.

        Parameters:
            query: the message to send to the rust state.
            args: the arguments to send to the rust state, defaults to "null".

        Returns:
            response: A JSON response loaded to python objects.

        Raises:
            ValueError: if anything goes wrong with the query

        ```python
        with Toybox("breakout") as tb:
            tb.query_json("bricks_remaining")
        ```
        """
        return json.loads(self.__state.query(json_str(query), json_str(args)))

    def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array:
        """Generate an image from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
            grayscale: True if we want to render in grayscale rather than in color (RGBA).
        """
        # NOTE(review): grayscale=True returns the 3-channel RGB render and
        # grayscale=False the 4-channel RGBA render — the flag name looks
        # misleading, but the mapping is preserved exactly as the original;
        # confirm against the ctoybox documentation before changing it.
        if grayscale:
            return self.render_frame_rgb(sim)
        else:
            return self.render_frame_color(sim)

    def render_frame_color(self, sim: Simulator) -> np.array:
        """Generate an RGBA image, shape (h, w, 4), from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        (w, h) = sim.get_frame_size()
        rgba = 4
        size = h * w * rgba
        frame = bytearray(size)
        self.get_state().render_into_buffer(frame, True)
        return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)

    def render_frame_rgb(self, sim: Simulator) -> np.array:
        """Generate an RGB image, shape (h, w, 3), by dropping the alpha channel.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        rgba_frame = self.render_frame_color(sim)
        return rgba_frame[:, :, :3]

    def render_frame_grayscale(self, sim: Simulator) -> np.array:
        """Generate a grayscale image, shape (h, w, 1), from the current frame state object.

        Parameters:
            sim: the simulator to use; this tells us the width/height necessary.
        """
        (w, h) = sim.get_frame_size()
        depth = 1
        size = h * w * depth
        frame = bytearray(size)
        self.get_state().render_into_buffer(frame, False)
        return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth)

    def to_json(self) -> Dict[str, Any]:
        """Get a JSON representation of the state."""
        return json.loads(self.get_state().to_json())
class Toybox(object):
    """
    This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks:

    ```python
    with Toybox("amidar") as tb:
        print(tb.get_score())
    # the 'tb' variable only lives in the block.
    ```

    Important:
        Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks.
    """

    def __init__(
        self,
        game_name: str,
        grayscale: bool = True,
        frameskip: int = 0,
        seed: Optional[int] = None,
        withstate: Optional[dict] = None,
    ):
        """
        Construct a new Toybox state/game wrapper. Use this in a with block!

        Parameters:
            game_name: One of "breakout", "space_invaders", "amidar", etc.
            grayscale: Toybox can render directly to grayscale, saving time. Default is True.
            frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0.
            seed: Optional RNG seed; when given, the initial game is regenerated under it.
            withstate: Optional serialized state (JSON dict) to restore immediately.
        """
        self.game_name = game_name
        self.frames_per_action = frameskip + 1
        self.rsimulator = Simulator(game_name)
        self.rstate = self.rsimulator.new_game()
        self.grayscale = grayscale
        # BUG FIX: the original tested ``if seed:``, which silently ignored
        # seed=0 even though 0 is a valid seed value.
        if seed is not None:
            self.set_seed(seed)
            self.new_game()  # re-roll the initial state under the new seed
        if withstate:
            self.write_state_json(withstate)

    def new_game(self):
        """
        Modify this Toybox wrapper to have a new_game state.

        Important:
            This discards the old state!
        """
        old_state = self.rstate
        del old_state
        self.rstate = self.rsimulator.new_game()

    def get_height(self) -> int:
        """Get the height of the rendered game in pixels."""
        return self.rsimulator.get_frame_height()

    def get_width(self) -> int:
        """Get the width of the rendered game in pixels."""
        return self.rsimulator.get_frame_width()

    def get_legal_action_set(self) -> List[int]:
        """Get the set of actions consumed by this game: they are ALE numbered."""
        sim = self.rsimulator.get_simulator()
        return sim.legal_actions()

    def apply_ale_action(self, action_int: int):
        """Takes an integer corresponding to an action, as specified in ALE.

        This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.

        ```python
        ALE_INPUT_MAPPING = {
            0 : "NOOP",
            1 : "FIRE",
            2 : "UP",
            3 : "RIGHT",
            4 : "LEFT",
            5 : "DOWN",
            6 : "UPRIGHT",
            7 : "UPLEFT",
            8 : "DOWNRIGHT",
            9 : "DOWNLEFT",
            10 : "UPFIRE",
            11 : "RIGHTFIRE",
            12 : "LEFTFIRE",
            13 : "DOWNFIRE",
            14 : "UPRIGHTFIRE",
            15 : "UPLEFTFIRE",
            16 : "DOWNRIGHTFIRE",
            17 : "DOWNLEFTFIRE"
        }
        ```

        Parameters:
            action_int: A number from 0 to 17 inclusive.

        Raises:
            ValueError: if the underlying state rejects the action.
        """
        # implement frameskip(k) by sending the action (k+1) times every time we have an action.
        for _ in range(self.frames_per_action):
            if not self.rstate.get_state().apply_ale_action(action_int):
                raise ValueError(
                    "Expected to apply action, but failed: {0}".format(action_int)
                )

    def apply_action(self, action_input_obj: Input):
        """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing.

        This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor.

        Parameters:
            action_input_obj: An instance of the [ctoybox.Input][] class.
        """
        # implement frameskip(k) by sending the action (k+1) times every time we have an action.
        for _ in range(self.frames_per_action):
            self.rstate.get_state().apply_action(action_input_obj)

    def get_state(self) -> np.array:
        """This state here actually refers to the graphical, RGBA or grayscale representation of the current state."""
        return self.rstate.render_frame(self.rsimulator, self.grayscale)

    def set_seed(self, seed: int):
        """Control the random number generator of the config -- only affects a new_game.

        Parameters:
            seed: a parameter to reset the built-in random number generator.
        """
        self.rsimulator.set_seed(seed)
        # Deliberately does NOT start a new game; callers decide when the
        # seed should take effect by calling new_game() themselves.

    def save_frame_image(self, path: str, grayscale: bool = False):
        """Save the current frame image to a PNG file.

        Parameters:
            path: the filename to save to.
            grayscale: whether images should be saved in color or black & white.
        """
        img = None
        if grayscale:
            # NOTE(review): render_frame_grayscale returns shape (h, w, 1)
            # while PIL's "L" mode expects (h, w) — confirm this path works.
            img = Image.fromarray(
                self.rstate.render_frame_grayscale(self.rsimulator), "L"
            )
        else:
            img = Image.fromarray(
                self.rstate.render_frame_color(self.rsimulator), "RGBA"
            )
        img.save(path, format="png")

    def get_rgb_frame(self) -> np.array:
        """Get the RGB frame as a numpy array."""
        return self.rstate.render_frame_rgb(self.rsimulator)

    def get_score(self) -> int:
        """Access the current score.

        Returns:
            The number of points earned in the current state."""
        return self.rstate.score()

    def get_lives(self) -> int:
        """Access the number of lives.

        Returns:
            The number of lives remaining in the current state."""
        return self.rstate.lives()

    def get_level(self) -> int:
        """
        Access the number of levels.

        Returns:
            The number of levels completed in the current state."""
        return self.rstate.level()

    def game_over(self) -> bool:
        """
        Check for game over condition.

        Returns:
            ``True`` if the player has run out of lives in the current state.
        """
        return self.rstate.game_over()

    def state_to_json(self) -> Dict[str, Any]:
        """Get the state's JSON representation as a python object."""
        return self.rstate.to_json()

    def to_state_json(self) -> Dict[str, Any]:
        """Get the state's JSON representation as a python dict.

        Important:
            This method is deprecated; please use ``state_to_json`` instead!
        """
        return self.state_to_json()

    def config_to_json(self) -> Dict[str, Any]:
        """Get the config's JSON representation as a python dict."""
        return self.rsimulator.to_json()

    def write_state_json(self, js: Dict[str, Any]):
        """Overwrite the state's JSON representation from a python dict.

        Parameters:
            js: the python representation of the JSON state.
        """
        old_state = self.rstate
        del old_state
        self.rstate = self.rsimulator.state_from_json(js)

    def write_config_json(self, config_js: Dict[str, Any]):
        """Overwrite the config's JSON representation from a python dict.

        It is likely that some changes will not be seen until you call new_game()

        Parameters:
            config_js: the python representation of the config JSON
        """
        # from_json replaces simulator!
        self.rsimulator.from_json(config_js)
        # new_game replaces state!
        self.new_game()

    def query_state_json(
        self, query: str, args: Union[Dict[str, Any], str] = "null"
    ) -> Dict[str, Any]:
        """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection.

        Parameters:
            query: the query string to send to the game.
            args: a JSON argument to attach to the query string.
        """
        return self.rstate.query_json(query, args)

    def __del__(self):
        # Drop references so the Rust-side memory can be reclaimed.
        self.rstate = None
        self.rsimulator = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.__del__()

    def schema_for_state(self) -> Dict[str, Any]:
        """Get the JSON Schema for the frame State object."""
        return self.rsimulator.schema_for_state()

    def schema_for_config(self) -> Dict[str, Any]:
        """Get the JSON Schema for the Config object."""
        return self.rsimulator.schema_for_config()
|
2,799 | dce496c9ae6605e95ffbbb2885ec15b19fb756ef | ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('WilkJMC3.py', 1), ('LeakWTI3.py', 1), ('AubePRP.py', 2), ('GellWPT.py', 2), ('AdamWEP.py', 1), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('WadeJEB.py', 1), ('SoutRD.py', 2), ('WheeJPT.py', 1), ('HowiWRL2.py', 1), ('WilkJMC.py', 1), ('WestJIT.py', 1), ('DequTKM.py', 2), ('StorJCC.py', 1), ('DibdTRL.py', 1), ('TaylIF.py', 1), ('ThomWEC.py', 1)] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.