blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30044b5829a883a71c6f425fb8f3121b747e8d53 | e1f9baea0f9ad270fe6cd26817b8f58666e1e8ed | /fig1_projection_newplot.py | 75d9563aa13cc1729e33a1ed7d6a3ede81af159d | [] | no_license | tyoo37/fesc_project | 4f2747ab814f2d9ae716fc88d8f0a82abf1c0d07 | d4211a41f75042dcc63a1a5d8895b789491c403e | refs/heads/main | 2023-02-28T14:56:30.508354 | 2021-02-06T06:43:59 | 2021-02-06T06:43:59 | 336,473,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,649 | py | import numpy as np
from scipy.io import FortranFile
from scipy.io import readsav
import matplotlib.pyplot as plt
import time
import os.path
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from astropy.cosmology import FlatLambdaCDM
from astropy.visualization import ZScaleInterval,ImageNormalize
import matplotlib.pyplot as plt
from skimage.transform import rescale
#from skimage import data, color
from scipy import stats
from scipy.ndimage import gaussian_filter
# --- Simulation run directories --------------------------------------------
# Each path points to one RAMSES-RT run (naming: metallicity / SN feedback /
# resolution variant). Local volumes; adjust when running on another machine.
read_old_01Zsun = '/Volumes/THYoo/RHD_10pc/'
read_old_002Zsun = '/Volumes/THYoo/RHD_10pc_lowZ/'
read_new_01Zsun = '/Volumes/THYoo/kisti/RHD_10pc_0.1Zsun/'
read_new_1Zsun = '/Volumes/THYoo/kisti/RHD_10pc_1Zsun/'
read_new_01Zsun_re = '/blackwhale/dbahck37/kisti/0.1Zsun/'
read_new_1Zsun_re = '/blackwhale/dbahck37/kisti/1Zsun/'
read_new_gasrich = '/Volumes/THYoo/kisti/RHD_10pc_gasrich/G9_gasrich/'
read_new_1Zsun_highSN_old = '/Volumes/gdrive/1Zsun_SNen_old/'
read_new_1Zsun_highSN_new = '/Volumes/gdrive/1Zsun_SNen_new/'
read_new_03Zsun_highSN = '/Volumes/gdrive/0.3Zsun_SNen/'
read_new_01Zsun_05pc = '/Volumes/gdrive/0.1Zsun_5pc/'
# --- Global matplotlib styling ---------------------------------------------
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] =15
plt.rcParams['axes.labelsize'] = 15
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 15
plt.rcParams['ytick.labelsize'] = 15
plt.rcParams['figure.titlesize'] = 13
#plt.rc('text', usetex=True)
#plt.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
#plt.switch_backend('agg')
class Part():
    """Star and dark-matter particle data for one snapshot.

    Reads ``SAVE/part_%05d.sav`` (IDL save file) and converts code units to
    physical units: positions to pc, masses to Msun, times to Myr.
    """
    def __init__(self, dir, nout):
        self.dir = dir
        self.nout = nout
        partdata = readsav(self.dir + '/SAVE/part_%05d.sav' % (self.nout))
        # code time -> Myr (4.70430e14 s per code time unit — TODO confirm
        # against the run's info file)
        self.snaptime = partdata.info.time*4.70430e14/365/3600/24/1e6
        pc = 3.08e18  # cm per parsec
        self.boxpc = partdata.info.boxlen * partdata.info.unit_l / pc
        self.boxlen = partdata.info.boxlen
        xp = partdata.star.xp[0]
        # star positions in pc
        self.xp = xp * partdata.info.unit_l / 3.08e18
        self.unit_l = partdata.info.unit_l
        self.unit_d = partdata.info.unit_d
        # stellar age in Myr (snapshot time minus birth time)
        self.starage = self.snaptime - partdata.star.tp[0]*4.70430e14/365/3600/24/1e6
        self.starid = np.abs(partdata.star.id[0])
        # initial stellar mass in Msun (unit_d * unit_l^3 / Msun)
        self.mp0 = partdata.star.mp0[0] * partdata.info.unit_d * partdata.info.unit_l / 1.989e33 * partdata.info.unit_l*partdata.info.unit_l
        tp = (partdata.info.time-partdata.star.tp[0]) * 4.70430e14 / 365. /24./3600/1e6
        # SFR averaged over stars younger than 10 Myr (mass / 1e7 yr)
        sfrindex = np.where((tp >= 0) & (tp < 10))[0]
        self.SFR = np.sum(self.mp0[sfrindex]) / 1e7
        # dark-matter particle positions (pc) and masses (Msun)
        self.dmxp = partdata.part.xp[0] * partdata.info.unit_l / 3.08e18
        dmm = partdata.part.mp[0]
        self.dmm = dmm * partdata.info.unit_d * partdata.info.unit_l / 1.989e33 * partdata.info.unit_l * partdata.info.unit_l
        # presumably a mass cut to select true DM particles — TODO confirm
        dmindex = np.where(dmm > 2000)
        self.dmxpx = self.dmxp[0][dmindex]
        self.dmxpy = self.dmxp[1][dmindex]
        self.dmxpz = self.dmxp[2][dmindex]
class Fesc_rjus():
    """LyC escape fraction measured at radius ``rjus``.

    Reads a Fortran unformatted ray-tracing dump; the sequential read order
    below must match the writer exactly.
    """
    def __init__(self,read,nout,rjus):
        dat = FortranFile(read+'ray_nside4_%3.2f/ray_%05d.dat' % (rjus, nout), 'r')
        npart, nwave2 = dat.read_ints()
        wave = dat.read_reals(dtype=np.double)      # wavelength grid (unused here)
        sed_intr = dat.read_reals(dtype=np.double)  # intrinsic SED (unused)
        sed_attH = dat.read_reals(dtype=np.double)  # presumably H-attenuated SED (unused)
        sed_attD = dat.read_reals(dtype=np.double)  # presumably dust-attenuated SED (unused)
        npixel = dat.read_ints()
        tp = dat.read_reals(dtype='float32')
        fescH = dat.read_reals(dtype='float32')
        # per-source escape fraction including dust, and per-source photon rate
        self.fescD = dat.read_reals(dtype='float32')
        self.photonr = dat.read_reals(dtype=np.double)
        # photon-rate-weighted mean escape fraction of the snapshot
        self.fesc = np.sum(self.fescD*self.photonr)/np.sum(self.photonr)
class Fesc_indSED():
    """Escape fractions and broadband fluxes from the per-star SED ray dump.

    Reads ``ray_indSED/ray_%05d.dat``; read order must match the writer.
    """
    def __init__(self, dir, nout):
        dat2 = FortranFile(dir + 'ray_indSED/ray_%05d.dat' % (nout), 'r')
        npart, nwave2, version = dat2.read_ints()
        wave = dat2.read_reals(dtype=np.double)
        sed_intr = dat2.read_reals(dtype=np.double)
        sed_attHHe = dat2.read_reals(dtype=np.double)
        sed_attHHeD = dat2.read_reals(dtype=np.double)
        sed_attHHI = dat2.read_reals(dtype=np.double)
        sed_attHH2 = dat2.read_reals(dtype=np.double)
        # NOTE(review): sed_attHHe is re-bound here, discarding the record read
        # above — likely the two records have different meanings; verify
        # against the Fortran writer.
        sed_attHHe= dat2.read_reals(dtype=np.double)
        sed_attD= dat2.read_reals(dtype=np.double)
        npixel = dat2.read_ints()
        tp = dat2.read_reals(dtype='float32')
        self.fescH = dat2.read_reals(dtype='float32')
        self.fescD = dat2.read_reals(dtype='float32')
        self.photonr = dat2.read_reals(dtype=np.double)
        idp = dat2.read_ints()
        # per-star broadband fluxes
        self.rflux = dat2.read_reals(dtype=np.double)
        self.gflux = dat2.read_reals(dtype=np.double)
        self.bflux = dat2.read_reals(dtype=np.double)
        # photon-rate-weighted mean escape fractions
        self.fesc = np.sum(self.fescD * self.photonr) / np.sum(self.photonr)
        self.fesc2 = np.sum(self.fescH * self.photonr) / np.sum(self.photonr)
        # NOTE(review): identical to fesc2 — probably intended as the
        # "without dust" value; kept as written.
        self.fescwodust = np.sum(self.fescH * self.photonr) / np.sum(self.photonr)
class Cell():
    """AMR gas-cell data for one snapshot, read from ``SAVE/cell_%05d.sav``.

    Derives number densities, temperature, mass and refinement level for each
    leaf cell. Positional indices into ``celldata.cell[0][4]`` address the
    hydro variable array (0: density, 5: pressure, 7: xHI, 8: xHII,
    9/10: He ionisation fractions — inferred from usage; confirm against the
    run's variable layout).
    """
    def __init__(self, dir, nout, Part):
        self.dir = dir
        self.nout = nout
        celldata = readsav(self.dir + '/SAVE/cell_%05d.sav' % (self.nout))
        # code density -> hydrogen number density (cm^-3); 30.996344 is the
        # run-specific conversion factor — TODO confirm
        self.nH = celldata.cell[0][4][0] * 30.996344
        self.x = celldata.cell.x[0] * Part.boxpc
        self.y = celldata.cell.y[0] * Part.boxpc
        self.z = celldata.cell.z[0] * Part.boxpc
        self.dx = celldata.cell.dx[0] * Part.boxpc
        self.mindx = np.min(celldata.cell.dx[0])
        self.nHI = self.nH * celldata.cell[0][4][7]
        nHII = self.nH * celldata.cell[0][4][8]
        # molecular hydrogen: whatever H is neither HI nor HII, per H2 molecule
        nH2 = self.nH * (1 - celldata.cell[0][4][7] - celldata.cell[0][4][8])/2
        # He abundance by number for Y=0.24
        YY= 0.24/(1-0.24)/4
        nHeII = self.nH * YY*celldata.cell[0][4][9]
        nHeIII = self.nH * YY*celldata.cell[0][4][10]
        nHeI = self.nH * YY*(1 - celldata.cell[0][4][9] - celldata.cell[0][4][10])
        # free electrons from HII, HeII and doubly ionised HeIII
        ne = nHII + nHeII + nHeIII *2
        ntot = self.nHI + nHII + nHeI + nHeII + nHeIII + ne + nH2
        # mean molecular weight
        mu = celldata.cell[0][4][0] * Part.unit_d / 1.66e-24 / ntot
        # cell gas mass in Msun
        self.m = celldata.cell[0][4][0] *Part.unit_d * Part.unit_l / 1.989e33 * Part.unit_l *Part.unit_l *(celldata.cell.dx[0]*Part.boxlen)**3
        # temperature from P/rho; 517534.72 converts code T/mu to K — TODO confirm
        self.T = celldata.cell[0][4][5]/celldata.cell[0][4][0] * 517534.72 * mu
        self.xHI =celldata.cell[0][4][7]
        self.xHII=celldata.cell[0][4][8]
        self.xH2 = (1 - celldata.cell[0][4][7] - celldata.cell[0][4][8])/2
        # AMR refinement level per cell: dx = 2^-lev in box units
        self.lev = np.round(np.log2(1 / celldata.cell.dx[0]), 0).astype(int)
        print('minmax', np.min(self.lev),np.max(self.lev))
        self.minlev = round(np.log2(1 / np.max(celldata.cell.dx[0])))
        self.maxlev = round(np.log2(1 / np.min(celldata.cell.dx[0])))
class Cellfromdat():
    """AMR gas-cell data read from a Fortran unformatted ``dat/cell_*.dat``.

    Same role as :class:`Cell` but for runs whose cells were dumped in the
    binary format (positions, sizes, then ``nvarh`` hydro variable records).
    """
    def __init__(self, dir, nout, Part):
        celldata = FortranFile(dir + 'dat/cell_%05d.dat' % nout, 'r')
        nlines,xx,nvarh=celldata.read_ints(dtype=np.int32)
        xc = celldata.read_reals(dtype=np.double)
        yc = celldata.read_reals(dtype=np.double)
        zc = celldata.read_reals(dtype=np.double)
        dxc = celldata.read_reals(dtype=np.double)
        # positions and cell sizes in pc
        self.x = xc * Part.boxpc
        self.y = yc * Part.boxpc
        self.z = zc * Part.boxpc
        self.dx = dxc * Part.boxpc
        # hydro variables, one record per variable
        var = np.zeros((nlines,nvarh))
        for i in range(nvarh):
            var[:,i] = celldata.read_reals(dtype=np.double)
        # var[:,0] is density; 30.996344 converts to nH (cm^-3) — TODO confirm
        self.nH = var[:, 0] * 30.996344
        # cell gas mass in Msun
        self.m = var[:, 0] * Part.unit_d * Part.unit_l / 1.989e33 * Part.unit_l * Part.unit_l * (
                dxc * Part.boxlen) ** 3
        self.xHI = var[:,7]
        self.xH2=(1 - var[:,7] - var[:,8])/2
        xHII = var[:,8]
        # neutral (HI + H2) gas mass
        self.mHIH2 = self.m * (1-xHII)
        # refinement level: dx = 2^-lev in box units
        self.lev = np.round(np.log2(1/dxc),0)
        self.minlev = round(np.log2(1/np.max(dxc)))
        self.maxlev = round(np.log2(1/np.min(dxc)))
class GasrichCell():
    """Gas-cell data for the gas-rich run, whose sav file exposes ``nh``
    as a named field instead of the positional hydro array used by Cell."""
    def __init__(self, dir, nout, Part):
        self.dir = dir
        self.nout = nout
        print(nout)
        celldata = readsav(self.dir + '/SAVE/cell_%05d.sav' % (self.nout))
        # code density -> nH (cm^-3)
        self.nH = celldata.cell.nh[0] * 30.996344
        self.x = celldata.cell.x[0] * Part.boxpc
        self.y = celldata.cell.y[0] * Part.boxpc
        self.z = celldata.cell.z[0] * Part.boxpc
        self.dx = celldata.cell.dx[0] * Part.boxpc
        self.mindx = np.min(celldata.cell.dx[0])
        # cell gas mass in Msun
        self.m = celldata.cell.nh[0] *Part.unit_d * Part.unit_l / 1.989e33 * Part.unit_l *Part.unit_l *(celldata.cell.dx[0]*Part.boxlen)**3
class Clump():
    """Gas-clump catalogue read from a clump-finder text dump.

    Positions are converted to pc, masses to Msun; ``rclump`` is the radius
    of a uniform sphere with the clump's mass and mean density
    (columns: 4-6 position, 9 density, 10 mass — code units).
    """
    def __init__(self, dir, nout, Part):
        self.dir = dir
        self.nout = nout
        unit_d = Part.unit_d
        unit_l = Part.unit_l
        # BUG FIX: the original compared against ``read_new_01Zsun_lya0ff_re``,
        # which is not defined at module level in this file and raised a
        # NameError on every instantiation. globals().get() keeps the same
        # comparison when the constant exists and otherwise falls through to
        # the default 'clump3' path (a directory string never equals None).
        if dir == globals().get('read_new_01Zsun_lya0ff_re'):
            clumpdata = np.loadtxt(self.dir + '/dat/clump_%05d.txt' % (self.nout),
                                   dtype=np.double)
        else:
            clumpdata = np.loadtxt(self.dir + '/clump3/clump_%05d.txt' % (self.nout),
                                   dtype=np.double)
        # clump centres in pc
        self.xclump = clumpdata[:, 4] * Part.unit_l / 3.08e18
        self.yclump = clumpdata[:, 5] * Part.unit_l / 3.08e18
        self.zclump = clumpdata[:, 6] * Part.unit_l / 3.08e18
        # clump mass in Msun
        self.massclump = clumpdata[:,
                         10] * unit_d * unit_l / 1.989e33 * unit_l * unit_l
        # radius (pc) of a sphere with mass m and density rho: (3m/4*pi*rho)^(1/3)
        self.rclump = (clumpdata[:, 10] / clumpdata[:, 9] / 4 / np.pi * 3) ** (0.333333) * unit_l / 3.08e18
        self.nclump = len(self.xclump)
def minmax(var):
    """Return the (min, max) pair of *var*."""
    arr = np.asarray(var)
    return arr.min(), arr.max()
class new_projection:
    """Build and plot a 2-D projection of an AMR variable around a centre.

    The constructor bins cells level by level into 2-D histograms on the root
    grid covering the requested window, upsamples the running image by 2x at
    each finer level, and finally crops to the requested width. Lengths are
    in pc; plotting is done in kpc. ``projection`` selects 'xy' or 'xz'.
    """
    def __init__(self, Cell, xcenter, ycenter, zcenter, xwid, ywid, zwid, var, projection):
        maxlev = Cell.maxlev
        minlev = Cell.minlev
        numlev = int(maxlev - minlev + 1)
        # finest cell size (pc); all pixel indices are in units of mindx
        mindx = np.min(Cell.dx)
        self.mindx = mindx
        self.var = var
        start = time.time()
        # centre and half-widths in finest-cell pixels
        self.xcenter = int(xcenter / mindx)
        self.ycenter = int(ycenter / mindx)
        self.zcenter = int(zcenter / mindx)
        self.xwid = xwid
        self.ywid = ywid
        self.zwid = zwid
        self.xwid2 = int(xwid / mindx)
        self.ywid2 = int(ywid / mindx)
        self.zwid2 = int(zwid / mindx)
        # cell coordinates in root-grid units (2^(maxlev-minlev) finest pixels
        # per root cell)
        xind_root = (Cell.x)/mindx/2**(maxlev-minlev)
        yind_root = (Cell.y)/mindx/2**(maxlev-minlev)
        zind_root = (Cell.z)/mindx/2**(maxlev-minlev)
        #select the root grids which are interested
        xcenind_root = int(xcenter/mindx/2**(maxlev-minlev))
        ycenind_root = int(ycenter/mindx/2**(maxlev-minlev))
        zcenind_root = int(zcenter/mindx/2**(maxlev-minlev))
        xwidind_root = int(xwid/mindx / 2**(maxlev-minlev))
        ywidind_root = int(ywid/mindx / 2**(maxlev-minlev))
        zwidind_root = int(zwid/mindx / 2**(maxlev-minlev))
        # root-grid window, padded by one root cell on each side
        xstaind_root = int(xcenind_root-1-xwidind_root)
        xendind_root = int(xcenind_root+1+xwidind_root)
        ystaind_root = int(ycenind_root-1-ywidind_root)
        yendind_root = int(ycenind_root+1+ywidind_root)
        zstaind_root = int(zcenind_root-1-zwidind_root)
        zendind_root = int(zcenind_root+1+zwidind_root)
        #print(self.xcenter,xwid2,xcenind_root,xwidind_root)
        sumvol=0
        numind=0
        numind3=0
        ind2 = np.where((xind_root>=xstaind_root) & (xind_root<=xendind_root+1) & (yind_root>=ystaind_root) & (yind_root<=yendind_root+1) & (zind_root>=zstaind_root) & (zind_root<=zendind_root+1))
        # print('minmax',minmax(xind_root),minmax(yind_root),minmax(zind_root))
        # window bounds in pc, aligned to root-cell edges
        zmin = (zstaind_root) * 2 ** (maxlev - minlev) * self.mindx
        zmax = (zendind_root+1) * 2 ** (maxlev - minlev) * self.mindx
        xmin = (xstaind_root) * 2 ** (maxlev - minlev) * self.mindx
        xmax = (xendind_root+1) * 2 ** (maxlev - minlev) * self.mindx
        ymin = (ystaind_root) * 2 ** (maxlev - minlev) * self.mindx
        ymax = (yendind_root+1) * 2 ** (maxlev - minlev) * self.mindx
        if projection =='xy':
            histrange = [[xmin, xmax], [ymin, ymax]]
        if projection == 'xz':
            histrange = [[xmin, xmax], [zmin, zmax]]
        ind4 = np.where((Cell.x-Cell.dx/2>=xmin)&(Cell.x+Cell.dx/2<=xmax)&(Cell.y-Cell.dx/2>=ymin)&(Cell.y+Cell.dx/2<=ymax)&(Cell.z-Cell.dx/2>=zmin)&(Cell.z+Cell.dx/2<=zmax))
        # accumulate one histogram per refinement level, coarse to fine
        for n in range(numlev):
            lev = minlev + n
            #ind3 = np.where((Cell.dx==mindx*(2**(maxlev-minlev-n)))&(Cell.x-Cell.dx/2>=xmin)&(Cell.x+Cell.dx/2<=xmax)&(Cell.y-Cell.dx/2>=ymin)&(Cell.y+Cell.dx/2<=ymax)&(Cell.z-Cell.dx/2>=zmin)&(Cell.z+Cell.dx/2<=zmax))
            #numind3 = numind3 + len(ind3[0])
            # cells of this level inside the root-grid window
            ind = np.where((Cell.lev.astype(int)==lev) & (xind_root>=xstaind_root) & (xind_root<xendind_root+1) & (yind_root>=ystaind_root) & (yind_root<yendind_root+1) & (zind_root>=zstaind_root) & (zind_root<zendind_root+1))
            #ind = ind3
            numind = numind + len(ind[0])
            dx = Cell.dx[ind]
            if projection == 'xy':
                x = Cell.x[ind]
                y = Cell.y[ind]
                z = Cell.z[ind]
                # one bin per level-lev cell across the padded window
                bins = [int((3 + 2 * xwidind_root) * 2 ** (lev - minlev)),
                        int((3 + 2 * ywidind_root) * 2 ** (lev - minlev))]
            elif projection =='xz':
                x = Cell.x[ind]
                y = Cell.z[ind]
                z = Cell.y[ind]
                bins = [int((3 + 2 * xwidind_root) * 2 ** (lev - minlev)),
                        int((3 + 2 * zwidind_root) * 2 ** (lev - minlev))]
            else:
                raise ValueError('improper projection description')
            if var=='nH': #volume-weighted
                sumvar = Cell.nH[ind]*Cell.dx[ind]
                weight = Cell.dx[ind]
            elif var=='T': #mass-weighted
                sumvar = Cell.T[ind]*Cell.m[ind]/Cell.dx[ind]**2
                weight = Cell.m[ind]
            elif var=='xHI':
                sumvar = Cell.xHI[ind] * Cell.dx[ind]
                weight = Cell.dx[ind]
            elif var=='xH2':
                sumvar = Cell.xH2[ind] * Cell.dx[ind]
                weight = Cell.dx[ind]
            print('indexing level %d, t=%3.2f (s), #=%d' % (lev, time.time() - start, len(ind[0])))
            numarr = np.histogram2d(x, y, bins=bins, range=histrange)[0]
            sumarr = np.histogram2d(x, y, bins=bins, weights=sumvar,range=histrange)[0]
            weiarr = np.histogram2d(x, y, bins=bins, weights=weight,range=histrange)[0]
            #sumstat = stats.binned_statistic_2d(x, y, sumvar, bins=[2**lev,2**lev], statistic='sum')
            #weistat = stats.binned_statistic_2d(x, y, weight, bins=[2 ** lev, 2 ** lev], statistic='sum')
            print('complete level %d binning, t=%3.2f (s)' % (lev, time.time() - start))
            start = time.time()
            #sumarr = sumstat.statistic
            #weiarr = weistat.statistic
            """
            if n==0:
                sumarr2 = sumarr
                weiarr2 = weiarr
            else:
                #print(sumarr2)
                #print(sumarr2.shape)
                for i in range(2):
                    sumarr2 = np.repeat(sumarr2,2,axis=i)
                    weiarr2 = np.repeat(weiarr2,2,axis=i)
                sumarr2 = sumarr2 + sumarr
                weiarr2 = weiarr2 + weiarr
            """
            # upsample the coarser running image by 2x (nearest-neighbour,
            # no smoothing) so it aligns with this level's grid, then add
            if n==0:
                sumarr2 = sumarr
                weiarr2 = weiarr
            else:
                sumarr2 = rescale(sumarr2, 2, mode='constant', order=0, multichannel=False, anti_aliasing=False)
                weiarr2 = rescale(weiarr2, 2, mode='constant', order=0, multichannel=False, anti_aliasing=False)
                sumarr2 = sumarr2 + sumarr
                weiarr2 = weiarr2 + weiarr
            print('complete level %d increasing size, t=%3.2f (s)' % (lev, time.time() - start))
            start = time.time()
            sumvol = sumvol + np.sum(dx**3)
        #print(np.min(sumarr2), np.max(sumarr2), np.min(weiarr2), np.max(weiarr2))
        #print(np.where(sumarr2==0), np.where(weiarr2==0))
        # crop the padded image back to the requested window (finest pixels)
        if projection=='xy':
            xstacut = int(self.xcenter - self.xwid2 - (xstaind_root) * 2 ** (maxlev - minlev))
            xendcut = int(self.xcenter + self.xwid2 - (xstaind_root) * 2 ** (maxlev - minlev))
            ystacut = int(self.ycenter - self.ywid2 - (ystaind_root) * 2 ** (maxlev - minlev))
            yendcut = int(self.ycenter + self.ywid2 - (ystaind_root) * 2 ** (maxlev - minlev))
        if projection =='xz':
            self.xcen2 = self.xcenter
            self.zcen2 = self.zcenter
            xstacut = int(self.xcenter - self.xwid2 - (xstaind_root) * 2 ** (maxlev - minlev))
            xendcut = int(self.xcenter + self.xwid2 - (xstaind_root) * 2 ** (maxlev - minlev))
            ystacut = int(self.zcenter - self.zwid2 - (zstaind_root) * 2 ** (maxlev - minlev))
            yendcut = int(self.zcenter + self.zwid2 - (zstaind_root) * 2 ** (maxlev - minlev))
        # crop
        self.xmin = int(self.xcenter-self.xwid2)*mindx
        self.xmax = int(self.xcenter+self.xwid2)*mindx
        self.ymin = int(self.ycenter - self.ywid2) * mindx
        self.ymax = int(self.ycenter + self.ywid2) * mindx
        self.zmin = int(self.zcenter - self.zwid2) * mindx
        self.zmax = int(self.zcenter + self.zwid2) * mindx
        #print(xstacut,xendcut,ystacut, yendcut)
        sumarr2 = sumarr2[xstacut:xendcut,ystacut:yendcut]
        weiarr2 = weiarr2[xstacut:xendcut,ystacut:yendcut]
        print(np.min(sumarr2),np.max(sumarr2), np.min(weiarr2),np.max(weiarr2))
        # weighted-average map in log10
        self.avarr = np.log10(sumarr2/weiarr2)
        #print(self.avarr)
        #print(self.avarr.shape)
        self.projection = projection
    def projectionPlot(self, ax, cm, ticks,cbar,ruler,corr,text, label):
        """Render the projection with imshow; optionally add an inset
        colorbar, a scale-bar 'ruler', and a text label. Returns the image."""
        start = time.time()
        if self.projection == 'xy':
            im = ax.imshow(np.rot90(self.avarr), cmap=cm,
                           extent=[-self.xwid / 1000, self.xwid / 1000, -self.ywid / 1000,
                                   self.ywid / 1000], vmin=np.min(ticks), vmax=np.max(ticks), aspect='equal')
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.ywid / 1000, self.ywid / 1000)
        elif self.projection =='xz':
            im = ax.imshow(np.rot90(self.avarr), cmap=cm,
                           extent=[-self.xwid / 1000, self.xwid / 1000, -self.zwid / 1000,
                                   self.zwid / 1000], vmin=np.min(ticks), vmax=np.max(ticks), aspect='equal')
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.zwid / 1000, self.zwid / 1000)
        if cbar==True:
            cbaxes = inset_axes(ax, width="100%", height="100%", loc=3, bbox_to_anchor=(0.05,0.05,0.25,0.02),bbox_transform=ax.transAxes)
            cbar = plt.colorbar(im, cax=cbaxes, ticks=ticks, orientation='horizontal', cmap=cm)
            cbar.set_label('log(' + self.var + ')', color='w', labelpad=-50, fontsize=15)
            cbar.ax.xaxis.set_tick_params(color='w')
            cbar.ax.xaxis.set_ticks_position('bottom')
            plt.setp(plt.getp(cbar.ax.axes, 'xticklabels'), color='w')
        """
        you have to insert appropriate number for below 'rectangles'
        this is ruler which indicates the size of projected image
        ex) if you want to draw 5 kpc width projected image and want to insert ruler with 3 kpc size, then
        replace 5 kpc into 3 kpc and you have to multiply 3/5 instead of 5/14 in width.
        """
        if ruler==True:
            if self.projection == 'xy':
                rectangles = {
                    '5 kpc': patches.Rectangle(xy=(0.25 * self.xwid / 1000, -0.88 * self.ywid / 1000),
                                               width=(2 * self.ywid / 1000) * 5 / 14,
                                               height=0.01 * (2 * self.ywid / 1000)*corr, facecolor='white')}
                for r in rectangles:
                    ax.add_artist(rectangles[r])
                    rx, ry = rectangles[r].get_xy()
                    cx = rx + rectangles[r].get_width() / 2.0
                    cy = ry + rectangles[r].get_height() / 2.0
                    ax.annotate(r, (cx, cy + 0.02 * (2 * self.ywid / 1000)*corr), color='w', weight='bold',
                                fontsize=15, ha='center', va='center')
            if self.projection == 'xz':
                rectangles = {
                    '5 kpc': patches.Rectangle(xy=(0.25 * self.xwid / 1000, -0.88 * self.zwid / 1000),
                                               width=(2 * self.xwid / 1000) * 5 / 14,
                                               height=0.01 * (2 * self.zwid / 1000) * corr, facecolor='white')}
                for r in rectangles:
                    ax.add_artist(rectangles[r])
                    rx, ry = rectangles[r].get_xy()
                    cx = rx + rectangles[r].get_width() / 2.0
                    cy = ry + rectangles[r].get_height() / 2.0
                    ax.annotate(r, (cx, cy + 0.02 * (2 * self.zwid / 1000) * corr), color='w', weight='bold',
                                fontsize=15, ha='center', va='center')
        if text==True:
            if self.projection=='xy':
                ax.text(-self.xwid/1000*0.9,self.ywid/1000*0.8,label,color='w',fontsize=40)
            if self.projection=='xz':
                ax.text(-self.xwid/1000*0.9,self.zwid/1000*0.8,label,color='w',fontsize=40)
        return im
    def star_plot(self, Part, ax):
        """Scatter star particles (grey dots) over the projection window."""
        start=time.time()
        print('star plotting...')
        # window centre in pc (pixel index * pixel size)
        ex_xcenter = self.xcenter*self.mindx
        ex_ycenter = self.ycenter*self.mindx
        ex_zcenter = self.zcenter*self.mindx
        # star offsets from centre in kpc
        sxplot = (Part.xp[0] - ex_xcenter)/1000
        syplot = (Part.xp[1] - ex_ycenter)/1000
        szplot = (Part.xp[2] - ex_zcenter)/1000
        if self.projection == 'xy':
            cax1 = ax.scatter(sxplot, syplot, c='grey', s=0.1, alpha=0.3)
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.ywid / 1000, self.ywid / 1000)
        if self.projection == 'xz':
            cax1 = ax.scatter(sxplot, szplot, c='grey', s=0.1, alpha=0.3)
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.zwid / 1000, self.zwid / 1000)
        print('plotting stars finished , t = %.2f [sec]' %(time.time()-start))
        return cax1
    def star_plot3(self, Part, ax,binsize,cm,ticks,vmin,vmax, cbar, ruler,corr):
        """Plot a Gaussian-smoothed stellar surface-density map (log10),
        with optional inset colorbar and scale-bar ruler."""
        start = time.time()
        print('star plotting...')
        ex_xcenter = self.xcenter * self.mindx
        ex_ycenter = self.ycenter * self.mindx
        ex_zcenter = self.zcenter * self.mindx
        sxplot = (Part.xp[0] - ex_xcenter) / 1000
        syplot = (Part.xp[1] - ex_ycenter) / 1000
        szplot = (Part.xp[2] - ex_zcenter) / 1000
        # NOTE(review): fwhm computed but unused below
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
        fwhm = cosmo.kpc_comoving_per_arcmin(0.1)
        # print('fwhm',fwhm)
        if self.projection == 'xy':
            x = sxplot
            y = syplot
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.ywid / 1000, self.ywid / 1000)
            #histrange = [[self.xmin, self.xmax], [self.ymin, self.ymax]]
            histrange = [[-self.xwid / 1000, self.xwid / 1000], [-self.ywid / 1000, self.ywid / 1000]]
            bin = [(self.xmax - self.xmin) / binsize, (self.ymax - self.ymin) / binsize]
        elif self.projection =='xz':
            x = sxplot
            y = szplot
            ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
            ax.set_ylim(-self.zwid / 1000, self.zwid / 1000)
            histrange = [[-self.xwid / 1000, self.xwid / 1000], [-self.zwid / 1000, self.zwid / 1000]]
            bin = [(self.xmax - self.xmin) / binsize, (self.zmax - self.zmin) / binsize]
        # surface density in Msun/kpc^2, smoothed with a 2-bin Gaussian
        sfrd= np.histogram2d(x, y, weights=Part.mp0 / (binsize/1000)**2, range=histrange, bins=bin)[0]
        sfrd_gauss = gaussian_filter(sfrd, sigma=2)
        if self.projection =='xy':
            im = ax.imshow(np.log10(np.rot90(sfrd_gauss+1.)), cmap=cm,extent=[-self.xwid / 1000, self.xwid / 1000, -self.ywid / 1000,
                                                                              self.ywid / 1000],interpolation='none', aspect='equal',vmin=vmin,vmax=vmax)
        elif self.projection =='xz':
            im = ax.imshow(np.log10(np.rot90(sfrd_gauss+1.)), cmap=cm,extent=[-self.xwid / 1000, self.xwid / 1000, -self.zwid / 1000,
                                                                              self.zwid / 1000], interpolation='none', aspect='equal',vmin=vmin,vmax=vmax)
        if cbar==True:
            cbaxes = inset_axes(ax, width="100%", height="100%", loc=3, bbox_to_anchor=(0.05, 0.05, 0.25, 0.02),
                                bbox_transform=ax.transAxes)
            cbar = plt.colorbar(im, cax=cbaxes, ticks=ticks, orientation='horizontal', cmap=cm)
            cbar.set_label('$log(\Sigma_*) (M_\odot\cdot kpc^{-2}$)', color='w', labelpad=-50, fontsize=15)
            cbar.ax.xaxis.set_tick_params(color='w')
            cbar.ax.xaxis.set_ticks_position('bottom')
            plt.setp(plt.getp(cbar.ax.axes, 'xticklabels'), color='w')
        if ruler==True:
            if self.projection =='xy':
                rectangles = {
                    '5 kpc': patches.Rectangle(xy=(0.25 * self.xwid / 1000, -0.88 * self.xwid / 1000),
                                               width=(2 * self.xwid / 1000) * 5 / 14,
                                               height=0.01 * (2 * self.ywid / 1000)*corr, facecolor='white')}
                for r in rectangles:
                    ax.add_artist(rectangles[r])
                    rx, ry = rectangles[r].get_xy()
                    cx = rx + rectangles[r].get_width() / 2.0
                    cy = ry + rectangles[r].get_height() / 2.0
                    ax.annotate(r, (cx, cy + 0.02 * (2 * self.ywid / 1000)*corr), color='w', weight='bold',
                                fontsize=15, ha='center', va='center')
            if self.projection =='xz':
                rectangles = {
                    '5 kpc': patches.Rectangle(xy=(0.25 * self.xwid / 1000, -0.88 * self.zwid / 1000),
                                               width=(2 * self.xwid / 1000) * 5/ 14,
                                               height=0.01 * (2 * self.zwid / 1000)*corr, facecolor='white')}
                for r in rectangles:
                    ax.add_artist(rectangles[r])
                    rx, ry = rectangles[r].get_xy()
                    cx = rx + rectangles[r].get_width() / 2.0
                    cy = ry + rectangles[r].get_height() / 2.0
                    ax.annotate(r, (cx, cy + 0.02 * (2 * self.zwid / 1000)*corr), color='w', weight='bold',
                                fontsize=15, ha='center', va='center')
    def clump_plot(self, Clump, ax):
        """Draw clump outlines as open circles sized to each clump radius
        (xy projection only)."""
        # for appropriate description of size of clump, dpi = 144, figsize * size of axis = size
        # clump finding
        start = time.time()
        print('finding gas clumps...')
        ex_xcenter = self.xcenter * self.mindx
        ex_ycenter = self.ycenter * self.mindx
        ex_zcenter = self.zcenter * self.mindx
        cxplot = (Clump.xclump - ex_xcenter) / 1000
        cyplot = (Clump.yclump - ex_ycenter) / 1000
        czplot = (Clump.zclump - ex_zcenter) / 1000
        # marker area scaled so the circle matches the clump's physical size
        cax1 = ax.scatter(cxplot, cyplot, edgecolor='k', marker='o',
                          s=(Clump.rclump * ax.get_window_extent().width / (2 * self.xwid))** 2, linewidths=1, facecolors='none')
        ax.set_xlim(-self.xwid / 1000, self.xwid / 1000)
        ax.set_ylim(-self.ywid / 1000, self.ywid / 1000)
        return cax1
def zscale(arr):
    """Return (vmin, vmax) display limits for *arr* using the ZScale algorithm."""
    lo, hi = ZScaleInterval().get_limits(arr)
    return lo, hi
def CoM_check_plot(Part1, Cell1, wid, height, depth, xcen,ycen,zcen):
    """Quick-look figure: nH projection + star scatter + a marker at the
    supplied centre-of-mass, for visually checking the CoM estimate.

    NOTE(review): ``a.projectionPlot(Cell1, ax1, cm1)`` does not match
    projectionPlot's signature (ax, cm, ticks, cbar, ruler, corr, text,
    label) — this call would raise a TypeError; likely written against an
    older version of the method. Verify before use.
    """
    fig = plt.figure(figsize=(8, 8),dpi=144)
    ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    cm1 = plt.get_cmap('rainbow')
    a = new_projection(Cell1, xcen,ycen,zcen, wid, height, depth,'nH','xy')
    ss1 = a.projectionPlot(Cell1, ax1, cm1)
    a.star_plot(Part1, ax1)
    # mark the requested centre relative to the projection window
    ax1.scatter((xcen - a.mindx * a.xcenter) / 1000, (ycen - a.mindx * a.ycenter) / 1000, s=100, marker='*')
    ax1.set_xlabel('X(kpc)')
    ax1.set_ylabel('Y(kpc)')
    cax1 = fig.add_axes([0.9, 0.1, 0.02, 0.3])
    plt.colorbar(ss1, cax=cax1, cmap=cm1)
    plt.show()
    plt.close()
def getr(x, y, z, xcenter, ycenter, zcenter):
    """Euclidean distance of (x, y, z) from the given centre (elementwise)."""
    dx = x - xcenter
    dy = y - ycenter
    dz = z - zcenter
    return np.sqrt(dx * dx + dy * dy + dz * dz)
def get2dr(x, y, xcen, ycen):
    """Projected (2-D) distance of (x, y) from the given centre (elementwise)."""
    return np.hypot(x - xcen, y - ycen)
def getmass(marr, rarr, r):
    """Total mass of entries strictly inside radius *r*."""
    inside = rarr < r
    return np.sum(marr[inside])
def mxsum(marr, xarr, ind):
    """Sum of the elementwise product marr*xarr over the selection *ind*."""
    return np.sum(xarr[ind] * marr[ind])
def msum(marr, ind):
    """Sum of *marr* over the selection *ind*."""
    return marr[ind].sum()
def simpleCoM(x, y, z, marr, rarr, r):
    """Mass-weighted centre of mass of entries with rarr < r.

    Returns the (x, y, z) coordinates as a 3-tuple.
    """
    sel = np.where(rarr < r)
    mtot = np.sum(marr[sel])
    cx = np.sum(x[sel] * marr[sel]) / mtot
    cy = np.sum(y[sel] * marr[sel]) / mtot
    cz = np.sum(z[sel] * marr[sel]) / mtot
    return cx, cy, cz
def getmass_zlim(marr, rarr, r, z, zcen, zlim):
    """Total mass inside projected radius *r* AND within |z - zcen| < zlim."""
    sel = (rarr < r) & (np.abs(z - zcen) < zlim)
    return np.sum(marr[sel])
#half-mass CoM
def CoM_pre(Part1, Cell1, rgrid, totmass, xcen, ycen, zcen, gasonly):
    """One centre-of-mass refinement step.

    Finds the smallest radius in *rgrid* enclosing more than half of
    *totmass* (stars + DM + gas around the given centre), then returns the
    mass-weighted centre of everything inside that radius — either of all
    three components or, if *gasonly*, of the gas cells alone.
    """
    rstar = getr(Part1.xp[0], Part1.xp[1], Part1.xp[2], xcen, ycen, zcen)
    rpart = getr(Part1.dmxp[0], Part1.dmxp[1], Part1.dmxp[2], xcen, ycen, zcen)
    rcell = getr(Cell1.x, Cell1.y, Cell1.z, xcen, ycen, zcen)
    # Walk outward until the enclosed mass exceeds half of totmass;
    # fall back to the largest radius if it never does.
    rrr = rgrid[-1]
    for rcur in rgrid:
        enclosed = (getmass(Part1.mp0, rstar, rcur)
                    + getmass(Part1.dmm, rpart, rcur)
                    + getmass(Cell1.m, rcell, rcur))
        if enclosed > totmass / 2:
            rrr = rcur
            break
    if gasonly:
        indcell = np.where(rcell < rrr)
        totalmx = mxsum(Cell1.x, Cell1.m, indcell)
        totalmy = mxsum(Cell1.y, Cell1.m, indcell)
        totalmz = mxsum(Cell1.z, Cell1.m, indcell)
        totalm = msum(Cell1.m, indcell)
    else:
        indstar = np.where(rstar < rrr)
        indpart = np.where(rpart < rrr)
        indcell = np.where(rcell < rrr)
        totalmx = (mxsum(Part1.xp[0], Part1.mp0, indstar)
                   + mxsum(Part1.dmxp[0], Part1.dmm, indpart)
                   + mxsum(Cell1.x, Cell1.m, indcell))
        totalmy = (mxsum(Part1.xp[1], Part1.mp0, indstar)
                   + mxsum(Part1.dmxp[1], Part1.dmm, indpart)
                   + mxsum(Cell1.y, Cell1.m, indcell))
        totalmz = (mxsum(Part1.xp[2], Part1.mp0, indstar)
                   + mxsum(Part1.dmxp[2], Part1.dmm, indpart)
                   + mxsum(Cell1.z, Cell1.m, indcell))
        totalm = msum(Part1.mp0, indstar) + msum(Part1.dmm, indpart) + msum(Cell1.m, indcell)
    return totalmx / totalm, totalmy / totalm, totalmz / totalm
def CoM_main(Part1, Cell1, diskmass):
    """Iteratively refine the galaxy centre of mass.

    A first pass uses all components starting from the box centre (with a
    large nominal total mass of 1e11 Msun); two gas-only passes then refine
    around the previous estimate using *diskmass* as the reference mass.
    Returns the final (x, y, z) centre in pc.
    """
    rgrid1 = np.linspace(100, 4000, num=40)
    boxcen = Part1.boxpc / 2
    cen = CoM_pre(Part1, Cell1, rgrid1, 1e11, boxcen, boxcen, boxcen, False)
    cen = CoM_pre(Part1, Cell1, rgrid1, diskmass, cen[0], cen[1], cen[2], True)
    cen = CoM_pre(Part1, Cell1, rgrid1, diskmass, cen[0], cen[1], cen[2], True)
    return cen[0], cen[1], cen[2]
def CoM_Main(Part1, diskmass):
    """Iteratively locate the stellar centre of mass and half-mass radius.

    Starting from the global stellar centre of mass, alternates between
    (a) finding the smallest grid radius enclosing half of the stellar mass
    within a |dz| < 2000 pc slab, and (b) recomputing the centre from the
    stars inside that cylinder, for 10 refinement iterations.

    Returns arrays (xcen, ycen, zcen, hmr) of length 11 holding every
    iterate. Note: *diskmass* is currently unused; kept for interface
    compatibility.
    """
    nstep = 11
    xcen = np.zeros(nstep)
    ycen = np.zeros(nstep)
    zcen = np.zeros(nstep)
    hmr = np.zeros(nstep)
    rgrid = np.linspace(100, 4000, num=40)
    mtot = np.sum(Part1.mp0)

    def _half_mass_radius(cx, cy, cz):
        # Smallest grid radius whose cylinder (|z-cz| < 2000 pc) encloses
        # more than half the stellar mass; 0 if none does.
        for rad in rgrid:
            m = getmass_zlim(Part1.mp0,
                             get2dr(Part1.xp[0], Part1.xp[1], cx, cy),
                             rad, Part1.xp[2], cz, 2000)
            if m > mtot / 2:
                return rad
        return 0.0

    # initial guess: global stellar centre of mass
    xcen[0] = np.sum(Part1.mp0 * Part1.xp[0]) / mtot
    ycen[0] = np.sum(Part1.mp0 * Part1.xp[1]) / mtot
    zcen[0] = np.sum(Part1.mp0 * Part1.xp[2]) / mtot
    hmr[0] = _half_mass_radius(xcen[0], ycen[0], zcen[0])
    for i in range(nstep - 1):
        sel = np.where((get2dr(Part1.xp[0], Part1.xp[1], xcen[i], ycen[i]) < hmr[i])
                       & (np.abs(Part1.xp[2] - zcen[i]) < 2000))
        msel = np.sum(Part1.mp0[sel])
        xcen[i + 1] = np.sum(Part1.xp[0][sel] * Part1.mp0[sel]) / msel
        ycen[i + 1] = np.sum(Part1.xp[1][sel] * Part1.mp0[sel]) / msel
        zcen[i + 1] = np.sum(Part1.xp[2][sel] * Part1.mp0[sel]) / msel
        hmr[i + 1] = _half_mass_radius(xcen[i + 1], ycen[i + 1], zcen[i + 1])
    return xcen, ycen, zcen, hmr
def plot(ax1, ax2, read, nout, Part, Cell, xwid, ywid, zwid, label,diskmass):
    """Load one snapshot, centre on its CoM, and draw the nH projection
    (ax1) plus the star scatter (ax2). *Part* and *Cell* are the reader
    classes to use (e.g. Part / Cellfromdat), not instances.

    NOTE(review): ``a.projectionPlot(ax1, cm)`` does not match
    projectionPlot's full signature (ax, cm, ticks, cbar, ruler, corr,
    text, label) — this call would raise a TypeError; likely written
    against an older version of the method. Verify before use.
    """
    start = time.time()
    Part1 = Part(read, nout)
    Cell1 = Cell(read, nout, Part1)
    print('reading finished , t = %.2f [sec]' % (time.time() - start))
    xcen, ycen, zcen = CoM_main(Part1, Cell1,diskmass)
    print('finish to find CoM, t= %.2f [sec]'%(time.time()-start))
    a = new_projection( Cell1, xcen, ycen, zcen,xwid, ywid, zwid, 'nH','xy')
    cm = plt.get_cmap('inferno')
    ss = a.projectionPlot(ax1, cm)
    ax1.set_xlim(-a.xwid/1000,a.xwid/1000)
    ax1.set_ylim(-a.xwid/1000,a.xwid/1000)
    ax2.set_xlim(-a.xwid / 1000, a.xwid / 1000)
    ax2.set_ylim(-a.xwid / 1000, a.xwid / 1000)
    ax1.text(-a.xwid/1000*0.8,a.xwid/1000*0.8,label,color='white')
    cc= a.star_plot(Part1, ax2)
    # aa= a.clump_plot(Clump1, ax1)
    # a.set_facecolor('none')
    #ax.set_xlabel('X(kpc)')
    #ax.set_ylabel('Y(kpc)')
    ax1.set_xticks([])
    ax1.set_yticks([])
    ax2.set_xticks([])
    ax2.set_yticks([])
    return ss
def main():
    """Figure 1: 6 runs x (nH projection, star scatter) in a 2x6 panel grid."""
    fig = plt.figure(figsize=(15,12))
    # left/right panel pair per run: [nH map, star map]
    ax1 = fig.add_axes([0.05,0.1,0.2,0.25])
    ax2 = fig.add_axes([0.25,0.1,0.2,0.25])
    ax3 = fig.add_axes([0.05,0.4,0.2,0.25])
    ax4 = fig.add_axes([0.25,0.4,0.2,0.25])
    ax5 = fig.add_axes([0.05,0.7,0.2,0.25])
    ax6 = fig.add_axes([0.25,0.7,0.2,0.25])
    ax7 = fig.add_axes([0.5,0.4,0.2,0.25])
    ax8 = fig.add_axes([0.7,0.4,0.2,0.25])
    ax9 = fig.add_axes([0.5,0.7,0.2,0.25])
    ax10 = fig.add_axes([0.7,0.7,0.2,0.25])
    ax11 = fig.add_axes([0.5,0.1,0.2,0.25])
    ax12 = fig.add_axes([0.7,0.1,0.2,0.25])
    # (axes, run dir, snapshot, reader classes, window half-widths [pc], label, disk mass)
    ss = plot(ax11, ax12,read_new_01Zsun_05pc,380,Part,Cellfromdat,7000,7000,3000,'G9_01Zsun_5pc',1.75e9)
    ss = plot(ax5, ax6,read_new_01Zsun,480,Part,Cell,7000,7000,3000,'G9_01Zsun',1.75e9)
    ss = plot(ax7, ax8,read_new_1Zsun_highSN_new,380,Part,Cell,7000,7000,3000,'G9_1Zsun_SNboost',1.75e9)
    ss = plot(ax1, ax2,read_new_gasrich,250,Part,Cellfromdat,7000,7000,7000,'G9_01Zsun_gasrich',1.15e10)
    ss = plot(ax9, ax10,read_new_03Zsun_highSN,380,Part,Cell,7000,7000,3000,'G9_03Zsun_SNboost',1.75e9)
    ss = plot(ax3, ax4,read_new_1Zsun,480,Part,Cell,7000,7000,3000,'G9_1Zsun',1.75e9)
    #plt.savefig('/Volumes/THYoo/2019_thesis/projection(fig1).png' )
    plt.show()
def thr_fiducial(read,nout,Cell,savedirec,dpi,label):
    """Multi-panel snapshot figure for one run: face-on nH (ax1), stellar
    surface density (ax3), xHI (ax2), and edge-on nH + stars (ax5/ax4).
    Saves to ``savedirec``.
    """
    start = time.time()
    fig = plt.figure(figsize=(10*3, 2.5*3),dpi=dpi)
    ax1 = fig.add_axes([0, 0, 0.25, 1])
    ax3 = fig.add_axes([0.25, 0, 0.25, 1])
    ax2 = fig.add_axes([0.75, 0, 0.25, 1])
    ax4 = fig.add_axes([0.5,0,0.25,0.5])
    ax5 = fig.add_axes([0.5,0.5,0.25,0.5])
    ax1.set_xticks([])
    ax2.set_xticks([])
    ax3.set_xticks([])
    ax4.set_xticks([])
    ax5.set_xticks([])
    ax1.set_yticks([])
    ax2.set_yticks([])
    ax3.set_yticks([])
    ax4.set_yticks([])
    ax5.set_yticks([])
    ax3.set_facecolor('black')
    Part1 = Part(read, nout)
    Cell1 = Cell(read, nout, Part1)
    # Fesc_indSED1 = Fesc_indSED(read, nout)
    print('reading finished , t = %.2f [sec]' % (time.time() - start))
    xcen, ycen, zcen = CoM_main(Part1, Cell1, 1.75e9)
    print('finish to find CoM, t= %.2f [sec]'%(time.time()-start))
    # face-on density projection
    a = new_projection(Cell1, xcen, ycen, zcen, 7000,7000, 3500, 'nH', 'xy')
    cm1 = plt.get_cmap('inferno')
    cm2 = plt.get_cmap('viridis_r')
    cm3 = plt.get_cmap('bone')
    a.projectionPlot(ax1,cm1,[-3,-2,-1,0,1,2],True,True,1,True,label)
    # neutral fraction in a thin (100 pc) slab
    b = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 100, 'xHI', 'xy')
    b.projectionPlot(ax2,cm2,[-2, -1,0],True,True,1,False,label)
    # NOTE(review): 'filter' is unused here (and shadows the builtin)
    filter = [[7950/4, 10050/4],[17550/4, 22260/4],[24160/4,31270/4]]
    a.star_plot3(Part1,ax3,a.mindx,cm3,[6,7,8],5.5,8.5,True,True,1)
    # edge-on views
    c = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 3500, 'nH', 'xz')
    c.projectionPlot(ax5, cm1, [-3, -2, -1, 0, 1, 2],False,True,2,False,label)
    c.star_plot3(Part1,ax4,a.mindx,cm3,[6,7,8],5.5,8.5,False,True,1)
    plt.savefig(savedirec+'thr_fiducial%s_%05d.png'%(label,nout))
    #plt.show()
def thr_fiducial_mol(read,nout,Cell,savedirec,dpi,label):
start = time.time()
fig = plt.figure(figsize=(10*3, 2*3),dpi=dpi)
ax1 = fig.add_axes([0, 0, 0.2, 1])
ax3 = fig.add_axes([0.2, 0, 0.2, 1])
ax2 = fig.add_axes([0.6, 0, 0.2, 1])
ax4 = fig.add_axes([0.4,0,0.2,0.5])
ax5 = fig.add_axes([0.4,0.5,0.2,0.5])
ax6 = fig.add_axes([0.8,0,0.2,1])
ax1.set_xticks([])
ax2.set_xticks([])
ax3.set_xticks([])
ax4.set_xticks([])
ax5.set_xticks([])
ax6.set_xticks([])
ax1.set_yticks([])
ax2.set_yticks([])
ax3.set_yticks([])
ax4.set_yticks([])
ax5.set_yticks([])
ax6.set_yticks([])
ax3.set_facecolor('black')
Part1 = Part(read, nout)
Cell1 = Cell(read, nout, Part1)
Fesc_indSED1 = Fesc_indSED(read, nout)
print('reading finished , t = %.2f [sec]' % (time.time() - start))
xcen, ycen, zcen = CoM_main(Part1, Cell1, 1.75e9)
print('finish to find CoM, t= %.2f [sec]'%(time.time()-start))
a = new_projection(Cell1, xcen, ycen, zcen, 7000,7000, 3500, 'nH', 'xy')
cm1 = plt.get_cmap('inferno')
cm2 = plt.get_cmap('viridis_r')
cm3 = plt.get_cmap('bone')
cm4 = plt.get_cmap('pink')
a.projectionPlot(ax1,cm1,[-3,-2,-1,0,1,2],True,True,1,True,label)
#b = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 100, 'xHI', 'xy')
b = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 3500, 'xHI', 'xy')
b.projectionPlot(ax2,cm2,[-2, -1,0],True,True,1,False,label)
filter = [[7950/4, 10050/4],[17550/4, 22260/4],[24160/4,31270/4]]
a.star_plot3(Part1,ax3,a.mindx,cm3,[6,7,8],5.5,8.5,True,True,1)
c = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 3500, 'nH', 'xz')
c.projectionPlot(ax5, cm1, [-3, -2, -1, 0, 1, 2],False,True,2,False,label)
c.star_plot3(Part1,ax4,a.mindx,cm3,[6,7,8],5.5,8.5,False,True,2)
d = new_projection(Cell1, xcen, ycen, zcen, 7000, 7000, 3500, 'xH2', 'xy')
d.projectionPlot(ax6, cm4, [-6,-4, -2, 0],True,True,1,False,label)
arr = np.loadtxt(read + 'sh_hmr.dat')
noutarr = arr[:, 0]
indnout = np.where(noutarr == nout)
hmr = arr[indnout, 2]
hmr = np.asscalar(hmr)
circle = plt.Circle((0, 0), hmr / 1000, edgecolor='w', fill=False)
ax6.add_artist(circle)
plt.savefig(savedirec+'thr_fiducial_H2_%s_%05d.png'%(label,nout))
#single(read_new_01Zsun,Cell,3,480,'G9_01Zsun')
#four_fiducial('/blackwhale/dbahck37/kisti/',124,Cell)
thr_fiducial(read_new_01Zsun,480,Cell,'/Volumes/THYoo/kisti/plot/2019thesis/',54,'G9_Zlow')
thr_fiducial(read_new_gasrich,300,Cellfromdat,'/Volumes/THYoo/kisti/plot/2019thesis/',72,'G9_Zlow_gas5')
thr_fiducial(read_new_03Zsun_highSN,380,Cell,'/Volumes/THYoo/kisti/plot/2019thesis/',72,'G9_Zmid_SN5')
thr_fiducial(read_new_01Zsun_05pc,380,Cell,'/Volumes/THYoo/kisti/plot/2019thesis/',72,'G9_Zlow_HR')
thr_fiducial(read_new_1Zsun,480,Cell,'/Volumes/THYoo/kisti/plot/2019thesis/',72,'G9_Zhigh')
thr_fiducial(read_new_1Zsun_highSN_new,380,Cell,'/Volumes/THYoo/kisti/plot/2019thesis/',72,'G9_Zhigh_SN5')
def main(read, Cell, inisnap, endsnap,savedirec):
numsnap = endsnap - inisnap + 1
for i in range(numsnap):
nout = i + inisnap
if not os.path.isfile(read + '/SAVE/part_%05d.sav' % (nout)):
print(read + '/SAVE/part_%05d.sav' % (nout))
continue
if not os.path.isfile(read + '/SAVE/cell_%05d.sav' % (nout)):
print(read + '/SAVE/cell_%05d.sav' % (nout))
continue
thr_fiducial(read,nout,Cell,savedirec,'G9_Zlow')
| [
"noreply@github.com"
] | noreply@github.com |
f991f0151a8dda85fd7b923dfbf44c69063ed06a | 93795b826912c44dc7797effca38efc7a4c6603f | /sorting/monk-being-monitor.py | a226387e2b1ec2931a75b5925a469541e11b68bb | [] | no_license | nattapat-v/code-monk | 961fa087c78f941fb29f9a49845a3c0e6391b620 | 01d5701c54b63c290e0bc0bd1fb5c58907f6dd96 | refs/heads/main | 2023-06-20T03:37:59.935763 | 2021-07-18T09:27:06 | 2021-07-18T09:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from itertools import groupby
def group(h):
h.sort()
h = [len(list(j)) for i, j in groupby(h)]
h.sort()
return h
def solution(h):
if len(set(h)) == 1:
print('-1')
else:
h = group(h)
print(h[-1] - h[0])
T = int(input())
for _ in range(T):
n = int(input())
h = list(map(int, input().split()))
solution(h) | [
"dream_nattapat@hotmail.com"
] | dream_nattapat@hotmail.com |
190bfa669cf3e49500f9113ee9ff6e2ec99bac36 | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/portal/out_140030000.py | 5eb2acf20a46a339aaa1228ed2aad91159c1a465 | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 166 | py | if sm.hasQuestCompleted(21202) and sm.hasQuest(21201) or sm.hasQuestCompleted(21303) and sm.hasQuest(21302):
sm.warp(140000000, 1)
else:
sm.warp(140010200, 1) | [
"sensiblemagic@gmail.com"
] | sensiblemagic@gmail.com |
e31a60671323357774dacb420be416d4091213cf | c29d5a918b7dee4ad8f8ff17f63968bee84cc856 | /count_hongbao.py | 112f9169ffa06adcecce0ec13d844958803ca6dd | [] | no_license | longhongjun/jobinsina | 9dfac0456d951c18107e319133257935a47e7790 | a72621a77843a3eb441a216cc52a260307ce6fe2 | refs/heads/master | 2021-01-23T20:27:18.085219 | 2013-05-16T10:23:20 | 2013-05-16T10:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #coding:utf-8
'''用于统计从主站发出的、带“#让红包飞#”关键词的原创/转发 次数/人数'''
import datetime
import pandas as pd
def main():
f=open('c:\\result.txt','rb')
data=f.readlines()
f.close()
zhuzhan_appid=('*','**','****')
result_list=[]
for item in data:
item=item.split('\t')
if item[4] in zhuzhan_appid and item[2]=='0':
result_list.append(item[1])
cishu=len(result_list)
renshu=len(set(result_list))
print u'原创次数为:%s\n原创人数为:%s'%(cishu,renshu)
print 'OK!'
if __name__=='__main__':
main()
| [
"lllhhw@gmail.com"
] | lllhhw@gmail.com |
fea2f08c9f726cf28b051dffac17414bb63ba028 | 693e23a99539b1837d5331847bb726d6dd6ba1ae | /ansible/environments/stage/state2inventory2.py | 87b94bd920688e999398d568f0d1c86580f6a1e5 | [
"MIT"
] | permissive | spinor72/otus-devops-infra | e375a7ec347484b89feb94255044baf8004e8dd0 | 35a0e5db24f88941c756f8afbb80087258edd70e | refs/heads/master | 2020-04-02T05:50:25.767922 | 2018-04-18T15:16:59 | 2018-04-18T15:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,404 | py | #!/usr/bin/python
import sys
import os
import argparse
import json
from subprocess import Popen, PIPE
import ConfigParser
URL_TEMPLATE = "gs://{}/{}/default.tfstate"
DEBUG = False
BUCKET = "storage-bucket-spinor-test"
PREFIX = "terraform/stage"
def load_state(bucket, prefix):
try:
state_loader = Popen(["gsutil", "cp", URL_TEMPLATE.format(bucket, prefix), "-"], stdout=PIPE, stderr=PIPE)
error = state_loader.stderr.read()
if error:
print error
sys.stderr.write(error)
sys.exit(os.EX_DATAERR)
data = json.loads(state_loader.stdout.read())
return data
except ValueError:
if DEBUG:
sys.stderr.write("No JSON object could be decoded")
sys.exit(os.EX_DATAERR)
else:
return "{}"
except OSError:
if DEBUG:
sys.stderr.write("Gsutill not installed or other system error")
sys.exit(os.EX_DATAERR)
else:
return "{}"
def print_list(data):
try:
inventory = {"_meta": {"hostvars": {}}}
hosts = (i for i in data.get('modules',[]) if i.get('resources') and i.get('resources').get('null_resource.ansible'))
for _ in hosts:
host = _.get('outputs')
if host.get('name'):
name = host.get('name').get('value')
else:
raise ValueError("Host name not present")
vars = host.get('vars', {}).get('value', {})
inventory["_meta"]["hostvars"][name] = vars
if host.get('host'):
ansible_host = host.get('host').get('value', '')
inventory["_meta"]["hostvars"][name]['ansible_host'] = ansible_host
else:
raise ValueError("Host address not present")
if host.get('groups'):
for g in host.get('groups').get('value', []):
if g not in inventory:
inventory[g] = {'hosts': [], "vars": {}}
inventory[g]['hosts'].append(name)
print json.dumps(inventory, sort_keys=True, indent=4, separators=(',', ': '))
except ValueError as e:
if DEBUG:
sys.stderr.write(e.message)
sys.exit(os.EX_DATAERR)
else:
print "{}"
except AttributeError:
if DEBUG:
sys.stderr.write("Inventory data invalid")
sys.exit(os.EX_DATAERR)
else:
print "{}"
def print_host(host):
print '{}'
prefix = os.environ.get('GS_BUCKET')
bucket = os.environ.get('GS_PREFIX')
config = ConfigParser.ConfigParser()
config_file = os.path.splitext(os.path.abspath(__file__))[0] + ".cfg"
if os.path.isfile (config_file):
try:
config.read(config_file)
prefix = config.get('gs', 'prefix')
bucket = config.get('gs', 'bucket')
except :
sys.stderr.write("Error in configuration file")
if not prefix or not bucket:
sys.stderr.write("Cannot set configuration options for bucket")
sys.exit(os.EX_DATAERR)
parser = argparse.ArgumentParser()
parser.add_argument("--list", help="print inventory in json format", action="store_true")
parser.add_argument("--host", help="print host vars in json format")
args = parser.parse_args()
state = load_state(bucket, prefix)
if args.list:
print_list(state)
elif args.host:
print_host(args.host)
| [
"spinor72@gmail.com"
] | spinor72@gmail.com |
831816fbb5c78a1a9f011082071976f5c31f65cb | 60f8187a36a2d1e19daaf81e0b476af48dff299e | /main.py | 04372691f07ad116ab3ea89b8767ab195808774e | [] | no_license | AndreasArne/iot-course-lnu | 150eb452e74c3e118fb23c50cc25937aeb0e43ad | 3c73ebfa909a3a8d10e46099eb7044cb701938c2 | refs/heads/main | 2023-06-26T08:22:14.097097 | 2021-08-02T15:05:32 | 2021-08-02T15:05:32 | 391,902,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import time
import ubinascii
import hashlib
from mqttclient import MQTTClient
from machine import Pin
from _pybytes import Pybytes, unique_id
from _pybytes_config import PybytesConfig
print("Setup")
# Use pybytes config to connect to Wifi
conf = PybytesConfig().read_config()
pybytes = Pybytes(conf)
pybytes.start()
# Prepare pin to read from sensor
pin_input = Pin('P16', mode = Pin.IN)
# mqtt settings
topic_pub = 'home/bedroom/'
broker_url = '<IP-to-broker>'
client_name = ubinascii.hexlify(hashlib.md5(unique_id()).digest()) # create a md5 hash of the pycom WLAN mac
c = MQTTClient(client_name,broker_url)
c.connect()
def send_value(motion, mq):
# create message and send it
value = '{"bedroom": { "motion": ' + str(motion) + '}}'
try:
mq.publish(topic_pub, value)
print('Sensor data sent with value {}'.format(motion))
except (NameError, ValueError, TypeError) as e:
print('Failed to send!')
print(e)
# also send pybytes signal
pybytes.send_signal(99, motion)
old_motion = 0
while True: # main loop
motion = pin_input.value() # 1 for movement, 0 for nothing
if old_motion != motion: # if state has changed, send it
old_motion = motion
send_value(motion, c)
time.sleep(2)
| [
"andreas_osby@hotmail.com"
] | andreas_osby@hotmail.com |
11af46ef97df5e9a477ac2fc1ce38b1f0d148bd8 | c74756d67db9ad5207a493cd501fbec0ba2614ee | /过往的爬虫项目/zls_moviePro/zls_moviePro/items.py | 27607e9a9d5379e52946430675f7d28bad717af0 | [] | no_license | kawakami-araki/- | 1a3928fbc77cbb7b08659c5c01cdcda894e89626 | 2d0515fde8c6234831bab7c4313b901b233416d3 | refs/heads/master | 2022-03-31T12:10:58.308646 | 2020-01-14T06:27:49 | 2020-01-14T06:27:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ZlsMovieproItem(scrapy.Item):
movie_name = scrapy.Field()
detail_text = scrapy.Field()
| [
"55047390+fcg22450@users.noreply.github.com"
] | 55047390+fcg22450@users.noreply.github.com |
51bdf2cbbf1b7030553346999295a259f8d27ba9 | 5b3c7a9e8adb114bd1962782bf78b4ab84f95506 | /append_delete_inlinkedlist.py | d50a2f146e74fe2dae39939eef43af0d1d06cf00 | [] | no_license | suryansh4537/basics | 09817e9e9051157f1082a96d433308966231221c | 47244a54122324cbbd489f7dea7d6328f19ef96f | refs/heads/master | 2020-05-31T13:57:46.483380 | 2019-08-03T11:34:23 | 2019-08-03T11:34:23 | 190,317,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | class Node:
def __init__(self,data):
self.data=data
self.nextval=None
class linkedlist:
def __init__(self):
self.head=None
def append(self,data):
new_node=Node(data)
if self.head==None:
self.head=new_node
return
last=self.head
while last.nextval!=None:
last=last.nextval
last.nextval=new_node
def showall(self):
start=self.head
while start!=None:
print(start.data)
start=start.nextval
def deletenode(self,key):
point=self.head
if point and point.data==key:
self.head=point.nextval
point.nextval=None
return
prev = None
while point and point.data!=key:
prev = point
point=point.nextval
prev.nextval=point.nextval
point.nextval=None
def deletewithindex(self,pos):
index=0
currentnode=self.head
if currentnode and pos==index:
self.head=currentnode.nextval
currentnode.nextval=None
return
prev=None
while index!=pos and currentnode.nextval!=None:
index+=1
prev=currentnode
currentnode=currentnode.nextval
if currentnode.nextval==None:
print("not in the list")
prev.nextval=currentnode.nextval
currentnode.nextval=None
l1=linkedlist()
l1.append("a")
l1.append("b")
l1.append("c")
l1.append("d")
#l1.deletenode("a")
l1.deletewithindex(4)
l1.showall() | [
"noreply@github.com"
] | noreply@github.com |
d1474cc0df6c811402262151e649c79b99c5fa63 | 7033a3077a5824ee74af4f7edc16224349ee8ef9 | /venv1/lib/python3.7/site-packages/phonenumbers/carrierdata/data2.py | 94bc6c16dc2590e3f7b65fcef3f39358d3d5960d | [] | no_license | Caiphe/Barrows_python | 773e7c7d45c38753c5e3dce87c95768fcf3e5e9d | 8fd3f18f53aceef2b56ee18f6847a4441ae82803 | refs/heads/master | 2020-07-04T06:49:00.824349 | 2019-08-19T09:47:48 | 2019-08-19T09:47:48 | 202,192,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,301 | py | """Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from ..util import u
# Copyright (C) 2011-2019 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
data = {
'9183287':{'en': 'Vodafone'},
'918329':{'en': 'Reliance Jio'},
'91833':{'en': 'Vodafone'},
'918330':{'en': 'BSNL MOBILE'},
'918331':{'en': 'BSNL MOBILE'},
'918332':{'en': 'BSNL MOBILE'},
'918333':{'en': 'BSNL MOBILE'},
'918340':{'en': 'Reliance Jio'},
'9183400':{'en': 'Idea'},
'9183408':{'en': 'Idea'},
'9183409':{'en': 'Idea'},
'918341':{'en': 'Telewings'},
'918342':{'en': 'Vodafone'},
'918343':{'en': 'Idea'},
'918344':{'en': 'Aircel'},
'918345':{'en': 'Idea'},
'918346':{'en': 'Idea'},
'918347':{'en': 'Idea'},
'918348':{'en': 'Vodafone'},
'918349':{'en': 'Airtel'},
'918350':{'en': 'Idea'},
'918351':{'en': 'Idea'},
'918352':{'en': 'Idea'},
'9183530':{'en': 'Idea'},
'9183538':{'en': 'Idea'},
'9183539':{'en': 'Telewings'},
'918354':{'en': 'Telewings'},
'9183558':{'en': 'Reliance Jio'},
'9183559':{'en': 'Reliance Jio'},
'9183560':{'en': 'Reliance Jio'},
'9183568':{'en': 'Reliance Jio'},
'9183569':{'en': 'Reliance Jio'},
'918357':{'en': 'Vodafone'},
'918358':{'en': 'Vodafone'},
'918359':{'en': 'Vodafone'},
'918360':{'en': 'Reliance Jio'},
'9183670':{'en': 'Idea'},
'9183672':{'en': 'Idea'},
'9183673':{'en': 'Idea'},
'9183674':{'en': 'Idea'},
'9183675':{'en': 'Idea'},
'9183676':{'en': 'Idea'},
'9183677':{'en': 'Idea'},
'9183678':{'en': 'Vodafone'},
'918368':{'en': 'Reliance Jio'},
'918369':{'en': 'Reliance Jio'},
'91837':{'en': 'Vodafone'},
'918374':{'en': 'Airtel'},
'918378':{'en': 'Idea'},
'918379':{'en': 'Idea'},
'918380':{'en': 'Idea'},
'9183810':{'en': 'Idea'},
'9183818':{'en': 'Telewings'},
'918382':{'en': 'Telewings'},
'9183830':{'en': 'Reliance Jio'},
'9183838':{'en': 'Reliance Jio'},
'9183839':{'en': 'Reliance Jio'},
'918384':{'en': 'Aircel'},
'9183840':{'en': 'Reliance Jio'},
'9183848':{'en': 'Reliance Jio'},
'918385':{'en': 'Aircel'},
'918386':{'en': 'Aircel'},
'918387':{'en': 'Aircel'},
'918388':{'en': 'Reliance Jio'},
'918389':{'en': 'Reliance Jio'},
'918390':{'en': 'Vodafone'},
'918391':{'en': 'Reliance Jio'},
'918392':{'en': 'Vodafone'},
'9183920':{'en': 'Reliance Jio'},
'918393':{'en': 'Vodafone'},
'918394':{'en': 'Vodafone'},
'918395':{'en': 'Vodafone'},
'9183960':{'en': 'Vodafone'},
'9183968':{'en': 'Vodafone'},
'9183969':{'en': 'Vodafone'},
'918397':{'en': 'Vodafone'},
'918398':{'en': 'Vodafone'},
'918399':{'en': 'Vodafone'},
'91840':{'en': 'Vodafone'},
'918400':{'en': 'Airtel'},
'918401':{'en': 'Telewings'},
'918409':{'en': 'Telewings'},
'9184100':{'en': 'Aircel'},
'9184118':{'en': 'Vodafone'},
'9184119':{'en': 'Vodafone'},
'918412':{'en': 'Vodafone'},
'918413':{'en': 'Airtel'},
'918414':{'en': 'Airtel'},
'918415':{'en': 'Airtel'},
'918416':{'en': 'Idea'},
'918417':{'en': 'Idea'},
'918418':{'en': 'Idea'},
'918419':{'en': 'Idea'},
'918420':{'en': 'Airtel'},
'918421':{'en': 'Telewings'},
'918422':{'en': 'Idea'},
'918423':{'en': 'Telewings'},
'918424':{'en': 'Idea'},
'918425':{'en': 'Idea'},
'918426':{'en': 'Idea'},
'918427':{'en': 'Airtel'},
'918428':{'en': 'Idea'},
'918429':{'en': 'Reliance Jio'},
'918430':{'en': 'Vodafone'},
'918431':{'en': 'Reliance Jio'},
'918432':{'en': 'Idea'},
'918433':{'en': 'Airtel'},
'9184330':{'en': 'Reliance Jio'},
'9184331':{'en': 'Reliance Jio'},
'9184332':{'en': 'Reliance Jio'},
'9184334':{'en': 'Reliance Jio'},
'918434':{'en': 'Telewings'},
'918435':{'en': 'Idea'},
'918436':{'en': 'Uninor'},
'918437':{'en': 'Idea'},
'918438':{'en': 'Tata Docomo'},
'918439':{'en': 'Tata Docomo'},
'918440':{'en': 'Idea'},
'918441':{'en': 'Idea'},
'918442':{'en': 'Idea'},
'918443':{'en': 'Reliance Jio'},
'918444':{'en': 'Reliance Jio'},
'918445':{'en': 'Telewings'},
'918446':{'en': 'Tata Docomo'},
'918447':{'en': 'Vodafone'},
'918448':{'en': 'Reliance Jio'},
'918449':{'en': 'Idea'},
'9184500':{'en': 'Reliance Jio'},
'9184508':{'en': 'Reliance Jio'},
'9184509':{'en': 'Airtel'},
'918451':{'en': 'Airtel'},
'918452':{'en': 'Airtel'},
'918454':{'en': 'Airtel'},
'918455':{'en': 'Airtel'},
'918456':{'en': 'Airtel'},
'918457':{'en': 'Airtel'},
'918458':{'en': 'Idea'},
'918459':{'en': 'Reliance Jio'},
'91846':{'en': 'Idea'},
'918460':{'en': 'Tata Docomo'},
'918467':{'en': 'Reliance Jio'},
'918468':{'en': 'Reliance Jio'},
'918469':{'en': 'Vodafone'},
'918470':{'en': 'Reliance Jio'},
'918471':{'en': 'Airtel'},
'918472':{'en': 'Airtel'},
'918473':{'en': 'Airtel'},
'9184748':{'en': 'Airtel'},
'9184749':{'en': 'Idea'},
'918475':{'en': 'Idea'},
'918476':{'en': 'Idea'},
'918477':{'en': 'Idea'},
'918478':{'en': 'Idea'},
'918479':{'en': 'Idea'},
'918480':{'en': 'BSNL MOBILE'},
'918481':{'en': 'Idea'},
'9184820':{'en': 'Idea'},
'9184828':{'en': 'Telewings'},
'918483':{'en': 'Telewings'},
'918484':{'en': 'Telewings'},
'918485':{'en': 'Telewings'},
'9184859':{'en': 'Airtel'},
'918486':{'en': 'Vodafone'},
'918487':{'en': 'Telewings'},
'918488':{'en': 'Telewings'},
'918489':{'en': 'Vodafone'},
'91849':{'en': 'Idea'},
'918490':{'en': 'Telewings'},
'918491':{'en': 'Airtel'},
'918492':{'en': 'Airtel'},
'918493':{'en': 'Airtel'},
'91850':{'en': 'Idea'},
'918500':{'en': 'BSNL MOBILE'},
'918507':{'en': 'Aircel'},
'918508':{'en': 'Aircel'},
'918509':{'en': 'Reliance Jio'},
'918510':{'en': 'Idea'},
'918511':{'en': 'Airtel'},
'918512':{'en': 'Idea'},
'918513':{'en': 'Idea'},
'918514':{'en': 'Idea'},
'918515':{'en': 'Idea'},
'918516':{'en': 'Vodafone'},
'918517':{'en': 'Vodafone'},
'918518':{'en': 'Vodafone'},
'9185190':{'en': 'Vodafone'},
'9185198':{'en': 'Telewings'},
'918520':{'en': 'Telewings'},
'918521':{'en': 'Airtel'},
'918522':{'en': 'Telewings'},
'918523':{'en': 'Telewings'},
'9185239':{'en': 'Vodafone'},
'918524':{'en': 'Vodafone'},
'918525':{'en': 'Vodafone'},
'918526':{'en': 'Aircel'},
'918527':{'en': 'Airtel'},
'9185310':{'en': 'Vodafone'},
'9185318':{'en': 'Vodafone'},
'9185319':{'en': 'Vodafone'},
'918532':{'en': 'Telewings'},
'918533':{'en': 'Telewings'},
'918534':{'en': 'Telewings'},
'918535':{'en': 'Idea'},
'918536':{'en': 'Idea'},
'918537':{'en': 'Idea'},
'9185380':{'en': 'Idea'},
'9185388':{'en': 'Idea'},
'9185389':{'en': 'Telewings'},
'918539':{'en': 'Telewings'},
'918540':{'en': 'Telewings'},
'918541':{'en': 'Telewings'},
'918542':{'en': 'Telewings'},
'9185438':{'en': 'Telewings'},
'918544':{'en': 'BSNL MOBILE'},
'918545':{'en': 'Telewings'},
'918546':{'en': 'Idea'},
'918547':{'en': 'BSNL MOBILE'},
'918548':{'en': 'Idea'},
'918549':{'en': 'Idea'},
'9185500':{'en': 'Idea'},
'9185508':{'en': 'Idea'},
'9185509':{'en': 'Vodafone'},
'918551':{'en': 'Vodafone'},
'918552':{'en': 'Vodafone'},
'918553':{'en': 'Aircel'},
'918554':{'en': 'Vodafone'},
'9185550':{'en': 'Reliance Jio'},
'9185558':{'en': 'Reliance Jio'},
'9185559':{'en': 'Reliance Jio'},
'918556':{'en': 'Vodafone'},
'918557':{'en': 'Vodafone'},
'918558':{'en': 'Vodafone'},
'9185590':{'en': 'Vodafone'},
'9185598':{'en': 'Aircel'},
'9185599':{'en': 'Aircel'},
'918560':{'en': 'Aircel'},
'918561':{'en': 'Aircel'},
'918562':{'en': 'Aircel'},
'918563':{'en': 'Aircel'},
'918564':{'en': 'Aircel'},
'918565':{'en': 'Aircel'},
'918566':{'en': 'Aircel'},
'918567':{'en': 'Aircel'},
'918568':{'en': 'Aircel'},
'9185690':{'en': 'Aircel'},
'9185698':{'en': 'Airtel'},
'9185699':{'en': 'Airtel'},
'918570':{'en': 'Airtel'},
'918571':{'en': 'Airtel'},
'9185720':{'en': 'Airtel'},
'9185728':{'en': 'Airtel'},
'9185729':{'en': 'Idea'},
'918573':{'en': 'Idea'},
'918574':{'en': 'Aircel'},
'918575':{'en': 'Aircel'},
'918576':{'en': 'Idea'},
'918577':{'en': 'Idea'},
'918578':{'en': 'Idea'},
'918579':{'en': 'Idea'},
'918580':{'en': 'BSNL MOBILE'},
'918581':{'en': 'Idea'},
'9185820':{'en': 'Idea'},
'9185828':{'en': 'Airtel'},
'9185829':{'en': 'Airtel'},
'918583':{'en': 'Airtel'},
'918584':{'en': 'Airtel'},
'9185850':{'en': 'Airtel'},
'9185858':{'en': 'Airtel'},
'9185859':{'en': 'Vodafone'},
'918586':{'en': 'Vodafone'},
'918587':{'en': 'Vodafone'},
'918588':{'en': 'Vodafone'},
'918589':{'en': 'Vodafone'},
'918590':{'en': 'Reliance Jio'},
'918591':{'en': 'Reliance Jio'},
'918592':{'en': 'Vodafone'},
'918593':{'en': 'Vodafone'},
'918594':{'en': 'Vodafone'},
'918595':{'en': 'Reliance Jio'},
'918596':{'en': 'Vodafone'},
'918597':{'en': 'Reliance Jio'},
'918598':{'en': 'Vodafone'},
'9185990':{'en': 'Vodafone'},
'9185998':{'en': 'Vodafone'},
'9185999':{'en': 'Aircel'},
'918600':{'en': 'Airtel'},
'918601':{'en': 'Vodafone'},
'918602':{'en': 'Tata Docomo'},
'918603':{'en': 'Tata Docomo'},
'918604':{'en': 'Tata Docomo'},
'918605':{'en': 'Idea'},
'918606':{'en': 'Idea'},
'918607':{'en': 'Idea'},
'918608':{'en': 'Idea'},
'918609':{'en': 'Idea'},
'918610':{'en': 'Reliance Jio'},
'9186170':{'en': 'Reliance Jio'},
'9186172':{'en': 'Reliance Jio'},
'9186173':{'en': 'Reliance Jio'},
'9186174':{'en': 'Reliance Jio'},
'9186175':{'en': 'Reliance Jio'},
'9186176':{'en': 'Reliance Jio'},
'9186177':{'en': 'Reliance Jio'},
'9186178':{'en': 'Reliance Jio'},
'918618':{'en': 'Reliance Jio'},
'918619':{'en': 'Reliance Jio'},
'918620':{'en': 'Aircel'},
'918621':{'en': 'Aircel'},
'918622':{'en': 'Aircel'},
'918623':{'en': 'Telewings'},
'918624':{'en': 'Telewings'},
'918625':{'en': 'Telewings'},
'918626':{'en': 'Airtel'},
'9186260':{'en': 'Telewings'},
'918627':{'en': 'Airtel'},
'918628':{'en': 'Airtel'},
'9186290':{'en': 'Airtel'},
'9186298':{'en': 'Airtel'},
'918630':{'en': 'Reliance Jio'},
'9186370':{'en': 'Reliance Jio'},
'9186372':{'en': 'Reliance Jio'},
'9186373':{'en': 'Reliance Jio'},
'9186374':{'en': 'Reliance Jio'},
'9186375':{'en': 'Reliance Jio'},
'9186376':{'en': 'Reliance Jio'},
'9186377':{'en': 'Idea'},
'9186378':{'en': 'Reliance Jio'},
'918638':{'en': 'Reliance Jio'},
'918639':{'en': 'Reliance Jio'},
'9186499':{'en': 'Aircel'},
'918650':{'en': 'Vodafone'},
'918651':{'en': 'Idea'},
'918652':{'en': 'Idea'},
'918653':{'en': 'Tata Docomo'},
'918654':{'en': 'Aircel'},
'918655':{'en': 'Tata Docomo'},
'918656':{'en': 'Aircel'},
'918657':{'en': 'Vodafone'},
'918658':{'en': 'Airtel'},
'918659':{'en': 'Aircel'},
'918660':{'en': 'Reliance Jio'},
'9186670':{'en': 'Reliance Jio'},
'9186672':{'en': 'Reliance Jio'},
'9186673':{'en': 'Reliance Jio'},
'9186674':{'en': 'Reliance Jio'},
'9186675':{'en': 'Reliance Jio'},
'9186676':{'en': 'Reliance Jio'},
'9186677':{'en': 'Reliance Jio'},
'9186678':{'en': 'Reliance Jio'},
'918668':{'en': 'Reliance Jio'},
'918669':{'en': 'Idea'},
'9186690':{'en': 'Reliance Jio'},
'9186691':{'en': 'Reliance Jio'},
'918670':{'en': 'Airtel'},
'918671':{'en': 'Aircel'},
'918672':{'en': 'Aircel'},
'918673':{'en': 'Aircel'},
'918674':{'en': 'Aircel'},
'9186748':{'en': 'Vodafone'},
'9186749':{'en': 'Vodafone'},
'918675':{'en': 'Aircel'},
'918676':{'en': 'Vodafone'},
'9186763':{'en': 'Reliance Jio'},
'918677':{'en': 'Vodafone'},
'9186780':{'en': 'Vodafone'},
'9186788':{'en': 'Vodafone'},
'9186789':{'en': 'Idea'},
'918679':{'en': 'Aircel'},
'91868':{'en': 'Idea'},
'918686':{'en': 'Aircel'},
'918687':{'en': 'Reliance Jio'},
'918688':{'en': 'Reliance Jio'},
'918690':{'en': 'Airtel'},
'918691':{'en': 'Idea'},
'918692':{'en': 'Idea'},
'9186930':{'en': 'Idea'},
'9186938':{'en': 'Idea'},
'918695':{'en': 'Vodafone'},
'918696':{'en': 'Vodafone'},
'918697':{'en': 'Vodafone'},
'918698':{'en': 'Vodafone'},
'918699':{'en': 'Tata Docomo'},
'918700':{'en': 'Reliance Jio'},
'9187070':{'en': 'Reliance Jio'},
'9187071':{'en': 'Idea'},
'9187072':{'en': 'Reliance Jio'},
'9187073':{'en': 'Reliance Jio'},
'9187074':{'en': 'Reliance Jio'},
'9187075':{'en': 'Reliance Jio'},
'9187076':{'en': 'Reliance Jio'},
'9187077':{'en': 'Reliance Jio'},
'9187078':{'en': 'Reliance Jio'},
'9187079':{'en': 'Idea'},
'918708':{'en': 'Reliance Jio'},
'918709':{'en': 'Reliance Jio'},
'918712':{'en': 'Tata Docomo'},
'918713':{'en': 'Vodafone'},
'918714':{'en': 'Tata Docomo'},
'918715':{'en': 'Vodafone'},
'918716':{'en': 'Vodafone'},
'9187170':{'en': 'Vodafone'},
'9187178':{'en': 'Idea'},
'9187179':{'en': 'Idea'},
'918718':{'en': 'Idea'},
'918719':{'en': 'Idea'},
'9187200':{'en': 'Idea'},
'9187208':{'en': 'Idea'},
'9187209':{'en': 'Vodafone'},
'918721':{'en': 'Vodafone'},
'918722':{'en': 'Idea'},
'918723':{'en': 'Vodafone'},
'918724':{'en': 'Vodafone'},
'918725':{'en': 'Idea'},
'918726':{'en': 'Idea'},
'918727':{'en': 'Idea'},
'918728':{'en': 'Idea'},
'9187290':{'en': 'Idea'},
'9187298':{'en': 'Airtel'},
'9187300':{'en': 'Airtel'},
'9187310':{'en': 'Airtel'},
'9187328':{'en': 'Airtel'},
'9187329':{'en': 'Telewings'},
'918733':{'en': 'Telewings'},
'918734':{'en': 'Telewings'},
'918735':{'en': 'Telewings'},
'918736':{'en': 'Telewings'},
'918737':{'en': 'Telewings'},
'918738':{'en': 'Telewings'},
'918739':{'en': 'Idea'},
'918740':{'en': 'Idea'},
'918741':{'en': 'Idea'},
'918742':{'en': 'Idea'},
'918743':{'en': 'Idea'},
'918744':{'en': 'Idea'},
'918745':{'en': 'Idea'},
'918746':{'en': 'Idea'},
'918747':{'en': 'Idea'},
'918748':{'en': 'Idea'},
'9187490':{'en': 'Idea'},
'9187498':{'en': 'Aircel'},
'918750':{'en': 'Idea'},
'918751':{'en': 'Aircel'},
'918752':{'en': 'Aircel'},
'9187530':{'en': 'Aircel'},
'9187538':{'en': 'Aircel'},
'9187539':{'en': 'Airtel'},
'918754':{'en': 'Airtel'},
'918755':{'en': 'Airtel'},
'918756':{'en': 'Airtel'},
'918757':{'en': 'Airtel'},
'918758':{'en': 'Vodafone'},
'918759':{'en': 'Aircel'},
'918760':{'en': 'Aircel'},
'918761':{'en': 'Airtel'},
'918762':{'en': 'BSNL MOBILE'},
'918763':{'en': 'BSNL MOBILE'},
'918764':{'en': 'BSNL MOBILE'},
'918765':{'en': 'BSNL MOBILE'},
'918766':{'en': 'Reliance Jio'},
'918767':{'en': 'Reliance Jio'},
'918768':{'en': 'Vodafone'},
'918769':{'en': 'Airtel'},
'918770':{'en': 'Reliance Jio'},
'9187770':{'en': 'Reliance Jio'},
'9187772':{'en': 'Reliance Jio'},
'9187773':{'en': 'Reliance Jio'},
'9187774':{'en': 'Reliance Jio'},
'9187775':{'en': 'Reliance Jio'},
'9187776':{'en': 'Reliance Jio'},
'9187777':{'en': 'Reliance Jio'},
'9187778':{'en': 'Reliance Jio'},
'918778':{'en': 'Reliance Jio'},
'918779':{'en': 'Reliance Jio'},
'918780':{'en': 'Reliance Jio'},
'9187870':{'en': 'Reliance Jio'},
'9187872':{'en': 'Reliance Jio'},
'9187873':{'en': 'Reliance Jio'},
'9187874':{'en': 'Reliance Jio'},
'9187875':{'en': 'Reliance Jio'},
'9187876':{'en': 'Reliance Jio'},
'9187877':{'en': 'Reliance Jio'},
'9187878':{'en': 'Reliance Jio'},
'918788':{'en': 'Reliance Jio'},
'918789':{'en': 'Reliance Jio'},
'918790':{'en': 'Airtel'},
'918791':{'en': 'Tata Docomo'},
'918792':{'en': 'Tata Docomo'},
'918793':{'en': 'Tata Docomo'},
'918794':{'en': 'Vodafone'},
'918795':{'en': 'Vodafone'},
'918796':{'en': 'Aircel'},
'918797':{'en': 'Tata Docomo'},
'918798':{'en': 'Reliance Jio'},
'918799':{'en': 'Reliance Jio'},
'918800':{'en': 'Airtel'},
'918801':{'en': 'Aircel'},
'918802':{'en': 'Aircel'},
'918803':{'en': 'Aircel'},
'918804':{'en': 'Aircel'},
'918805':{'en': 'Idea'},
'918806':{'en': 'Vodafone'},
'918807':{'en': 'Tata Docomo'},
'918808':{'en': 'Idea'},
'918809':{'en': 'Airtel'},
'918810':{'en': 'Reliance Jio'},
'918811':{'en': 'Airtel'},
'918812':{'en': 'Airtel'},
'918813':{'en': 'Vodafone'},
'918814':{'en': 'Vodafone'},
'918815':{'en': 'Reliance Jio'},
'918816':{'en': 'Vodafone'},
'918817':{'en': 'Reliance Jio'},
'918818':{'en': 'Vodafone'},
'918819':{'en': 'Vodafone'},
'918820':{'en': 'Reliance Jio'},
'918821':{'en': 'Vodafone'},
'918822':{'en': 'Reliance Jio'},
'9188230':{'en': 'Vodafone'},
'9188238':{'en': 'Vodafone'},
'9188239':{'en': 'Tata Docomo'},
'918824':{'en': 'Reliance Jio'},
'918825':{'en': 'Reliance Jio'},
'918826':{'en': 'Airtel'},
'918827':{'en': 'Airtel'},
'9188280':{'en': 'Airtel'},
'9188281':{'en': 'Airtel'},
'9188282':{'en': 'Airtel'},
'9188283':{'en': 'Airtel'},
'9188284':{'en': 'Airtel'},
'9188285':{'en': 'Idea'},
'9188286':{'en': 'Idea'},
'9188287':{'en': 'Idea'},
'9188288':{'en': 'Idea'},
'9188289':{'en': 'Idea'},
'918829':{'en': 'Tata Docomo'},
'918830':{'en': 'Reliance Jio'},
'9188370':{'en': 'Reliance Jio'},
'9188372':{'en': 'Reliance Jio'},
'9188373':{'en': 'Reliance Jio'},
'9188374':{'en': 'Reliance Jio'},
'9188375':{'en': 'Reliance Jio'},
'9188376':{'en': 'Reliance Jio'},
'9188377':{'en': 'Reliance Jio'},
'9188378':{'en': 'Reliance Jio'},
'918838':{'en': 'Reliance Jio'},
'918839':{'en': 'Reliance Jio'},
'918840':{'en': 'Reliance Jio'},
'9188470':{'en': 'Reliance Jio'},
'9188472':{'en': 'Reliance Jio'},
'9188473':{'en': 'Reliance Jio'},
'9188474':{'en': 'Reliance Jio'},
'9188475':{'en': 'Reliance Jio'},
'9188476':{'en': 'Reliance Jio'},
'9188477':{'en': 'Idea'},
'9188478':{'en': 'Reliance Jio'},
'918848':{'en': 'Reliance Jio'},
'918849':{'en': 'Reliance Jio'},
'918850':{'en': 'Reliance Jio'},
'918851':{'en': 'Reliance Jio'},
'918852':{'en': 'Tata Docomo'},
'918853':{'en': 'Airtel'},
'918854':{'en': 'Tata Docomo'},
'918855':{'en': 'Telewings'},
'918856':{'en': 'Telewings'},
'918857':{'en': 'Telewings'},
'918858':{'en': 'Telewings'},
'918859':{'en': 'Vodafone'},
'91886':{'en': 'Telewings'},
'918860':{'en': 'Vodafone'},
'918861':{'en': 'Airtel'},
'918866':{'en': 'Tata Docomo'},
'918867':{'en': 'Tata Docomo'},
'91887':{'en': 'Vodafone'},
'918870':{'en': 'Airtel'},
'918871':{'en': 'Tata Docomo'},
'918872':{'en': 'Idea'},
'918873':{'en': 'Idea'},
'918880':{'en': 'Reliance Jio'},
'918881':{'en': 'Vodafone'},
'918882':{'en': 'Reliance Jio'},
'918883':{'en': 'Aircel'},
'918884':{'en': 'Vodafone'},
'918885':{'en': 'Tata Docomo'},
'918886':{'en': 'Vodafone'},
'918887':{'en': 'BSNL MOBILE'},
'918888':{'en': 'Idea'},
'918889':{'en': 'Idea'},
'918890':{'en': 'Airtel'},
'918891':{'en': 'Tata Docomo'},
'918892':{'en': 'Aircel'},
'918893':{'en': 'Reliance Jio'},
'918894':{'en': 'Airtel'},
'918895':{'en': 'BSNL MOBILE'},
'918896':{'en': 'Aircel'},
'918897':{'en': 'Airtel'},
'918898':{'en': 'Aircel'},
'918899':{'en': 'Airtel'},
'918900':{'en': 'BSNL MOBILE'},
'918901':{'en': 'BSNL MOBILE'},
'918902':{'en': 'BSNL MOBILE'},
'918903':{'en': 'BSNL MOBILE'},
'918904':{'en': 'Tata Docomo'},
'918905':{'en': 'Airtel'},
'918906':{'en': 'Aircel'},
'918907':{'en': 'Aircel'},
'918908':{'en': 'Aircel'},
'918909':{'en': 'Aircel'},
'918910':{'en': 'Reliance Jio'},
'918912':{'en': 'Airtel'},
'9189170':{'en': 'Idea'},
'9189172':{'en': 'Reliance Jio'},
'9189173':{'en': 'Reliance Jio'},
'9189174':{'en': 'Reliance Jio'},
'9189175':{'en': 'Reliance Jio'},
'9189176':{'en': 'Reliance Jio'},
'9189177':{'en': 'Idea'},
'9189178':{'en': 'Idea'},
'918918':{'en': 'Reliance Jio'},
'918919':{'en': 'Reliance Jio'},
'918920':{'en': 'Reliance Jio'},
'918921':{'en': 'Reliance Jio'},
'918922':{'en': 'Telewings'},
'918923':{'en': 'Telewings'},
'918924':{'en': 'Telewings'},
'918927':{'en': 'Reliance Jio'},
'918928':{'en': 'Reliance Jio'},
'918929':{'en': 'Vodafone'},
'918930':{'en': 'Vodafone'},
'918931':{'en': 'Telewings'},
'918932':{'en': 'Idea'},
'918933':{'en': 'Idea'},
'918934':{'en': 'Idea'},
'9189350':{'en': 'Idea'},
'9189358':{'en': 'Telewings'},
'9189368':{'en': 'Telewings'},
'9189369':{'en': 'Idea'},
'918937':{'en': 'Idea'},
'9189380':{'en': 'Idea'},
'918939':{'en': 'Vodafone'},
'918940':{'en': 'Vodafone'},
'918941':{'en': 'Idea'},
'918942':{'en': 'Airtel'},
'918943':{'en': 'Vodafone'},
'918944':{'en': 'Airtel'},
'918945':{'en': 'Airtel'},
'918946':{'en': 'Aircel'},
'9189460':{'en': 'Reliance Jio'},
'918947':{'en': 'Aircel'},
'918948':{'en': 'Vodafone'},
'918949':{'en': 'Reliance Jio'},
'918950':{'en': 'Tata Docomo'},
'918951':{'en': 'Tata Docomo'},
'918952':{'en': 'Aircel'},
'918953':{'en': 'Airtel'},
'918954':{'en': 'Vodafone'},
'918955':{'en': 'Reliance Jio'},
'918956':{'en': 'Airtel'},
'918957':{'en': 'Reliance Jio'},
'918958':{'en': 'Idea'},
'918959':{'en': 'Idea'},
'918960':{'en': 'Tata Docomo'},
'918961':{'en': 'Tata Docomo'},
'918962':{'en': 'Tata Docomo'},
'9189630':{'en': 'Aircel'},
'9189638':{'en': 'Aircel'},
'9189639':{'en': 'Idea'},
'918964':{'en': 'Idea'},
'918965':{'en': 'Idea'},
'918966':{'en': 'Idea'},
'918967':{'en': 'Airtel'},
'918968':{'en': 'Airtel'},
'918969':{'en': 'Airtel'},
'918970':{'en': 'Idea'},
'918971':{'en': 'Airtel'},
'918972':{'en': 'Airtel'},
'918973':{'en': 'Aircel'},
'918974':{'en': 'Airtel'},
'918975':{'en': 'Idea'},
'918976':{'en': 'Tata Docomo'},
'918977':{'en': 'Tata Docomo'},
'918978':{'en': 'Airtel'},
'918979':{'en': 'Airtel'},
'918980':{'en': 'Vodafone'},
'918981':{'en': 'Tata Docomo'},
'918982':{'en': 'Tata Docomo'},
'918983':{'en': 'Tata Docomo'},
'918984':{'en': 'Tata Docomo'},
'918985':{'en': 'BSNL MOBILE'},
'918986':{'en': 'BSNL MOBILE'},
'918987':{'en': 'BSNL MOBILE'},
'918988':{'en': 'BSNL MOBILE'},
'918989':{'en': 'BSNL MOBILE'},
'918990':{'en': 'Reliance Jio'},
'9189911':{'en': 'BSNL MOBILE'},
'918999':{'en': 'Reliance Jio'},
'91900':{'en': 'Airtel'},
'919009':{'en': 'Idea'},
'91901':{'en': 'Reliance Jio'},
'919010':{'en': 'Idea'},
'919011':{'en': 'Idea'},
'919012':{'en': 'Idea'},
'919013':{'en': 'MTNL'},
'91902':{'en': 'Reliance Jio'},
'919028':{'en': 'Tata Docomo'},
'919029':{'en': 'Tata Docomo'},
'91903':{'en': 'Tata Docomo'},
'91904':{'en': 'Tata Docomo'},
'919047':{'en': 'Vodafone'},
'919048':{'en': 'Vodafone'},
'919049':{'en': 'Vodafone'},
'919050':{'en': 'Vodafone'},
'919051':{'en': 'Vodafone'},
'919052':{'en': 'Vodafone'},
'919053':{'en': 'Idea'},
'919055':{'en': 'Idea'},
'919056':{'en': 'Tata Docomo'},
'919057':{'en': 'Idea'},
'9190572':{'en': 'Reliance Jio'},
'9190575':{'en': 'Reliance Jio'},
'919058':{'en': 'Telewings'},
'919059':{'en': 'Telewings'},
'919060':{'en': 'Telenor'},
'919061':{'en': 'Idea'},
'919062':{'en': 'Idea'},
'919063':{'en': 'Telewings'},
'919064':{'en': 'Reliance Jio'},
'919066':{'en': 'Aircel'},
'919070':{'en': 'Vodafone'},
'919071':{'en': 'Idea'},
'919072':{'en': 'Vodafone'},
'919073':{'en': 'Vodafone'},
'919074':{'en': 'Reliance Jio'},
'919075':{'en': 'Idea'},
'919076':{'en': 'Idea'},
'919077':{'en': 'Idea'},
'919078':{'en': 'Airtel'},
'919079':{'en': 'Reliance Jio'},
'9190794':{'en': 'Idea'},
'9190795':{'en': 'Idea'},
'919080':{'en': 'Reliance Jio'},
'9190813':{'en': 'Idea'},
'9190814':{'en': 'Idea'},
'9190815':{'en': 'Idea'},
'9190816':{'en': 'Idea'},
'9190817':{'en': 'Idea'},
'9190818':{'en': 'Idea'},
'9190819':{'en': 'Idea'},
'919082':{'en': 'Reliance Jio'},
'919083':{'en': 'Vodafone'},
'919084':{'en': 'Telewings'},
'919085':{'en': 'Idea'},
'919086':{'en': 'Idea'},
'919087':{'en': 'Idea'},
'919088':{'en': 'Idea'},
'919089':{'en': 'Idea'},
'919090':{'en': 'Idea'},
'919091':{'en': 'Idea'},
'919092':{'en': 'Idea'},
'919093':{'en': 'Idea'},
'919094':{'en': 'Aircel'},
'919095':{'en': 'Aircel'},
'919096':{'en': 'Airtel'},
'919097':{'en': 'Aircel'},
'919098':{'en': 'Reliance Jio'},
'919099':{'en': 'Vodafone'},
'919100':{'en': 'Idea'},
'919101':{'en': 'Reliance Jio'},
'919102':{'en': 'Airtel'},
'919103':{'en': 'Reliance Jio'},
'9191030':{'en': 'Aircel'},
'919104':{'en': 'Telewings'},
'919105':{'en': 'Idea'},
'919106':{'en': 'Reliance Jio'},
'919107':{'en': 'Aircel'},
'919108':{'en': 'Airtel'},
'919109':{'en': 'Airtel'},
'919110':{'en': 'Reliance Jio'},
'919111':{'en': 'Idea'},
'9191120':{'en': 'Vodafone'},
'9191121':{'en': 'Vodafone'},
'9191122':{'en': 'Idea'},
'9191123':{'en': 'Idea'},
'9191124':{'en': 'Idea'},
'9191126':{'en': 'Idea'},
'9191127':{'en': 'Idea'},
'9191128':{'en': 'Idea'},
'9191129':{'en': 'Idea'},
'919113':{'en': 'Reliance Jio'},
'919114':{'en': 'Idea'},
'9191160':{'en': 'Airtel'},
'9191161':{'en': 'Airtel'},
'9191163':{'en': 'Airtel'},
'9191164':{'en': 'Airtel'},
'9191165':{'en': 'Airtel'},
'9191166':{'en': 'Airtel'},
'9191167':{'en': 'Airtel'},
'9191168':{'en': 'Airtel'},
'9191169':{'en': 'Airtel'},
'919117':{'en': 'Idea'},
'919118':{'en': 'Telewings'},
'9191190':{'en': 'Telewings'},
'9191191':{'en': 'Airtel'},
'9191192':{'en': 'Telewings'},
'9191193':{'en': 'Airtel'},
'9191194':{'en': 'Idea'},
'9191195':{'en': 'Idea'},
'9191196':{'en': 'Airtel'},
'9191197':{'en': 'Telewings'},
'9191198':{'en': 'Airtel'},
'9191199':{'en': 'Airtel'},
'919120':{'en': 'Telewings'},
'919121':{'en': 'Airtel'},
'919122':{'en': 'Telewings'},
'919123':{'en': 'Reliance Jio'},
'919124':{'en': 'Tata Docomo'},
'919125':{'en': 'Telewings'},
'919126':{'en': 'Idea'},
'919127':{'en': 'Vodafone'},
'919128':{'en': 'Idea'},
'919129':{'en': 'S TEL'},
'919130':{'en': 'Idea'},
'919131':{'en': 'Reliance Jio'},
'919132':{'en': 'Aircel'},
'919133':{'en': 'Idea'},
'919134':{'en': 'Idea'},
'919135':{'en': 'Idea'},
'919137':{'en': 'Reliance Jio'},
'919138':{'en': 'Airtel'},
'919139':{'en': 'Aircel'},
'919140':{'en': 'Reliance Jio'},
'919141':{'en': 'BSNL MOBILE'},
'919144':{'en': 'Idea'},
'919145':{'en': 'Vodafone'},
'919146':{'en': 'Idea'},
'919147':{'en': 'Tata Docomo'},
'919148':{'en': 'Airtel'},
'919149':{'en': 'Reliance Jio'},
'919151':{'en': 'Tata Docomo'},
'919154':{'en': 'Telewings'},
'919155':{'en': 'Telewings'},
'919156':{'en': 'Telewings'},
'919157':{'en': 'Telewings'},
'919158':{'en': 'Vodafone'},
'919159':{'en': 'Vodafone'},
'919160':{'en': 'Vodafone'},
'919161':{'en': 'Vodafone'},
'919162':{'en': 'Airtel'},
'919163':{'en': 'Airtel'},
'919164':{'en': 'Idea'},
'919165':{'en': 'Idea'},
'919166':{'en': 'Airtel'},
'919167':{'en': 'Vodafone'},
'919168':{'en': 'Vodafone'},
'919169':{'en': 'Aircel'},
'919170':{'en': 'Telewings'},
'919171':{'en': 'Idea'},
'919172':{'en': 'Airtel'},
'919173':{'en': 'Telewings'},
'919174':{'en': 'Idea'},
'919175':{'en': 'Telewings'},
'919176':{'en': 'Vodafone'},
'919177':{'en': 'Airtel'},
'919178':{'en': 'Airtel'},
'919179':{'en': 'Airtel'},
'91918':{'en': 'BSNL MOBILE'},
'919182':{'en': 'Reliance Jio'},
'91919':{'en': 'BSNL MOBILE'},
'919198':{'en': 'Airtel'},
'919199':{'en': 'Airtel'},
'9192':{'en': 'Tata Docomo'},
'919205':{'en': 'Airtel'},
'919206':{'en': 'Aircel'},
'919207':{'en': 'Idea'},
'9193':{'en': 'Reliance Jio'},
'919319':{'en': 'Airtel'},
'919341':{'en': 'Airtel'},
'919355':{'en': 'Idea'},
'919358':{'en': 'Airtel'},
'919371':{'en': 'Idea'},
'919374':{'en': 'Vodafone'},
'919375':{'en': 'Vodafone'},
'919376':{'en': 'Vodafone'},
'919385':{'en': 'BSNL MOBILE'},
'9194':{'en': 'BSNL MOBILE'},
'919500':{'en': 'Airtel'},
'919501':{'en': 'Airtel'},
'919502':{'en': 'Airtel'},
'919503':{'en': 'Airtel'},
'919504':{'en': 'Aircel'},
'919505':{'en': 'Idea'},
'919506':{'en': 'Idea'},
'919507':{'en': 'Idea'},
'919508':{'en': 'Reliance Jio'},
'919509':{'en': 'Reliance Jio'},
'919510':{'en': 'Reliance Jio'},
'9195110':{'en': 'Airtel'},
'9195111':{'en': 'Airtel'},
'9195112':{'en': 'Reliance Jio'},
'9195113':{'en': 'Airtel'},
'9195114':{'en': 'Airtel'},
'9195115':{'en': 'Reliance Jio'},
'9195116':{'en': 'Reliance Jio'},
'9195117':{'en': 'Reliance Jio'},
'9195118':{'en': 'Reliance Jio'},
'9195119':{'en': 'Airtel'},
'919512':{'en': 'Vodafone'},
'919513':{'en': 'Vodafone'},
'919514':{'en': 'Idea'},
'919515':{'en': 'Idea'},
'919516':{'en': 'Idea'},
'919517':{'en': 'Vodafone'},
'919518':{'en': 'Reliance Jio'},
'919519':{'en': 'Airtel'},
'9195200':{'en': 'Telewings'},
'9195205':{'en': 'Telewings'},
'9195206':{'en': 'Telewings'},
'9195207':{'en': 'Telewings'},
'9195208':{'en': 'Tata Docomo'},
'9195209':{'en': 'Tata Docomo'},
'919521':{'en': 'Airtel'},
'9195219':{'en': 'Aircel'},
'919522':{'en': 'Vodafone'},
'919523':{'en': 'Airtel'},
'919524':{'en': 'Aircel'},
'919525':{'en': 'Idea'},
'919526':{'en': 'Idea'},
'919527':{'en': 'Idea'},
'919528':{'en': 'Reliance Jio'},
'919529':{'en': 'Reliance Jio'},
'919530':{'en': 'BSNL MOBILE'},
'919531':{'en': 'BSNL MOBILE'},
'919532':{'en': 'BSNL MOBILE'},
'919533':{'en': 'Reliance Jio'},
'919534':{'en': 'Vodafone'},
'919535':{'en': 'Airtel'},
'919536':{'en': 'Vodafone'},
'919537':{'en': 'Vodafone'},
'919538':{'en': 'Vodafone'},
'919539':{'en': 'Vodafone'},
'919540':{'en': 'Idea'},
'919541':{'en': 'Reliance Jio'},
'919542':{'en': 'Idea'},
'919543':{'en': 'Reliance Jio'},
'919544':{'en': 'Idea'},
'919545':{'en': 'Vodafone'},
'919546':{'en': 'Airtel'},
'919547':{'en': 'Airtel'},
'919548':{'en': 'Reliance Jio'},
'919549':{'en': 'Vodafone'},
'919550':{'en': 'Airtel'},
'919551':{'en': 'Aircel'},
'919552':{'en': 'Idea'},
'919553':{'en': 'Idea'},
'919554':{'en': 'Vodafone'},
'919555':{'en': 'Reliance Jio'},
'919556':{'en': 'Airtel'},
'919557':{'en': 'Airtel'},
'919558':{'en': 'Airtel'},
'919559':{'en': 'Airtel'},
'919560':{'en': 'Airtel'},
'919561':{'en': 'Airtel'},
'919562':{'en': 'Idea'},
'919563':{'en': 'Aircel'},
'919564':{'en': 'Vodafone'},
'919565':{'en': 'Vodafone'},
'919566':{'en': 'Airtel'},
'919567':{'en': 'Airtel'},
'919568':{'en': 'Idea'},
'919569':{'en': 'Reliance Jio'},
'919570':{'en': 'Vodafone'},
'919571':{'en': 'Airtel'},
'919572':{'en': 'Airtel'},
'919573':{'en': 'Airtel'},
'919574':{'en': 'Idea'},
'919575':{'en': 'Idea'},
'919576':{'en': 'Idea'},
'919577':{'en': 'Aircel'},
'919578':{'en': 'Aircel'},
'919579':{'en': 'Reliance Jio'},
'91958':{'en': 'Vodafone'},
'919580':{'en': 'Reliance Jio'},
'919588':{'en': 'Reliance Jio'},
'919589':{'en': 'Airtel'},
'919590':{'en': 'Reliance Jio'},
'919591':{'en': 'Airtel'},
'919592':{'en': 'Idea'},
'919593':{'en': 'Vodafone'},
'919594':{'en': 'Idea'},
'919595':{'en': 'Reliance Jio'},
'919596':{'en': 'Airtel'},
'919597':{'en': 'Airtel'},
'919598':{'en': 'Idea'},
'919599':{'en': 'Airtel'},
'919600':{'en': 'Airtel'},
'919601':{'en': 'Airtel'},
'919602':{'en': 'Airtel'},
'919603':{'en': 'Idea'},
'919604':{'en': 'Idea'},
'919605':{'en': 'Idea'},
'919606':{'en': 'Reliance Jio'},
'919607':{'en': 'Reliance Jio'},
'919608':{'en': 'Reliance Jio'},
'919609':{'en': 'Vodafone'},
'919610':{'en': 'Vodafone'},
'919611':{'en': 'Airtel'},
'919612':{'en': 'Airtel'},
'919613':{'en': 'Aircel'},
'919614':{'en': 'Aircel'},
'919615':{'en': 'Aircel'},
'919616':{'en': 'Idea'},
'919617':{'en': 'Idea'},
'919619':{'en': 'Vodafone'},
'919620':{'en': 'Vodafone'},
'919621':{'en': 'Airtel'},
'919622':{'en': 'Airtel'},
'919623':{'en': 'Idea'},
'919625':{'en': 'Reliance Jio'},
'919626':{'en': 'Vodafone'},
'919627':{'en': 'Vodafone'},
'919628':{'en': 'Vodafone'},
'919629':{'en': 'Airtel'},
'919630':{'en': 'Airtel'},
'919631':{'en': 'Airtel'},
'919632':{'en': 'Airtel'},
'919633':{'en': 'Airtel'},
'919634':{'en': 'Airtel'},
'919635':{'en': 'Airtel'},
'919637':{'en': 'Vodafone'},
'919639':{'en': 'Idea'},
'919640':{'en': 'Idea'},
'919641':{'en': 'Reliance Jio'},
'919642':{'en': 'Vodafone'},
'919644':{'en': 'Idea'},
'919645':{'en': 'Vodafone'},
'919646':{'en': 'Vodafone'},
'919647':{'en': 'Vodafone'},
'919648':{'en': 'Vodafone'},
'919649':{'en': 'Vodafone'},
'919650':{'en': 'Airtel'},
'919651':{'en': 'Airtel'},
'919652':{'en': 'Airtel'},
'9196530':{'en': 'Reliance Jio'},
'9196531':{'en': 'Reliance Jio'},
'9196532':{'en': 'Reliance Jio'},
'9196533':{'en': 'Reliance Jio'},
'9196534':{'en': 'Reliance Jio'},
'9196536':{'en': 'Reliance Jio'},
'919654':{'en': 'Vodafone'},
'919655':{'en': 'Vodafone'},
'919656':{'en': 'Idea'},
'919657':{'en': 'Idea'},
'919658':{'en': 'Aircel'},
'919659':{'en': 'Aircel'},
'919660':{'en': 'Airtel'},
'919661':{'en': 'Airtel'},
'919662':{'en': 'Airtel'},
'919663':{'en': 'Airtel'},
'919664':{'en': 'Reliance Jio'},
'919665':{'en': 'Airtel'},
'919666':{'en': 'Idea'},
'919668':{'en': 'Airtel'},
'919669':{'en': 'Idea'},
'91967':{'en': 'Vodafone'},
'919676':{'en': 'Airtel'},
'919677':{'en': 'Airtel'},
'919678':{'en': 'Airtel'},
'919679':{'en': 'Airtel'},
'919680':{'en': 'Airtel'},
'919681':{'en': 'Reliance Jio'},
'919682':{'en': 'BSNL MOBILE'},
'9196821':{'en': 'Reliance Jio'},
'9196823':{'en': 'Reliance Jio'},
'9196825':{'en': 'Reliance Jio'},
'9196826':{'en': 'Reliance Jio'},
'919683':{'en': 'BSNL MOBILE'},
'919684':{'en': 'BSNL MOBILE'},
'919685':{'en': 'Airtel'},
'919686':{'en': 'Airtel'},
'919687':{'en': 'Vodafone'},
'919688':{'en': 'Aircel'},
'919689':{'en': 'Idea'},
'919691':{'en': 'Reliance Jio'},
'919692':{'en': 'Reliance Jio'},
'919693':{'en': 'Reliance Jio'},
'919694':{'en': 'Idea'},
'919695':{'en': 'Airtel'},
'919696':{'en': 'Reliance Jio'},
'919697':{'en': 'Aircel'},
'919698':{'en': 'Aircel'},
'919699':{'en': 'Reliance Jio'},
'919700':{'en': 'Aircel'},
'919701':{'en': 'Airtel'},
'919702':{'en': 'Idea'},
'919703':{'en': 'Vodafone'},
'919704':{'en': 'Airtel'},
'919705':{'en': 'Idea'},
'919706':{'en': 'Vodafone'},
'919707':{'en': 'Reliance Jio'},
'919708':{'en': 'Idea'},
'919709':{'en': 'Vodafone'},
'919710':{'en': 'Aircel'},
'919711':{'en': 'Vodafone'},
'919712':{'en': 'Vodafone'},
'919713':{'en': 'Vodafone'},
'919714':{'en': 'Idea'},
'919715':{'en': 'Aircel'},
'919716':{'en': 'Aircel'},
'919717':{'en': 'Airtel'},
'919718':{'en': 'Idea'},
'919719':{'en': 'Vodafone'},
'919720':{'en': 'Vodafone'},
'919721':{'en': 'Vodafone'},
'919722':{'en': 'Aircel'},
'919723':{'en': 'Idea'},
'919724':{'en': 'Airtel'},
'919725':{'en': 'Airtel'},
'919726':{'en': 'Vodafone'},
'919727':{'en': 'Vodafone'},
'919728':{'en': 'Idea'},
'919729':{'en': 'Airtel'},
'91973':{'en': 'Vodafone'},
'919730':{'en': 'Airtel'},
'919731':{'en': 'Airtel'},
'919737':{'en': 'Idea'},
'919738':{'en': 'Aircel'},
'919740':{'en': 'Airtel'},
'919741':{'en': 'Airtel'},
'919742':{'en': 'Vodafone'},
'919743':{'en': 'Idea'},
'919744':{'en': 'Idea'},
'919745':{'en': 'Vodafone'},
'919746':{'en': 'Airtel'},
'919747':{'en': 'Idea'},
'919748':{'en': 'Airtel'},
'919749':{'en': 'Reliance Jio'},
'919750':{'en': 'Aircel'},
'919751':{'en': 'Vodafone'},
'919752':{'en': 'Airtel'},
'919753':{'en': 'Idea'},
'919754':{'en': 'Idea'},
'919755':{'en': 'Airtel'},
'919756':{'en': 'Idea'},
'919757':{'en': 'MTNL'},
'919758':{'en': 'Vodafone'},
'919759':{'en': 'Vodafone'},
'919760':{'en': 'Airtel'},
'919761':{'en': 'Vodafone'},
'919762':{'en': 'Aircel'},
'919763':{'en': 'Idea'},
'919764':{'en': 'Vodafone'},
'919765':{'en': 'Vodafone'},
'919766':{'en': 'Airtel'},
'919767':{'en': 'Idea'},
'919768':{'en': 'Aircel'},
'919769':{'en': 'Vodafone'},
'919770':{'en': 'Reliance Jio'},
'919771':{'en': 'Airtel'},
'919772':{'en': 'Vodafone'},
'9197730':{'en': 'Reliance Jio'},
'9197731':{'en': 'Reliance Jio'},
'9197732':{'en': 'Reliance Jio'},
'9197734':{'en': 'Reliance Jio'},
'9197735':{'en': 'Airtel'},
'9197736':{'en': 'Airtel'},
'9197737':{'en': 'Airtel'},
'9197738':{'en': 'Airtel'},
'9197739':{'en': 'Airtel'},
'919774':{'en': 'Vodafone'},
'919775':{'en': 'Vodafone'},
'919776':{'en': 'Vodafone'},
'919777':{'en': 'Airtel'},
'919778':{'en': 'Reliance Jio'},
'919779':{'en': 'Airtel'},
'919780':{'en': 'Vodafone'},
'919781':{'en': 'Idea'},
'919782':{'en': 'Aircel'},
'919783':{'en': 'Vodafone'},
'919784':{'en': 'Airtel'},
'919785':{'en': 'Idea'},
'919786':{'en': 'Vodafone'},
'919787':{'en': 'Vodafone'},
'919788':{'en': 'Aircel'},
'919789':{'en': 'Airtel'},
'919790':{'en': 'Airtel'},
'919791':{'en': 'Airtel'},
'919792':{'en': 'Vodafone'},
'919793':{'en': 'Airtel'},
'919794':{'en': 'Airtel'},
'919796':{'en': 'Vodafone'},
'919797':{'en': 'Airtel'},
'919798':{'en': 'Reliance Jio'},
'919799':{'en': 'Airtel'},
'91980':{'en': 'Aircel'},
'919800':{'en': 'Airtel'},
'919801':{'en': 'Airtel'},
'919805':{'en': 'Airtel'},
'919810':{'en': 'Airtel'},
'919811':{'en': 'Vodafone'},
'919812':{'en': 'Idea'},
'919813':{'en': 'Vodafone'},
'919814':{'en': 'Idea'},
'919815':{'en': 'Airtel'},
'919816':{'en': 'Airtel'},
'919817':{'en': 'Reliance Jio'},
'919818':{'en': 'Airtel'},
'919819':{'en': 'Vodafone'},
'919820':{'en': 'Vodafone'},
'919821':{'en': 'Airtel'},
'919822':{'en': 'Idea'},
'919823':{'en': 'Vodafone'},
'919824':{'en': 'Idea'},
'919825':{'en': 'Vodafone'},
'919826':{'en': 'Idea'},
'919827':{'en': 'Reliance Jio'},
'919828':{'en': 'Vodafone'},
'919829':{'en': 'Airtel'},
'919830':{'en': 'Vodafone'},
'919831':{'en': 'Airtel'},
'919832':{'en': 'Reliance Jio'},
'919833':{'en': 'Vodafone'},
'919834':{'en': 'Reliance Jio'},
'919835':{'en': 'Reliance Jio'},
'919836':{'en': 'Vodafone'},
'919837':{'en': 'Idea'},
'919838':{'en': 'Vodafone'},
'919839':{'en': 'Vodafone'},
'919840':{'en': 'Airtel'},
'919841':{'en': 'Aircel'},
'919842':{'en': 'Aircel'},
'919843':{'en': 'Vodafone'},
'919844':{'en': 'Idea'},
'919845':{'en': 'Airtel'},
'919846':{'en': 'Vodafone'},
'919847':{'en': 'Idea'},
'919848':{'en': 'Idea'},
'919849':{'en': 'Airtel'},
'91985':{'en': 'Aircel'},
'919850':{'en': 'Idea'},
'919855':{'en': 'Idea'},
'919860':{'en': 'Airtel'},
'919861':{'en': 'Reliance Jio'},
'919862':{'en': 'Airtel'},
'919863':{'en': 'Reliance Jio'},
'919864':{'en': 'Reliance Jio'},
'919865':{'en': 'Aircel'},
'919866':{'en': 'Airtel'},
'919867':{'en': 'Airtel'},
'919868':{'en': 'MTNL'},
'919869':{'en': 'MTNL'},
'9198700':{'en': 'Reliance Jio'},
'9198701':{'en': 'Airtel'},
'9198702':{'en': 'Airtel'},
'9198703':{'en': 'Airtel'},
'9198704':{'en': 'Airtel'},
'9198705':{'en': 'Airtel'},
'9198706':{'en': 'Reliance Jio'},
'9198707':{'en': 'Reliance Jio'},
'9198708':{'en': 'Reliance Jio'},
'9198709':{'en': 'Reliance Jio'},
'919871':{'en': 'Airtel'},
'919872':{'en': 'Airtel'},
'919873':{'en': 'Vodafone'},
'919874':{'en': 'Vodafone'},
'9198753':{'en': 'Reliance Jio'},
'9198754':{'en': 'Reliance Jio'},
'9198755':{'en': 'Reliance Jio'},
'9198756':{'en': 'Reliance Jio'},
'9198759':{'en': 'Airtel'},
'919876':{'en': 'Airtel'},
'919877':{'en': 'Reliance Jio'},
'919878':{'en': 'Airtel'},
'919879':{'en': 'Vodafone'},
'919880':{'en': 'Airtel'},
'919882':{'en': 'Idea'},
'919883':{'en': 'Reliance Jio'},
'919884':{'en': 'Vodafone'},
'919885':{'en': 'Vodafone'},
'919886':{'en': 'Vodafone'},
'919887':{'en': 'Idea'},
'919888':{'en': 'Vodafone'},
'919889':{'en': 'Idea'},
'91989':{'en': 'Airtel'},
'919891':{'en': 'Idea'},
'919899':{'en': 'Vodafone'},
'91990':{'en': 'Airtel'},
'919904':{'en': 'Idea'},
'919905':{'en': 'Reliance Jio'},
'919907':{'en': 'Reliance Jio'},
'919909':{'en': 'Vodafone'},
'919910':{'en': 'Airtel'},
'919911':{'en': 'Idea'},
'919912':{'en': 'Idea'},
'919913':{'en': 'Vodafone'},
'919914':{'en': 'Idea'},
'919915':{'en': 'Airtel'},
'919916':{'en': 'Vodafone'},
'919917':{'en': 'Idea'},
'919918':{'en': 'Vodafone'},
'919919':{'en': 'Vodafone'},
'919920':{'en': 'Vodafone'},
'919921':{'en': 'Idea'},
'919922':{'en': 'Idea'},
'919923':{'en': 'Vodafone'},
'919924':{'en': 'Idea'},
'919925':{'en': 'Vodafone'},
'919926':{'en': 'Idea'},
'919927':{'en': 'Idea'},
'919928':{'en': 'Airtel'},
'919929':{'en': 'Airtel'},
'91993':{'en': 'Airtel'},
'919930':{'en': 'Vodafone'},
'919940':{'en': 'Airtel'},
'919941':{'en': 'Aircel'},
'919942':{'en': 'Aircel'},
'919943':{'en': 'Vodafone'},
'919944':{'en': 'Airtel'},
'919945':{'en': 'Airtel'},
'919946':{'en': 'Vodafone'},
'919947':{'en': 'Idea'},
'919948':{'en': 'Idea'},
'919949':{'en': 'Airtel'},
'91995':{'en': 'Airtel'},
'919951':{'en': 'Idea'},
'919953':{'en': 'Vodafone'},
'919960':{'en': 'Airtel'},
'919961':{'en': 'Idea'},
'919962':{'en': 'Vodafone'},
'919963':{'en': 'Airtel'},
'919964':{'en': 'Idea'},
'919965':{'en': 'Aircel'},
'919966':{'en': 'Vodafone'},
'919967':{'en': 'Airtel'},
'919968':{'en': 'MTNL'},
'919969':{'en': 'MTNL'},
'91997':{'en': 'Airtel'},
'919976':{'en': 'Aircel'},
'919977':{'en': 'Idea'},
'919978':{'en': 'Vodafone'},
'919979':{'en': 'Vodafone'},
'91998':{'en': 'Vodafone'},
'919980':{'en': 'Airtel'},
'919981':{'en': 'Airtel'},
'919987':{'en': 'Airtel'},
'919989':{'en': 'Airtel'},
'919990':{'en': 'Idea'},
'919991':{'en': 'Vodafone'},
'919992':{'en': 'Idea'},
'919993':{'en': 'Airtel'},
'919994':{'en': 'Airtel'},
'919995':{'en': 'Airtel'},
'919996':{'en': 'Airtel'},
'919997':{'en': 'Airtel'},
'919999':{'en': 'Vodafone'},
'9230':{'en': 'Mobilink'},
'9231':{'en': 'Zong'},
'9232':{'en': 'Warid'},
'9233':{'en': 'Ufone'},
'9234':{'en': 'Telenor'},
'9370':{'en': 'AWCC', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u0628\u06cc \u0633\u06cc\u0645')},
'9371':{'en': 'AWCC', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u0628\u06cc \u0633\u06cc\u0645')},
'9372':{'en': 'Roshan', 'fa': u('\u0631\u0648\u0634\u0646')},
'9373':{'en': 'Etisalat', 'fa': u('\u0627\u062a\u0635\u0627\u0644\u0627\u062a')},
'93744':{'en': 'Afghan Telecom', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u062a\u0644\u06a9\u0627\u0645')},
'93747':{'en': 'Afghan Telecom', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u062a\u0644\u06a9\u0627\u0645')},
'93748':{'en': 'Afghan Telecom', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u062a\u0644\u06a9\u0627\u0645')},
'93749':{'en': 'Afghan Telecom', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u062a\u0644\u06a9\u0627\u0645')},
'9375':{'en': 'Afghan Telecom', 'fa': u('\u0627\u0641\u063a\u0627\u0646 \u062a\u0644\u06a9\u0627\u0645')},
'9376':{'en': 'MTN', 'fa': u('\u0627\u0645 \u062a\u06cc \u0627\u0646')},
'9377':{'en': 'MTN', 'fa': u('\u0627\u0645 \u062a\u06cc \u0627\u0646')},
'9378':{'en': 'Etisalat', 'fa': u('\u0627\u062a\u0635\u0627\u0644\u0627\u062a')},
'9379':{'en': 'Roshan', 'fa': u('\u0631\u0648\u0634\u0646')},
'94117':{'en': 'Dialog'},
'9470':{'en': 'Mobitel'},
'9471':{'en': 'Mobitel'},
'9472':{'en': 'Etisalat'},
'9475':{'en': 'Airtel'},
'9476':{'en': 'Dialog'},
'9477':{'en': 'Dialog'},
'9478':{'en': 'Hutch'},
'9592':{'en': 'MPT'},
'9593':{'en': 'MPT'},
'95940':{'en': 'MPT'},
'95941':{'en': 'MPT'},
'95942':{'en': 'MPT'},
'95943':{'en': 'MPT'},
'95944':{'en': 'MPT'},
'95945':{'en': 'MPT'},
'95947':{'en': 'MPT'},
'95949':{'en': 'MPT'},
'9595':{'en': 'MPT'},
'95967':{'en': 'Mytel'},
'95968':{'en': 'Mytel'},
'95969':{'en': 'MNTC'},
'95973':{'en': 'MPT'},
'95975':{'en': 'Telenor'},
'95976':{'en': 'Telenor'},
'95977':{'en': 'Telenor'},
'95978':{'en': 'Telenor'},
'95979':{'en': 'Telenor'},
'95981':{'en': 'MPT'},
'95983':{'en': 'MPT'},
'95984':{'en': 'MPT'},
'95985':{'en': 'MPT'},
'95986':{'en': 'MPT'},
'95987':{'en': 'MPT'},
'95989':{'en': 'MPT'},
'95991':{'en': 'MPT'},
'95995':{'en': 'Ooredoo'},
'95996':{'en': 'Ooredoo'},
'95997':{'en': 'Ooredoo'},
'9607':{'en': 'Dhiraagu'},
'9609':{'en': 'Ooredoo'},
'961788':{'en': 'Touch'},
'9617890':{'en': 'Touch'},
'9617891':{'en': 'Touch'},
'9617892':{'en': 'Touch'},
'9617893':{'en': 'Touch'},
'9617894':{'en': 'Touch'},
'961791':{'en': 'Alfa'},
'961793':{'en': 'Alfa'},
'961810':{'en': 'Touch'},
'961811':{'en': 'Alfa'},
'961812':{'en': 'Alfa'},
'961813':{'en': 'Alfa'},
'961814':{'en': 'Alfa'},
'961815':{'en': 'Alfa'},
'961816':{'en': 'Touch'},
'961817':{'en': 'Touch'},
'961818':{'en': 'Touch'},
'961819':{'en': 'Touch'},
'96274':{'en': 'Mirsal'},
'96275':{'en': 'Friendi'},
'96277':{'en': 'Orange'},
'96278':{'en': 'Umniah'},
'96279':{'en': 'Zain JO'},
'96392':{'en': 'Syriatel'},
'96393':{'en': 'Syriatel'},
'96394':{'en': 'MTN'},
'963950':{'en': 'MTN'},
'963952':{'en': 'MTN'},
'963954':{'en': 'MTN'},
'963955':{'en': 'MTN'},
'963956':{'en': 'MTN'},
'963957':{'en': 'MTN'},
'963958':{'en': 'MTN'},
'963959':{'en': 'MTN'},
'963962':{'en': 'MTN'},
'963964':{'en': 'MTN'},
'963965':{'en': 'MTN'},
'963966':{'en': 'MTN'},
'963967':{'en': 'MTN'},
'963968':{'en': 'MTN'},
'963969':{'en': 'MTN'},
'96398':{'en': 'Syriatel'},
'96399':{'en': 'Syriatel'},
'9647400':{'en': 'Itisaluna'},
'9647401':{'en': 'Itisaluna'},
'9647435':{'en': 'Kalimat'},
'9647444':{'en': 'Mobitel'},
'9647480':{'en': 'ITC Fanoos'},
'9647481':{'en': 'ITC Fanoos'},
'9647491':{'en': 'ITPC'},
'9647494':{'en': 'Imam Hussien Holy Shrine'},
'96475':{'en': 'Korek'},
'96476':{'en': 'Omnnea'},
'96477':{'en': 'Asiacell'},
'96478':{'en': 'Zain'},
'96479':{'en': 'Zain'},
'9655':{'ar': u('\u0641\u064a\u0641\u0627'), 'en': 'VIVA'},
'9656':{'ar': u('\u0623\u0648\u0631\u064a\u062f\u0648'), 'en': 'Ooredoo'},
'9659':{'ar': u('\u0632\u064a\u0646'), 'en': 'Zain'},
'96650':{'en': 'Al Jawal (STC)'},
'96653':{'en': 'Al Jawal (STC)'},
'96654':{'en': 'Mobily'},
'96655':{'en': 'Al Jawal (STC)'},
'96656':{'en': 'Mobily'},
'966570':{'en': 'Virgin'},
'966571':{'en': 'Virgin'},
'966572':{'en': 'Virgin'},
'966573':{'en': 'Virgin'},
'966576':{'en': 'Lebara'},
'966577':{'en': 'Lebara'},
'966578':{'en': 'Lebara'},
'96658':{'en': 'Zain'},
'96659':{'en': 'Zain'},
'96770':{'en': 'Y'},
'96771':{'en': 'SabaFon'},
'96773':{'en': 'MTN'},
'96777':{'en': 'Yemen Mobile'},
'96871':{'en': 'Omantel'},
'96872':{'en': 'Omantel'},
'96879':{'en': 'Ooredoo'},
'968901':{'en': 'Omantel'},
'968902':{'en': 'Omantel'},
'968903':{'en': 'Omantel'},
'968904':{'en': 'Omantel'},
'968905':{'en': 'Omantel'},
'968906':{'en': 'Omantel'},
'968907':{'en': 'Omantel'},
'968908':{'en': 'Omantel'},
'968909':{'en': 'Omantel'},
'96891':{'en': 'Omantel'},
'96892':{'en': 'Omantel'},
'96893':{'en': 'Omantel'},
'96894':{'en': 'Ooredoo'},
'96895':{'en': 'Ooredoo'},
'96896':{'en': 'Ooredoo'},
'96897':{'en': 'Ooredoo'},
'96898':{'en': 'Omantel'},
'96899':{'en': 'Omantel'},
'97056':{'en': 'Wataniya'},
'97059':{'en': 'Palestine Cellular Communications'},
'97150':{'en': 'Etisalat'},
'97152':{'en': 'du'},
'97154':{'en': 'Etisalat'},
'97155':{'en': 'du'},
'97156':{'en': 'Etisalat'},
'97158':{'en': 'du'},
'97250':{'en': 'Pelephone'},
'97251':{'en': 'Xphone'},
'97252':{'en': 'Cellcom'},
'97253':{'en': 'Hot Mobile'},
'97254':{'en': 'Orange'},
'972550':{'en': 'Beezz'},
'9725522':{'en': 'Home Cellular'},
'9725523':{'en': 'Home Cellular'},
'9725524':{'en': 'Telzar'},
'9725525':{'en': 'Telzar'},
'9725526':{'en': 'Telzar'},
'972553':{'en': 'Free Telecom'},
'9725550':{'en': 'Annatel'},
'9725555':{'en': 'Rami Levy'},
'972556':{'en': 'Rami Levy'},
'9725570':{'en': 'Cellact'},
'9725571':{'en': 'Cellact'},
'9725572':{'en': 'Cellact'},
'972558':{'en': 'Alon'},
'972559':{'en': 'Telzar'},
'97256':{'en': 'Wataniya'},
'97258':{'en': 'Golan Telecom'},
'97259':{'en': 'Jawwal'},
'97331':{'en': 'Royal Court'},
'97332':{'en': 'Batelco'},
'97333':{'en': 'VIVA'},
'97334':{'en': 'VIVA'},
'97335':{'en': 'VIVA'},
'97336':{'en': 'zain BH'},
'97337':{'en': 'zain BH'},
'97338':{'en': 'Batelco'},
'97339':{'en': 'Batelco'},
'97363':{'en': 'VIVA'},
'973663':{'en': 'zain BH'},
'973666':{'en': 'zain BH'},
'973667':{'en': 'Batelco'},
'973669':{'en': 'zain BH'},
'97433':{'en': 'ooredoo'},
'97455':{'en': 'ooredoo'},
'97466':{'en': 'ooredoo'},
'97477':{'en': 'Vodafone'},
'9751':{'en': 'B-Mobile of Bhutan Telecom'},
'97577':{'en': 'TashiCell of Tashi InfoComm'},
'97680':{'en': 'Unitel'},
'97683':{'en': 'G-Mobile'},
'97685':{'en': 'Mobicom'},
'97686':{'en': 'Unitel'},
'97688':{'en': 'Unitel'},
'97689':{'en': 'Unitel'},
'97690':{'en': 'Skytel'},
'97691':{'en': 'Skytel'},
'97692':{'en': 'Skytel'},
'97693':{'en': 'G-Mobile'},
'97694':{'en': 'Mobicom'},
'97695':{'en': 'Mobicom'},
'97696':{'en': 'Skytel'},
'97697':{'en': 'G-Mobile'},
'97698':{'en': 'G-Mobile'},
'97699':{'en': 'Mobicom'},
'977960':{'en': 'STM Telecom'},
'977961':{'en': 'Smart Telecom'},
'977962':{'en': 'Smart Telecom'},
'977963':{'en': 'NSTPL'},
'977972':{'en': 'UTL'},
'977974':{'en': 'NDCL'},
'977975':{'en': 'NDCL'},
'977980':{'en': 'NCell'},
'977981':{'en': 'NCell'},
'977982':{'en': 'NCell'},
'977984':{'en': 'Nepal Telecom'},
'977985':{'en': 'Nepal Telecom'},
'977986':{'en': 'Nepal Telecom'},
'977988':{'en': 'Smart Telecom'},
'9890':{'en': 'Irancell', 'fa': u('\u0627\u06cc\u0631\u0627\u0646\u0633\u0644')},
'9891':{'en': 'IR-MCI', 'fa': u('\u0647\u0645\u0631\u0627\u0647 \u0627\u0648\u0644')},
'9892':{'en': 'Rightel', 'fa': u('\u0631\u0627\u06cc\u062a\u0644')},
'9893':{'en': 'Irancell', 'fa': u('\u0627\u06cc\u0631\u0627\u0646\u0633\u0644')},
'98931':{'en': 'MTCE', 'fa': u('\u0627\u0633\u067e\u0627\u062f\u0627\u0646')},
'98932':{'en': 'Taliya', 'fa': u('\u062a\u0627\u0644\u06cc\u0627')},
'98934':{'en': 'TeleKish', 'fa': u('\u062a\u0644\u0647\u200c\u06a9\u06cc\u0634')},
'98990':{'en': 'IR-MCI', 'fa': u('\u0647\u0645\u0631\u0627\u0647 \u0627\u0648\u0644')},
'98991':{'en': 'IR-MCI', 'fa': u('\u0647\u0645\u0631\u0627\u0647 \u0627\u0648\u0644')},
'98994':{'en': 'Anarestan', 'fa': u('\u0627\u0646\u0627\u0631\u0633\u062a\u0627\u0646')},
'989981':{'en': 'Shatel Mobile', 'fa': u('\u0634\u0627\u062a\u0644 \u0645\u0648\u0628\u0627\u06cc\u0644')},
'9899900':{'en': 'LOTUSTEL', 'fa': u('\u0644\u0648\u062a\u0648\u0633\u200c\u062a\u0644')},
'9899910':{'en': 'Irancell', 'fa': u('\u0627\u06cc\u0631\u0627\u0646\u0633\u0644')},
'9899911':{'en': 'ApTel', 'fa': u('\u0622\u067e\u062a\u0644')},
'9899913':{'en': 'Irancell', 'fa': u('\u0627\u06cc\u0631\u0627\u0646\u0633\u0644')},
'9899914':{'en': 'Azartel', 'fa': u('\u0622\u0630\u0631\u062a\u0644')},
'9899998':{'en': 'SamanTel', 'fa': u('\u0633\u0627\u0645\u0627\u0646\u062a\u0644')},
'9899999':{'en': 'SamanTel', 'fa': u('\u0633\u0627\u0645\u0627\u0646\u062a\u0644')},
'99241':{'en': 'Megafon'},
'99250':{'en': 'Tcell'},
'99255':{'en': 'Megafon'},
'9927':{'en': 'Tcell'},
'9928':{'en': 'Megafon'},
'99290':{'en': 'Megafon'},
'992917':{'en': 'Tacom'},
'992918':{'en': 'Babilon-M'},
'992919':{'en': 'Tacom'},
'99292':{'en': 'Tcell'},
'99293':{'en': 'Tcell'},
'99294':{'en': 'Babilon-M'},
'99298':{'en': 'Babilon-M'},
'99361':{'en': 'TM-Cell'},
'99365':{'en': 'TM-Cell'},
'99366':{'en': 'MTS (BARASH Communication)'},
'99367':{'en': 'MTS (BARASH Communication)'},
'99369':{'en': 'MTS (BARASH Communication)'},
'99436554':{'en': 'Nakhtel'},
'99440':{'en': 'FONEX'},
'99444':{'en': 'Aztelekom'},
'99450':{'en': 'Azercell'},
'99451':{'en': 'Azercell'},
'99455':{'en': 'Bakcell'},
'9946':{'en': 'Nakhtel'},
'9947':{'en': 'Nar Mobile'},
'99551':{'en': 'Geocell'},
'99554444':{'en': 'MagtiCom'},
'995550':{'en': 'MagtiCom'},
'995551':{'en': 'MagtiCom'},
'995555':{'en': 'Geocell'},
'995557':{'en': 'Geocell'},
'995558':{'en': 'Geocell'},
'995559':{'en': 'Globalcell'},
'99556':{'en': 'Veon'},
'995570':{'en': 'Silknet'},
'995571':{'en': 'Veon'},
'995574':{'en': 'Veon'},
'995577':{'en': 'Geocell'},
'995579':{'en': 'Veon'},
'995591':{'en': 'MagtiCom'},
'995592':{'en': 'Veon'},
'995593':{'en': 'Geocell'},
'995595':{'en': 'MagtiCom'},
'995596':{'en': 'MagtiCom'},
'995597':{'en': 'Veon'},
'995598':{'en': 'MagtiCom'},
'995599':{'en': 'MagtiCom'},
'99579':{'en': 'MagtiCom'},
'99620':{'en': 'Aktel'},
'99622':{'en': 'Sky mobile'},
'99650':{'en': 'Nur Telecom'},
'996503':{'en': '7 Mobile'},
'996504':{'en': '7 Mobile'},
'99651':{'en': 'Katel'},
'99654':{'en': 'Aktel'},
'99655':{'en': 'ALFA Telecom'},
'99656':{'en': 'Winline'},
'99657':{'en': 'Sotel'},
'99670':{'en': 'Nur Telecom'},
'99675':{'en': 'ALFA Telecom'},
'99677':{'en': 'Sky mobile'},
'9969':{'en': 'Sky mobile'},
'9986':{'en': 'MTS'},
'99870':{'en': 'MTS'},
'99872':{'en': 'MTS'},
'99873':{'en': 'MTS'},
'99874':{'en': 'MTS'},
'99875':{'en': 'MTS'},
'99876':{'en': 'MTS'},
'99879':{'en': 'MTS'},
'99890':{'en': 'Beeline'},
'99891':{'en': 'Beeline'},
'99892':{'en': 'MTS'},
'99893':{'en': 'Ucell'},
'99894':{'en': 'Ucell'},
'99897':{'en': 'MTS'},
}
| [
"caipheilunga@gmail.com"
] | caipheilunga@gmail.com |
ebe6e7a3cc63ed2303d62fe9f298a5aff5760b94 | 6ffc7ec6a1e95910b9e32c93d2976879b65eeb73 | /app/utils/crud_collection_destination.py | c75c484f9075e2f30822da5a193cdec1ddffd04b | [] | no_license | BIGtanukiudon/information-gathering-managiment | 7d51eb07bd5da43ba96bdd8e8bf6b8f910ec0e99 | f2ee8523e6b185909afd3f884c23e1050981aa89 | refs/heads/main | 2023-07-18T19:07:25.596644 | 2021-09-18T05:20:34 | 2021-09-18T05:20:34 | 379,589,673 | 0 | 0 | null | 2021-09-18T05:20:35 | 2021-06-23T12:06:33 | Python | UTF-8 | Python | false | false | 1,217 | py | from sqlalchemy.orm import Session
from sqlalchemy.exc import SQLAlchemyError
from database.models import CollectionDestinationForGet as CDDM4G, CollectionDestinationForCreate as CDDM4C
from models.collection_destination import CollectionDestinationCreate as CDC
def get_collection_destination(db: Session, collection_destination_id: int):
    """Return the collection destination with the given primary key, or None."""
    query = db.query(CDDM4G)
    return query.filter(CDDM4G.id == collection_destination_id).first()
def get_collection_destination_list(db: Session):
    """Return every collection destination row, ordered by last-update time."""
    return (
        db.query(CDDM4G)
        .order_by(CDDM4G.updated_at)
        .all()
    )
def create_collection_destination(db: Session, item: CDC):
    """Insert a new collection destination.

    Returns the refreshed ORM object on success, or None if the INSERT fails.
    """
    db_item = CDDM4C(**item.dict())
    try:
        db.add(db_item)
        db.commit()
        db.refresh(db_item)
        return db_item
    except SQLAlchemyError as e:
        # Roll back so the session stays usable after a failed commit
        # (delete_collection_destination already does this; previously the
        # session was left in a broken transactional state here).
        db.rollback()
        print(e)
        return None
def delete_collection_destination(db: Session, collection_destination_id: int):
    """Delete a collection destination by id.

    Returns 204 on success, None when no row matches, 500 on a database error.
    """
    try:
        target = (
            db.query(CDDM4G)
            .filter(CDDM4G.id == collection_destination_id)
            .first()
        )
        if target is None:
            return None
        db.delete(target)
        db.commit()
        return 204
    except SQLAlchemyError as e:
        db.rollback()
        print(e)
        return 500
| [
"rksakanatanuki@gmail.com"
] | rksakanatanuki@gmail.com |
9cb82dbfb52bc39ff71664aa836a811c579409b2 | 918721a0c8b854d4a67e0227cc516647ba8b16ab | /quotesServer/quotesServer/quotesServer/settings.py | 8bf27dc574b870adad202ace707e920ac81d2888 | [] | no_license | nrjvarshney/QuoteFromPic | 39631bed99d7aab8ddfa9328094c3c024fa55cbf | 2d48d19096f97ce60b8f696f81586ea047f8fbee | refs/heads/master | 2022-03-27T16:21:24.989782 | 2019-07-23T18:46:40 | 2019-07-23T18:46:40 | 198,168,244 | 0 | 1 | null | 2020-01-13T05:46:57 | 2019-07-22T07:14:21 | Jupyter Notebook | UTF-8 | Python | false | false | 3,183 | py | """
Django settings for quotesServer project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before any production deployment.
SECRET_KEY = '=jc*b#rj%q@+lxi$4&e1xryvz+akehdb7t7qw1yvsv2y@5aayw'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts this app may be served from: one LAN address plus localhost.
ALLOWED_HOSTS = ['100.64.24.149','localhost','127.0.0.1']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: Django REST framework for the API endpoints.
    'rest_framework',
    # Local app exposing the quote API.
    'quoteapi',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'quotesServer.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'quotesServer.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file database: fine for development, replace with a server-backed
# engine for production use.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"nevarshn@microsoft.com"
] | nevarshn@microsoft.com |
a6c789b7be6e47e5a363cd0cc4b9e9d846ce4005 | b3b443f0bc49bbb10c26b51fe89e6860d4ca3d3a | /ctreport_selenium/ctreport_html/scripts/detailmodal.py | d3f7cf88a94f4e60fc79f4cc43686715a63414b6 | [
"MIT"
] | permissive | naveens33/ctreport-selenium | 6b3a1cc93a6741a1d493c2452c1cf56c6d85c052 | 9553b5c4b8deb52e46cf0fb3e1ea7092028cf090 | refs/heads/master | 2022-12-23T04:55:12.226339 | 2020-08-29T19:22:00 | 2020-08-29T19:22:00 | 228,779,087 | 2 | 2 | MIT | 2022-12-18T22:53:51 | 2019-12-18T07:03:39 | Python | UTF-8 | Python | false | false | 5,082 | py | def content(var):
    """Return an HTML <script> block for the "Expected vs Actual" detail modal.

    Parameters
    ----------
    var : str
        A JavaScript snippet spliced into createmodal(); presumably it
        declares the `tests` lookup referenced below -- TODO confirm with the
        caller.

    Returns
    -------
    str
        The assembled <script>...</script> markup defining createmodal(id)
        and deletemodal(id) for the generated report page.
    """
    # Everything inside the triple-quoted strings is emitted verbatim into the
    # report; the trailing backslashes are JavaScript string continuations.
    c = '''
<script>
function createmodal(id) {
''' + var + '''
var content = '<table class="table table-bordered ">';
var footer = ''
if(Array.isArray(tests[id])){
content += '<tbody>\
<tr class="table-secondary"><td>Expected</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][0].join(", ")+'</td></tr>\
<tr class="table-secondary"><td>Actual</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][1].join(", ")+'</td></tr>';
}
else{
content += '<thead class="thead-light">\
<tr>\
<th class="align-middle text-sm-center">Status</th>\
<th class="align-middle text-sm-center">Key</th>\
<th class="align-middle text-sm-center">Expected</th>\
<th class="align-middle text-sm-center">Actual</th>\
</tr>\
</thead>\
<tbody>';
for(var key in tests[id]) {
status =''
key_='<td>'+key+'</td>'
expected='<td>'+tests[id][key][0]+'</td>';
actual='<td>'+tests[id][key][1]+'</td>';
if (tests[id][key][2]=='true'){
status='<i class="fa fa-check-circle align-middle text-sm-center" style="color:#00AF00; font-size: 18px;"></i>';
}
else{
status='<i class="fa-times-circle fa align-middle text-sm-center" style="color:#F7464A; font-size: 18px;"></i>';
if (tests[id][key][0]=="null"){
key_ = '<td style="background-color:rgb(247, 131, 134,0.3);">'+key+'</td>'
expected='<td></td>';
}
else if(tests[id][key][1]=="null"){
actual='<td style="color:#F7464A;">\
<i class="fas fa-ban" data-toggle="tooltip" data-placement="right" data-original-title="Key missing in actual data"></i>\
</td>';
}
else{
actual='<td style="background-color: #ffffb2">'+tests[id][key][1]+'</td>';
}
}
content += '<tr class="align-middle text-sm-center">\
<td>\
'+status+'\
</td>\
'+key_+'\
'+expected+'\
'+actual+'\
</tr>';
footer = '<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color: #ffffb2"></i></div>\
<div class="col-10">\Actual is not same as Expected</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color:rgb(247, 131, 134,0.3);"></i></div>\
<div class="col-10">New key found in actual</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-ban" style="color:#F7464A;"></i></div>\
<div class="col-10">Key missing in actual data</div>\
</div>\';
}
}
content += '</tbody>\
</table>';
var header = "Expected vs Actual";
var html = '<div id="modalWindow" class="modal" data-keyboard="false" data-backdrop="static">';
html += '<div class="modal-dialog modal-dialog-scrollable ">\
<div class="modal-content">\
<div class="modal-header">\
<button type="button" id="closeModal" class="btn btn-danger" data-dismiss="modal" onclick=deletemodal("modalWindow") style="margin:auto 1rem auto auto; font-size: smaller;">Close</button>\
</div>\
<div class="modal-body">'
+content+'\
</div>\
<div class="modal-footer small">'\
+footer+'\
</div>\
</div>\
</div>\
</div>';
$("#myModal").html(html);
$("#modalWindow").modal();
}
function deletemodal(id) {
var element = document.getElementById(id);
element.parentNode.removeChild(element);
};
</script>
'''
    return c
| [
"naveensagayaselvaraj@gmail.com"
] | naveensagayaselvaraj@gmail.com |
46a68cf8d816140c27a6905b438ef3b5390e2390 | 29ecf78ebd8fe26409db20f5a5ccbf40a0b7bf77 | /posts/tests/test_api_views.py | 10d12405755f41f59f77e32766cef9f8a3457530 | [] | no_license | pranavchandran/Django-Tests-unleashed | 56225d1cdd6cca58df4e0fffec33b3d36cabbad7 | dc76e6b87cea7842388cd90bbd5a45c563e4af3f | refs/heads/master | 2022-09-29T11:11:10.517822 | 2020-06-10T06:21:29 | 2020-06-10T06:21:29 | 271,107,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | from rest_framework.test import APIRequestFactory,force_authenticate
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.contrib.auth.models import AnonymousUser,User
from posts.models import Post
from posts.api.views import (
PostCreateAPIView,
PostDeleteAPIView,
PostDetailAPIView,
PostListAPIView,
PostUpdateAPIView,
)
# User = get_user_model
class PostApiTest(TestCase):
    """API tests for the Post endpoints: list, detail, create, update, delete."""

    def setUp(self):
        # Payload reused by the create/update tests.
        self.data = {"title": "coming days", "content": "time is", "publish": timezone.now().date()}
        self.factory = APIRequestFactory()
        # NOTE(review): User.objects.create() stores the password unhashed;
        # the tests authenticate via force_authenticate(), so login is never
        # exercised -- switch to create_user() if that ever changes.
        self.user = User.objects.create(
            username='test1', email='test@neeps.in', password='top_secret',
            is_staff=True, is_superuser=True)

    def create_post(self, title='crucial'):
        """Create and return a Post fixture."""
        return Post.objects.create(title=title)

    def test_get_data(self):
        """List and detail endpoints are publicly readable (200)."""
        list_url = reverse("posts-api:list")
        obj = self.create_post()
        detail_url = reverse('posts-api:detail', kwargs={'slug': obj.slug})
        request = self.factory.get(list_url)
        response = PostListAPIView.as_view()(request)
        self.assertEqual(response.status_code, 200)
        request = self.factory.get(detail_url)
        response = PostDetailAPIView.as_view()(request, slug=obj.slug)
        self.assertEqual(response.status_code, 200)

    def test_post_data(self):
        """Create requires auth: 401 anonymous, 201 authenticated."""
        create_url = reverse("posts-api:create")
        request = self.factory.post(create_url, data=self.data)
        response1 = PostCreateAPIView.as_view()(request)
        self.assertEqual(response1.status_code, 401)
        force_authenticate(request, user=self.user)
        response = PostCreateAPIView.as_view()(request)
        self.assertEqual(response.status_code, 201)

    def test_update_data(self):
        """Update requires auth: 401 anonymous, 200 authenticated."""
        obj = self.create_post()
        update_url = reverse("posts-api:update", kwargs={"slug": obj.slug})
        request = self.factory.put(update_url, data=self.data)
        response1 = PostUpdateAPIView.as_view()(request, slug=obj.slug)
        self.assertEqual(response1.status_code, 401)
        force_authenticate(request, user=self.user)
        response = PostUpdateAPIView.as_view()(request, slug=obj.slug)
        self.assertEqual(response.status_code, 200)

    def test_delete_data(self):
        """Delete requires auth: 401 anonymous, 204 authenticated."""
        # Stray debug print(request) statements removed from this test.
        obj = self.create_post()
        delete_url = reverse("posts-api:delete", kwargs={"slug": obj.slug})
        request = self.factory.delete(delete_url)
        response1 = PostDeleteAPIView.as_view()(request, slug=obj.slug)
        self.assertEqual(response1.status_code, 401)
        force_authenticate(request, user=self.user)
        response = PostDeleteAPIView.as_view()(request, slug=obj.slug)
self.assertEqual(response.status_code,204) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
be12370e08190de56618721120fab97c739b4cac | 0873613d4a935a8473466e9cdbb38c0ea72770cf | /sol.py | 74ddf6cd819972d6e305e649359aeb8cbaa5f80b | [] | no_license | KTala9/AIND-Sudoku | f8cf34c87c0b8a40dddf8b437d0df6bda2a60c83 | b3fc7373647626f0d5fccff4fa2d79a58daaafdf | refs/heads/master | 2021-01-21T19:13:35.036264 | 2017-05-28T20:28:34 | 2017-05-28T20:28:34 | 92,130,703 | 0 | 0 | null | 2017-05-23T04:54:12 | 2017-05-23T04:54:12 | null | UTF-8 | Python | false | false | 8,626 | py | import itertools
def cross(A, B):
    """Return the cross product of elements in A and elements in B."""
    return [s + t for s in A for t in B]
rows = 'ABCDEFGHI'
cols = '123456789'
# All 81 box labels: 'A1' ... 'I9'.
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
# The two main diagonals -- this solver handles *diagonal* sudoku.
diag_units = [[rows[i] + cols[i] for i in range(9)], [rows[::-1][i] + cols[i] for i in range(9)]]
unitlist = row_units + column_units + square_units + diag_units
# For every box: the list of units it belongs to ...
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
# ... and the set of other boxes sharing at least one unit with it (its peers).
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
# History of single-value assignments, consumed by the pygame visualizer.
assignments = []
def assign_value(values, box, value):
    """
    Please use this function to update your values dictionary!

    Assigns a value to a given box. If it updates the board record it
    (single-digit assignments are appended to the module-level `assignments`
    history for the visualizer).
    """
    # Bug fix: removed a leftover debug print(box, value) that spammed stdout
    # on every single assignment during solving.
    values[box] = value
    if len(value) == 1:
        assignments.append(values.copy())
    return values
def naked_twins(values):
    """
    Eliminate values using the naked twins strategy.

    See link for details: http://www.sudokudragon.com/sudokustrategy.htm

    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        The values dictionary with the naked twins eliminated from peers.
    """
    # Naked twins: two boxes in same unit that have a pair of identical digits
    # remaining as their only possibilities
    for unit in unitlist:
        # Find all boxes with two digits remaining as possibilities
        pairs = [box for box in unit if len(values[box]) == 2]
        # Pairwise combinations
        poss_twins = [list(pair) for pair in itertools.combinations(pairs, 2)]
        for pair in poss_twins:
            box1 = pair[0]
            box2 = pair[1]
            # Find the naked twins
            if values[box1] == values[box2]:
                for box in unit:
                    # Eliminate the naked twins as possibilities for peers
                    # (mutation happens in place via assign_value, which also
                    # records single-value assignments for the visualizer).
                    if box != box1 and box != box2:
                        for digit in values[box1]:
                            assign_value(values, box, values[box].replace(digit,''))
    return values
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.

    Args:
        grid(string) - A grid in string form.
    Returns:
        A grid in dictionary form
            Keys: The boxes, e.g., 'A1'
            Values: The value in each box, e.g., '8'. If the box has no value,
            then the value will be '123456789'.
    """
    digits = '123456789'
    # '.' marks an empty box and expands to all nine candidates; any other
    # non-digit character is skipped.
    chars = [digits if ch == '.' else ch for ch in grid if ch == '.' or ch in digits]
    # Nine by nine grid
    assert len(chars) == 81
    return dict(zip(boxes, chars))
def display(values):
    """
    Display the values as a 2-D grid.

    Args:
        values(dict): The sudoku in dictionary form
    """
    width = 1+max(len(values[s]) for s in boxes)
    line = '+'.join(['-'*(width*3)]*3)
    for r in rows:
        print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
                      for c in cols))
        if r in 'CF': print(line)
    # Bug fix: the function previously ended with a bare `print` expression
    # (a Python-2 leftover that is a no-op in Python 3); actually emit the
    # trailing blank line.
    print()
def eliminate(values):
    """
    Go through all the boxes, and whenever there is a box with a value, eliminate this value from the values of all its peers.

    Args:
        A sudoku in dictionary form.
    Returns:
        The resulting sudoku in dictionary form.
    """
    # Boxes already solved (exactly one candidate left).
    solved_values = [box for box in values.keys() if len(values[box]) == 1]
    for box in solved_values:
        digit = values[box]
        # Remove solved digit from the list of possible values for each peer
        for peer in peers[box]:
            assign_value(values, peer, values[peer].replace(digit,''))
    return values
def only_choice(values):
    """
    Go through all the units, and whenever there is a unit with a value that only fits in one box, assign the value to this box.

    Args:
        A sudoku in dictionary form.
    Returns:
        The resulting sudoku in dictionary form.
    """
    for unit in unitlist:
        for digit in '123456789':
            # Create a list of all the boxes in the unit in question
            # that contain the digit in question
            dplaces = [box for box in unit if digit in values[box]]
            if len(dplaces) == 1:
                # This box is the only choice for this digit
                # (assign_value also records the move for the visualizer).
                values = assign_value(values, dplaces[0], digit)
    return values
def single_possibility(values):
    """
    Assign values using the single possibility strategy.

    See link for details: http://www.sudokudragon.com/sudokustrategy.htm

    A box whose peers already claim eight of the nine digits can only hold the
    remaining one.

    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        The values dictionary with squares assigned their only possible value.
    """
    for box in boxes:
        digits = '123456789'
        # Strike out every digit already solved in one of this box's peers.
        # (The original wrapped this scan in a redundant `for digit in digits`
        # loop whose loop variable was never used, repeating identical work
        # nine times per box; removing it does not change the result.)
        for peer in peers[box]:
            if len(values[peer]) == 1:
                digits = digits.replace(values[peer], '')
        # Only one digit can go in this box i.e. a single possibility
        if len(digits) == 1:
            values = assign_value(values, box, digits)
    return values
def reduce_puzzle(values):
    """
    Iterate eliminate(), only_choice() and naked_twins(). If at some point, there is a box with no available values, return False.
    If the sudoku is solved, return the sudoku.
    If after an iteration of these functions, the sudoku remains the same, return the sudoku.

    Args:
        A sudoku in dictionary form.
    Returns:
        The resulting sudoku in dictionary form, or False on contradiction.
    """
    stalled = False
    while not stalled:
        solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
        # Apply the eliminate exclusion strategy
        values = eliminate(values)
        # Apply the only choice assignment strategy
        values = only_choice(values)
        # Apply the naked twins elimination strategy
        values = naked_twins(values)
        solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
        # Stop applying these strategies if we stop making box-solving progress
        stalled = solved_values_before == solved_values_after
        # Sanity check: never eliminate all digits from a box's possibilities
        if len([box for box in values.keys() if len(values[box]) == 0]):
            return False
    return values
def search(values):
    """
    Using depth-first search and propagation, try all possible values.

    Args:
        A sudoku in dictionary form.
    Returns:
        The solved sudoku if solvable or False if not solvable.
        (Falls through and implicitly returns None when every branch fails;
        callers treat that the same as False.)
    """
    # First, reduce the puzzle using the previous function
    values = reduce_puzzle(values)
    if values is False:
        return False # Failed earlier
    if all(len(values[s]) == 1 for s in boxes):
        return values # Solved!
    # Choose one of the unfilled squares with the fewest possibilities
    n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
    # Now use recurrence to solve each one of the resulting sudokus,
    # and if one returns a value (not False), return that answer!
    for value in values[s]:
        new_sudoku = values.copy()
        new_sudoku[s] = value
        attempt = search(new_sudoku)
        if attempt:
            return attempt
def solve(grid):
    """
    Find the solution to a Sudoku grid.

    Args:
        grid(string): a string representing a sudoku grid.
            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    # Convert the string grid to dictionary form, then run the DFS solver.
    result = search(grid_values(grid))
    # search() yields a falsy value (False or None) when unsolvable.
    return result if result else False
if __name__ == '__main__':
    # Demo: solve and pretty-print a diagonal sudoku.
    diag_sudoku_grid = '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................'
    display(solve(diag_sudoku_grid))
    try:
        # Optional pygame visualization of the recorded assignment history.
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.') | [
"ktalanine@deloitte.com"
] | ktalanine@deloitte.com |
083c11e06225210c43ebe3bbfe58f3b6b2068634 | 52e125ea26123a332321b9ee0156cc878dce7f03 | /topcoder/KeyDungeonDiv1.py | d0a75c10ff6a1fc449572926318e30ebc1f11a7f | [] | no_license | mihneagiurgea/pysandbox | 73252fcfe82515d6203470a97919d8390208df46 | 5c49b0d3aa15894b50507c55f4d66f48544af224 | refs/heads/master | 2021-01-22T20:08:56.228738 | 2020-09-27T23:31:53 | 2020-09-27T23:31:53 | 6,213,625 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,410 | py | from collections import namedtuple, defaultdict
State = namedtuple('State', ['r', 'g', 'w'])
class KeyDungeonDiv1(object):
def canOpen(self, state, i):
r = max(0, self.doorR[i] - state.r)
g = max(0, self.doorG[i] - state.g)
return state.w >= r + g
def open(self, state, i):
r = max(0, self.doorR[i] - state.r)
g = max(0, self.doorG[i] - state.g)
if state.w < r + g:
# Cannot open this room.
return None
return State(r=max(0, state.r - self.doorR[i]) + self.roomR[i],
g=max(0, state.g - self.doorG[i]) + self.roomG[i],
w=state.w - r - g + self.roomW[i])
def minimizeStates(self, states):
w_to_states = defaultdict(list)
for state in states:
w_to_states[state.w].append(state)
result = []
for w in w_to_states:
sub_states = w_to_states[w]
sub_states.sort()
prev = State(-1, -1, -1)
for state in sub_states:
if not (state.g >= prev.g):
result.append(prev)
prev = state
result.append(prev)
return set(result)
def maxKeys(self, doorR, doorG, roomR, roomG, roomW, keys):
"""
>>> obj = KeyDungeonDiv1()
>>> obj.maxKeys((1, 2, 3), (0, 4, 9), (0, 0, 10), (0, 8, 9), (1, 0, 8), (3, 1, 2))
8
>>> obj.maxKeys((1, 1, 1, 2), (0, 2, 3, 1), (2, 1, 0, 4), (1, 3, 3, 1), (1, 0, 2, 1), (0, 4, 0))
4
>>> obj.maxKeys((2, 0, 4), (3, 0, 4), (0, 0, 9), (0, 0, 9), (8, 5, 9), (0, 0, 0))
27
"""
self.doorR = doorR
self.doorG = doorG
self.roomR = roomR
self.roomG = roomG
self.roomW = roomW
initial_state = State(*keys)
states = set([initial_state])
N = len(doorR)
for i in range(N):
new_states = set()
for state in states:
next_state = self.open(state, i)
if next_state is not None:
new_states.add(next_state)
states.update(new_states)
# Minimize states.
states = self.minimizeStates(states)
max_keys = -1
for state in states:
max_keys = max(max_keys, state.r + state.g + state.w)
return max_keys
import doctest
doctest.testmod() | [
"mihnea.giurgea@ubervu.com"
] | mihnea.giurgea@ubervu.com |
229128424c9b4cb65c8e0638f4b143ddde03708d | eab5f1c8292a76babb0e1b86471db954ac0d1a41 | /guvi90.py | ea2fae62f19a846b8a998e9be1d931e0daa5d69e | [] | no_license | pavanimallem/pavs3 | 56cabfa7cc56c650746cbf80296e6fe32578f953 | c43e6b2993317c438554bbcae304eb6aa6763801 | refs/heads/master | 2020-03-24T07:26:10.827892 | 2018-09-19T11:18:24 | 2018-09-19T11:18:24 | 142,564,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | str=raw_input()
x=[]
for i in str:
if(i.isdigit()):
x.append(i)
print "".join(x)
| [
"noreply@github.com"
] | noreply@github.com |
4d94685eff433b3cd9954ef5f2e3a219a023b767 | db1580d0ea6f8740a2e729f8265bdf98f42ca286 | /songFileServer/models.py | ac40ce374b276c2e6dc438f630c7807e73475385 | [] | no_license | jyotib652/SongAudiobookFileServer | 598bb02abfa94e64831f5df9229d45e107c457f3 | 196819410d4d3a83b16c130bedc7b7dd47e52a22 | refs/heads/main | 2023-03-15T02:11:21.392860 | 2021-03-16T05:51:39 | 2021-03-16T05:51:39 | 347,339,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | from django.db import models
# Create your models here.
# class CustomDateTimeField(models.DateTimeField):
# def value_to_string(self, obj):
# val = self.value_from_object(obj)
# if val:
# val.replace(microsecond=0)
# return val.isoformat()
# return ''
class Song(models.Model):
    # Primary key is a plain IntegerField rather than an AutoField, so callers
    # must supply the id explicitly -- TODO confirm this is intended.
    id=models.IntegerField(primary_key=True)
    songName=models.CharField(max_length=100, blank=False, null=True)
    # Track duration in whole seconds.
    songDurationSeconds=models.PositiveIntegerField(blank=False, null=True)
    # Set automatically at insertion time; never updated afterwards.
    uploadedTime=models.DateTimeField(auto_now_add=True, blank=False, null=True)
class Podcast(models.Model):
    # Explicit integer PK (no AutoField) -- TODO confirm ids are assigned by callers.
    id=models.IntegerField(primary_key=True)
    podcastName=models.CharField(max_length=100, blank=False, null=True)
    # Episode duration in whole seconds.
    podcastDurationSeconds=models.PositiveIntegerField(blank=False, null=True)
    # Set automatically at insertion time.
    uploadedTime=models.DateTimeField(auto_now_add=True, blank=False, null=True)
    podcastHost=models.CharField(max_length=100, blank=False, null=True)
    # NOTE(review): choices=[] configures no valid choices, and a single
    # CharField can only hold one value -- presumably a placeholder for a
    # populated choices list or a ManyToManyField; confirm before relying on it.
    podcastParticipants=models.CharField(max_length=100, choices=[], blank=False, null=True)
class Audiobook(models.Model):
    # Explicit integer PK (no AutoField) -- TODO confirm ids are assigned by callers.
    id=models.IntegerField(primary_key=True)
    audiobookTitle=models.CharField(max_length=100, blank=False, null=True)
    titleAuthor=models.CharField(max_length=100, blank=False, null=True)
    audiobookNarrator=models.CharField(max_length=100, blank=False, null=True)
    # Audiobook duration in whole seconds.
    audiobookDurationSeconds=models.PositiveIntegerField(blank=False, null=True)
    # Set automatically at insertion time.
    uploadedTime=models.DateTimeField(auto_now_add=True, blank=False, null=True)
| [
"jyotib652@gmail.com"
] | jyotib652@gmail.com |
33812b49768d981e641e836ec3d404c2f197b362 | ea52083073db9b1d87ffb660ad17afd5eb0e24c6 | /mypythonscripts/HelloCG.py | 047b82fa14c48414b154798627cd6dd91dee287b | [] | no_license | avikdeb/pythonwork | 1641d93d45b44c7d75a6ec591c54b1529c72a763 | 5e067afc38244c3133765570c3f59dfb47d1d4d3 | refs/heads/master | 2020-03-09T06:47:57.003019 | 2018-05-14T16:36:18 | 2018-05-14T16:36:18 | 125,887,248 | 0 | 0 | null | 2018-04-08T08:24:31 | 2018-03-19T16:22:09 | null | UTF-8 | Python | false | false | 138 | py | print("Hello Everyone!")
def print_something():
    """Emit the same status line three times."""
    for _ in range(3):
        print("I am writing something!")

print_something()
"gkc.cse@gmail.com"
] | gkc.cse@gmail.com |
cfb1675bba4c3ed8406d520389114b467ce2dd69 | 631123e441324e8366bd522046e042a3fb8d50bc | /inputTokenizer.py | 780e0faa903844338202ff7dc7743cf576b97aac | [] | no_license | skrishpv/deeplearning-chatbot-758b | 33d5ad82c10071af3929ec3ce61aa2999dce2c3c | 64d297563ec3dbb95ab4fa38394d405112118283 | refs/heads/main | 2023-02-03T18:38:36.583250 | 2020-12-18T15:08:57 | 2020-12-18T15:08:57 | 320,298,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
def tokenizeInput(vocab_size, oov_token, training_sentences, testing_sentences, max_len):
    """Fit a Keras tokenizer on the training sentences, persist it, and return
    padded integer sequences for both the training and the test sets."""
    tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
    tokenizer.fit_on_texts(training_sentences)
    word_index = tokenizer.word_index
    # Encode both corpora to integer ids, then right-truncate/pad to max_len.
    train_padded_sequences = pad_sequences(
        tokenizer.texts_to_sequences(training_sentences),
        truncating='post', maxlen=max_len)
    test_padded_sequences = pad_sequences(
        tokenizer.texts_to_sequences(testing_sentences),
        truncating='post', maxlen=max_len)
    # Persist the fitted tokenizer so inference code can reuse the same vocab.
    with open('tokenizer.pickle', 'wb') as handle:
        pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
    return train_padded_sequences, test_padded_sequences
| [
"67808815+skrishpv@users.noreply.github.com"
] | 67808815+skrishpv@users.noreply.github.com |
a85b4046bfc7987cb03c53122f8ed3882aa82d61 | 2272759c7b09397ff462115fc68d1b8363f572db | /app/__init__.py | 5715f71dd4d096fce31c078b09bcf1a4e9ed4dcc | [
"MIT",
"CC-BY-4.0"
] | permissive | Bubblbu/fhe-collector | e8f2f2b8d80a86c11c43d506244077b879ebedfc | b587a952eec318eab6cf430383fe83ca85277895 | refs/heads/master | 2020-03-30T17:49:08.065705 | 2019-09-17T22:54:25 | 2019-09-17T22:54:25 | 151,471,327 | 0 | 0 | MIT | 2018-10-03T19:48:38 | 2018-10-03T19:48:38 | null | UTF-8 | Python | false | false | 34,027 | py | """
.. module::
:platform: Linux
:synopsis: Web-app to collect facebook metrics.
.. moduleauthor:: Stefan Kasberger <mail@stefankasberger.at>
"""
__author__ = 'Stefan Kasberger'
__email__ = 'stefan.kasberger@univie.ac.at'
__copyright__ = 'Copyright (c) 2019 Stefan Kasberger'
__license__ = 'MIT License'
__version__ = '0.0.1'
__url__ = 'https://github.com/ScholCommLab/fhe-collector'
# from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
from facebook import GraphAPI
from json import dumps, loads
import logging
from logging.handlers import RotatingFileHandler
import os
import pandas as pd
from psycopg2 import connect
import re
import requests
import urllib.parse
from tqdm import tqdm
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_debugtoolbar import DebugToolbarExtension
# Project root: two levels above this package's __init__.py.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Flask extension singletons, created unbound here and presumably attached to
# the app elsewhere via init_app() (app-factory pattern) -- TODO confirm.
db = SQLAlchemy()
migrate = Migrate()
def validate_doi(doi):
    """Validate a DOI via regular expressions.

    Parameters
    ----------
    doi : string
        A single DOI to be validated.

    Returns
    -------
    bool
        True, if DOI is valid, False if not.
    """
    patterns = [
        r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$",
        r"^10.1002/[^\s]+$",
        r"^10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d$",
        r"^10.1021/\w\w\d+$",
        r"^10.1207\/[\w\d]+\&\d+_\d+$"
    ]
    # The DOI is accepted as soon as any known pattern matches (case-insensitive).
    return any(re.match(pat, doi, re.IGNORECASE) is not None for pat in patterns)
def init_from_csv(filename, batch_size):
    """Import DOI's from a csv file.

    Imports the DOI's from a csv file into the database: stores the raw data
    in the Import table and adds the dois. The csv must contain an attribute
    `doi`, and optionally `url`, `url_type` and `date`.
    For test purposes, there is a file with 100 entries you can use.
    Checks, if duplicate dois are in the file and removes them.

    Parameters
    ----------
    filename : string
        Filepath for the csv file, relative from the root dir.
    batch_size : int
        Number of rows processed per database commit.

    Returns
    -------
    dict
        Import statistics (`dois_added`, `num_dois_added`, `num_urls_added`),
        or False if storing the raw import data fails.
    """
    # Imported here (not at module level) -- presumably to avoid a circular
    # import between app and app.models; confirm before moving them.
    from app.models import Doi
    from app.models import Import
    from app.models import Url
    from app import db
    num_dois_added = 0
    num_urls_added = 0
    dois_added = []
    url_import_lst = []   # url rows to insert after all DOIs are committed
    url_list = []         # urls already seen, used to skip duplicates
    filename = '{0}/{1}'.format(BASE_DIR, filename)
    df = pd.read_csv(filename, encoding='utf8')
    df = df.drop_duplicates(subset='doi')
    # Empty cells become False so the plain truthiness checks below work.
    df = df.fillna(False)
    data = df.to_json(orient='records')
    try:
        # Keep the raw import payload for provenance.
        db_imp = Import('<Init '+filename+'>', data)
        db.session.add(db_imp)
        db.session.commit()
    except:
        print('ERROR: Import() can not be stored in Database.')
        return False
    # First pass: create Doi rows in batches, collecting url data on the side.
    for i in range(0, len(df), batch_size):
        for _, row in tqdm(df[i:i+batch_size].iterrows()):
            dict_tmp = {}
            is_valid = validate_doi(row['doi'])
            if is_valid:
                if row['doi'] and row['date']:
                    db_doi = None
                    try:
                        db_doi = Doi(
                            doi=row['doi'],
                            date_published=datetime.strptime(row['date'], '%Y-%m-%d'),
                            import_id=db_imp.id,
                            is_valid=True
                        )
                        db.session.add(db_doi)
                        num_dois_added += 1
                        dois_added.append(row['doi'])
                    except:
                        print('ERROR: Can not import Doi {0}.'.format(row['doi']))
                    if row['url'] and row['url_type'] and db_doi:
                        if row['url'] not in url_list:
                            url_list.append(row['url'])
                            dict_tmp['doi'] = db_doi.doi
                            dict_tmp['url'] = row['url']
                            dict_tmp['url_type'] = row['url_type']
                            url_import_lst.append(dict_tmp)
                else:
                    print('WARNING: Entry {0} is not valid'.format(row['doi']))
            else:
                print('WARNING: DOI {} is not valid.'.format(row['doi']))
        db.session.commit()
    # Second pass: create the Url rows collected above, again in batches.
    for i in range(0, len(url_import_lst), batch_size):
        for d in url_import_lst[i:i+batch_size]:
            try:
                db_url = Url(
                    url=d['url'],
                    doi=d['doi'],
                    url_type=d['url_type']
                )
                db.session.add(db_url)
                num_urls_added += 1
            except:
                print('ERROR: Can not import Url {0}.'.format(d['url']))
        db.session.commit()
    db.session.close()
    print('{0} doi\'s added to database.'.format(num_dois_added))
    print('{0} url\'s added to database.'.format(num_urls_added))
    return {'dois_added': dois_added, 'num_dois_added': num_dois_added, 'num_urls_added': num_urls_added}
def add_entries_to_database(data, import_id):
    """Store data to table Doi and Url.

    Parameters
    ----------
    data : list
        List of dictionaries with keys ``doi``, ``date``, ``url`` and
        ``url_type``.
    import_id : string
        Id of ``Import()`` table, where the raw data was stored in.

    Returns
    -------
    dict
        Import metrics as ``dict``. Keys: ``dois_added``,
        ``num_dois_added`` and ``num_urls_added``.
    """
    from app.models import Doi
    from app.models import Url

    num_dois_added = 0
    num_urls_added = 0
    dois_added = []
    url_import_lst = []
    url_list = []

    # First pass: create one Doi row per new, valid DOI and remember the
    # URL that should be attached to it afterwards.
    for entry in tqdm(data):
        dict_tmp = {}
        is_valid = validate_doi(entry['doi'])
        if is_valid:
            db_doi = None
            result_doi = Doi.query.filter_by(doi=entry['doi']).first()
            if result_doi is None:
                try:
                    db_doi = Doi(
                        doi=entry['doi'],
                        date_published=datetime.strptime(entry['date'], '%Y-%m-%d'),
                        import_id=import_id,
                        is_valid=True
                    )
                    db.session.add(db_doi)
                    num_dois_added += 1
                    dois_added.append(entry['doi'])
                    db.session.commit()
                except:
                    print('ERROR: Can not import Doi {0}.'.format(entry['doi']))
                # Queue the URL only if the Doi row was actually created
                # (db_doi stays None when the insert failed).
                if entry['url'] and entry['url_type'] and db_doi:
                    if entry['url'] not in url_list:
                        url_list.append(entry['url'])
                        dict_tmp['doi'] = db_doi.doi
                        dict_tmp['url'] = entry['url']
                        dict_tmp['url_type'] = entry['url_type']
                        url_import_lst.append(dict_tmp)
            else:
                print('WARNING: Entry {0} is not valid'.format(entry['doi']))
        else:
            print('WARNING: DOI {} is not valid.'.format(entry['doi']))
    db.session.commit()

    # Second pass: store the collected URLs.
    for d in url_import_lst:
        try:
            db_url = Url(
                url=d['url'],
                doi=d['doi'],
                url_type=d['url_type']
            )
            db.session.add(db_url)
            num_urls_added += 1
        except:
            print('ERROR: Can not import Url {0}.'.format(d['url']))
    db.session.commit()
    db.session.close()

    # BUGFIX: an unreachable, older duplicate of this import loop used to
    # follow the return statement below; it has been removed.
    print('{0} doi\'s added to database.'.format(num_dois_added))
    print('{0} url\'s added to database.'.format(num_urls_added))
    return {'dois_added': dois_added, 'num_dois_added': num_dois_added, 'num_urls_added': num_urls_added}
def import_dois_from_api(data):
    """Import data coming from the API endpoint.

    Stores the raw payload as an ``Import`` row, then hands the entries
    to :func:`add_entries_to_database`.

    Parameters
    ----------
    data : list
        Entries received from the API request.

    Returns
    -------
    dict or str
        Import metrics on success, an error string otherwise.
    """
    from app.models import Import
    try:
        db_import = Import('<API>', dumps(data))
        db.session.add(db_import)
        db.session.commit()
        return add_entries_to_database(data, db_import.id)
    except:
        error_message = 'ERROR: Data import from API not working.'
        print(error_message)
        return error_message
def create_doi_new_urls(batch_size):
    """Create URL's from the identifier.

    Creates the new-style DOI URL's (``https://doi.org/<doi>``) as part
    of the pre-processing.

    Parameters
    ----------
    batch_size : int
        Number of Doi rows to process per commit.
    """
    from app.models import Doi
    from app.models import Url
    import app  # NOTE(review): apparently unused here - confirm before removing

    num_urls_added = 0
    urls_added = []

    # All URL's already stored in the database.
    known_urls = [row.url for row in db.session.query(Url.url)]

    # Doi rows that do not have their new-style DOI URL yet.
    pending = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_new == False).all()
    for start in range(0, len(pending), batch_size):
        for doi_row in pending[start:start + batch_size]:
            quoted_doi = urllib.parse.quote(doi_row.doi)
            url = 'https://doi.org/{0}'.format(quoted_doi)
            if url not in known_urls and url not in urls_added:
                try:
                    db_url = Url(
                        url=url,
                        doi=doi_row.doi,
                        url_type='doi_new'
                    )
                    doi_row.url_doi_new = True
                    db.session.add(db_url)
                    num_urls_added += 1
                    urls_added.append(url)
                except:
                    print('WARNING: Url {0} can not be created.'.format(url))
        db.session.commit()
    db.session.close()
    print('{0} new doi url\'s added to database.'.format(num_urls_added))
def create_doi_old_urls(batch_size):
    """Create URL's from the identifier.

    Creates the old-style DOI URL's (``http://dx.doi.org/<doi>``) as
    part of the pre-processing.

    Parameters
    ----------
    batch_size : int
        Number of Doi rows to process per commit.
    """
    from app.models import Doi
    from app.models import Url

    num_urls_added = 0
    urls_added = []

    # All URL's already stored in the database.
    known_urls = [row.url for row in db.session.query(Url.url)]

    # Doi rows that do not have their old-style DOI URL yet.
    pending = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_old == False).all()
    for start in range(0, len(pending), batch_size):
        for doi_row in pending[start:start + batch_size]:
            quoted_doi = urllib.parse.quote(doi_row.doi)
            url = 'http://dx.doi.org/{0}'.format(quoted_doi)
            if url not in known_urls and url not in urls_added:
                try:
                    db_url = Url(
                        url=url,
                        doi=doi_row.doi,
                        url_type='doi_old'
                    )
                    doi_row.url_doi_old = True
                    db.session.add(db_url)
                    num_urls_added += 1
                    urls_added.append(url)
                except:
                    print('WARNING: Url {0} can not be created.'.format(url))
        db.session.commit()
    db.session.close()
    print('{0} old doi url\'s added to database.'.format(num_urls_added))
def create_doi_lp_urls():
    """Create URL's from the identifier.

    Resolves each DOI via ``https://doi.org/`` (following redirects) and
    stores the final landing-page URL. Every HTTP round trip is also
    logged as an ``APIRequest`` row.
    """
    from app.models import APIRequest
    from app.models import Doi
    from app.models import Url

    num_urls_added = 0
    db_urls = []
    urls_added = []

    # get all URL's in the database
    query = db.session.query(Url.url)
    for row in query:
        db_urls.append(row.url)

    # create doi landing page url for every Doi that does not have one yet
    result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_lp == False).all()
    for d in tqdm(result_join):
        doi_url_encoded = urllib.parse.quote(d.doi)
        url = 'https://doi.org/{0}'.format(doi_url_encoded)
        # resp.url is the URL the redirect chain ended at
        resp = request_doi_landingpage_api(url)
        resp_url = resp.url
        try:
            db_api = APIRequest(
                doi=d.doi,
                request_url=url,
                request_type='doi_landingpage',
                response_content=resp.content,
                response_status=resp.status_code
            )
            db.session.add(db_api)
        except:
            print('WARNING: APIRequest can not be created.')
        # only store landing pages not seen in this run or in the database
        if resp_url not in db_urls and resp_url not in urls_added:
            db_url = Url(
                url=resp_url,
                doi=d.doi,
                url_type='doi_landingpage'
            )
            d.url_doi_lp = True
            db.session.add(db_url)
            num_urls_added += 1
            urls_added.append(resp_url)
    db.session.commit()
    db.session.close()
    print('{0} doi new landing page doi url\'s added to database.'.format(num_urls_added))
def request_doi_landingpage_api(url):
    """GET *url*, following redirects, and return the response object."""
    response = requests.get(url, allow_redirects=True)
    return response
def create_ncbi_urls(ncbi_tool, ncbi_email):
    """Create NCBI URL's from the identifier.

    https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/

    Parameters
    ----------
    ncbi_tool : string
        Name of tool, which want to connect to the NCBI API.
    ncbi_email : string
        Email related to the app, used as credential for the request.
    """
    from app.models import APIRequest
    from app.models import Doi
    from app.models import Url

    num_urls_pm_added = 0
    num_urls_pmc_added = 0
    db_urls = []
    urls_added = []

    # get all URL's in the database
    query = db.session.query(Url.url)
    for row in query:
        db_urls.append(row.url)

    # BUGFIX: plain Python `or` between column expressions does not build
    # a SQL OR clause; use an explicit OR instead.
    result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(
        db.or_(Doi.url_pm == False, Doi.url_pmc == False)).all()
    for d in tqdm(result_join):
        # TODO: allows up to 200 ids sent at the same time
        # send request to NCBI API
        doi_url_encoded = urllib.parse.quote(d.doi)
        url = 'https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?ids={0}'.format(doi_url_encoded)
        resp_data = request_ncbi_api(url, ncbi_tool, ncbi_email, d.doi)
        db_ncbi = APIRequest(
            doi=d.doi,
            request_url=url,
            request_type='ncbi',
            response_content=dumps(resp_data),
            # BUGFIX: `resp` was never defined here (request_ncbi_api()
            # returns only the decoded JSON), so `resp.status_code`
            # raised a NameError. The HTTP status is not available at
            # this point.
            response_status=None
        )
        db.session.add(db_ncbi)
        # NOTE(review): the id-converter commonly returns 'records' as a
        # list of dicts; the membership tests below assume a dict --
        # confirm against a live response.
        if 'records' in resp_data:
            # create PMC url
            if 'pmcid' in resp_data['records']:
                url_pmc = 'https://ncbi.nlm.nih.gov/pmc/articles/PMC{0}/'.format(
                    resp_data['records']['pmcid'])
                # BUGFIX: the duplicate check used `url` (the API request
                # URL) instead of the PMC URL.
                if url_pmc not in db_urls and url_pmc not in urls_added:
                    db_url_pmc = Url(
                        url=url_pmc,  # BUGFIX: the url column was never set
                        doi=d.doi,
                        url_type='pmc'
                    )
                    d.url_pmc = True
                    db.session.add(db_url_pmc)
                    num_urls_pmc_added += 1
                    urls_added.append(url_pmc)
            # create PM url
            if 'pmid' in resp_data['records']:
                url_pm = 'https://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(
                    resp_data['records']['pmid'])
                if Url.query.filter_by(url=url_pm).first() is None:
                    db_url_pm = Url(
                        url=url_pm,
                        doi=d.doi,
                        url_type='pm'
                    )
                    d.url_pm = True
                    db.session.add(db_url_pm)
                    num_urls_pm_added += 1
                    urls_added.append(url_pm)  # BUGFIX: appended url_pmc before
    db.session.commit()
    db.session.close()
    print('{0} PM url\'s added to database.'.format(num_urls_pm_added))
    print('{0} PMC url\'s added to database.'.format(num_urls_pmc_added))
def request_ncbi_api(url, ncbi_tool, ncbi_email, doi):
    """Call the NCBI id-converter API and return the decoded JSON body.

    ``doi`` is currently unused inside this function; it is kept because
    existing call sites pass it.
    """
    payload = {
        'tool': ncbi_tool,
        'email': ncbi_email,
        'idtype': 'doi',
        'versions': 'no',
        'format': 'json'
    }
    response = requests.get(url, params=payload)
    return response.json()
def create_unpaywall_urls(email):
    """Create Unpaywall URL's from the identifier.

    https://unpaywall.org/products/api

    Parameters
    ----------
    email : string
        Email related to the app, used as credential for the request.
    """
    from app.models import APIRequest
    from app.models import Doi
    from app.models import Url

    num_urls_unpaywall_added = 0
    db_urls = []
    urls_added = []

    # get all URL's in the database
    query = db.session.query(Url.url)
    for row in query:
        db_urls.append(row.url)

    result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_unpaywall == False).all()
    for d in tqdm(result_join):
        # send request to Unpaywall API
        url_dict = {}
        doi_url_encoded = urllib.parse.quote(d.doi)
        url = 'https://api.unpaywall.org/v2/{0}?email={1}'.format(doi_url_encoded, email)
        resp_data = request_unpaywall_api(url)
        db_api = APIRequest(
            doi=d.doi,
            request_url=url,
            request_type='unpaywall',
            response_content=dumps(resp_data),
            # BUGFIX: resp_data is the decoded JSON dict, so
            # `resp_data.status_code` raised an AttributeError. The HTTP
            # status is not available at this point.
            response_status=None
        )
        d.url_unpaywall = True
        db.session.add(db_api)
        db.session.commit()

        # check if response includes needed data
        if 'doi_url' in resp_data:
            url_dict['unpaywall_doi_url'] = resp_data['doi_url']
        if 'oa_locations' in resp_data:
            for loc in resp_data['oa_locations']:
                if 'url_for_pdf' in loc:
                    if loc['url_for_pdf']:
                        url_dict['unpaywall_url_for_pdf'] = loc['url_for_pdf']
                if 'url' in loc:
                    if loc['url']:
                        url_dict['unpaywall_url'] = loc['url']
                if 'url_for_landing_page' in loc:
                    if loc['url_for_landing_page']:
                        url_dict['unpaywall_url_for_landing_page'] = loc['url_for_landing_page']

        # store URL's in database
        for url_type, url in url_dict.items():
            if url not in db_urls and url not in urls_added:
                db_url = Url(
                    url=url,
                    doi=d.doi,
                    url_type=url_type
                )
                db.session.add(db_url)
                num_urls_unpaywall_added += 1
                urls_added.append(url)
    db.session.commit()
    db.session.close()
    print('{0} Unpaywall url\'s added to database.'.format(num_urls_unpaywall_added))
def request_unpaywall_api(url):
    """GET *url* from the Unpaywall API and return the decoded JSON."""
    return requests.get(url).json()
def fb_requests(app_id, app_secret, batch_size):
    """Fetch Facebook engagement data for all stored URL's.

    Requests an app access token, then queries the Graph API in batches
    of ``batch_size`` URL's and stores every per-URL response as an
    ``FBRequest`` row.

    Example Response:
    {'id': 'http://dx.doi.org/10.22230/src.2010v1n2a24',
    'engagement': { 'share_count': 0, 'comment_plugin_count': 0,
    'reaction_count': 0, 'comment_count': 0}}
    """
    from app.models import FBRequest
    from app.models import Url

    credentials = {'grant_type': 'client_credentials',
                   'client_id': app_id,
                   'client_secret': app_secret}
    try:
        response = requests.post(
            'https://graph.facebook.com/oauth/access_token?',
            params=credentials)
    except requests.exceptions.RequestException:
        raise Exception()
    token = loads(response.text)
    fb_graph = GraphAPI(token['access_token'], version="2.10")

    fb_request_added = 0
    all_urls = Url.query.all()
    for start in range(0, len(all_urls), batch_size):
        chunk = all_urls[start:start + batch_size]
        url_list = [row.url for row in chunk]
        urls_response = fb_graph.get_objects(ids=url_list,
                                             fields="engagement,og_object")
        for key, value in urls_response.items():
            if urls_response:
                db_fb_request = FBRequest(
                    url=key,
                    response=value
                )
                db.session.add(db_fb_request)
                fb_request_added += 1
        db.session.commit()
    db.session.close()
    print('{0} Facebook openGraph request\'s added to database.'.format(fb_request_added))
def delete_dois():
    """Delete all doi entries."""
    from app.models import Doi
    try:
        removed = db.session.query(Doi).delete()
        db.session.commit()
        print(removed, 'doi\'s deleted from database.')
    except:
        # roll back so the session stays usable after a failed delete
        db.session.rollback()
        print('ERROR: Doi\'s can not be deleted from database.')
def delete_urls():
    """Delete all url entries."""
    from app.models import Url
    try:
        removed = db.session.query(Url).delete()
        db.session.commit()
        print(removed, 'url\'s deleted from database.')
    except:
        # roll back so the session stays usable after a failed delete
        db.session.rollback()
        print('ERROR: Url\'s can not be deleted from database.')
def delete_apirequests():
    """Delete all api requests."""
    from app.models import APIRequest
    try:
        removed = db.session.query(APIRequest).delete()
        db.session.commit()
        print(removed, 'APIRequests\'s deleted from database.')
    except:
        # roll back so the session stays usable after a failed delete
        db.session.rollback()
        print('ERROR: API requests\'s can not be deleted from database.')
def delete_fbrequests():
    """Delete all facebook requests."""
    from app.models import FBRequest
    try:
        removed = db.session.query(FBRequest).delete()
        db.session.commit()
        print(removed, 'FBRequests\'s deleted from database.')
    except:
        # roll back so the session stays usable after a failed delete
        db.session.rollback()
        print('ERROR: Facebook requests\'s can not be deleted from database.')
def export_tables_to_csv(table_names, db_uri):
    """Export the given tables to one dated CSV file each.

    Parameters
    ----------
    table_names : list
        Names of the database tables to export.
    db_uri : string
        Connection URI passed to the database driver's ``connect()``.
    """
    con = connect(db_uri)
    try:
        cur = con.cursor()
        filename_list = [BASE_DIR + '/app/static/export/'+datetime.today().strftime('%Y-%m-%d')+'_'+table+'.csv' for table in table_names]
        for idx, filename in enumerate(filename_list):
            # NOTE: table names are interpolated into the SQL statement;
            # only call this with trusted, internal table names.
            sql = "COPY "+table_names[idx]+" TO STDOUT DELIMITER ',' CSV HEADER;"
            # BUGFIX: the output file handle was never closed before.
            with open(filename, "w") as f:
                cur.copy_expert(sql, f)
    finally:
        # BUGFIX: the connection was never closed before.
        con.close()
def import_csv(table_names, delete_tables):
    """Import data coming from CSV file.

    Dispatches to a full re-import (all tables wiped first) or an
    append-only import, depending on ``delete_tables``.
    """
    from app import import_csv_recreate
    from app import import_csv_append
    handler = import_csv_recreate if delete_tables else import_csv_append
    handler(table_names)
def import_csv_recreate(table_names):
    """Import data coming from CSV file.

    Delete all data in advance and do a fresh import.

    Parameters
    ----------
    table_names : list
        Table names ('doi', 'url', 'api_request', 'fb_request'); each
        must have a matching CSV file under ``app/static/import/``.
    """
    from app import delete_data
    from app.models import Import
    from app.models import Doi
    from app.models import Url
    from app.models import APIRequest
    from app.models import FBRequest

    table2model = {
        'doi': Doi,
        'url': Url,
        'api_request': APIRequest,
        'fb_request': FBRequest
    }
    delete_data()

    filename_list = [BASE_DIR + '/app/static/import/'+table+'.csv' for table in table_names]
    for idx, filename in enumerate(filename_list):
        # BUGFIX: the class looked up here used to be dead code -- it was
        # shadowed by a redundant if/elif chain inside the row loop. The
        # mapping is now used directly.
        model_cls = table2model[table_names[idx]]
        df = pd.read_csv(filename)
        data_str = df.to_json(orient='records')
        # keep the raw file contents as an Import row for provenance
        db_imp = Import('<Import '+filename+'>', data_str)
        db.session.add(db_imp)
        db.session.commit()
        for row in df.to_dict(orient="records"):
            db.session.add(model_cls(**row))
        db.session.commit()
def import_csv_append(table_names):
    """Import data coming from CSV file.

    Append-only import: Doi/Url rows already present in the database are
    skipped; APIRequest and FBRequest rows are always appended.

    Parameters
    ----------
    table_names : list
        Table names ('doi', 'url', 'api_request', 'fb_request'); each
        must have a matching CSV file under ``app/static/import/``.
    """
    from app.models import Import
    from app.models import Doi
    from app.models import Url
    from app.models import APIRequest
    from app.models import FBRequest

    def _pg_bool(value):
        """Map a Postgres CSV boolean ('t'/'f') to a Python bool.

        BUGFIX: the previous inline if/elif chains assigned nothing for
        values other than 't'/'f' (e.g. an empty cell read as NaN), which
        either raised a NameError or silently reused the value from the
        previous row. Unknown values now default to False.
        """
        return value == 't'

    for table_name in table_names:
        filename = BASE_DIR + '/app/static/import/'+table_name+'.csv'
        df = pd.read_csv(filename, encoding='utf8')
        data_str = df.to_json(orient='records')
        data = df.to_dict(orient='records')
        # keep the raw file contents as an Import row for provenance
        db_imp = Import('<Import '+filename+'>', data_str)
        db.session.add(db_imp)
        db.session.commit()
        if table_name == 'doi':
            print('Import Doi table:')
            dois_added = 0
            for entry in tqdm(data):
                result_doi = Doi.query.filter_by(doi=entry['doi']).first()
                if result_doi is None:
                    db_doi = Doi(
                        doi=entry['doi'],
                        import_id=db_imp.id,
                        is_valid=_pg_bool(entry['is_valid']),
                        pm_id=entry['pm_id'],
                        pmc_id=entry['pmc_id'],
                        date_published=datetime.strptime(entry['date_published'], '%Y-%m-%d %H:%M:%S'),
                        url_doi_lp=_pg_bool(entry['url_doi_lp']),
                        url_doi_new=_pg_bool(entry['url_doi_new']),
                        url_doi_old=_pg_bool(entry['url_doi_old']),
                        url_pm=_pg_bool(entry['url_pm']),
                        url_pmc=_pg_bool(entry['url_pmc']),
                        url_unpaywall=_pg_bool(entry['url_unpaywall'])
                    )
                    db.session.add(db_doi)
                    db.session.commit()
                    dois_added += 1
            print('{0} doi\'s added to database.'.format(dois_added))
        elif table_name == 'url':
            print('Import Url table:')
            urls_added = 0
            for entry in tqdm(data):
                result_url = Url.query.filter_by(url=entry['url']).first()
                if result_url is None:
                    db_url = Url(
                        url=entry['url'],
                        doi=entry['doi'],
                        url_type=entry['url_type'],
                        date_added=datetime.strptime(entry['date_added'], '%Y-%m-%d %H:%M:%S.%f')
                    )
                    db.session.add(db_url)
                    db.session.commit()
                    urls_added += 1
            print('{0} url\'s added to database.'.format(urls_added))
        elif table_name == 'api_request':
            print('Import APIRequests table:')
            apirequests_added = 0
            for entry in tqdm(data):
                db_apirequest = APIRequest(
                    doi=entry['doi'],
                    request_url=entry['request_url'],
                    request_type=entry['request_type'],
                    response_content=entry['response_content'],
                    response_status=entry['response_status']
                )
                db.session.add(db_apirequest)
                db.session.commit()
                apirequests_added += 1
            print('{0} apirequest\'s added to database.'.format(apirequests_added))
        elif table_name == 'fb_request':
            print('Import FBRequests table:')
            fbrequests_added = 0
            for entry in tqdm(data):
                db_fbrequest = FBRequest(
                    url_url=entry['url_url'],
                    response=entry['response'],
                    reactions=entry['reactions'],
                    shares=entry['shares'],
                    comments=entry['comments'],
                    plugin_comments=entry['plugin_comments'],
                    timestamp=datetime.strptime(entry['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
                )
                db.session.add(db_fbrequest)
                db.session.commit()
                fbrequests_added += 1
            print('{0} fbrequest\'s added to database.'.format(fbrequests_added))
def create_app():
    """Create application and load settings.

    Application factory: builds the Flask app, layers settings by the
    ENV environment variable (development / production / testing),
    initialises the extensions and registers the blueprints. User- and
    host-specific settings files are skipped on Travis CI.
    """
    app = Flask(__name__)
    ENVIRONMENT = os.getenv('ENV', default='development')
    # TESTING = os.getenv('TESTING', default=False)
    print('* Updating App Mode to: ' + ENVIRONMENT)
    travis = os.getenv('TRAVIS', default=False)
    if not travis:
        print('* Loading User Settings.')
        # silent=True: missing settings files are simply skipped
        app.config.from_pyfile(BASE_DIR+'/settings_user.py', silent=True)
    if ENVIRONMENT == 'development':
        print('* Loading Development Settings.')
        app.config.from_pyfile(BASE_DIR+'/settings_development.py', silent=True)
        app.config.from_object('settings_default.Development')
        if not travis:
            DebugToolbarExtension(app)
    elif ENVIRONMENT == 'production':
        print('* Loading Production Settings.')
        # order of settings loading: 1. settings file, 2. environment variable DATABASE_URL, 3. environment variable SQLALCHEMY_DATABASE_URI
        if not travis:
            app.config.from_pyfile(BASE_DIR+'/settings_production.py', silent=True)
        app.config.from_object('settings_default.Production')
    elif ENVIRONMENT == 'testing':
        print('* Loading Test Settings.')
        app.config['TESTING'] = True
        app.config.from_object('settings_default.Testing')
    if not travis:
        print('* Database: ' + app.config['SQLALCHEMY_DATABASE_URI'])
    db.init_app(app)
    migrate.init_app(app, db)

    # blueprints are imported here to avoid circular imports at module load
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    # scheduler = BackgroundScheduler()
    # rate_limit = app.config['FB_HOURLY_RATELIMIT']
    # rate_intervall = 3600 / rate_limit
    # scheduler.add_job(, trigger='interval', seconds=rate_intervall)
    # scheduler.start()

    if not app.debug and not app.testing:
        # Logging (only production)
        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/fhe.log', maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info('Facebook Hidden Engagement')
    return app
from app import models
| [
"mail@stefankasberger.at"
] | mail@stefankasberger.at |
c175141ce719e09b6cea9f37d217223ff7d6033a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/H/hitkarsh/karnataka_2.py | 6c81472eb4a4013c05dd3d24a663158f61abd084 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,692 | py | import scraperwiki
# Scrape the alphabetical list of villages for Karnataka (State_Code=29)
# from the Census of India 2001 site and store every row in ScraperWiki's
# SQLite store. Python 2 code (print statements, mechanize).
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=29"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
    l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
    l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
    #add state code to the url
    #url1=url+l2[state_count]+"&SearchKey="
    url1=url+"&SearchKey="
    state_count+=1
    count=16
    l_c=0
    #data=[]
    row=[]
    #run loop for alphabets
    while count<26:
        #while count<2:
        #add search alphabet to the url
        url2=url1+l[count]
        # code added by Usha Nair
        br = mechanize.Browser()
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        response = br.open(url2)
        VAR1 = response.read() #reads the source file for the web page
        br.select_form(nr=0)
        br.set_all_readonly(False)
        # find the ASP.NET postback target of the "Show All" link
        mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
        if not mnext:
            count+=1
            continue
        br["__EVENTTARGET"] = mnext.group(1)
        br["__EVENTARGUMENT"] = mnext.group(2)
        #br.find_control("btnSearch").disabled = True
        response = br.submit()
        VAR2 = response.read() # source code after submitting show all
        print "response"
        print response
        print "VAR2"
        print VAR2
        # Usha Nair till here
        #html = scraperwiki.scrape(url2)
        #root = lxml.html.fromstring(html)
        root = lxml.html.fromstring(VAR2)
        count+=1
        #select div where data exists
        for el in root.cssselect("div#printarea td"):
            #select appropriate table row
            # collect 5 cells per table row (l_c counts 0..4), then save
            for el2 in el.cssselect("tr.GridAlternativeRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
            #select appropriate table row
            for el2 in el.cssselect("tr.GridRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
print "completed scrapping"
# NOTE(review): this is a verbatim duplicate of the scraper above (the
# whole script appears twice in the file, so the scrape runs twice and
# s_no restarts at 0, overwriting the rows saved by the first run).
# Likely a paste artifact -- confirm and deduplicate.
import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=29"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
    l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
    l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
    #add state code to the url
    #url1=url+l2[state_count]+"&SearchKey="
    url1=url+"&SearchKey="
    state_count+=1
    count=16
    l_c=0
    #data=[]
    row=[]
    #run loop for alphabets
    while count<26:
        #while count<2:
        #add search alphabet to the url
        url2=url1+l[count]
        # code added by Usha Nair
        br = mechanize.Browser()
        br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
        response = br.open(url2)
        VAR1 = response.read() #reads the source file for the web page
        br.select_form(nr=0)
        br.set_all_readonly(False)
        mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
        if not mnext:
            count+=1
            continue
        br["__EVENTTARGET"] = mnext.group(1)
        br["__EVENTARGUMENT"] = mnext.group(2)
        #br.find_control("btnSearch").disabled = True
        response = br.submit()
        VAR2 = response.read() # source code after submitting show all
        print "response"
        print response
        print "VAR2"
        print VAR2
        # Usha Nair till here
        #html = scraperwiki.scrape(url2)
        #root = lxml.html.fromstring(html)
        root = lxml.html.fromstring(VAR2)
        count+=1
        #select div where data exists
        for el in root.cssselect("div#printarea td"):
            #select appropriate table row
            for el2 in el.cssselect("tr.GridAlternativeRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
            #select appropriate table row
            for el2 in el.cssselect("tr.GridRows td"):
                if l_c<4:
                    row.append(el2.text_content())
                    l_c+=1
                else:
                    row.append(el2.text_content())
                    l_c=0
                    data.append(row)
                    #save to data base
                    scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
                    s_no+=1
                    row=[]
print "completed scrapping"
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
78640dca0822abb8a5bb9127e14b9c04fb305615 | 883c1d9ece9a5a5d7001b7a92fe3e8f1a7776354 | /cmt_statistics_tool/statistics/s01_01.py | e3905b7075a8e52365297bfd23484e2bb712c0f4 | [] | no_license | HPI-Information-Systems/cmt_statistics_tool | d2aab748965d959a339d9f24e6ff4027c6ef4d92 | 76f7e58b472ad3f20264e5c985a1d269b5d5cc63 | refs/heads/master | 2023-09-03T12:59:22.211882 | 2021-10-05T07:37:45 | 2021-10-05T07:37:45 | 404,407,951 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | """Reviewers and ratings: Expertise Level vs Rating"""
from asyncio import run
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from pandas import DataFrame
from seaborn import barplot
from sqlalchemy import func
from sqlalchemy.future import select
from uvloop import install
from cmt_statistics_tool.statistics import get_data, plot_df
from cmt_statistics_tool.tables import SubmissionReview
async def main() -> DataFrame:
    """Count reviews per (overall rating, expertise level).

    Returns a long-format frame with columns Status / Expertise / Count,
    plus synthetic Status="All" rows aggregating over all ratings.
    """
    statement = (
        select(
            SubmissionReview.overall_rating, SubmissionReview.confidence, func.count()
        )
        .group_by(SubmissionReview.overall_rating, SubmissionReview.confidence)
        .order_by(SubmissionReview.overall_rating, SubmissionReview.confidence)
    )
    df = DataFrame(await get_data(statement)).rename(
        columns={0: "Status", 1: "Expertise", 2: "Count"}
    )
    # Totals per expertise level across all ratings, shown as Status="All".
    df2 = df.groupby("Expertise").sum().reset_index()
    df2.insert(0, "Status", "All")
    # BUGFIX: DataFrame.append() was deprecated in pandas 1.4 and removed
    # in pandas 2.0; concat preserves the old behavior (indices kept).
    from pandas import concat
    return concat([df, df2])
def plot(df: DataFrame, ax: Axes) -> None:
    """Draw a grouped bar chart of review counts onto *ax*.

    Bars are grouped by overall rating (plus the synthetic "All" group)
    and colored by the reviewer's self-reported expertise level.
    """
    ax.set_title("Expertise level and rating (original submissions)")
    barplot(
        x="Status",
        y="Count",
        hue="Expertise",
        order=["Accept", "Weak Accept", "Weak Reject", "Reject", "All"],
        hue_order=[
            "Expert in this problem",
            # the trailing space below matches the raw CMT export values
            "Knowledgeable in this sub-area ",
            "Generally aware of the area",
            "Had to use common sense and general knowledge",
        ],
        data=df,
        ax=ax,
    )
if __name__ == "__main__":
    # Install uvloop as the asyncio event loop, build the dataframe,
    # render the figure and save it to disk.
    install()
    df = run(main())
    fig = plot_df(df, plot)
    plt.savefig("plots/01_01.png")
    print(df)
| [
"39628987+fabianhe@users.noreply.github.com"
] | 39628987+fabianhe@users.noreply.github.com |
aea3200a6cf1ceec2a12eac766f221b4f85cb99d | 03415e25427d9a17bada8fd75daadc45c093c377 | /LST_Collect.py | 7cf76c296c1d1f4a78e7ce9e9b0fd9243fd117e1 | [] | no_license | mwilensky768/MJW-HERA | 472d639bd4086a31be112564be9b2b22e70e3e86 | da1710a17123cc3ccd3e318e224712eb80bcb3bd | refs/heads/master | 2021-08-10T22:32:15.391270 | 2017-11-13T01:45:48 | 2017-11-13T01:45:48 | 108,204,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | import pyuvdata
import glob
import numpy as np
from math import pi
# Night of HERA data to process (Julian date 2458042), xx polarisation.
inpath = '/data6/HERA/data/2458042/zen.2458042.'
pathlist = glob.glob(inpath + '*.xx*.uv')
# Extract the numeric observation id embedded between 'zen.<jd>.' and
# '.xx' in each filename, and sort the paths chronologically by it.
# NOTE(review): the +12 offset assumes the fixed 'zen.2458042.' prefix
# length -- confirm against the actual filenames.
obslist = np.sort(np.array([int(path[path.find('zen.') + 12:path.find('.xx')])
                            for path in pathlist]))
pathlist_sort = [inpath + str(obs) + '.xx.HH.uv' for obs in obslist]
UV = pyuvdata.UVData()
LST = []
# Record the first LST value of every observation (radians).
for path in pathlist_sort:
    UV.read_miriad(path)
    LST.append(UV.lst_array[0])
# Convert radians to sidereal hours (23.934 h per 2*pi) before saving.
np.save('/data4/mwilensky/GS_LST.npy', np.array(LST) * 23.934 / (2 * pi))
| [
"mjw768@uw.edu"
] | mjw768@uw.edu |
d2b90e879e40e32c34f931f514ed5e99a04ff5b1 | 979366550b6a98a758f17828407d3304e63a540a | /microblog/app/models.py | 6350b18df6700e15a03f659295db3fedf8a570f5 | [] | no_license | ankitrkumar/Learning-Python | 2ff5dd497234742dbfd5916b6993f15606d3b43c | f112272b7622795d17113c8edd850fdfe066ca75 | refs/heads/master | 2021-01-10T13:48:50.810023 | 2015-07-03T22:52:55 | 2015-07-03T22:52:55 | 36,099,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,625 | py | from app import db, app
from hashlib import md5
import sys
from config import WHOOSH_ENABLED
enable_search = WHOOSH_ENABLED
if enable_search:
import flask.ext.whooshalchemy as whooshalchemy
# Association table for the self-referential many-to-many "follows"
# relation between users (no model class of its own).
followers = db.Table('followers',
    db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(db.Model):
    """A registered user: profile data, posts, and follow relations."""

    id = db.Column(db.Integer, primary_key = True)
    nickname = db.Column(db.String(64), index = True, unique = True)
    email = db.Column(db.String(120), index = True, unique =True)
    posts = db.relationship('Post', backref = 'author', lazy ='dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime)
    # Self-referential many-to-many through the `followers` table:
    # `followed` = users this user follows; backref `followers` = users
    # following this user.
    followed = db.relationship('User',
        secondary = followers,
        primaryjoin = (followers.c.follower_id == id),
        secondaryjoin = (followers.c.followed_id == id),
        backref = db.backref('followers', lazy ='dynamic'),
        lazy = 'dynamic'
    )

    def is_authenticated(self):
        # Flask-Login interface (pre-property API).
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # Flask-Login expects a unicode id; the NameError fallback keeps
        # this working on Python 3, where unicode() does not exist.
        try:
            return unicode(self.id)
        except NameError:
            return str(self.id)

    def avatar(self, size):
        """Return a Gravatar URL for this user's email at *size* pixels."""
        return 'http://www.gravatar.com/avatar/%s?d=mm&s=%d' % (md5(self.email.encode('utf-8')).hexdigest(), size)

    @staticmethod
    def make_unique_nickname(nickname):
        """Return *nickname*, suffixed with a random number if taken."""
        if User.query.filter_by(nickname = nickname).first() is None:
            return nickname
        from random import randint
        version = randint(1,9999)
        while True:
            new_nickname = nickname + str(version)
            if User.query.filter_by(nickname = new_nickname).first() is None:
                break
            version = randint(1,9999)
        return new_nickname

    def follow(self, user):
        # Returns self when the relation changed, None when already
        # following (callers check for None).
        if not self.is_following(user):
            self.followed.append(user)
            return self

    def unfollow(self, user):
        # Returns self when the relation changed, None otherwise.
        if self.is_following(user):
            self.followed.remove(user)
            return self

    def is_following(self, user):
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0

    def followed_posts(self):
        """Query of all followed users' posts, newest first."""
        return Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter(followers.c.follower_id == self.id).order_by(Post.timestamp.desc())

    def __repr__(self):
        return '<user %r>' % (self.nickname)
class Post(db.Model):
    """A microblog post authored by a User (structure restored from the dump)."""
    __searchable__ = ['body']  # field indexed by Whoosh full-text search
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Post %r>' % (self.body)
if enable_search:
whooshalchemy.whoosh_index(app, Post)
| [
"ankitkumar1618@gmail.com"
] | ankitkumar1618@gmail.com |
ca566e9009110135e2547eb8c805829d729eff4d | 2aa15786d231136f4487ac904ada5719a0605f3d | /testData/completion/conlist.py | 9a6bf6f67cfe03e5a66772df712542583dc4291a | [
"Apache-2.0",
"MIT"
] | permissive | koxudaxi/pydantic-pycharm-plugin | 7b2f248e45aceccb58e12e67abb34c89e32a53a0 | 61455a7d63c46d567e739ae05f15475b84142a16 | refs/heads/main | 2023-08-23T07:23:40.067425 | 2023-08-07T16:25:52 | 2023-08-07T16:25:52 | 197,027,423 | 362 | 13 | MIT | 2023-09-14T16:39:41 | 2019-07-15T15:41:01 | Kotlin | UTF-8 | Python | false | false | 199 | py |
from pydantic import BaseModel
from pydantic.types import conlist
class A(BaseModel):
abc: conlist()
cde: conlist(str)
efg: conlist(item_type=str)
hij: conlist(List[str])
A(<caret>) | [
"noreply@github.com"
] | noreply@github.com |
352ca71e5dbb7798c95372d5af60e84c1ba86dff | 612535501dcbe82923d45e84517843f0f666a289 | /AMLS_20-21_SN20059361/B2/CNN_B2.py | 6fc2a063572f926cd116569352181e93ad275f7a | [] | no_license | TianhuitiaXu/AMLS_assignment20_21- | 90b7725076c49acf7961004612a00e05f60fbf5a | d1e2208faa01aaaa8536409d99b25c83efa16c14 | refs/heads/master | 2023-02-15T23:13:17.836022 | 2021-01-07T00:04:52 | 2021-01-07T00:04:52 | 304,770,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,122 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from keras.preprocessing import image
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
import tensorflow as tf
from tensorflow.keras import Model
from sklearn.utils import shuffle
from tensorflow import keras
import time
#from B2 import landmarks as la
import matplotlib.pyplot as plt
# from keras import backend as K
# K.set_image_dim_ordering('th')
# global basedir, image_paths, target_size
# basedir = './AMLS_20-21_SNzcictxu'
# basedir = os.path.join(basedir,'Datasets')
# basedir = os.path.join(basedir,'cartoon_set')
# images_dir = os.path.join(basedir,'img')
# labels_filename = 'labels.csv'
# #nor
# def nor(x):
# x = (x - np.mean(x)) / np.std(x)
# return x
# #central_crop
# def center_crop(image):
# cropped_image = tf.image.central_crop(
# image, 0.5)
# return cropped_image
# def data_preprocessing(data_B, image_B):
# #obtain labels
# # data = pd.read_csv(os.path.join(basedir, labels_filename),sep='\t')
# Y_B2=data_B['eye_color']
# Y_B1=data_B['face_shape']
# # now_time = time.time()#起始时间
# x_train, x_test, y_train, y_test = train_test_split(image_B, Y_B2, train_size=0.8, random_state=0)
# x_train, x_cv, y_train, y_cv = train_test_split(x_train, y_train, train_size=0.8, random_state=0)
# x_train = nor(x_train)
# x_cv = nor(x_cv)
# x_test = nor(x_test)
# x_train = center_crop(x_train)
# x_cv = center_crop(x_cv)
# x_test = center_crop(x_test)
# return x_train, x_cv, x_test, y_train, y_cv, y_test
# ############normalization##########################
# # x_train = nor(x_train)
# # x_cv = nor(x_cv)
# # x_test = nor(x_test)
# # # print(x_train.shape, x_cv.shape, x_test.shape)
# #min max normalize
# x_train = (x_train - np.min(x_train)) / (np.max(x_train) - np.min(x_train))
# x_cv = (x_cv - np.min(x_cv)) / (np.max(x_cv) - np.min(x_cv))
# x_test = (x_test - np.min(x_test)) / (np.max(x_test) - np.min(x_test))
# # x_train = x_train.reshape(x_train.shape[0],68,2,1)
# # x_cv = x_cv.reshape(x_cv.shape[0],68,2,1)
# # x_test = x_test.reshape(x_test.shape[0],68,2,1)
# # print(x_train.shape,x_cv.shape,x_test.shape)
# # x_train = x_train / 255.0
# # x_cv = x_cv / 255.0 #测试集不做增强
# # x_test = x_test / 255.0
# ######################################################
# # datagen = image.ImageDataGenerator(
#horizontal_flip=True,
# # )
# # vertical_flip=True
# # rotation_range=1.2,
# # zoom_range=0.2,
# # horizontal_flip=True
# # shear_range=0.2,
# # width_shift_range=0.1,
# # height_shift_range=0.1,
# # zca_whitening = True,
# # zca_epsilon = 1e-1
# # datagen_cv = image.ImageDataGenerator(
# # zca_whitening=True,
# # zca_epsilon = 1e-1)
# # datagen_te = image.ImageDataGenerator(
# # zca_whitening=True,
# # zca_epsilon = 1e-1)
# #datagen_0 = image.ImageDataGenerator(
# #zca_whitening=False)
# #datagen.fit(x_train)
# # datagen_cv.fit(x_cv)
# # datagen_te.fit(x_test)
# #datagen_0.fit(x_train)
# x_train = np.array(x_train)
# print(x_train.shape)
# #crop
# # def random_crop(image):
# # cropped_image = tf.image.random_crop(
# # image, size=[x_train.shape[0],25,25,3])
# # return tf.reshape(cropped_image, [res, res, 3])
# # x_train = random_crop(x_train)
# # x_train = np.array(x_train)
# # print(x_train.shape)
# # image show
# def show_image(x_train):
# for x_batch in datagen_0.flow(x_train, batch_size=32, shuffle = True):
# # create a grid of 4x4 images
# fig, axes = plt.subplots(3, 3)
# axes = axes.flatten()
# for i in range(0, 9):
# axes[i].imshow(x_batch[i])
# # axes[i].set_xticks(())
# # axes[i].set_yticks(())
# plt.show()
# # plt.tight_layout()
# break
# # show_image(x_train)
# def show_augment_image(x_train, y_train, datagen):
# # configure batch size and retrieve one batch of images
# for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32, shuffle = True):
# # create a grid of 4x4 images
# fig, axes = plt.subplots(2, 2)
# axes = axes.flatten()
# for i in range(0, 4):
# # print(x_batch[i].shape)
# # axes[i].imshow(x_batch[i], cmap=plt.get_cmap('rgb'))
# axes[i].imshow(x_batch[i])
# # axes[i].set_xticks(())
# # axes[i].set_yticks(())
# plt.show()
# # plt.tight_layout()
# break
# #show_augment_image(x_train, y_train, datagen)
# # show_augment_image(x_cv, y_cv,datagen_cv)
# # # Pre-process data
# # scaler = MinMaxScaler() # This estimator scales and translates each feature individually such that it is in the given range on the training set, default between(0,1)
# # x_train = x_train.reshape(x_train.shape[0],50*50*3)
# # x_test = x_test.reshape(x_test.shape[0],50*50*3)
# # #x_test = x_test.reshape(x_test.shape[0],100*100*3)
# # x_train = scaler.fit_transform(x_train)
# # x_test = scaler.fit_transform(x_test)
# # #x_test = scaler.transform(x_test)
# # x_train = x_train/255.0
# # x_test = x_test/255.0
# # ########################CNN########################
def CNN_B2(x_train, x_cv, x_test, y_train, y_cv, y_test):
    """Train an AlexNet-style CNN for task B2 (eye colour, 5 classes).

    Parameters are pre-split/pre-processed image tensors and integer labels.
    Returns (train_loss, train_acc, val_loss, val_acc, test_loss, test_acc),
    where the train/val figures are from the final training epoch.
    """
    class AlexNet(Model):
        """Three conv/pool stages followed by two dropout-regularised dense layers."""
        def __init__(self):
            super(AlexNet, self).__init__()
            self.c1 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')
            self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)
            self.c2 = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')
            self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
            self.c3 = Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu')
            self.p3 = MaxPool2D(pool_size=(2, 2), strides=2)
            self.flatten = Flatten()
            self.f1 = Dense(1024, activation='relu')
            self.d1 = Dropout(0.2)
            self.f2 = Dense(2048, activation='relu')
            self.d2 = Dropout(0.2)
            self.f3 = Dense(5, activation='softmax')  # 5 eye-colour classes

        def call(self, x):
            x = self.c1(x)
            x = self.p1(x)
            x = self.c2(x)
            x = self.p2(x)
            x = self.c3(x)
            x = self.p3(x)
            x = self.flatten(x)
            x = self.f1(x)
            x = self.d1(x)
            x = self.f2(x)
            x = self.d2(x)
            y = self.f3(x)
            return y

    model = AlexNet()
    # Stop training once validation loss has not improved for 10 epochs.
    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    opt = keras.optimizers.Adam(learning_rate=0.00005)
    # Fix: the original compiled the model twice with identical arguments;
    # a single compile is sufficient.
    model.compile(optimizer=opt,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=['sparse_categorical_accuracy'])
    epochs = 30
    history = model.fit(x_train, y_train, batch_size=32, epochs=epochs,
                        validation_data=(x_cv, y_cv), validation_freq=1,
                        callbacks=[callback])
    acc = history.history['sparse_categorical_accuracy']
    val_acc = history.history['val_sparse_categorical_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    results = model.evaluate(x_test, y_test)
    test_loss = results[0]
    test_acc = results[1]
    return loss[-1], acc[-1], val_loss[-1], val_acc[-1], test_loss, test_acc
# # # checkpoint_save_path = "./checkpoint/Baseline.ckpt"
# # # if os.path.exists(checkpoint_save_path + '.index'):
# # # print('-------------load the model-----------------')
# # # model.load_weights(checkpoint_save_path)
# # # cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
# # # save_weights_only=True,
# # # save_best_only=True)
# # # #history = model.fit(x_train, y_train, batch_size=32, epochs=15, validation_data=(x_test, y_test), validation_freq=1,
# # # callbacks=[cp_callback])
# # #history = model.fit(x_train, y_train, batch_size=64, epochs=50, validation_data=(x_test, y_test), validation_freq=1, callbacks=[callback])
# # # model.summary()
# # # # print(model.trainable_variables)
# # # file = open('./weights.txt', 'w')
# # # for v in model.trainable_variables:
# # # file.write(str(v.name) + '\n')
# # # file.write(str(v.shape) + '\n')
# # # file.write(str(v.numpy()) + '\n')
# # # file.close()
# # # # ############################################### show ###############################################
# # # 显示训练集和验证集的acc和loss曲线
# # acc = history.history['sparse_categorical_accuracy']
# # val_acc = history.history['val_sparse_categorical_accuracy']
# # loss = history.history['loss']
# # val_loss = history.history['val_loss']
# # plt.subplot(1, 2, 1)
# # plt.plot(acc, label='Training Accuracy')
# # plt.plot(val_acc, label='Validation Accuracy')
# # plt.title('Training and Validation Accuracy')
# # plt.legend()
# # plt.subplot(1, 2, 2)
# # plt.plot(loss, label='Training Loss')
# # plt.plot(val_loss, label='Validation Loss')
# # plt.title('Training and Validation Loss')
# # plt.legend()
# # plt.show()
# # total_time = time.time()-now_time
# # print(total_time)
# # print("Evaluate on tset data")
# # # itr_te=datagen_te.flow(x_test, y_test, batch_size=32)
# # # x_test, y_test = itr_te.next()
# # #results = model.evaluate(x_cv, np.array(list(zip(*y_test)))[0], batch_size=32)
# # results = model.evaluate(x_test, y_test)
# # #results = model.evaluate(itr_te)
# # print("test loss, test acc:", results)
# # ##########################################extra test data
# # # #data = pd.read_csv('./dataset_AMLS_20-21/celeba/labels.csv',sep='\t',header=0)
# # # data_ = pd.read_csv('./dataset_AMLS_20-21_test/cartoon_set_test/labels.csv',sep='\t')
# # # # Y_A0=data['gender']
# # # # Y_A1=data['smiling']
# # # y_B0=data_['eye_color']
# # # y_B1=data_['face_shape']
# # # # for i in range(Y_A0.shape[0]):
# # # # if Y_A0[i] == -1:
# # # # Y_A0[i] = 0
# # # # else:
# # # # Y_A0[i] = 1 #防止标签出现负数,如果这里负数的话后面计算loss就会出现nan
# # # # for i in range(Y_A1.shape[0]):
# # # # if Y_A1[i] == -1:
# # # # Y_A1[i] = 0
# # # # else:
# # # # Y_A1[i] = 1
# # # #图片读取
# # # train_image_ = []
# # # for i in tqdm(range(data_.shape[0])):
# # # #img = image.load_img('./dataset_AMLS_20-21_test/celeba/img/'+str(i)+'.jpg', color_mode='rgb', target_size=(50,50), grayscale=False)
# # # img_ = image.load_img('./dataset_AMLS_20-21_test/cartoon_set_test/img/'+str(i)+'.png', target_size=(100,100), grayscale=False, color_mode='rgb')
# # # # img = img.resize((50,50))
# # # img_ = image.img_to_array(img_)
# # # # img = img/255
# # # train_image_.append(img_)
# # # X_ = np.array(train_image_)
# # # # Pre-process data
# # # scaler = MinMaxScaler() # This estimator scales and translates each feature individually such that it is in the given range on the training set, default between(0,1)
# # # X_ = X_.reshape(X_.shape[0],100*100*3)
# # # X_ = scaler.fit_transform(X_)
# # # X_ = X_.reshape(X_.shape[0],100,100,3)
# # # #X_ = X_/255.0
# # # # print(X_.shape, y_B0.shape)
# # # print("Evaluate on test data")
# # # results = model.evaluate(X_, y_B0, batch_size=32)
# # # print("test loss, test acc:", results) | [
"noreply@github.com"
] | noreply@github.com |
90d22ecc08de8602eac788e6f922fed5d85d7ac9 | ad32b7116b3ff8e0dab14ad9ba0cf35fdb61cca6 | /python/count_leaves_of_binary_tree.py | e10e12eb4a2013095fa64a0b367d361b1efb8292 | [] | no_license | beingnikhilarora/ds-algo | 0e440300cf5978626ab224cc431a30ff734b7bfb | 8925e563a8fd9943a3ed6df0a83a40c89705d2ac | refs/heads/master | 2020-05-15T11:29:27.330318 | 2019-04-22T18:41:36 | 2019-04-22T18:41:36 | 182,230,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | class TreeNode(object):
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def count_leaves(root):
    """Return the number of leaf nodes in the binary tree rooted at *root*."""
    if not root:
        return 0
    # A node with neither child is a leaf.
    if not (root.left or root.right):
        return 1
    # Otherwise the leaf count is the sum over both subtrees.
    return count_leaves(root.left) + count_leaves(root.right)
if __name__ == '__main__':
    # Demo: build a small sample tree and print its leaf count.
    # Leaves are nodes 4 and 6, so the expected output is 2.
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.right = TreeNode(4)
    root.right.left = TreeNode(5)
    root.right.left.left = TreeNode(6)
    print(count_leaves(root))
"beingnikhilarora@gmail.com"
] | beingnikhilarora@gmail.com |
607219c000f7f31a1333d2b772480f3aad169545 | fea6e9d6b20b0c5f2a05a6f2433aae4176b2a00a | /server/applibs/account/tasks/fetch_status.py | 1c80b02e381a041e1e063576ae4ca0441bcb6c7a | [] | no_license | fanshuai/kubrick | fddf6c21bcd500223d9a05bd002e47eb1ecf8839 | b7ed6588e13d2916a4162d56509d2794742a1eb1 | refs/heads/main | 2023-03-24T12:21:44.562850 | 2021-03-19T15:11:40 | 2021-03-19T15:11:40 | 349,445,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | """
验证码发送状态同步
"""
import logging
from kubrick.celery import app
from server.corelib.dealer import deal_time
from server.corelib.notice.async_tasks import send_dd_msg__task
from server.constant.djalias import CQueueAlias
logger = logging.getLogger('kubrick.celery')
@app.task(queue=CQueueAlias.Timed.value)
def fetch_status_pnverify(now=None):
    """Check the delivery status of pending SMS verification codes.

    Polls every PNVerify row still in ``Waiting`` state created within the
    previous ten-minute window, asks the SMS gateway for its final status,
    and sends a DingTalk alert when some rows did not reach a final state.
    Returns a summary dict for the task result backend.

    :param now: optional reference time; defaults to the current time
        inside ``round_floor_ten_mins``.
    """
    # Local imports keep Django model loading out of celery worker start-up.
    from server.constant import mochoice as mc
    from server.applibs.account.models import PNVerify
    # Ten-minute window [time_start, time_end) ending at the last round mark.
    time_start, time_end = deal_time.round_floor_ten_mins(now=now)
    pnv_qs = PNVerify.objects.filter(
        status=mc.SMSStatus.Waiting,
        created_at__gte=time_start,
        created_at__lt=time_end,
    )
    done_count = 0
    waiting_count = pnv_qs.count()
    for pnv in pnv_qs:
        pnv.sms_code_query()  # query the provider; may update pnv's status
        done_count += 1 if pnv.is_status_final else 0
    done_info = f'{time_start} ~ {time_end}: {done_count}/{waiting_count}'
    logger.info(f'fetch_status_pnverify__done {done_info}')
    if done_count != waiting_count:
        # Some codes still lack a final status — notify via DingTalk.
        send_dd_msg__task(f'短信验证码状态检查:{done_info}')
    result = dict(
        task='fetch_status_pnverify',
        done=done_count,
        waiting=waiting_count,
        end_at=time_end.isoformat(),
        start_at=time_start.isoformat(),
    )
    return result
| [
"zfaner@gmail.com"
] | zfaner@gmail.com |
0380da442927af5372a04c57cc92c5eaca31627f | ad04a6364cb38b3951b2c427c5e8c9e554c457ee | /test_kitti_pose.py | e63685520c20982df09c48e7f7b32e4cdcf08c22 | [
"MIT"
] | permissive | Arjung27/SFM-Learner | 5ce428d920a3f1e43e59133af3f795c2676cbaa8 | 8da43b4266dfeb23465d3caad0d23b0c8f457257 | refs/heads/master | 2022-08-14T02:33:49.443046 | 2020-05-20T20:46:51 | 2020-05-20T20:46:51 | 264,470,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,257 | py | from __future__ import division
import os
import math
import scipy.misc
import tensorflow as tf
import numpy as np
from glob import glob
from SfMLearner import SfMLearner
from kitti_eval.pose_evaluation_utils import dump_pose_seq_TUM
flags = tf.app.flags
flags.DEFINE_integer("batch_size", 1, "The size of of a sample batch")
flags.DEFINE_integer("img_height", 128, "Image height")
flags.DEFINE_integer("img_width", 416, "Image width")
flags.DEFINE_integer("seq_length", 3, "Sequence length for each example")
flags.DEFINE_integer("test_seq", 9, "Sequence id to test")
flags.DEFINE_string("dataset_dir", None, "Dataset directory")
flags.DEFINE_string("output_dir", None, "Output directory")
flags.DEFINE_string("ckpt_file", None, "checkpoint file")
FLAGS = flags.FLAGS
def load_image_sequence(dataset_dir,
                        frames,
                        tgt_idx,
                        seq_length,
                        img_height,
                        img_width):
    """Load `seq_length` consecutive frames centred on `tgt_idx` and stack
    them side by side into a single wide image array.

    `frames` entries are "<drive> <frame_id>" strings; each image is read
    from <dataset_dir>/sequences/<drive>/image_2/<frame_id>.png and resized
    to (img_height, img_width).
    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.3;
    this presumably targets an older SciPy with Pillow installed — confirm.
    """
    half_offset = int((seq_length - 1)/2)
    for o in range(-half_offset, half_offset+1):
        curr_idx = tgt_idx + o
        curr_drive, curr_frame_id = frames[curr_idx].split(' ')
        img_file = os.path.join(
            dataset_dir, 'sequences', '%s/image_2/%s.png' % (curr_drive, curr_frame_id))
        curr_img = scipy.misc.imread(img_file)
        curr_img = scipy.misc.imresize(curr_img, (img_height, img_width))
        if o == -half_offset:
            # First frame starts the horizontal strip.
            image_seq = curr_img
        else:
            image_seq = np.hstack((image_seq, curr_img))
    return image_seq
def is_valid_sample(frames, tgt_idx, seq_length):
    """Return True iff a full `seq_length` window centred on `tgt_idx`
    fits inside `frames` and stays within a single drive.

    Each frame string is "<drive> <frame_id>"; the window is valid only
    when its first, centre and last frames share the same drive id.
    """
    total = len(frames)
    half = int((seq_length - 1) / 2)
    lo = tgt_idx - half
    hi = tgt_idx + half
    # Window must lie entirely inside the frame list.
    if lo < 0 or hi >= total:
        return False
    tgt_drive = frames[tgt_idx].split(' ')[0]
    first_drive = frames[lo].split(' ')[0]
    last_drive = frames[hi].split(' ')[0]
    return first_drive == tgt_drive == last_drive
def main():
    """Run pose inference on one KITTI odometry test sequence and dump
    per-sample pose files in TUM format to FLAGS.output_dir.
    """
    sfm = SfMLearner()
    sfm.setup_inference(FLAGS.img_height,
                        FLAGS.img_width,
                        'pose',
                        FLAGS.seq_length)
    saver = tf.train.Saver([var for var in tf.trainable_variables()])
    # Results go in a per-sequence subdirectory of the requested output dir.
    FLAGS.output_dir = os.path.join(FLAGS.output_dir, str(FLAGS.test_seq) + '/')
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    seq_dir = os.path.join(FLAGS.dataset_dir, 'sequences', '%.2d' % FLAGS.test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]
    # times.txt holds one timestamp per frame, one per line.
    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq, 'r') as f:
        times = f.readlines()
    times = np.array([float(s[:-1]) for s in times])
    max_src_offset = (FLAGS.seq_length - 1)//2
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        for tgt_idx in range(N):
            # Skip frames too close to the sequence boundaries.
            if not is_valid_sample(test_frames, tgt_idx, FLAGS.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, N))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(FLAGS.dataset_dir,
                                            test_frames,
                                            tgt_idx,
                                            FLAGS.seq_length,
                                            FLAGS.img_height,
                                            FLAGS.img_width)
            pred = sfm.inference(image_seq[None, :, :, :], sess, mode='pose')
            pred_poses = pred['pose'][0]
            # Insert the target pose [0, 0, 0, 0, 0, 0]
            pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1,6)), axis=0)
            curr_times = times[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
            out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
main()
"arjun.g1511@gmail.com"
] | arjun.g1511@gmail.com |
1e069e901a9d931704594b568c24eb89ab3392b6 | 541fed374b1d1ebff33c42496db84337e06177b6 | /City.py | 4e62852ace82662a8167ce02a534b5b510013eba | [] | no_license | huangruihaocst/tsp-genetic | faaa6654459cfce521f936bd31c5438c19f8d250 | 794be023d698fca41caf797810feb44a0024cdea | refs/heads/master | 2020-03-21T17:47:49.697351 | 2016-05-03T04:42:06 | 2016-05-03T04:42:06 | 138,854,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | class City:
def __init__(self, name, x, y):
self.name = name
self.x = x
self.y = y
| [
"huangruihaocst@126.com"
] | huangruihaocst@126.com |
5e3409f2d51063ee40904ea53ad1d9db1bded724 | f2ff4f3bcb80b1062e95f8c6ece2793cd58b2ecf | /GIT PV/Client-server application 4.4v/Client-server application 4.4v/Server/config.py | d1e188e5366e67d03655b69fd7bdc016086db09c | [] | no_license | Victor2000GIT/PythonApplication | 88c0a6f2394caa9b04e36ad4bfac5cf678937e6e | e1e1af56ad4049159e0635cafaf6015931286688 | refs/heads/master | 2020-12-02T06:25:33.073260 | 2017-07-11T01:01:44 | 2017-07-11T01:01:44 | 96,831,742 | 0 | 0 | null | 2017-07-11T01:01:44 | 2017-07-11T00:01:17 | Tcl | UTF-8 | Python | false | false | 24 | py | ClientCommand = ("", 0)
| [
"wiktor@mail.ru"
] | wiktor@mail.ru |
854ae93892eb1ab90f7c762a359cddc341b04067 | 856668b36edfcd34bac74e062f9990944a9dd0f0 | /recommenders_class.py | 35e76185e65515da2b6928c2cf848865201708de | [] | no_license | fraidaL/SF_DAT_15_WORK | 2b45071d2bdf01f3599ac82635888eae2a7def53 | 889c8361ee412340710eb3ebae4a330ca4c0a29a | refs/heads/master | 2020-05-29T16:03:20.415732 | 2015-08-27T01:28:39 | 2015-08-27T01:28:39 | 37,505,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,207 | py |
"""
Class 18: Recommendation Engines
Content based and Collaborative based filtering
Jaccard Similarity
Modified KNN Algorithm
"""
import pandas as pd
########################################
## Collaborative-Based User Filtering ##
########################################
#read in brands data
user_brands = pd.read_csv('../data/user_brand.csv')
#look at count of stores
user_brands.Store.value_counts()
# Series of user IDs, note the duplicates
user_ids = user_brands.ID
user_ids
# groupby ID to see what each user likes!
# ANSWER HEREEEEEE
user_brands.groupby('ID').Store.value_counts()
# turns my data frame into a dictionary
# where the key is a user ID, and the value is a
# list of stores that the user "likes"
# ANSWER HEREEEEEE
# try it out. User 83065 likes Kohl's and Target
brandsfor['83065']
# User 82983 likes many more!
brandsfor['82983']
########################
## Jaccard Similarity ##
########################
'''
The Jaccard Similarity allows us to compare two sets
If we regard people as merely being a set of brands they prefer
the Jaccard Similarity allows us to compare people
Example. the jaccard similarty between user 82983 and 83065 is .125
because
brandsfor['83065'] == ["Kohl's", 'Target']
brandsfor['82983'] == ['Hanky Panky', 'Betsey Johnson', 'Converse', 'Steve Madden', 'Old Navy', 'Target', 'Nordstrom']
the intersection of these two sets is just set("Target")
the union of the two sets is set(['Target', 'Hanky Panky', 'Betsey Johnson', 'Converse', 'Steve Madden', 'Old Navy', 'Target', 'Nordstrom'])
so the len(intersection) / len(union) = 1 / 8 == .125
EXERCISE: what is the Jaccard Similarity
between user 82956 and user 82963?
# ANSWER == 0.3333333333
'''
brandsfor['82956'] # == ['Diesel', 'Old Navy', 'Crate & Barrel', 'Target']
brandsfor['82963'] # == ['Puma', 'New Balance', 'Old Navy', 'Target']
'''
EXERCISE: Complete the jaccard method below.
It should take in a list of brands, and output the
jaccard similarity between them
This should work with anything in the set, for example
jaccard([1,2,3], [2,3,4,5,6]) == .3333333
HINT: set1 & set2 is the intersection
set1 | set2 is the union
'''
def jaccard(first, second):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Inputs may be any iterables; they are converted to sets first.
    An empty union (both inputs empty) is defined as 0.0 similarity.
    The original body was an unfilled exercise placeholder that
    implicitly returned None; this implements the documented behaviour
    (e.g. jaccard([1,2,3], [2,3,4,5,6]) == 1/3).
    """
    first = set(first)
    second = set(second)
    union = first | second
    if not union:
        return 0.0
    return len(first & second) / len(union)
# try it out!
brandsfor['83065'] # brands for user 83065
brandsfor['82983'] # brands for user 82983
jaccard(brandsfor['83065'], brandsfor['82983'])
jaccard(brandsfor['82956'], brandsfor['82963'])
#######################
### Our Recommender ###
#######################
'''
Our recommender will be a modified KNN collaborative algorithm.
Input: A given user's brands that they like
Output: A set (no repeats) of brand recommendations based on
similar users preferences
1. When a user's brands are given to us, we will calculate the input user's
jaccard similarity with every person in our brandsfor dictionary
2. We will pick the K most similar users and recommend
the brands that they like that the given user doesn't know about
EXAMPLE:
Given User likes ['Target', 'Old Navy', 'Banana Republic', 'H&M']
Outputs: ['Forever 21', 'Gap', 'Steve Madden']
'''
given_user = ['Target', 'Old Navy', 'Banana Republic', 'H&M']
#similarty between user 83065 and given user
brandsfor['83065']
jaccard(brandsfor['83065'], given_user)
# should be 0.2
'''
EXERCISE
Find the similarty between given_user and ALL of our users
output should be a dictionary where
the key is a user id and the value is the jaccard similarity
{...
'83055': 0.25,
'83056': 0.0,
'83058': 0.1111111111111111,
'83060': 0.07894736842105263,
'83061': 0.4,
'83064': 0.25,
'83065': 0.2,
...}
'''
# ANSWER HEREEEEEE
similarities
K = 5 #number of similar users to look at
# Now for the top K most similar users, let's aggregate the brands they like.
# I sort by the jaccard similarty so most similar users are first
# I use the sorted method, but because I'm dorting dictionaries
# I specify the "key" as the value of the dictionary
# the key is what the list should sort on
# so the most similar users end up being on top
# ANSWER HEREEEEEE
# list of K similar users' IDs
most_similar_users
# let's see what some of the most similar users likes
brandsfor[most_similar_users[0]]
brandsfor[most_similar_users[3]]
# Aggregate all brands liked by the K most similar users into a single set
brands_to_recommend = set()
for user in most_similar_users:
# for each user
# ANSWER HEREEEEEE
# add to the set of brands_to_recommend
brands_to_recommend
# UH OH WE HAVE DUPLICATES. Banana Republic, Old Navy, Target are all repeats.
# EXERCISE: use a set difference so brands_to_recommend only has
# brands that given_user hasn't seen yet
# ANSWER HEREEEEEE
# without duplicates
brands_to_recommend
######################
## One Step Further ##
######################
# We can take this one step further and caculate a "score" of recommendation
# We will define the score as being the number of times
# a brand appears within the first K users
brands_to_recommend = []
for user in most_similar_users:
pass
# Use a counter to count the number of times a brand appears
# Now we see Gap has the highest score!
recommend_with_scores
#################################
#### Collaborative Item based ###
#################################
'''
We can also define a similary between items using jaccard similarity.
We can say that the similarity between two items is the jaccard similarity
between the sets of people who like the two brands.
Example: similarity of Gap to Target is:
'''
# filter users by liking Gap
gap_lovers = set(user_brands['Gap' == user_brands.Store].ID)
old_navy_lovers = set(user_brands['Old Navy' == user_brands.Store].ID)
# similarty between Gap and Old Navy
jaccard(gap_lovers, old_navy_lovers)
guess_lovers = set(user_brands['Guess' == user_brands.Store].ID)
# similarty between Gap andGuess
jaccard(guess_lovers, gap_lovers)
calvin_lovers = set(user_brands['Calvin Klein' == user_brands.Store].ID)
# similarty between Gap and Calvin Klein
jaccard(calvin_lovers, gap_lovers)
| [
"fraida.lev@gmail.com"
] | fraida.lev@gmail.com |
08835a2cf6230c6cdf9585aea813e58abddf569b | 2335093cd5940a6e59e6a9334a4e5b683a704f8e | /SugarSyncInstance.py | 8e4e6ac6faa3224d2c91dc65a2a7ef4e0f22bae5 | [] | no_license | HonestQiao/SugarSync-Python-Client | 247a73440e018717842158d6ee3f6d22582ff48e | dfc624a8b3ca9ca8e86775803186c35242aff89a | refs/heads/master | 2021-01-20T19:18:59.376705 | 2012-09-02T07:46:53 | 2012-09-02T07:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# author: Alexander Straube
# author: Lukas Schreiner
#
# For editors/contributors: please add an author tag above -
# for a full list ;-)
#
# For debugging: please use the pudb from
# https://github.com/orsenthil/pudb.git
# because the original does not support python3!
class SugarSyncInstance:
    """Process-wide holder for shared SugarSync client objects.

    Both attributes are class-level slots that other modules assign at
    runtime; they start out unset (None).
    """
    instance = None  # the shared SugarSync client object, once created
    notifier = None  # the shared notifier object, once created
| [
"lukas.schreiner@gmail.com"
] | lukas.schreiner@gmail.com |
2e7e71d751ad855e3a9be19f978880fcf20395bd | 3dd68d711b5cda2231a44c613fed554b8c1031b2 | /pdf_rotate/settings_manager.py | dd2c2aa6dc9196dc20fc61810208f8cf6c2b8176 | [] | no_license | it-walker/pdf_rotate | 36755b383a71c8c420ac04a857d2928551947572 | 2af2808e7cb1d40bd0e950985dc1743f5118732d | refs/heads/master | 2022-04-26T02:22:08.683771 | 2020-04-26T03:27:52 | 2020-04-26T03:27:52 | 258,921,419 | 0 | 0 | null | 2020-04-26T03:27:54 | 2020-04-26T02:33:06 | null | UTF-8 | Python | false | false | 1,221 | py | import yaml
class SettingsManager:
    """Loads PDF-rotation settings from a YAML configuration file."""

    _key_target_file_path = "target_file_path"
    _key_out_file_path = "out_file_path"
    _key_rotate = "rotate"

    def __init__(self):
        # Defaults used until load() succeeds.
        self._target_file_path = ""
        self._out_file_path = ""
        self._rotate = 0

    @property
    def target_file_path(self):
        """Path of the PDF file to rotate."""
        return self._target_file_path

    @property
    def out_file_path(self):
        """Path the rotated PDF is written to."""
        return self._out_file_path

    @property
    def rotate(self):
        """Rotation angle to apply."""
        return self._rotate

    def load(self, yaml_path):
        """Populate the settings from *yaml_path* (config.yaml).

        Prints the offending path and re-raises on any read/parse error.
        """
        try:
            with open(yaml_path, "r", encoding="utf-8") as f:
                settings = yaml.load(stream=f, Loader=yaml.SafeLoader)
            self._target_file_path = settings[self._key_target_file_path]
            self._out_file_path = settings[self._key_out_file_path]
            self._rotate = settings[self._key_rotate]
        except BaseException:
            print("error", yaml_path)
            raise
raise
| [
"myhome.satoh+backlog@gmail.com"
] | myhome.satoh+backlog@gmail.com |
86b103721a71c81bb1ec8da79ea8625a844315db | 3eda1cfd85cc60aa49922256a632eb8d9ea81954 | /api/utils/decorators.py | cf4e87517c3269dd6ad8478acf4da1dd04becbf1 | [] | no_license | akshaybabu09/employee-device-manager | 1981b9f2bd5abb8b36d3da2a392b7d40017cb219 | 34ef45d8558741d9707271b48579e78f707f0a07 | refs/heads/master | 2022-09-27T05:08:41.943416 | 2019-12-17T05:06:39 | 2019-12-17T05:06:39 | 227,947,908 | 1 | 0 | null | 2022-09-16T18:14:44 | 2019-12-14T01:12:34 | Python | UTF-8 | Python | false | false | 773 | py | from functools import wraps
import jwt
from flask import request, Response
from api import app
from api.employee.models import Employee
from api.utils.status_codes import UNAUTHORIZED
def token_required(f):
    """Decorator requiring a valid JWT in the ``access-token`` request header.

    On success the authenticated Employee is passed to the wrapped view as
    its first positional argument; otherwise an UNAUTHORIZED response is
    returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if 'access-token' in request.headers:
            token = request.headers['access-token']
        if not token:
            return Response('Token is missing!', UNAUTHORIZED)
        try:
            # NOTE(review): PyJWT >= 2.0 requires an explicit algorithms=
            # argument here — confirm the pinned PyJWT version.
            data = jwt.decode(token, app.config['SECRET_KEY'])
            current_user = Employee.query.filter_by(id=data['emp_id']).first()
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.
            return Response('Token is invalid!', UNAUTHORIZED)
        return f(current_user, *args, **kwargs)
    return decorated
| [
"akshay@loktra.com"
] | akshay@loktra.com |
23007d4246cb908e0036ea6dbb78f3bc8a4a723d | 125b42b9f42b3a45838b15afb2a25fec3113e05c | /Ex4/pal.py | 7aff42612342e89abe739d584dd557a460f98db7 | [] | no_license | kcanez/ProjEulerCode | 1b50f16fc658792fb85f93da8fdf21a7acebdf9d | c61ca29f380f9e28e7f677e4baa3be7f8f27d6a7 | refs/heads/master | 2021-01-10T08:49:03.988335 | 2016-02-16T00:39:52 | 2016-02-16T00:39:52 | 50,868,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import math
MAX = 999
i = MAX
j = MAX
maxPal = 0
def isPal(num):
    """Return True when the decimal representation of *num* is a palindrome."""
    digits = str(num)
    # Equivalent to the two-pointer scan: a string equals its reverse
    # exactly when every mirrored pair of characters matches.
    return digits == digits[::-1]
# Project Euler 4: find the largest palindrome that is a product of two
# 3-digit numbers. (Nesting restored — it was flattened in the dump.)
for i in range(999, 0, -1):
    for j in range(999, 0, -1):
        if isPal(i * j):
            if maxPal < i * j:
                maxPal = i * j
print(maxPal)
| [
"kevin.canez@gmail.com"
] | kevin.canez@gmail.com |
af9738f6a4a38219406718a295ea78a732a3232d | a5205843ab0c6cff8f76f32436c580cfd523e9ad | /edit_sample_craps.py | cb01ef33a0829d35b2b1f5ee2d59d478e474790b | [] | no_license | LRBeaver/Random | 70194cde5d26b5e268d7c245056cedc8d0a6618d | 90ec0036a4efb383d6496a7724a108aa1b2f2ddf | refs/heads/master | 2020-12-24T18:42:37.716951 | 2016-04-14T12:52:56 | 2016-04-14T12:52:56 | 56,150,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | __author__ = 'lyndsay.beaver'
import random
def playRound():
print("The come-out phase: ")
print()
rollDice = input("Hit ENTER to roll the dice...")
diceTotal = random.randint(1,6) + random.randint(1,6)
if diceTotal in (7,11):
print("You rolled a", diceTotal)
print("You Win: Natural!")
elif diceTotal in (2,3,12):
print("You rolled a", diceTotal)
print("You Lose: Crap-Out!")
else:
print("You rolled a", diceTotal)
pointPhase(diceTotal)
def pointPhase(diceTotal):
print("The Point Phase:")
rollDice = input("Hit ENTER to roll the dice...")
diceTotalPoint = random.randint(1,6) + random.randint(1,6)
while diceTotalPoint not in (7, diceTotal):
diceTotalPoint = random.randint(1,6) + random.randint(1,6)
if diceTotalPoint == diceTotal:
print("You Rolled a", diceTotalPoint)
print("You Win: Hit!")
break
elif diceTotalPoint == 7:
print("You Rolled a", diceTotalPoint)
print("You lose: Seven-Out!")
else:
print("Keep Rolling")
def main():
playRound()
main() | [
"lrbeaver@gmail.com"
] | lrbeaver@gmail.com |
bac299c3cb313b52052370dda678e88f817e000f | 95c3b632c9f03983f4dcc277756058df71fa510f | /text2vec/tokenizer.py | f150e2b3ce5e4ac62f2a6de82dc941415fbdf5c6 | [] | no_license | gaojing8500/Zero2OneBuildMachineLearningModel | c3c897d072b27d991419d830e2c35d04e34c637c | 60963daa6ca6af853621d246d5286ee0da77828a | refs/heads/main | 2023-08-19T19:05:20.799276 | 2021-10-12T11:25:20 | 2021-10-12T11:25:20 | 377,815,343 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | from typing import List
import jieba
import json
##hanlp还是比较麻烦的
# from pyhanlp import *
class Tokenizer(object):
def __init__(self):
self.name = "Jieba hanlp allennlp corenlp"
def __str__(self):
return self.name
def tokenize(self, sentences, tokenize_label):
if tokenize_label == "jieba":
seg_list = []
if isinstance(sentences, List):
for sentence in sentences:
seg_object = jieba.cut(sentence, cut_all=False)
seg_list.append(",".join(seg_object).split(","))
return seg_list
if isinstance(sentences, str):
seg_list = jieba.cut(sentences, cut_all=False)
return ",".join(seg_list)
if tokenize_label == "hanlp":
if isinstance(sentences, str):
return "None"
| [
"jing.gao01@united-imaging.com"
] | jing.gao01@united-imaging.com |
290a3b4f08969bef3f76e2e5fecc22d486567154 | 4659ef75716389c6f9280c609cc1e639da79d9ee | /libs/x86_64/matplotlib2/animation.py | 2763cf16c9392054d9b936b0dd4215801f516e56 | [] | no_license | vvkond/QGIS | cf2ea68f53921010916f999dd34dade190eaa22a | 302dbc948de3163ec74d47164c5e6e1f8a6f7df0 | refs/heads/master | 2021-06-03T22:37:34.290064 | 2020-12-03T04:33:03 | 2020-12-03T04:33:03 | 107,931,779 | 1 | 1 | null | 2019-09-20T04:16:34 | 2017-10-23T04:23:30 | Python | UTF-8 | Python | false | false | 67,441 | py | # TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/matplotlib2/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib2 animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import abc
import contextlib
from io import BytesIO
import itertools
import logging
import os
import platform
import sys
import tempfile
import uuid
import numpy as np
from matplotlib2._animation_data import (DISPLAY_TEMPLATE, INCLUDED_FRAMES,
JS_INCLUDE)
from matplotlib2.compat import subprocess
from matplotlib2 import cbook, rcParams, rcParamsDefault, rc_context
if six.PY2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
# Module-level logger used for all writer/animation diagnostics below.
_log = logging.getLogger(__name__)

# Process creation flag for subprocess to prevent it raising a terminal
# window. See for example:
# https://stackoverflow.com/questions/24130623/using-python-subprocess-popen-cant-prevent-exe-stopped-working-prompt
if platform.system() == 'Windows':
    # CREATE_NO_WINDOW only exists (and is only meaningful) on Windows;
    # it is passed as Popen(creationflags=...) by the writers below.
    subprocess_creation_flags = CREATE_NO_WINDOW = 0x08000000
else:
    # Apparently None won't work here
    subprocess_creation_flags = 0
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
def adjusted_figsize(w, h, dpi, n):
    '''Compute figure size so that pixels are a multiple of n

    Parameters
    ----------
    w, h : float
        Size in inches
    dpi : float
        The dpi
    n : int
        The target multiple

    Returns
    -------
    wnew, hnew : float
        The new figure size in inches.
    '''
    def snap(size):
        # Round the inch size down so that size*dpi is a multiple of n.
        return int(size * dpi / n) * n / dpi

    def fix_roundoff(size):
        # Floating point round-off may leave int(size*dpi) just off a
        # multiple of n; if a neighbouring representable float lands
        # exactly on a multiple, use it instead.
        if int(size * dpi) % n == 0:
            return size
        up = np.nextafter(size, np.inf)
        if int(up * dpi) % n == 0:
            return up
        down = np.nextafter(size, -np.inf)
        if int(down * dpi) % n == 0:
            return down
        return size

    return fix_roundoff(snap(w)), fix_roundoff(snap(h))
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
    '''Registry mapping human readable names to MovieWriter classes.'''

    def __init__(self):
        # avail holds only writers whose isAvailable() returned True;
        # _registered holds everything ever registered.
        self.avail = dict()
        self._registered = dict()
        self._dirty = False

    def set_dirty(self):
        """Mark the registry so availability is re-checked on next access."""
        self._dirty = True

    def register(self, name):
        """Decorator for registering a class under a name.

        Example use::

            @registry.register(name)
            class Foo:
                pass
        """
        def wrapper(writerClass):
            self._registered[name] = writerClass
            if writerClass.isAvailable():
                self.avail[name] = writerClass
            return writerClass
        return wrapper

    def ensure_not_dirty(self):
        """If dirty, re-ask the registered writers whether they are
        available."""
        if self._dirty:
            self.reset_available_writers()

    def reset_available_writers(self):
        """Rebuild the availability table from all registered writers."""
        self.avail = {name: cls
                      for name, cls in self._registered.items()
                      if cls.isAvailable()}
        self._dirty = False

    def list(self):
        '''Get a list of available MovieWriters.'''
        self.ensure_not_dirty()
        return list(self.avail)

    def is_available(self, name):
        '''Check if the writer registered under *name* is available.

        Parameters
        ----------
        name : str

        Returns
        -------
        available : bool
        '''
        self.ensure_not_dirty()
        return name in self.avail

    def __getitem__(self, name):
        self.ensure_not_dirty()
        if not self.avail:
            raise RuntimeError("No MovieWriters available!")
        if name not in self.avail:
            raise RuntimeError(
                'Requested MovieWriter ({}) not available'.format(name))
        return self.avail[name]


# Single module-level registry instance; writers below register into it.
writers = MovieWriterRegistry()
# six.with_metaclass keeps the ABCMeta metaclass working on both Py2 and Py3.
class AbstractMovieWriter(six.with_metaclass(abc.ABCMeta)):
    '''
    Abstract base class for writing movies. Fundamentally, what a MovieWriter
    does is provide a way to grab frames by calling grab_frame().

    setup() is called to start the process and finish() is called afterwards.

    This class is set up to provide for writing movie frame data to a pipe.
    saving() is provided as a context manager to facilitate this process as::

        with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100):
            # Iterate over frames
            moviewriter.grab_frame(**savefig_kwargs)

    The use of the context manager ensures that setup() and finish() are
    performed as necessary.

    An instance of a concrete subclass of this class can be given as the
    ``writer`` argument of `Animation.save()`.
    '''

    @abc.abstractmethod
    def setup(self, fig, outfile, dpi=None):
        '''
        Perform setup for writing the movie file.

        Parameters
        ----------
        fig: `matplotlib2.figure.Figure` instance
            The figure object that contains the information for frames
        outfile: string
            The filename of the resulting movie file
        dpi: int, optional
            The DPI (or resolution) for the file. This controls the size
            in pixels of the resulting movie file. Default is ``fig.dpi``.
        '''

    @abc.abstractmethod
    def grab_frame(self, **savefig_kwargs):
        '''
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in savefig_kwargs are passed on to the `savefig`
        command that saves the figure.
        '''

    @abc.abstractmethod
    def finish(self):
        '''Finish any processing for writing the movie.'''

    @contextlib.contextmanager
    def saving(self, fig, outfile, dpi, *args, **kwargs):
        '''
        Context manager to facilitate writing the movie file.

        ``*args, **kw`` are any parameters that should be passed to `setup`.
        '''
        # This particular sequence is what contextlib.contextmanager wants:
        # setup before the yield, and finish() in a finally so it runs even
        # when the with-body raises.
        self.setup(fig, outfile, dpi, *args, **kwargs)
        try:
            yield self
        finally:
            self.finish()
class MovieWriter(AbstractMovieWriter):
    '''Base class for writing movies.

    This class is set up to provide for writing movie frame data to a pipe.
    See examples for how to use these classes.

    Attributes
    ----------
    frame_format : str
        The format used in writing frame data, defaults to 'rgba'
    fig : `~matplotlib2.figure.Figure`
        The figure to capture data from.
        This must be provided by the sub-classes.
    '''

    def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
                 metadata=None):
        '''MovieWriter

        Parameters
        ----------
        fps : int
            Framerate for movie.
        codec : string or None, optional
            The codec to use. If ``None`` (the default) the
            ``animation.codec`` rcParam is used.
        bitrate : int or None, optional
            The bitrate for the saved movie file, which is one way to
            control the output file size and quality. The default value is
            ``None``, which uses the ``animation.bitrate`` rcParam. A value
            of -1 implies that the bitrate should be determined
            automatically by the underlying utility.
        extra_args : list of strings or None, optional
            A list of extra string arguments to be passed to the underlying
            movie utility. The default is ``None``, which passes the
            additional arguments in the ``animation.extra_args`` rcParam.
        metadata : Dict[str, str] or None
            A dictionary of keys and values for metadata to include in the
            output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        '''
        self.fps = fps
        self.frame_format = 'rgba'

        # Each option falls back on its rcParam when not given explicitly.
        if codec is None:
            self.codec = rcParams['animation.codec']
        else:
            self.codec = codec

        if bitrate is None:
            self.bitrate = rcParams['animation.bitrate']
        else:
            self.bitrate = bitrate

        if extra_args is None:
            # Copy the rcParam list so later mutation of self.extra_args
            # cannot leak back into the global rcParams.
            self.extra_args = list(rcParams[self.args_key])
        else:
            self.extra_args = extra_args

        if metadata is None:
            self.metadata = dict()
        else:
            self.metadata = metadata

    @property
    def frame_size(self):
        '''A tuple ``(width, height)`` in pixels of a movie frame.'''
        w, h = self.fig.get_size_inches()
        return int(w * self.dpi), int(h * self.dpi)

    def _adjust_frame_size(self):
        # h264 requires even pixel dimensions, so nudge the figure size
        # until width and height in pixels are both multiples of 2.
        if self.codec == 'h264':
            wo, ho = self.fig.get_size_inches()
            w, h = adjusted_figsize(wo, ho, self.dpi, 2)
            if not (wo, ho) == (w, h):
                self.fig.set_size_inches(w, h, forward=True)
                _log.info('figure size (inches) has been adjusted '
                          'from %s x %s to %s x %s', wo, ho, w, h)
        else:
            w, h = self.fig.get_size_inches()
        _log.debug('frame size in pixels is %s x %s', *self.frame_size)
        return w, h

    def setup(self, fig, outfile, dpi=None):
        '''
        Perform setup for writing the movie file.

        Parameters
        ----------
        fig : matplotlib2.figure.Figure
            The figure object that contains the information for frames
        outfile : string
            The filename of the resulting movie file
        dpi : int, optional
            The DPI (or resolution) for the file. This controls the size
            in pixels of the resulting movie file. Default is fig.dpi.
        '''
        self.outfile = outfile
        self.fig = fig
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi
        self._w, self._h = self._adjust_frame_size()

        # Run here so that grab_frame() can write the data to a pipe. This
        # eliminates the need for temp files.
        self._run()

    def _run(self):
        # Uses subprocess to call the program for assembling frames into a
        # movie file. *args* returns the sequence of command line arguments
        # from a few configuration options.
        command = self._args()
        output = subprocess.PIPE
        _log.info('MovieWriter.run: running command: %s', command)
        self._proc = subprocess.Popen(command, shell=False,
                                      stdout=output, stderr=output,
                                      stdin=subprocess.PIPE,
                                      creationflags=subprocess_creation_flags)

    def finish(self):
        '''Finish any processing for writing the movie.'''
        self.cleanup()

    def grab_frame(self, **savefig_kwargs):
        '''
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in savefig_kwargs are passed on to the
        `savefig` command that saves the figure.
        '''
        _log.debug('MovieWriter.grab_frame: Grabbing frame.')
        try:
            # re-adjust the figure size in case it has been changed by the
            # user. We must ensure that every frame is the same size or
            # the movie will not save correctly.
            self.fig.set_size_inches(self._w, self._h)
            # Tell the figure to save its data to the sink, using the
            # frame format and dpi.
            self.fig.savefig(self._frame_sink(), format=self.frame_format,
                             dpi=self.dpi, **savefig_kwargs)
        except (RuntimeError, IOError) as e:
            out, err = self._proc.communicate()
            _log.info('MovieWriter -- Error '
                      'running proc:\n%s\n%s' % (out, err))
            raise IOError('Error saving animation to file (cause: {0}) '
                          'Stdout: {1} StdError: {2}. It may help to re-run '
                          'with logging level set to '
                          'DEBUG.'.format(e, out, err))

    def _frame_sink(self):
        '''Returns the place to which frames should be written.'''
        # Frames are piped straight into the encoder's stdin.
        return self._proc.stdin

    def _args(self):
        '''Assemble list of utility-specific command-line arguments.'''
        # BUGFIX: this previously *returned* a NotImplementedError instance,
        # which _run() would then silently hand to Popen as the command.
        # Raising makes a missing subclass override fail loudly instead.
        raise NotImplementedError("args needs to be implemented by subclass.")

    def cleanup(self):
        '''Clean-up and collect the process used to write the movie file.'''
        out, err = self._proc.communicate()
        self._frame_sink().close()
        _log.debug('MovieWriter -- Command stdout:\n%s', out)
        _log.debug('MovieWriter -- Command stderr:\n%s', err)

    @classmethod
    def bin_path(cls):
        '''
        Returns the binary path to the commandline tool used by a specific
        subclass. This is a class method so that the tool can be looked for
        before making a particular MovieWriter subclass available.
        '''
        return str(rcParams[cls.exec_key])

    @classmethod
    def isAvailable(cls):
        '''
        Check to see if a MovieWriter subclass is actually available by
        running the commandline tool.
        '''
        bin_path = cls.bin_path()
        if not bin_path:
            return False
        try:
            p = subprocess.Popen(
                bin_path,
                shell=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                creationflags=subprocess_creation_flags)
            return cls._handle_subprocess(p)
        except OSError:
            # Binary not found or not executable.
            return False

    @classmethod
    def _handle_subprocess(cls, process):
        # Default availability probe: just reap the process and report
        # success; subclasses inspect the output further.
        process.communicate()
        return True
class FileMovieWriter(MovieWriter):
    '''`MovieWriter` for writing to individual files and stitching at the end.

    Unlike `MovieWriter`, frames are written to numbered temporary files on
    disk first; the external tool is only run once, in `finish`, to stitch
    them together. This must be sub-classed to be useful.
    '''
    def __init__(self, *args, **kwargs):
        MovieWriter.__init__(self, *args, **kwargs)
        # File-based writers take their frame format from the rcParam
        # rather than the 'rgba' default set by MovieWriter.__init__.
        self.frame_format = rcParams['animation.frame_format']

    def setup(self, fig, outfile, dpi=None, frame_prefix='_tmp',
              clear_temp=True):
        '''Perform setup for writing the movie file.

        Parameters
        ----------
        fig : matplotlib2.figure.Figure
            The figure to grab the rendered frames from.
        outfile : str
            The filename of the resulting movie file.
        dpi : number, optional
            The dpi of the output file. This, with the figure size,
            controls the size in pixels of the resulting movie file.
            Default is fig.dpi.
        frame_prefix : str, optional
            The filename prefix to use for temporary files. Defaults to
            ``'_tmp'``.
        clear_temp : bool, optional
            If the temporary files should be deleted after stitching
            the final result. Setting this to ``False`` can be useful for
            debugging. Defaults to ``True``.

        '''
        self.fig = fig
        self.outfile = outfile
        if dpi is None:
            dpi = self.fig.dpi
        self.dpi = dpi
        self._adjust_frame_size()

        self.clear_temp = clear_temp
        self.temp_prefix = frame_prefix
        self._frame_counter = 0  # used for generating sequential file names
        self._temp_names = list()
        # printf-style pattern: prefix + zero-padded 7-digit counter + ext.
        self.fname_format_str = '%s%%07d.%s'

    @property
    def frame_format(self):
        '''
        Format (png, jpeg, etc.) to use for saving the frames, which can be
        decided by the individual subclasses.
        '''
        return self._frame_format

    @frame_format.setter
    def frame_format(self, frame_format):
        # Silently fall back to the subclass's first supported format if an
        # unsupported one is requested.
        if frame_format in self.supported_formats:
            self._frame_format = frame_format
        else:
            self._frame_format = self.supported_formats[0]

    def _base_temp_name(self):
        # Generates a template name (without number) given the frame format
        # for extension and the prefix.
        return self.fname_format_str % (self.temp_prefix, self.frame_format)

    def _frame_sink(self):
        # Creates a filename for saving using the basename and the current
        # counter.
        fname = self._base_temp_name() % self._frame_counter

        # Save the filename so we can delete it later if necessary
        self._temp_names.append(fname)
        _log.debug('FileMovieWriter.frame_sink: saving frame %d to fname=%s',
                   self._frame_counter, fname)
        self._frame_counter += 1  # Ensures each created name is 'unique'

        # This file returned here will be closed once it's used by savefig()
        # because it will no longer be referenced and will be gc-ed.
        return open(fname, 'wb')

    def grab_frame(self, **savefig_kwargs):
        '''
        Grab the image information from the figure and save as a movie frame.

        All keyword arguments in savefig_kwargs are passed on to the `savefig`
        command that saves the figure.
        '''
        # Overloaded to explicitly close temp file.
        _log.debug('MovieWriter.grab_frame: Grabbing frame.')
        try:
            # Tell the figure to save its data to the sink, using the
            # frame format and dpi.
            with self._frame_sink() as myframesink:
                self.fig.savefig(myframesink, format=self.frame_format,
                                 dpi=self.dpi, **savefig_kwargs)
        except RuntimeError:
            out, err = self._proc.communicate()
            _log.info('MovieWriter -- Error '
                      'running proc:\n%s\n%s' % (out, err))
            raise

    def finish(self):
        # Call run here now that all frame grabbing is done. All temp files
        # are available to be assembled.
        self._run()
        MovieWriter.finish(self)  # Will call clean-up

        # Check error code for creating file here, since we just run
        # the process here, rather than having an open pipe.
        if self._proc.returncode:
            # Best-effort log dump; _stdout_buff/_stderr_buff are not
            # standard Popen attributes (NOTE(review): presumably set by a
            # wrapped Popen elsewhere -- confirm), hence the broad except.
            try:
                stdout = [s.decode() for s in self._proc._stdout_buff]
                stderr = [s.decode() for s in self._proc._stderr_buff]
                _log.info("MovieWriter.finish: stdout: %s", stdout)
                _log.info("MovieWriter.finish: stderr: %s", stderr)
            except Exception as e:
                pass
            raise RuntimeError('Error creating movie, return code: {}'
                               .format(self._proc.returncode))

    def cleanup(self):
        MovieWriter.cleanup(self)

        # Delete temporary files
        if self.clear_temp:
            _log.debug('MovieWriter: clearing temporary fnames=%s',
                       self._temp_names)
            for fname in self._temp_names:
                os.remove(fname)
@writers.register('pillow')
class PillowWriter(MovieWriter):
    '''Writer that assembles frames in memory with Pillow and saves them
    as a multi-frame image (format determined by the output file name).

    Requires no external command-line tool, only the PIL/Pillow package.
    '''

    @classmethod
    def isAvailable(cls):
        # Available whenever Pillow can be imported; no external binary
        # is involved.
        try:
            import PIL
        except ImportError:
            return False
        return True

    def __init__(self, *args, **kwargs):
        # No command line is built, so default extra_args to an empty
        # tuple instead of reading the ffmpeg-oriented rcParam.
        if kwargs.get("extra_args") is None:
            kwargs["extra_args"] = ()
        super(PillowWriter, self).__init__(*args, **kwargs)

    def setup(self, fig, outfile, dpi=None):
        # Unlike MovieWriter.setup, no subprocess is started; frames are
        # accumulated in self._frames until finish().
        self._frames = []
        self._outfile = outfile
        self._dpi = dpi
        self._fig = fig

    def grab_frame(self, **savefig_kwargs):
        from PIL import Image
        buf = BytesIO()
        # Render the figure as raw RGBA bytes and wrap them in a PIL image
        # sized from the canvas renderer.
        self._fig.savefig(buf, **dict(savefig_kwargs, format="rgba"))
        renderer = self._fig.canvas.get_renderer()
        # Using frombuffer / getbuffer may be slightly more efficient, but
        # Py3-only.
        self._frames.append(Image.frombytes(
            "RGBA",
            (int(renderer.width), int(renderer.height)),
            buf.getvalue()))

    def finish(self):
        # Save the first frame with the rest appended; duration is the
        # per-frame display time in milliseconds.
        self._frames[0].save(
            self._outfile, save_all=True, append_images=self._frames[1:],
            duration=int(1000 / self.fps))
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase(object):
    '''Mixin class for FFMpeg output.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    '''

    exec_key = 'animation.ffmpeg_path'
    args_key = 'animation.ffmpeg_args'

    @property
    def output_args(self):
        # Output-side portion of the ffmpeg command line: codec, pixel
        # format, bitrate, user extras, metadata, then the output file.
        args = ['-vcodec', self.codec]
        # For h264, the default format is yuv444p, which is not compatible
        # with quicktime (and others). Specifying yuv420p fixes playback on
        # iOS, as well as HTML5 video in firefox and safari (on both Win
        # and OSX). Also fixes internet explorer. This is as of 2015/10/29.
        if self.codec == 'h264' and '-pix_fmt' not in self.extra_args:
            args += ['-pix_fmt', 'yuv420p']
        if self.bitrate > 0:
            # The %dk adds 'k' as a suffix so that ffmpeg treats our
            # bitrate as in kbps.
            args += ['-b', '%dk' % self.bitrate]
        if self.extra_args:
            args += self.extra_args
        for key, val in six.iteritems(self.metadata):
            args += ['-metadata', '%s=%s' % (key, val)]
        # '-y' makes ffmpeg overwrite the output file without prompting.
        return args + ['-y', self.outfile]

    @classmethod
    def _handle_subprocess(cls, process):
        _, err = process.communicate()

        # Ubuntu 12.04 ships a broken ffmpeg binary which we shouldn't use
        # NOTE : when removed, remove the same method in AVConvBase.
        return 'Libav' not in err.decode()
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(FFMpegBase, MovieWriter):
    '''Pipe-based ffmpeg writer.

    Frames are streamed directly to ffmpeg via a pipe and written in a single
    pass.
    '''
    def _args(self):
        # ffmpeg reads raw frames from stdin ('pipe:'); the options before
        # '-i' describe that raw input stream.
        args = [self.bin_path(),
                '-f', 'rawvideo', '-vcodec', 'rawvideo',
                '-s', '%dx%d' % self.frame_size,
                '-pix_fmt', self.frame_format,
                '-r', str(self.fps)]
        # Logging is quieted because subprocess.PIPE has limited buffer size.
        # If you have a lot of frames in your animation and set logging to
        # DEBUG, you will have a buffer overrun.
        if _log.getEffectiveLevel() > logging.DEBUG:
            args += ['-loglevel', 'quiet']
        return args + ['-i', 'pipe:'] + self.output_args
# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
    '''File-based ffmpeg writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.
    '''
    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']

    def _args(self):
        # ffmpeg reads the numbered temp-frame files directly, so the
        # input is the printf-style name pattern rather than a pipe.
        args = [self.bin_path(),
                '-r', str(self.fps),
                '-i', self._base_temp_name(),
                '-vframes', str(self._frame_counter)]
        return args + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
    '''Mixin class for avconv output.

    avconv takes the same command-line arguments as ffmpeg, so everything
    except the rcParam lookup keys is inherited from `FFMpegBase`.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    '''

    exec_key = 'animation.avconv_path'
    args_key = 'animation.avconv_args'

    # NOTE : should be removed when the same method is removed in FFMpegBase.
    @classmethod
    def _handle_subprocess(cls, process):
        # Revert to the plain MovieWriter probe, bypassing FFMpegBase's
        # rejection of 'Libav' output (avconv *is* the Libav tool).
        return MovieWriter._handle_subprocess(process)
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
    '''Pipe-based avconv writer.

    Frames are streamed directly to avconv via a pipe and written in a single
    pass. All behavior comes from the `AVConvBase` and `FFMpegWriter`
    mixins; this class only binds them together under the 'avconv' name.
    '''
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
    '''File-based avconv writer.

    Frames are written to temporary files on disk and then stitched
    together at the end. All behavior comes from the `AVConvBase` and
    `FFMpegFileWriter` mixins; this class only binds them together under
    the 'avconv_file' name.
    '''
# Base class for animated GIFs with convert utility
class ImageMagickBase(object):
    '''Mixin class for ImageMagick output.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    '''

    exec_key = 'animation.convert_path'
    args_key = 'animation.convert_args'

    @property
    def delay(self):
        # Inter-frame delay for convert's -delay option; ImageMagick
        # measures it in ticks of 1/100 s, hence 100/fps.
        return 100. / self.fps

    @property
    def output_args(self):
        # convert takes no output-side options beyond the file name itself.
        return [self.outfile]

    @classmethod
    def _init_from_registry(cls):
        # Windows only: when the rcParam still holds the bare default
        # 'convert', resolve the real ImageMagick binary path from the
        # registry, probing the native, 32-bit, and 64-bit registry views.
        if sys.platform != 'win32' or rcParams[cls.exec_key] != 'convert':
            return
        from six.moves import winreg
        for flag in (0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY):
            try:
                hkey = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE,
                                        'Software\\Imagemagick\\Current',
                                        0, winreg.KEY_QUERY_VALUE | flag)
                binpath = winreg.QueryValueEx(hkey, 'BinPath')[0]
                winreg.CloseKey(hkey)
                binpath += '\\convert.exe'
                break
            except Exception:
                # Key not present in this view; leave the path empty so
                # isAvailable() will report the writer as unavailable.
                binpath = ''
        rcParams[cls.exec_key] = rcParamsDefault[cls.exec_key] = binpath

    @classmethod
    def isAvailable(cls):
        '''
        Check to see if a ImageMagickWriter is actually available.

        Done by first checking the windows registry (if applicable) and then
        running the commandline tool.
        '''
        bin_path = cls.bin_path()
        if bin_path == "convert":
            cls._init_from_registry()
        return super(ImageMagickBase, cls).isAvailable()

# Resolve the Windows registry path once at import time so the writers
# registered below see the corrected rcParam when they probe availability.
ImageMagickBase._init_from_registry()
# Note: the base classes need to be in that order to get
# isAvailable() from ImageMagickBase called and not the
# one from MovieWriter. The latter is then called by the
# former.
@writers.register('imagemagick')
class ImageMagickWriter(ImageMagickBase, MovieWriter):
    '''Pipe-based animated gif.

    Frames are streamed directly to ImageMagick via a pipe and written
    in a single pass.
    '''
    def _args(self):
        # Frames arrive on stdin ('%s:-'), described by the size/depth
        # options in front; the output file comes from
        # ImageMagickBase.output_args.
        args = [self.bin_path(),
                '-size', '%ix%i' % self.frame_size,
                '-depth', '8',
                '-delay', str(self.delay),
                '-loop', '0',
                '%s:-' % self.frame_format]
        return args + self.output_args
# Note: the base classes need to be in that order to get
# isAvailable() from ImageMagickBase called and not the
# one from MovieWriter. The latter is then called by the
# former.
@writers.register('imagemagick_file')
class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
    '''File-based animated gif writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.
    '''

    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']

    def _args(self):
        # The glob pattern matches every numbered temp frame written by
        # FileMovieWriter for this animation.
        args = [self.bin_path(),
                '-delay', str(self.delay),
                '-loop', '0',
                '%s*.%s' % (self.temp_prefix, self.frame_format)]
        return args + self.output_args
# Taken directly from jakevdp's JSAnimation package at
# http://github.com/jakevdp/JSAnimation
def _included_frames(frame_list, frame_format):
    """frame_list should be a list of filenames"""
    # All frames live in one directory (FileMovieWriter writes them there),
    # so the template only needs the directory of the first frame, the
    # frame count, and the image format.
    fields = dict(Nframes=len(frame_list),
                  frame_dir=os.path.dirname(frame_list[0]),
                  frame_format=frame_format)
    return INCLUDED_FRAMES.format(**fields)
def _embedded_frames(frame_list, frame_format):
"""frame_list should be a list of base64-encoded png files"""
template = ' frames[{0}] = "data:image/{1};base64,{2}"\n'
return "\n" + "".join(
template.format(i, frame_format, frame_data.replace('\n', '\\\n'))
for i, frame_data in enumerate(frame_list))
@writers.register('html')
class HTMLWriter(FileMovieWriter):
    '''Writer that produces a standalone HTML/JavaScript animation viewer.

    Frames are either embedded in the HTML file as base64 data URIs
    (``embed_frames=True``, subject to ``embed_limit``) or written to a
    sibling frame directory and referenced by file name.
    '''
    supported_formats = ['png', 'jpeg', 'tiff', 'svg']
    args_key = 'animation.html_args'

    @classmethod
    def isAvailable(cls):
        # Pure-Python output; no external tool is needed.
        return True

    def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
                 metadata=None, embed_frames=False, default_mode='loop',
                 embed_limit=None):
        self.embed_frames = embed_frames
        self.default_mode = default_mode.lower()

        # Save embed limit, which is given in MB
        if embed_limit is None:
            self._bytes_limit = rcParams['animation.embed_limit']
        else:
            self._bytes_limit = embed_limit
        # Convert from MB to bytes
        self._bytes_limit *= 1024 * 1024

        if self.default_mode not in ['loop', 'once', 'reflect']:
            self.default_mode = 'loop'
            _log.warning("unrecognized default_mode: using 'loop'")

        self._saved_frames = []
        self._total_bytes = 0
        self._hit_limit = False
        super(HTMLWriter, self).__init__(fps, codec, bitrate,
                                         extra_args, metadata)

    def setup(self, fig, outfile, dpi, frame_dir=None):
        root, ext = os.path.splitext(outfile)
        if ext not in ['.html', '.htm']:
            raise ValueError("outfile must be *.htm or *.html")

        # On-disk frames go into a '<root>_frames' directory next to the
        # output file unless a frame_dir is given; embedded frames need no
        # temp-file prefix at all.
        if not self.embed_frames:
            if frame_dir is None:
                frame_dir = root + '_frames'
            if not os.path.exists(frame_dir):
                os.makedirs(frame_dir)
            frame_prefix = os.path.join(frame_dir, 'frame')
        else:
            frame_prefix = None

        super(HTMLWriter, self).setup(fig, outfile, dpi,
                                      frame_prefix, clear_temp=False)

    def grab_frame(self, **savefig_kwargs):
        if self.embed_frames:
            # Just stop processing if we hit the limit
            if self._hit_limit:
                return
            # (removed an unused local 'suffix' that was computed here)
            f = BytesIO()
            self.fig.savefig(f, format=self.frame_format,
                             dpi=self.dpi, **savefig_kwargs)
            imgdata64 = encodebytes(f.getvalue()).decode('ascii')
            self._total_bytes += len(imgdata64)
            if self._total_bytes >= self._bytes_limit:
                _log.warning(
                    "Animation size has reached %s bytes, exceeding the limit "
                    "of %s. If you're sure you want a larger animation "
                    "embedded, set the animation.embed_limit rc parameter to "
                    "a larger value (in MB). This and further frames will be "
                    "dropped.", self._total_bytes, self._bytes_limit)
                self._hit_limit = True
            else:
                self._saved_frames.append(imgdata64)
        else:
            return super(HTMLWriter, self).grab_frame(**savefig_kwargs)

    def _run(self):
        # make a duck-typed subprocess stand in
        # this is called by the MovieWriter base class, but not used here.
        class ProcessStandin(object):
            returncode = 0

            def communicate(self):
                return '', ''
        self._proc = ProcessStandin()

        # save the frames to an html file
        if self.embed_frames:
            fill_frames = _embedded_frames(self._saved_frames,
                                           self.frame_format)
        else:
            # temp names is filled by FileMovieWriter
            fill_frames = _included_frames(self._temp_names,
                                           self.frame_format)

        mode_dict = dict(once_checked='',
                         loop_checked='',
                         reflect_checked='')
        mode_dict[self.default_mode + '_checked'] = 'checked'

        interval = 1000 // self.fps

        # NOTE(review): Nframes counts _temp_names, which stays empty when
        # embed_frames is True (frames then live in _saved_frames) --
        # confirm against DISPLAY_TEMPLATE's use of Nframes before changing.
        with open(self.outfile, 'w') as of:
            of.write(JS_INCLUDE)
            of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
                                             Nframes=len(self._temp_names),
                                             fill_frames=fill_frames,
                                             interval=interval,
                                             **mode_dict))
class Animation(object):
'''This class wraps the creation of an animation using matplotlib2.
It is only a base class which should be subclassed to provide
needed behavior.
This class is not typically used directly.
Parameters
----------
fig : matplotlib2.figure.Figure
The figure object that is used to get draw, resize, and any
other needed events.
event_source : object, optional
A class that can run a callback when desired events
are generated, as well as be stopped and started.
Examples include timers (see :class:`TimedAnimation`) and file
system notifications.
blit : bool, optional
controls whether blitting is used to optimize drawing. Defaults
to ``False``.
See Also
--------
FuncAnimation, ArtistAnimation
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
# Disables blitting for backends that don't support it. This
# allows users to request it if available, but still have a
# fallback that works if it is not.
self._blit = blit and fig.canvas.supports_blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event',
self._stop)
if self._blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# First disconnect our draw event handler
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
# Now do any initial draw
self._init_draw()
# Add our callback for stepping the animation and
# actually start the event_source.
self.event_source.add_callback(self._step)
self.event_source.start()
    def _stop(self, *args):
        """Disconnect all event wiring; connected to the figure's
        ``close_event`` (*args* receives the unused event object)."""
        # On stop we disconnect all of our events.
        if self._blit:
            self._fig.canvas.mpl_disconnect(self._resize_id)
        self._fig.canvas.mpl_disconnect(self._close_id)
        self.event_source.remove_callback(self._step)
        self.event_source = None
    def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
             bitrate=None, extra_args=None, metadata=None, extra_anim=None,
             savefig_kwargs=None):
        '''Saves a movie file by drawing every frame.
        Parameters
        ----------
        filename : str
            The output filename, e.g., :file:`mymovie.mp4`.
        writer : :class:`MovieWriter` or str, optional
            A `MovieWriter` instance to use or a key that identifies a
            class to use, such as 'ffmpeg'. If ``None``, defaults to
            :rc:`animation.writer`.
        fps : number, optional
            Frames per second in the movie. Defaults to ``None``, which will use
            the animation's specified interval to set the frames per second.
        dpi : number, optional
            Controls the dots per inch for the movie frames. This combined with
            the figure's size in inches controls the size of the movie. If
            ``None``, defaults to :rc:`savefig.dpi`.
        codec : str, optional
            The video codec to be used. Not all codecs are supported
            by a given :class:`MovieWriter`. If ``None``, default to
            :rc:`animation.codec`.
        bitrate : number, optional
            Specifies the number of bits used per second in the compressed
            movie, in kilobits per second. A higher number means a higher
            quality movie, but at the cost of increased file size. If ``None``,
            defaults to :rc:`animation.bitrate`.
        extra_args : list, optional
            List of extra string arguments to be passed to the underlying movie
            utility. If ``None``, defaults to :rc:`animation.extra_args`.
        metadata : Dict[str, str], optional
            Dictionary of keys and values for metadata to include in
            the output file. Some keys that may be of use include:
            title, artist, genre, subject, copyright, srcform, comment.
        extra_anim : list, optional
            Additional `Animation` objects that should be included
            in the saved movie file. These need to be from the same
            `matplotlib2.figure.Figure` instance. Also, animation frames will
            just be simply combined, so there should be a 1:1 correspondence
            between the frames from the different animations.
        savefig_kwargs : dict, optional
            Is a dictionary containing keyword arguments to be passed
            on to the `savefig` command which is called repeatedly to
            save the individual frames.
        Notes
        -----
        fps, codec, bitrate, extra_args, metadata are used to
        construct a :class:`MovieWriter` instance and can only be
        passed if `writer` is a string. If they are passed as
        non-`None` and ``writer`` is a :class:`MovieWriter`, a
        `RuntimeError` will be raised.
        '''
        # If the writer is None, use the rc param to find the name of the one
        # to use
        if writer is None:
            writer = rcParams['animation.writer']
        elif (not isinstance(writer, six.string_types) and
              any(arg is not None
                  for arg in (fps, codec, bitrate, extra_args, metadata))):
            raise RuntimeError('Passing in values for arguments '
                               'fps, codec, bitrate, extra_args, or metadata '
                               'is not supported when writer is an existing '
                               'MovieWriter instance. These should instead be '
                               'passed as arguments when creating the '
                               'MovieWriter instance.')
        if savefig_kwargs is None:
            savefig_kwargs = {}
        # Need to disconnect the first draw callback, since we'll be doing
        # draws. Otherwise, we'll end up starting the animation.
        if self._first_draw_id is not None:
            self._fig.canvas.mpl_disconnect(self._first_draw_id)
            reconnect_first_draw = True
        else:
            reconnect_first_draw = False
        if fps is None and hasattr(self, '_interval'):
            # Convert interval in ms to frames per second
            fps = 1000. / self._interval
        # Re-use the savefig DPI for ours if none is given
        if dpi is None:
            dpi = rcParams['savefig.dpi']
        if dpi == 'figure':
            dpi = self._fig.dpi
        if codec is None:
            codec = rcParams['animation.codec']
        if bitrate is None:
            bitrate = rcParams['animation.bitrate']
        all_anim = [self]
        if extra_anim is not None:
            # Only accept extra animations that share this animation's figure.
            all_anim.extend(anim
                            for anim
                            in extra_anim if anim._fig is self._fig)
        # If we have the name of a writer, instantiate an instance of the
        # registered class.
        if isinstance(writer, six.string_types):
            if writer in writers.avail:
                writer = writers[writer](fps, codec, bitrate,
                                         extra_args=extra_args,
                                         metadata=metadata)
            else:
                _log.warning("MovieWriter %s unavailable.", writer)
                # Fall back to the first available registered writer.
                try:
                    writer = writers[writers.list()[0]](fps, codec, bitrate,
                                                        extra_args=extra_args,
                                                        metadata=metadata)
                except IndexError:
                    raise ValueError("Cannot save animation: no writers are "
                                     "available. Please install ffmpeg to "
                                     "save animations.")
        _log.info('Animation.save using %s', type(writer))
        if 'bbox_inches' in savefig_kwargs:
            _log.warning("Warning: discarding the 'bbox_inches' argument in "
                         "'savefig_kwargs' as it may cause frame size "
                         "to vary, which is inappropriate for animation.")
            savefig_kwargs.pop('bbox_inches')
        # Create a new sequence of frames for saved data. This is different
        # from new_frame_seq() to give the ability to save 'live' generated
        # frame information to be saved later.
        # TODO: Right now, after closing the figure, saving a movie won't work
        # since GUI widgets are gone. Either need to remove extra code to
        # allow for this non-existent use case or find a way to make it work.
        # rc_context scopes the 'savefig.bbox' override to this save only.
        with rc_context():
            if rcParams['savefig.bbox'] == 'tight':
                _log.info("Disabling savefig.bbox = 'tight', as it may cause "
                          "frame size to vary, which is inappropriate for "
                          "animation.")
                rcParams['savefig.bbox'] = None
            with writer.saving(self._fig, filename, dpi):
                for anim in all_anim:
                    # Clear the initial frame
                    anim._init_draw()
                # Draw one frame from every animation, then grab the canvas
                # once, so the combined figure lands in a single movie frame.
                for data in zip(*[a.new_saved_frame_seq() for a in all_anim]):
                    for anim, d in zip(all_anim, data):
                        # TODO: See if turning off blit is really necessary
                        anim._draw_next_frame(d, blit=False)
                    writer.grab_frame(**savefig_kwargs)
        # Reconnect signal for first draw if necessary
        if reconnect_first_draw:
            self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
                                                               self._start)
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
    def new_frame_seq(self):
        '''Creates a new sequence of frame information.'''
        # Default implementation is just an iterator over self._framedata;
        # subclasses (e.g. FuncAnimation) override this to generate lazily.
        return iter(self._framedata)
    def new_saved_frame_seq(self):
        '''Creates a new sequence of saved/cached frame information.'''
        # Default is the same as the regular frame sequence; subclasses that
        # cache frames for movie saving override this.
        return self.new_frame_seq()
    def _draw_next_frame(self, framedata, blit):
        """Render one frame as pre-draw, draw, then post-draw steps."""
        # Breaks down the drawing of the next frame into steps of pre- and
        # post- draw, as well as the drawing of the frame itself.
        self._pre_draw(framedata, blit)
        self._draw_frame(framedata)
        self._post_draw(framedata, blit)
    def _init_draw(self):
        """Draw a clean initial frame; the base implementation is a no-op."""
        # Initial draw to clear the frame. Also used by the blitting code
        # when a clean base is required.
        pass
    def _pre_draw(self, framedata, blit):
        """Hook run before a frame is drawn."""
        # Perform any cleaning or whatnot before the drawing of the frame.
        # This default implementation allows blit to clear the frame.
        if blit:
            self._blit_clear(self._drawn_artists, self._blit_cache)
    def _draw_frame(self, framedata):
        """Render *framedata*; must be provided by subclasses."""
        # Performs actual drawing of the frame.
        raise NotImplementedError('Needs to be implemented by subclasses to'
                                  ' actually make an animation.')
    def _post_draw(self, framedata, blit):
        """Flush the rendered frame to the canvas, blitting when possible."""
        # After the frame is rendered, this handles the actual flushing of
        # the draw, which can be a direct draw_idle() or make use of the
        # blitting.
        if blit and self._drawn_artists:
            self._blit_draw(self._drawn_artists, self._blit_cache)
        else:
            self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
if a in bg_cache:
a.figure.canvas.restore_region(bg_cache[a])
    def _setup_blit(self):
        """Initialize blitting state and hook the canvas resize event."""
        # Setting up the blit requires: a cache of the background for the
        # axes
        self._blit_cache = dict()
        self._drawn_artists = []
        # Resizes invalidate the cached backgrounds, so they must be handled.
        self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                       self._handle_resize)
        self._post_draw(None, self._blit)
    def _handle_resize(self, *args):
        """Pause the animation and invalidate blit caches on canvas resize."""
        # On resize, we need to disable the resize event handling so we don't
        # get too many events. Also stop the animation events, so that
        # we're paused. Reset the cache and re-init. Set up an event handler
        # to catch once the draw has actually taken place.
        self._fig.canvas.mpl_disconnect(self._resize_id)
        self.event_source.stop()
        self._blit_cache.clear()
        self._init_draw()
        self._resize_id = self._fig.canvas.mpl_connect('draw_event',
                                                       self._end_redraw)
    def _end_redraw(self, evt):
        """Resume the animation once the post-resize redraw has completed."""
        # Now that the redraw has happened, do the post draw flushing and
        # blit handling. Then re-enable all of the original events.
        self._post_draw(None, False)
        self.event_source.start()
        self._fig.canvas.mpl_disconnect(self._resize_id)
        self._resize_id = self._fig.canvas.mpl_connect('resize_event',
                                                       self._handle_resize)
    def to_html5_video(self, embed_limit=None):
        '''Returns animation as an HTML5 video tag.
        This saves the animation as an h264 video, encoded in base64
        directly into the HTML5 video tag. This respects the rc parameters
        for the writer as well as the bitrate. This also makes use of the
        ``interval`` to control the speed, and uses the ``repeat``
        parameter to decide whether to loop.

        Parameters
        ----------
        embed_limit : number, optional
            Maximum size of the encoded video in MB; if ``None``, the
            ``animation.embed_limit`` rc parameter is used. Videos larger
            than the limit are not embedded.
        '''
        VIDEO_TAG = r'''<video {size} {options}>
  <source type="video/mp4" src="data:video/mp4;base64,{video}">
  Your browser does not support the video tag.
</video>'''
        # Cache the rendering of the video as HTML
        if not hasattr(self, '_base64_video'):
            # Save embed limit, which is given in MB
            if embed_limit is None:
                embed_limit = rcParams['animation.embed_limit']
            # Convert from MB to bytes
            embed_limit *= 1024 * 1024
            # First write the video to a tempfile. Set delete to False
            # so we can re-open to read binary data.
            with tempfile.NamedTemporaryFile(suffix='.m4v',
                                             delete=False) as f:
                # We create a writer manually so that we can get the
                # appropriate size for the tag
                Writer = writers[rcParams['animation.writer']]
                writer = Writer(codec='h264',
                                bitrate=rcParams['animation.bitrate'],
                                fps=1000. / self._interval)
                self.save(f.name, writer=writer)
            # Now open and base64 encode
            with open(f.name, 'rb') as video:
                vid64 = encodebytes(video.read())
                vid_len = len(vid64)
                if vid_len >= embed_limit:
                    # Over the limit: warn and leave _base64_video unset.
                    _log.warning(
                        "Animation movie is %s bytes, exceeding the limit of "
                        "%s. If you're sure you want a large animation "
                        "embedded, set the animation.embed_limit rc parameter "
                        "to a larger value (in MB).", vid_len, embed_limit)
                else:
                    self._base64_video = vid64.decode('ascii')
                    self._video_size = 'width="{}" height="{}"'.format(
                        *writer.frame_size)
            # Now we can remove
            os.remove(f.name)
        # If we exceeded the size, this attribute won't exist
        if hasattr(self, '_base64_video'):
            # Default HTML5 options are to autoplay and display video controls
            options = ['controls', 'autoplay']
            # If we're set to repeat, make it loop
            if hasattr(self, 'repeat') and self.repeat:
                options.append('loop')
            return VIDEO_TAG.format(video=self._base64_video,
                                    size=self._video_size,
                                    options=' '.join(options))
        else:
            return 'Video too large to embed.'
    def to_jshtml(self, fps=None, embed_frames=True, default_mode=None):
        """Generate HTML representation of the animation"""
        if fps is None and hasattr(self, '_interval'):
            # Convert interval in ms to frames per second
            fps = 1000 / self._interval
        # If we're not given a default mode, choose one base on the value of
        # the repeat attribute
        if default_mode is None:
            default_mode = 'loop' if self.repeat else 'once'
        if hasattr(self, "_html_representation"):
            # NOTE(review): the cached HTML is returned even when fps,
            # embed_frames or default_mode differ from the cached render.
            return self._html_representation
        else:
            # Can't open a second time while opened on windows. So we avoid
            # deleting when closed, and delete manually later.
            with tempfile.NamedTemporaryFile(suffix='.html',
                                             delete=False) as f:
                self.save(f.name, writer=HTMLWriter(fps=fps,
                                                    embed_frames=embed_frames,
                                                    default_mode=default_mode))
            # Re-open and get content
            with open(f.name) as fobj:
                html = fobj.read()
            # Now we can delete
            os.remove(f.name)
            self._html_representation = html
            return html
def _repr_html_(self):
'''IPython display hook for rendering.'''
fmt = rcParams['animation.html']
if fmt == 'html5':
return self.to_html5_video()
elif fmt == 'jshtml':
return self.to_jshtml()
class TimedAnimation(Animation):
    ''':class:`Animation` subclass for time-based animation.
    A new frame is drawn every *interval* milliseconds.
    Parameters
    ----------
    fig : matplotlib2.figure.Figure
        The figure object that is used to get draw, resize, and any
        other needed events.
    interval : number, optional
        Delay between frames in milliseconds. Defaults to 200.
    repeat_delay : number, optional
        If the animation in repeated, adds a delay in milliseconds
        before repeating the animation. Defaults to ``None``.
    repeat : bool, optional
        Controls whether the animation should repeat when the sequence
        of frames is completed. Defaults to ``True``.
    blit : bool, optional
        Controls whether blitting is used to optimize drawing. Defaults
        to ``False``.
    '''
    def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
                 event_source=None, *args, **kwargs):
        # Store the timing information
        self._interval = interval
        self._repeat_delay = repeat_delay
        self.repeat = repeat
        # If we're not given an event source, create a new timer. This permits
        # sharing timers between animation objects for syncing animations.
        if event_source is None:
            event_source = fig.canvas.new_timer()
            event_source.interval = self._interval
        Animation.__init__(self, fig, event_source=event_source,
                           *args, **kwargs)
    def _step(self, *args):
        '''
        Handler for getting events.
        '''
        # Extends the _step() method for the Animation class. If
        # Animation._step signals that it reached the end and we want to
        # repeat, we refresh the frame sequence and return True. If
        # _repeat_delay is set, change the event_source's interval to our loop
        # delay and set the callback to one which will then set the interval
        # back.
        still_going = Animation._step(self, *args)
        if not still_going and self.repeat:
            self._init_draw()
            self.frame_seq = self.new_frame_seq()
            if self._repeat_delay:
                # Swap in the one-shot _loop_delay callback to pause before
                # the sequence restarts.
                self.event_source.remove_callback(self._step)
                self.event_source.add_callback(self._loop_delay)
                self.event_source.interval = self._repeat_delay
                return True
            else:
                # Restart immediately with the freshly reset frame sequence.
                return Animation._step(self, *args)
        else:
            return still_going
    def _stop(self, *args):
        # If we stop in the middle of a loop delay (which is relatively likely
        # given the potential pause here, remove the loop_delay callback as
        # well.
        self.event_source.remove_callback(self._loop_delay)
        Animation._stop(self)
    def _loop_delay(self, *args):
        """One-shot callback fired after the repeat delay has elapsed."""
        # Reset the interval and change callbacks after the delay.
        self.event_source.remove_callback(self._loop_delay)
        self.event_source.interval = self._interval
        self.event_source.add_callback(self._step)
        Animation._step(self)
class ArtistAnimation(TimedAnimation):
    '''Animation built from a fixed, pre-drawn collection of `Artist`s.

    All plotting must already have taken place before an instance is
    created, with the relevant artists saved frame by frame.

    Parameters
    ----------
    fig : matplotlib2.figure.Figure
        The figure object that is used to get draw, resize, and any
        other needed events.
    artists : list
        Each element is the collection of artists to show for one frame;
        artists belonging to other frames are hidden.
    interval : number, optional
        Delay between frames in milliseconds. Defaults to 200.
    repeat_delay : number, optional
        Extra delay in milliseconds before the animation repeats.
        Defaults to ``None``.
    repeat : bool, optional
        Whether the animation loops once the frame sequence is completed.
        Defaults to ``True``.
    blit : bool, optional
        Whether blitting is used to optimize drawing. Defaults to
        ``False``.
    '''
    def __init__(self, fig, artists, *args, **kwargs):
        # Artists rendered during the most recent frame.
        self._drawn_artists = []
        # The per-frame artist lists double as the frame data iterated
        # over by the animation machinery.
        self._framedata = artists
        TimedAnimation.__init__(self, fig, *args, **kwargs)
    def _init_draw(self):
        '''Hide every artist used by *any* frame, then flush their figures.'''
        figures_to_flush = set()
        for frame_artists in self.new_frame_seq():
            for artist in frame_artists:
                artist.set_visible(False)
                artist.set_animated(self._blit)
                # Track each distinct figure so it gets redrawn exactly once.
                figure = artist.get_figure()
                if figure not in figures_to_flush:
                    figures_to_flush.add(figure)
        for figure in figures_to_flush:
            figure.canvas.draw_idle()
    def _pre_draw(self, framedata, blit):
        '''Clear the artists that were shown during the previous frame.'''
        if blit:
            # Blitting clears by restoring the cached backgrounds instead.
            self._blit_clear(self._drawn_artists, self._blit_cache)
        else:
            for artist in self._drawn_artists:
                artist.set_visible(False)
    def _draw_frame(self, artists):
        '''Show the current frame's artists and remember them for later.'''
        # Keep the frame's artists around for the other steps (esp. blitting).
        self._drawn_artists = artists
        for artist in artists:
            artist.set_visible(True)
class FuncAnimation(TimedAnimation):
    '''
    Makes an animation by repeatedly calling a function ``func``.
    Parameters
    ----------
    fig : matplotlib2.figure.Figure
        The figure object that is used to get draw, resize, and any
        other needed events.
    func : callable
        The function to call at each frame. The first argument will
        be the next value in ``frames``. Any additional positional
        arguments can be supplied via the ``fargs`` parameter.
        The required signature is::
           def func(frame, *fargs) -> iterable_of_artists:
    frames : iterable, int, generator function, or None, optional
        Source of data to pass ``func`` and each frame of the animation
        If an iterable, then simply use the values provided.  If the
        iterable has a length, it will override the ``save_count`` kwarg.
        If an integer, then equivalent to passing ``range(frames)``
        If a generator function, then must have the signature::
           def gen_function() -> obj:
        If ``None``, then equivalent to passing ``itertools.count``.
        In all of these cases, the values in *frames* is simply passed through
        to the user-supplied *func* and thus can be of any type.
    init_func : callable, optional
        A function used to draw a clear frame. If not given, the
        results of drawing from the first item in the frames sequence
        will be used. This function will be called once before the
        first frame.
        If ``blit == True``, ``init_func`` must return an iterable of artists
        to be re-drawn.
        The required signature is::
          def init_func() -> iterable_of_artists:
    fargs : tuple or None, optional
        Additional arguments to pass to each call to *func*.
    save_count : int, optional
        The number of values from *frames* to cache.
    interval : number, optional
        Delay between frames in milliseconds. Defaults to 200.
    repeat_delay : number, optional
        If the animation in repeated, adds a delay in milliseconds
        before repeating the animation. Defaults to ``None``.
    repeat : bool, optional
        Controls whether the animation should repeat when the sequence
        of frames is completed. Defaults to ``True``.
    blit : bool, optional
        Controls whether blitting is used to optimize drawing. Defaults
        to ``False``.
    '''
    def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
                 save_count=None, **kwargs):
        if fargs:
            self._args = fargs
        else:
            self._args = ()
        self._func = func
        # Amount of framedata to keep around for saving movies. This is only
        # used if we don't know how many frames there will be: in the case
        # of no generator or in the case of a callable.
        self.save_count = save_count
        # Set up a function that creates a new iterable when needed. If nothing
        # is passed in for frames, just use itertools.count, which will just
        # keep counting from 0. A callable passed in for frames is assumed to
        # be a generator. An iterable will be used as is, and anything else
        # will be treated as a number of frames.
        if frames is None:
            self._iter_gen = itertools.count
        elif callable(frames):
            self._iter_gen = frames
        elif cbook.iterable(frames):
            self._iter_gen = lambda: iter(frames)
            if hasattr(frames, '__len__'):
                self.save_count = len(frames)
        else:
            self._iter_gen = lambda: iter(xrange(frames))
            self.save_count = frames
        if self.save_count is None:
            # If we're passed in and using the default, set save_count to 100.
            self.save_count = 100
        else:
            # itertools.islice returns an error when passed a numpy int instead
            # of a native python int (http://bugs.python.org/issue30537).
            # As a workaround, convert save_count to a native python int.
            self.save_count = int(self.save_count)
        self._init_func = init_func
        # Needs to be initialized so the draw functions work without checking
        self._save_seq = []
        TimedAnimation.__init__(self, fig, **kwargs)
        # Need to reset the saved seq, since right now it will contain data
        # for a single frame from init, which is not what we want.
        self._save_seq = []
    def new_frame_seq(self):
        """Return a fresh frame-data iterator from the configured source."""
        # Use the generating function to generate a new frame sequence
        return self._iter_gen()
    def new_saved_frame_seq(self):
        """Return the iterator of frames to use when saving a movie."""
        # Generate an iterator for the sequence of saved data. If there are
        # no saved frames, generate a new frame sequence and take the first
        # save_count entries in it.
        if self._save_seq:
            # While iterating we are going to update _save_seq
            # so make a copy to safely iterate over
            self._old_saved_seq = list(self._save_seq)
            return iter(self._old_saved_seq)
        else:
            if self.save_count is not None:
                return itertools.islice(self.new_frame_seq(), self.save_count)
            else:
                frame_seq = self.new_frame_seq()
                # Unknown length: cap at 100 frames and warn about the
                # truncation rather than saving forever.
                def gen():
                    try:
                        for _ in range(100):
                            yield next(frame_seq)
                    except StopIteration:
                        pass
                    else:
                        cbook.warn_deprecated(
                            "2.2", "FuncAnimation.save has truncated your "
                            "animation to 100 frames.  In the future, no such "
                            "truncation will occur; please pass 'save_count' "
                            "accordingly.")
                return gen()
    def _init_draw(self):
        """Draw the initial clear frame via init_func or the first frame."""
        # Initialize the drawing either using the given init_func or by
        # calling the draw function with the first item of the frame sequence.
        # For blitting, the init_func should return a sequence of modified
        # artists.
        if self._init_func is None:
            self._draw_frame(next(self.new_frame_seq()))
        else:
            self._drawn_artists = self._init_func()
            if self._blit:
                if self._drawn_artists is None:
                    raise RuntimeError('The init_func must return a '
                                       'sequence of Artist objects.')
                for a in self._drawn_artists:
                    a.set_animated(self._blit)
        self._save_seq = []
    def _draw_frame(self, framedata):
        """Call the user *func* with *framedata* and record the frame."""
        # Save the data for potential saving of movies.
        self._save_seq.append(framedata)
        # Make sure to respect save_count (keep only the last save_count
        # around)
        self._save_seq = self._save_seq[-self.save_count:]
        # Call the func with framedata and args. If blitting is desired,
        # func needs to return a sequence of any artists that were modified.
        self._drawn_artists = self._func(framedata, *self._args)
        if self._blit:
            if self._drawn_artists is None:
                raise RuntimeError('The animation function must return a '
                                   'sequence of Artist objects.')
            for a in self._drawn_artists:
                a.set_animated(self._blit)
| [
"skylex72rus@gmail.com"
] | skylex72rus@gmail.com |
901005101e83d21158d5f681a8a232e73f3bfa90 | e492b3b33d333420f40ead5b8b0c9340dedaac6a | /Cogs/Logger.py | 325da4af195b673fe000f629d3141a24e559edc0 | [] | no_license | timwhite/MegaBot | 51079471c4cc9cfee08a9a47f888f038b0d08bce | 21c52c27bff9127c7dfe7b35712e03349016a4eb | refs/heads/master | 2021-03-14T10:41:36.492531 | 2020-03-12T06:31:32 | 2020-03-12T06:31:32 | 246,759,600 | 0 | 0 | null | 2020-03-12T06:25:43 | 2020-03-12T06:25:42 | null | UTF-8 | Python | false | false | 4,795 | py | import discord
from discord.ext import commands
import os
import subprocess
import asyncio
import re
from dotenv import load_dotenv
from datetime import datetime
# Cog version reported by setup().
version = '0.5.5 Alpha'
# Pull configuration from the environment (.env file).
load_dotenv()
logurl = os.getenv('LOG_URL')          # public base URL where logs are served
panic_word = os.getenv('PANIC_WORD')   # trigger word for compiling a panic log
# NOTE(review): raises TypeError at import if PANIC_LOG_LEN is unset.
panic_length = int(os.getenv('PANIC_LOG_LEN'))
# NOTE(review): captured once at import time; any timestamp derived from this
# never advances — consider calling datetime.now() at each use site.
now = datetime.now()
def setup(bot):
    """Register the logging cog with *bot* and report configuration gaps.

    Called by discord.py when this extension is loaded. Missing LOG_URL or
    PANIC_WORD settings produce warnings but do not abort loading, since
    the cog degrades gracefully without them.
    """
    bot.add_cog(logging(bot))
    print("Logger Version {}.".format(version))
    # Fix: compare against None with identity, not equality (PEP 8).
    if logurl is None:
        print("WARNING: No log URL defined, unable to automatically publish logs.")
    if panic_word is None:
        print("WARNING: No panic word defined, unable to generate panic logs.")
async def run(cmd):
    """Run *cmd* in a subprocess shell and return its output.

    Parameters
    ----------
    cmd : str
        The shell command line to execute. Callers must ensure any
        interpolated values are shell-safe.

    Returns
    -------
    tuple of bytes
        ``(stdout, stderr)`` captured from the finished process.
        (Fix: the original captured both streams and then discarded them.)
    """
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    return stdout, stderr
async def tail(f, lines=1, _buffer=4098):
    """Return the last *lines* lines of the already-open file *f*.

    Works backwards from the end of the file in *_buffer*-sized steps
    until enough lines are available. If the file is smaller than the
    requested window (the seek fails), the whole file is read instead.
    """
    collected = []
    # Number of buffer-widths to step back from the end of the file;
    # grows by one each pass until enough lines have been gathered.
    blocks_back = -1
    while len(collected) < lines:
        try:
            f.seek(blocks_back * _buffer, os.SEEK_END)
        except IOError:
            # File too small (or too many lines requested): fall back to
            # reading it in full from the beginning.
            f.seek(0)
            collected = f.readlines()
            break
        collected = f.readlines()
        # Widen the window for the next attempt.
        blocks_back -= 1
    return collected[-lines:]
async def listToString(s):
    """Join the elements of *s* into one space-separated string."""
    return " ".join(s)
class logging(commands.Cog):
    """Message-logging cog.

    Records every guild message to ``logs/<guild>/<channel>/chat.log``,
    downloads attachments next to the log, and — when the configured panic
    word appears — publishes the last ``panic_length`` log lines as a
    downloadable "panic log". Direct messages are never logged.
    """
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_message(self, ctx):
        """Log *ctx* (a Message): append to the channel log, save any
        attachment, and compile a panic log if the panic word is seen."""
        if not ctx.guild:
            # Direct messages are deliberately not logged.
            return
        message = ctx.content
        channel = ctx.channel.name
        guild = ctx.guild.name
        user = ctx.author.name
        # Bug fix: stamp each message with the *current* time. The old code
        # reused the module-level 'now' captured once at import, so every
        # log line carried the bot's start-up timestamp.
        dt_str = datetime.now().strftime("%m/%d/%Y-%H:%M:%S")
        log_dir = "logs/{}/{}".format(guild, channel)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # Use a context manager: the old code leaked the file handle.
        with open("{}/chat.log".format(log_dir), "a") as log_file:
            log_file.write(dt_str + ":" + user + ":" + message + "\n")
        if ctx.attachments:
            images_dir = "{}/images".format(log_dir)
            if not os.path.exists(images_dir):
                os.makedirs(images_dir)
            # NOTE(security): the attachment URL and filename are attacker
            # controlled and are interpolated into a shell command below;
            # they should be shell-escaped before public deployment.
            # NOTE(review): dt_str contains '/' characters, so this target
            # path includes date subdirectories that are never created.
            dlpath = "logs/{}/{}/images/{}-{}".format(guild, channel, dt_str, ctx.attachments[0].filename)
            await run("curl {} -o {}".format(ctx.attachments[0].url, dlpath))
        # Bug fix: guard against an unset panic word — 'None in message'
        # would raise TypeError on every guild message.
        if panic_word and panic_word in message and not ctx.author.bot:
            await ctx.channel.send("Panic word '**{}**' detected, compiling log for download.".format(panic_word))
            with open("{}/chat.log".format(log_dir), "r") as log:
                paniclog = await tail(log, panic_length, 4098)
            panic_dir = "{}/panic".format(log_dir)
            if not os.path.exists(panic_dir):
                os.makedirs(panic_dir)
            output = await listToString(paniclog)
            with open("{}/panic.log".format(panic_dir), "w") as panic_file:
                panic_file.write(output)
            await ctx.channel.send("Panic log completed, you may view it at {1}/{2}/{0}/panic/panic.log".format(ctx.channel.name, logurl, ctx.guild.name))
    @commands.command()
    async def logs(self, ctx):
        """Reply with the base URL of this server's published logs."""
        if not ctx.guild:
            await ctx.send("I do not log messages sent in DMs")
        else:
            if logurl is not None:
                await ctx.send("My recordings can be found at {0}/{1}".format(logurl, ctx.guild.name))
            else:
                # Typo fix in the user-facing message: 'asministrator'.
                await ctx.send("The administrator has not set a log URL, you are unable to view my logs.")
    @commands.command()
    async def channellog(self, ctx):
        """Reply with the URL of the current channel's published log."""
        if logurl is not None:
            await ctx.send("My recordings for the channel '{0}' can be found at {1}/{2}/{0}".format(ctx.channel.name, logurl, ctx.guild.name))
        else:
            # Typo fix in the user-facing message: 'asministrator'.
            await ctx.send("The administrator has not set a log URL, you are unable to view my logs.")
"kainenjw@gmail.com"
] | kainenjw@gmail.com |
cbd8fcf710be493308140c526ea247720404d8d5 | dc9ef3bebf6dc7387c893b2999c0caaa2bc6ca3f | /run_scripts/pktc.py | a1bfa396d47c79e88247cc03fa05eb5b1a84a66f | [] | no_license | ganesh1901/TWDL | 8d052e1475b1930af8bbad95364b04262597d900 | 960ea2ddce6f6265a3e51f0f7483a50f216804b1 | refs/heads/main | 2023-02-08T12:22:09.857666 | 2020-12-29T18:55:00 | 2020-12-29T18:55:00 | 325,363,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py |
from matplotlib.pyplot import figure, show
from numpy import arange, sin, pi
import struct
import os
import time
import sys
import binascii
# Python 2 script: scans a binary capture file of fixed-size packets and
# counts missed "Packet A" (msgId 0x03) sequence numbers.
if (len(sys.argv) != 2):
    print 'usage == python 1)filein name '
    exit(0)
file_name = sys.argv[1]
# struct format codes for the per-packet time tag and header fields.
timetag_fmt = 'L'
header_fmt = 'H'
# Resolve symlinks so the creation time reflects the real capture file.
if os.path.islink(file_name):
    real_path = os.readlink(file_name)
    time1 = os.path.getctime(real_path)
else:
    time1 = os.path.getctime(file_name)
# NOTE(review): 'timestamp' is computed but never used below.
timestamp = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(time1))
timetag_size = struct.calcsize(timetag_fmt)
header_size = struct.calcsize(header_fmt)
# Each record: time tag + header + 64-byte payload.
packet_size = timetag_size + header_size + 64
print 'packet_size', packet_size
line_count = os.stat(file_name).st_size / packet_size
print 'line count ', line_count
f = open(file_name, "rb")
pktAmiss = 0        # total missed Packet A sequence numbers
pktAtotal = 0       # total Packet A records seen
pktAPreSeqNum = 0   # previous Packet A sequence number
pcount = 0          # count of distinct non-zero sequence numbers seen
pseq = 0            # last distinct sequence number printed
for y in range(line_count):
    # NOTE(review): reading only when y > 10 means the first 11 loop
    # iterations consume no bytes, so the file is read from its start but
    # the last 11 records are never reached — confirm this is intended.
    if y > 10:
        bytes = f.read(packet_size)
        if bytes:
            # Unpack 46 bytes of payload following the time tag + header.
            a = struct.unpack("=HHHHBB18H", bytes[timetag_size + header_size: timetag_size + header_size + 46])
            msgId = a[5]
            currSeqNumber = a[4] #>> 8 | ((a[5] & 0xff) << 8)
            # Print each new non-zero sequence number once.
            if currSeqNumber!=0 and pseq!=currSeqNumber:
                pcount+=1
                pseq=currSeqNumber
                print "Seq=", currSeqNumber
            if (msgId == 0x03): # Packet A
                # Sequence wrapped (or restarted): resynchronize.
                if (currSeqNumber < pktAPreSeqNum):
                    pktAPreSeqNum = currSeqNumber
                if (pktAtotal == 0):
                    # First Packet A: just record its sequence number.
                    pktAPreSeqNum = currSeqNumber
                else:
                    # A gap larger than one means packets were missed.
                    if (pktAPreSeqNum < currSeqNumber) and pktAPreSeqNum + 1 != currSeqNumber:
                        #print 'pre= ', pktAPreSeqNum, ' current=', currSeqNumber
                        pktAmiss += currSeqNumber - pktAPreSeqNum - 1
                        pktAPreSeqNum = currSeqNumber
                    else:
                        if (pktAPreSeqNum != currSeqNumber):
                            pktAPreSeqNum += 1
                pktAtotal += 1
print "Total Packet Miss===",pktAmiss
| [
"addepalliganesh323@gmail.com"
] | addepalliganesh323@gmail.com |
7cf8c871194f2bd90b8ae87f4758338eca105f0e | e28070b3ac7c3373c529c78b83240da80d00e16d | /scripts/audioReply.py | 2b238a1e650ecc04220a0dff2aa9b75fa467621c | [] | no_license | This-Is-Ko/myAssistant | 9f02dc915a067bec0cf640b03b88434047c70201 | d0b6e6f44eeb9ccb4fd0f7e72907ee41a23d6653 | refs/heads/master | 2022-03-15T15:37:31.343465 | 2019-10-24T06:49:08 | 2019-10-24T06:49:08 | 213,093,050 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from gtts import gTTS
from pydub import AudioSegment
from pydub.playback import play
def getAudioReply(audioString):
    """Synthesize *audioString* to speech and play the result aloud."""
    reply_path = "temp/output/reply.mp3"
    makeAudioReply(audioString)
    playAudio(reply_path)
def makeAudioReply(audioString):
    """Render *audioString* to temp/output/reply.mp3 via Google TTS."""
    print(audioString)
    speech = gTTS(text=audioString, lang='en')
    speech.save("temp/output/reply.mp3")
def playAudio(audio_file):
    """Decode *audio_file* as MP3 and play it through the speakers."""
    play(AudioSegment.from_mp3(audio_file))
def main():
    """Demo entry point: speak a short boot-up announcement."""
    getAudioReply("Hi there. System is booting up")
# Only run the demo when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| [
"kobo67@hotmail.com"
] | kobo67@hotmail.com |
d8422b8aa979e0556d75828f1a513a2d477b8016 | f2406914ebd779c1955e1105b683aa313b490576 | /Source/utilities2.py | fea925c7c2c039054d5ca963a92dad6510dda143 | [] | no_license | Jaideepm08/Crime-in-Vancouver | 0f59dd00c9a0dc2ef3787b3213748a29c870e7f8 | 124f22c12d2ef67c68947d4dd01d079118455130 | refs/heads/master | 2021-07-11T23:42:38.354678 | 2021-02-23T09:24:19 | 2021-02-23T09:24:19 | 235,506,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,757 | py | import pandas as pd, numpy as np, fiona, timeit
from geopy.distance import vincenty
from shapely import geometry
from utilities import utm_to_latlong, latlong_to_utm
from __builtin__ import False
from pandas.core.frame import DataFrame
PROJECT_ROOT = '../'
def main():
    """Ad-hoc consistency check (note: this module uses Python 2 ``print``
    statements).

    Samples 1000 post-2006 crime rows, recomputes each row's neighbourhood
    from its coordinates with ``locate_neighbourhood``, and prints the rows
    where the recomputed name disagrees with the recorded NEIGHBOURHOOD.
    """
    # test values (used by the commented-out checks below)
    lat = 49.2668355595
    lon = -123.070244095
    year = 2010
    month = 5
    '''
    prop_df = pd.read_csv(PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(2006) + '.csv')
    print avg_closest_properties(lat,lon,prop_df=prop_df)
    sky_df = pd.read_csv(PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv')
    print closest_skytrain(lat,lon)
    crime_df = pd.read_csv(PROJECT_ROOT+'/data/crime_03_15/crime_latlong.csv')
    neighbourhoods = crime_df['NEIGHBOURHOOD'].unique().tolist()
    print len(neighbourhoods)
    print one_hot_encoding(neighbourhoods[2],neighbourhoods)
    a = number_graffiti(lat,lon)
    print type(a[0])
    '''
    data = pd.read_csv(PROJECT_ROOT + '/data/crime_03_15/crime_latlong.csv')
    data = data[data['YEAR'] >= 2006].sample(1000)
    data = data[['LATITUDE', 'LONGITUDE', 'NEIGHBOURHOOD']]
    # Recompute the neighbourhood for every sampled row from coordinates.
    data2 = data.apply(lambda row: pd.Series(locate_neighbourhood(row['LATITUDE'], row['LONGITUDE']),
                                             index=['NEIGHBOURHOOD_2']), axis=1)
    data = pd.concat([data, data2], axis=1)[['NEIGHBOURHOOD', 'NEIGHBOURHOOD_2']]
    # Keep only mismatches where the recorded neighbourhood is non-null.
    data = data[data['NEIGHBOURHOOD'] != data['NEIGHBOURHOOD_2']][pd.notnull(data['NEIGHBOURHOOD'])]
    print data
    print data.count()
def avg_closest_properties(lat, lon, year=None, prop_df=None, range_val=0.0001):
    """Average assessed value of the properties closest to (lat, lon).

    Returns ``[mean VALUE of the 5 nearest, mean VALUE of the 10 nearest]``,
    or ``None`` (after printing the error) if anything goes wrong.  Supply a
    pre-loaded ``prop_df`` or a ``year`` so the matching property-tax CSV can
    be read; ``range_val`` is the half-width (degrees) of the initial
    candidate bounding box, widened 10x recursively until >= 10 candidates.
    """
    try:
        if year is not None:
            property_file = PROJECT_ROOT + 'data/property_tax_06_15/latlong_property_tax_' + str(year) + '.csv'
        if prop_df is None:
            prop_df = pd.read_csv(property_file)
        # Keep the unfiltered frame for the recursive retry below.
        temp_df = prop_df
        # Cheap bounding-box prefilter so the expensive geodesic distance is
        # only computed for nearby rows.
        prop_df = prop_df[prop_df['LATITUDE'] < lat + range_val]
        prop_df = prop_df[prop_df['LATITUDE'] > lat - range_val]
        prop_df = prop_df[prop_df['LONGITUDE'] < lon + range_val]
        prop_df = prop_df[prop_df['LONGITUDE'] > lon - range_val]
        # Too few candidates: start again with a 10x wider box.
        if prop_df.count()['VALUE'] < 10:
            return avg_closest_properties(lat, lon, prop_df=temp_df, range_val=range_val * 10)
        prop_df = prop_df.copy()  # real copy of the slice (avoids SettingWithCopyWarning)
        # Geodesic distance (metres) from the query point to each candidate.
        prop_df['DIST_DIF'] = prop_df.apply(
            lambda row: vincenty((lat, lon), (row['LATITUDE'], row['LONGITUDE'])).m, axis=1)
        # Find the 10 and 5 closest properties.
        ten_min_df = prop_df[['VALUE', 'DIST_DIF']].nsmallest(10, 'DIST_DIF')
        five_min_df = ten_min_df.nsmallest(5, 'DIST_DIF')
        return [five_min_df['VALUE'].mean(), ten_min_df['VALUE'].mean()]
    except Exception as exc:
        # Bug fix: was a bare ``except:`` that also swallowed SystemExit /
        # KeyboardInterrupt and hid the cause entirely.  Keep the original
        # "print and return None" contract but report what actually failed.
        print("Error in avg_closest_properties: %s" % exc)
def closest_skytrain(lat, lon, sky_df=None):
    """One-hot list over skytrain stations marking the nearest one to
    (lat, lon), with the distance to it (metres) appended as the final
    element.  Loads the stations CSV unless ``sky_df`` is supplied."""
    if sky_df is None:
        skytrain_file = PROJECT_ROOT + 'data/skytrain_stations/rapid_transit_stations.csv'
        sky_df = pd.read_csv(skytrain_file)
    encoding = [0] * (sky_df.count()['STATION'] + 1)
    # Geodesic distance from the query point to every station.
    sky_df['DIST_DIF'] = sky_df.apply(
        lambda row: vincenty((lat, lon), (row['LAT'], row['LONG'])).m, axis=1)
    nearest = sky_df.nsmallest(1, 'DIST_DIF')
    encoding[nearest.index[0]] = 1
    encoding[-1] = nearest.iloc[0]['DIST_DIF']
    return encoding
'''
def get_weather(year, month, weatherdf = None):
weather_file = PROJECT_ROOT + 'data/weather/VANCOUVER SEA ISLAND CCG/summarydata.csv'
if weatherdf is None:
weatherdf = pd.read_csv(weather_file)
# basic checking to see if we have reasonable data passed in.
if month > 12:
return False
if year >= 2006 and year <= 2015:
filter_year = weatherdf[(weatherdf.YEAR == year)]
line = filter_year[(filter_year.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1)
return line
else:
filter_month = weatherdf[(weatherdf.MONTH == month)].drop('YEAR',axis=1).drop('MONTH',axis=1).mean(axis=0).to_frame().transpose()
return filter_month
'''
def one_hot_encoding(label, list_of_labels):
    """Return a one-hot list marking the first occurrence of ``label`` in
    ``list_of_labels`` (raises ValueError when the label is absent)."""
    hot_index = list_of_labels.index(label)
    return [1 if position == hot_index else 0 for position in range(len(list_of_labels))]
def number_graffiti(lat, lon, graf_df=None, radius1=50, radius2=100):
    """Count graffiti incidents near (lat, lon).

    Returns ``[total COUNT within radius1 metres, total COUNT within radius2
    metres]``.  Loads the city graffiti CSV unless ``graf_df`` is supplied.
    """
    if graf_df is None:
        graffiti_file = PROJECT_ROOT + 'data/graffiti/graffiti.csv'
        graf_df = pd.read_csv(graffiti_file)
    # Cheap ~0.001-degree bounding-box prefilter before geodesic distances.
    graf_df = graf_df[graf_df['LAT'] < lat + .001]
    graf_df = graf_df[graf_df['LAT'] > lat - .001]
    graf_df = graf_df[graf_df['LONG'] < lon + .001]
    # Bug fix: this lower bound was a duplicate of the upper bound
    # (``< lon + .001``), so points west of the box were never filtered out
    # and the empty fast-path below rarely triggered.
    graf_df = graf_df[graf_df['LONG'] > lon - .001]
    if graf_df['LAT'].count() == 0:
        return [0, 0]
    graf_df = graf_df.copy()  # real copy of the slice (avoids SettingWithCopyWarning)
    # Geodesic distance (metres) for the remaining rows.
    graf_df['DIST_DIF'] = graf_df.apply(
        lambda row: vincenty((lat, lon), (row['LAT'], row['LONG'])).m, axis=1)
    count_2 = graf_df[graf_df['DIST_DIF'] <= radius2]
    count_1 = count_2[count_2['DIST_DIF'] <= radius1]
    return [count_1['COUNT'].sum(), count_2['COUNT'].sum()]
def number_street_lights(lat, lon, light_df=None, radius=50):
    """Number of street-lighting poles within ``radius`` metres of (lat, lon).

    Loads the city street-lighting CSV unless ``light_df`` is supplied.
    """
    if light_df is None:
        light_file = PROJECT_ROOT + 'data/street_lightings/street_lighting_poles.csv'
        light_df = pd.read_csv(light_file)
    # Cheap ~0.001-degree bounding-box prefilter before geodesic distances.
    light_df = light_df[light_df['LAT'] < lat + .001]
    light_df = light_df[light_df['LAT'] > lat - .001]
    light_df = light_df[light_df['LONG'] < lon + .001]
    # Bug fix: this lower bound was a duplicate of the upper bound
    # (``< lon + .001``), so poles west of the box were never filtered out.
    light_df = light_df[light_df['LONG'] > lon - .001]
    if light_df['LAT'].count() == 0:
        return 0
    light_df = light_df.copy()  # real copy of the slice (avoids SettingWithCopyWarning)
    # Geodesic distance (metres), then count poles inside the radius.
    light_df['DIST_DIF'] = light_df.apply(
        lambda row: vincenty((lat, lon), (row['LAT'], row['LONG'])).m, axis=1)
    min_lights = light_df[light_df['DIST_DIF'] < radius]
    return min_lights['DIST_DIF'].count()
def locate_neighbourhood(lat, lon):
    """Return the name of the Vancouver neighbourhood polygon containing
    (lat, lon), or -1 if the point falls inside none of them.

    Opens the city's local-area-boundary shapefile, converts each polygon's
    vertices from UTM to lat/long, and point-in-polygon tests the query.
    """
    with fiona.open(PROJECT_ROOT + 'data/neighbourhood_borders/local_area_boundary.shp') as neighbourhoods:
        point = geometry.Point(lat, lon)
        for n in neighbourhoods:
            # Rename two areas so results match the crime data's spelling.
            if n['properties']['NAME'] == 'Arbutus-Ridge':
                n['properties']['NAME'] = 'Arbutus Ridge'
            if n['properties']['NAME'] == 'Downtown':
                n['properties']['NAME'] = 'Central Business District'
            # Convert the polygon's outer ring from UTM to (lat, long) pairs,
            # mutating the feature in place.
            # NOTE(review): only ring [0] is converted; interior rings (holes),
            # if any, would keep UTM coordinates -- confirm the shapefile has none.
            n['geometry']['coordinates'][0] = [utm_to_latlong(x[0], x[1]) for x in n['geometry']['coordinates'][0]]
            shape = geometry.asShape(n['geometry'])
            if shape.contains(point):
                return n['properties']['NAME']
    return -1
if __name__ == "__main__":
    # Run the neighbourhood consistency check when executed as a script.
    main()
| [
"45701689+Jaideepm08@users.noreply.github.com"
] | 45701689+Jaideepm08@users.noreply.github.com |
7321373acb0af20941885f268896dbd444622116 | 1f554053ca430f60542f26febd47df2bc28c4499 | /lms_app/migrations/0010_alter_courses_course_detail.py | 12073b9a8af787f5f915fc5300db438daca937de | [] | no_license | ashikpydev/Learning-Management-System | e46c1358c5d02a0e14e295f5c1b8a2c5a1290799 | 824eeb34700e5cd842d628c490ac09f7f139023c | refs/heads/main | 2023-06-13T02:25:42.314229 | 2021-06-29T04:35:18 | 2021-06-29T04:35:18 | 371,429,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # Generated by Django 3.2.3 on 2021-06-18 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lms_app', '0009_rename_blog_ttle_blog_blog_title'),
]
operations = [
migrations.AlterField(
model_name='courses',
name='course_detail',
field=models.TextField(max_length=250),
),
]
| [
"ashiqurrahman0506@gmail.com"
] | ashiqurrahman0506@gmail.com |
ac2debe025aa644830e9dd2142081101ea527a27 | 121c994c7bc86d0fb28d30f5abbb6f23800fed25 | /connected_world_api/connected_world_api/settings.py | 774df49bac46b5c3539eff907c7b0c5ea3e0e2f2 | [] | no_license | avelaga/connected_world_api | d7ef3b75d554973fa3d3aa32ff8e536f1502b423 | 7a523d0e4fda9a331ae4345ea15b70cd1d30b075 | refs/heads/master | 2023-04-29T07:28:25.945068 | 2021-04-26T19:12:22 | 2021-04-26T19:12:22 | 360,400,317 | 0 | 0 | null | 2021-04-26T17:40:05 | 2021-04-22T05:17:58 | Python | UTF-8 | Python | false | false | 3,804 | py | """
Django settings for connected_world_api project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-d36_c@cnxyvg$ed%-jq-y^ibveib+&%8h$^j_26(tgs3=e2esf'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header -- development-only setting.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = [
    'clicks.apps.ClicksConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'corsheaders'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Add cors middleware (the order is important!)
    # 'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'connected_world_api.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'connected_world_api.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

# Previous local MySQL configuration, kept for reference:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': 'connected_world',
#         'USER': 'root',
#         'PASSWORD': 'supersecret',
#         'HOST': 'localhost',
#         'PORT': '3306',
#     }
# }

# HOST 'db' suggests a docker-compose service name -- TODO confirm.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'postgres',
        'PASSWORD': 'postgres',
        'HOST': 'db',
        'PORT': 5432,
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CSRF_COOKIE_SECURE = False | [
"abhinav.velaga@gmail.com"
] | abhinav.velaga@gmail.com |
bdaf49b8f1852494947d57dd9f3e385d7cb21ecb | 73c9211d5627594e0191510f0b4d70a907f5c4c5 | /pytest/lesson6/TestXlsxReportdemo.py | 2c2e3aef8262fceb1736ac41921a38a074af96c5 | [] | no_license | tigerxjtu/py3 | 35378f270363532fb30962da8674dbcee99eb5ff | 5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779 | refs/heads/master | 2021-07-13T05:34:15.080119 | 2020-06-24T09:36:33 | 2020-06-24T09:36:33 | 159,121,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,744 | py | # -*- coding:utf-8 -*-
import xlsxwriter
import time
from pytest.lesson4.testrequest import *
from pytest.lesson4.testvote import *
from pytest.lesson4.testrequest import *
from pytest.testdata.getpath import GetTestDataPath
import xlrd
#把GetTestReport方法自己写出来
from pytest.testdata.getpath import GetTestReport
testurl="http://127.0.0.1:8000"
ReportPath=GetTestReport()
workbook = xlsxwriter.Workbook(ReportPath)
worksheet = workbook.add_worksheet("测试总结")
worksheet2 = workbook.add_worksheet("用例详情")
test_polls()
test_vote()
test_login()
TestReport = hlist # 调用测试结果
hpassnum = 0 # 定义一个变量,用来计算测试通过的用例数量
def get_format(wd, option={}):
return wd.add_format(option)
# 设置居中
def get_format_center(wd, num=1):
return wd.add_format({'align': 'center', 'valign': 'vcenter', 'border': num})
def set_border_(wd, num=1):
return wd.add_format({}).set_border(num)
# 写数据
def _write_center(worksheet, cl, data, wd):
return worksheet.write(cl, data, get_format_center(wd))
# 生成饼形图
def pie(workbook, worksheet):
chart1 = workbook.add_chart({'type': 'pie'})
chart1.add_series({
'name': '接口测试统计',
'categories': '=测试总结!$D$4:$D$5',
'values': '=测试总结!$E$4:$E$5',
})
chart1.set_title({'name': '接口测试统计'})
chart1.set_style(10)
worksheet.insert_chart('A9', chart1, {'x_offset': 25, 'y_offset': 10})
def init(worksheet):
global workbook
# 设置列行的宽高
worksheet.set_column("A:A", 15)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_row(1, 30)
worksheet.set_row(2, 30)
worksheet.set_row(3, 30)
worksheet.set_row(4, 30)
worksheet.set_row(5, 30)
# worksheet.set_row(0, 200)
define_format_H1 = get_format(workbook, {'bold': True, 'font_size': 18})
define_format_H2 = get_format(workbook, {'bold': True, 'font_size': 14})
define_format_H1.set_border(1)
define_format_H2.set_border(1)
define_format_H1.set_align("center")
define_format_H2.set_align("center")
define_format_H2.set_bg_color("blue")
define_format_H2.set_color("#ffffff")
# Create a new Chart object.
worksheet.merge_range('A1:F1', '接口自动化测试报告', define_format_H1)
worksheet.merge_range('A2:F2', '测试概括', define_format_H2)
worksheet.merge_range('A3:A6', '炼数成金', get_format_center(workbook))
# worksheet.insert_image('A1', GetLogoDataPath())
_write_center(worksheet, "B3", '项目名称', workbook)
_write_center(worksheet, "B4", '接口版本', workbook)
_write_center(worksheet, "B5", '脚本语言', workbook)
_write_center(worksheet, "B6", '测试地址', workbook)
data = {"test_name": "炼数成金项目接口", "test_version": "v1.0.0",
"test_pl": "Python3", "test_net": testurl}
_write_center(worksheet, "C3", data['test_name'], workbook)
_write_center(worksheet, "C4", data['test_version'], workbook)
_write_center(worksheet, "C5", data['test_pl'], workbook)
_write_center(worksheet, "C6", data['test_net'], workbook)
_write_center(worksheet, "D3", "测试用例总数", workbook)
_write_center(worksheet, "D4", "测试用例通过数", workbook)
_write_center(worksheet, "D5", "测试用例失败数", workbook)
_write_center(worksheet, "D6", "测试日期", workbook)
timenow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
data1 = {"test_sum": len(TestReport),
"test_success": hpassnum,
"test_failed": len(TestReport) - hpassnum,
"test_date": timenow}
_write_center(worksheet, "E3", data1['test_sum'], workbook)
_write_center(worksheet, "E4", data1['test_success'], workbook)
_write_center(worksheet, "E5", data1['test_failed'], workbook)
_write_center(worksheet, "E6", data1['test_date'], workbook)
_write_center(worksheet, "F3", "测试用例通过率", workbook)
worksheet.merge_range('F4:F6', str(
(round(hpassnum / len(TestReport), 2)) * 100) + '%', get_format_center(workbook))
pie(workbook, worksheet)
def test_detail(worksheet):
# 设置列宽高
worksheet.set_column("A:A", 30)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_column("G:G", 20)
worksheet.set_column("H:H", 20)
# 设置行的宽高
for hrow in range(len(TestReport) + 2):
worksheet.set_row(hrow, 30)
worksheet.merge_range('A1:H1', '测试详情', get_format(workbook, {'bold': True,
'font_size': 18,
'align': 'center',
'valign': 'vcenter',
'bg_color': 'blue',
'font_color': '#ffffff'}))
_write_center(worksheet, "A2", '用例ID', workbook)
_write_center(worksheet, "B2", '接口名称', workbook)
_write_center(worksheet, "C2", '接口协议', workbook)
_write_center(worksheet, "D2", 'URL', workbook)
_write_center(worksheet, "E2", '参数', workbook)
_write_center(worksheet, "F2", '预期值', workbook)
_write_center(worksheet, "G2", '实际值', workbook)
_write_center(worksheet, "H2", '测试结果', workbook)
data = {"info": TestReport} # 获取测试结果被添加到测试报告里
temp = len(TestReport) + 2
global hpassnum
for item in data["info"]:
if item["t_result"] == "通过":
hpassnum += 1
else:
pass
_write_center(worksheet, "A" + str(temp), item["t_id"], workbook)
_write_center(worksheet, "B" + str(temp), item["t_name"], workbook)
_write_center(worksheet, "C" + str(temp), item["t_method"], workbook)
_write_center(worksheet, "D" + str(temp), item["t_url"], workbook)
_write_center(worksheet, "E" + str(temp), item["t_param"], workbook)
_write_center(worksheet, "F" + str(temp), item["t_hope"], workbook)
_write_center(worksheet, "G" + str(temp), item["t_actual"], workbook)
_write_center(worksheet, "H" + str(temp), item["t_result"], workbook)
temp = temp - 1
test_detail(worksheet2)
init(worksheet)
workbook.close()
| [
"liyin@16010.net"
] | liyin@16010.net |
a34492455f57342694d38e9b0238b4df02fb6fe5 | eb63b1d36aefe82885865b6bc0161a60c4141acf | /exam/migrations/0003_auto_20170718_0556.py | 54750a45ee10289847bb07bcf1515281858f95a8 | [] | no_license | vinodsesetti/onlineportal | 82005aea76b38b237b8ec10e7a4ed1e921de5746 | 39231d0acd6c07fce06936590693cd876b236c01 | refs/heads/master | 2021-01-23T08:49:31.506375 | 2017-09-06T03:11:25 | 2017-09-06T03:11:25 | 102,556,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-07-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (see file header): re-declares
    Profile.image as an ImageField uploading to 'images/'.  Do not edit by
    hand; create a new migration instead."""

    dependencies = [
        ('exam', '0002_auto_20170718_0554'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(upload_to='images/'),
        ),
    ]
| [
"vinodsesetti@gmail.com"
] | vinodsesetti@gmail.com |
9de156bb01de3eafff3afb39c54460c9d46babbf | a5e9f5cabb4f513968f105eaaf8ba83cd7833b4f | /home/models.py | a1064177e94524efaabd76dd9122c7897d248c67 | [] | no_license | sagaruniyal/Django-Blog-Project | 6178b782b31bff6e385a0e5d0697e06c8c2b67d5 | 3e0b5c4500875706dcff017de49e3b2b5bdb4a14 | refs/heads/master | 2022-11-15T06:38:08.219641 | 2020-06-30T10:46:06 | 2020-06-30T10:46:06 | 276,069,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from django.db import models
# Create your models here.
# Database ---> excel Workbook
# Models in Django --->Table-----> Sheet
class Contact(models.Model):
    """One row per message submitted through the site's contact form."""

    # Explicit auto-incrementing primary key ("serial number").
    sno = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    # Stored as text (not an integer field); max 13 characters.
    phone = models.CharField(max_length=13)
    # Plain CharField, not EmailField -- no e-mail format validation.
    email = models.CharField(max_length=100)
    # The message body.
    content = models.TextField()
    # Set automatically when the row is first inserted.
    timeStamp = models.DateTimeField(auto_now_add=True, blank=True)

    def __str__(self):
        # Human-readable representation (e.g. in the Django admin).
        return 'Message from ' + self.name + ' - ' + self.email
| [
"uniyals322@gmail.com"
] | uniyals322@gmail.com |
bde03dcbaf6dcd991de97a0554b598898ba38ef5 | 15ef76c7e32e0ae54da7fcd5a7c2239f63c47ed4 | /coincidences.py | 5cbe1b6948dcd907a74d88ff3a42fbcd613237e2 | [] | no_license | jmaicas/habitat | 3e1e2bdd98ac294b48e8a9e6cdbd8ccbd5554245 | 67cb5dfa879b58a2587a91656bc86071b60388be | refs/heads/main | 2023-06-02T10:57:14.173750 | 2021-06-23T17:10:59 | 2021-06-23T17:10:59 | 336,404,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,342 | py | import sys
import os
import platform
import glob
import re as re
import numpy as np
import random
import pandas as pd
from statistics import mean
import matplotlib.pyplot as plt
import xlsxwriter
from w_coincidencies import *
from coincidences_classes import *
from PyQt5.QtWidgets import QMainWindow, QDialog, QApplication, QFileDialog, QTableWidgetItem
from PyQt5.QtWidgets import QWidget, QPushButton, QErrorMessage
from PyQt5.QtCore import pyqtSlot
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
from datetime import datetime
# Tracking is resampled at 1 kHz, i.e. one location sample per millisecond.
sampling_freq = 1000
sampling_time = 1/sampling_freq

# translating modules tunnels into numbers because calculations are faster that way
# the value 511 marks a sample that was not recorded
modules_dict = {"A1": 32, "A2": 33, "A3": 34, "A4": 35, "B1": 36, "B2": 37, "B3": 38, "B4": 39,
                "C1": 40, "C2": 41, "C3": 42, "C4": 43, "D1": 44, "D2": 45, "D3": 46, "D4": 47,
                "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10,
                "11": 11, "12": 12, "13": 13,
                "tC": 14, "tD": 15, "Norec": 511}
# d.items() >> d.keys() >> d.values() to use it and recover the keys.
class MyForm(QMainWindow):
    """Main window of the rat-coincidence analysis tool.

    The user selects either a folder of per-rat .xlsx tracking files or a
    single multi-sheet spreadsheet, plus a results folder.  ``runAll`` then
    loads every rat, truncates all recordings to the shortest one, computes
    per-module coincidences between rats and exports them to Excel.
    """

    def __init__(self):
        super().__init__()
        # Qt Designer generated UI.
        self.ui = Ui_w_coincidencies()
        self.ui.setupUi(self)
        self.ui.ButtonSelectFolder.clicked.connect(self.selectFolder)
        self.ui.ButDelFolder.clicked.connect(self.delFolder)
        self.ui.ButtonResultFolder.clicked.connect(self.selectResultFolder)
        self.ui.ButDelResultFolder.clicked.connect(self.delResultFolder)
        self.ui.ButtonSelectFile.clicked.connect(self.selectFile)
        self.ui.ButDelFile.clicked.connect(self.delFile)
        self.ui.ButtonRunAll.clicked.connect(self.runAll)
        # Error message (it is necessary to initialize it too)
        self.error_msg = QErrorMessage()
        self.error_msg.setWindowTitle("Error")
        self.show()

    def selectFolder(self):
        """Pick the input data folder; clears any single-file selection."""
        DataFolder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if DataFolder:
            self.ui.labelFolderPath.setText(DataFolder)
            self.delFile()
        else:
            self.error_msg.showMessage("It is necessary to select a folder")

    def delFolder(self):
        """Clear the selected data-folder path."""
        self.ui.labelFolderPath.clear()

    def selectResultFolder(self):
        """Pick the folder where result spreadsheets will be written."""
        self.ResultsFolder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if self.ResultsFolder:
            self.ui.labelResultFolderPath.setText(self.ResultsFolder)
        else:
            self.error_msg.showMessage("It is necessary to select a folder")

    def delResultFolder(self):
        """Clear the selected results-folder path."""
        self.ui.labelResultFolderPath.clear()

    def selectFile(self):
        """Pick a single multi-sheet .xlsx; clears any folder selection."""
        options = QFileDialog.Options()
        self.filename, _ = QFileDialog.getOpenFileName(self, "Select Spreadsheet", "", "Excel files (*.xlsx)", options=options)
        if self.filename:
            self.ui.labelFilePath.setText(self.filename)
            self.delFolder()

    def delFile(self):
        """Clear the selected spreadsheet path."""
        self.ui.labelFilePath.clear()

    def runAll(self):
        """Load every rat, align recording lengths, then compute and export
        the coincidence statistics."""
        if str(self.ui.labelResultFolderPath.text()) == '':
            self.error_msg.showMessage("It is necessary to select an empty results folder")
        else:
            if str(self.ui.labelFolderPath.text()) != '':
                # A folder path is given
                os.chdir(str(self.ui.labelFolderPath.text()))
            elif str(self.ui.labelFilePath.text()) != '':
                # A file path is given and the file contains several sheets:
                # split each sheet into its own rat_<n>.xlsx in the results folder.
                xls = pd.ExcelFile(self.filename)
                os.chdir(str(self.ui.labelResultFolderPath.text()))
                for n, sheet in enumerate(xls.sheet_names):
                    df = pd.DataFrame()
                    df = pd.read_excel(self.filename, sheet_name=sheet)
                    filename_out = 'rat_' + str(n+1) + '.xlsx'
                    df.to_excel(filename_out, index=0)
            if platform.system() == 'Windows':
                d = os.getcwd() + "\\"  # windows
            else:
                d = os.getcwd() + "/"  # linux & MacOS
            matching_files = sorted(glob.glob(r'*xlsx'))  # not sorted by name by default
            l_xlsx_files = []
            self.rats = []
            self.rats_ids = []
            last_samples = []
            # Load one ``rat`` object per spreadsheet found.
            for matching_file in matching_files:
                file_route = d + matching_file
                l_xlsx_files.append(file_route)
                rat_n = rat()
                rat_n.read_excel(file_route, modules_dict)
                if not rat_n.consistency:
                    error_message = "time errors in rat: " + str(rat_n.rat_id) + " columns: " + np.array2string(rat_n.excel_locations)
                    self.error_msg.showMessage(error_message)
                rat_n.add_sampling(sampling_freq, sampling_time)
                #print(rat_n.rat_location)
                #print(rat_n.starting_time)
                #print(rat_n.rat_times)
                self.rats.append(rat_n)
                self.rats_ids.append(rat_n.rat_id)
                last_samples.append(rat_n.last_sample)
            # for comparison, all the rats should have the same tracking time, the minimum
            self.max_time = min(last_samples)
            for rat_n in (self.rats):
                rat_n.chop_times(self.max_time)
                rat_n.basic_stats(modules_dict)
                #print('modules or tunnels: ', rat_n.uniqueplaces)
                #print('Modules names:', rat_n.unique_places_names)
                #print('number of times: ', rat_n.uniquenumbertimes)
            #print ("Data and figures correctly exported to excel")
            self.calc_tracking_coinc()
            self.export_coincidences_to_excel()

    def export_coincidences_to_excel(self):
        """Write two workbooks into the results folder:

        * ``coincidencies.xlsx`` -- per rat, time spent with N companions in
          each module/tunnel;
        * ``coincident_time_per_module_per_rat.xlsx`` -- per rat, shared time
          per module against every other rat, with per-module totals.
        """
        os.chdir(str(self.ui.labelResultFolderPath.text()))
        workbook = xlsxwriter.Workbook('coincidencies.xlsx')
        for rat_n in (self.rats):
            worksheet_rat = workbook.add_worksheet(str(rat_n.rat_id))
            worksheet_rat.write(0, 0, "Number of companions per module")
            worksheet_rat.write(2, 0, "Module")
            worksheet_rat.write(2, 1, "Number of companions")
            worksheet_rat.write(2, 2, "Seconds")
            for n, mod in enumerate(rat_n.l_companions_times_per_module):
                # Numeric codes below 16 are tunnels: label them "T<code>".
                if mod[0] in np.arange(16):
                    module = "T" + str(mod[0])
                else:
                    module = mod[0]
                worksheet_rat.write(n+3, 0, module)
                i = 0
                # Write (companion-count, seconds) pairs across the row.
                for item in mod[1].items():
                    #companions_number = int(*item.keys())
                    #samples_number = int(*item.values())
                    worksheet_rat.write(n+3, i+1, item[0])
                    worksheet_rat.write(n+3, i+2, item[1])
                    i = i + 2
        workbook.close()
        # for clarity, another loop to export the coincidences per module per rat
        writer = pd.ExcelWriter('coincident_time_per_module_per_rat.xlsx', engine='xlsxwriter')
        for rat_n in (self.rats):
            #the_other_rats = list(filter((rat_n.rat_id).__ne__, self.rats_ids)) # excludes the current rat from the list
            list_modules = list(modules_dict.keys())
            rat_df = pd.DataFrame(rat_n.shared_time_per_module_per_rat,
                                  columns=self.rats_ids, index=list_modules)
            # adding averages per column (animals) and rows (modules&tunnels);
            # the 'Norec' (not-recorded) row is excluded from the totals.
            rat_df.loc['Totals', :] = rat_df.sum(axis=0) - rat_df.loc['Norec']
            #cols_to_sum = rat_df.columns[ : rat_df.shape[1]]
            #rat_df['Totals'] = rat_df[cols_to_sum].sum(axis=1) - rat_df[rat_n.rat_id]
            #print(rat_df)
            rat_df.to_excel(writer, sheet_name=str(rat_n.rat_id))
        writer.save()

    def closeFigures(self):
        """Close every open matplotlib figure."""
        plt.close('all')

    def print2pdf(self, filename=''):
        """Save all open matplotlib figures into one PDF named *filename*.

        NOTE(review): uses ``self.ui.labelFileSelected`` as the target folder,
        a widget not referenced anywhere else in this class -- confirm the
        widget name is correct.
        """
        if filename:
            filename2 = '/' + filename + '.pdf'
            pdf = matplotlib.backends.backend_pdf.PdfPages(str(self.ui.labelFileSelected.text()) + filename2)
            figs = [plt.figure(n) for n in plt.get_fignums()]
            for fig in figs:
                fig.savefig(pdf, format='pdf')
            pdf.close()
        else:
            self.error_msg.showMessage("It is necessary to select a folder")

    def print2png(self):
        """Save every open matplotlib figure as a timestamped PNG in a
        user-selected folder."""
        figFolder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if figFolder:
            prefix = '/' + str(datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
            for i in plt.get_fignums():
                plt.figure(i)
                plt.savefig(figFolder + prefix + 'figure%d.png' % i)
        else:
            self.error_msg.showMessage("It is necessary to select a folder")

    def calc_tracking_coinc(self):
        """Build the (samples x rats) location matrix and let each rat compute
        its coincidences against all the others."""
        allrats_locations = []
        for rat_n in self.rats:
            allrats_locations.append(rat_n.rat_location)
        # matrix with all rats and their location per sample
        all_rats_locations = np.transpose(np.asarray(allrats_locations)).astype(int)
        for r, rat_n in enumerate(self.rats):
            rat_n.calculate_coincidences(r, all_rats_locations, modules_dict)

        ## making recordings of the same lenght
        #if np.size(self.resamp_data2) < np.size(self.resamp_data1):
        #    rat1_data = self.resamp_data1[:np.size(self.resamp_data2)]
        #    rat2_data = self.resamp_data2
        #else:
        #    rat2_data = self.resamp_data2[:np.size(self.resamp_data1)]
        #    rat1_data = self.resamp_data1
        ## obtaining the coincidences
        #equal_module = []
        #for pos, module in enumerate(rat1_data):
        #    if module == rat2_data[pos]:
        #        equal_module.append(module)
        #    else:
        #        equal_module.append('none')
        #
        #number_of_coinc = []
        #
        #for n in range(16):
        #    # it is in samples, we want it in seconds
        #    number_of_coinc.append(equal_module.count(n)*self.sample_time)
        #
        #
        #
        #de#f load_tracking_data(self, dframe, sampling_freq):
        #
        #    df_track['Accumulated Time'] = df_track['Accumulated Time']*self.sampling_freq
        #    df_track['Accumulated Time'] = df_track['Accumulated Time'].round(0)*self.sample_time
        #    df_track['T0'] = df_track['Accumulated Time'].shift(1)
        #    df_track['T1'] = df_track['Accumulated Time']
        #    df_track.fillna(0, inplace = True)
        #    print(df_track)
        #    df_track['Ttotal'] = df_track['T1'] - df_track['T0']
        #    df_track['Samples'] = df_track['Ttotal']*self.sampling_freq
        #    df_track['Samples'] = df_track['Samples'].astype(int)
        #    df_track['Module #'] = df_track['Module #'].astype(int)
        #    print(df_track)
        #    mod_arr = df_track['Module #'].to_numpy()
        #    samp_arr = df_track['Samples'].to_numpy()
        #    mod_samp = np.vstack((mod_arr, samp_arr)).T
if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = MyForm()
    # Bug fix: was ``w.show`` (a bare attribute access, i.e. a no-op) --
    # the method must be called.  Harmless in practice only because
    # MyForm.__init__ already calls self.show().
    w.show()
    sys.exit(app.exec())
| [
"noreply@github.com"
] | noreply@github.com |
e02a4e642a50e5cb9fea026b8290f81105be0c01 | 7cd169ba231b45a4faed48ef08d60a3f6172c877 | /train.py | 9be3c9471c15b6fd44edf231e12d0e67cd4550ab | [] | no_license | GeraldHost/bert-ner | 07412c97e611ee0b9620e75f1c74c3deb6e09b35 | ecdeb15f707db7ce2e62b479cba4e823f82cb78c | refs/heads/master | 2022-12-28T05:39:09.539353 | 2020-10-15T14:49:57 | 2020-10-15T14:49:57 | 303,312,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | import pandas as pd
import numpy as np
import joblib
import torch
from sklearn import preprocessing
from sklearn import model_selection
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
import config
import dataset
import engine
from model import EntityModel
def process_data(data_path):
    """Load a CSV of (sentence, word, tag) rows and encode the NER tags.

    Parameters
    ----------
    data_path : str
        Path to a latin-1 encoded CSV with 'sentence', 'word' and 'tag' columns.

    Returns
    -------
    sentences : ndarray of list[str] -- words grouped per sentence
    tag       : ndarray of list[int] -- encoded tag ids grouped per sentence
    enc_tag   : LabelEncoder fitted on all tags plus a synthetic "B-START" tag
    """
    df = pd.read_csv(data_path, encoding="latin-1")

    enc_tag = preprocessing.LabelEncoder()
    # Fit on every tag seen in the data plus "B-START" so the encoder can
    # also encode sequence-start markers later on.
    enc_str = np.append(df.tag.unique(), "B-START")
    enc_ids = enc_tag.fit_transform(enc_str)

    # Map tag string -> id via a dict lookup instead of the original
    # np.where() scan per row (O(1) per row instead of O(n_tags)).
    tag_to_id = dict(zip(enc_str, enc_ids))
    df["tag"] = df["tag"].map(tag_to_id)

    sentences = df.groupby("sentence")["word"].apply(list).values
    tag = df.groupby("sentence")["tag"].apply(list).values
    return sentences, tag, enc_tag
if __name__ == "__main__":
    # ---- data ---------------------------------------------------------
    sentences, tag, enc_tag = process_data(config.TRAINING_FILE)

    # Persist the fitted tag encoder so inference can decode predictions.
    meta_data = {
        "enc_tag": enc_tag
    }
    joblib.dump(meta_data, "meta.bin")

    num_tag = len(list(enc_tag.classes_))
    print(list(enc_tag.classes_))

    # 90/10 train/validation split, fixed seed for reproducibility.
    (
        train_sentences,
        test_sentences,
        train_tag,
        test_tag
    ) = model_selection.train_test_split(sentences, tag, random_state=42, test_size=0.1)

    train_dataset = dataset.EntityDataset(
        texts=train_sentences, tags=train_tag
    )
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE, num_workers=4
    )

    valid_dataset = dataset.EntityDataset(
        texts=test_sentences, tags=test_tag
    )
    valid_data_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=config.VALID_BATCH_SIZE, num_workers=1
    )

    # ---- model --------------------------------------------------------
    device = torch.device("cuda")
    model = EntityModel(num_tag=num_tag)
    model.to(device)

    # Standard BERT fine-tuning recipe: no weight decay on biases/LayerNorm.
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_parameters = [
        {
            "params": [
                p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.001,
        },
        {
            "params": [
                p for n, p in param_optimizer if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0,
        },
    ]

    num_train_steps = int(len(train_sentences) / config.TRAIN_BATCH_SIZE * config.EPOCHS)
    optimizer = AdamW(optimizer_parameters, lr=3e-5)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0, num_training_steps=num_train_steps
    )

    # ---- training loop: keep the checkpoint with the best validation loss.
    best_loss = np.inf
    for epoch in range(config.EPOCHS):
        train_accuracy, train_loss = engine.train_fn(train_data_loader, model, optimizer, device, scheduler)
        test_accuracy, test_loss = engine.eval_fn(valid_data_loader, model, device)
        print(f"Train Loss = {train_loss} Valid Loss = {test_loss}")
        print(f"Train Accuracy = {train_accuracy} Valid Accuracy = {test_accuracy}")
        if test_loss < best_loss:
            torch.save(model.state_dict(), config.MODEL_PATH)
            best_loss = test_loss
"me@jacobford.co.uk"
] | me@jacobford.co.uk |
221f4c8150fddc906199d788e70ea2553500a8f7 | 2903ac66369b6bd45889b12629d8c8e34e6089b3 | /frappe_training/config/desktop.py | 60ea98f53064fec38a864b70c7e641453fb4dd78 | [
"MIT"
] | permissive | sivaranjanipalanivel/training | 6fa50b5f97fb00894404fba11122599fd796623c | b177c56a319c07dc3467ce3113e332ecee9b81fa | refs/heads/master | 2023-07-17T06:11:29.894363 | 2021-08-02T14:47:31 | 2021-08-02T14:47:31 | 391,987,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Describe the frappe_training desk icon shown on the Frappe desktop."""
    module_card = {
        "module_name": "frappe_training",
        "color": "grey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "label": _("frappe_training"),
    }
    return [module_card]
| [
"you@example.com"
] | you@example.com |
bc10143549abf633a804734a7dfb801743e8a2eb | 6e96e9ecb3cd60954fb2be6fc4b6cad4b9450309 | /django_api/contact_api/utils.py | 3592a4dff3b471236d3133fa85e392941ed36ba4 | [] | no_license | sarahdactyl71/django_api | b1e3392befdf2d99dac9a1d754ed1957892fd4c9 | 87bcd12dc7d1aa1b18968f3da16c1c7369e2a1bb | refs/heads/master | 2020-03-26T09:20:25.695850 | 2018-08-22T06:05:24 | 2018-08-22T06:05:24 | 144,745,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | import json
def parse_json_for_data(request):
r = json.loads(request.body)
full_name = r['full_name']
email = r['email']
address = r['address']
phone = r['phone']
return {'full_name': full_name, 'email': email, 'address': address, 'phone': phone}
def get_user_creds(request):
username = request.META['HTTP_USERNAME']
password = request.META['HTTP_PASSWORD']
return {'username' : username, 'password': password}
| [
"kirkse710@gmail.com"
] | kirkse710@gmail.com |
4c3861919096c212fc796e992d2e10a2588b9a40 | 7d8711ca39731eed2b236f9b03c113c62415efde | /test/__init__.py | 96b466c239e27c665071b28e2bfd277713d2fcef | [] | no_license | IndonesiaHaze/indonesia-haze | d4cc94b48c89e2badff4d52c2549c194a735009e | 9908a2859b0801dba53479bb9bfcbaa95228e7ae | refs/heads/master | 2021-01-10T17:51:39.494257 | 2016-02-08T09:21:22 | 2016-02-08T09:21:22 | 48,312,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | __author__ = 'brlnt-super'
| [
"okasurya@users.noreply.github.com"
] | okasurya@users.noreply.github.com |
fc79fded64c300677e0efa2c62dbca95c45f3941 | 3f29c0eecb7bf4bf5ead307210d08201660024a1 | /Networking/Google_Calendar/Native_HTTP/Refresh_Token.py | 0ad9641ea334a602a38a7d410825c7757fc06353 | [] | no_license | reds98/Python-Sensei | 3f171745889f051bd51dbd4f1636206cd3eda2ba | ace27303cfc89400980ebd1188245532dfd44003 | refs/heads/master | 2020-07-31T06:58:26.034758 | 2020-06-05T06:59:41 | 2020-06-05T06:59:41 | 210,522,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import requests
response = requests.post(
url='https://www.googleapis.com/oauth2/v4/token',
data={
'client_id': '329717365176-aqe4dnhufklio4fcbhlg2j498g7gspn4.apps.googleusercontent.com',
'client_secret': '-1OsrUOu_P2Bwa1e-CAX6C_-',
'refresh_token': '1//04t3Sd3pr_NKCCgYIARAAGAQSNwF-L9IrqK2EfHExN1debApfXQKz_Q4OccKzPtskMtprCnv7NQZoGmfcDy7HugWxC5_HrbLybC0',
'grant_type': 'refresh_token',
},
headers={
'Content-Type': 'application/x-www-form-urlencoded',
},
)
response.raise_for_status()
print(response.json().get('access_token')) | [
"sahid.rojas64@gmail.com"
] | sahid.rojas64@gmail.com |
111aa7b541118eeee86127ef84029eea234ce034 | 4b8dfa56ee51214637db2ac718c599a44577d076 | /app/__init__.py | 73af1bc70d3a52dd149747a4d0dc6bd566d6e376 | [] | no_license | psy2848048/langchain_eosio | fed1add7b451fb8c9c21a2f4780d905dc95bc7d6 | 82f7984a6f207d6403f274fbe1846f53b172cb6d | refs/heads/master | 2022-10-19T13:42:27.250579 | 2020-06-16T01:26:10 | 2020-06-16T01:26:10 | 272,577,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | # -*- coding: utf-8 -*-
from flask import Flask, g, redirect, make_response, jsonify
from flask_session import Session
from flask_cors import CORS
import os
import subprocess
# Define the WSGI application object
app = Flask(__name__, static_url_path='/static', template_folder='/static/front')
#: Flask-Session
Session(app)
#: Flask-CORS
cors = CORS(app, resources={r"/*": {"origins": "*", "supports_credentials": "true"}})
# API Version
versions = ['/api/action']
from app.auth.urls import auth
app.register_blueprint(auth, url_prefix='/api/v1/auth')
#: 등록된 url 확인하기
print(app.url_map)
#####################################################################################
@app.before_request
def before_request():
"""
모든 API 실행 전 실행하는 부분
"""
p = subprocess.Popen(
@app.teardown_request
def teardown_request(exception):
"""
모든 API 실행 후 실행하는 부분. 여기서는 DB 연결종료.
"""
pass
# Sample HTTP error handling
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify(result_ko='존재하지 않는 페이지입니다'
, result_en='Not Found!'
, result=404), 404)
@app.errorhandler(401)
def not_unauthorized(error):
return make_response(jsonify(result_ko='인증되지 않음'
, result_en='Unauthenticated'
, result=401), 401)
@app.errorhandler(403)
def forbidden(error):
# return abort(403)
return make_response(jsonify(result_ko='접근 금지!'
, result_en='Forbidden!'
, result=403), 403)
| [
"psy2848048@gmail.com"
] | psy2848048@gmail.com |
33db5512963f5b8c6d5910b476d375ebec537462 | 414393a5048e5212223051d6a5541ecb873bcc53 | /imagenet_VGG16/main_PFSUM.py | 00bece7144bfcbbe48d1335b150d8de00d3d18ec | [] | no_license | byh1321/CIFAR100_Distorted_Channel_Selective | 5a0fc1107ab9d60ce12504a8e474144762eda8df | 897f2dea4e645329dfc3bf3df6b147c783bfa83f | refs/heads/master | 2020-03-21T02:31:24.024771 | 2019-08-12T05:59:53 | 2019-08-12T05:59:53 | 138,002,631 | 0 | 0 | null | 2019-08-02T02:26:49 | 2018-06-20T08:26:51 | Python | UTF-8 | Python | false | false | 21,685 | py | from __future__ import print_function
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.models as models
import argparse
import torch.optim as optim
import pytorch_fft.fft as fft
import utils
from utils import progress_bar
import os
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--se', default=0, type=int, help='start epoch')
parser.add_argument('--ne', default=0, type=int, help='number of epoch')
parser.add_argument('--bs', default=128, type=int, help='batch size')
parser.add_argument('--mode', default=1, type=int, help='train or inference') #mode=1 is train, mode=0 is inference
parser.add_argument('--fixed', default=0, type=int, help='quantization') #mode=1 is train, mode=0 is inference
parser.add_argument('--gau', type=float, default=0, metavar='N',help='gaussian noise standard deviation')
parser.add_argument('--blur', type=float, default=0, metavar='N',help='blur noise standard deviation')
parser.add_argument('--samplesize', default=0, type=int, help='set sample size')
parser.add_argument('--outputfile', default='garbage.txt', help='output file name', metavar="FILE")
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
traindir = os.path.join('/usr/share/ImageNet/train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(traindir,transforms.Compose([transforms.RandomSizedCrop(224),transforms.RandomHorizontalFlip(),transforms.ToTensor(),normalize,]))
train_sub_dataset, dump = torch.utils.data.random_split(train_dataset,[args.samplesize,(len(train_dataset)-args.samplesize)])
train_loader = torch.utils.data.DataLoader(train_sub_dataset, batch_size=1, shuffle=True, num_workers=8, pin_memory=True)
#train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True,num_workers=8, pin_memory=True)
valdir = os.path.join('/usr/share/ImageNet/val')
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir,transforms.Compose([transforms.Scale(256),transforms.CenterCrop(224),transforms.ToTensor(),normalize])),batch_size=128, shuffle=False,num_workers=1, pin_memory=True)
class VGG16(nn.Module):
def __init__(self):
super(VGG16,self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv6 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv7 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv8 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv9 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv10 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv11 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv12 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv13 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.linear1 = nn.Sequential(
nn.Linear(25088, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
)
self.linear2 = nn.Sequential(
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
)
self.linear3 = nn.Sequential(
nn.Linear(4096, 1000),
)
def forward(self,x):
if (args.gau==0)&(args.blur==0):
#no noise
pass
elif (args.blur == 0)&(args.gau != 0):
#gaussian noise add
gau_kernel = torch.randn(x.size())*args.gau
x = Variable(gau_kernel.cuda()) + x
elif (args.gau == 0)&(args.blur != 0):
#blur noise add
blur_kernel_partial = torch.FloatTensor(utils.genblurkernel(args.blur))
blur_kernel_partial = torch.matmul(blur_kernel_partial.unsqueeze(1),torch.transpose(blur_kernel_partial.unsqueeze(1),0,1))
kernel_size = blur_kernel_partial.size()[0]
zeros = torch.zeros(kernel_size,kernel_size)
blur_kernel = torch.cat((blur_kernel_partial,zeros,zeros,
zeros,blur_kernel_partial,zeros,
zeros,zeros,blur_kernel_partial),0)
blur_kernel = blur_kernel.view(3,3,kernel_size,kernel_size)
blur_padding = int((blur_kernel_partial.size()[0]-1)/2)
#x = torch.nn.functional.conv2d(x, weight=blur_kernel.cuda(), padding=blur_padding)
x = torch.nn.functional.conv2d(x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
elif (args.gau != 0) & (args.blur != 0):
#both gaussian and blur noise added
blur_kernel_partial = torch.FloatTensor(utils.genblurkernel(args.blur))
blur_kernel_partial = torch.matmul(blur_kernel_partial.unsqueeze(1),torch.transpose(blur_kernel_partial.unsqueeze(1),0,1))
kernel_size = blur_kernel_partial.size()[0]
zeros = torch.zeros(kernel_size,kernel_size)
blur_kernel = torch.cat((blur_kernel_partial,zeros,zeros,
zeros,blur_kernel_partial,zeros,
zeros,zeros,blur_kernel_partial),0)
blur_kernel = blur_kernel.view(3,3,kernel_size,kernel_size)
blur_padding = int((blur_kernel_partial.size()[0]-1)/2)
x = torch.nn.functional.conv2d(x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
gau_kernel = torch.randn(x.size())*args.gau
x = Variable(gau_kernel.cuda()) + x
else:
print("Something is wrong in noise adding part")
exit()
tmp = Variable(torch.zeros(1,3,224,224).cuda())
f = fft.Fft2d()
fft_rout, fft_iout = f(x, tmp)
mag = torch.sqrt(torch.mul(fft_rout,fft_rout) + torch.mul(fft_iout,fft_iout))
tmp = torch.zeros(1,1,224,224).cuda()
tmp = torch.add(torch.add(mag[:,0,:,:],mag[:,1,:,:]),mag[:,2,:,:])
tmp = torch.abs(tmp)
PFSUM = 0
for i in range(0,224):
for j in range(0,224):
if (i+j) < 111:
print_value = 0
elif (i-j) > 112:
print_value = 0
elif (j-i) > 112:
print_value = 0
elif (i+j) > 335:
print_value = 0
else:
PFSUM = PFSUM + tmp[0,i,j]
f = open(args.outputfile,'a+')
print(PFSUM.item(),file=f)
f.close()
'''
f = open(args.outputfile,'a+')
for i in range(0,224):
for j in range(0,224):
print(tmp[0,i,j].item()/3,file = f)
f.close()
exit()
'''
"""
if args.fixed:
x = quant(x)
x = roundmax(x)
out = self.conv1(x)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv2(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv4(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv5(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv6(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv7(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv8(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv9(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv10(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv11(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv12(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv13(out)
out = out.view(out.size(0), -1)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear2(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
"""
out = torch.zeros(1000)
return out
if args.mode == 0:
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
net = checkpoint['net']
elif args.mode == 1:
if args.resume:
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
best_acc = checkpoint['acc']
net = checkpoint['net']
else:
print('==> Building model..')
net = VGG16()
elif args.mode == 2:
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
net = checkpoint['net']
if args.resume:
print('==> Resuming from checkpoint..')
best_acc = checkpoint['acc']
else:
best_acc = 0
if use_cuda:
#print(torch.cuda.device_count())
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(0,1))
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
start_epoch = args.se
num_epoch = args.ne
###################################################################################
# Copied this part from https://github.com/pytorch/examples/blob/master/imagenet/main.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
######################################################################################
def paramsget():
params = net.conv1[0].weight.view(-1,)
params = torch.cat((params,net.conv2[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv3[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv4[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv5[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv6[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv7[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv8[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv9[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv10[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv11[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv12[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv13[0].weight.view(-1,)),0)
params = torch.cat((params,net.fc1[1].weight.view(-1,)),0)
params = torch.cat((params,net.fc2[1].weight.view(-1,)),0)
params = torch.cat((params,net.fc3[0].weight.view(-1,)),0)
#net = checkpoint['net']
return params
def findThreshold(params):
thres=0
while 1:
tmp = (torch.abs(params.data)<thres).type(torch.FloatTensor)
result = torch.sum(tmp)/params.size()[0]
if (args.pr/100)<result:
print("threshold : {}".format(thres))
return thres
else:
thres += 0.0001
def getPruningMask(thres):
mask = torch.load('mask_null.dat')
mask[0] = torch.abs(net.conv1[0].weight.data)>thres
mask[1] = torch.abs(net.conv2[0].weight.data)>thres
mask[2] = torch.abs(net.conv3[0].weight.data)>thres
mask[3] = torch.abs(net.conv4[0].weight.data)>thres
mask[4] = torch.abs(net.conv5[0].weight.data)>thres
mask[5] = torch.abs(net.conv6[0].weight.data)>thres
mask[6] = torch.abs(net.conv7[0].weight.data)>thres
mask[7] = torch.abs(net.conv8[0].weight.data)>thres
mask[8] = torch.abs(net.conv9[0].weight.data)>thres
mask[9] = torch.abs(net.conv10[0].weight.data)>thres
mask[10] = torch.abs(net.conv11[0].weight.data)>thres
mask[11] = torch.abs(net.conv12[0].weight.data)>thres
mask[12] = torch.abs(net.conv13[0].weight.data)>thres
mask[13] = torch.abs(net.fc1[1].weight.data)>thres
mask[14] = torch.abs(net.fc2[1].weight.data)>thres
mask[15] = torch.abs(net.fc3[0].weight.data)>thres
mask[0] = mask[0].type(torch.FloatTensor)
mask[1] = mask[1].type(torch.FloatTensor)
mask[2] = mask[2].type(torch.FloatTensor)
mask[3] = mask[3].type(torch.FloatTensor)
mask[4] = mask[4].type(torch.FloatTensor)
mask[5] = mask[5].type(torch.FloatTensor)
mask[6] = mask[6].type(torch.FloatTensor)
mask[7] = mask[7].type(torch.FloatTensor)
mask[8] = mask[8].type(torch.FloatTensor)
mask[9] = mask[9].type(torch.FloatTensor)
mask[10] = mask[10].type(torch.FloatTensor)
mask[11] = mask[11].type(torch.FloatTensor)
mask[12] = mask[12].type(torch.FloatTensor)
mask[13] = mask[13].type(torch.FloatTensor)
mask[14] = mask[14].type(torch.FloatTensor)
mask[15] = mask[15].type(torch.FloatTensor)
return mask
def pruneNetwork(mask):
for child in net.children():
for param in child.conv1[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[0].cuda())
param.data = torch.mul(param.data,mask[0].cuda())
for child in net.children():
for param in child.conv2[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[1].cuda())
param.data = torch.mul(param.data,mask[1].cuda())
for child in net.children():
for param in child.conv3[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[2].cuda())
param.data = torch.mul(param.data,mask[2].cuda())
for child in net.children():
for param in child.conv4[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[3].cuda())
param.data = torch.mul(param.data,mask[3].cuda())
for child in net.children():
for param in child.conv5[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[4].cuda())
param.data = torch.mul(param.data,mask[4].cuda())
for child in net.children():
for param in child.conv6[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[5].cuda())
param.data = torch.mul(param.data,mask[5].cuda())
for child in net.children():
for param in child.conv7[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[6].cuda())
param.data = torch.mul(param.data,mask[6].cuda())
for child in net.children():
for param in child.conv8[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[7].cuda())
param.data = torch.mul(param.data,mask[7].cuda())
for child in net.children():
for param in child.conv9[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[8].cuda())
param.data = torch.mul(param.data,mask[8].cuda())
for child in net.children():
for param in child.conv10[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[9].cuda())
param.data = torch.mul(param.data,mask[9].cuda())
for child in net.children():
for param in child.conv11[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[10].cuda())
param.data = torch.mul(param.data,mask[10].cuda())
for child in net.children():
for param in child.conv12[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[11].cuda())
param.data = torch.mul(param.data,mask[11].cuda())
for child in net.children():
for param in child.conv13[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[12].cuda())
param.data = torch.mul(param.data,mask[12].cuda())
for child in net.children():
for param in child.fc1[1].parameters():
param.grad.data = torch.mul(param.grad.data,mask[13].cuda())
param.data = torch.mul(param.data,mask[13].cuda())
for child in net.children():
for param in child.fc2[1].parameters():
param.grad.data = torch.mul(param.grad.data,mask[14].cuda())
param.data = torch.mul(param.data,mask[14].cuda())
for child in net.children():
for param in child.fc3[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[15].cuda())
param.data = torch.mul(param.data,mask[15].cuda())
return
def train(epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
net.train()
end = time.time()
for batch_idx, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda is not None:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = net(inputs)
'''
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#progress_bar(batch_idx, len(train_loader), 'Loss: {loss.val:.4f} | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
progress_bar(batch_idx, len(train_loader),
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
'''
progress_bar(batch_idx, len(train_loader))
def test():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(val_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.data[0].item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(val_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*float(correct)/float(total), correct, total))
# Save checkpoint.
acc = 100.*correct/total
if args.mode == 0:
pass
else:
if acc > best_acc:
print('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
#torch.save(state, './checkpoint/ckpt_20180726.t0')
best_acc = acc
def retrain(epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
net.train()
end = time.time()
for batch_idx, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda is not None:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = net(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
#quantize()
#pruneNetwork(mask)
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#progress_bar(batch_idx, len(train_loader), 'Loss: {loss.val:.4f} | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
progress_bar(batch_idx, len(train_loader),
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def roundmax(input):
'''
maximum = 2 ** args.iwidth - 1
minimum = -maximum - 1
input = F.relu(torch.add(input, -minimum))
input = F.relu(torch.add(torch.neg(input), maximum - minimum))
input = torch.add(torch.neg(input), maximum)
'''
return input
def quant(input):
#input = torch.round(input / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
return input
mode = args.mode
if mode == 0: # only inference
test()
elif mode == 1: # mode=1 is training & inference @ each epoch
for epoch in range(start_epoch, start_epoch+num_epoch):
train(epoch)
exit()
else:
pass
| [
"byh1321@naver.com"
] | byh1321@naver.com |
15cb6d7afdc7fc7eaaeaf492f771909ea8cda285 | 833b43575815ce6c5fa8cbac2628cb774331eda7 | /chap14_p277_code1.py | ae943fb048c09744b8a7feb977edb8216aa7d722 | [] | no_license | ai-times/infinitybook_python | d9529dfe7d486bf5c713d52b530915a23cbf1812 | 1c011c31994d07fe959bba9b519c4365f5f40e7f | refs/heads/main | 2023-03-01T12:18:20.695888 | 2021-02-14T04:22:40 | 2021-02-14T04:22:40 | 338,578,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | code = input("주민번호 앞자리 입력 : ")
y = "19" + code[0:2]
m = code[2:4]
d = code[4:6]
age = 2019-int(y)+1
print("당신은", y, "년에 태어났군요.")
print("당신의 생일은", m, "월", d, "일 이군요.")
print("당신의 올해", age, "살 이군요")
| [
"wskim092@gmail.com"
] | wskim092@gmail.com |
2bd082fdd4932b32b6df338c4a8dc17d3a48077a | 328f38ab097afcdf0426b21a070c3c78ecfe6edb | /1-4.py | f19a93617c0ef7a8f68d96f214bc3d717314e355 | [] | no_license | smgmamur/Practice-Python | ab3f1130dbf0b5ec6590c61039649035190130f3 | d383b196b92b1ba62c2c678164a98fd24eb320a0 | refs/heads/master | 2021-09-06T19:08:03.390086 | 2018-02-10T04:13:28 | 2018-02-10T04:13:28 | 106,565,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | '''
Created on Oct 11, 2017
@author: mhossain
'''
i = 0
num = int(input("Enter a number to find its divisors:"))
nums_less_than_num = list(range(1,num+1))
divisors = []
while nums_less_than_num[i]<num:
if num%nums_less_than_num[i]==0:
divisors.append(nums_less_than_num[i])
i=i+1
print(divisors)
| [
"noreply@github.com"
] | noreply@github.com |
10db09bd205a4767ad04c2ad9a7ae71e296af40f | 296132d2c5d95440b3ce5f4401078a6d0f736f5a | /homeassistant/components/xiaomi_ble/sensor.py | 831b5d0910be035820e0172f6706c2b06edb2f0c | [
"Apache-2.0"
] | permissive | mezz64/home-assistant | 5349a242fbfa182159e784deec580d2800173a3b | 997d4fbe5308b01d14ceabcfe089c2bc511473dd | refs/heads/dev | 2023-03-16T22:31:52.499528 | 2022-12-08T02:55:25 | 2022-12-08T02:55:25 | 68,411,158 | 2 | 1 | Apache-2.0 | 2023-03-10T06:56:54 | 2016-09-16T20:04:27 | Python | UTF-8 | Python | false | false | 6,812 | py | """Support for xiaomi ble sensors."""
from __future__ import annotations
from typing import Optional, Union
from xiaomi_ble import DeviceClass, SensorUpdate, Units
from homeassistant import config_entries
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothDataProcessor,
PassiveBluetoothDataUpdate,
PassiveBluetoothProcessorCoordinator,
PassiveBluetoothProcessorEntity,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONDUCTIVITY,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sensor import sensor_device_info_to_hass_device_info
from .const import DOMAIN
from .device import device_key_to_bluetooth_entity_key
SENSOR_DESCRIPTIONS = {
(DeviceClass.BATTERY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.BATTERY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.BATTERY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.CONDUCTIVITY, Units.CONDUCTIVITY): SensorEntityDescription(
key=str(Units.CONDUCTIVITY),
device_class=None,
native_unit_of_measurement=CONDUCTIVITY,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.FORMALDEHYDE,
Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
): SensorEntityDescription(
key=f"{DeviceClass.FORMALDEHYDE}_{Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER}",
native_unit_of_measurement=CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.HUMIDITY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.HUMIDITY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.ILLUMINANCE, Units.LIGHT_LUX): SensorEntityDescription(
key=f"{DeviceClass.ILLUMINANCE}_{Units.LIGHT_LUX}",
device_class=SensorDeviceClass.ILLUMINANCE,
native_unit_of_measurement=LIGHT_LUX,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.MOISTURE, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.MOISTURE}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.MOISTURE,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.PRESSURE, Units.PRESSURE_MBAR): SensorEntityDescription(
key=f"{DeviceClass.PRESSURE}_{Units.PRESSURE_MBAR}",
device_class=SensorDeviceClass.PRESSURE,
native_unit_of_measurement=PRESSURE_MBAR,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.SIGNAL_STRENGTH,
Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
): SensorEntityDescription(
key=f"{DeviceClass.SIGNAL_STRENGTH}_{Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
state_class=SensorStateClass.MEASUREMENT,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.TEMPERATURE, Units.TEMP_CELSIUS): SensorEntityDescription(
key=f"{DeviceClass.TEMPERATURE}_{Units.TEMP_CELSIUS}",
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.VOLTAGE, Units.ELECTRIC_POTENTIAL_VOLT): SensorEntityDescription(
key=f"{DeviceClass.VOLTAGE}_{Units.ELECTRIC_POTENTIAL_VOLT}",
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
# Used for e.g. consumable sensor on WX08ZM
(None, Units.PERCENTAGE): SensorEntityDescription(
key=str(Units.PERCENTAGE),
device_class=None,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
}
def sensor_update_to_bluetooth_data_update(
sensor_update: SensorUpdate,
) -> PassiveBluetoothDataUpdate:
"""Convert a sensor update to a bluetooth data update."""
return PassiveBluetoothDataUpdate(
devices={
device_id: sensor_device_info_to_hass_device_info(device_info)
for device_id, device_info in sensor_update.devices.items()
},
entity_descriptions={
device_key_to_bluetooth_entity_key(device_key): SENSOR_DESCRIPTIONS[
(description.device_class, description.native_unit_of_measurement)
]
for device_key, description in sensor_update.entity_descriptions.items()
if description.native_unit_of_measurement
},
entity_data={
device_key_to_bluetooth_entity_key(device_key): sensor_values.native_value
for device_key, sensor_values in sensor_update.entity_values.items()
},
entity_names={
device_key_to_bluetooth_entity_key(device_key): sensor_values.name
for device_key, sensor_values in sensor_update.entity_values.items()
},
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: config_entries.ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Xiaomi BLE sensors."""
    # The coordinator was created by the integration setup and stored in
    # hass.data under this config entry's id.
    coordinator: PassiveBluetoothProcessorCoordinator = hass.data[DOMAIN][
        entry.entry_id
    ]
    processor = PassiveBluetoothDataProcessor(sensor_update_to_bluetooth_data_update)
    # Both registrations return teardown callbacks, which are hooked to the
    # entry so they run automatically when the config entry is unloaded.
    entry.async_on_unload(
        processor.async_add_entities_listener(
            XiaomiBluetoothSensorEntity, async_add_entities
        )
    )
    entry.async_on_unload(coordinator.async_register_processor(processor))
class XiaomiBluetoothSensorEntity(
    PassiveBluetoothProcessorEntity[
        PassiveBluetoothDataProcessor[Optional[Union[float, int]]]
    ],
    SensorEntity,
):
    """Representation of a xiaomi ble sensor."""
    @property
    def native_value(self) -> int | float | None:
        """Return the native value.

        Returns None when no value has been received yet for this entity key.
        """
        return self.processor.entity_data.get(self.entity_key)
| [
"noreply@github.com"
] | noreply@github.com |
d28eca3d054bc287a25d6c77fb244f87cf085ec2 | a8ce50df43eb06efe284e0a5f58bad1ba29db95e | /VaccinationSpots.py | 896ec55b65509194479b2ea6e4af362a76297171 | [] | no_license | RohitDashora/vaccineFinder | 052a983d0655fce7c10ad2fbf70fccacfc281e4f | 234d7b564c354f2b1b5083686b820d59462e52c5 | refs/heads/main | 2023-05-30T07:35:42.315273 | 2021-06-06T19:48:18 | 2021-06-06T19:48:18 | 364,281,980 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | ### core login, now distributed, this file will be removed
import argparse
import requests
import json
import datetime
from datetime import date
from datetime import timedelta
# Command-line interface; argparse also generates the --help text.
ap= argparse.ArgumentParser()
ap.add_argument("pincode", help="Pincode of your location")
ap.add_argument("agelimit", help="Age limit, choose for 18 or 45", type=int, choices=[18,45])
ap.add_argument("--date", help="Date when you are looking for vaccination [format dd-mm-yyyy] by default its today's date and onwards")
args = ap.parse_args()
# Top-level configuration derived from the parsed arguments.
ageLimit=args.agelimit
pincode=args.pincode
# CoWIN public API: weekly session calendar for a given PIN code.
endpoint = 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?'
# A browser-like User-Agent is required; the API treats default clients as unauthenticated.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def pollURL(endpoint, pincode, date):
    """Call the CoWIN calendarByPin endpoint and return the raw response.

    :param endpoint: base URL ending in '?', ready for query parameters.
    :param pincode: area PIN code to search (string).
    :param date: start date in dd-mm-yyyy format.
    :return: the requests.Response; the caller inspects status_code / json().
    """
    url = f"{endpoint}pincode={pincode}&date={date}"
    # requests has no default timeout; without one a stalled API would hang
    # the script forever.
    response = requests.get(url, headers=headers, timeout=30)
    return response
if __name__ == '__main__':
    slot_list =[]
    # Query from today onwards (the optional --date argument is currently
    # unused; the API endpoint returns a calendar starting at this date).
    date2=date.today().strftime("%d-%m-%Y")
    response = pollURL(endpoint, pincode, date2)
    if response.status_code == 200:
        vacdata=response.json()
        center_count =len(vacdata["centers"])
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # center index. It still works because the outer `for` reassigns `i`
        # on each iteration, but distinct names would be clearer.
        for i in range(0,center_count):
            sessions = vacdata["centers"][i]["sessions"]
            location = vacdata["centers"][i]["name"]
            freepaid =vacdata["centers"][i]["fee_type"]
            for i in range(len(sessions)):
                session_list=[]
                min_age_limit =sessions[i]["min_age_limit"]
                available=sessions[i]["available_capacity"]
                slot= sessions[i]["slots"]
                # Keep only sessions matching the requested age group that
                # still have open capacity. (Test only `available > 0` here
                # to list every open session regardless of age limit.)
                if min_age_limit == ageLimit and available > 0 :
                    session_list.append(sessions[i]["date"])
                    session_list.append(location)
                    session_list.append(freepaid)
                    session_list.append(min_age_limit)
                    session_list.append(sessions[i]["vaccine"])
                    session_list.append(slot)
                    session_list.append(available)
                    slot_list.append(session_list)
        # One matching session per line.
        print(*slot_list, sep='\n')
    else:
        print("Error calling the API- "+response.reason)
        exit()
| [
"rohitdashora@gmail.com"
] | rohitdashora@gmail.com |
5d5765059765b74e4cfcfc5ac46ced0437562b18 | a633ffc4cf287c4799843f8480a4bb804432c2f0 | /makecsv.py | 72821f81037ead2d743d1d777c2fe5dd87be3fcb | [] | no_license | josieFoo/webscrapper | d195e8a19230c8d209f03a8231b7f76c3e71ade5 | 999a8a04099c466b08a34f3897d2ef8015ffdf8a | refs/heads/master | 2023-04-03T14:37:22.458373 | 2021-05-04T19:35:38 | 2021-05-04T19:35:38 | 364,366,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import csv
def save_to_file(tuples):
    """Write scraped courses to "courses.csv" with a title,link header row.

    :param tuples: iterable of dicts whose values are (title, link) pairs;
                   dict insertion order is relied upon, as in the original.
    """
    # A context manager guarantees the file is flushed and closed even on
    # error (the original opened the file and never closed it). newline=""
    # lets the csv module manage line endings, avoiding doubled \r on Windows.
    with open("courses.csv", mode="w", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(["title", "link"])
        for t in tuples:
            writer.writerow(list(t.values()))
"65567887+josieFoo@users.noreply.github.com"
] | 65567887+josieFoo@users.noreply.github.com |
984ba1aebcc881466e7f57859ef9a5ae29fb6614 | 0deba0f28b36ab39df4bdb2ed9fca9fedf638a91 | /lovegov/dev_manage.py | 5cd2e1704b13f49a4ab804a846b49263dca6959d | [] | no_license | mhfowler/LoveGov | 080e2c8fedcc3ad7b5d008787921558cfd86a2bf | a77c9d968059361fe09409d46568afe86239e3ba | HEAD | 2016-09-06T06:03:34.289383 | 2014-09-04T15:32:39 | 2014-09-04T15:32:39 | 4,600,457 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from django.core.management import execute_manager
import imp
# Fail fast with a helpful message if the settings module is missing.
# (Legacy Django <1.4 manage.py layout; `imp` and `execute_manager` are
# deprecated in later Python/Django versions.)
try:
    imp.find_module('dev_settings')
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'dev_settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import dev_settings
if __name__ == "__main__":
    # Hand control to Django's management-command dispatcher.
    execute_manager(dev_settings)
"jsgreenf@gmail.com"
] | jsgreenf@gmail.com |
27a49544c7c1b8f8f550a76bdb9f95675a635c6a | 3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99 | /python-codes/m3_curso_em_video_estruturas_compostas/ex101.0.py | b156f239ea79ed513ea7696f940a01732d28e535 | [
"MIT"
def voto(ano_nascimento):
    """Print and return the Brazilian voting status for a birth year.

    :param ano_nascimento: four-digit birth year.
    :return: the status message that was printed. (The original returned the
             result of print(), i.e. always None; returning the message keeps
             the printed output identical while making the result usable.)

    Rules: under 16 -> denied; 16-17 or 70+ -> optional; 18-69 -> mandatory.
    """
    from datetime import date
    idade = date.today().year - ano_nascimento
    if idade < 16:
        mensagem = f"Com {idade} anos: VOTO NEGADO"
    elif idade < 18 or idade >= 70:
        # Reaching here implies idade >= 16, so this covers 16-17 and 70+.
        mensagem = f"Com {idade} anos: VOTO OPCIONAL"
    else:
        # The remaining range is exactly 18-69.
        mensagem = f"Com {idade} anos: VOTO OBRIGATÓRIO"
    print(mensagem)
    return mensagem
# Simple CLI driver: separator line, then prompt for the birth year.
print('--'*10)
voto(int(input("Em que ano você nasceu? ")))
| [
"lucasportellaagu@gmail.com"
] | lucasportellaagu@gmail.com |
c87621bd581d363337b17836bd4c0148c9a0d81b | 3df711ca560ed1d5da78ef6a30babf1e5500ec96 | /posts/migrations/0001_initial.py | ae463b455cb684bba71f6242a1c3d9848d0ea1d1 | [
"MIT"
] | permissive | alejandrowd/platzigram-github | 3a7da395e99a5daa7a4ac1553375516a7c45885f | 87056629dc7c196281330f02fc4e830b06acef2e | refs/heads/master | 2020-04-21T18:43:29.170782 | 2019-02-11T22:53:38 | 2019-02-11T22:53:38 | 169,781,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | # Generated by Django 2.0.7 on 2019-01-17 00:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: create the Post model (title, photo, timestamps, FKs)."""
    # Auto-generated by Django makemigrations; avoid hand-editing beyond comments.
    initial = True
    dependencies = [
        ('users', '0002_auto_20190116_1901'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('photo', models.ImageField(upload_to='posts/photos')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                # Each post belongs to a profile and to the (swappable) auth user.
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Profile')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"arguellowalter@gmail.com"
] | arguellowalter@gmail.com |
2f4a30b005a855598d009b9a2f39db016da46006 | a8ab1fe9859f8d790af0f685bcadffdd39707354 | /main.py | bd2912c06f9621996a792267bdc5b08602dd2a86 | [] | no_license | Nigel-Ernest/PDF_to_Speech | 5e530b2fe2b940717ff93ad19a1d1a9dc46a4fc9 | 7bd25952f6d1e793994449741cf10dccf8c4e827 | refs/heads/main | 2023-01-04T01:32:31.793960 | 2020-10-30T13:18:10 | 2020-10-30T13:18:10 | 308,634,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import pyttsx3
import PyPDF2
# Read the PDF and speak it aloud, one page at a time.
# A context manager keeps the file open while PyPDF2 reads pages and
# guarantees it is closed afterwards (the original never closed `book`).
with open('The_Art_Of_War.pdf', 'rb') as book:
    pdfReader = PyPDF2.PdfFileReader(book)
    pages = pdfReader.numPages
    print(pages)
    speaker = pyttsx3.init()
    speaker.setProperty('rate', 150)    # words per minute
    speaker.setProperty('volume', 0.7)  # 0.0 - 1.0
    voice_id = "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0"
    # Use female voice (Windows SAPI5 registry token for MS Zira).
    speaker.setProperty('voice', voice_id)
    # Pages 0-2 are front matter; speak the rest. (The original fetched each
    # page twice into an unused `page1` and left an unused `voices` variable.)
    for num in range(3, pages):
        page = pdfReader.getPage(num)
        text = page.extractText()
        speaker.say(text)
        speaker.runAndWait()
"noreply@github.com"
] | noreply@github.com |
273fd7b6034f9f22838ca96b6da75d9ec2de89ea | 4d4efbfed965c4f61d1a769c73aebd2ac84bd3a2 | /Unidad 3/ejercicio13.py | ee7e9fb1a4ccb47d28fc23bedc89948a47938e4d | [] | no_license | JoeMaurice88/testayed2021 | 985275d93570578ee5a5f832e24d083c7f86e0de | 8a4216df4e2d2cc423a918a9b8eaa2f1d1a5e4f8 | refs/heads/main | 2023-07-27T18:23:51.976812 | 2021-09-09T23:44:03 | 2021-09-09T23:44:03 | 404,898,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | """Ejercicio (Ahora con flag)**
El programa debe:
* pedir un dato al usuario
* solo en caso que este escriba la palabra clave "python" imprimir por pantalla "Correcto",
en caso contrario debe seguir pidiendo el dato
* no deben aparecer errores."""
# Keep prompting until the user enters the keyword "python" (any casing);
# a boolean flag controls the loop instead of break.
keep_asking = True
while keep_asking:
    dato_1 = input("Ingrese un dato: ").lower()
    if dato_1 == "python":
        print("Correcto")
        keep_asking = False
    else:
        print("Es incorrecto")
"joelmaurice@iresm.edu.ar"
] | joelmaurice@iresm.edu.ar |
bc8bc46623f89ed25fa9f05d0c589a967a1bcd30 | bb68bb93b4c587bbe898fa968c170ecf7abe861f | /CheckValidCGRs_and_Uniqueness.py | 6b037039f54d39e85241453d2a21f907761f8214 | [] | no_license | phquanta/CGRSmiles | 701b3ecc806982ba2fb2f694f5f0fa180853ace3 | 287f94dc6d4522d720b82f5ba3045a0423ebe846 | refs/heads/main | 2023-03-31T06:08:05.733237 | 2021-04-01T15:27:33 | 2021-04-01T15:27:33 | 353,736,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,864 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 19:47:32 2020
@author: phquanta
"""
import rdkit
from rdkit.Chem import AllChem
from rdkit import Chem, RDConfig
from rdkit.Chem import MolFromSmiles, AllChem
from rdkit.Chem.rdChemReactions import PreprocessReaction
import CGRtools as cgr
from pathlib import Path
import numpy as np
from io import StringIO
import os
import pickle
#reactionsFile="/mnt/hgfs/H/DataSets/USPTO/USPTO_AllReactions_SMARTS.smi"6
#reactionsFile="train.txt"
#reactionsFile="/mnt/hgfs/H/DataSets/JinsData/test1.smi"
reactionsFile="CGRSmiles156.smi"
#CGRSmileFileOut="CGRSmiles156.smi"
#CGRSSmilesGenerated=["RNNGeneratedReactioins.dat","RNNGeneratedReactioinsStartingOO.dat","BIMODAL_Fixed_Aug1_T_07.smi"]
CGRSSmilesGenerated=["RNNGeneratedReactioins.dat","RNNGeneratedReactioinsStartingOO.dat","..\\CleanData\\RNN_5LSTM_512Each_138Epochs_And_BiMODAL_Fixed\\General_WithBIMODAL_CGRsGenSMILE_2_Clean.smi", "GeneralGeneratedReactioinsStarting_AfterFineTuningOn_OO_Reactions.smi","BiModaL_Aug1_FineTuned_OO.smi","BiModaL_Aug1_FineTuned_OO_1f_ep4.smi","BiModal_30K_Generated_FromGuelphComputer_256.smi","BiModaL_Aug5_FineTuned_OO_Guelph_1f_ep19.smi"]
CGRFineTuneOO="FineTuneCGR_OO.smi"
#CGRSSmilesGenerated=["BiModal_30K_Generated_FromGuelphComputer_256.smi"]
#CGRSSmilesGenerated=["RNNGeneralGeneratedReactioinsNew.dat","RNNGeneralGeneratedReactioinsStartingOONew.dat","..\\CleanData\\RNN_5LSTM_512Each_138Epochs_And_BiMODAL_Fixed\\General_WithBIMODAL_CGRsGenSMILE_2_Clean.smi"]
#CGRSSmilesGenerated=["BIMODAL_Fixed_Aug1_T_07.smi"]
# Per-generated-file counts of valid CGRs (set/list based, plus percentages
# computed against n_of_generatedCGRs further below).
lengthCorrect=[0 for x in range(len(CGRSSmilesGenerated))]
lengthCorrectPercentage=[0 for x in range(len(CGRSSmilesGenerated))]
lengthCorrect_l=[0 for x in range(len(CGRSSmilesGenerated))]
lengthCorrectPercentage_l=[0 for x in range(len(CGRSSmilesGenerated))]
# When False, previously pickled parse results are loaded instead of re-parsing.
notPickled=True
notPickledTrained=False
# Number of CGR SMILES sampled from the generative model (denominator for %).
n_of_generatedCGRs=30000.
def isValidReaction(reactionObj):
    """Check that the species of a decomposed reaction are chemically sane.

    Every reactant, product and reagent is kekulized and checked for valence
    errors and aromatic-ring (Thiele) consistency via CGRtools. Returns False
    immediately if any species raises during the checks.

    NOTE(review): `valid` flips to True as soon as *one* species passes, so a
    reaction is accepted even if other species fail the valence/Thiele test
    without raising — confirm whether "all species must pass" was intended.
    Also note the typo in the error message ("isValidReactrion").
    """
    valid=False
    # Collect every species taking part in the reaction.
    reactionMols=list(reactionObj.reactants)
    reactionMols.extend(reactionObj.products)
    reactionMols.extend(reactionObj.reagents)
    for i in reactionMols:
        try:
            i.kekule()
            lst=i.check_valence()  # empty list -> no valence problems
            if len(lst)==0 and i.check_thiele():
                valid=True
        except Exception as e:
            print("ERROR IN isValidReactrion")
            return False
    return valid
def getReactionCentersDictionary(CGRSmiles,cgrs,Verbose=False):
    """Collect the unique reaction centers from a list of CGR objects.

    :param CGRSmiles: CGR SMILES strings (used only for the Verbose printout).
    :param cgrs: CGRtools CGRContainer objects to analyse.
    :param Verbose: print per-CGR debug information.
    :return: dict mapping reaction-center hash -> reaction-center query string;
             identical centers across reactions collapse to one entry.
    """
    rcs={}
    for n,cgr_obj in enumerate(cgrs):
        try:
            # Split the condensed graph back into a reaction and keep only
            # chemically valid ones.
            reactionObj = cgr.ReactionContainer.from_cgr(cgr_obj)
            isValid=isValidReaction(reactionObj)
            if isValid:
                rC=cgr_obj.centers_list
                for i in range(len(rC)):
                    # Extract each reaction center as a query substructure;
                    # its hash is used to deduplicate identical centers.
                    rc1 = cgr_obj.substructure(rC[i], as_query=True)
                    rcs[rc1.__hash__()]=str(rc1)
                    if len(rC)>1:
                        if Verbose:
                            print(rC," ----- ", rC[i],str(rc1))
                # Progress report every 10 processed CGRs.
                if (n%10)==0 and n>1:
                    print(f"Done : {n} Smiles, len(RCS): {len(rcs)} ")
            else:
                print("invalidDDDD")
            if Verbose:
                print("###################")
                print("")
                print(str(cgr_obj))
                print(str(cgr_obj) in CGRSmiles)
                print("#############")
        except StopIteration:
            print("########### STOP ITERATION or END of LOOP ##################")
        except Exception as e:
            # Best effort: one bad CGR must not abort the whole scan.
            print("########### SOME ERROR Detected ##################")
            print(e)
            print("Error")
    return rcs
def getCGRs(reactionsFile):
    """Read reactions/CGRs from a SMILES file and keep the valid ones.

    Each record is turned into a CGRContainer (plain reactions are
    standardized and condensed with the unary ~ operator; records that are
    already CGRs are used as-is), decomposed back into a reaction, and kept
    only if isValidReaction accepts it.

    :return: [set of CGR SMILES, set of CGRContainers,
              list of CGR SMILES, list of CGRContainers]
             (the sets deduplicate; the lists preserve order/multiplicity).
    """
    generator=cgr.files.SMILESRead(reactionsFile)
    ll=set()
    cgrObjs=set()
    ll1=[]
    cgrObjs1=[]
    length=[]
    cnt=0
    cntAll=0
    while True:
        try:
            nextReactionObj=next(generator)
            cntAll+=1
            if type(nextReactionObj)!=cgr.containers.cgr.CGRContainer:
                # Record is a plain reaction: standardize it and condense it
                # into a CGR with the ~ operator.
                nextReactionObj.standardize()
                cgrContainer=~nextReactionObj
                decomposed = cgr.ReactionContainer.from_cgr(cgrContainer)
                if isValidReaction(decomposed):
                    cnt+=1
                    if cnt%2==0:
                        print(f"done {cnt} SMILES From TRAIN.TXT")
                    nextReactionStr=str(cgrContainer)
                    ll.add (nextReactionStr)
                    cgrObjs.add(cgrContainer)
                    ll1.append(nextReactionStr)
                    cgrObjs1.append(cgrContainer)
                    print()
            else:
                # Record is already a CGR container.
                cgrContainer=nextReactionObj
                decomposed = cgr.ReactionContainer.from_cgr(cgrContainer)
                if isValidReaction(decomposed):
                    cnt+=1
                    if cnt%2==0:
                        print(f"done {cnt} SMILES FROM CONVERTED CGR FILE ALREADY")
                    nextReactionStr=str(cgrContainer)
                    ll.add (nextReactionStr)
                    cgrObjs.add(cgrContainer)
                    ll1.append(nextReactionStr)
                    cgrObjs1.append(cgrContainer)
                    print(nextReactionStr)
                else:
                    continue
        except StopIteration:
            # End of file: leave the read loop.
            print("########### STOP ITERATION ##################")
            print(cnt)
            break
        except Exception as e:
            # Skip unparsable/broken records and keep reading.
            print("########### SOME ERROR ##################")
            print()
            print(e)
            print("Error")
            continue
    return [ll,cgrObjs,ll1,cgrObjs1]
cgrGen=[]
objGen=[]
Unique=[]
cgrGen_l=[]
objGen_l=[]
Unique_l=[]
if notPickledTrained:
[CGRs,objs,_,_]=getCGRs(reactionsFile)
with open('CGRsTrainNew.pkl', 'wb') as f:
pickle.dump(CGRs, f)
with open('CGRsTrainObjsNew.pkl', 'wb') as f:
pickle.dump(objs, f)
with open('CGRTrainObjsSMILENew_Clean.smi', 'w') as f:
for item in CGRs:
f.write("%s\n" % item)
else:
CGRs = pickle.load( open( "CGRsTrainNew.pkl", "rb" ) )
objs = pickle.load( open( "CGRsTrainObjsNew.pkl", "rb" ) )
if notPickled:
for i,fn in enumerate(CGRSSmilesGenerated):
print(fn)
[cgrR,obj,cgrR_l,obj_l]=getCGRs(fn)
with open(f'CGRsGenNew_{i}.pkl', 'wb') as f:
pickle.dump(cgrR, f)
with open(f'CGRsGenObjsNew_{i}.pkl', 'wb') as f:
pickle.dump(obj, f)
with open(f'CGRsGenSMILENew_{i}_Clean.smi', 'w') as f:
for item in cgrR:
f.write("%s\n" % item)
cgrGen_l.append(cgrR_l)
objGen_l.append(obj_l)
cgrGen.append(cgrR)
objGen.append(obj)
lengthCorrect[i]=len(cgrR)
lengthCorrect_l[i]=len(cgrR_l)
else:
for i,fn in enumerate(CGRSSmilesGenerated):
cgrR=pickle.load( open( f'CGRsGenNew_{i}.pkl', "rb" ) )
obj=pickle.load( open( f'CGRsGenObjsNew_{i}.pkl', "rb" ) )
cgrGen.append(cgrR)
objGen.append(obj)
lengthCorrect[i]=len(cgrR)
lengthCorrect_l[i]=len(cgrR_l)
CGRs=list(CGRs)
objs=list(objs)
[CGRsOO,objsOO,_,_]=getCGRs(CGRFineTuneOO)
CGRsOO=list(CGRsOO)
objsOO=list(objsOO)
for i,(cg,ob) in enumerate(zip(cgrGen,objGen)):
print(i)
cgrGen[i]=list(cg)
objGen[i]=list(ob)
lengthCorrectPercentage=[x/n_of_generatedCGRs*100. for x in lengthCorrect]
lengthCorrectPercentage_l=[x/n_of_generatedCGRs*100. for x in lengthCorrect_l]
for lst in cgrGen:
Unique.append([x for x in lst if x not in CGRs])
rcsAll=getReactionCentersDictionary(CGRs,objs)
rcsAllOO=getReactionCentersDictionary(CGRsOO,objsOO)
rcsGen=[{} for i in range(len(CGRSSmilesGenerated))]
####################### For Depicting Pics from Generated OO reactions #########################
rcsOOreactionsDataset=[]
for i in objsOO:
if isValidReaction(cgr.ReactionContainer.from_cgr(i)):
rcsOOreactionsDataset.append([str(i),str(cgr.ReactionContainer.from_cgr(i))])
rcsGenOO={}
reactionGensOO=[]
reactionGensOOAll=[]
reactionGensOOAll1=[]
cgrSGenOO=[]
#pp=7
pp=7
elementsOO=[]
for elem,sm in zip(objGen[pp],cgrGen[pp]):
#print(sm,str(elem))
reaction_center=getReactionCentersDictionary([sm],[elem])
keys=[z for z in reaction_center.keys()]
if isValidReaction(cgr.ReactionContainer.from_cgr(elem)):
if 'OO' in str(cgr.ReactionContainer.from_cgr(elem)):
if elem not in objsOO:
reactionGensOOAll.append(str(cgr.ReactionContainer.from_cgr(elem)))
reactionGensOOAll1.append([str(cgr.ReactionContainer.from_cgr(elem)),sm])
if len(keys)==1 and keys[0] not in rcsAllOO:
rcsGenOO.update(reaction_center)
if isValidReaction(cgr.ReactionContainer.from_cgr(elem)):
#print(type(elem))
#elem.clean2d()
elementsOO.append(elem)
reactionGensOO.append([keys[0],str(cgr.ReactionContainer.from_cgr(elem)),sm])
#reactionGensOO.append([keys[0],str(cgr.ReactionContainer.from_cgr(elem))])
cgrSGenOO.append([keys[0],sm])
print(len(rcsGenOO))
##################### End of Depicting
pp=2
reactionGensOO_fromAllGenerated=[]
reactionGensOO_fromAllGenerated1=[]
reactionGensOO_fromAllGenerated_RCS={}
reactionGensOO_fromAllGenerated_RCS1=[]
elementsOO_fromAllGenerated=[]
for elem,sm in zip(objGen[pp],cgrGen[pp]):
#print(sm,str(elem))
reaction_center=getReactionCentersDictionary([sm],[elem])
keys=[z for z in reaction_center.keys()]
if isValidReaction(cgr.ReactionContainer.from_cgr(elem)):
if 'OO' in str(cgr.ReactionContainer.from_cgr(elem)):
if elem not in objsOO:
reactionGensOO_fromAllGenerated.append(str(cgr.ReactionContainer.from_cgr(elem)))
reactionGensOO_fromAllGenerated1.append([str(cgr.ReactionContainer.from_cgr(elem)),sm])
print(len(reactionGensOO_fromAllGenerated))
if len(keys)==1 and keys[0] not in rcsAllOO and 'OO' in str(cgr.ReactionContainer.from_cgr(elem)):
reactionGensOO_fromAllGenerated_RCS.update(reaction_center)
if isValidReaction(cgr.ReactionContainer.from_cgr(elem)):
#print(type(elem))
#elem.clean2d()
elementsOO_fromAllGenerated.append(elem)
reactionGensOO_fromAllGenerated_RCS1.append([keys[0],str(cgr.ReactionContainer.from_cgr(elem)),sm])
#reactionGensOO.append([keys[0],str(cgr.ReactionContainer.from_cgr(elem))])
#cgrSGenOO.append([keys[0],sm])
print(len(reactionGensOO_fromAllGenerated_RCS1))
reactionsOO_withS=[]
for i in reactionGensOOAll1:
if 'S' in i[0] and len(i[0])<70:
reactionsOO_withS.append(i)
reactionsOO_withI=[]
for i in reactionGensOOAll1:
#if '1' in i[0] and 'O=O' not in i[0] and len(i[0])<150:
# if '.O' not in i[0] and '.O.' not in i[0] and len(i[0])<150:
if 'NH4+' in i[0] and 'NH3+' not in i[0] and len(i[0])<50:
reactionsOO_withI.append(i)
for i in range(len((CGRSSmilesGenerated))):
for elem,sm in zip(objGen[i],cgrGen[i]):
#print(sm,str(elem))
reaction_center=getReactionCentersDictionary([sm],[elem])
keys=[z for z in reaction_center.keys()]
if len(keys)==1 and keys[0] not in rcsAll:
rcsGen[i].update(reaction_center)
print(len(rcsGen[i]))
if False:
# for lst in cgrGen:
# Unique.append([x for x in lst if x not in CGRs])
from rdkit.Chem import Draw
cntt=0
cnttAll=0
for x in objs:
# try:
decomposed = cgr.ReactionContainer.from_cgr(x)
cnttAll+=1
# decomposed.explicify_hydrogens();e
print(str(decomposed))
m = AllChem.ReactionFromSmarts(str(decomposed))
if m is not None:
cntt+=1
Draw.ReactionToImage(m)
print(cntt)
# decomposed.clean2d()
#decomposed
# except:
# pass
#if notPickled:
| [
"phquanta@gmail.com"
] | phquanta@gmail.com |
19cc60d4e3a124fb7b5614ada71255c241c7ebaa | b8d05aa6db8c1f43f431480f7952a49949d45b53 | /model/init_net.py | 600b7769496114f25e0f4cc0bd970b11672076ce | [] | no_license | tonggege001/MyGAN | 4ca184ac7a456d1cde732da29dfb85e5fab507a4 | 5742bdac7e844d06b67387d1fd8bc16347a4cd55 | refs/heads/main | 2023-05-10T23:35:56.580537 | 2021-05-31T12:35:41 | 2021-05-31T12:35:41 | 370,544,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | import torch
import torch.nn as nn
from torch.nn import init
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if gpu_ids:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
| [
"wontun12@gmail.com"
] | wontun12@gmail.com |
a69a5ede8bc3f3237d149df470385eda0dce6cb6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YLf984Eod74ha4Tok_9.py | 8d3ff278a5843fa0485c8620003772aaf0edbc8e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | """
In a calendar year, it is exactly 365.25 days. But, eventually, this will lead
to confusion because humans normally count by exact divisibility of 1 and not
with decimal points. So, to avoid the latter, it was decided to add up all
0.25 days every four-year cycle, make that year to sum up to 366 days
(including February 29 as an intercalary day), thus, called a **leap year**
and aside the other years of the four-year cycle to sum up to 365 days, **not
a leap year**.
In this challenge, (though quite repetitive), we'll take it to a new level,
where, you are to determine if it's a leap year or not without the use of the
**datetime** class, **if blocks** , **if-elif blocks** , **conditionals** (`a
if b else c`) nor the logical operators **AND** (`and`) and **OR** (`or`) with
the exemption of the **NOT** (`not`) operator.
Return `True` if it's a leap year, `False` otherwise.
### Examples
leap_year(1979) ➞ False
leap_year(2000) ➞ True
leap_year(2016) ➞ True
leap_year(1521) ➞ False
leap_year(1996) ➞ True
leap_year(1800) ➞ False
### Notes
You can't use the **datetime** class, **if statements** in general, the
**conditional** nor the **logical operators** (`and`, `or`).
"""
def leap_year(yr):
return yr%400 == 0 if not yr%100 else yr%4 == 0
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e7512b4e43e0611ca740b20d2048d4b3495e4361 | e4a9e84cffa74cc56e2a6647e1fc9ae7d607adaa | /BackEnd/apps/a_common/scheme.py | a24b1f2d2c1dee0ab65e9394fa00004a8cc1428e | [
"MIT"
] | permissive | lyleshaw/Program-Practice | 1be643b8516300065902d644be99686edd6b4f5e | 84cdc4fd2fd353974ea5b8cd12a01d847db3cac3 | refs/heads/master | 2023-05-14T11:48:04.568317 | 2021-06-06T02:45:05 | 2021-06-06T02:45:05 | 369,116,923 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import abc
from typing import List
from fastapi import Query
from pydantic import BaseModel, Field
from apps.a_common.constants import Sex
"""
放一些通用的验证字段,比如手机号,邮编,身份证
"""
PhoneField = Field(..., regex=r"^1(3[0-9]|5[0-3,5-9]|7[1-3,5-8]|8[0-9]|9[0-9])\d{8}$", description="手机号码,带有正则验证", title="手机号码")
IDCardField = Field(..., regex=r'^[1-9]\d{5}(18|19|20)\d{2}((0[1-9])|(1[0-2]))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx]$', description="身份证号,带有正则验证", title="身份证号")
class ParamsBase:
def __init__(self, **kwargs):
self.__dict__.update(**kwargs)
class PageInfo(ParamsBase):
page_id: int
page_size: int
def PageInfo_(page_id: int = Query(1, ge=1), page_size: int = Query(20, ge=1, le=50)) -> PageInfo:
return PageInfo(page_id=page_id, page_size=page_size)
class _CommonlyUsedUserSearch(BaseModel, abc.ABC):
sex: int = None
keyword_name: str = ''
user_identity: int = None
role_id: int = None
class CommonlyUsedUserSearch(_CommonlyUsedUserSearch, ParamsBase):
pass
def CommonlyUsedUserSearch_(sex: int = Query(None, ge=Sex.MALE, le=Sex.FEMALE),
keyword_name: str = '',
user_identity: int = None,
role_id: int = None) -> CommonlyUsedUserSearch:
return CommonlyUsedUserSearch(sex=sex, keyword_name=keyword_name, user_identity=user_identity, role_id=role_id)
class UserSubSearch(_CommonlyUsedUserSearch):
user_id_list: List[int] = []
exclude_user_id_list: List[int] = []
| [
"x@lyleshaw.com"
] | x@lyleshaw.com |
2918cf510c1ddfe401a31a9ef624cd06e9c23e25 | edb6545500e39df9c67aa918a6125bffc8ec1aee | /src/prompt_toolkit/layout/screen.py | 2b58272ca1cc52d588539ac551aa16498dd12b20 | [
"BSD-3-Clause"
] | permissive | repnzscasb/python-prompt-toolkit | 2681716b0e10ef816228091a19700d805ec0f4d9 | da05f669d00817655f76b82972272d4d5f4d4225 | refs/heads/master | 2022-12-22T09:34:26.426466 | 2022-12-06T22:35:54 | 2022-12-06T22:35:54 | 148,856,050 | 0 | 0 | BSD-3-Clause | 2018-09-15T00:53:56 | 2018-09-15T00:53:56 | null | UTF-8 | Python | false | false | 10,241 | py | from collections import defaultdict
from typing import TYPE_CHECKING, Callable, DefaultDict, Dict, List, Optional, Tuple
from prompt_toolkit.cache import FastDictCache
from prompt_toolkit.data_structures import Point
from prompt_toolkit.utils import get_cwidth
if TYPE_CHECKING:
from .containers import Window
__all__ = [
"Screen",
"Char",
]
class Char:
    """
    Represent a single character in a :class:`.Screen`.
    This should be considered immutable.
    :param char: A single character (can be a double-width character).
    :param style: A style string. (Can contain classnames.)
    """
    __slots__ = ("char", "style", "width")
    # If we end up having one of these special control sequences in the input string,
    # we should display them as follows:
    # Usually this happens after a "quoted insert".
    display_mappings: Dict[str, str] = {
        "\x00": "^@", # Control space
        "\x01": "^A",
        "\x02": "^B",
        "\x03": "^C",
        "\x04": "^D",
        "\x05": "^E",
        "\x06": "^F",
        "\x07": "^G",
        "\x08": "^H",
        "\x09": "^I",
        "\x0a": "^J",
        "\x0b": "^K",
        "\x0c": "^L",
        "\x0d": "^M",
        "\x0e": "^N",
        "\x0f": "^O",
        "\x10": "^P",
        "\x11": "^Q",
        "\x12": "^R",
        "\x13": "^S",
        "\x14": "^T",
        "\x15": "^U",
        "\x16": "^V",
        "\x17": "^W",
        "\x18": "^X",
        "\x19": "^Y",
        "\x1a": "^Z",
        "\x1b": "^[", # Escape
        "\x1c": "^\\",
        "\x1d": "^]",
        "\x1e": "^^",
        "\x1f": "^_",
        "\x7f": "^?", # ASCII Delete (backspace).
        # Special characters. All visualized like Vim does.
        "\x80": "<80>",
        "\x81": "<81>",
        "\x82": "<82>",
        "\x83": "<83>",
        "\x84": "<84>",
        "\x85": "<85>",
        "\x86": "<86>",
        "\x87": "<87>",
        "\x88": "<88>",
        "\x89": "<89>",
        "\x8a": "<8a>",
        "\x8b": "<8b>",
        "\x8c": "<8c>",
        "\x8d": "<8d>",
        "\x8e": "<8e>",
        "\x8f": "<8f>",
        "\x90": "<90>",
        "\x91": "<91>",
        "\x92": "<92>",
        "\x93": "<93>",
        "\x94": "<94>",
        "\x95": "<95>",
        "\x96": "<96>",
        "\x97": "<97>",
        "\x98": "<98>",
        "\x99": "<99>",
        "\x9a": "<9a>",
        "\x9b": "<9b>",
        "\x9c": "<9c>",
        "\x9d": "<9d>",
        "\x9e": "<9e>",
        "\x9f": "<9f>",
        # For the non-breaking space: visualize like Emacs does by default.
        # (Print a space, but attach the 'nbsp' class that applies the
        # underline style.)
        "\xa0": " ",
    }
    def __init__(self, char: str = " ", style: str = "") -> None:
        # If this character has to be displayed otherwise, take that one.
        if char in self.display_mappings:
            if char == "\xa0":
                style += " class:nbsp " # Will be underlined.
            else:
                style += " class:control-character "
            char = self.display_mappings[char]
        self.char = char
        self.style = style
        # Calculate width. (We always need this, so better to store it directly
        # as a member for performance.)
        self.width = get_cwidth(char)
    # In theory, `other` can be any type of object, but because of performance
    # we don't want to do an `isinstance` check every time. We assume "other"
    # is always a "Char".
    def _equal(self, other: "Char") -> bool:
        # `width` is derived from `char`, so comparing char + style suffices.
        return self.char == other.char and self.style == other.style
    def _not_equal(self, other: "Char") -> bool:
        # Not equal: We don't do `not char.__eq__` here, because of the
        # performance of calling yet another function.
        return self.char != other.char or self.style != other.style
    # Bind the fast helpers as the real dunder methods at runtime only, so
    # static type checkers still see object's standard __eq__/__ne__ signature.
    if not TYPE_CHECKING:
        __eq__ = _equal
        __ne__ = _not_equal
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.char!r}, {self.style!r})"
# Bounded shared cache of Char instances: identical (char, style) pairs map to
# the same Char object, avoiding repeated construction and width computation.
_CHAR_CACHE: FastDictCache[Tuple[str, str], Char] = FastDictCache(
    Char, size=1000 * 1000
)
# Style string used for the screen's default (untouched) cells.
Transparent = "[transparent]"
class Screen:
    """
    Two dimensional buffer of :class:`.Char` instances.

    Rows and columns are materialized lazily: ``data_buffer[y][x]`` always
    succeeds, returning the default character for cells that were never
    written to.
    """
    def __init__(
        self,
        default_char: Optional[Char] = None,
        initial_width: int = 0,
        initial_height: int = 0,
    ) -> None:
        if default_char is None:
            # Transparent default lets content of lower layers show through.
            default_char2 = _CHAR_CACHE[" ", Transparent]
        else:
            default_char2 = default_char
        # y -> (x -> Char); unwritten cells yield `default_char2` (captured
        # in the lambda closure, hence the separate local name).
        self.data_buffer: DefaultDict[int, DefaultDict[int, Char]] = defaultdict(
            lambda: defaultdict(lambda: default_char2)
        )
        #: Escape sequences to be injected.
        self.zero_width_escapes: DefaultDict[int, DefaultDict[int, str]] = defaultdict(
            lambda: defaultdict(lambda: "")
        )
        #: Position of the cursor.
        self.cursor_positions: Dict[
            "Window", Point
        ] = {}  # Map `Window` objects to `Point` objects.
        #: Visibility of the cursor.
        self.show_cursor = True
        #: (Optional) Where to position the menu. E.g. at the start of a completion.
        #: (We can't use the cursor position, because we don't want the
        #: completion menu to change its position when we browse through all the
        #: completions.)
        self.menu_positions: Dict[
            "Window", Point
        ] = {}  # Map `Window` objects to `Point` objects.
        #: Currently used width/height of the screen. This will increase when
        #: data is written to the screen.
        self.width = initial_width or 0
        self.height = initial_height or 0
        # Windows that have been drawn. (Each `Window` class will add itself to
        # this list.)
        self.visible_windows_to_write_positions: Dict["Window", "WritePosition"] = {}
        # List of (z_index, draw_func); consumed by `draw_all_floats`.
        self._draw_float_functions: List[Tuple[int, Callable[[], None]]] = []
    @property
    def visible_windows(self) -> List["Window"]:
        """All `Window` objects that have been drawn on this screen."""
        return list(self.visible_windows_to_write_positions.keys())
    def set_cursor_position(self, window: "Window", position: Point) -> None:
        """
        Set the cursor position for a given window.
        """
        self.cursor_positions[window] = position
    def set_menu_position(self, window: "Window", position: Point) -> None:
        """
        Set the menu position for a given window.
        """
        self.menu_positions[window] = position
    def get_cursor_position(self, window: "Window") -> Point:
        """
        Get the cursor position for a given window.
        Returns a `Point`; defaults to the origin if none was set.
        """
        try:
            return self.cursor_positions[window]
        except KeyError:
            return Point(x=0, y=0)
    def get_menu_position(self, window: "Window") -> Point:
        """
        Get the menu position for a given window.
        (This falls back to the cursor position if no menu position was set,
        and to the origin if neither was set.)
        """
        try:
            return self.menu_positions[window]
        except KeyError:
            try:
                return self.cursor_positions[window]
            except KeyError:
                return Point(x=0, y=0)
    def draw_with_z_index(self, z_index: int, draw_func: Callable[[], None]) -> None:
        """
        Add a draw-function for a `Window` which has a >= 0 z_index.
        This will be postponed until `draw_all_floats` is called.
        """
        self._draw_float_functions.append((z_index, draw_func))
    def draw_all_floats(self) -> None:
        """
        Draw all float functions in order of z-index.
        """
        # We keep looping because some draw functions could add new functions
        # to this list. See `FloatContainer`.
        while self._draw_float_functions:
            # Sort the floats that we have so far by z_index.
            functions = sorted(self._draw_float_functions, key=lambda item: item[0])
            # Draw only one at a time, then sort everything again. Now floats
            # might have been added.
            self._draw_float_functions = functions[1:]
            functions[0][1]()
    def append_style_to_content(self, style_str: str) -> None:
        """
        For all the characters in the screen.
        Set the style string to the given `style_str`.
        """
        b = self.data_buffer
        char_cache = _CHAR_CACHE
        append_style = " " + style_str
        # Rebuild every cell through the cache so equal (char, style) pairs
        # keep sharing one `Char` instance.
        for y, row in b.items():
            for x, char in row.items():
                row[x] = char_cache[char.char, char.style + append_style]
    def fill_area(
        self, write_position: "WritePosition", style: str = "", after: bool = False
    ) -> None:
        """
        Fill the content of this area, using the given `style`.
        The style is prepended before whatever was here before, unless
        `after` is true, in which case it is appended.
        """
        # Nothing to do for an empty/whitespace-only style string.
        if not style.strip():
            return
        xmin = write_position.xpos
        xmax = write_position.xpos + write_position.width
        char_cache = _CHAR_CACHE
        data_buffer = self.data_buffer
        if after:
            append_style = " " + style
            prepend_style = ""
        else:
            append_style = ""
            prepend_style = style + " "
        for y in range(
            write_position.ypos, write_position.ypos + write_position.height
        ):
            row = data_buffer[y]
            for x in range(xmin, xmax):
                cell = row[x]
                row[x] = char_cache[
                    cell.char, prepend_style + cell.style + append_style
                ]
class WritePosition:
    """Rectangular region of the screen into which a container renders.

    The origin may be negative (a float can be partially visible); the
    extent is always non-negative.
    """
    def __init__(self, xpos: int, ypos: int, width: int, height: int) -> None:
        assert height >= 0
        assert width >= 0
        # Negative xpos/ypos are legal: a float may be partially off-screen.
        self.xpos = xpos
        self.ypos = ypos
        self.width = width
        self.height = height
    def __repr__(self) -> str:
        name = self.__class__.__name__
        return (
            f"{name}(x={self.xpos!r}, y={self.ypos!r}, "
            f"width={self.width!r}, height={self.height!r})"
        )
| [
"jonathan@slenders.be"
] | jonathan@slenders.be |
7cca3e60f41f8fdd61160edec4bdf0bd36208e58 | 4b3d06e1baf7ec652039bbe780968d26fd203b62 | /train_online.py | ecf1bd503b5edadf259e51fcd812a6b4fc81d088 | [] | no_license | antoinecomp/Chatbot_RASA_room_reservation | 8f7ddcef9b2a893830ec8df328d58625ce18102a | 7c20bcefc75159b5736eb7c91e56c52fee0b7dd6 | refs/heads/master | 2020-04-08T20:50:33.670614 | 2018-11-25T11:19:13 | 2018-11-25T11:19:13 | 159,717,529 | 0 | 0 | null | 2018-11-29T19:34:23 | 2018-11-29T19:34:17 | Python | UTF-8 | Python | false | false | 1,501 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.train import online
from rasa_core.utils import EndpointConfig
#from rasa_core.channels.console import ConsoleInputChannel
#from rasa_core.interpreter import RegexInterpreter
logger = logging.getLogger(__name__)
def run_weather_online(interpreter,
                       domain_file="room_domain.yml",
                       training_data_file='data/stories.md'):
    """Train a Rasa Core agent and start an interactive online-learning loop.

    Builds an agent from *domain_file* with a memoization + Keras policy
    ensemble, trains it on the stories in *training_data_file*, then hands
    it to the online-learning session.

    :param interpreter: NLU interpreter used to parse user messages.
    :param domain_file: path to the domain definition YAML.
    :param training_data_file: path to the training stories file.
    :return: the trained Agent.
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    policy_ensemble = [MemoizationPolicy(max_history=2), KerasPolicy()]
    agent = Agent(
        domain_file,
        policies=policy_ensemble,
        interpreter=interpreter,
        action_endpoint=endpoint,
    )
    training_data = agent.load_data(training_data_file)
    agent.train(
        training_data,
        batch_size=50,
        epochs=200,
        max_training_samples=300,
    )
    online.run_online_learning(agent)
    return agent
if __name__ == '__main__':
    logging.basicConfig(level="INFO")
    # Load the pre-trained NLU model, then start the online training session.
    nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/roomnlu')
    run_weather_online(nlu_interpreter)
| [
"noreply@github.com"
] | noreply@github.com |
ace6b636412317b0696d877f61b07556ff12852e | fd799b081c1fc448aca783d4af8f81825c7183c1 | /events/migrations/0002_auto_20170828_1345.py | ce8a20db458e128c7c30f1a6d280fe83f0dfba39 | [] | no_license | Fcmam5/sdh_foundation | 73e72176db8d712dc555a093552a5ba878149109 | 0d8bb3587562ae691c5923b13513189128c39b59 | refs/heads/master | 2022-12-13T18:41:21.607936 | 2020-02-13T22:36:37 | 2020-02-13T22:36:37 | 100,252,638 | 3 | 1 | null | 2022-11-22T04:28:47 | 2017-08-14T09:48:57 | JavaScript | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-28 13:45
from __future__ import unicode_literals
from django.db import migrations, models
import events.models
class Migration(migrations.Migration):
    """Auto-generated migration: sets the Event model's plural admin label
    and adds the `poster` image field (with a placeholder-image default)."""
    dependencies = [
        ('events', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='event',
            options={'verbose_name_plural': 'events'},
        ),
        migrations.AddField(
            model_name='event',
            name='poster',
            # Upload path is computed by events.models.upload_posters_location.
            field=models.ImageField(default='https://unsplash.it/1100', upload_to=events.models.upload_posters_location, verbose_name='Poster'),
        ),
    ]
| [
"bahri.aimen48@gmail.com"
] | bahri.aimen48@gmail.com |
54c2c7f3ffcb4e4ed90513a1268dfb34424e6ee1 | 1f32096af05da776c59a11b74a424637aa718113 | /primer_clip/bed.py | f8515d2c193cfd8fbfdaa6d97ed825da38f3986d | [] | no_license | ohsu-comp-bio/compbio-galaxy-wrappers | a222dbef5d4d4101f1705c6101f2e212435d1ea8 | 6162bc6d8ee37401de8dffec545935953028bed7 | refs/heads/master | 2023-08-31T05:32:22.305366 | 2023-08-29T18:24:59 | 2023-08-29T18:24:59 | 32,424,561 | 6 | 13 | null | 2023-09-14T19:28:18 | 2015-03-17T22:40:39 | Python | UTF-8 | Python | false | false | 6,729 | py | class BedReader(object):
"""
Simple class to ingest BED files and return a data structure as such:
{chrom: [start1, stop1], [start2, stop2], ...}
Input: filename
"""
def __init__(self, filename):
self.filename = open(filename, 'rU')
self.bed_ints = self._create_bed()
self.chrom_list = list(self.bed_ints)
def _create_bed(self):
"""
Create the structure of BED coordinates connected to chromosome
identifiers.
Return start and stop in 1-based coords.
:return bed_ints:
"""
bed_ints = {}
with self.filename as bed:
for interval in bed:
interval = interval.rstrip('\n').split('\t')
chrom = str(interval[0])
# 0-based
start = int(interval[1])+1
# 1-based
stop = int(interval[2])
if chrom not in bed_ints:
bed_ints[chrom] = [[start, stop]]
else:
bed_ints[chrom].append([start, stop])
return bed_ints
def split_coords(self):
"""
Split out the intervals in to single coords.
:return:
"""
split_coords = {}
for chrom in self.bed_ints:
split_coords[chrom] = []
for coords in self.bed_ints[chrom]:
for coord in range(coords[0], coords[1]+1):
split_coords[chrom].append(coord)
return split_coords
class ExtBedReader(object):
    """
    Extended BED reader: in addition to chrom/start/stop it can capture
    optional columns (HGNC symbol, strand, phase, and pre-computed primer
    start/stop).  Optional-column positions are given as 0-based indexes;
    False means the column is absent.
    """
    def __init__(self, filename, header=False, hgnc=False, strand=False,
                 phase=False, pstart=False, pstop=False):
        """
        :param filename: path to the extended BED file
        :param header: True if the first line is a header row to skip
        :param hgnc: column index of the HGNC gene symbol, or False
        :param strand: column index of the strand symbol ('+'/'-'), or False
        :param phase: column index of the phase value, or False
        :param pstart: column index of the pre-computed primer start, or False
        :param pstop: column index of the pre-computed primer stop, or False
        """
        # 'r' replaces the 'rU' mode, which was removed in Python 3.11;
        # universal newlines are the default for text files in Python 3.
        self.filename = open(filename, 'r')
        self.header = header
        self.hgnc = hgnc
        self.strand = strand
        self.phase = phase
        self.pstart = pstart
        self.pstop = pstop
        self.bed_ints = self._ext_bed_parse()
    def _ext_bed_parse(self):
        """
        Parse the extended BED file.

        Coordinates are converted from 0-based half-open BED to 1-based
        inclusive; a leading 'chr' prefix is stripped from chromosome names
        and strand symbols are normalized to ints ('-' -> 0, '+' -> 1).
        :return: {chrom: {(str(start), str(stop)): {field: value, ...}}}
        """
        ext_bed_ints = {}
        with self.filename as bed:
            if self.header:
                next(bed)
            for interval in bed:
                interval = interval.rstrip('\n').split('\t')
                chrom = str(interval[0])
                if chrom.startswith('chr'):
                    chrom = chrom[3:]
                if chrom not in ext_bed_ints:
                    ext_bed_ints[chrom] = {}
                # 0-based BED start -> 1-based
                start = int(interval[1]) + 1
                # BED stop is already 1-based inclusive
                stop = int(interval[2])
                new_key = (str(start), str(stop))
                ext_bed_ints[chrom][new_key] = {}
                ext_bed_ints[chrom][new_key]['start'] = start
                ext_bed_ints[chrom][new_key]['stop'] = stop
                if self.hgnc:
                    ext_bed_ints[chrom][new_key]['hgnc'] = interval[self.hgnc]
                if self.strand:
                    # Normalize strand symbols: '-' -> 0, '+' -> 1.
                    if interval[self.strand] == '-':
                        interval[self.strand] = 0
                    elif interval[self.strand] == '+':
                        interval[self.strand] = 1
                    ext_bed_ints[chrom][new_key]['strand'] = interval[self.strand]
                if self.phase:
                    ext_bed_ints[chrom][new_key]['phase'] = interval[self.phase]
                if self.pstart:
                    ext_bed_ints[chrom][new_key]['pstart'] = int(interval[self.pstart])
                if self.pstop:
                    ext_bed_ints[chrom][new_key]['pstop'] = int(interval[self.pstop])
        return ext_bed_ints
    def find_primer_coords(self, tsize=250):
        """
        Based on a given total region length and the BED coordinates,
        derive the primer coordinates adjacent to each interval.

        For reverse-strand entries start/stop are deliberately flipped so
        that start > stop.
        :param tsize: total target size (region + primer)
        :return: {chrom: [[pstart, pstop], ...]}
        """
        primer_coords = {}
        for chrom in self.bed_ints:
            primer_coords[chrom] = []
            for entry in self.bed_ints[chrom].values():
                rsize = entry['stop'] - entry['start'] + 1
                psize = tsize - rsize
                if entry['strand'] == 0:
                    # Flipped on purpose: start > stop marks reverse strand.
                    pstop = entry['stop'] + 1
                    pstart = entry['stop'] + psize + 1
                elif entry['strand'] == 1:
                    pstart = entry['start'] - psize
                    pstop = entry['start'] - 1
                else:
                    raise ValueError("The strand should either be 0 or 1.")
                primer_coords[chrom].append([pstart, pstop])
        return primer_coords
    def get_primer_ends(self):
        """
        Collect just the primer-end coordinate adjacent to each interval,
        grouped by strand:
        {CHROM: {STRAND: [coord, ...]}}
        :return: dict keyed by chrom, then by strand as a string ('0'/'1')
        """
        primer_coords = {}
        for chrom in self.bed_ints.keys():
            primer_coords[chrom] = {}
            for coord in self.bed_ints[chrom].values():
                strand = str(coord['strand'])
                if strand not in primer_coords[chrom]:
                    primer_coords[chrom][strand] = []
                if coord['strand'] == 0:
                    pstop = coord['stop'] + 1
                elif coord['strand'] == 1:
                    pstop = coord['start'] - 1
                else:
                    raise ValueError("The strand should either be 0 or 1.")
                primer_coords[chrom][strand].append(pstop)
        return primer_coords
    def get_v4_primer_ends(self):
        """
        Same as get_primer_ends, but uses the pre-computed pstart/pstop
        values from the final two columns of a v4 BED file:
        {CHROM: {STRAND: [coord, ...]}}
        :return: dict keyed by chrom, then by strand as a string ('0'/'1')
        """
        primer_coords = {}
        for chrom in self.bed_ints.keys():
            primer_coords[chrom] = {}
            for coord in self.bed_ints[chrom].values():
                strand = str(coord['strand'])
                if strand not in primer_coords[chrom]:
                    primer_coords[chrom][strand] = []
                if coord['strand'] == 0:
                    pstop = coord['pstop'] + 1
                elif coord['strand'] == 1:
                    pstop = coord['pstart'] - 1
                else:
                    raise ValueError("The strand should either be 0 or 1.")
                primer_coords[chrom][strand].append(pstop)
        return primer_coords
| [
"jhl667@yahoo.com"
] | jhl667@yahoo.com |
53fd18278c30fb46cf3443854f3924d9d88bf8f1 | eb99e2e9eda8ed769bf9d27d0e1ece2f4df98302 | /DataLoad.py | 253001fe6fc6ba2ec222a4c86c2b7e02bd19adec | [] | no_license | AmbarDudhane/Hotel-Search-Engine | b33990d59d445ebcade0e212bc9b41a7ddf8f1ab | 8744a51461c67aec748815981be93f32d150aa34 | refs/heads/master | 2020-08-09T11:20:03.930810 | 2019-12-04T05:25:42 | 2019-12-04T05:25:42 | 214,076,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | '''
Author : Ambar Dudhane
Date : 6-10-2019
Contact : ambarsd12345@hotmail.com
'''
import os
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer
import math
import pickle
import datetime
def storeTotalDocuments(total):
    """Persist the corpus document count to 'totalDoc.txt' in the cwd.

    :param total: number of documents in the indexed collection
    """
    # Context manager guarantees the handle is closed even on write errors
    # (the original leaked the handle if write() raised).
    with open('totalDoc.txt', 'w') as f:
        f.write(str(total))
def storeIndex(path, filename, index):
    """Pickle the inverted index to <path>/mainIndex.pkl.

    :param path: directory to write into
    :param filename: unused -- kept for backward compatibility; the output
        name is always 'mainIndex.pkl'
    :param index: the inverted-index mapping to serialize
    """
    # os.path.join is portable; the original hard-coded a Windows '\\'
    # separator.  The context manager closes the handle on any error.
    with open(os.path.join(path, 'mainIndex.pkl'), 'wb') as f:
        pickle.dump(index, f)
def computeTFIDF(mainIndex, total):
    """Compute per-document TF-IDF, IDF and TF tables from the inverted
    index and pickle each to a hard-coded path.

    :param mainIndex: {term: [[doc_id, term_frequency], ...]} -- assumes
        each posting item is a [doc_id, tf] pair, consistent with how the
        index is built in this module.
    :param total: total number of documents in the collection
    """
    # Per-document tables: {doc_id: {term: value}}.
    tfidf = {}
    idfData = {}
    tfData = {}
    df = 0
    for term, posting in mainIndex.items():
        # Document frequency = number of documents the term appears in.
        df = len(posting)
        idf = math.log2((total / df))
        for item in posting:
            if item[0] in tfidf.keys():
                tfidf[item[0]][term] = item[1] * idf
                idfData[item[0]][term] = idf
                tfData[item[0]][term] = item[1]
            else:
                tfidf[item[0]] = {term: item[1] * idf}
                idfData[item[0]] = {term: idf}
                tfData[item[0]] = {term : item[1]}
    # NOTE(review): output paths are hard-coded to a Windows drive; this
    # function only works on the original author's machine as written.
    f = open(r'F:\UTA\1st sem\DM\hotel-reviews (1)\tfidfDataset.pkl', "wb")
    pickle.dump(tfidf, f)  # consumed later by searchIndex.py
    f.close()
    f1 = open(r'F:\UTA\1st sem\DM\hotel-reviews (1)\idfDataset.pkl', "wb")
    pickle.dump(idfData, f1)
    f1.close()
    f2 = open(r'F:\UTA\1st sem\DM\hotel-reviews (1)\tfDataset.pkl', "wb")
    pickle.dump(tfData, f2)
    f2.close()
    # Debug output for the first two documents.
    print("TFIDF of doc1",tfidf[1])
    print("TFIDF of doc2", tfidf[2])
    print("IDF of doc1", idfData[1])
    print("IDF of doc2", idfData[2])
os.chdir("F:\\UTA\\1st sem\\DM\\hotel-reviews (1)")
now = datetime.datetime.now()
print("Starting time: ", now.strftime("%Y-%m-%d %H:%M:%S"))
data = pd.read_csv("Hotel_Reviews_Jun19_reduced.csv", usecols=['reviews.id', 'reviews.text'])
print(data.shape, "Data length=", len(data))
reviewDict = data.set_index("reviews.id")['reviews.text'].to_dict()
bowDict = {}
tokenizer = RegexpTokenizer(r'\w+') # getting rid of punctuation while tokenizing
for key, value in reviewDict.items():
bowDict[key] = list(tokenizer.tokenize(str(value).lower()))
# removing stop words
stop_words = set(stopwords.words('english'))
ps = PorterStemmer()
for key, value in bowDict.items():
temp = []
for word in value:
if word not in stop_words:
temp.append(ps.stem(word)) # performing stemming
bowDict[key] = temp
# creating inverted index
# first create document level index and then merge it to main index
docIndex = {}
for id, words in bowDict.items():
docIndex[id] = []
for word in words:
docIndex[id].append({word: [id, words.count(word)]})
mainIndex = {}
for indexList in docIndex.values():
for element in indexList:
term = list(element.keys()) # term
value = list(element.values()) # [docId, occurances]
if term[0] in mainIndex: # term is already present in mainIndex
if value[0] not in mainIndex[term[0]]:
mainIndex[term[0]].append(value[0])
else: # term is not present in mainIndex
mainIndex[term[0]] = [value[0]]
print("mainIndex size=", len(mainIndex))
# saving index file to disk
storeIndex(r'F:\UTA\1st sem\DM\hotel-reviews (1)', 'MainIndex.txt', mainIndex)
# Calculating tf-idf
computeTFIDF(mainIndex, len(docIndex))
storeTotalDocuments(len(docIndex))
now2 = datetime.datetime.now()
print("Ending time: ", now2.strftime("%Y-%m-%d %H:%M:%S")) | [
"noreply@github.com"
] | noreply@github.com |
f05b076a55494c3defd23b21101ba4ccb8e6b53b | fd9e9a5b9e013bbbd834f5f85d4e1b8ee165443a | /db_downgrade.py | 6bfb62d2e183d28b9a725dcc9396d71f0ed6ac84 | [] | no_license | xiayunlong1991/noozjunkie | 0e44b665616880cdc310ccafbd0c80e46d1a1b05 | 6311c4e970e3c01a622b3677e874ec030a800443 | refs/heads/master | 2020-12-28T23:04:52.343098 | 2016-01-13T20:25:46 | 2016-01-13T20:25:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!flask/bin/python
from migrate.versioning import api
from webapp_config import SQLALCHEMY_DATABASE_URI
from webapp_config import SQLALCHEMY_MIGRATE_REPO
# Read the current schema version, step the database back one revision,
# then report the resulting version.
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(v))
| [
"ljheidel@gmail.com"
] | ljheidel@gmail.com |
08b4c22919b5f084c739575e4ce22a1ed07a6174 | 901ca534bf5cdb3f7fdc0b53eb243fc4f1e6c3a6 | /taopiaopiao/urls.py | 43830b71ffe4286a1080ebf15c2ae6e97f5fa0f2 | [] | no_license | jiangxinke19960710/taopiaopiao | cc70f47def72bce1bf586ceb3dbe068c6b4d9989 | adb3e9423767dddaefd04c755b88f3645fca1381 | refs/heads/master | 2020-05-04T20:43:52.720398 | 2019-04-04T07:55:10 | 2019-04-04T07:55:10 | 158,798,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from django.conf.urls import url,include
from django.contrib import admin
from apps.home import views
# Project-level URL routing: Django admin plus the per-app URLconfs;
# the bare site root is served directly by the home page view.
urlpatterns = [
    url('admin/', admin.site.urls),
    url('home/',include('home.urls') ),
    url('account/',include('account.urls') ),
    url(r'^captcha/',include('captcha.urls') ),
    url(r'sort_movies/',include('sort_movies.urls') ),
    url('^$',views.home_page)
]
| [
"2263009782@qq.com"
] | 2263009782@qq.com |
d91a3f21c10aa90b08dbeab595516c260355f3d6 | 5a3ce4563bb64786949d73b8637adf29da620207 | /lpthw_27-52_15072016_pycharm/ex32.py | dafd6f56a831f394f48084a5e1255e09af7b1137 | [] | no_license | ohoboho79/LPTHW | 8a445ecd51dae2c9a57a9d23077ab8cc1661ace3 | f61a9dba4c14940dae8719f2c53d2e3325eae09b | refs/heads/master | 2020-12-25T14:22:40.761076 | 2016-09-09T13:29:51 | 2016-09-09T13:29:51 | 67,192,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
#This first kind of for-loop goes through a list
for number in the_count:
print "This is count %d" % number
#same as above
for fruit in fruits:
print "A friut of type: %s" % fruit
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
#we can also build lists, first start with an empty one
elements = []
#then use the range function to do 0 to 5 counts
for i in range(0, 6):
print "Adding %d to the list." % i
#append is a function that lists understand
elements.append(i)
#now we can print them out too
for i in elements:
print "Element was: %d" % i
elements1 = range(6)
for i in elements1:
print "Elements1 was: %d" % i | [
"nterziysky@mail.bg"
] | nterziysky@mail.bg |
066304dabdf5fafd3a3ca245cdb1d1d3665ca05a | 557946dcb52d38173d06f5d1aad069294dd10254 | /producttracker/tracker/admin.py | fb7198a9f25559514eaf077d58ee6cf88f26380d | [] | no_license | thomaschriskelly/product-tracker | 4c5404dde61c91c4ae6d9b11f7441fd4d2d132fb | d7ae34d164904faa89c963ac66413bb46162cd4b | refs/heads/master | 2021-07-12T06:19:02.728080 | 2017-10-14T16:34:38 | 2017-10-14T16:34:38 | 106,733,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from django.contrib import admin
# Register your models here.
from .models import Product, Breadcrumb
# Expose the tracker models in the Django admin with the default ModelAdmin.
admin.site.register(Product)
admin.site.register(Breadcrumb)
| [
"thomaschriskelly@gmail.com"
] | thomaschriskelly@gmail.com |
faf6419f573eee071a77d9cb8740755a3ebf8256 | be94ee27511f5a558405c9bb9caba713841b1059 | /hmm/chart-bar.py | 755ca1e9a93396bfd9c36a4c4349fbf26a08bdb5 | [] | no_license | david-message/python | 12430ae01f46367e4fa93dd75f7dc7a4a4da9e09 | 7f2604f3cf955c374dc3dff1e52e696bf206df50 | refs/heads/master | 2021-09-03T13:13:52.729204 | 2018-01-09T09:25:04 | 2018-01-09T09:25:04 | 107,939,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """
Make a bar chart of the top-10 GDP countries
This script is written by Vamei, http://www.cnblogs.com/vamei
you may freely use it.
"""
import matplotlib.pyplot as plt
import numpy as np
# quants: GDP values
# labels: country names
labels = []
quants = []
# Read the two-column data file (country name, GDP).
# open() replaces the Python-2-only file() builtin.
for line in open('../data/country_gdp.txt'):
    info = line.split()
    labels.append(info[0])
    quants.append(float(info[1]))
width = 0.4
# Ten evenly spaced bar centers.
ind = np.linspace(0.5, 9.5, 10)
# make a wide figure
fig = plt.figure(1, figsize=(12, 6))
ax = fig.add_subplot(111)
# Bar plot, centered on each tick.
ax.bar(ind - width / 2, quants, width, color='coral')
# Set the ticks on x-axis
ax.set_xticks(ind)
ax.set_xticklabels(labels)
# labels
ax.set_xlabel('Country')
ax.set_ylabel('GDP (Billion US dollar)')
# title
ax.set_title('Top 10 GDP Countries', bbox={'facecolor': '0.8', 'pad': 5})
plt.show() | [
"david.message@gmail.com"
] | david.message@gmail.com |
10cc66e1643f4c035580b4b7091f02ac6134d1c9 | b754aeccfb2c8dcd572fe6aeb03ed99bd6cd2ee9 | /docs/source/conf.py | 08225bc4ce3132060ec6994a2ce54cce92c05e79 | [
"MIT"
] | permissive | danielflanigan/resonator | dbd9e2ca76cec1e09f6dd0f1d5e549730058d74b | 97f4183ca956d40ca3be606dc4b7fc202360ac58 | refs/heads/master | 2022-07-28T04:27:33.455803 | 2022-07-14T07:53:31 | 2022-07-14T07:53:31 | 134,384,010 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'resonator'
copyright = '2022, Daniel Flanigan'
author = 'Daniel Flanigan'
# The full version, including alpha/beta/rc tags
release = '0.8.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"daniel.isaiah.flanigan@gmail.com"
] | daniel.isaiah.flanigan@gmail.com |
c274c1bb953eb1497f0f75d7d6eae5b6c2d654e4 | 6653b885b0a4ef50b833757277d40c187289edc3 | /togithub/tcp_server.py | 94377df79b36fd30e8c093bdbb6dc7058fb3e5aa | [] | no_license | gdcgit/python_study | 0955542f2ba9dc1a8c0b294beb5306235d3a5bf3 | c3c9a24f2e98c9524e9b60f9bbcdeb768079b1e3 | refs/heads/master | 2020-03-10T05:28:11.930943 | 2018-09-03T08:32:05 | 2018-09-03T08:32:05 | 129,218,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # -*- coding: utf-8 -*-
import socket
import threading
import time
def tcplink(sock, addr):
    """Serve one TCP client: send a greeting, then echo 'Hello <msg>!'
    until the peer sends 'exit' or closes the connection.

    :param sock: connected client socket
    :param addr: (host, port) tuple of the peer
    """
    print('Accept new connection from %s:%s...' % addr)
    sock.send(b'welcome')
    while True:
        data = sock.recv(1024)
        # Artificial delay to pace replies in this demo server.
        time.sleep(1)
        if not data or data.decode('utf-8') == 'exit':
            break
        sock.send(('Hello %s!' % data.decode('utf-8')).encode('utf-8'))
    sock.close()
    print('Connection from %s:%s closed.' % addr)
# Listen on localhost:9999 and hand each client off to its own thread.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # SOCK_STREAM = TCP
s.bind(('127.0.0.1', 9999))
s.listen(5)
print("等待连接。。。。")
while True:
    # Accept a new connection (blocks until a client arrives).
    sock, addr = s.accept()
    # Create a new thread to handle this TCP connection so that accept()
    # stays responsive to further clients.
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()
| [
"378641588@qq.com"
] | 378641588@qq.com |
8a47560d80d2fcdfd8863054c9180f0cfd30801c | 8be54552853763d82315300b543364206e82afb3 | /botAIT.py | ed17bb6184f5e168bf6c6d0e647faf8284827ddf | [] | no_license | ramir7887/telegrambot | 8e36ffd90070eab36a1525bd0ad06eeadf456b61 | 27c927d2cdadc94c625dbe964e5fbf388f542fd3 | refs/heads/master | 2020-05-24T09:59:59.268288 | 2019-05-28T16:06:30 | 2019-05-28T16:06:30 | 187,219,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,960 | py |
#import initbot
import requests
from telebot import types
import telebot
import json
import socket
import redis
import re
import yaml
from postgres import Postgres
############################################
############################################
#1)Добавить функцию с общей информацией о станке
#2)Разделить тело бота и функции на два файла
#3)Добавить авторизацию (хотя бы файлик или словарь)
#4)Функция оповещения о ошибках (сказать что бы создали ошибку)посмотреть как парсить ее
#5)Красивое отображение сообщений
#6)Думать уже о базе данных ошибок
############################################
############################################
TOKEN = "#########################################" #токен бота
#MAIN_URL = f'https://api.telegram.org/bot{TOKEN}'
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def getInfo(machine):
    """Return an HTML-formatted info card (name, type, location) for the
    given machine identifier, e.g. 'Станок 1'.

    :param machine: key into the static equipment register
    :return: HTML string with the machine header and its attributes
    """
    register = {
        'Станок 1': ['Milling Machine', 'Фрезерный', 'Лаборатория 3-6'],
        'Станок 2': ['СА-535', 'Токарный', 'Лаборатория 11'],
        'Станок 3': ['Стенд АксиОМА', 'Стенд', 'Аудитория 349'],
        'Станок 4': ['Стенд АксиОМА', 'Стенд', 'Аудитория 3-6'],
        'Станок 5': ['Мини станок', 'Стенд', 'Аудитория 355'],
        'Станок 6': ['CAN станок', 'Фрезерный', 'Аудитория 355'],
    }
    field_names = ['Название: ', 'Тип: ', 'Расположение: ']
    rows = [
        label + '<i>' + value + '</i>\n'
        for label, value in zip(field_names, register[machine])
    ]
    return '<b>' + machine + '</b>\n' + ''.join(rows)
def getAnswer(param, machine):
    """Fetch current values of one parameter group ('Моточасы', 'Канал 1'
    or 'ПЛК') for *machine* from Redis, label them with display names from
    the remote ru.yml locale, and return an HTML report string.

    :param param: button caption selecting the parameter group
    :param machine: machine identifier, e.g. 'Станок 1'
    :return: HTML string '<b>machine:</b> param' followed by one
        'name: <b>value</b>' line per available field
    """
    # Map the button caption to (locale section, redis sub-key).
    listParam = {'Моточасы': ['mototimes', 'mototime'],
                 'Канал 1': ['chan1_entries', 'chan1'],
                 'ПЛК': ['plc_entries', 'plc']}
    var1 = listParam[param][0]
    var2 = listParam[param][1]
    # Download the YAML locale and keep the relevant section:
    # a dict of field-key -> human-readable name.
    response = requests.get('https://eio.ksu.ru.com/locales/ru.yml')
    convert = bytes(response.content)
    convert = convert.decode('utf-8')
    convert = yaml.load(convert)
    convert = convert['ru'][var1]
    # Redis keys are machine numbers; ids >= 4 are stored shifted by one.
    mach = machine
    mach = int(mach[-1])
    if mach >= 4:
        mach += 1
    # The stored value uses Ruby-style '=>' hash syntax and is
    # double-encoded, hence the replace and the two eval() calls.
    # SECURITY NOTE: eval() on data read from Redis is unsafe if the store
    # can be tampered with; consider a real parser instead.
    red = str(r.get(str(mach)))
    red = red.replace('=>', ':')
    dict1 = eval(red)
    dict1 = eval(dict1)
    strAnsver = ''
    # Pair display names with their values; skip fields missing in Redis.
    for key in convert:
        newkey = var1 + '.' + key
        try:
            newstr = convert[key] + ': <b>' + dict1[var2][newkey] + '</b>\n'
        except Exception:
            continue
        strAnsver += newstr
    # Bug fix: the header previously used the global flgMachine instead of
    # the 'machine' parameter; use the parameter so the function is
    # self-contained (the only caller passed flgMachine anyway).
    strAnsver = '<b>' + machine + ':</b> ' + param + '\n' + strAnsver
    return strAnsver
def AUTORIZ():
    """Build the reply keyboard shown to unauthenticated users.

    Note: the button caption deliberately starts with a Latin 'A' to match
    the dispatcher's substring check.
    """
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    keyboard.add(types.KeyboardButton('Aвторизация'))
    return keyboard
def MAINMENU():
    """Main menu keyboard: equipment list, latest critical data, logout.

    Note: 'Cписок' deliberately starts with a Latin 'C' to match the
    dispatcher's substring check.
    """
    captions = ['Cписок оборудования',
                'Последние крит.данные',
                'Выход из уч.записи']
    menu = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)
    menu.add(*[types.KeyboardButton(caption) for caption in captions])
    return menu
def MARKUPone(machine_count):
    """Keyboard listing machines 1..machine_count plus a main-menu button.

    :param machine_count: number of 'Станок N' buttons to create
    """
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
    captions = ['Станок {0}'.format(n) for n in range(1, machine_count + 1)]
    captions.append('Главное меню')
    # One add() per button, matching the original one-per-row layout.
    for caption in captions:
        keyboard.add(types.KeyboardButton(caption))
    return keyboard
def MARKUPtwo():
    """Per-machine submenu: parameter groups plus navigation buttons."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    buttons = [
        types.KeyboardButton(caption)
        for caption in ('Моточасы', 'Канал 1', 'ПЛК', 'Назад', 'Главное меню')
    ]
    # Single add() call with all buttons, as in the original layout.
    keyboard.add(*buttons)
    return keyboard
#####################
#глобальные переменные для хранения состояний
flgAUTO = False
flgMachine = None
flgParam = None
######################
#создание бота
bot = telebot.TeleBot(TOKEN)
def get_updates_json(request):
    """Call the Telegram Bot API getUpdates method at the given base URL
    and return the decoded JSON payload.

    :param request: bot API base URL ending with '/'
    """
    return requests.get(request + 'getUpdates').json()
#получение информации о боте
user = bot.get_me()
print(user)
#обработчики команд
@bot.message_handler(commands = ['start'])
def send_welcome(message):
    """Handle /start: describe the bot, log the chat, and show the
    authorization keyboard."""
    bot.reply_to(message, 'Бот предоставляет доступ к информации о работе технологического оборудования. Проходит альфа-тестирование.\nДоступно станков: 4\nПользоваться только клавиатурой и командами')
    # Log chat id and user name for debugging.
    print('\n'+ str(message.chat.id)+ str(message.chat.first_name))
    print("\n" + message.text)
    bot.send_message(message.chat.id, "Для авторизации отправьте ЛОГИН:ПАРОЛЬ и нажмите 'Авторизация'", reply_markup= AUTORIZ())
#команда help(пока пустая)
@bot.message_handler(commands = ['help'])
def help_func(message):
    # TODO: /help is currently a stub -- no reply is sent.
    pass
#обработчик текстовых сообщений
#реагирует на команды текстовые
@bot.message_handler(content_types = ['text'])
def id_st(message):
    """Main text dispatcher: routes button captions to the matching
    keyboard/answer.  Matching is by substring, and several captions
    deliberately mix Latin and Cyrillic letters ('Aвторизация',
    'Cписок ...') -- do not "fix" them.
    """
    if 'Aвторизация' in message.text:
        global flgAUTO
        flgAUTO= True
        bot.send_message(message.chat.id, 'Авторизация пройдена',reply_markup= MAINMENU() )
    elif 'Cписок оборудования' in message.text:
        if flgAUTO == True:
            bot.send_message(message.chat.id,'<b>Список оборудования</b>', reply_markup= MARKUPone(6), parse_mode= 'HTML')
        else:
            bot.send_message(message.chat.id, "Некорректные данные! Попробуйте еще раз!", reply_markup= AUTORIZ())
    elif 'Последние крит.данные' in message.text:
        bot.reply_to(message,'Раздел находится в разработке.')
    elif 'Выход из уч.записи' in message.text:
        bot.send_message(message.chat.id, "Для авторизации отправьте ЛОГИН:ПАРОЛЬ и нажмите", reply_markup= AUTORIZ())
    elif 'Станок' in message.text:
        # TODO: should also send general machine info pulled from the
        # PostgreSQL database; for now the static register is used.
        global flgMachine
        flgMachine = message.text
        bot.send_message(message.chat.id, getInfo(message.text),parse_mode= 'HTML',reply_markup= MARKUPtwo())
    elif 'Общ.информация' in message.text:
        name = 'Стенд Аксиома'
        typ = 'Стенд'
        place = 'Ауд. 349'
        # Placeholder text until the info comes from PostgreSQL.
        bot.send_message(message.chat.id,"Назывние: {0}\nТип: {1}\nМестоположение: {2}".format(name,typ,place))
    elif 'Моточасы' in message.text:
        bot.send_message(message.chat.id, getAnswer(message.text,flgMachine), parse_mode= 'HTML')
    elif 'Канал 1' in message.text:
        bot.send_message(message.chat.id, getAnswer(message.text,flgMachine), parse_mode= 'HTML')
    elif 'ПЛК' in message.text:
        bot.send_message(message.chat.id, getAnswer(message.text,flgMachine), parse_mode= 'HTML')
    elif 'Главное меню' in message.text:
        bot.send_message(message.chat.id, message.text, reply_markup=MAINMENU())
    elif 'Назад' in message.text:
        bot.send_message(message.chat.id, message.text, reply_markup= MARKUPone(6))
    else:
        # LOGIN:PASSWORD submissions contain ':' and are only logged;
        # anything else gets a "unknown command" reply.
        if ':' in message.text:
            print(message.text)
        else:
            print('некомандное сообщение')
            bot.reply_to(message, 'Такой команды я точно не ожидал')
#пуллинг
bot.polling(none_stop=True)
#while True:
#try:
#bot.polling(none_stop=True)
#except Exception as e:
#time.sleep(10)
#if __name__ == '__main__':
# bot.polling()
| [
"ramir7887@yandex.ru"
] | ramir7887@yandex.ru |
8e4f243dcfc79f2d6db3631448de760c0f7d0b54 | b65542cac9b8bbe4fd1cc434297aaefb11b7bee0 | /dzTraficoBackend/dzTrafico/apps.py | b5a858ce335467cabfdcdbf500db2a1e694580b8 | [
"MIT"
] | permissive | DZAymen/dz-Trafico | f3c53e9531b267dd6b578124c4d33f00ea071dc0 | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | refs/heads/master | 2023-01-07T14:41:36.274533 | 2020-10-31T14:04:26 | 2020-10-31T14:04:26 | 107,466,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class DztraficoConfig(AppConfig):
name = 'dzTrafico'
| [
"aymen_lakehal@live.fr"
] | aymen_lakehal@live.fr |
675df779fe3084bf1af9683905678d351fb4168a | f906e747b4366a9a8364d922fd7dab8280b16424 | /tensorflow_cookbook-master/02_TensorFlow_Way/03_Working_with_Multiple_Layers/03_multiple_layers_of_ipynb.py | 6cf0b7244c0a20c16c999b4a21840eccd16542a9 | [
"MIT"
] | permissive | tony36486/Machine-Learning | 17c396725d8e2925e8304cb9f9eee7590ad155cf | 4cc8b163212ee0550d057209fb88a2617f0fd75f | refs/heads/master | 2021-01-21T13:21:45.700914 | 2017-07-22T07:22:52 | 2017-07-22T07:22:52 | 91,810,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 31 22:52:07 2017
@author: tony
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import os
from tensorflow.python.framework import ops
#Reset graph
ops.reset_default_graph()
#Create a Graph Session
sess = tf.Session()
#Create Tensors
# Create a small random 'image' of size 4x4
x_shape = [1, 4, 4, 1]
x_val = np.random.uniform(size=x_shape)
#Create the Data Placeholder
x_data = tf.placeholder(tf.float32, shape=x_shape)
#First Layer: Moving Window (Convolution)
# Create a layer that takes a spatial moving window average
# Our window will be 2x2 with a stride of 2 for height and width
# The filter value will be 0.25 because we want the average of the 2x2 window
my_filter = tf.constant(0.25, shape=[2, 2, 1, 1])
my_strides = [1, 2, 2, 1]
mov_avg_layer= tf.nn.conv2d(x_data, my_filter, my_strides,
padding='SAME', name='Moving_Avg_Window')
#Second Layer: Custom
# Define a custom layer which will be sigmoid(Ax+b) where
# x is a 2x2 matrix and A and b are 2x2 matrices
def custom_layer(input_matrix):
input_matrix_sqeezed = tf.squeeze(input_matrix)
A = tf.constant([[1., 2.], [-1., 3.]])
b = tf.constant(1., shape=[2, 2])
temp1 = tf.matmul(A, input_matrix_sqeezed)
temp = tf.add(temp1, b) # Ax + b
return(tf.sigmoid(temp))
# Add custom layer to graph
with tf.name_scope('Custom_Layer') as scope:
custom_layer1 = custom_layer(mov_avg_layer)
#Run Output
print(sess.run(mov_avg_layer, feed_dict={x_data: x_val}))
print(sess.run(custom_layer1, feed_dict={x_data: x_val}))
#Create and Format Tensorboard outputs for viewing
# Add summaries to tensorboard
merged = tf.summary.merge_all(key='summaries')
#check floder
if not os.path.exists("H:\\my_TFpath\\tensorflowlogs"):
os.makedirs("H:\\my_TFpath\\tensorflowlogs")
# Initialize graph writer:
my_writer = tf.summary.FileWriter("H:\\my_TFpath\\tensorflowlogs", sess.graph) | [
"tony36486@users.noreply.github.com"
] | tony36486@users.noreply.github.com |
0b67d648bfe3eb20a16551d68355d41b176292bc | c32d944bcb8b5c375b86cc11fa74f6d3356d952c | /venv/bin/django-admin | b88bcac5d91bee06ffdca07e0fe1b8df397205c7 | [] | no_license | cleocardoso/RNcaronas | deff0465a080c9cb5fa635017570dd2233239ae0 | 2a420953864e1e0dceed7253c946f637049b0490 | refs/heads/main | 2023-03-15T09:54:15.035255 | 2021-03-17T17:24:47 | 2021-03-17T17:24:47 | 335,401,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | #!/home/cleonice/PycharmProjects/RNcaronas/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"cleotads21@gmail.com"
] | cleotads21@gmail.com | |
6d92263aec607e08cf6bd3646b8df1e30e2e2ca4 | e9cbdb3260cdeb5e4ef96cd48cdb93694c22c565 | /spider/search.py | e65fdea50fc2942ee1c1493e75e560da3df5608f | [] | no_license | FelixVicis/nan-fresno | 1ba41af5f82fa5cdad218495725819f6d1912f38 | 589c080aa40d15cdc995f72baab3447051036c5e | refs/heads/master | 2020-03-27T16:47:26.899627 | 2018-07-29T03:07:05 | 2018-07-29T03:07:05 | 146,806,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | import googlesearch as google
def websites(query, start=0, stop=None, per_page=10):
return google.search(query, start=start, stop=stop, num=per_page)
def images(query, start=0, stop=None, per_page=10):
return google.search_images(query, start=start, stop=stop, num=per_page)
| [
"Mills.Allen.James@gmail.com"
] | Mills.Allen.James@gmail.com |
9124d26a7c4310c597113e9a92316dc2a76880aa | 831fb492d63b6cb0cecc7fc93ba62cf912040ff4 | /mysite/settings.py | 5795cad0583df818aa260841e0aa73a1d6d5afea | [] | no_license | edu4il/my-first-blog | 4a3fc5dfd499927a8a0090bddf1240ffd1e8804e | 2009bb774d2fe6b553340dfe2fd950eefd0209d1 | refs/heads/master | 2020-03-22T18:14:48.620187 | 2018-07-12T02:43:27 | 2018-07-12T02:43:27 | 82,006,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'om^8sz8ll=q%00#zh&prq^0*oda_k-qgd^%l&ryaw4zy6d=zg*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
| [
"edu4il@nate.com"
] | edu4il@nate.com |
a3cb808b44d85ac6501e2d248c800322b76a5627 | 864831cd1de41069ec72ee3225a5dc5af0f9f3a9 | /django-react/leads/models.py | f0c48a5358c5f738955767af5510b7857b6effb0 | [] | no_license | KyrahWiggins/django-rest-demo | 7ec6fe98beb377065007673a75f329da4491fe3b | cf2c310dacaa5035a9ac66763bee429608092fe1 | refs/heads/main | 2023-07-06T05:21:21.218368 | 2021-07-27T20:04:36 | 2021-07-27T20:04:36 | 385,282,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Lead(models.Model):
objects = None
name = models.CharField(max_length=100)
email = models.EmailField()
message = models.CharField(max_length=300)
phone = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
| [
"kyrah.wiggins@gmail.com"
] | kyrah.wiggins@gmail.com |
f731329651c7822ef9fefb15848334d0ba19c654 | 4d1cc7fbec98ab05eb500855e3d61bcdd0067d22 | /D.py | a9cc2f2c05d9891673e64796615e8d58fc2ebd46 | [] | no_license | itachiuchihu/lab6 | a8da5ef28ed7631429b34ffe7cbf21314a047eca | 6c6e94d627861a185de98cdc0f4723a226005ac5 | refs/heads/master | 2020-12-11T03:56:01.399899 | 2015-10-12T19:09:46 | 2015-10-12T19:09:46 | 43,736,480 | 0 | 0 | null | 2015-10-06T07:40:15 | 2015-10-06T07:40:15 | null | UTF-8 | Python | false | false | 274 | py | A=input()
k=int(A[0])
n=int(A[1])
B=[]
for i in range(k):
Q=''
Q=Q+input()
for t in range(n):
if
B=B+[int(Q[t])]
Q=''
for i in range(n):
u=i
C=[]
for t in range(k):
C=C+[B[u]]
u=u+n
f=min(C)
Q=Q+str(f)
print(Q) | [
"warcraft.menyilo@rambler.ru"
] | warcraft.menyilo@rambler.ru |
89082409ae181644fe0c44742f9546791c840103 | b915d2155a1d09e29db8563127faf5ca4ec184eb | /3/main.py | 7b4586ca15c91e5dec069282dfcb82bd4a6bc3e9 | [] | no_license | AnshThayil/AdventofCode2015 | 618bffb3b601a3e9d3c73fb72e7324a2c1ab2056 | aa5735eff61acb0cb2a5afe9265b2318c88ab2b8 | refs/heads/master | 2022-12-26T08:55:30.569708 | 2022-12-14T10:36:04 | 2022-12-14T10:36:04 | 114,738,595 | 0 | 0 | null | 2022-12-11T10:46:24 | 2017-12-19T08:24:03 | Python | UTF-8 | Python | false | false | 2,328 | py | #author - Ansh Thayil
#Advent of Code 2015 Day 3 - Perfectly Spherical Houses in a Vacuum
x = 0
y = 0
xlist =[0]
ylist = [0]
coords = []
def removeduplicates(list):
workablelist = list
list_without_duplicates = []
for i in range(len(list)):
for j in range(i + 1,len(list)):
if list[i] == list[j]:
workablelist[j] = '*'
for i in range(len(list)):
if workablelist[i] != "*":
list_without_duplicates.append(list[i])
return list_without_duplicates
file = open("inputday3.txt")
text = file.read()
for i in text:
if i == "^":
y += 1
xlist.append(x)
ylist.append(y)
elif i == "v":
y -= 1
xlist.append(x)
ylist.append(y)
elif i == ">":
x += 1
xlist.append(x)
ylist.append(y)
elif i == "<":
x -= 1
xlist.append(x)
ylist.append(y)
for i in range(len(xlist)):
coord = []
coord.append(xlist[i])
coord.append(ylist[i])
coords.append(coord)
coords_wo_duplicates = removeduplicates(coords)
xlist = [0]
ylist = [0]
x1 = 0
y1 = 0
x2 = 0
y2 = 0
boo = 0
part2coords = []
for i in text:
if boo == 0:
if i == "^":
y1 += 1
xlist.append(x1)
ylist.append(y1)
elif i == "v":
y1 -= 1
xlist.append(x1)
ylist.append(y1)
elif i == ">":
x1 += 1
xlist.append(x1)
ylist.append(y1)
elif i == "<":
x1 -= 1
xlist.append(x1)
ylist.append(y1)
boo = 1
elif boo == 1:
if i == "^":
y2 += 1
xlist.append(x2)
ylist.append(y2)
elif i == "v":
y2 -= 1
xlist.append(x2)
ylist.append(y2)
elif i == ">":
x2 += 1
xlist.append(x2)
ylist.append(y2)
elif i == "<":
x2 -= 1
xlist.append(x2)
ylist.append(y2)
boo = 0
for i in range(len(xlist)):
coord = []
coord.append(xlist[i])
coord.append(ylist[i])
part2coords.append(coord)
part2_coords_wo_duplicates = removeduplicates(part2coords)
print(len(coords_wo_duplicates))
print(len(part2_coords_wo_duplicates))
| [
"Ansh.Thayil@yahoo.in"
] | Ansh.Thayil@yahoo.in |
dfa62b814e12a7fa59459792800f836689250725 | 6d43c34b418233d9b81980e6cba01a26d6a3e91a | /Tutoring/file01.py | a2e1c5de2973e1ed48c157523a3749179049e822 | [] | no_license | spongebob03/Playground | 1cbba795294e5d609cb0ae951568f62d6f7e8dbc | 4acae2f742f9f8b7e950053207e7c9f86cea6233 | refs/heads/master | 2021-01-05T04:22:36.303355 | 2020-05-22T13:59:28 | 2020-05-22T13:59:28 | 240,878,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | f = open("test.txt","r")
data=f.read()
f.close()
dic={}
for i in data:
dic[i]=data.count(i)
print(dic)
f=open("proverbs.txt","w")
f.write(str(dic))
f.close()
| [
"sunhee1996@naver.com"
] | sunhee1996@naver.com |
5e66469bc6925e125f81384f459e25881ddab17c | 71c9cae3ed8f96dfd40bd5040bc5d7e9f37590e9 | /jerrybot.py | db37244fd9bc8f2aaefcd94cd638e885036ba6ba | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | akosthekiss/jerrybot | b6eb1dd261624473a553efcf0081ab92cbe4ed7c | d892d1f841b43a54a05b27a83118a0a8c9642f51 | refs/heads/master | 2021-01-11T05:14:52.444071 | 2016-09-25T22:09:53 | 2016-09-25T22:09:53 | 69,193,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,327 | py | #! /usr/bin/python
# Copyright 2016 University of Szeged
# Copyright 2016 Akos Kiss
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ConfigParser # will be configparser once we can move to py3
import os
import random
import StringIO
import subprocess
import sys
import time
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from twisted.python import log
default_config = StringIO.StringIO("""
[irc]
server = chat.freenode.net
port = 6667
nick = jerrybot
channel = jerryscript
[jerryscript]
timeout = 5
maxlen = 1024
""")
class JerryBot(irc.IRCClient):
def __init__(self, config):
self._config = config
self.nickname = config.get('irc', 'nick')
self._channel = config.get('irc', 'channel')
self._timeout = config.get('jerryscript', 'timeout')
self._maxlen = config.getint('jerryscript', 'maxlen')
self._commands = {
'help': {
'command': self._command_help,
'help': 'list available commands'
},
'ping': {
'command': self._command_ping,
'help': 'a gentle pong to a gentle ping',
},
'version': {
'command': self._command_version,
'help': 'version of JerryScript',
},
'eval': {
'command': self._command_eval,
'help': 'eval JavaScript expression (timeout: %s secs, max output length: %s chars)' % (self._timeout, self._maxlen),
},
'hi': { 'command': self._command_hi, 'hidden': True },
'hello': { 'command': self._command_hi, 'hidden': True },
}
def connectionMade(self):
irc.IRCClient.connectionMade(self)
log.msg('connected to %s' % (self._config.get('irc', 'server')))
def connectionLost(self, reason):
irc.IRCClient.connectionLost(self, reason)
log.msg('disconnected from %s: %s' % (self._config.get('irc', 'server'), reason))
def signedOn(self):
self.join(self._channel)
def joined(self, channel):
log.msg('joined %s' % (channel))
def privmsg(self, user, channel, msg):
# no response to private messages
if channel == self.nickname:
return
user = user.split('!', 1)[0]
# only respond to messages directed at me
if msg.startswith(self.nickname) and msg.startswith((':', ',', ' '), len(self.nickname)):
msg = msg[len(self.nickname)+1:].strip()
log.msg('message from %s: %s' % (user, msg))
cmd = msg.split(None, 1)[0]
arg = msg[len(cmd):].strip()
self._commands.get(cmd, {'command': self._command_unknown})['command'](channel, user, cmd, arg)
def _command_unknown(self, channel, user, cmd, arg):
self.msg(channel, '%s: cannot do that (try: %s help)' % (user, self.nickname))
def _command_help(self, channel, user, cmd, arg):
help = ''.join(('%s: %s\n' % (name, desc.get('help', '')) for name, desc in sorted(self._commands.items()) if not desc.get('hidden', False)))
self.msg(channel, '%s: available commands:\n %s' % (user, help))
def _command_ping(self, channel, user, cmd, arg):
self.msg(channel, '%s: pong %s' % (user, arg))
def _command_hi(self, channel, user, cmd, arg):
greetings = [ 'hi', 'hello', 'hullo', 'nice to meet you', 'how are you?' ]
self.msg(channel, '%s: %s' % (user, random.choice(greetings)))
def _command_version(self, channel, user, cmd, arg):
self.msg(channel, '%s: %s' % (user, self._run_jerry(['--version'])))
def _command_eval(self, channel, user, cmd, arg):
self.msg(channel, '%s: %s' % (user, self._run_jerry(['--no-prompt'], inp=arg+'\n')))
def _run_cmd(self, cmd, inp=None):
log.msg('executing %s with %s' % (cmd, inp))
proc = subprocess.Popen(['timeout', self._timeout] + cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = proc.communicate(inp)
return out, err, proc.returncode
def _run_jerry(self, args, inp=None):
repo = self._config.get('jerryscript', 'repo')
if not repo:
return 'cannot find jerryscript repository'
jerry = os.path.join(repo, 'build', 'bin', 'jerry')
if not os.path.isfile(jerry):
return 'cannot find jerry interpreter'
out, err, code = self._run_cmd([jerry] + args, inp=inp)
if code != 0 or len(err) != 0:
return 'something went wrong (%s): %s' % (code, err[:self._maxlen])
return out[:self._maxlen]
class JerryBotFactory(protocol.ClientFactory):
def __init__(self, config):
self._config = config
def buildProtocol(self, addr):
return JerryBot(self._config)
def clientConnectionLost(self, connector, reason):
log.msg('connection lost: %s' % reason)
connector.connect()
def clientConnectionFailed(self, connector, reason):
log.msg('connection failed: %s' % reason)
reactor.stop()
def parse_config():
config = ConfigParser.ConfigParser()
config.readfp(default_config)
argparser = argparse.ArgumentParser()
argparser.add_argument('-s', '--server', metavar='ADDR', help='irc server name (default: %s)' % config.get('irc', 'server'))
argparser.add_argument('-p', '--port', metavar='PORT', help='irc server port (default: %s)' % config.get('irc', 'port'))
argparser.add_argument('-n', '--nick', metavar='NAME', help='irc nick (default: %s)' % config.get('irc', 'nick'))
argparser.add_argument('-c', '--channel', metavar='NAME', help='irc channel (default: %s)' % config.get('irc', 'channel'))
argparser.add_argument('-r', '--repo', metavar='DIR', help='path to local jerryscript git repository')
argparser.add_argument('-C', '--config', metavar='FILE', help='config ini file')
args = argparser.parse_args()
if args.config:
config.read(args.config)
if args.server:
config.set('irc', 'server', args.server)
if args.port:
config.set('irc', 'port', args.port)
if args.nick:
config.set('irc', 'nick', args.nick)
if args.channel:
config.set('irc', 'channel', args.channel)
if args.repo:
config.set('jerryscript', 'repo', args.repo)
return config
def main():
# parse input
config = parse_config()
# initialize logging
log.startLogging(sys.stdout)
# create and connect factory to host and port
reactor.connectTCP(config.get('irc', 'server'),
config.getint('irc', 'port'),
JerryBotFactory(config))
# run bot
reactor.run()
if __name__ == '__main__':
main()
| [
"akiss@inf.u-szeged.hu"
] | akiss@inf.u-szeged.hu |
c51f1a3b2c6432d6e2c324706f3f1ef9f43cc9ac | a5c7a5b169781e6c8636e9351e576e3f18c62602 | /RaijinEngine/properties.py | f07568a11441bfc674575601c7679ca8cd9e28b4 | [] | no_license | shivankurkapoor/raijin | a910cedfe0f26f37c93f21cbb8c8eb568973117f | 514e9a866c7566542c892295be138e357065b0d5 | refs/heads/master | 2021-09-12T23:37:05.084116 | 2018-04-22T21:01:33 | 2018-04-22T21:01:33 | 59,457,994 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | datasetPath = "C:\\RaijinDataset\\ml-1m\\ml-1m\\"
#datasetPath = "C:\\RaijinDataset\\ml-10m\\ml-10M100K\\"
linksFile = datasetPath + 'links.dat'
moviesFile = datasetPath + 'movies.dat'
ratingsFile = datasetPath + 'ratings.dat'
tagsFile = datasetPath + 'tags.dat'
model_path = "C:\\RaijinDataset\\"
model = "models\\"
| [
"shivankurkapoor3192@gmail.com"
] | shivankurkapoor3192@gmail.com |
09a00f789cbf4c18b58320e124a3ce9bc5e31e41 | d7bc2e88a1905b54da4321d11270c1854ec06c4d | /get_tissue_samples.py | ff4d5a6cbb7895377e7604e6d9f4348b97de8f46 | [] | no_license | cu-swe4s-fall-2019/workflow-anle6372 | 3cb071c58305d297b02cbbe0a7339743eb3442dd | 7ea149af045121d67fe36801249ee81db43f858c | refs/heads/master | 2020-09-14T18:08:21.057637 | 2019-11-22T01:01:35 | 2019-11-22T01:01:35 | 223,210,139 | 0 | 0 | null | 2019-11-22T01:01:36 | 2019-11-21T15:53:36 | Python | UTF-8 | Python | false | false | 1,561 | py | """File for retrieving sample IDs
Parameters
----------
attribute_file : gzipped .gct file containing the gene reads
tissue_name : tissue sample to search corresponding IDs for
output_file_name : name of resulting file
listing sample IDs of associated tissue sample
Returns
-------
output_file : file listing sample IDs and associated counts of given gene
"""
import gzip
import argparse
if __name__ == '__main__':
# Argparse Defns
parser = argparse.ArgumentParser(
description='Pull sampleIDs for specific tissue type')
parser.add_argument('--attributes_file', type=str,
help='File containing attributes', required=True)
parser.add_argument(
'--tissue_name', type=str,
help='name of the gene', required=True)
parser.add_argument(
'--output_file_name', type=str,
help='name of output file', required=True)
args = parser.parse_args()
# Defines file names
sample_info_file_name = args.attributes_file
output_file = args.output_file_name
# Defines variable names
tissue_name = args.tissue_name
samples = []
info_header = None
for l in open(sample_info_file_name):
if info_header is None:
info_header = l.rstrip().split('\t')
else:
samples.append(l.rstrip().split('\t'))
f = open(output_file, 'w')
for i in range(len(samples)):
tissue_type = samples[i][5]
if tissue_type == tissue_name:
f.write(str(samples[i][0]))
f.write("\n")
f.close()
| [
"anle6372@colorado.edu"
] | anle6372@colorado.edu |
7e9dcb08a5d09de543ba08b0a18e43862bec4e80 | 8537ecfe2a23cfee7c9f86e2318501f745078d67 | /Practise_stuff/nympy_commands/oo_numpy_array_manipulation2.py | 2fd9ce51e253406e6f5724fd2fcd8efc7014909a | [] | no_license | oolsson/oo_eclipse | 91d33501d9ed6c6b3c51bb22b635eb75da88e4e1 | 1828866bc4e1f67b279c5a037e4a6a4439ddb090 | refs/heads/master | 2021-01-01T20:17:12.644890 | 2015-11-30T09:49:41 | 2015-11-30T09:49:41 | 23,485,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | '''
Created on Jan 22, 2012
@author: oo
'''
import numpy
np=numpy
A=[1,2,3]
B=[4,5,6]
A=np.array(A)
B=np.array(B)
c=np.concatenate((A,B))
print c
print '2------------'
c=np.column_stack((A,B))
print c
print '3------------'
c=np.hstack((A,B))
print c
c=np.vstack((A,B))
print c
print '4------------'
c=np.array_split(c,1)
print c
print '5-----------'
d=np.array([1])
d=np.tile(d,7)
print d
print '6-----------'
x = np.array([[1,2],[3,4]])
print np.repeat(x, 1)
print np.repeat(x, 3, axis=1)
print np.repeat(x, [1, 2], axis=0)
| [
"o.h.olsson@gmail.com"
] | o.h.olsson@gmail.com |
85863f93c57442e96186df3112f03e59a994bebf | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/InnerDetector/InDetExample/InDetSLHC_Example/share/jobOptions_SLHC_nn_prodTrainingSample.py | f8455debc388b3c7208aa0f0ff0ccf73d99c6714 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | ###############################################################################
# jobOptions_SLHC_nn_prodTrainingSample.py
#
# script that reads a series of simulated HIT files, runs digitization and
# clusterization and produces the Ntuples needed to train the cluster splitting
# neuronal network.
# The ntuples produced are stored in TrkValidation.root
# -Validation
# |-> PixelRIOs : Cluster info.
# |-> NNinput : Input to train the NN.
#
# Note: This jobOptions WILL NOT WORK as it is neither for SLHC nor for IBL.
# YOU NEED TO EDIT PixelClusterValidationNtupleWriter.cxx
# IN InnerDetector/InDetValidation/InDetTrackValidation/InDetTrackValidation/
# TO USE ToT INSTEAD OF CHARGE IN NNinput
#
# Note 2: This jobOptions are based on InDetSLHCExample options. There there
# is also a stand alone .py file in this dir.
#
# Author: Tiago Perez <tperez@cern.ch>
# Date: 9-Jan-2012
##############################################################################
#--------------------------------------------------------------
# Template jobOptions: SLHC
# - Digitization
#--------------------------------------------------------------
from AthenaCommon.GlobalFlags import globalflags
globalflags.ConditionsTag = "OFLCOND-SDR-BS14T-ATLAS-00"
include("InDetSLHC_Example/preInclude.SLHC.py")
include("InDetSLHC_Example/preInclude.SiliconOnly.py")
from AthenaCommon.AthenaCommonFlags import jobproperties
jobproperties.AthenaCommonFlags.EvtMax=-1
#
## Input data
DATADIR="root://eosatlas.cern.ch//eos/atlas/user/t/tperez/"
#
## MinBias
#FILEPATH+="mc11_slhcid.108119.Pythia8_minbias_Inelastic_high.merge.HITS.e876_s1333_s1335_tid514272_00/"
#FILEPATH+="HITS.514272._000030.pool.root.1"
#
## ttbar
FILEPATH=DATADIR+"mc11_slhcid.105568.ttbar_Pythia.simul.HITS.e842_s1333_tid510282_00/"
FILEPATH+="HITS.510282._000429.pool.root.1"
#
#
jobproperties.AthenaCommonFlags.PoolHitsInput=[FILEPATH]
jobproperties.AthenaCommonFlags.PoolRDOOutput=DATADIR+"ttbar.digit.RDO.pool.root"
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion='ATLAS-SLHC-01-00-00'
from Digitization.DigitizationFlags import jobproperties
jobproperties.Digitization.doInDetNoise=False
include ( "Digitization/Digitization.py" )
include("InDetSLHC_Example/postInclude.SLHC_Digitization.py")
#
# Start clusterization
#
#
# Suppress usage of pixel distortions when validating simulation
# (otherwise clusters are corrected for module bow while G4 is not)
#
from IOVDbSvc.CondDB import conddb
if not conddb.folderRequested('/Indet/PixelDist'):
conddb.addFolder('PIXEL_OFL','/Indet/PixelDist')
conddb.addOverride("/Indet/PixelDist","InDetPixelDist-nominal")
#
# Include clusterization
# (need to set up services not already configured for digitization)
#
#include ("PixelConditionsServices/PixelRecoDb_jobOptions.py")
#
## Disable some COOL queries ?
from PixelConditionsTools.PixelConditionsToolsConf import PixelRecoDbTool
ToolSvc += PixelRecoDbTool()
ToolSvc.PixelRecoDbTool.InputSource = 0
## Configure the clusterization tool
from SiClusterizationTool.SiClusterizationToolConf import InDet__ClusterMakerTool
ClusterMakerTool = InDet__ClusterMakerTool( name = "InDet::ClusterMakerTool",
UsePixelCalibCondDB = False )
ToolSvc += ClusterMakerTool
## Configure PixelConditionsSummarySvc
from PixelConditionsServices.PixelConditionsServicesConf import PixelConditionsSummarySvc
InDetPixelConditionsSummarySvc = PixelConditionsSummarySvc()
InDetPixelConditionsSummarySvc.UseSpecialPixelMap = False
InDetPixelConditionsSummarySvc.UseDCS = False
InDetPixelConditionsSummarySvc.UseByteStream = False
ServiceMgr += InDetPixelConditionsSummarySvc
print InDetPixelConditionsSummarySvc
from InDetPrepRawDataFormation.InDetPrepRawDataFormationConf import InDet__PixelClusterization
job += InDet__PixelClusterization("PixelClusterization")
#
# Include PixelValidationNtuple
# with some information about Geant4 hits
#
from InDetTrackValidation.InDetTrackValidationConf import InDet__PixelClusterValidationNtupleWriter
job += InDet__PixelClusterValidationNtupleWriter("PixelNtupleWriter",
NtupleFileName = 'TRKVAL',
NtupleDirectoryName = 'Validation',
NtupleTreeName = 'PixelRIOs',
PixelClusterContainer = 'PixelClusters',
WriteDetailedPixelInformation = False,
DoHits = True,
DoMC = True,
FindNotAssociatedParticle= False,
WriteNNTraining = True,
# Extra flags ONLY ON PRIVATE InDetTrackValidation/PixelClusterValidationNtupleWriter
UseToT = True,
DetGeo = 'SLHC')
print job.PixelNtupleWriter
theApp.HistogramPersistency = 'ROOT'
if not 'OutputNTpl' in dir():
OutputNTpl = "TrkValidation_noTrack_ttbar_.root"
# Root file definition
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
ServiceMgr.THistSvc.Output += [ "TRKVAL DATAFILE='" + OutputNTpl + "' TYPE='ROOT' OPT='RECREATE'" ]
theApp.Dlls += [ 'RootHistCnv' ]
#
#
#
MessageSvc = Service( "MessageSvc" )
#increase the number of letter reserved to the alg/tool name from 18 to 30
MessageSvc.Format = "% F%50W%S%7W%R%T %0W%M"
# to change the default limit on number of message per alg
MessageSvc.defaultLimit = 9999999 # all messages
# Set output level threshold among DEBUG, INFO, WARNING, ERROR, FATAL
MessageSvc.OutputLevel = INFO
include("InDetSLHC_Example/postInclude.SLHC_Setup.py")
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
04b6dd3e9c8df9585e9921d7d92c5ec930088a0f | 74d4c335a53c2c42414d501c6e4d63ed4d9abe1f | /run_tests.py | 4d27eba6e6dd219cc65341c58e9875a5c0ad625b | [
"MIT"
] | permissive | nbeaver/format_json | 4d3d8ff279e67dcc10f05ca533670689c3f923ac | 4d0c2a0660f2e760664751adf92036fd657e1827 | refs/heads/master | 2020-06-03T05:51:39.011907 | 2019-06-18T23:30:17 | 2019-06-18T23:30:17 | 191,468,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | #! /usr/bin/env python3
import format_json
import tempfile
import json
import logging
import unittest
class MyTestCase(unittest.TestCase):
def test_input_output(self, payload=''):
fp = tempfile.NamedTemporaryFile(mode='w', delete=False)
json.dump(payload, fp)
logging.info(fp.name)
fp.close()
format_json.format_json_in_place(fp.name)
with open(fp.name) as fp2:
out = json.load(fp2)
assert(out == payload)
def test_1(self):
self.test_input_output(1)
def test_1_str(self):
self.test_input_output('1')
def test_empty_list(self):
self.test_input_output([])
def test_empty_object(self):
self.test_input_output({})
def test_numeric_list(self):
data = list(range(10))
self.test_input_output(data)
def test_simple_object(self):
data = {
'b' : 'B',
'c' : 'C',
'a' : 'A',
}
self.test_input_output(data)
def test_formatting(self):
payload = {'a' : 'A', 'b': 'B', 'c': 'C'}
expected = """{
"a": "A",
"b": "B",
"c": "C"
}
"""
fp = tempfile.NamedTemporaryFile(mode='w', delete=False)
json.dump(payload, fp)
logging.info(fp.name)
fp.close()
format_json.format_json_in_place(fp.name)
with open(fp.name) as fp2:
out = fp2.read()
assert(out == expected)
def test_increase_indentation(self):
payload = {'a' : 'A', 'b': 'B', 'c': 'C'}
fp = tempfile.NamedTemporaryFile(mode='w', delete=False)
json.dump(payload, fp, indent=1)
logging.info(fp.name)
fp.close()
format_json.format_json_in_place(fp.name, indent_level=4)
with open(fp.name) as fp2:
out = json.load(fp2)
assert(out == payload)
def test_reduce_indentation(self):
payload = {'a' : 'A', 'b': 'B', 'c': 'C'}
fp = tempfile.NamedTemporaryFile(mode='w', delete=False)
json.dump(payload, fp, indent=9)
logging.info(fp.name)
fp.close()
format_json.format_json_in_place(fp.name, indent_level=4)
with open(fp.name) as fp2:
out = json.load(fp2)
assert(out == payload)
if __name__ == '__main__':
unittest.main()
| [
"nathanielmbeaver@gmail.com"
] | nathanielmbeaver@gmail.com |
9ab282d30730162f807df5f1051fb64b4b701f87 | 9abe7510f0dfe405774f5fddcb900b8cfbd89422 | /pi/sense_tools_dodge.py | 3cadb6b1ffc74eb9d6036e7959f849e3d3700ca3 | [] | no_license | ssocolow/Pastebin | 26506c6241b0f9b40fe837c89dde8709d1be207f | 0620ea0701f86b761f6484d587a879b619e63b72 | refs/heads/master | 2022-08-11T19:05:13.644064 | 2022-06-23T15:12:23 | 2022-06-23T15:12:23 | 175,307,139 | 0 | 0 | null | 2021-12-14T15:15:45 | 2019-03-12T22:41:51 | Python | UTF-8 | Python | false | false | 3,095 | py | #import sense hat libraries
from sense_hat import SenseHat
from time import sleep
from sense_tools import Sense_BOARD
import random
#make a sense hat
sense = SenseHat()
sense.clear()
sense.set_rotation(180)
b = Sense_BOARD(sense)
x = 3
y = 0
c = 0
t = 0.03
lives = 3
asteroids = []
first = True
#if pitch is > 20 it is pitched left, if it is < 340 it is right
while True:
c += 1
sleep(t)
#sense.clear() #takes too long to clear all pixels
pitch = sense.orientation["pitch"]
if first:
b.set_pixel(x,y,[0,0,255])
first = False
if pitch > 20 and pitch < 100 and x < 7:
b.set_pixel(x,y,[0,0,0])
x = x + 1
b.set_pixel(x,y,[0,0,255])
elif pitch < 340 and pitch > 300 and x > 0:
b.set_pixel(x,y,[0,0,0])
x = x - 1
b.set_pixel(x,y,[0,0,255])
else:
pass
# if lives == 3:
# sense.set_pixel(3,7,[255,255,255])
# sense.set_pixel(4,7,[255,255,255])
# sense.set_pixel(5,7,[255,255,255])
# elif lives == 2:
# sense.set_pixel(4,7,[255,255,255])
# sense.set_pixel(5,7,[255,255,255])
# elif lives == 1:
# sense.set_pixel(4,7,[255,255,255])
# elif lives == 0:
# sense.set_pixel(4,7,[255,0,0])
# quit()
if c % 10 == 0:
asteroids.append(random.choice([[1,7],[2,7]]))
if asteroids[-1][0] == 1:
b.set_pixel(0,7,[255,0,0])
b.set_pixel(1,7,[255,0,0])
b.set_pixel(2,7,[255,0,0])
b.set_pixel(3,7,[255,0,0])
else:
b.set_pixel(4,7,[255,0,0])
b.set_pixel(5,7,[255,0,0])
b.set_pixel(6,7,[255,0,0])
b.set_pixel(7,7,[255,0,0])
if c % 3 == 0:
for ast in asteroids:
if ast[1] == 0:
asteroids.remove(ast)
if ast[0] == 1:
b.set_pixel(0,0,[0,0,0])
b.set_pixel(1,0,[0,0,0])
b.set_pixel(2,0,[0,0,0])
b.set_pixel(3,0,[0,0,0])
else:
b.set_pixel(4,0,[0,0,0])
b.set_pixel(5,0,[0,0,0])
b.set_pixel(6,0,[0,0,0])
b.set_pixel(7,0,[0,0,0])
elif ast[0] == 1:
b.set_pixel(0,ast[1],[0,0,0])
b.set_pixel(1,ast[1],[0,0,0])
b.set_pixel(2,ast[1],[0,0,0])
b.set_pixel(3,ast[1],[0,0,0])
ast[1] -= 1
b.set_pixel(0,ast[1],[255,0,0])
b.set_pixel(1,ast[1],[255,0,0])
b.set_pixel(2,ast[1],[255,0,0])
b.set_pixel(3,ast[1],[255,0,0])
else:
b.set_pixel(4,ast[1],[0,0,0])
b.set_pixel(5,ast[1],[0,0,0])
b.set_pixel(6,ast[1],[0,0,0])
b.set_pixel(7,ast[1],[0,0,0])
ast[1] -= 1
b.set_pixel(4,ast[1],[255,0,0])
b.set_pixel(5,ast[1],[255,0,0])
b.set_pixel(6,ast[1],[255,0,0])
b.set_pixel(7,ast[1],[255,0,0])
if ast[0] == 1 and x < 4 and ast[1] == 0:
b.set_pixel(x,y,[0,255,0])
b.update_board()
print(c)
lives -= 1
quit()
if ast[0] == 2 and x > 3 and ast[1] == 0:
b.set_pixel(x,y,[0,255,0])
b.update_board()
print(c)
lives -= 1
quit()
#code can break if time goes negative
if c % 120 == 0:
t = t - 0.01
#b.print_board()
b.update_board()
| [
"noreply@github.com"
] | noreply@github.com |
359e4a6cb059bdb7dbb6abd441869231dd9db3e2 | 2474b18786f3d8d12c6822437ac5a327cb43554b | /reports/report1/oop.py | 7ededa3b3c50f5a98e4476c9e48f37eb329491e2 | [] | no_license | marwaHisham/python_tasks | 5f22466ef3360b12012aa2df46abbd2c0283f963 | ea73be6116a2ae38270d96888fe837740296e56c | refs/heads/master | 2021-05-08T22:57:25.837940 | 2018-02-03T18:49:06 | 2018-02-03T18:49:06 | 119,692,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | #1- super can use to define the parent you need to call even if you inherit from parent
# only from its parent. When that parent itself inherits from a top-level
# parent, the child can use super(Parent, self) to reach the top-level
# parent's method directly, skipping the immediate parent.
class Human:
    """Teaching class demonstrating instance, class, and static methods."""

    # Class-level defaults; __init__ shadows `name` per instance, while
    # fault() mutates makeFault on the class itself.
    name = "marwa"
    makeFault = 0

    def __init__(self, name):
        """Store the given name and announce the new instance."""
        self.name = name
        print("i am", name)

    def speak(self):
        """Introduce this instance by name."""
        print("my name is", self.name)

    @classmethod
    def fault(cls):
        """Bump the class-wide fault counter and display its new value."""
        cls.makeFault += 1
        print(cls.makeFault)

    @staticmethod
    def measuretmp(temp):
        """Classify a temperature: exactly 37 is 'normal', anything else is not."""
        return 'normal' if temp == 37 else 'not normal'
class Employee(Human):
    """A Human with a salary; shows delegation to the parent initializer."""

    def __init__(self, name, salary):
        """Announce construction, let Human handle the name, record the pay."""
        print("child \n")
        # Zero-argument super() is equivalent to super(Employee, self) here.
        super().__init__(name)
        self.salary = salary

    def work(self):
        """Print a greeting on behalf of the employee."""
        print("welcome")
class Employee2(Employee):
    """Demonstrates skipping the immediate parent's initializer.

    super(Employee, self) starts the MRO lookup *after* Employee, so
    __init__ here runs Human.__init__ directly and never prints "child \n".
    """

    def __init__(self, name, salary):
        # Deliberate MRO skip past Employee — see the class docstring.
        super(Employee, self).__init__(name)
        print("child2 \n")
        self.salary = salary

    def work(self):
        """Print this class's (placeholder) work message."""
        print("njggfgfghjk")
# Demo driver: only the Employee2 construction is active; the remaining
# lines are earlier experiments left commented out.
#emp=Employee("marwa",500)
emp2=Employee2("marwa",500)
#emp.speak()
#emp.work()
# Human.fault()
# print("----------------------")
# print(Human.measuretmp(38))
# print("----------------------")
# man=Human("Ahmed")
# mostafa=Human("mostafa")
# man.speak()
# print(man.measuretmp(37))
# print("----------------------")
# print('man1 :',man.makeFault)
# print('man2 :',mostafa.makeFault)
# print('Human',Human.makeFault)
# mostafa.makeFault=2
# print('man2 :',mostafa.makeFault)
# print('Human',Human.makeFault)
# print('man1 :',man.makeFault)
| [
"promarwa2020@gmail.com"
] | promarwa2020@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.