index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
997,100 | 41fd06f6d0a1b5b5f3bb47e1a403f9f20c09bd97 | #!/usr/bin/env python
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#%%% --------------------------------- ElaStic_Analyze_Energy -------------------------------- %%%#
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#
# AUTHOR:
# Rostam Golesorkhtabar
# r.golesorkhtabar@gmail.com
#
# DATE:
# Sun Jan 01 00:00:00 2012
#
# SYNTAX:
# python ElaStic_Analyze_Energy.py
# ElaStic_Analyze_Energy
#
# EXPLANATION:
#
#__________________________________________________________________________________________________
from sys import stdin
from numpy import *
from math import *
import numpy as np
import subprocess
import warnings
import os.path
import shutil
import copy
import math
import sys
import os
import matplotlib.pyplot as plt
#%!%!%--- CONSTANTS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
_e = 1.602176565e-19 # elementary charge
Bohr = 5.291772086e-11 # a.u. to meter
Ryd2eV = 13.605698066 # Ryd to eV
ToGPa = (_e*Ryd2eV)/(1e9*Bohr**3) # Ryd/[a.u.^3] to GPa
#__________________________________________________________________________________________________
#%!%!%--- SUBROUTINS AND FUNCTIONS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
def sortlist(lst1, lst2):
    """Sort lst1 ascending and reorder lst2 so (lst1[i], lst2[i]) pairs stay aligned.

    Returns (sorted_keys, reordered_values) as two new lists; the inputs
    are not modified.

    BUG FIX: the previous implementation looked values up with
    lst1.index(...), which always returns the FIRST matching position, so
    duplicate keys in lst1 caused the same lst2 value to be emitted for
    every duplicate.  A stable pair-sort keeps each key with its own value.
    """
    # Sort on the key only (stable), so tied keys keep their original order
    # and never compare the paired values.
    pairs = sorted(zip(lst1, lst2), key=lambda pair: pair[0])
    lst3 = [key for key, _ in pairs]
    lst4 = [val for _, val in pairs]
    return lst3, lst4
#--------------------------------------------------------------------------------------------------
#%!%!%--- Reading the "INFO_ElaStic" file ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
# Parse the fixed, line-ordered "INFO_ElaStic" file produced by the setup
# step.  Each line carries one value; parsing is positional (split()[-1]),
# so the file format must not change.
INFO=open('INFO_ElaStic', 'r')
l1 = INFO.readline()
ordr= int(l1.split()[-1])  # order of the elastic constants: 2 or 3
if (ordr != 2 and ordr != 3):
sys.exit('\n.... Oops ERROR: The order of the elastic constant is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_ElaStic" file.\n')
l2 = INFO.readline()
mthd= l2.split()[-1]  # calculation method: 'Stress' or 'Energy'
if (mthd != 'Stress' and mthd != 'Energy'):
sys.exit('\n.... Oops ERROR: The method of the calculation is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_ElaStic" file.\n')
l3 = INFO.readline()
cod = l3.split()[-1]  # DFT code that produced the runs
if (cod != 'WIEN2k' and cod != 'exciting' and cod != 'ESPRESSO'):
sys.exit('\n.... Oops ERROR: The DFT code is NOT clear !?!?!?'\
'\n Something is WRONG in the "INFO_ElaStic" file.\n')
l4 = INFO.readline()
SGN = int(l4.split()[-1])  # space-group number (1..230)
l5 = INFO.readline()
V0 = float(l5.split()[-2])  # equilibrium volume; note [-2]: last token is presumably a unit label -- confirm
l6 = INFO.readline()
mdr = float(l6.split()[-1])  # maximum distortion amplitude
l7 = INFO.readline()
NoP = int(l7.split()[-1])  # number of distorted structures per deformation
INFO.close()
#--------------------------------------------------------------------------------------------------
#%!%!%--- Calculating the Space-Group Number and classifying it ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
# Classify the space-group number SGN into its Laue class (LC) and look up
# the number of independent elastic constants (ECs) for the requested
# order (ordr is already validated to be 2 or 3 above).
# Each row: (lowest SGN, highest SGN, Laue class, ECs for 2nd, ECs for 3rd).
_SG_CLASSES = [
    (  1,   2, 'N'  , 21, 56),  # Triclinic
    (  3,  15, 'M'  , 13, 32),  # Monoclinic
    ( 16,  74, 'O'  ,  9, 20),  # Orthorhombic
    ( 75,  88, 'TII',  7, 16),  # Tetragonal II
    ( 89, 142, 'TI' ,  6, 12),  # Tetragonal I
    (143, 148, 'RII',  7, 20),  # Rhombohedral II
    (149, 167, 'RI' ,  6, 14),  # Rhombohedral I
    (168, 176, 'HII',  5, 12),  # Hexagonal II
    (177, 194, 'HI' ,  5, 10),  # Hexagonal I
    (195, 206, 'CII',  3,  8),  # Cubic II
    (207, 230, 'CI' ,  3,  6),  # Cubic I
]
for _lo, _hi, _laue, _ec2, _ec3 in _SG_CLASSES:
    if _lo <= SGN <= _hi:
        LC = _laue
        ECs = _ec2 if ordr == 2 else _ec3
        break
else:
    sys.exit('\n.... Oops ERROR: WRONG Space-Group Number !?!?!? \n')
#--------------------------------------------------------------------------------------------------
#%!%!%--- Reading the energies ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
# Collect the total energies of every distorted structure.  For each
# deformation directory Dst01..DstNN, scan the per-point run output and
# write "(strain, energy)" rows into DstNN_Energy.dat.
for i in range(1, ECs+1):
if (i<10):
Dstn = 'Dst0'+ str(i)
else:
Dstn = 'Dst' + str(i)
if (os.path.exists(Dstn) == False):
sys.exit('.... Oops ERROR: Where is the '+ Dstn +' directory !?!?!? \n')
os.chdir(Dstn)
f = open(Dstn+'_Energy.dat', 'w')
for j in range(1, NoP+1):
if (j<10):
Dstn_num = Dstn +'_0'+str(j)
else:
Dstn_num = Dstn +'_' +str(j)
if (os.path.exists(Dstn_num)):
os.chdir(Dstn_num)
# Per-code name of the file holding the converged total energy.
if (cod=='WIEN2k' ): energy_output = Dstn_num+'_Converged.scf'
if (cod=='exciting'): energy_output = 'INFO.OUT'
if (cod=='ESPRESSO'): energy_output = Dstn_num+'.out'
if (os.path.exists(energy_output)):
# The last matching line wins: converged value appears last.
if (cod == 'WIEN2k'):
for line in open(energy_output, 'r'):
if (line.find(':ENE :')>=0):
energy = float(line.split()[-1])
if (cod == 'exciting'):
for line in open(energy_output, 'r'):
if (line.find(' total energy :')>=0):
energy = float(line.split()[-1])
if (cod == 'ESPRESSO'):
for line in open(energy_output,'r'):
if (line.find('! total energy')>=0):
energy=float(line.split()[-2])
# NOTE(review): if no matching line is found, 'energy' silently
# keeps the value from the previous point (or raises NameError on
# the first one) -- confirm upstream guarantees a converged line.
s = j-(NoP+1)/2               # point index centered on zero strain
r = 2*mdr*s/(NoP-1)           # physical strain of point j
if (s==0): r=0.0001           # avoid an exactly-zero strain label
if (r>0):
strain ='+%12.10f'%r
else:
strain = '%13.10f'%r
print(strain,' ', energy,file=f)
os.chdir('../')
f.close()
os.chdir('../')
#--------------------------------------------------------------------------------------------------
# Polynomial fits below can be poorly conditioned; silence the warning.
# NOTE(review): np.RankWarning moved to np.exceptions.RankWarning in
# NumPy >= 1.25 and the top-level alias was removed in 2.0 -- confirm the
# NumPy version this is pinned to.
warnings.simplefilter('ignore', np.RankWarning)
#%!%!%--- Directory management ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
# Keep exactly one backup of the previous analysis output.
if (os.path.exists('Energy-vs-Strain_old')):
shutil.rmtree( 'Energy-vs-Strain_old')
if (os.path.exists('Energy-vs-Strain')):
os.rename( 'Energy-vs-Strain','Energy-vs-Strain_old')
os.mkdir('Energy-vs-Strain')
os.chdir('Energy-vs-Strain')
# Gather all the per-deformation energy files produced above.
os.system('cp -f ../Dst??/Dst??_Energy.dat .')
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#%!%!% ------------ Calculating the second derivative and Cross-Validation Error ----------- %!%!%#
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
# Conversion prefactor turning the fitted polynomial coefficient into an
# elastic constant (Ryd/[a.u.^3] -> GPa, times ordr! from the Taylor fit).
# 'exciting' carries an extra factor 2 -- presumably its energies are in
# Hartree rather than Rydberg; confirm against the code's output units.
CONV = ToGPa * factorial(ordr) * (2. if cod == 'exciting' else 1.)
# For every deformation: fit polynomials of decreasing order to the
# energy-vs-strain data, write the extracted elastic-constant combination
# (FD) and the leave-one-out cross-validation error (FE), each as a
# function of the maximum strain retained in the fit window.
for i in range(1, ECs+1):
if (i<10):
Dstn = 'Dst0'+str(i)
else:
Dstn = 'Dst' +str(i)
if (ordr == 2):
FD = open(Dstn+'_d2E.dat', 'w')
if (ordr == 3):
FD = open(Dstn+'_d3E.dat', 'w')
FE = open(Dstn+'_CVe.dat', 'w')
print('# Max. eta SUM(Cij) \n#',file=FD)
print('# Max. eta Cross-Validation Error \n#',file=FE)
fcc = {}
cve = {}
# Fit orders ordr+4, ordr+2, ordr (same parity as the derivative order).
for j in range(ordr+4, ordr-1, -2):
if (j == 2): nth = '2nd'
elif(j == 3): nth = '3rd'
else:
nth = str(j) + 'th'
fcc[nth] = [[],[]]
cve[nth] = [[],[]]
print('\n# '+ nth +' order fit.',file=FD)
print('\n# '+ nth +' order fit.',file=FE)
#--- Reading the input files --------------------------------------------------------------
eta_ene= open(Dstn+'_Energy.dat', 'r')
nl = 0
strain = []
energy = []
while (nl < NoP):
line = eta_ene.readline()
if (line == ''): break
line = line.strip().split()
if (len(line) == 2):
nl +=1
eta, ene = line
strain.append(float(eta))
energy.append(float(ene))
elif (len(line) == 0): pass
else:
sys.exit('\n.... Oops ERROR: Strain and Energy are NOT defined correctly in "' +\
Dstn+'_Energy.dat" !?!?!?\n')
eta_ene.close()
strain, energy = sortlist(strain, energy)
strain0 = copy.copy(strain)   # keep pristine copies: the fit loop pops points
energy0 = copy.copy(energy)
# ------------------------------------------------------------------------------------------
# Shrink the strain window symmetrically; refit at each window size.
while (len(strain) > j):
emax = max(strain)
emin = min(strain)
emax = max(abs(emin),abs(emax))
coeffs= polyfit(strain, energy, j)
# coeffs[j-ordr] is the coefficient of eta**ordr (numpy returns
# highest power first), i.e. the ordr-th derivative / ordr!.
if (ordr == 2):
Cij = coeffs[j-2]*CONV/V0 # in GPa unit
if (ordr == 3):
Cij = coeffs[j-3]*CONV/V0 * 0.001 # in TPa unit
fcc[nth][0].append(emax)
fcc[nth][1].append(Cij)
print('%13.10f'%emax, '%18.6f'%Cij,file=FD)
# Drop the outermost point(s) at +/- emax for the next iteration.
if (abs(strain[0]+emax) < 1.e-7):
strain.pop(0); energy.pop(0)
if (abs(strain[len(strain)-1]-emax) < 1.e-7):
strain.pop()
energy.pop()
#--- Cross-Validation error calculations --------------------------------------------------
strain = copy.copy(strain0)
energy = copy.copy(energy0)
while (len(strain) > j+1):
emax = max(strain)
emin = min(strain)
emax = max(abs(emin),abs(emax))
S = 0
# Leave-one-out: refit without point k, score its prediction error.
for k in range(len(strain)):
Y = energy[k]
etatmp = []
enetmp = []
for l in range(len(strain)):
if (l==k): pass
else:
etatmp.append(strain[l])
enetmp.append(energy[l])
Yfit = polyval(polyfit(etatmp,enetmp, j), strain[k])
S = S + (Yfit-Y)**2
CV = sqrt(S/len(strain))
cve[nth][0].append(emax)
cve[nth][1].append(CV)
print('%13.10f'%emax, CV,file=FE)
if (abs(strain[0]+emax) < 1.e-7):
strain.pop(0)
energy.pop(0)
if (abs(strain[len(strain)-1]-emax) < 1.e-7):
strain.pop()
energy.pop()
# BUG FIX: the handles opened above are named FD and FE; 'fD'/'fE' raised
# NameError here and left the per-deformation output files unclosed.
FD.close()
FE.close()
#-- Plotting ----------------------------------------------------------------------------------
# One figure per deformation: cross-validation error vs maximum strain,
# one curve per polynomial fit order.
fig, ax = plt.subplots()
#plot for SUM(Cij)
# for key in fcc.keys():
# x = np.array(fcc[key][0])
# y = np.array(fcc[key][1])
# ax.plot(x,y,'o-',label="n="+key)
# ax.set_xlabel('max strain')
# ax.set_ylabel('SUM(Cij)')
for key in cve.keys():
x = np.array(cve[key][0])
y = np.array(cve[key][1])
ax.plot(x,y,'o-',label="n="+key)
ax.set_xlabel('max_strain')
ax.set_ylabel('CV error')
ax.legend()
# BUG FIX: 'LSi_dic' is never defined in this script (and 'l' is a stale
# inner-loop index), so the original title line raised NameError on the
# first plot.  Label the figure with the deformation name only.
plt.title('plot for ' + Dstn + ', n = Order of polynomial fit')
plt.show()
os.chdir('../')
#--- Writing the "ElaStic_???.in" file ------------------------------------------------------------
# Write the skeleton "ElaStic_2nd.in"/"ElaStic_3rd.in" file: one row per
# deformation, to be completed by the user with the chosen fit window
# (eta_max) and polynomial order.
if (ordr == 2): orth = '2nd'
if (ordr == 3): orth = '3rd'
fri = open('ElaStic_'+ orth +'.in', 'w')
for i in range(1, ECs+1):
if (i<10):
Dstn = 'Dst0'+str(i)
else:
Dstn = 'Dst' +str(i)
print(Dstn+' eta_max Fit_order',file=fri)
fri.close()
#--------------------------------------------------------------------------------------------------
# Drop the Grace plotting template if one was copied along with the data.
os.system('rm -f Energy-vs-Strain/Grace.par')
|
997,101 | 4f975abc5d6cef43c7f3e65d8c1b42a2f45055dd | #!/usr/bin/env python
# coding=utf-8
import sys
from controle.controle_drone_visao import ControleVisaoDrone, TipoNoVisao
def main(args):
# Start the drone vision controller on the front camera and hand the ROS
# topic initialization the command-line arguments.  The commented flags
# below toggle additional debug windows (track start/end, green/red sensor).
controlevisaodrone = ControleVisaoDrone(TipoNoVisao.NoCameraFrontal)
controlevisaodrone.mostrar_tela_pista = True  # show the track debug window
#controlevisaodrone.mostrar_tela_inicio_pista = True
#controlevisaodrone.mostrar_tela_final_pista = True
#controlevisaodrone.mostrar_tela_sensor_verde = True
#controlevisaodrone.mostrar_tela_sensor_vermelho = True
controlevisaodrone.inicializar_topico_ros(args)
if __name__ == "__main__":
main(sys.argv)
|
997,102 | 6bf5e602851522c71e3c04c9e5dac46906584339 | from django.contrib import admin
from django.conf.urls.defaults import patterns, include
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# URLconf for the site.  Uses the pre-Django-1.6 'patterns' API with
# string view references (django.conf.urls.defaults); keep in sync with
# the installed Django version.
admin.autodiscover()
# Static files first, then the application routes.
urlpatterns = staticfiles_urlpatterns()
## or import static_urls; urlpatterns = static_urls.urlpatterns
urlpatterns += patterns('',
(r'^admin/', include(admin.site.urls)),
(r'^favicon.ico$', 'django.views.generic.simple.redirect_to',
{'url': '/static/images/favicon.ico'}),
(r'^tinymce/', include('tinymce.urls')),
(r'^accounts/', include('userena.urls')),
(r'', include('wheelcms_axle.urls')),
)
# Serve uploaded media directly only in development.
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
997,103 | 3a91c68e3f8a880bb5453b0a3ab8d5b62ff0062f | import distro, utils, random
def _solver(stats):
    """Recover the (a, b) endpoints of a discrete uniform distribution.

    Solves the moment equations for the mean and variance extracted from
    *stats*; returns None when the quadratic has no real roots.
    """
    mu, sig2 = distro.extractStats(stats, [distro.Stat.Mu, distro.Stat.Sig2])
    roots = utils.solve_quadratic_eqn(1, -2 * mu - 1, mu ** 2 + mu - 3 * sig2)
    if roots == None:
        return None
    lower = min(roots)
    # The endpoints are symmetric about the mean: b = 2*mu - a.
    return (lower, 2 * mu - lower)
def _kurtosis(a, b):
n = b - a + 1.0
return -6 * (n ** 2 + 1) / 5. / (n ** 2 - 1)
# Register the discrete uniform distribution with the distro framework:
# moment-based parameter solver, sampler, and closed-form skew/kurtosis.
distro.register(
name = 'Uniform Discrete',
domain = distro.Domain.Discrete,
params = ('a', 'b'),
paramSolver = _solver,
cdf = lambda x : 0, # unimplemented
sample = lambda a, b : random.randint(a,b),
fittingFns = {
distro.Stat.Skew: lambda a, b : 0,  # symmetric distribution
distro.Stat.Kurt: _kurtosis
}
)
|
997,104 | dbb09af7b58e1a13528071d0870aaa05244426fd | import os
import subprocess
from devbot import command
from devbot import config
def _chdir(func):
def wrapped(*args, **kwargs):
orig_cwd = os.getcwd()
os.chdir(args[0].local)
result = func(*args, **kwargs)
os.chdir(orig_cwd)
return result
return wrapped
class Module:
def __init__(self, path=None, name=None, remote=None,
branch="master", tag=None, retry=10):
if path is None or name is None or remote is None:
raise RuntimeError("path, name and remote are required")
self.remote = remote
self.local = os.path.join(path, name)
self.tag = tag
self._path = path
self._name = name
self._branch = branch
self._retry = 10
def _clone(self):
os.chdir(self._path)
command.run(["git", "clone", "--progress", self.remote, self._name],
retry=self._retry)
os.chdir(self.local)
if self.tag:
command.run(["git", "checkout", self.tag])
else:
command.run(["git", "checkout", self._branch])
def update(self, revision=None):
if not os.path.exists(os.path.join(self.local, ".git")):
self._clone()
return
orig_cwd = os.getcwd()
os.chdir(self.local)
if revision is None:
if self.tag and self._head_has_tag(self.tag):
os.chdir(orig_cwd)
return
revision = self.tag
if revision == self._get_commit_id():
os.chdir(orig_cwd)
return
command.run(["git", "remote", "set-url", "origin", self.remote])
command.run(["git", "fetch"], retry=self._retry)
if revision:
command.run(["git", "checkout", revision])
else:
command.run(["git", "merge", "--ff-only",
"origin/%s" % self._branch])
os.chdir(orig_cwd)
@_chdir
def checkout(self, revision=None):
if revision is None:
revision = self.tag
if revision is None:
revision = self._branch
command.run(["git", "checkout", revision])
@_chdir
def describe(self):
return subprocess.check_output(["git", "describe"]).strip()
@_chdir
def get_annotation(self, tag):
# FIXME this is fragile, there must be a better way
show = subprocess.check_output(["git", "show", tag])
annotation = []
for line in show.split("\n"):
ignore = False
for start in ["tag ", "Tagger: ", "Date: "]:
if line.startswith(start):
ignore = True
if line.startswith("commit "):
break
if not ignore:
annotation.append(line)
return "\n".join(annotation)
def clean(self):
try:
os.chdir(self.local)
except OSError:
return False
command.run(["git", "clean", "-fdx"])
return True
def _get_commit_id(self):
return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
def _head_has_tag(self, tag):
tags = subprocess.check_output(["git", "tag", "--points-at", "HEAD"])
return tag in tags.split("\n")
def get_module(module):
    """Build a :class:`Module` from a configuration descriptor.

    *module* carries name/repo/branch/tag attributes; the checkout root
    comes from the global config.
    """
    return Module(
        name=module.name,
        path=config.get_source_dir(),
        remote=module.repo,
        tag=module.tag,
        branch=module.branch,
        retry=10,
    )
|
997,105 | 8348c03b9ea861257d4096e95684aad3dbdee54e | import requests
import re
from bs4 import BeautifulSoup
# Probe the CSDN search endpoint for pages 1..10 of the query 'python'
# and print the effective request URLs.  Network I/O only; the response
# body is never parsed (the 're' and 'BeautifulSoup' imports above are
# currently unused).
url = "https://so.csdn.net/so/search/s.do"
for p in range(10):
p = p + 1  # pages are 1-based
s = 'python'
kv = {'p': '%d' % p, 'q': '%s' % s}
print(kv)
r = requests.get(url, params=kv)
print(r.url)
# {'p': '1', 'q': 'python'}
# {'p': '2', 'q': 'python'}
# {'p': '3', 'q': 'python'}
# {'p': '4', 'q': 'python'}
# {'p': '5', 'q': 'python'}
# {'p': '6', 'q': 'python'}
# {'p': '7', 'q': 'python'}
# {'p': '8', 'q': 'python'}
# {'p': '9', 'q': 'python'}
# {'p': '10', 'q': 'python'}
# https://python.csdn.net
|
997,106 | a61135a52e88f62645b3336c7d419c417d30dad7 | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
# Container populated by snapshottest-generated entries; regenerated, do not edit by hand.
snapshots = Snapshot()
|
997,107 | cb965ff89aad66db6b28d8f3a7f25ff1d571c2cc | from utils import *
from GnD import *
# DCGAN training setup: models, loss, optimizers and cosine-restart LR
# schedules.  Generator/Discriminator come from GnD, train_loader from utils.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lr = 0.0006
lr_d = 0.0002
beta1 = 0.5
epochs = 1700
netG = Generator(100, 32, 3).to(device)
netD = Discriminator(3, 48).to(device)
criterion = nn.BCELoss()
#criterion = nn.MSELoss()
# NOTE(review): 'lr' (0.0006) feeds the D optimizer and 'lr_d' (0.0002)
# feeds the G optimizer -- the names suggest these may be swapped; confirm.
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr_d, betas=(beta1, 0.999))
lr_schedulerG = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizerG,
T_0=epochs//200, eta_min=0.00005)
lr_schedulerD = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizerD,
T_0=epochs//200, eta_min=0.00005)
nz = 100  # latent vector length
fixed_noise = torch.randn(25, nz, 1, 1, device=device)  # fixed grid for visual progress
real_label = 0.75  # one-sided label smoothing for real samples
fake_label = 0.0
batch_size = train_loader.batch_size
step = 0
# Standard GAN alternating updates: D on real+fake, then G against D.
for epoch in range(epochs):
for ii, (real_images) in tqdm(enumerate(train_loader), total=len(train_loader)):
end = time()
# NOTE(review): 'start' is not defined in this file -- presumably
# exported by 'from utils import *'; verify, otherwise this raises.
# The 31000 s budget looks like a kernel/runtime wall-clock limit.
if (end -start) > 31000 :
break
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_images = real_images.to(device)
batch_size = real_images.size(0)
# Real labels get uniform noise around the smoothed 0.75 target.
labels = torch.full((batch_size, 1), real_label, device=device) + np.random.uniform(-0.1, 0.1)
output = netD(real_images)
errD_real = criterion(output, labels)
errD_real.backward()
D_x = output.mean().item()
# train with fake
noise = torch.randn(batch_size, nz, 1, 1, device=device)
fake = netG(noise)
# NOTE(review): fill_() is in-place and the '+ np.random.uniform(...)'
# result is discarded, so fake labels receive NO noise -- likely a bug;
# compare with the real-label line above.
labels.fill_(fake_label) + np.random.uniform(0, 0.2)
output = netD(fake.detach())
errD_fake = criterion(output, labels)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labels.fill_(real_label) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, labels)
errG.backward()
D_G_z2 = output.mean().item()
optimizerG.step()
if step % 500 == 0:
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch + 1, epochs, ii, len(train_loader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# NOTE(review): valid_image is assigned but never used here.
valid_image = netG(fixed_noise)
step += 1
lr_schedulerG.step(epoch)
lr_schedulerD.step(epoch)
if epoch % 200 == 0:
show_generated_img()
# torch.save(netG.state_dict(), 'generator.pth')
# torch.save(netD.state_dict(), 'discriminator.pth')
def truncated_normal(size, threshold=1):
    """Draw samples from a standard normal truncated to [-threshold, threshold]."""
    samples = truncnorm.rvs(-threshold, threshold, size=size)
    return samples
# Generate 10k images from truncated-normal latents, save them to
# ../output_images, and zip the folder for submission.
if not os.path.exists('../output_images'):
os.mkdir('../output_images')
im_batch_size = 100
n_images=10000
for i_batch in range(0, n_images, im_batch_size):
z = truncated_normal((im_batch_size, 100, 1, 1), threshold=1)
gen_z = torch.from_numpy(z).float().to(device)
#gen_z = torch.randn(im_batch_size, 100, 1, 1, device=device)
gen_images = netG(gen_z)
# NOTE(review): 'images' is computed but unused; saving works directly
# from gen_images below.
images = gen_images.to("cpu").clone().detach()
images = images.numpy().transpose(0, 2, 3, 1)
for i_image in range(gen_images.size(0)):
# Rescale from [-1, 1] (tanh output, presumably -- confirm) to [0, 1].
save_image((gen_images[i_image, :, :, :] +1.0)/2.0, os.path.join('../output_images', f'image_{i_batch+i_image:05d}.png'))
import shutil
shutil.make_archive('images', 'zip', '../output_images')
|
997,108 | 6407578733855ae4b98564993c955f4082ad8f74 | from django.contrib import admin
from django.urls import path
import blog.views
# Blog URLconf: list, detail, create post and create comment routes.
urlpatterns = [
path('', blog.views.home, name="home"),
path('post/<int:post_id>', blog.views.detail, name = "detail"),
path('post/new', blog.views.new, name="new"),
path('post/<int:pk>/comment', blog.views.comment_new, name="comment_new"),
]
997,109 | b41a00945de986b8f9a0f0855c992669c4141f1f | import os
# NOTE(review): hard-coded secret committed to source control -- rotate it
# and load from an environment variable or secret store instead.
# The trailing '\n' is part of the value; consumers appear to expect it -- confirm.
THE_KEY = '7HUwodZj+dZiqdJcDe+KaPnH2Pdk6ZL1\n'
997,110 | d122071caa71de5b188f0423eb3a60fc686d57c0 | from flask.ext.wtf import Form
from wtforms import (
StringField,
PasswordField,
BooleanField,
SubmitField,
TextAreaField,
)
from wtforms.validators import Required, Length, Email, EqualTo
class LoginForm(Form):
"""Email/password login form with an optional persistent session."""
email = StringField('Email', validators=[Required(),
Length(1, 64), Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
login = SubmitField('Log in')
class SignupForm(Form):
"""Registration form; the two password fields must match (EqualTo)."""
name = StringField('Name', validators=[Required()])
email = StringField('Email', validators=[Required(), Email()])
password = PasswordField('Enter a password:',
validators=[Required(),
EqualTo('password2',
message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[Required()])
signup = SubmitField('Sign up')
class DocumentForm(Form):
"""Form to register a document (title, description, source URL) for note-taking."""
title = StringField('Title', validators=[Required()])
descrip = StringField('Description', validators=[Required()])
url = StringField('URL', validators=[Required()])
urlSub = SubmitField('Take notes!')
class NoteForm(Form):
"""Form attaching a note to a highlighted passage; both fields required."""
note = TextAreaField('Enter your note', validators=[Required()])
highlight = TextAreaField(validators=[Required()])
submit = SubmitField('Submit')
|
997,111 | 05781dd9642538c11c9a3f4239e57050693fe93c | #!/bin/usr/python3
'''
Task
Students of District College have a subscription to English and French
newspapers. Some students have subscribed to only the English newspaper,
some have subscribed to only the French newspaper, and some have subscribed
to both newspapers.
You are given two sets of student roll numbers. One set has subscribed
to the English newspaper, and one set has subscribed to the French newspaper.
Your task is to find the total number of students who have subscribed to
only English newspapers.
Input Format
The first line contains n, the number of students who have subscribed
to the English newspaper.
The second line contains the n space separated list of student roll
numbers who have subscribed to the English newspaper.
The third line contains b, the number of students who have subscribed
to the French newspaper.
The fourth line contains the b space separated list of student roll
numbers who have subscribed to the French newspaper.
Output Format
Output the total number of students who are subscribed to the English newspaper only.
Sample Input
9
1 2 3 4 5 6 7 8 9
9
10 1 2 3 11 21 55 6 8
Sample Output
4
'''
def total_dif(group1, group2):
    """Return how many members of *group1* do not appear in *group2*."""
    return sum(1 for member in group1 if member not in group2)
if __name__ == '__main__':
# The two count lines are consumed but unused; only the roll-number sets matter.
n_eng = int(input())
eng_subs = set(map(int, input().split()))
b_frn = int(input())
frn_subs = set(map(int, input().split()))
print(total_dif(eng_subs, frn_subs))
|
997,112 | 8b582ac33dba1a962ef08d2df3b5f3ac4d4400d4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/01/15
# @Author : yuetao
# @Site :
# @File : 归并排序.py
# @Desc :
class Solution:
    """In-place merge sort backed by a single shared scratch buffer."""

    def deal(self, nums):
        """Sort *nums* ascending in place and return the same list."""
        self.length = len(nums)
        self.temp = [None] * self.length  # scratch buffer reused by every merge
        self.sort(nums, 0, self.length - 1)
        return nums

    def sort(self, nums, left, right):
        """Recursively sort the inclusive range nums[left..right]."""
        if left >= right:
            return
        mid = (left + right) // 2
        self.sort(nums, left, mid)
        self.sort(nums, mid + 1, right)
        self.merge(nums, left, mid, right)

    def merge(self, nums, left, mid, right):
        """Merge the sorted halves nums[left..mid] and nums[mid+1..right]."""
        lo = left       # cursor into the left half
        hi = mid + 1    # cursor into the right half
        t = 0           # cursor into the scratch buffer
        while lo <= mid and hi <= right:
            if nums[lo] <= nums[hi]:      # <= keeps the sort stable
                self.temp[t] = nums[lo]
                lo += 1
            else:
                self.temp[t] = nums[hi]
                hi += 1
            t += 1
        while lo <= mid:                  # drain whichever half remains
            self.temp[t] = nums[lo]
            lo += 1
            t += 1
        while hi <= right:
            self.temp[t] = nums[hi]
            hi += 1
            t += 1
        # Copy the merged run back into place.
        for offset in range(t):
            nums[left + offset] = self.temp[offset]
# Smoke test: sort a reversed sequence and print the result.
if __name__ == '__main__':
nums = [9,8,7,6,5,4,3,2,1]
solve = Solution()
result = solve.deal(nums)
print(result)
|
997,113 | 28c206029457466806477f47b57d8fa718875547 | from google.appengine.ext import ndb
class Course(ndb.Model):
# Datastore entity for one course offering in a given semester/year.
code = ndb.IntegerProperty()           # course number within the department
departmentCode = ndb.StringProperty()  # short department code (see Department.code)
departmentName = ndb.StringProperty()
description = ndb.TextProperty()
gradeType = ndb.StringProperty()
isCommIntense = ndb.BooleanProperty()  # communication-intensive flag
maxCredits = ndb.IntegerProperty()
minCredits = ndb.IntegerProperty()
name = ndb.StringProperty()
prereqs = ndb.TextProperty()           # free-text prerequisites
semester = ndb.StringProperty()
year = ndb.IntegerProperty()
class Department(ndb.Model):
# Datastore entity mapping a department code to its full name.
code = ndb.StringProperty()
name = ndb.StringProperty()
|
997,114 | b4846ed876cfa0e4998e1ab4e08393b5aa982fda |
def get_router():
    """Return a DRF DefaultRouter configured without trailing slashes."""
    # Imported lazily so importing this module does not require DRF.
    from rest_framework.routers import DefaultRouter

    return DefaultRouter(trailing_slash=False)
|
997,115 | bd089bf390a0356ad965d9155eccb6cc834b03e2 | # -*- coding: utf-8 -*-
import json
from optparse import make_option
import logging
from StringIO import StringIO
import pprint
from django.conf import settings
import psycopg2
import psycopg2.extras
import psycopg2.extensions
from django.core.management.base import BaseCommand
import requests
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
__author__ = 'guglielmo'
class Command(BaseCommand):
"""
Import municipality polygons from the open_coesione PostGIS database and
PATCH them into the places API as GeoJSON.

Python 2 management command (print statements, StringIO).  Positional
args restrict the import to specific cod_com values.
"""
help = "Import polygons from open_coesione database"
option_list = BaseCommand.option_list + (
make_option('--limit',
dest='limit',
default=0,
help='Limit of records to import'),
make_option('--offset',
dest='offset',
default=0,
help='Offset of records to start from'),
# NOTE(review): --dry-run is parsed but never checked in handle();
# the PATCH is always executed.
make_option('--dry-run',
dest='dryrun',
action='store_true',
default=False,
help='Set the dry-run command mode: no actual import is made'),
)
logger = logging.getLogger('management')
def handle(self, *args, **options):
# Map the standard --verbosity levels onto logger thresholds.
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
offset = int(options['offset'])
limit = int(options['limit'])
self.logger.info("Starting polygons import!")
conn = psycopg2.connect("{0}".format(settings.OC_PG_CONN))
conn.set_client_encoding('UTF8')
cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# NOTE(review): the SQL is assembled by string interpolation; args are
# expected to be numeric cod_com values -- do not pass untrusted input.
sql = "select cod_com, denominazione, ST_AsGeoJSON(geom, 12, 0) as geom" +\
" from territori_territorio" +\
" where territorio ='C' "
if args:
sql += " and cod_com in ({0})".format(",".join(args))
if limit:
sql += " offset {0} limit {1}".format(offset, limit)
else:
sql += " offset {0}".format(offset)
cursor.execute(sql)
print "********"
print "********"
print "********"
print "{0}".format(sql)
print "********"
print "********"
print "********"
print "********"
oc_places = cursor.fetchall()
for c, oc_place in enumerate(oc_places):
self.logger.info(u"{0} - cod_com: {cod_com}, it: {denominazione}".format(c, **oc_place))
# NOTE(review): 'auth=' here is a keyword argument to str.format(),
# which silently ignores it -- it was presumably meant for
# requests.get() below; the lookup call goes out unauthenticated.
places_uri = "{0}/maps/places?external_id=istat-city-id:{1}".format(
settings.OP_API_URI, oc_place['cod_com'],
auth=(settings.OP_API_USERNAME, settings.OP_API_PASSWORD)
)
self.logger.debug("{0}: GET {1}".format(c, places_uri))
r = requests.get(places_uri)
if r.status_code != 200:
self.logger.error(u'Error parsing {0}. Skipping.'.format(places_uri))
continue
places_json = r.json()
if places_json['count'] == 0:
self.logger.error(u'No places fount at {0}. Skipping.'.format(places_uri))
continue
if places_json['count'] > 1:
self.logger.error(u'More than one places found ({0}) at {1}. Skipping.'.format(places_json['count'], places_uri))
continue
place_uri = places_json['results'][0]['_self']
# NOTE(review): same misplaced 'auth=' inside str.format() as above.
self.logger.debug("{0}: GET {1}".format(
c,place_uri, auth=(settings.OP_API_USERNAME, settings.OP_API_PASSWORD)))
r = requests.get(place_uri)
if r.status_code != 200:
self.logger.error(u'Error parsing {0}. Skipping.'.format(place_uri))
continue
gps_lat = None
gps_lon = None
place_json = r.json()
if 'geoinfo' in place_json and place_json['geoinfo']:
# Skip places that already carry a polygon; keep lat/lon if present.
if 'geom' in place_json['geoinfo'] and \
place_json['geoinfo']['geom']:
self.logger.debug(" - geom already found. Skipping")
continue
if 'gps_lat' in place_json['geoinfo']:
gps_lat = place_json['geoinfo']['gps_lat']
if 'gps_lon' in place_json['geoinfo']:
gps_lon = place_json['geoinfo']['gps_lon']
geoinfo = {
"geoinfo": {
"gps_lat": gps_lat,
"gps_lon": gps_lon,
"geom": oc_place["geom"]
}
}
io = StringIO()
json.dump(geoinfo, io)
# NOTE(review): json.dump leaves 'io' positioned at EOF; requests reads
# file-like bodies from the current position, so the PATCH body may be
# empty -- consider io.seek(0) or data=json.dumps(geoinfo). Confirm.
patch_resp = requests.patch(place_uri, data=io,
auth=(settings.OP_API_USERNAME, settings.OP_API_PASSWORD),
headers={'content-type': 'application/json'}
)
self.logger.info(" - PATCH {0} - {1}".format(place_uri, patch_resp.status_code))
|
997,116 | 6698102d14608b2c55888823451b2ced003ad8e6 |
from pylab import *
from numpy import *
from matplotlib import *
from matplotlib.pyplot import *
#from pylab import figure, show, rand
from matplotlib.patches import Ellipse
from mpl_toolkits.mplot3d import Axes3D
# load data from file
# Columns of RBBM181.out (presumably: angle [deg], range z [m],
# probability -- confirm against the producer): read them one at a time.
report = loadtxt("RBBM181.out")
counter = 0;
num = 1;
angle = report[:,counter:counter+num] * pi/180;  # degrees -> radians
counter = counter + num;
num = 1;
z = report[:,counter:counter+num];
counter = counter + num;
num = 1;
prob = report[:,counter:counter+num];
counter = counter + num;
# 3D scatter of the polar data converted to Cartesian (x, y) with
# probability on the z-axis and as the colour scale.
fig = figure()
ax = Axes3D(fig)
xs = z * cos(angle)
ys = z * sin(angle)
zs = prob
scatterPlot = ax.scatter3D(xs, ys, zs, s=20,c= zs,cmap=cm.jet)
ax.set_xlabel('x[m]')
ax.set_ylabel('y[m]')
ax.set_zlabel('prob')
cbar = fig.colorbar(scatterPlot,shrink=0.9,extend='max')
#fig.colorbar(scatterPlot, shrink=0.5, aspect=5)
# Flat 2D version of the same data, coloured by probability.
fig = figure()
scatter(xs, ys, s=15,c = zs,cmap=cm.jet,edgecolors='none')
xlabel('x[m]')
ylabel('y[m]')
colorbar(extend='max')
show()
|
997,117 | 7602084941b8e9a91c95d463283a3356084969c8 |
import matplotlib.pyplot as plt
from change_rate import *
from vehicle_state import *
from voltage_extreme_diff import *
# Driver script: run the slow-charge extraction for one vehicle data file.
# NOTE(review): the first filename assignment is immediately overwritten
# by the second; remove or re-enable deliberately.
filename='/home/zhao/python/data_statistic/data/all_file.csv'
filename='/home/zhao/data/车型CC7001CE02ABEV/LGWEEUA5XJE001208/LGWEEUA5XJE001208_20200701-20200801.csv'
vs=Vehicle_state()
#volt_columns=vs.filter_voltage(filename)
#print(volt_columns)
#print(len(volt_columns))
# outfile_static='static.csv'
# vs.static(filename,outfile_static)
outfile_slow_charge='slow_charge.csv'
vs.slowcharge(filename,outfile_slow_charge)
print("test state end")
# The blocks below are retained, disabled experiments (change-rate and
# voltage-extreme-difference analyses over the extracted CSVs).
# filename_static='/home/zhao/python/data_statistic/data/static.csv'
# filename_slow_charge='/home/zhao/python/data_statistic/data/slow_charge.csv'
# filename=filename_slow_charge
# label=" slow_charge_change_rate "
# k=0
# day_peroid='-10D'
# change_rate=Change_rate(k)
# use_days,value=change_rate.change_rate(filename,day_peroid,k)
# change_rate.data_visual(use_days,value,label)
# print("test slow_charge_change_rate end")
# v=Voltage_extreme_diff()
# label="static_volt_diff"
# voltage_diff,day_interval=v.voltage_difference(filename_static )
# v.data_visual(day_interval,voltage_diff,label)
# print("test static volt_diff end")
997,118 | 37e1c7cf8f336e3ae48fc9e4e89c6234ae9539f1 | from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
import itertools
import os,sys
def read_path(input_file):
    """Return the lines of *input_file* with trailing newlines stripped."""
    with open(input_file) as handle:
        return [line.rstrip('\n') for line in handle]
def build_contig_names_dic(contigs_names):
    """Map path-node indices to contig names.

    Nodes 2k and 2k+1 are the two ends of contig k, so both map to
    contigs_names[k].
    """
    mapping = {}
    for k, name in enumerate(contigs_names):
        mapping[2 * k] = name      # even node: one end of the contig
        mapping[2 * k + 1] = name  # odd node: the other end
    return mapping
#contig_names_dic : idx:name
def get_contigname_and_order(paths, contig_names_dic):
    """Decode node-index paths into per-path contig names and orientations.

    Each path is a whitespace-separated string of node indices; nodes are
    consumed in pairs (positions 0-1, 2-3, ...).  Orientation is 1 when a
    pair appears in ascending order, else 0.

    Returns {path_index: ([names...], [orientations...])}.

    Cleanup: removed the dead locals of the original (path_sim,
    contig_fullname_data, contig_end_num were computed and never used) and
    replaced the manual counter with enumerate.
    """
    contig_names_paths = {}
    for count, path_str in enumerate(paths):
        path = list(map(int, path_str.split()))
        pair_starts = range(0, len(path), 2)
        names = [contig_names_dic[path[i]] for i in pair_starts]
        orientations = [int(path[i] < path[i + 1]) for i in pair_starts]
        contig_names_paths[count] = (names, orientations)
    return contig_names_paths
def record_filtered_iterator(output_name, output_iterator):
# Write the SeqRecords in output_iterator to a FASTA file at output_name.
with open(output_name, "w") as output_handle:
SeqIO.write(output_iterator, output_handle, "fasta")
def record_filtered_iterator_v1(output_iterator, output_handle):
# Variant taking an already-open handle; the caller owns opening/closing it.
SeqIO.write(output_iterator, output_handle, "fasta")
def assign_seq_to_path(contig_names_paths, seq_id):
    """Locate *seq_id* among the decoded paths.

    Returns (path_key, position_in_path, orientation); (-1, -1, -1) when
    the sequence belongs to no path.
    """
    for key, (names, orientations) in contig_names_paths.items():
        if seq_id in names:
            pos = names.index(seq_id)
            return key, pos, orientations[pos]
    # Sequence not assigned to any path.
    return -1, -1, -1
def build_empty_iterator(contig_names_paths):
    """Return {path key: [0, 0, ...]} with one placeholder slot per contig."""
    return {
        key: [0] * len(names)
        for key, (names, _orient) in contig_names_paths.items()
    }
def build_Seq_record(seq, seq_id):
    """Wrap a plain sequence string in a Bio SeqRecord with the given id."""
    Seq_record = SeqRecord(Seq(seq), id=seq_id, name='', description='')
    return Seq_record
def build_iterator(contig_names_paths, record_iterator):
    """Slot each input record into its (path, position) placeholder.

    Records not belonging to any path are skipped. Records flagged with
    orientation 1 are stored reversed.
    NOTE(review): [::-1] is a plain reversal, not a reverse complement --
    confirm that is the intended transform for flipped contigs.
    """
    initial_cycle_iterator = build_empty_iterator(contig_names_paths)
    for record in record_iterator:
        cycle_num, seq_idx, seq_orientation = assign_seq_to_path(contig_names_paths, record.id)
        if cycle_num == -1:
            continue
        if seq_orientation==1:
            record.seq = record.seq[::-1]
        initial_cycle_iterator[cycle_num][seq_idx] = record
    return initial_cycle_iterator
def construct_sequence(initial_cycle_iterator):
    """Join each cycle's contig records into one SeqRecord.

    Contigs are concatenated in path order, separated by a 100-base run of
    'N' (a scaffold gap), and each assembled cycle is returned as a
    SeqRecord named 'cycle_<key>'.

    Cleanup: removed the unused local `cycle_seq_raw` and hoisted the
    loop-invariant gap string out of the loop; output is unchanged.
    """
    gap = 'N' * 100  # scaffold spacer between consecutive contigs
    cycle_finished_iterator = []
    for key in range(len(initial_cycle_iterator)):
        cycle_seq = gap.join(str(contig.seq) for contig in initial_cycle_iterator[key])
        cycle_finished_iterator.append(build_Seq_record(cycle_seq, 'cycle_' + str(key)))
    return cycle_finished_iterator
def generate_iterator_for_a_path(contig_dic, record_iterator):
    """Collect the records whose ids appear in *contig_dic*.

    contig_dic: presumably maps contig name -> orientation flag -- TODO
    confirm against callers. Records with orientation 1 are reversed in
    place (plain reversal, not reverse complement -- confirm intent).
    """
    contig_names = contig_dic.keys()
    output_iterator = []
    for record in record_iterator:
        if record.id in contig_names:
            orientation = contig_dic[record.id]
            if orientation==1:
                record.seq = record.seq[::-1]
            output_iterator.append(record)
    return output_iterator
def generate_sequences_for_a_path_v1(record_iterator, contig_names, contig_orientation):
    """Collect the records named in *contig_names*, placed in path order.

    Records whose orientation flag is 1 are stored reversed (note: plain
    reversal, not reverse complement -- TODO confirm intent). Slots for
    contigs missing from the input remain '' placeholders.

    Cleanup: removed the dead locals `output_iterator` and `gap`, which were
    built but never used; the returned value is unchanged.
    """
    seq = ['' for _ in range(len(contig_names))]
    for record in record_iterator:
        if record.id in contig_names:
            idx = contig_names.index(record.id)
            if contig_orientation[idx] == 1:
                seq[idx] = SeqRecord(record.seq[::-1], id=record.id, name='', description='')
            else:
                seq[idx] = record
    return seq
def write_per_path(contig_names_paths, input_fasta, contig_names_dic):
    """Write each path's contigs to its own 'cycle_<i>.fasta' file.

    BUG FIX: the original body referenced two undefined names
    (`contig_dic` and `generate_sequences_for_a_path`) and raised
    NameError on first call. It now builds each path's record list with
    generate_sequences_for_a_path_v1 and writes it out.
    """
    l = len(contig_names_paths)
    with open(input_fasta, "r") as input_handle:
        # tee the single FASTA parse stream so every path can scan all records
        iterators = itertools.tee(SeqIO.parse(input_handle, "fasta"), l)
        for i in range(l):
            contig_name, contig_orientation = contig_names_paths[i]
            records = generate_sequences_for_a_path_v1(
                iterators[i], contig_name, contig_orientation)
            # drop '' placeholders left for contigs absent from the input
            records = [r for r in records if r != '']
            record_filtered_iterator('cycle_' + str(i) + '.fasta', records)
def write_per_path_v1(contig_names_paths, input_fasta, contig_names_dic, output_name):
    """Assemble every path into one scaffold sequence and write them all to
    a single FASTA file *output_name* (one record per cycle).

    contig_names_dic is currently unused here -- kept for interface
    compatibility with write_per_path.
    """
    l = len(contig_names_paths) # cycle num
    with open(input_fasta, "r") as input_handle, open(output_name, "w") as output_handle:
        record_iterator = SeqIO.parse(input_handle, "fasta")
        initial_cycle_iterator = build_iterator(contig_names_paths, record_iterator)
        cycle_finished_iterator = construct_sequence(initial_cycle_iterator)
        SeqIO.write(cycle_finished_iterator, output_handle, "fasta")
def main():
    """CLI entry point: stitch contigs into per-cycle scaffold sequences.

    BUG FIX: the original used `parser.add_argument(...)` without ever
    creating the parser (and argparse was never imported), so the script
    crashed with NameError on startup.
    """
    import argparse  # local import: argparse is not imported at module level
    parser = argparse.ArgumentParser(
        description='Assemble contigs into cycle sequences following paths.')
    parser.add_argument('-paths', help='each line contains a path')
    parser.add_argument('-contigs', help='names of contigs used')
    parser.add_argument('-i', help='input fasta filename')
    parser.add_argument('-o', help='output fasta filename')
    args = parser.parse_args()
    input_fasta = args.i
    output_fasta = args.o
    paths = read_path(args.paths)
    contigs_names = read_path(args.contigs)
    contig_names_dic = build_contig_names_dic(contigs_names)
    contig_names_paths = get_contigname_and_order(paths, contig_names_dic)
    write_per_path_v1(contig_names_paths, input_fasta, contig_names_dic, output_fasta)


if __name__ == '__main__':
    main()
|
997,119 | 172b1d40c473ddedcf33224aafa3faec54b15beb | import os
# #print(dir(os))
# print(type(dir(5)))
# print(type(os.getcwd()))
# os.chdir('/home/abilash/Desktop')
# print(os.getcwd())
# print(os.listdir())
# print(os.getcwd())
# os.chdir('/home/abilash/Desktop')
# os.makedirs('Abias/a')
# os.removedirs('Abias/a') #will
# List the current directory, move up one level, then rename 'python' -> 'Py'.
# NOTE(review): os.rename raises FileNotFoundError when 'python' is absent,
# and this permanently renames a directory every run -- confirm intent.
print(os.listdir())
os.chdir('../')
os.rename('python','Py')
997,120 | 117792510a1ec03a5f75e99e441bae8cb44c1949 | import keyboard
import time
# Busy-poll the keyboard: whenever 'F' is down, type a search phrase and press
# Enter. NOTE(review): no sleep/debounce -- this spins at full CPU and fires
# repeatedly while the key is held.
while True:
    if keyboard.is_pressed('F'):
        keyboard.write("hur googlar man") # types the phrase as keystrokes
        keyboard.send('enter')
997,121 | f4dfefe5aba1db0b634389cf86e025c7224d37f1 | import unittest
import miner
from textwrap import dedent
class TesteMiner(unittest.TestCase):
    """Unit tests for the miner module's board construction and bomb logic."""

    def teste_square_matrix(self):
        # a col x row board must contain exactly col*row cells
        col, row = 3, 3
        matrix = miner.start_matrix(col, row)
        total_cells = sum(1 for line in matrix for _cell in line)
        self.assertEqual(total_cells, col * row)

    def teste_col_only_matrix(self):
        # with a single row, the outer dimension equals the column count
        col, row = 7, 1
        matrix = miner.start_matrix(col, row)
        self.assertEqual(sum(1 for _line in matrix), col)

    def teste_row_only_matrix(self):
        # with a single column, the first line holds all row cells
        col, row = 1, 8
        matrix = miner.start_matrix(col, row)
        self.assertEqual(sum(1 for _cell in matrix[0]), row)

    def teste_empty_matrix(self):
        # a fresh board contains only zeros
        col, row = 9, 8
        matrix = miner.start_matrix(col, row)
        total = sum(cell for line in matrix for cell in line)
        self.assertEqual(total, 0)

    def teste_verificar_bomba(self):
        # placing a bomb marks the cell with -1
        matrix = miner.start_matrix(12, 8)
        miner.coloca_bomba(matrix, 3, 7)
        self.assertEqual(matrix[3][7], -1)

    def teste_verificar_vizinhanca(self):
        # a cell diagonal to a single bomb counts exactly one neighbour
        matrix = miner.start_matrix(3, 3)
        matrix = miner.coloca_bomba(matrix, 1, 1)
        matrix = miner.define_valor(matrix, 0, 0)
        self.assertEqual(matrix[0][0], 1)
unittest.main()  # discover and run the tests above when executed as a script
|
997,122 | 6f4d4fed5465cde8f5c6795409f4ebec4ced95b1 | import numpy as np
from numba import jitclass # import the decorator
from numba import float32 # import the types
spec = [
('a', float32),
('b', float32),
]
@jitclass(spec)
class test(object):
    """Minimal jitclass reproducing a numba issue: passing a bound method
    (self.f) as an argument to another method (g) under @jitclass.
    NOTE(review): `from numba import jitclass` is the legacy import path;
    recent numba exposes it as numba.experimental.jitclass -- confirm the
    pinned numba version.
    """
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def f(self, val):
        # scale the input by the float32 field `a`
        return self.a * val
    def g(self, func):
        val = 3
        return self.b + func(val)
    def wrap(self):
        # pass the bound method self.f into g
        return self.g(self.f)
# Exercise the jitclass: expected result is a*3 + b = 1*3 + 2 = 5.
obj = test(1, 2)
c = obj.wrap()
print(c) # prints '5' when @jitclass(spec) is commented out
|
997,123 | f9fa332d245e3e69b74230d937f85a39d9b5d805 | #Question: Write a program that asks the user to enter the width and length of a rectangle,
# and then display the rectangle’s area
#• getLength – This function should ask the user to enter the rectangle’s length, and then return that value as a double.
#• getWidth – This method should ask the user to enter the rectangle’s width, and then return that value as a double.
#• getArea – This method should accept the rectangle’s length and width as arguments, and return the rectangle’s area.
# The area is calculated by multiplying the length by width.
#• displayArea – This method should accept the rectangle’s length, width, and area as arguments, and display them in an appropriate message to the screen.
def getLength():
    """Prompt the user for the rectangle's length and return it as a float."""
    return float(input('Enter the length of the Rectangle: '))
def getWidth():
    """Prompt the user for the rectangle's width and return it as a float."""
    return float(input('Enter the width of the Rectangle: '))
def getArea(length, width):
    """Return the rectangle's area (length * width), rounded to 2 decimals."""
    return round(length * width, 2)
def displayArea(length, width, area):
    """Print the rectangle's length, width and area framed by dashed lines."""
    separator = '--------------------------------------------'
    print(separator)
    print('Length of the Rectangle is : ' + str(length))
    print('Width of the Rectangle is : ' + str(width))
    print('Area of the Rectangle is : ' + str(area))
    print(separator)
# Script driver: gather dimensions, compute the area, display the result.
length = getLength()
width = getWidth()
area = getArea(length, width)
displayArea(length, width, area)
|
997,124 | 59e86bcc1e6c06163748adc63c23a0b604ce44eb | from UI.Views.View import View
import os, sys
import RPi.GPIO as GPIO
# BCM pin numbering; pins 21 and 20 are outputs, pin 16 is an input
# (exact hardware roles not visible here -- confirm against the wiring).
GPIO.setmode(GPIO.BCM)
GPIO.setup(21,GPIO.OUT)
GPIO.setup(20,GPIO.OUT)
GPIO.setup(16,GPIO.IN)
class EndView(View):
    """Final screen view.

    NOTE(review): the two GPIO.output calls below execute once, at class
    definition time (they are in the class body, not a method) -- confirm
    that is intentional.
    """
    GPIO.output(21,0)
    GPIO.output(20,0)
    def render(self, app):
        # background colour; `bg` is unused in the visible code -- rendering
        # appears unfinished here
        bg = [100, 200, 100]
997,125 | a27cf90ee690593cd1497549299b27434b8eb877 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
import csv
plt.style.use('ggplot')
def smoothing(curve, a=51):
    """Savitzky-Golay smooth *curve* (window *a*, polynomial order 3),
    clamping every smoothed sample at zero."""
    filtered = signal.savgol_filter(curve, a, 3)
    return np.array([max(sample, 0) for sample in filtered])
# Figure 1: LSTM (left) and RNN (right) error-rate curves; error = 1 - accuracy.
# NOTE(review): the left panel's ylabel says 'accuracy' but plots 1-accuracy.
fig, axs = plt.subplots(1, 2, constrained_layout=False, figsize=(13, 4))
f = csv.DictReader(
    open(
        './result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_test-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[0].plot(step, smoothing(value), label='test')
f = csv.DictReader(
    open(
        './result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_train-tag-accuracy_train.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[0].plot(step, smoothing(value), label='train')
axs[0].set_xlabel('training step')
axs[0].set_ylabel('accuracy')
axs[0].set_title('LSTM Error Rate Curve')
axs[0].legend()
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_test-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
# NOTE(review): a constant 0.05 is added to the RNN test curve only --
# confirm this offset is intentional; it shifts the reported error rate.
axs[1].plot(step, smoothing(value, a=7) + 0.05, label='test')
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_train-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[1].plot(step, smoothing(value, a=7), label='train')
axs[1].set_xlabel('training step')
axs[1].set_ylabel('error rate')
axs[1].set_title('RNN Error Rate Curve')
axs[1].legend()
plt.savefig('rnn_error_rate.png', dpi=400)
plt.close()
#-------------------------------------------------------------------------------------
# Figure 2: RNN loss (left, raw values) and RNN error rate (right, smoothed).
fig, axs = plt.subplots(1, 2, constrained_layout=False, figsize=(13, 4))
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_test-tag-loss.csv',
        'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
axs[0].plot(step, (value), label='test')
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_train-tag-loss.csv',
        'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
axs[0].plot(step, (value), label='train')
axs[0].set_xlabel('training step')
axs[0].set_ylabel('loss')
axs[0].set_title('RNN Learning Curve')
axs[0].legend()
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_test-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
# NOTE(review): same +0.05 offset on the RNN test curve as above -- confirm.
axs[1].plot(step, smoothing(value, a=7) + 0.05, label='test')
f = csv.DictReader(
    open(
        './result/csv/run-rnn_bs50_hidden10_embed10_lrdc1_clip1.2_rmsprop_train-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[1].plot(step, smoothing(value, a=7), label='train')
axs[1].set_xlabel('training step')
axs[1].set_ylabel('error rate')
axs[1].set_title('RNN Error Rate Curve')
axs[1].legend()
plt.savefig('rnn_learning_curve.png', dpi=400)
plt.close()
#-------------------------------------------------------------------------------------
# Figure 3: LSTM loss (left) and LSTM error rate (right).
fig, axs = plt.subplots(1, 2, constrained_layout=False, figsize=(13, 4))
f = csv.DictReader(
    open('./result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_test-tag-loss.csv',
         'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
axs[0].plot(step, (value), label='test')
f = csv.DictReader(
    open('./result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_train-tag-loss.csv',
         'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
axs[0].plot(step, smoothing(value, a=11), label='train')
axs[0].set_xlabel('training step')
axs[0].set_ylabel('loss')
axs[0].set_title('LSTM Learning Curve')
axs[0].legend()
f = csv.DictReader(
    open(
        './result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_test-tag-accuracy.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[1].plot(step, smoothing(value, a=11), label='test')
f = csv.DictReader(
    open(
        './result/csv/run-lstm_bs5_hidden10_embed10_lrdc1_train-tag-accuracy_train.csv',
        'r'))
step, value = zip(*[(int(r['Step']), 1 - float(r['Value'])) for r in f])
axs[1].plot(step, smoothing(value), label='train')
axs[1].set_xlabel('training step')
axs[1].set_ylabel('error rate')
axs[1].set_title('LSTM Error Rate Curve')
axs[1].legend()
plt.savefig('lstm_learning_curve.png', dpi=400)
plt.close()
#-----------------------------------------------------------------------------------
# Figures 4/5: RNN vs LSTM training loss, by step (exp1) and by wall time (exp2).
plt.rcParams["figure.figsize"] = (8, 4.5)
f = csv.DictReader(
    open('./result/csv/run-rnn_exp1_train-tag-loss.csv',
         'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
plt.plot(step, smoothing(value, a=5), label='rnn')
f = csv.DictReader(
    open('./result/csv/run-lstm_exp1_train-tag-loss.csv',
         'r'))
step, value = zip(*[(int(r['Step']), float(r['Value'])) for r in f])
plt.plot(step, smoothing(value, a=21), label='lstm')
plt.xlabel('training step')
plt.ylabel('training loss')
plt.title('Learning Curve for Different Methods')
plt.legend()
plt.savefig('rnn_exp1.png', dpi=400)
plt.close()
#-----------------------------------------------------------------------------------
# Same curves plotted against elapsed wall time (zeroed at the first sample).
f = csv.DictReader(
    open('./result/csv/run-rnn_exp1_train-tag-loss.csv',
         'r'))
step, value = zip(*[(float(r['Wall time']), float(r['Value'])) for r in f])
step = np.array(step) - step[0]
plt.plot(step, smoothing(value, a=5), label='rnn')
f = csv.DictReader(
    open('./result/csv/run-lstm_exp1_train-tag-loss.csv',
         'r'))
step, value = zip(*[(float(r['Wall time']), float(r['Value'])) for r in f])
step = np.array(step) - step[0]
plt.plot(step, smoothing(value, a=21), label='lstm')
plt.xlabel('training time (seconds)')
plt.ylabel('training loss')
plt.title('Training Time for Different Methods')
plt.legend()
plt.savefig('rnn_exp2.png', dpi=400)
plt.close()
|
997,126 | d4b4113425bad82befea8c7e3e8c962b2f2ee7e7 | #!/usr/bin/env python
# Sherpa run-card template. The placeholders 'ptMin'/'ptMax' (in the selector)
# and 'MpiHad' (MPI/fragmentation switch line) are substituted per pT bin below.
runCard="""
(run){
% general setting
EVENTS 1M; ERROR 0.99;
% scales, tags for scale variations
FSF:=1.; RSF:=1.; QSF:=1.;
SCALES METS{FSF*MU_F2}{RSF*MU_R2}{QSF*MU_Q2};
% tags for process setup
% YOUR INPUT IS NEEDED HERE
NJET:=1; LJET:=0; QCUT:=20.;
% me generator settings
ME_SIGNAL_GENERATOR Comix Amegic LOOPGEN;
EVENT_GENERATION_MODE PartiallyUnweighted;
%LOOPGEN:=Internal; % BlackHat/OpenLoops
LOOPGEN:=OpenLoops;
% exclude tau from lepton container
MASSIVE[15] 1;
MpiHad MI_HANDLER None; FRAGMENTATION Off;
% PDF definition
% https://twiki.cern.ch/twiki/bin/view/CMS/QuickGuideMadGraph5aMCatNLO#Specific_information_for_2017_pr
% Using 2017 recommended PDF
PDF_LIBRARY = LHAPDFSherpa;
PDF_SET = NNPDF31_nnlo_hessian_pdfas;
HEPMC_USE_NAMED_WEIGHTS=1;
% https://github.com/cms-sw/genproductions/blob/mg26x/MetaData/pdflist_4f_2017.dat
%SCALE_VARIATIONS 1.,1. 1.,2. 1.,0.5 2.,1. 2.,2. 2.,0.5 0.5,1. 0.5,2. 0.5,0.5
%PDF_VARIATIONS NNPDF31_nnlo_as_0118_nf_4[all]
% collider setup
BEAM_1 2212; BEAM_ENERGY_1 6500.;
BEAM_2 2212; BEAM_ENERGY_2 6500.;
}(run)
(processes){
% https://sherpa.hepforge.org/doc/SHERPA-MC-2.2.5.html#LHC_005fWJets
% YOUR INPUT IS NEEDED HERE
Process 93 93 -> 93 93;
Order (*,0);
CKKW sqr(20/E_CMS)
Integration_Error 0.05;
End process;
}(processes)
(selector){
PT 93 ptMin. ptMax.
}(selector)
"""
# Bin edges in GeV; consecutive pairs form the [ptMin, ptMax) slices.
pts = [15, 30, 50, 80, 120, 170, 300, 470, 600, 800, 1000, 1400, 1800, 2400, 3200, 5000]
# For each bin, emit a hadronized ('MpiHad' commented out -> defaults on) and a
# non-hadronized ('MpiHad' blanked -> MI/fragmentation disabled) run card.
for pt1, pt2 in zip(pts, pts[1:]):
    runCardNow = runCard.replace('ptMin', str(pt1))
    runCardNow = runCardNow.replace('ptMax', str(pt2))
    runCardHad = runCardNow.replace('MpiHad', '#')
    runCardNoHad = runCardNow.replace('MpiHad', ' ')
    with open('sherpaFiles/Run.dat_QCD_pt'+str(pt1)+'_Had_LO_13TeV','w') as outFile:
        outFile.write(runCardHad)
    with open('sherpaFiles/Run.dat_QCD_pt'+str(pt1)+'_noHad_LO_13TeV','w') as outFile:
        outFile.write(runCardNoHad)
997,127 | f56750701d24e9b689872750ed1bb7f11e802624 | /home/ramdas/anaconda3/lib/python3.7/genericpath.py |
997,128 | 268b515a4b5de1613cc6a0d338bebc2e7a9d0f33 | from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
db = SQLAlchemy()
import datetime
class User(db.Model):
    """Application user with a hashed password (SQLAlchemy model)."""
    # copied code from Lab 5
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(db.String(120), nullable=False)
    def toDict(self):
        """Serialize the row to a plain dict.

        NOTE(review): this includes the password hash -- confirm the dict is
        never sent to clients as-is.
        """
        return {
            "id": self.id,
            "username": self.username,
            "email": self.email,
            "password": self.password
        }
    def set_password(self, password):
        """Create hashed password."""
        # NOTE(review): 'sha256' is removed in newer werkzeug releases
        # (which expect e.g. 'pbkdf2:sha256') -- confirm the pinned version.
        self.password = generate_password_hash(password, method='sha256')
    def check_password(self, password):
        """Check hashed password."""
        return check_password_hash(self.password, password)
    def __repr__(self):
        return '<User {}>'.format(self.username)
class UserReact(db.Model):
    """Association row recording one user's reaction to one post."""
    userId = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
    postId = db.Column(db.Integer, db.ForeignKey('post.id'), primary_key=True)
    # BUG FIX: the original default `'like' or 'dislike'` always evaluates to
    # 'like', so state that directly. (Restricting values to like/dislike
    # would additionally need a CheckConstraint or an Enum column.)
    react = db.Column(db.String(80), default = 'like')
class Post(db.Model):
    """A user's post plus helpers aggregating like/dislike reactions."""
    id = db.Column(db.Integer, primary_key=True)
    userId = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False )
    text = db.Column(db.String(2048), nullable=False)
    reacts = db.relationship('UserReact', backref='Post', lazy=True)

    def _countReacts(self, kind):
        # Helper: count this post's reactions of the given kind.
        # BUG FIX: filter_by(...) returns a Query object, which has no
        # `.react` attribute; fetch the rows and read `.react` on each.
        reacts = UserReact.query.filter_by(postId = self.id).all()
        return sum(1 for r in reacts if r.react == kind)

    def getTotalLikes(self):
        """Number of 'like' reactions on this post."""
        return self._countReacts("like")

    def getTotalDislikes(self):
        """Number of 'dislike' reactions on this post."""
        return self._countReacts("dislike")

    def toDict(self):
        """Serializable summary of the post, its author, and reaction counts."""
        # BUG FIX: the original read `.username` off a Query (needs .first())
        # and called getTotalLikes()/getTotalDislikes() without `self.`.
        user = User.query.filter_by(id = self.userId).first()
        return {
            "postId": self.id,
            "userId": self.userId,
            "username": user.username if user else None,
            "text": self.text,
            "likes": self.getTotalLikes(),
            "dislikes": self.getTotalDislikes()
        }
|
997,129 | d317ddb7048503209228da20ed7316e2e587cdbe | import unittest
import keras_nlp
import keras
import numpy as np
import tensorflow as tf
class TestKerasNLP(unittest.TestCase):
    """Smoke test: build a preset BERT classifier and fit on one tiny batch."""
    def test_fit(self):
        # From https://keras.io/api/keras_nlp/models/bert/bert_classifier/
        # Pre-tokenized inputs: 2 examples of sequence length 12.
        features = {
            "token_ids": tf.ones(shape=(2, 12), dtype=tf.int64),
            "segment_ids": tf.constant(
                [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
            ),
            "padding_mask": tf.constant(
                [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
            ),
        }
        labels = [0, 3]
        classifier = keras_nlp.models.BertClassifier.from_preset(
            'bert_tiny_en_uncased',
            load_weights=False, # random init: don't download pretrained weights
            preprocessor=None, # to avoid downloading the vocabulary
            num_classes=4,
        )
        classifier.fit(x=features, y=labels, batch_size=2)
997,130 | 721a9e630fa2c8819382d0577eec0f5f31ee2acf | a=()
print(a==None) |
997,131 | 71126d52ef7a688dbf8033f402f97f2406be0aba | # Program for printing n no. of
# Fibonacci Series
def fab(n):
    """Return the n-th Fibonacci number (fab(0)=0, fab(1)=fab(2)=1).

    PERFORMANCE FIX: the original double recursion was O(phi^n); this
    iterative version returns the same values in O(n) time, O(1) space.
    Raises ValueError for negative n (the original recursed forever there).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    if n == 0:
        return 0
    prev, cur = 0, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
# Read the count from stdin and print that many Fibonacci numbers on one line.
inp=int(input())
print("Fibonacci Series of",inp,"is:",end=" ")
for i in range(0,inp):
    print(fab(i),end=" ") # print fab(i) for each i from 0 up to the count entered
|
997,132 | d19731dc618b1a08c89ec70d88c078855116486a | #!/usr/bin/env python3
import socket
from time import time, sleep
from textwrap import dedent
from socket import AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR
import sys
import struct
import os
import os.path
import re
import tftp
from docopt import docopt
from socketserver import BaseRequestHandler, ThreadingUDPServer
from threading import Thread
host = '' # Symbolic name meaning all available interfaces
# UDP socket shared by the whole server; SO_REUSEADDR allows quick restarts.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
ThreadingUDPServer.allow_reuse_address = True
# docopt usage text: optional serving directory and listening port.
doc = """
TFTPy: The server side for the TFTP protocol. written in Python 3 by Pedro Pereira & Pedro Lourenço (11/06/17)
Usage: server.py [<directory>] [<port>]
Options:
-h, --help
[<directory>] show this help [default: './']
<port>, <port>=<port> listening port [default: 69]
"""
args = docopt(doc)
if args['<directory>'] == None:
    args['<directory>'] = './'
if args['<port>'] == None:
    args['<port>'] = 69
# If the single positional argument parses as an int it was really the port.
try:
    v = int(args['<directory>'] )
    args['<port>'] = v
    args['<directory>'] = './'
except ValueError:
    if len(sys.argv) == 1:
        args['<directory>'] = './'
    else:
        args['<directory>'] = sys.argv[1]
except IndexError:
    if len(sys.argv) == 0:
        args['<directory>'] = './'
port = int(args['<port>'])
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
addr = host, port
# TFTP opcodes (2-byte big-endian), plus a custom DIR extension (opcode 6).
RRQ = b'\x00\x01'
WRQ = b'\x00\x02'
DAT = b'\x00\x03'
ACK = b'\x00\x04'
ERR = b'\x00\x05'
DIR = b'\x00\x06'
BLK0 = b'\x00\x00'
BLK1 = b'\x00\x01'
path = args['<directory>']
path_chk = path[-1]
if not path_chk == '/':
    print("The inserted path of directory don't end with slash '/'!\n Please do it to save correctly the files. ")
    raise SystemExit
### UDP bind ###
try:
    s.bind(addr)
    print('Socket bind complete')
except socket.error as msg:
    print('Unable to bind to port %d' % (port))
    sys.exit()
# Counters: ct6 tracks DIR chunking, ct4 ACKs, ct timeouts; buf = recv size.
ct6 = 0
ct4 = 0
ct = 0
buf = 65536
# Main server loop: one UDP datagram per iteration, dispatched on TFTP opcode.
# NOTE(review): state such as `blocks`, `output` and `filen` is created by
# earlier RRQ/WRQ packets; a DAT or ACK arriving first raises NameError --
# confirm clients always open with RRQ/WRQ.
while True:
    try:
        print( "Waiting for requests on '%s' port '%s'\n" % (hostname, args['<port>']))
        data, addr = s.recvfrom(buf)
        host = addr[0]
        port = addr[1]
        print ('received %s bytes from %s' % (len(data), addr))
        pack_type = tftp.check_pack(data)
        op = data[:2]
        bloco = data[2:4]
        # Packet type 1 (RRQ: read request) -> start sending the file
        if pack_type == RRQ:
            unpacked = tftp.treat_RQQ_srv(data, path)
            if len(unpacked) == 2:
                file_O, file = unpacked
                op = 3
                blocks = tftp.chunks(file_O, 512)
                info_file = next(blocks, 'end')
                if info_file == 'end':
                    file_out = file.rsplit(b'/')
                    print("The file requested, '%s' has been sent." % (file_out.decode()))
                info_file += b'\0'
                numb_blk = next(blocks, 'end')
                packet_DAT = tftp.pack_3_(op, numb_blk, info_file)
                filen = file.decode()
                s.sendto(packet_DAT, addr)
            if len(unpacked) == 3:
                send_err, msg, err = unpacked
                s.sendto(send_err, addr)
        # Packet type 2 (WRQ: write request) -> acknowledge, prepare to receive
        if pack_type == WRQ:
            output = tftp.treat_WRQ_srv(data, path)
            if len(output) == 2:
                ack_send, filen = output
                if filen == False:
                    print('File not found! [error 1]')
            if len(output) == 3:
                ack_send, filen, file_to_save = output
                if filen == False:
                    print('File not found! [error 1]')
            s.sendto(ack_send, addr)
        # Packet type 3 (DAT: data block of an upload) -> save and ACK
        if pack_type == DAT:
            blk = data[2:4]
            if len(output) == 2:
                if blk == BLK1:
                    ack_send, namesaved = tftp.treat_DAT1_srv(data, filen)
                if blk > BLK1:
                    ack_send, namesaved = tftp.treat_DAT2(data, namesaved)
            if len(output) == 3:
                if blk == BLK1:
                    ack_send, namesaved = tftp.treat_DAT1_srv(data, file_to_save)
                if blk > BLK1:
                    ack_send, namesaved = tftp.treat_DAT2(data, namesaved)
            s.sendto(ack_send, addr)
        # Packet type 4 (ACK) -> send the next data block of a download
        if pack_type == ACK:
            op = 3
            ct4 += 1
            inf = next(blocks, 'end')
            file = str(file).strip("[]")
            if inf == 'end':
                file_out = filen.rsplit('/')
                print("The file requested, '%s' has been sent.\n" % (file_out[-1]))
                ct4 = 0
                continue
            numb_blk = next(blocks,'end')
            packet = tftp.pack_3_(op, numb_blk, inf)
            s.sendto(packet, addr)
        # Packet type 5 (ERR: error report from the peer)
        if pack_type == ERR:
            info = tftp.unpack_err(data)
            op, err, msg = info
            # BUG FIX: the original `print('%s' % (err, msg))` raised
            # TypeError (one placeholder, two arguments); format both fields.
            print('%s %s' % (err, msg))
            continue
        # Packet type 6 (DIR: custom directory-listing extension)
        if pack_type == DIR:
            ct6 += 1
            path = args['<directory>']
            dir_srv = os.popen('ls -alh {}'.format(path)).read()
            if ct6 == 1:
                part_block = tftp.chunks(dir_srv, 512)
            inf = next(part_block, 'end')
            if inf == 'end':
                print('DIR sended')
                ct6 = 0
                continue
            numb_blk = next(part_block, 'end')
            op = 3
            dir_srv_S = tftp.pack_3_dir(op, numb_blk, inf)
            sent = s.sendto(dir_srv_S, addr)
    except socket.timeout:
        print('Trying again...')
        ct += 1
        s.connect(addr)
        s.settimeout(10)
        break
    except KeyboardInterrupt:
        print("Exiting TFTP server..")
        print("Goodbye!")
        break
s.close()
|
997,133 | da5079d49dc164b89f4d26957ce54ba593588037 | #!/usr/bin/python3.8
import os
import sys
import argparse
import configparser
def _argparse():
parser = argparse.ArgumentParser(description="This is description")
parser.add_argument('-c', '--config', action='store', dest='config', default='config.txt', help="this is config file")
return parser.parse_args()
def make_dir(*dirs):
    """Create each given directory if it does not already exist (non-recursive).

    FIX: renamed the vararg from `dir` to `dirs` -- `dir` shadowed the
    builtin. Varargs are positional-only, so callers are unaffected.
    """
    for each in dirs:
        if not os.path.exists(each):
            os.mkdir( each )
def main():
    """Generate a shell script driving an RNA-seq pipeline (STAR alignment,
    samtools, featureCounts, TPM post-processing) from a config file.

    BUG FIX: the original aborted with `os.exit(...)`, which does not exist
    (AttributeError); use sys.exit so the intended error message is shown.
    """
    parser = _argparse()
    cf = configparser.ConfigParser(allow_no_value=True)
    cf.read(parser.config)
    if not cf.has_section('Config'):
        sys.exit("Error: your config file is not correct.")
    # read config file
    config_dict = {
        # 'reference database'
        'ref_fasta': cf.get('Config', 'ref_fasta'),
        'gtf_file': cf.get('Config', 'gtf_file'),
        # project name
        'project_name' : cf.get("Config", "project_name"),
        'sample_name' : cf.get("Config", "sample_name"),
        'RNA_fastqs' : cf.get("Config", "RNA_fastqs"),
        'scripts_dir': cf.get('Config', 'scripts_dir'),
        # alignment
        'STAR': cf.get('Config', "STAR"),
        'STAR_index': cf.get('Config', "STAR_index"),
        'samtools' : cf.get('Config', "samtools"),
        # expression
        'featureCounts': cf.get('Config', "featureCounts"),
    }
    #make directories:
    project_dir = os.path.abspath(".") + '/' + config_dict['project_name']
    make_dir(project_dir)
    print("# Create work directory")
    # generate shell
    shell_name = project_dir + '/work.' + config_dict['project_name'] + '.RNASeq.sh'
    with open(shell_name,"w") as f:
        # align
        f.write("{STAR} --runThreadN 10 --genomeDir {STAR_index} --readFilesCommand zcat --readFilesIn {RNA_fastqs} --outFileNamePrefix {sample_name}.RNA. --outSAMtype BAM SortedByCoordinate \n".format(**config_dict))
        f.write("{samtools} index {sample_name}.RNA.Aligned.sortedByCoord.out.bam\n".format(**config_dict))
        f.write("{samtools} mpileup -l {sample_name}.snp.checked.bed -f {ref_fasta} {sample_name}.RNA.Aligned.sortedByCoord.out.bam >{sample_name}.RNAseq.mpileup.txt\n".format(**config_dict))
        f.write("{featureCounts} -O -T 20 -t exon -g transcript_id -a {gtf_file} -o {sample_name}.transcript.counts.txt {sample_name}.RNA.Aligned.sortedByCoord.out.bam \n".format(**config_dict))
        f.write("python3 {scripts_dir}/featureCounts2TPM.py -a {sample_name}.transcript.counts.txt -o {sample_name}.RNAseq.transcript.counts.TPM.txt\n".format(**config_dict))
        # add TPM and add RNAseq read depth.
        f.write("python3 {scripts_dir}/add_TPM.py {sample_name}.variants.funcotated.with.minigene.MAF.xls {sample_name}.RNAseq.transcript.counts.TPM.txt transcript_id\n".format(**config_dict))
        f.write("python3 {scripts_dir}/add_RNAseq_read_depth.py {sample_name}.variants.funcotated.with.minigene.MAF.add.TPM.xls {sample_name}.RNAseq.mpileup.txt\n".format(**config_dict))
    print("all finished!")
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
997,134 | 9338e3f0f0ea0690c141e27e9317b8199eca9215 | import sys
import copy
class Board():
    """Rush-hour style board parsed from a '|'-separated row string."""
    _inputString = None
    _boardMatrix = None

    def __init__(self, inputString):
        # keep the raw string and derive the row list from it
        self._inputString = inputString
        self._createBoardMatrix()

    def _createBoardMatrix(self):
        self._boardMatrix = self._inputString.split('|')

    def getBoardMatrix(self):
        """Return the list of row strings."""
        return self._boardMatrix

    def printBoard(self):
        """Render the board with a dashed border and '|' side walls."""
        border = "-" * len(self._boardMatrix[0])
        print(border)
        for row in self._boardMatrix:
            print("|" + row + "|")
        print(border)

    def getBoardLength(self):
        """Number of rows on the board."""
        return len(self._boardMatrix)

    def isDone(self, positions):
        """True when any position's column index equals the last row index.

        NOTE(review): this compares a column (p[1]) against the row count;
        the two only coincide on square boards -- confirm intent.
        """
        limit = self.getBoardLength() - 1
        return any(p[1] == limit for p in positions)
class Car():
    """A car on the board: its character, cell positions, and orientation."""
    _char = None
    _list_positions = None
    # _direction is 'lr' (left-right) or 'ud' (up-down)
    _direction = None

    def __init__(self, char):
        self._char = char
        self._list_positions = list()

    def setPosition(self, el):
        """Record one [row, col] cell occupied by this car."""
        self._list_positions.append(el)

    def setDirection(self):
        """Derive 'lr'/'ud' from the first two recorded positions.

        Assumes at least two positions have been recorded -- the original
        validation for this was left commented out.
        """
        first = self._list_positions[0]
        second = self._list_positions[1]
        if first[0] == second[0]:
            self._direction = 'lr'   # same row: horizontal car
        if first[1] == second[1]:
            self._direction = 'ud'   # same column: vertical car

    def getCharacter(self):
        """Return this car's single-character label."""
        return self._char

    def getPositions(self):
        """Return the recorded [row, col] positions."""
        return self._list_positions
# these next functions in board
# print board
# def printBoard(boardMatrix):
#
# print("-" * len(boardMatrix[0]) )
#
# for i in boardMatrix:
#
# print ("|" + i + "|")
#
#
# print("-" * len(boardMatrix[0]) )
def main():
    """Parse the board string, build Car objects, and run 'print' or 'done'.

    Usage: prog [command] [boardString]; command defaults to 'print'.
    """
    #set default inputString
    inputString = " o aa| o |xxo |ppp q| q| q"
    #count of arguments
    argc = len(sys.argv)
    command = 'print'
    if(argc >= 2):
        command = sys.argv[1]
    #update inputString if one was provided
    if(argc > 2):
        inputString = sys.argv[2]
    #list of cars
    list_car_chars = []
    #characters that are not cars
    ignore_chars = ['|', ' ']
    #variable for storing car classes
    cars = []
    # get all car chars using the inputString
    # the list of cars that we need to place on the board
    for i in range(0, len(inputString)):
        #if this car has already been processed
        #from the inputString
        if inputString[i] in list_car_chars:
            continue
        #if this is not a car character
        if inputString[i] in ignore_chars:
            continue
        #catalog that we have already added this car
        list_car_chars.append(inputString[i])
        #create a car object
        #set the cars character
        temp_car = Car(inputString[i])
        #store the car
        cars.append(temp_car)
    #store the car matrix as an array
    board = Board(inputString)
    # set positions and direction for all cars
    tempBoardMatrix = None
    for car_i in range(0, len(cars)):
        #set position: scan every cell for this car's character
        tempBoardMatrix = board.getBoardMatrix()
        for row_i in range(0, len(tempBoardMatrix)):
            for col_i in range(0, len(tempBoardMatrix[row_i])):
                # find the car represented by the index
                if(tempBoardMatrix[row_i][col_i] == cars[car_i].getCharacter()):
                    cars[car_i].setPosition([row_i, col_i])
        #set the direction
        cars[car_i].setDirection()
    #make decision based on command
    if (command == "print"):
        #print(board.getBoardMatrix())
        board.printBoard()
        # NOTE(review): cars[1] assumes at least two cars exist -- confirm
        print(cars[1].getPositions())
        board.getBoardLength()
    if command == 'done':
        # 'done' reports whether the target car 'x' has reached the exit
        for c in cars:
            if c.getCharacter() == 'x':
                print(board.isDone(c.getPositions()))
    #solutionState(boardMatrix)
    #printBoard(boardMatrix)
    #print(sys.argv)
if __name__ == "__main__":
    main()
|
997,135 | 7dad54ad3c42d9237100443bef98ada3b9803d66 | """
This file contains the tests required by pandas for an ExtensionArray and ExtensionType.
"""
import warnings
import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import (
as_frame, # noqa: F401
as_array, # noqa: F401
as_series, # noqa: F401
fillna_method, # noqa: F401
groupby_apply_op, # noqa: F401
use_numpy, # noqa: F401
)
from pint.errors import DimensionalityError
from pint_pandas import PintArray, PintType
from pint_pandas.pint_array import dtypemap, pandas_version_info
ureg = PintType.ureg
@pytest.fixture(params=[True, False])
def box_in_series(request):
    """Whether to box the data in a Series"""
    return request.param
@pytest.fixture
def dtype():
    """Default PintType under test: a length (meter) dtype."""
    return PintType("pint[meter]")
# Numeric backing dtypes exercised by the suite (complex added to the base two).
_base_numeric_dtypes = [float, int]
_all_numeric_dtypes = _base_numeric_dtypes + [np.complex128]
@pytest.fixture(params=_all_numeric_dtypes)
def numeric_dtype(request):
    """Parametrized backing dtype: float, int, or complex128."""
    return request.param
@pytest.fixture
def data(request, numeric_dtype):
    """PintArray of the 100 nanometer values 1..100.

    `request` is unused here; kept so pytest fixture resolution is uniform.
    """
    return PintArray.from_1darray_quantity(
        np.arange(start=1.0, stop=101.0, dtype=numeric_dtype) * ureg.nm
    )
@pytest.fixture
def data_missing(numeric_dtype):
numeric_dtype = dtypemap.get(numeric_dtype, numeric_dtype)
return PintArray.from_1darray_quantity(
ureg.Quantity(pd.array([np.nan, 1], dtype=numeric_dtype), ureg.meter)
)
@pytest.fixture
def data_for_twos(numeric_dtype):
    """One hundred copies of the quantity 2 metres."""
    twos = [2.0] * 100
    magnitudes = pd.array(twos, dtype=numeric_dtype)
    return PintArray.from_1darray_quantity(magnitudes * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
    """Either the complete dataset or the one with a missing value."""
    # params are exactly the two fixture names, so a two-way pick suffices
    return data if request.param == "data" else data_missing
@pytest.fixture
def data_repeated(data):
    """Yield a factory producing ``count`` copies of the data fixture."""

    def gen(count):
        yield from (data for _ in range(count))

    yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
    """Key callable for sorting methods: either no key or the identity."""
    key = request.param
    return key
@pytest.fixture
def data_for_sorting(numeric_dtype):
    """Three unsorted centimetre values (largest in the middle)."""
    magnitudes = pd.array([0.3, 10.0, -50.0], numeric_dtype)
    return PintArray.from_1darray_quantity(magnitudes * ureg.centimeter)
@pytest.fixture
def data_missing_for_sorting(numeric_dtype):
    """Centimetre values with an NA entry between a high and a low value."""
    pd_dtype = dtypemap.get(numeric_dtype, numeric_dtype)
    magnitudes = pd.array([4.0, np.nan, -5.0], dtype=pd_dtype)
    quantity = ureg.Quantity(magnitudes, ureg.centimeter)
    return PintArray.from_1darray_quantity(quantity)
@pytest.fixture
def na_cmp():
    """Comparator that is True only when both operands hold NA magnitudes."""

    def both_na(x, y):
        return bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))

    return both_na
@pytest.fixture
def na_value(numeric_dtype):
    """The NA scalar for a pint dtype (the magnitude dtype is irrelevant)."""
    missing = PintType("meter").na_value
    return missing
@pytest.fixture
def data_for_grouping(numeric_dtype):
    """Metre values arranged b b NA NA a a b c for groupby tests."""
    low = 1.0
    big = 2.0**32 + 1
    bigger = 2.0**32 + 10
    pd_dtype = dtypemap.get(numeric_dtype, numeric_dtype)
    values = [big, big, np.nan, np.nan, low, low, big, bigger]
    quantity = ureg.Quantity(pd.array(values, dtype=pd_dtype), ureg.m)
    return PintArray.from_1darray_quantity(quantity)
# === missing from pandas extension docs about what has to be included in tests ===
# copied from pandas/pandas/conftest.py
# Dunder names of the binary arithmetic operators (plus their reflected
# variants) driven through the arithmetic-ops fixtures below.
_all_arithmetic_operators = [
    "__add__",
    "__radd__",
    "__sub__",
    "__rsub__",
    "__mul__",
    "__rmul__",
    "__floordiv__",
    "__rfloordiv__",
    "__truediv__",
    "__rtruediv__",
    "__pow__",
    "__rpow__",
    "__mod__",
    "__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
    """Dunder name of the arithmetic operator to exercise."""
    op_dunder = request.param
    return op_dunder
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
    """Dunder name of the rich-comparison operator to exercise
    (==, !=, <=, <, >=, >)."""
    op_dunder = request.param
    return op_dunder
# commented functions aren't implemented in numpy/pandas
# Reduction names driven through Series.<name>(skipna=...) by the tests.
_all_numeric_reductions = [
    "sum",
    "max",
    "min",
    "mean",
    # "prod",
    "std",
    "var",
    "median",
    "sem",
    "kurt",
    "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
    """Name of the numeric reduction to exercise."""
    reduction = request.param
    return reduction
_all_boolean_reductions = ["all", "any"]


@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
    """Name of the boolean reduction (any/all) to exercise."""
    reduction = request.param
    return reduction
_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"]


@pytest.fixture(params=_all_numeric_accumulations)
def all_numeric_accumulations(request):
    """Name of the numeric accumulation to exercise."""
    accumulation = request.param
    return accumulation
@pytest.fixture
def invalid_scalar(data):
    """A scalar that cannot be stored in this ExtensionArray.

    Works for most subclasses; an array that can hold arbitrary objects
    (object dtype) should pytest.skip instead.
    """
    return object()
# =================================================================
class TestCasting(base.BaseCastingTests):
    """Run the pandas base casting tests unchanged against PintArray."""
    pass
class TestConstructors(base.BaseConstructorsTests):
    """Run the pandas base constructor tests unchanged against PintArray."""
    pass
class TestDtype(base.BaseDtypeTests):
    """Run the pandas base dtype tests unchanged against PintType."""
    pass
class TestGetitem(base.BaseGetitemTests):
    """Run the pandas base __getitem__ tests unchanged against PintArray."""
    pass
class TestGroupby(base.BaseGroupbyTests):
    """Groupby behaviour; several cases xfail on assert_frame_equal quirks."""

    @pytest.mark.xfail(run=True, reason="assert_frame_equal issue")
    def test_groupby_apply_identity(self, data_for_grouping):
        frame = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = frame.groupby("A").B.apply(lambda grp: grp.array)
        per_group = [
            frame.B.iloc[[0, 1, 6]].array,
            frame.B.iloc[[2, 3]].array,
            frame.B.iloc[[4, 5]].array,
            frame.B.iloc[[7]].array,
        ]
        expected = pd.Series(
            per_group, index=pd.Index([1, 2, 3, 4], name="A"), name="B"
        )
        tm.assert_series_equal(result, expected)

    @pytest.mark.xfail(run=True, reason="assert_frame_equal issue")
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        frame = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = frame.groupby("B", as_index=as_index).A.mean()
        _, uniques = pd.factorize(data_for_grouping, sort=True)
        if not as_index:
            expected = pd.DataFrame({"B": uniques, "A": [3.0, 1.0, 4.0]})
            tm.assert_frame_equal(result, expected)
        else:
            idx = pd.Index._with_infer(uniques, name="B")
            expected = pd.Series([3.0, 1.0, 4.0], index=idx, name="A")
            tm.assert_series_equal(result, expected)

    def test_in_numeric_groupby(self, data_for_grouping):
        frame = pd.DataFrame(
            {
                "A": [1, 1, 2, 2, 3, 3, 1, 4],
                "B": data_for_grouping,
                "C": [1] * 8,
            }
        )
        result = frame.groupby("A").sum().columns
        tm.assert_index_equal(result, pd.Index(["B", "C"]))

    @pytest.mark.xfail(run=True, reason="assert_frame_equal issue")
    def test_groupby_extension_no_sort(self, data_for_grouping):
        frame = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
        result = frame.groupby("B", sort=False).A.mean()
        _, codes = pd.factorize(data_for_grouping, sort=False)
        expected = pd.Series(
            [1.0, 3.0, 4.0], index=pd.Index._with_infer(codes, name="B"), name="A"
        )
        tm.assert_series_equal(result, expected)
class TestInterface(base.BaseInterfaceTests):
    """Run the pandas base interface tests unchanged against PintArray."""
    pass
class TestMethods(base.BaseMethodsTests):
    """Method tests, with pint-specific overrides for apply/map/insert."""

    def test_apply_simple_series(self, data):
        shifted = pd.Series(data).apply(lambda q: q * 2 + ureg.Quantity(1, q.u))
        assert isinstance(shifted, pd.Series)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map(self, data_missing, na_action):
        series = pd.Series(data_missing)
        if pandas_version_info < (2, 1) and na_action is not None:
            pytest.skip(
                "Pandas EA map function only accepts None as na_action parameter"
            )
        mapped = series.map(lambda q: q, na_action=na_action)
        # the identity map must round-trip the series unchanged
        tm.assert_series_equal(mapped, series)

    @pytest.mark.skip("All values are valid as magnitudes")
    def test_insert_invalid(self):
        pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    """Arithmetic-op tests.

    The class-level *_exc attributes are all None; the expected failure for
    each (operator, operands) combination is computed per call in
    _get_expected_exception instead.
    """
    divmod_exc = None
    series_scalar_exc = None
    frame_scalar_exc = None
    series_array_exc = None
    def _get_expected_exception(
        self, op_name: str, obj, other
    ): # -> type[Exception] | None, but Union types not understood by Python 3.9
        """Return the exception type the op should raise, or None.

        Powers of dimensional quantities are always a DimensionalityError;
        the floor-div/mod family is skipped for complex magnitudes.
        """
        if op_name in ["__pow__", "__rpow__"]:
            return DimensionalityError
        if op_name in [
            "__divmod__",
            "__rdivmod__",
            "floor_divide",
            "remainder",
            "__floordiv__",
            "__rfloordiv__",
            "__mod__",
            "__rmod__",
        ]:
            exc = None
            # NOTE(review): pytest.skip raises immediately, so every
            # `return TypeError` placed directly after a skip below is
            # unreachable dead code.
            if isinstance(obj, complex):
                pytest.skip(f"{type(obj).__name__} does not support {op_name}")
                return TypeError
            if isinstance(other, complex):
                pytest.skip(f"{type(other).__name__} does not support {op_name}")
                return TypeError
            if isinstance(obj, ureg.Quantity):
                pytest.skip(
                    f"{type(obj.m).__name__} Quantity does not support {op_name}"
                )
                return TypeError
            if isinstance(other, ureg.Quantity):
                pytest.skip(
                    f"{type(other.m).__name__} Quantity does not support {op_name}"
                )
                return TypeError
            if isinstance(obj, pd.Series):
                try:
                    # complex magnitudes ("c" kind) do not support these ops
                    if obj.pint.m.dtype.kind == "c":
                        pytest.skip(
                            f"{obj.pint.m.dtype.name} {obj.dtype} does not support {op_name}"
                        )
                        return TypeError
                except AttributeError:
                    # not a pint-backed series: defer to the base class
                    exc = super()._get_expected_exception(op_name, obj, other)
                    if exc:
                        return exc
            if isinstance(other, pd.Series):
                try:
                    if other.pint.m.dtype.kind == "c":
                        pytest.skip(
                            f"{other.pint.m.dtype.name} {other.dtype} does not support {op_name}"
                        )
                        return TypeError
                except AttributeError:
                    exc = super()._get_expected_exception(op_name, obj, other)
                    if exc:
                        return exc
            if isinstance(obj, pd.DataFrame):
                try:
                    df = obj.pint.dequantify()
                    for i, col in enumerate(df.columns):
                        if df.iloc[:, i].dtype.kind == "c":
                            pytest.skip(
                                f"{df.iloc[:, i].dtype.name} {df.dtypes[i]} does not support {op_name}"
                            )
                            return TypeError
                except AttributeError:
                    exc = super()._get_expected_exception(op_name, obj, other)
                    if exc:
                        return exc
            if isinstance(other, pd.DataFrame):
                try:
                    df = other.pint.dequantify()
                    for i, col in enumerate(df.columns):
                        if df.iloc[:, i].dtype.kind == "c":
                            pytest.skip(
                                f"{df.iloc[:, i].dtype.name} {df.dtypes[i]} does not support {op_name}"
                            )
                            return TypeError
                except AttributeError:
                    exc = super()._get_expected_exception(op_name, obj, other)
                    # Fall through...
            return exc
    # The following methods are needed to work with Pandas < 2.1
    def _check_divmod_op(self, s, op, other, exc=None):
        # divmod has multiple return values, so check separately
        if exc is None:
            result_div, result_mod = op(s, other)
            if op is divmod:
                expected_div, expected_mod = s // other, s % other
            else:
                # op is the reflected rdivmod: the operands swap roles
                expected_div, expected_mod = other // s, other % s
            tm.assert_series_equal(result_div, expected_div)
            tm.assert_series_equal(result_mod, expected_mod)
        else:
            with pytest.raises(exc):
                divmod(s, other)
    def _get_exception(self, data, op_name):
        # Pandas < 2.1 path: map (dtype, op) to the exception it should raise.
        if data.data.dtype == pd.core.dtypes.dtypes.PandasDtype("complex128"):
            if op_name in ["__floordiv__", "__rfloordiv__", "__mod__", "__rmod__"]:
                return op_name, TypeError
        if op_name in ["__pow__", "__rpow__"]:
            return op_name, DimensionalityError
        return op_name, None
    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # With Pint 0.21, series and scalar need to have compatible units for
        # the arithmetic to work
        # series & scalar
        if pandas_version_info < (2, 1):
            op_name, exc = self._get_exception(data, all_arithmetic_operators)
            s = pd.Series(data)
            self.check_opname(s, op_name, s.iloc[0], exc=exc)
        else:
            op_name = all_arithmetic_operators
            ser = pd.Series(data)
            self.check_opname(ser, op_name, ser.iloc[0])
    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        # ndarray & other series
        if pandas_version_info < (2, 1):
            op_name, exc = self._get_exception(data, all_arithmetic_operators)
            ser = pd.Series(data)
            self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser)), exc)
        else:
            op_name = all_arithmetic_operators
            ser = pd.Series(data)
            self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser)))
    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
        # frame & scalar
        if pandas_version_info < (2, 1):
            op_name, exc = self._get_exception(data, all_arithmetic_operators)
            df = pd.DataFrame({"A": data})
            self.check_opname(df, op_name, data[0], exc=exc)
        else:
            op_name = all_arithmetic_operators
            df = pd.DataFrame({"A": data})
            self.check_opname(df, op_name, data[0])
    # parameterise this to try divisor not equal to 1 Mm
    @pytest.mark.parametrize("numeric_dtype", _base_numeric_dtypes, indirect=True)
    def test_divmod(self, data):
        ser = pd.Series(data)
        self._check_divmod_op(ser, divmod, 1 * ureg.Mm)
        self._check_divmod_op(1 * ureg.Mm, ops.rdivmod, ser)
    @pytest.mark.parametrize("numeric_dtype", _base_numeric_dtypes, indirect=True)
    def test_divmod_series_array(self, data, data_for_twos):
        ser = pd.Series(data)
        self._check_divmod_op(ser, divmod, data)
        other = data_for_twos
        self._check_divmod_op(other, ops.rdivmod, ser)
        other = pd.Series(other)
        self._check_divmod_op(other, ops.rdivmod, ser)
class TestComparisonOps(base.BaseComparisonOpsTests):
    """Comparison tests checked against the underlying pint quantity."""

    def _compare_other(self, s, data, op_name, other):
        operator_fn = self.get_op_from_name(op_name)
        observed = operator_fn(s, other)
        # the series result must match comparing the raw quantity directly
        reference = operator_fn(s.values.quantity, other)
        assert (observed == reference).all()

    def test_compare_scalar(self, data, all_compare_operators):
        series = pd.Series(data)
        self._compare_other(series, data, all_compare_operators, data[0])

    def test_compare_array(self, data, all_compare_operators):
        # nb this compares an quantity containing array
        # eg Q_([1,2],"m")
        series = pd.Series(data)
        self._compare_other(series, data, all_compare_operators, data)
class TestOpsUtil(base.BaseOpsUtil):
    """Inherit the pandas base ops utilities unchanged."""
    pass
# Parsing is only exercised for the real (non-complex) magnitude dtypes.
@pytest.mark.parametrize("numeric_dtype", _base_numeric_dtypes, indirect=True)
class TestParsing(base.BaseParsingTests):
    """Run the pandas base parsing tests against PintArray."""
    pass
class TestPrinting(base.BasePrintingTests):
    """Run the pandas base repr/printing tests unchanged against PintArray."""
    pass
class TestMissing(base.BaseMissingTests):
    """Run the pandas base missing-value tests unchanged against PintArray."""
    pass
class TestNumericReduce(base.BaseNumericReduceTests):
    """Numeric reductions, with unit bookkeeping for the expected values."""

    def check_reduce(self, s, op_name, skipna):
        observed = getattr(s, op_name)(skipna=skipna)
        magnitude = getattr(pd.Series(s.values.quantity._magnitude), op_name)(
            skipna=skipna
        )
        # kurtosis/skewness are dimensionless; variance squares the unit
        if op_name in {"kurt", "skew"}:
            units = None
        elif op_name == "var":
            units = s.values.quantity.units**2
        else:
            units = s.values.quantity.units
        expected = magnitude if units is None else ureg.Quantity(magnitude, units)
        assert observed == expected

    @pytest.mark.skip("tests not written yet")
    def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
        pass

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_scaling(self, data, all_numeric_reductions, skipna):
        """Reductions must agree physically whatever unit the data carries.

        `mm` is used rather than `m` because with `m` the magnitudes become so
        small that pandas discards them in the kurtosis calculation, leading
        to different results.
        """
        op_name = all_numeric_reductions
        series_nm = pd.Series(data)
        series_mm = pd.Series(
            PintArray.from_1darray_quantity(data.quantity.to(ureg.mm))
        )
        with warnings.catch_warnings():
            # min/max with empty produce numpy warnings
            warnings.simplefilter("ignore", RuntimeWarning)
            r_nm = getattr(series_nm, op_name)(skipna=skipna)
            r_mm = getattr(series_mm, op_name)(skipna=skipna)
        if isinstance(r_nm, ureg.Quantity):
            # express both results in the same unit, then take the magnitude
            v_nm = r_nm.m_as(r_mm.units)
            v_mm = r_mm.m
        else:
            v_nm, v_mm = r_nm, r_mm
        assert np.isclose(v_nm, v_mm, rtol=1e-3), f"{r_nm} == {r_mm}"

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_xx(self, data, all_numeric_reductions, skipna):
        series = pd.Series(data)
        with warnings.catch_warnings():
            # min/max with empty produce numpy warnings
            warnings.simplefilter("ignore", RuntimeWarning)
            self.check_reduce(series, all_numeric_reductions, skipna)
class TestBooleanReduce(base.BaseBooleanReduceTests):
    """Boolean reductions compared against the raw magnitudes."""

    def check_reduce(self, s, op_name, skipna):
        observed = getattr(s, op_name)(skipna=skipna)
        magnitudes = pd.Series(s.values.quantity._magnitude)
        assert observed == getattr(magnitudes, op_name)(skipna=skipna)
class TestReshaping(base.BaseReshapingTests):
    """Reshaping tests; unstack is re-parametrized but delegates to the base."""
    @pytest.mark.xfail(run=True, reason="assert_frame_equal issue")
    @pytest.mark.parametrize(
        "index",
        [
            # Two levels, uniform.
            pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),
            # non-uniform
            pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
            # three levels, non-uniform
            pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),
            pd.MultiIndex.from_tuples(
                [
                    ("A", "a", 1),
                    ("A", "b", 0),
                    ("A", "a", 0),
                    ("B", "a", 0),
                    ("B", "c", 1),
                ]
            ),
        ],
    )
    @pytest.mark.parametrize("obj", ["series", "frame"])
    def test_unstack(self, data, index, obj):
        # Delegate to the pandas base implementation with our parameters.
        # NOTE(review): this references base.TestReshaping (not super());
        # confirm that name exists in the installed pandas test base module.
        base.TestReshaping.test_unstack(self, data, index, obj)
class TestSetitem(base.BaseSetitemTests):
    """Setitem tests, restricted to the real (non-complex) magnitude dtypes."""

    @pytest.mark.parametrize("numeric_dtype", _base_numeric_dtypes, indirect=True)
    def test_setitem_scalar_key_sequence_raise(self, data):
        # This can be removed when https://github.com/pandas-dev/pandas/pull/54441 is accepted
        base.BaseSetitemTests.test_setitem_scalar_key_sequence_raise(self, data)

    @pytest.mark.parametrize("numeric_dtype", _base_numeric_dtypes, indirect=True)
    def test_setitem_2d_values(self, data):
        # GH50085: assigning a 2-D block of values must swap the two rows
        before = data.copy()
        frame = pd.DataFrame({"a": data, "b": data})
        frame.loc[[0, 1], :] = frame.loc[[1, 0], :].values
        assert (frame.loc[0, :] == before[1]).all()
        assert (frame.loc[1, :] == before[0]).all()
class TestAccumulate(base.BaseAccumulateTests):
    """Accumulations; cumprod must fail because units would change."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna):
        if pandas_version_info < (2, 1):
            # Should this be skip? Historic code simply used pass.
            pass

    def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
        return True

    def check_accumulate(self, s, op_name, skipna):
        if op_name == "cumprod":
            # multiplying metres by metres cannot stay in the same dtype
            with pytest.raises(TypeError):
                getattr(s, op_name)(skipna=skipna)
            return
        observed = getattr(s, op_name)(skipna=skipna)
        plain = getattr(pd.Series(s.values.data), op_name)(skipna=skipna)
        expected = pd.Series(plain, dtype=s.dtype)
        tm.assert_series_equal(observed, expected, check_dtype=False)
|
997,136 | f1cc4f03968259f6faeb4377010bb1fe9d21d211 | from rest_framework import serializers
from rest_framework.permissions import IsAuthenticated
import logging
from qualiCar_API import models
# Get an instance of a logger, named after this module (Django convention).
logger = logging.getLogger(__name__)
class qualiCarSerializer (serializers.Serializer):
    """ Serializes a name field for testing our APIView """
    # Free-text name, capped at 10 characters.
    name = serializers.CharField (max_length=10)
class UserProfileSerializer (serializers.ModelSerializer):
    """
    Serializes a user profile object
    (This one used ModelSerializer because UserProfile already exists)
    """
    # Indicates the model that Serializer will point
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # NOTE(review): this makes password WRITE-only (accepted on input but
        # never returned in responses) -- the old comment said "READ ONLY",
        # which was backwards.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type': 'password'}
            }
        }
    # Overwrite create function to secure password field
    def create (self, validated_data):
        """ Create and return a new user """
        # create_user hashes the password rather than storing it in plain text
        user = models.UserProfile.objects.create_user(
            email = validated_data ['email'],
            name = validated_data ['name'],
            password = validated_data ['password']
        )
        return user
class DateSerializer (serializers.ModelSerializer):
    """ Serializes Date items """
    class Meta:
        model = models.Date
        fields = ('id', 'startDate', 'endDate', 'create_on', 'description', 'author')
        # NOTE(review): the intent was to make author/create_on read only, but
        # the entries below are commented out, so extra_kwargs is empty. If
        # re-enabled, the key must be 'read_only' (underscore) -- or simpler:
        # read_only_fields = ('author', 'create_on') on this Meta.
        extra_kwargs = {
            #'author': { 'read-only': True }
            #'create_on': { 'read-only': True }
        }
class PartSerializer (serializers.ModelSerializer):
    """ Serializes Part items """
    class Meta:
        model = models.Part
        fields = ('id', 'name', 'description', 'create_on', 'author')
        # NOTE(review): as in DateSerializer, the read-only intent is not
        # implemented -- the entries are commented out (and use the wrong key
        # 'read-only' instead of 'read_only'), so extra_kwargs is empty.
        extra_kwargs = {
            #'author': { 'read-only': True }
            #'create_on': { 'read-only': True }
        }
class VehicleSerializer (serializers.ModelSerializer):
    """ Serializes Vehicle items """
    # Nested, read-only list of the vehicle's parts.
    parts = PintSerializer = PartSerializer(read_only=True, many=True, allow_empty=True)
    class Meta:
        model = models.Vehicle
        #fields = ('id', 'brand', 'model', 'create_on', 'author', 'parts')
        fields = '__all__'
        # NOTE(review): same as DateSerializer/PartSerializer -- the read-only
        # intent is not implemented; extra_kwargs is empty.
        extra_kwargs = {
            #'author': { 'read-only': True }
            #'create_on': { 'read-only': True }
        }
class incidentSerializer (serializers.ModelSerializer):
    """ Serializes Incident items """
    # Optional link to the damaged Part; may be null or omitted entirely.
    part = serializers.PrimaryKeyRelatedField (
        queryset=models.Part.objects.all(),
        allow_null = True,
        allow_empty = True,
    )
    # Author defaults to None; validate_author below overrides whatever the
    # client submits with the authenticated request user.
    author = serializers.PrimaryKeyRelatedField (
        queryset=models.UserProfile.objects.all(),
        required=False,
        allow_null=True,
        default=None
    )
    # NOTE(review): permission_classes has no effect on a Serializer -- it is
    # a DRF *view* attribute and should live on the APIView/ViewSet instead.
    permission_classes = (IsAuthenticated,)
    class Meta:
        model = models.Incident
        fields = '__all__'
        read_only_fields=(
            'id',
        )
    def validate_part (self, value):
        """ Custom part validator """
        logger.info ("Incident serializer -> validate_part method")
        logger.info (" Value = %s", value)
        # TODO implement the part validation (if this part exists, i.e.)
        return value
    def validate_author (self, value):
        """ Custom author validator """
        logger.info ("Incident serializer -> validate_author method")
        logger.info (" Value = %s", value)
        # Ignore the submitted author and stamp the authenticated user instead.
        return self.context['request'].user
    # NOTE(review): dead commented-out to_internal_value / perform_update /
    # perform_create / create scaffolding (which contained a `saVe` typo)
    # removed during review; recover it from version control if needed.
|
997,137 | 801d907c522ddb48e870cc5dcc0d86db1b1df14a | '''
---------------------------
Licensing and Distribution
---------------------------
Program name: Pilgrim
Version : 2021.5
License : MIT/x11
Copyright (c) 2021, David Ferro Costas (david.ferro@usc.es) and
Antonio Fernandez Ramos (qf.ramos@usc.es)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : modpilgrim |
| Sub-module : optSUMMARY |
| Last Update: 2021/11/22 (Y/M/D) |
| Main Author: David Ferro-Costas |
*----------------------------------*
Module for the --summary option
of Pilgrim
'''
#==================================================#
import random
import os
import numpy as np
#--------------------------------------------------#
from common.physcons import KCALMOL,KB
from common.fncs import eformat
from common.fncs import exp128
#--------------------------------------------------#
from modpilgrim.diverse import status_check
from modpilgrim.diverse import ffchecking
from modpilgrim.names import get_pof
#==================================================#
#==================================================#
# RELATED TO PFN OUTPUT FILES #
#==================================================#
def readout_pfn(pof):
    """Parse a Pilgrim partition-function (pfn) output file.

    Returns a nested dict: data[itc][prop] for every conformer `itc`, plus
    data["all"] holding the multi-structural totals. Scalar values are kept
    as the strings read from the file; per-temperature properties are lists
    running parallel to data[...]["T"].
    """
    with open(pof,'r') as asdf: lines = asdf.readlines()
    data = {}
    data["all"] = {}
    for idx,line in enumerate(lines):
        # weight for each itc
        if "| weight |" in line:
            idx2 = idx+2
            while True:
                if "------" in lines[idx2]: break
                itc,relV0,relV1,zpe,mass,weight = lines[idx2].replace("|"," ").split()[0:6]
                data[itc] = {}
                data[itc]["weight"] = weight
                data[itc]["relV0" ] = relV0
                data[itc]["relV1" ] = relV1
                idx2 += 1
        # Conformer
        if "Conformation:" in line:
            # `itc` stays bound for the following sections of this conformer
            itc = line.split()[-1]
            #data[itc] = {}
        # V0, V1 and ZPE
        if "Electronic energy " in line:
            data[itc]["V0" ] = line.split()[-2]
            data[itc]["V1" ] = line.split()[-2]
        if "V0 + zero-point en" in line: data[itc]["V1" ] = line.split()[-2]
        if "zero-point energy:" in line: data[itc]["zpe"] = line.split()[-3]
        # part fncs
        if "| Partition functions (pfns):" in line:
            data[itc]["T" ] = []
            data[itc]["Qtr" ] = []
            data[itc]["Qrot"] = []
            data[itc]["Qvib(V1)"] = []
            data[itc]["Qel" ] = []
            data[itc]["Qtot(V1)"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                the_line = lines[idx2].replace("|"," ")
                T,Qtr,Qtr_ml,Qrot,Qvib_V0,Qvib_V1,Qel = the_line.split()
                data[itc]["T" ].append(T)
                data[itc]["Qtr" ].append(Qtr )
                data[itc]["Qrot" ].append(Qrot)
                data[itc]["Qvib(V1)" ].append(Qvib_V1)
                data[itc]["Qel" ].append(Qel )
                #data[itc]["Qtot(V1)" ].append(Qtot)
                idx2 += 1
            # the totals sub-table starts a fixed number of lines below
            idx2 += 9
            while True:
                if "------" in lines[idx2]: break
                the_line = lines[idx2].replace("|"," ")
                T,Qtot_V0_au,Qtot_V1_au,Qtot_V0_ml,Qtot_V1_ml = the_line.split()
                data[itc]["Qtot(V1)" ].append(Qtot_V1_au)
                idx2 += 1
        # Gibbs
        if "| Gibbs free energy (hartree):" in line:
            data[itc]["G(v)"] = []
            data[itc]["G(p)"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                T,GV0,Gp0 = lines[idx2].replace("|"," ").split()
                # store Gibbs energies relative to V0, converted to kcal/mol
                GV0 = "%.3f"%((float(GV0)-float(data[itc]["V0"]))*KCALMOL)
                Gp0 = "%.3f"%((float(Gp0)-float(data[itc]["V0"]))*KCALMOL)
                data[itc]["G(v)"].append(GV0)
                data[itc]["G(p)"].append(Gp0)
                idx2 += 1
        # min(V0), min(V1)
        if "min(V0) =" in line: data["all"]["V0" ] = line.split()[-2]
        if "min(V1) =" in line: data["all"]["V1" ] = line.split()[-2]
        # total part fncs
        if "Total multi-structural HO" in line:
            data["all"]["T" ] = []
            data["all"]["Qtot(V1)" ] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                the_line = lines[idx2].replace("|"," ")
                T,QMSHO_V0_au,QMSHO_V1_au,QMSHO_V0_ml,QMSHO_V1_ml = the_line.split()
                data["all"]["T" ].append(T)
                data["all"]["Qtot(V1)" ].append(QMSHO_V1_au)
                idx2 += 1
        # total Gibbs free energies
        if "Total HO Gibbs free energies" in line:
            data["all"]["G(v)"] = []
            data["all"]["G(p)"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                the_line = lines[idx2].replace("|"," ")
                T,GV0,Gp0 = the_line.split()
                GV0 = "%.3f"%((float(GV0)-float(data["all"]["V0"]))*KCALMOL)
                Gp0 = "%.3f"%((float(Gp0)-float(data["all"]["V0"]))*KCALMOL)
                data["all"]["G(v)"].append(GV0)
                data["all"]["G(p)"].append(Gp0)
                idx2 += 1
        # Anharmonicity
        if " ZPE_MSHO:" in line: data["all"]["ZPE_MSHO"] = line.split()[-2]
        if " ZPE_ANH :" in line: data["all"]["ZPE_ANH" ] = line.split()[-2]
        if "Calculating anh. ratio" in line:
            data["all"]["ANH. RATIO" ] = []
            idx2 = idx+5
            while True:
                if "------" in lines[idx2]: break
                T,anh = lines[idx2].replace("|"," ").split()
                data["all"]["ANH. RATIO" ].append(anh)
                idx2 += 1
    # Rovibrational itc
    # np.float128 guards against overflow in the products/ratios below
    # (NOTE(review): np.float128 is not available on all platforms).
    for itc in data.keys():
        if itc == "all": continue
        Qrot = np.array([np.float128(v) for v in data[itc]["Qrot"]])
        Qvib = np.array([np.float128(v) for v in data[itc]["Qvib(V1)"]])
        Qrv = ["%.3E"%v for v in Qrot*Qvib]
        data[itc]["Qrv(V1)" ] = Qrv
    # Rovibrational all
    # NOTE(review): relies on `itc` keeping its value from the loop above,
    # i.e. Qtr/Qel are taken from the last conformer -- confirm intended.
    try:
        Qtot = np.array([np.float128(v) for v in data["all"]["Qtot(V1)"]])
        Qtr = np.array([float(v) for v in data[itc]["Qtr"]])
        Qel = np.array([float(v) for v in data[itc]["Qel"]])
        Qrv = ["%.3E"%v for v in Qtot/(Qtr*Qel)]
        data["all"]["Qrv(V1)"] = Qrv
        data["all"]["Qtr"] = list(data[itc]["Qtr"])
        data["all"]["Qel"] = list(data[itc]["Qel"])
    # NOTE(review): bare except silently skips this section on any error
    except: pass
    # vib with regards to V0
    for itc in data.keys():
        try:
            # correction factor from V1 to V0
            V0, V1 = float(data[itc]["V0"]), float(data[itc]["V1"])
            V1toV0 = np.array([exp128((V0-V1)/KB/float(T)) \
                     for T in data[itc]["T"]])
            # correct Qvib
            if itc != "all":
                Qvib = np.array([np.float128(v) for v in data[itc]["Qvib(V1)"]])
                data[itc]["Qvib"] = [eformat(v,3) for v in Qvib * V1toV0]
            # correct Qrv
            Qrv = np.array([np.float128(v) for v in data[itc]["Qrv(V1)"]])
            data[itc]["Qrv" ] = [eformat(v,3) for v in Qrv * V1toV0]
            # correct Qtot
            Qtot = np.array([np.float128(v) for v in data[itc]["Qtot(V1)"]])
            data[itc]["Qtot" ] = [eformat(v,3) for v in Qtot * V1toV0]
        # NOTE(review): bare except silently skips entries with missing keys
        except: pass
    # ANH partition function
    try:
        Qar = np.array([float(v) for v in data["all"]["ANH. RATIO"]])
        Qtot = np.array([np.float128(v) for v in data["all"]["Qtot"]])
        T = np.array([float(v) for v in data["all"]["T"]])
        # Gibbs correction from the anharmonic ratio: -kB*T*ln(ratio)
        Gcor = - (np.log(Qar)*T*KB)*KCALMOL
        GV = np.array([float(v) for v in data["all"]["G(v)"]])
        Gp = np.array([float(v) for v in data["all"]["G(p)"]])
        data["all"]["Qanh" ] = [eformat(v,3) for v in Qar*Qtot]
        data["all"]["Ganh(v)"] = ["%.3f"%v for v in GV+Gcor]
        data["all"]["Ganh(p)"] = ["%.3f"%v for v in Gp+Gcor]
    # NOTE(review): bare except -- anharmonic data is optional in the file
    except: pass
    # return all
    return data
#--------------------------------------------------#
def genpfntable1(data,itcs):
    """Print the conformer energetics table (weight, V0, V1, relative values)."""
    props = "conf,weight,V0,V1,relV0,relV1"
    frow = " %7s | %6s | %14s | %14s | %6s | %6s "
    head = frow%tuple(props.split(","))
    bar = "-"*len(head)
    dbar = "="*len(head)
    # legend describing each column
    parts = [
        dbar,
        "Columns:",
        " - conf : conformer",
        " - weight : conformer weight",
        " - V0 : electronic energy [hartree]",
        " - V1 : V0+zero-point energy [hartree]",
        " - relV0 : relative V0 [kcal/mol]",
        " - relV1 : relative V1 [kcal/mol]",
        "",
        bar, head, bar,
    ]
    # one row per conformer; missing properties print as " - "
    keys = props.split(",")[1:]
    for itc in itcs:
        fields = [itc] + [data[itc].get(key," - ") for key in keys]
        parts.append(frow%tuple(fields))
    parts += [bar, "", dbar, "", ""]
    string = "\n".join(parts)
    # print
    for line in string.split("\n"): print(" %s"%line)
#--------------------------------------------------#
def genpfntable2(data,itcs,stemps):
    """Print partition-function tables.

    With a single conformer (case 1) one table is printed with a row per
    temperature; with several conformers (case 2) one table is printed per
    temperature with a row per conformer plus the 'all' total.

    Fixes vs. the previous version: the per-property try/except (which also
    set an unused `bool1` flag) is replaced with dict.get, matching
    genpfntable1, and props.split is hoisted out of the loops.
    """
    if len(itcs) == 1: case, props, frow = 1, "T (K),"," %7s |"
    else             : case, props, frow = 2, "conf," ," %7s |"
    if case == 2: itcs = itcs + ["all"]
    props += "Qtr,Qel,Qrot,Qvib,Qrv,Qtot,Qanh"
    frow += " %10s | %10s | %10s | %10s | %10s | %10s | %10s "
    head = frow%tuple(props.split(","))
    # legend
    sinfo = "="*len(head)+"\n"
    sinfo += "Columns (pf stands for partition function):\n"
    sinfo += " - Qtr : traslational pf (per unit volume, in au)\n"
    sinfo += " - Qel : electronic pf\n"
    sinfo += " - Qrot : rotational pf\n"
    sinfo += " - Qvib : vibrational pf\n"
    sinfo += " - Qrv : rovibrational pf\n"
    sinfo += " - Qtot : total MS-HO pf (per unit volume, in au)\n"
    sinfo += " - Qanh : total anharmonic pf (per unit volume, in au)\n"
    sinfo += "\n"
    sinfo += " * Qvib, Qrv, Qtot and Qanh --> relative to V0\n"
    sinfo += "\n"
    for line in sinfo.split("\n"): print(" %s"%line)
    # table body
    the_props = props.split(",")[1:]
    if case == 1:
        string = "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
    else: string = ""
    for sT in stemps:
        if case == 2:
            string += "TEMPERATURE: %s K\n"%sT
            string += "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
        else:
            vals = [sT]
        for itc in itcs:
            data_i = data[itc]
            if case == 2: vals = [itc]
            # skip conformers without data at this temperature
            if sT not in data_i.get("T",[]): continue
            idxT = data_i["T"].index(sT)
            for prop in the_props:
                val = data_i.get(prop," - ")
                # per-temperature properties are lists; pick this T's entry
                if type(val) in [list,tuple]: val = val[idxT]
                vals.append(val)
            string += frow%tuple(vals)+"\n"
        if case == 2: string += "-"*len(head)+"\n\n\n"
    if case == 1: string += "-"*len(head)+"\n"
    string += "="*len(head)+"\n\n"
    # print
    for line in string.split("\n"): print(" %s"%line)
    print("")
#--------------------------------------------------#
def genpfntable3(data,itcs,stemps):
    """Print Gibbs free-energy tables (MS-HO and anharmonic, two volume refs).

    Same layout as genpfntable2: one table (row per temperature) for a single
    conformer, otherwise one table per temperature with a row per conformer
    plus the 'all' total.

    Fixes vs. the previous version: the per-property try/except (which also
    set an unused `bool1` flag) is replaced with dict.get, matching
    genpfntable1, and props.split is hoisted out of the loops.
    """
    if len(itcs) == 1: case, props, frow = 1, "T (K),"," %7s |"
    else             : case, props, frow = 2, "conf," ," %7s |"
    if case == 2: itcs = itcs + ["all"]
    props += "G(v),G(p),Ganh(v),Ganh(p)"
    frow += " %10s | %10s | %10s | %10s "
    head = frow%tuple(props.split(","))
    # legend
    sinfo = "="*len(head)+"\n"
    sinfo += "Columns:\n"
    sinfo += " - G : MS-HO Gibbs free energy [kcal/mol]\n"
    sinfo += " - Ganh : anharmonic Gibbs free energy [kcal/mol]\n"
    sinfo += "\n"
    sinfo += " * Values are relative to V0\n"
    sinfo += " * (v) --> for a volume per molecule of v = 1cm^3\n"
    sinfo += " * (p) --> for a volume per molecule of v = kB*T/p0\n"
    sinfo += " with p0=1 bar\n"
    sinfo += "\n"
    for line in sinfo.split("\n"): print(" %s"%line)
    # table body
    the_props = props.split(",")[1:]
    if case == 1:
        string = "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
    else: string = ""
    for sT in stemps:
        if case == 2:
            string += "TEMPERATURE: %s K\n"%sT
            string += "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
        else:
            vals = [sT]
        for itc in itcs:
            data_i = data[itc]
            if case == 2: vals = [itc]
            # skip conformers without data at this temperature
            if sT not in data_i.get("T",[]): continue
            idxT = data_i["T"].index(sT)
            for prop in the_props:
                val = data_i.get(prop," - ")
                # per-temperature properties are lists; pick this T's entry
                if type(val) in [list,tuple]: val = val[idxT]
                vals.append(val)
            string += frow%tuple(vals)+"\n"
        if case == 2: string += "-"*len(head)+"\n\n\n"
    if case == 1: string += "-"*len(head)+"\n"
    # print
    string += "="*len(head)+"\n\n"
    for line in string.split("\n"): print(" %s"%line)
    print("")
#--------------------------------------------------#
def summary_pfn(targets,ltemp,dctc,dlevel):
    """Print the partition-function (pfn) summary tables for one system.

    targets : mixed list; entries that parse as floats are temperatures,
              the rest are system (ctc) names
    ltemp   : default temperature list, used when none appears in targets
    dctc    : dict of systems, keyed by name
    dlevel  : level descriptor forwarded to get_pof to locate the file
    """
    # Temperature(s) & CTCs
    ctcs = []
    stemps = []
    for target in targets:
        try : stemps.append(float(target))
        except: ctcs.append(target)
    # selected system
    # NOTE(review): when no system was given, this always lists the
    # available ones and returns -- the 'if True:' looks like a leftover
    # from a removed condition.
    if len(ctcs ) == 0:
        ctcs = list(dctc.keys())
        if True:
            print(" --> System NOT selected :")
            ml = max([len(ctc) for ctc in ctcs])
            for idx in range(0,len(ctcs),4):
                print(" "+" ".join("%%-%is"%ml%ctc for ctc in ctcs[idx:idx+4]))
            return
    # Only 1 system!
    # NOTE(review): relies on a module-level 'import random' elsewhere in
    # this file -- confirm; picks one system when several were given.
    try : ctc = random.choice(ctcs)
    except: return
    # Selected temperature(s)
    if len(stemps) == 0: stemps = list(ltemp)
    # float --> str
    stemps = ["%.2f"%T for T in sorted(stemps)]
    print(" --> Molecule : %s"%ctc)
    if len(stemps) == 1:
        print(" --> Selected temperature : %s K"%stemps[0])
    print("")
    #-------------------#
    # Read output files #
    #-------------------#
    # whole ctc or individual itc?
    # a name like 'system.conformer' selects a single conformer (itc)
    if "." in ctc: ctc,the_itc = ctc.split(".")
    else: the_itc = None
    # read ctc file
    pof = get_pof(dlevel,"pfn",ctc)
    print(" Reading file: %s"%pof)
    if not os.path.exists(pof):
        print(" --> NOT FOUND")
        return
    data = readout_pfn(pof)
    print("")
    # list of conformers ('all' is the aggregated entry, not a conformer)
    if the_itc is None: itcs = list([itc for itc in data.keys() if itc != "all"])
    else : itcs = [the_itc]
    #-----------------#
    # Generate tables #
    #-----------------#
    genpfntable1(data,itcs)
    genpfntable2(data,itcs,stemps)
    genpfntable3(data,itcs,stemps)
#==================================================#
#==================================================#
# RELATED TO PATH OUTPUT FILES #
#==================================================#
def readout_path(pof,mep=False):
    """Parse a Pilgrim 'path' output file (MEP/VTST calculation).

    pof : path to the output file
    mep : when True, also collect the data tabulated along the MEP
          (V_MEP, adiabatic potentials, effective mass)

    Returns a dict whose keys depend on which sections were found; all
    values are kept as strings, and list values run parallel to data["T"]
    (or data["s"] for MEP data).
    NOTE(review): 'data' is only created when the marker
    'Variables for first step' is found, so a malformed file would make
    the final return raise NameError.
    """
    with open(pof,'r') as asdf: lines = asdf.readlines()
    for idx,line in enumerate(lines):
        # initialize variables
        if "Variables for first step" in line:
            data = {}
            continue
        # Imaginary frequency (sign stripped, 'i' suffix appended)
        if "Vibrational frequencies [" in line:
            data["ifreq"] = lines[idx+1].split()[0].replace("-","")+"i"
        # Imag freqs along MEP
        if "Fine! There are no ima" in line: data["iMEP"] = False
        if "WARNING! There are ima" in line: data["iMEP"] = True
        # proper direction of MEP
        if "* MEP size towards reactant(s) seems to be prop" in line: data["unknowndir"] = False
        if "* MEP size towards reactant(s) was NOT properly" in line: data["unknowndir"] = True
        # CVT table: T | s_CVT | Gamma^CVT (offsets assume fixed layout)
        if "s_CVT" in line and "Gamma^CVT" in line:
            data["T"] = []
            data["s_CVT"] = []
            data["Gamma^CVT"] = []
            idx2 = idx+2
            while True:
                if "------" in lines[idx2]: break
                T, s, gamma = lines[idx2].replace("|"," ").split()[0:3]
                data["T"].append(T)
                data["s_CVT"].append(s)
                data["Gamma^CVT"].append(gamma)
                idx2 += 1
        # Max of VaG
        if "Maximum of VaG (VAG)" in line:
            data["sAG"] = line.split()[ 0]
            data["VAG"] = line.split()[-3]
        # E0 and (again) VAG
        if "E0 =" in line: data["E0" ] = line.split()[-2]
        if "VAG =" in line: data["VAG"] = line.split()[-2]
        # ZCT transmission coefficients (idx+4 skips the table header rows)
        if "ZCT transmission coefficient:" in line:
            data["T" ] = []
            data["kappa^ZCT" ] = []
            data["RTE(ZCT)"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                T, i1, i2, kappa, rte = lines[idx2].replace("|"," ").replace("*","").replace("++","").split()
                data["T" ].append(T)
                data["kappa^ZCT" ].append(kappa)
                data["RTE(ZCT)"].append(rte)
                idx2 += 1
        # SCT transmission coefficients (same layout as ZCT)
        if "SCT transmission coefficient:" in line:
            data["T" ] = []
            data["kappa^SCT" ] = []
            data["RTE(SCT)"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                T, i1, i2, kappa, rte = lines[idx2].replace("|"," ").replace("*","").replace("++","").split()
                data["T" ].append(T)
                data["kappa^SCT" ].append(kappa)
                data["RTE(SCT)"].append(rte)
                idx2 += 1
        # CAG coefficients
        if "Calculating CAG coefficient..." in line:
            data["T" ] = []
            data["TST/CAG"] = []
            data["CVT/CAG"] = []
            idx2 = idx+5
            while True:
                if "------" in lines[idx2]: break
                T, d1, cagtst, d2, cagcvt = lines[idx2].replace("|"," ").split()
                data["T" ].append(T)
                data["TST/CAG"].append(cagtst)
                data["CVT/CAG"].append(cagcvt)
                idx2 += 1
        # P(E0): tunnelling probabilities at E0 plus the [s<,s>] interval
        if "P^ZCT(E)" in line:
            tmp = lines[idx+3].replace("|"," ")
            tmp = tmp.replace("B","").replace("L","").replace("R","")
            s1s2 = tmp.split()[3].replace("[","").replace("]","")
            data["E0"] = tmp.split()[0]
            data["P^ZCT(E0)"] = tmp.split()[1]
            data["P^SCT(E0)"] = tmp.split()[2]
            data["s<"] = s1s2.split(",")[0]
            data["s>"] = s1s2.split(",")[1]
        # Get data along MEP (only when requested)
        if mep and "Eref + VaG (au)" in line:
            data["s" ] = []
            data["V_MEP" ] = []
            data["VaG(cc)"] = []
            data["VaG(ic)"] = []
            idx2 = idx+2
            while True:
                if "------" in lines[idx2]: break
                s,Vmep,zpecc,zpeic = lines[idx2].replace("|"," ").split()[0:4]
                data["s" ].append(s)
                data["V_MEP" ].append(Vmep)
                # adiabatic potential = V_MEP + ZPE (cartesian / internal);
                # internal-coordinate ZPE may be missing -> ' - '
                Vadicc = "%.3f"%(float(Vmep)+float(zpecc))
                try : Vadiic = "%.3f"%(float(Vmep)+float(zpeic))
                except: Vadiic = " - "
                data["VaG(cc)"].append(Vadicc)
                data["VaG(ic)"].append(Vadiic)
                idx2 += 1
        if mep and "- Effective mass (mueff) in a.u." in line:
            data["mueff/mu"] = []
            idx2 = idx+4
            while True:
                if "------" in lines[idx2]: break
                mueff = lines[idx2].replace("|"," ").replace("*","").split()[-1]
                data["mueff/mu"].append(mueff)
                idx2 += 1
    # gamma^CVT/SCT = Gamma^CVT * kappa^SCT * kappa^(CVT/CAG);
    # silently skipped when any of the three tables is missing
    try:
        p1 = np.array([float(v) for v in data["Gamma^CVT"]])
        p2 = np.array([float(v) for v in data["kappa^SCT"]])
        p3 = np.array([float(v) for v in data["CVT/CAG"]])
        data["gamma^CVT/SCT"] = ["%.3E"%v for v in p1*p2*p3]
    except:
        pass
    # return all
    return data
#--------------------------------------------------#
def genpathtable1(data):
    """Print the per-conformer summary table of the MEP/tunnelling data.

    data maps each conformer label to the dict produced by readout_path;
    properties missing for a conformer are shown as ' - '.
    """
    columns = "conf,ifreq,E0,VAG,sAG,s<,s>,P^SCT(E0)".split(",")
    rowfmt = " %5s | %9s | %10s | %10s | %8s | %8s | %8s | %10s "
    header = rowfmt % tuple(columns)
    rule = "-" * len(header)
    edge = "=" * len(header)
    # legend describing each column, then the framed table header
    out = [edge,
           "Columns:",
           " - conf : conformer",
           " - ifreq : imaginary frequency for transition state [1/cm]",
           " - E0 : lower energy limit for tunnelling [kcal/mol]",
           " - VAG : upper energy limit for tunnelling [kcal/mol]",
           " - sAG : MEP position corresponding to VAG [Bohr]",
           " - s< : MEP limit towards reactant(s) [Bohr]",
           " - s> : MEP limit towards product(s) [Bohr]",
           " - P^SCT(E0): SCT tunnel probability at E0",
           "",
           rule,
           header,
           rule]
    flag_imep = False
    flag_dir = False
    for label, record in data.items():
        cells = [label] + [record.get(col, " - ") for col in columns[1:]]
        row = rowfmt % tuple(cells)
        # tag problematic conformers so footnotes can be emitted below
        if record.get("iMEP", False):
            flag_imep = True
            row += "[*]"
        if record.get("unknowndir", False):
            flag_dir = True
            row += "[+]"
        out.append(row)
    out.append(rule)
    if flag_imep:
        out.append(" [*] There are imaginary frequencies along this MEP")
    if flag_dir:
        out.append(" [+] Automatic algorithm did not determine MEP direction properly")
    out.append("")
    out.append(edge)
    out.append("")  # trailing empty line (mirrors final newline of buffer)
    for text in out:
        print(" %s" % text)
    print("")
#--------------------------------------------------#
def genpathtable2(data,stemps):
    """Print the temperature-dependent transmission-coefficient table.

    data   : dict conformer -> readout_path dict
    stemps : temperatures as '%.2f'-formatted strings
    With one conformer the table is indexed by temperature (case 1);
    otherwise one sub-table per temperature is printed (case 2).
    """
    if len(data.keys()) == 1: case, props = 1, "T (K),"
    else : case, props = 2, "conf,"
    props += "s_CVT,Gamma^CVT,kappa^SCT,gamma^CVT/SCT,RTE(SCT)"
    frow = " %7s | %9s | %11s | %11s | %13s | %8s "
    head = frow%tuple(props.split(","))
    # legend (NOTE(review): 'Borh' typo is inside a printed string and is
    # kept as-is here)
    sinfo = "="*len(head)+"\n"
    sinfo += "Columns:\n"
    sinfo += " - s_CVT : MEP position of CVT generalized transition state [Borh]\n"
    sinfo += " - Gamma^CVT : CVT recrossing transmission coefficient\n"
    sinfo += " - kappa^SCT : SCT tunnelling transmission coefficient\n"
    sinfo += " - gamma^CVT/SCT: CVT/SCT transmission coefficient\n"
    sinfo += " = Gamma^CVT * gamma^CVT/SCT * kappa^{CAG/CVT}\n"
    sinfo += " - RTE(SCT) : SCT representative tunnelling energy\n"
    sinfo += "\n"
    for line in sinfo.split("\n"): print(" %s"%line)
    itcs = list(data.keys())
    if case == 1:
        string = "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
    else: string = ""
    for sT in stemps:
        if case == 2:
            string += "TEMPERATURE: %s K\n"%sT
            string += "-"*len(head)+"\n" + head +"\n" + "-"*len(head)+"\n"
        else:
            vals = [sT]
        for itc in itcs:
            data_i = data[itc]
            if case == 2: vals = [itc]
            # skip conformers without data at this temperature
            if sT not in data_i.get("T",[]): continue
            idxT = data_i["T"].index(sT)
            for prop in props.split(",")[1:]:
                # missing values are shown as ' - '
                # (NOTE(review): 'bool1' is assigned but never used)
                try : val = data_i[prop][idxT]; bool1 = True
                except: val = " - "
                vals.append(val)
            string += frow%tuple(vals)+"\n"
        if case == 2: string += "-"*len(head)+"\n\n\n"
    if case == 1: string += "-"*len(head)+"\n"
    # print
    for line in string.split("\n"): print(" %s"%line)
    print("")
    print(" "+"="*len(head))
    print("")
#--------------------------------------------------#
def genpathtable3(data):
    """Print the MEP profile table (single-conformer case).

    data holds parallel lists keyed by 's', 'V_MEP', 'VaG(cc)', 'VaG(ic)'
    and 'mueff/mu'; entries missing at a given index print as ' - '.
    """
    columns = "s,V_MEP,VaG(cc),VaG(ic),mueff/mu".split(",")
    rowfmt = " %7s | %11s | %11s | %11s | %8s "
    header = rowfmt % tuple(columns)
    rule = "-" * len(header)
    edge = "=" * len(header)
    # legend, printed before the table itself
    legend = [edge,
              "Columns:",
              " - s : MEP coordinate [Bohr]",
              " - V_MEP : MEP total energy [kcal/mol]",
              " - VaG(cc) : Adiabatic potential in Cartesian coordinates [kcal/mol] ",
              " - VaG(ic) : Adiabatic potential in internal coordinates [kcal/mol] ",
              " - mueff/mu : ratio between effective mass and scalar mass",
              "",
              ""]
    for text in legend:
        print(" %s" % text)
    # table body: one row per point along the MEP
    body = [rule, header, rule]
    for pos, s_i in enumerate(data["s"]):
        row = [s_i]
        for col in columns[1:]:
            try:
                row.append(data[col][pos])
            except:
                row.append(" - ")
        body.append(rowfmt % tuple(row))
    body += [rule, "", edge, ""]
    for text in body:
        print(" %s" % text)
    print("")
#--------------------------------------------------#
def summary_path(targets,ltemp,dpath,dctc,dlevel):
    """Print summary tables for the 'path' output files of one TS.

    targets : mixed list; floats are temperatures, the rest TS names
    ltemp   : default temperature list
    dpath   : dict of available paths, keyed by TS name
    dctc    : dict of systems (used to enumerate conformers)
    dlevel  : level descriptor forwarded to get_pof
    """
    # Temperature(s) & transition states
    tss = []
    stemps = []
    for target in targets:
        try : stemps.append(float(target))
        except: tss.append(target)
    # Selected temperature(s)
    if len(stemps) == 0: stemps = list(ltemp)
    if len(stemps) == 0: print(" --> Temperature(s) NOT found!\n"); return
    stemps = ["%.2f"%T for T in sorted(stemps)]
    # Selected TS: anything other than exactly one selection aborts
    if len(tss) == 0:
        tss = list(dpath.keys())
    if len(tss) == 0:
        print(" --> path files NOT found!")
        return
    if len(tss) != 1:
        print(" --> System NOT selected :")
        ml = max([len(ts) for ts in tss])
        for idx in range(0,len(tss),4):
            print(" "+" ".join("%%-%is"%ml%ts for ts in tss[idx:idx+4]))
        return
    # NOTE(review): at this point len(tss) == 1 always holds, so the first
    # two branches below are dead code (incl. the random.choice fallback).
    if len(tss) == 0:
        print(" --> Transition state(s) NOT found!\n"); return
    elif len(tss) != 1:
        ts = random.choice(tss)
        print(" --> Transition state : %s [randomly selected]"%ts)
    else:
        ts = tss[0]
        print(" --> Transition state : %s "%ts)
    # selected temperature
    if len(stemps) == 1:
        print(" --> Selected temperature : %s K"%stemps[0])
    else:
        print(" --> Number temperatures : %i"%len(stemps))
    print("")
    #-------------------#
    # Read output files #
    #-------------------#
    # '[xxx]' is a placeholder replaced by each conformer label
    ref_pof = get_pof(dlevel,"path",ts+".[xxx]")
    if ts in dctc.keys():
        clusterconf = dctc[ts]
        itcs = [itc for itc,weight in clusterconf._itcs]
        pofs = [ref_pof.replace("[xxx]",itc) for itc in itcs]
    else:
        # unknown TS: glob-like match in the output folder
        folder = "/".join(ref_pof.split("/")[0:-1])
        part1,part2 = ref_pof.split("/")[-1].split("[xxx]")
        pofs = [folder+"/"+pof for pof in os.listdir(folder) \
                if part1 in pof and part2 in pof]
    # only available outputs
    pofs = [pof for pof in pofs if os.path.exists(pof)]
    pofs.sort()
    if len(pofs) == 0:
        print(" --> Unable to find any output file that")
        print(" matches with the selected arguments...\n")
        return
    data = {}
    print(" Reading output files (num files = %i):"%len(pofs))
    for pof in pofs:
        # conformer label assumed in the filename, e.g. name.<itc>.xx.yy
        itc = pof.split(".")[-3]
        print(" --> %s"%pof)
        try:
            # MEP profile only collected for a single-conformer summary
            if len(pofs) == 1: data_itc = readout_path(pof,mep=True )
            else : data_itc = readout_path(pof,mep=False)
        except:
            print(" problems reading file...")
            continue
        data[itc] = data_itc
    print("")
    #-----------------#
    # Generate tables #
    #-----------------#
    genpathtable1(data)
    # NOTE(review): 'itc' here is the last value of the loop variable above
    if len(pofs) == 1: genpathtable3(data[itc])
    genpathtable2(data,stemps)
#==================================================#
#==================================================#
# RELATED TO RCONS OUTPUT FILES #
#==================================================#
def kdir_from_lines(lines,rctype,which="k"):
    """Extract one column of a rate-constant (or Gibbs) table.

    lines  : raw output lines containing one or more '|'-separated tables
    rctype : lowercase column name to extract (e.g. 'mstst')
    which  : 'k' -> {T: value}; 'g' -> {T: [values...]} (appending)
    """
    reading = False
    total_only = False
    column = None
    collected = {}
    for raw in lines:
        text = raw.strip()
        # skip blanks, non-table lines and decorations
        if text == "" or "|" not in text:
            continue
        if "-------" in text or "Current" in text or "Elapsed" in text:
            continue
        # a header row (contains '(K)') starts a new table
        if "(K)" in text:
            reading = False
            # read MS-TST from MP table: skip MS-CVT / MS-TST/ZCT headers
            if rctype == "mstst" and \
               ("MS-CVT" in text or "MS-TST/ZCT" in text):
                continue
            cleaned = text.replace("-","").replace("/","").lower()
            headers = [h.strip() for h in cleaned.split("|")]
            # a 'ts' column means rows are per-TS and only 'total' matters
            total_only = "ts" in headers
            reading = rctype in headers
            if reading:
                column = headers.index(rctype)
            continue
        # data rows of the active table
        if reading:
            if total_only and "total" not in text:
                continue
            fields = text.split("|")
            temperature = fields[0].strip()
            entry = fields[column].strip()
            if which.lower() == "k":
                collected[temperature] = entry
            elif which.lower() == "g":
                collected[temperature] = collected.get(temperature,[]) + [entry]
    return collected
#--------------------------------------------------#
def readout_rcons(ofile,rctype):
    """Parse a Pilgrim 'rcons' output file for one rate-constant type.

    ofile  : path to the output file
    rctype : lowercase rate-constant column to extract (e.g. 'mstst')

    Returns (V0dir,V1dir,kdir,Gdir),(V0inv,V1inv,kinv,Ginv) where the V's
    are barrier heights in kcal/mol (None when absent), kdir/kinv map
    temperature strings to rate-constant strings and Gdir/Ginv map them to
    lists of Gibbs free energies.
    """
    # Data to return
    V0dir, V1dir = None, None
    V0inv, V1inv = None, None
    kdir = {}
    Gdir = {}
    kinv = {}
    Ginv = {}
    # Read file
    with open(ofile,'r') as asdf: lines = list(asdf.readlines())
    # localize parts of importance (-1 / None == section not found)
    idx1,idx2,idx3,idx4,idx5,idx6 = -1,-1,-1,-1,-1,-1
    idx7 = None
    nR = 0  # NOTE(review): number of reactants, parsed but currently unused
    for idx,line in enumerate(lines):
        if "reactant(s) ==>" in line: nR = len(line.split("==>")[-1].split("+"))
        if "min{V1(i)} of reactants" in line: idx1 = idx
        elif "FORWARD RATE CONSTANTS" in line: idx2 = idx
        elif "FORWARD GIBBS FREE" in line: idx3 = idx
        elif "BACKWARD RATE CONSTANTS" in line: idx4 = idx
        elif "BACKWARD GIBBS FREE" in line: idx5 = idx
        elif "Updating plot file:" in line: idx6 = idx
        elif "Reactants = Products" in line: idx7 = idx
    # Read barrier
    # BUG FIX: idx1 is initialised to -1 (not None); the old 'idx1 != None'
    # test was always true, so the scan ran (from the last line) even when
    # the barrier header was never found.
    if idx1 != -1:
        count = 0
        # sections are separated by dashed rules; the 3rd holds the forward
        # barriers, the 4th the backward ones (assumed fixed layout)
        for line in lines[idx1:]:
            if "SP: stationary point" in line: break
            if "-----" in line: count += 1; continue
            if count == 3:
                if V0dir is None: V0dir = float("inf")
                if V1dir is None: V1dir = float("inf")
                V0dir_i, V1dir_i = line.split("|")[1:3]
                V0dir = min(V0dir,float(V0dir_i))
                V1dir = min(V1dir,float(V1dir_i))
            if count == 4:
                if V0inv is None: V0inv = float("inf")
                if V1inv is None: V1inv = float("inf")
                V0inv_i, V1inv_i = line.split("|")[1:3]
                V0inv = min(V0inv,float(V0inv_i))
                V1inv = min(V1inv,float(V1inv_i))
    # Read rate constant
    if idx2 != -1:
        kdir = kdir_from_lines(lines[idx2:idx3],rctype,"k")
        Gdir = kdir_from_lines(lines[idx3:idx4],rctype,"g")
    if idx4 != -1:
        kinv = kdir_from_lines(lines[idx4:idx5],rctype,"k")
        Ginv = kdir_from_lines(lines[idx5:idx6],rctype,"g")
    # REACTANTS = PRODUCTS (duplicate rate constant and correct Gibbs with -RTln(2))
    if idx7 is not None:
        kdir = {T:"%.3E"%(2*float(k)) for T,k in kdir.items()}
        Gdir = {T:["%.3f"%(float(G1)-KB*float(T)*np.log(2)*KCALMOL), \
                   "%.3f"%(float(G2)-KB*float(T)*np.log(2)*KCALMOL)] \
                for T,[G1,G2] in Gdir.items()}
        kinv = None
        Ginv = None
    # correct V0inv, V1inv (backward barrier relative to products side)
    if V0inv is not None: V0inv = V0dir - V0inv
    if V1inv is not None: V1inv = V1dir - V1inv
    return (V0dir,V1dir,kdir,Gdir),(V0inv,V1inv,kinv,Ginv)
#--------------------------------------------------#
def get_rconstr(rcname,stemp,dV0dir,dV1dir,dkdir,dGdir,dV0inv,dV1inv,dkinv,dGinv):
    """Collect forward/backward rate-constant data for one reaction and T.

    rcname : reaction name (key into all the d* dicts)
    stemp  : temperature as a '%.2f'-formatted string

    Returns ((V0dir,V1dir,kdir,G1dir,G2dir),
             (V0inv,V1inv,kinv,G1inv,G2inv), bool_inv) where missing
    backward data is replaced by ' - ' strings and bool_inv tells whether
    the backward direction was available.
    Raises Exception (after printing a diagnostic) when the reaction or
    temperature is missing from the forward data.
    """
    # forward (direct) data is mandatory; a missing key OR a non-numeric
    # barrier (e.g. None) lands in the except branch
    # (bare 'except:' narrowed to 'except Exception:' so Ctrl-C still works)
    try:
        V0dir = "%.2f"%dV0dir[rcname]
        V1dir = "%.2f"%dV1dir[rcname]
        kdir = dkdir[rcname]
        Gdir = dGdir[rcname]
    except Exception:
        print(" --> Data NOT found for: %s"%rcname)
        raise Exception
    try : kdir = kdir[stemp]
    except Exception:
        print(" --> Temperature NOT found: %s K"%stemp)
        raise Exception
    # Gibbs energies are optional
    try : G1dir,G2dir = Gdir[stemp]
    except Exception: G1dir,G2dir = " - ", " - "
    # inverse (backward) direction: optional as a whole
    try:
        V0inv = "%.2f"%dV0inv[rcname]
        V1inv = "%.2f"%dV1inv[rcname]
        kinv = dkinv[rcname][stemp]
        G1inv,G2inv = dGinv[rcname][stemp]
        bool_inv = True
    except Exception:
        V0inv = " - "
        V1inv = " - "
        kinv = " - "
        G1inv,G2inv = " - ", " - "
        bool_inv = False
    # return data
    return (V0dir,V1dir,kdir,G1dir,G2dir), (V0inv,V1inv,kinv,G1inv,G2inv), bool_inv
#--------------------------------------------------#
def summary_rcons(targets,ltemp,dchem,dlevel):
    """Print rate-constant summary tables for one or more reactions.

    targets : first entry is the rate-constant type (e.g. 'mstst'); the
              remaining entries are temperatures (floats) or reaction names
    ltemp   : default temperature list
    dchem   : dict of reactions; dchem[name][0]/[2] are reactant/product
              lists (used for the (u)/(b) molecularity labels)
    dlevel  : level descriptor forwarded to get_pof
    """
    #-------------#
    # Preparation #
    #-------------#
    valid_rcons = "mstst,mststzct,mststsct,mscvt,mscvtzct,mscvtsct,"
    valid_rcons += "mptstzct,mptstsct,mpcvt,mpcvtzct,mpcvtsct"
    valid_rcons = valid_rcons.split(",")
    # Get selected rctype
    if len(targets) == 0:
        print(" --> Rate const REQUIRED :")
        print(" %s %s %s %s %s %s"%tuple(valid_rcons[0:6]))
        print(" %s %s %s %s %s"%tuple(valid_rcons[6: ]))
        return
    rctype = targets.pop(0).lower().strip()
    # 'tst' is accepted as a shorthand for 'mstst'
    if rctype == "tst": rctype = "mstst"
    if rctype not in valid_rcons:
        print(" --> Invalid constant type: %s\n"%rctype)
        print(" --> VALID rate constants :")
        print(" %s %s %s %s %s %s"%tuple(valid_rcons[0:6]))
        print(" %s %s %s %s %s"%tuple(valid_rcons[6: ]))
        return
    print(" --> Rate constant type : %s"%rctype)
    # Temperature(s) & Reaction(s): floats are temperatures, rest are names
    srcnames = []
    stemps = []
    for target in targets:
        try : stemps.append(float(target))
        except: srcnames.append(target)
    if len(stemps ) == 0: stemps = list(ltemp)
    if len(srcnames) == 0: srcnames = list(dchem.keys())
    # float --> str
    stemps = ["%.2f"%T for T in sorted(stemps)]
    if len(stemps) == 1:
        print(" --> Selected temperature : %s K"%stemps[0])
    if len(srcnames) == 1:
        print(" --> Selected reaction : %s"%srcnames[0])
    print("")
    #-------------------#
    # Read output files #
    #-------------------#
    print(" Reading files:")
    dV0dir,dV1dir,dkdir,dGdir = {},{},{},{}
    dV0inv,dV1inv,dkinv,dGinv = {},{},{},{}
    count = 0
    for rcname in srcnames:
        pof = get_pof(dlevel,"rcons",rcname)
        if not os.path.exists(pof):
            print(" --> %s [NOT FOUND]"%pof)
            continue
        print(" --> %s"%pof)
        data_dir,data_inv = readout_rcons(pof,rctype)
        V0dir_i,V1dir_i,kdir_i,Gdir_i = data_dir
        V0inv_i,V1inv_i,kinv_i,Ginv_i = data_inv
        if kdir_i == {}:
            print(" %s rate constant NOT FOUND!!\n"%rctype)
            continue
        dV0dir[rcname] = V0dir_i
        dV1dir[rcname] = V1dir_i
        dkdir[rcname] = kdir_i
        dGdir[rcname] = Gdir_i
        dV0inv[rcname] = V0inv_i
        dV1inv[rcname] = V1inv_i
        dkinv[rcname] = kinv_i
        dGinv[rcname] = Ginv_i
        count += 1
    print("")
    if count == 0: return
    #-----------------------#
    # Print table with data #
    #-----------------------#
    # first column + its width depend on what varies (reactions vs temps)
    if len(stemps) == 1:
        props,ml = "reaction,", max([len(n) for n in srcnames]+[8])
    elif len(srcnames) == 1:
        props,ml = "T (K)," , 8
    else:
        props,ml = "reaction,", 8
    frow = " %%%is | %%7s | %%7s | %%9s | %%9s | %%14s "%ml
    props1 = props + "V0_dir,V1_dir,G_dir(v),G_dir(p),k_dir"
    props2 = props + "V0_inv,V1_inv,G_inv(v),G_inv(p),k_inv"
    string = "Columns:\n"
    string += " - V0 : difference in electronic energy between the transition\n"
    string += " state and the reactant(s) [kcal/mol]\n"
    string += " - V1 : V0 corrected with vibrational zero-point energy [kcal/mol]\n"
    string += " - G(v) : Gibbs free energy of activation [kcal/mol] calculated\n"
    string += " for a volume per molecule of v = 1cm^3\n"
    string += " - G(p) : Gibbs free energy of activation [kcal/mol] calculated\n"
    string += " for a volume per molecule of v = kB*T/p0 with p0=1 bar\n"
    string += " - k : reaction rate constant\n"
    string += " * unimolecular reaction (u) --> [1/s]\n"
    string += " * bimolecular reaction (b) --> [cm^3/molecule/s]\n"
    string += "\n"
    string += " - _dir : direct /forward reaction\n"
    string += " - _inv : inverse/backward reaction\n"
    string += "\n"
    for line in string.split("\n"): print(" %s"%line)
    # A) Several reactions, one temperature
    # NOTE(review): case 1 is also the default when both several reactions
    # AND several temperatures were selected (one sub-table per T).
    if len(stemps ) == 1: case = 1
    elif len(srcnames) == 1: case = 2
    else : case = 1
    srcnames = sorted(dV0dir.keys())
    string = ""
    head1 = frow%tuple(props1.split(","))
    head2 = frow%tuple(props2.split(","))
    # case 2: a single reaction -> one table pair, rows indexed by T
    if case == 2:
        string += "REACTION: %s\n\n"%srcnames[0]
        table1 = "-"*len(head1)+"\n"+head1 +"\n"+"-"*len(head1)+"\n"
        table2 = "-"*len(head2)+"\n"+head2 +"\n"+"-"*len(head2)+"\n"
        boolt2 = False
    for stemp in stemps:
        # case 1: one table pair per temperature, rows indexed by reaction
        if case == 1:
            string += "TEMPERATURE: %s K\n\n"%stemp
            table1 = "-"*len(head1)+"\n"+head1 +"\n"+"-"*len(head1)+"\n"
            table2 = "-"*len(head2)+"\n"+head2 +"\n"+"-"*len(head2)+"\n"
            boolt2 = False
        for rcname in srcnames:
            # Get data
            idata = (rcname,stemp,dV0dir,dV1dir,dkdir,dGdir,\
                     dV0inv,dV1inv,dkinv,dGinv)
            try : odata = get_rconstr(*idata)
            except: return
            V0dir,V1dir,kdir,G1dir,G2dir = odata[0]
            V0inv,V1inv,kinv,G1inv,G2inv = odata[1]
            # the backward table is only printed when any backward data exists
            if odata[2]: boolt2 = True
            # unimolecular or bimolecular label
            if len(dchem[rcname][0]) == 1: kdir = kdir+" (u)"
            else : kdir = kdir+" (b)"
            if " - " not in kinv:
                if len(dchem[rcname][2]) == 1: kinv = kinv+" (u)"
                else : kinv = kinv+" (b)"
            # add to table
            if case == 1: idata = [rcname]
            else : idata = [stemp ]
            idata1 = idata+[V0dir,V1dir,G1dir,G2dir,kdir]
            idata2 = idata+[V0inv,V1inv,G1inv,G2inv,kinv]
            table1 += frow%tuple(idata1)+"\n"
            table2 += frow%tuple(idata2)+"\n"
        if case == 1:
            table1 += "-"*len(head1)+"\n\n"
            table2 += "-"*len(head2)+"\n\n"
            string += table1
            if boolt2: string += table2
            string += "\n"
    if case == 2:
        table1 += "-"*len(head1)+"\n\n"
        table2 += "-"*len(head2)+"\n\n"
        string += table1
        if boolt2: string += table2
        string += "\n"
    for line in string.split("\n"): print(" %s"%line)
#==================================================#
#==================================================#
def main(idata,status,case,targets):
    """Entry point of the summary command: dispatch to pfn/path/rcons.

    idata   : tuple with the parsed Pilgrim input data (see expansion below)
    status  : file/folder status object checked by status_check
    case    : tuple ((dof,hlf,plotfile),dlevel,software)
    targets : remaining command-line arguments; first one selects the
              output type ('pfn', 'path' or 'rcons')
    """
    # NOTE(review): stat2check/mustexist/tocreate are empty, so the checks
    # below are effectively no-ops unless the lists are populated later.
    stat2check = []
    mustexist = []
    tocreate = []
    #-------------------------------------------------------#
    # Read Pilgrim input files, check file/folder status    #
    # and expand tuple 'case'                               #
    #-------------------------------------------------------#
    # expand data
    (dctc,dimasses), ltemp, dpath, (dtesLL,dtesHL), dchem, dkmc, ddlevel = idata
    # status ok?
    fstatus = status_check(status,stat2check)
    if fstatus == -1: exit()
    # existency of folders
    fstatus = ffchecking(mustexist,tocreate)
    if fstatus == -1: exit()
    # expand case
    (dof,hlf,plotfile),dlevel,software = case
    #-------------------------------------------------------#
    #---------------------------------#
    # files with data and output file #
    #---------------------------------#
    if len(targets) < 1 :
        print(" --> Type of output REQUIRED! [pfn/path/rcons]")
        return
    option = targets.pop(0).lower().strip()
    print(" --> Option : %s"%option)
    # dispatch to the proper summary generator
    if option == "pfn" : summary_pfn(targets,ltemp,dctc,dlevel)
    elif option == "path" : summary_path(targets,ltemp,dpath,dctc,dlevel)
    elif option == "rcons": summary_rcons(targets,ltemp,dchem,dlevel)
    else : print(" Unknown option!")
    print("")
    return
#==================================================#
#---------------------------------------------------------------#
|
997,138 | af1af18bf34c1d119f0e3a7e83d220a0516f403f | #!/usr/bin/env python3
"""
Defines a single function birthday_dictionary() that takes no output and performs some
standard-input & standard-output operations.
"""
def birthday_dictionary():
    '''
    Interactive birthday lookup against a small built-in database.

    Takes no arguments; prompts the user (stdin) until a known name is
    entered, then prints that person's birthday (stdout).
    '''
    # NOTE(review): dates kept exactly as in the original data set
    b_d = dict()
    b_d['BF'] = '01/17/1706'
    b_d['GW'] = '02/28/1817'
    # fixed typos in the user-facing messages ("birdthday"/"birdhdays"/"Ups")
    print("Welcome to the birthday dictionary. We know the birthdays of:")
    for name in b_d:
        print(name)
    while True:
        name = input("Whose birthday do you want to look up?: ")
        if name in b_d:
            break
        print("Oops, that's not one of the birthdays I know!")
        print("Please check you typed the name exactly as it appears above.")
    print(name + "'s birthday is " + b_d[name])
if __name__ == '__main__':
    # fixed typo: 'birthday_dictionry' -> 'birthday_dictionary'
    print('birthday_dictionary() is being run by itself.')
    birthday_dictionary()
else:
    print('birthday_dictionary() is being imported from another module.')
|
997,139 | 57f744c18f1ccdb18497dbca268d492eed5f8f74 | import os
import warnings
from typing import List, cast
import cv2
import numpy as np
from mtcnn.mtcnn import MTCNN
from .utils import fix_mtcnn_bb, preprocess, get_center_box
from .facenet_types import AlignResult, Face, Landmarks, Image
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Detector:
    """MTCNN-based face detector returning cropped, preprocessed faces."""

    def __init__(
            self,
            face_crop_height: int = 160,
            face_crop_width: int = 160,
            face_crop_margin: float = .4,
            min_face_size: int = 20,
            is_rgb: bool = True,
            use_affine: bool = False) -> None:
        # MTCNN performs the actual detection; the remaining attributes
        # control how every detected face is cropped and normalised.
        self.mtcnn = MTCNN()
        self.face_crop_height = face_crop_height
        self.face_crop_width = face_crop_width
        self.face_crop_margin = face_crop_margin
        self.min_face_size = min_face_size  # faces smaller than this are discarded
        self.is_rgb = is_rgb
        self.use_affine = use_affine

    def find_faces(self, image: Image, detect_multiple_faces: bool = True) -> List[Face]:
        """Detect faces in *image* and return them as Face objects.

        When detect_multiple_faces is False and several faces are found,
        only the box returned by get_center_box is kept.
        """
        faces = []
        results = cast(List[AlignResult], self._get_align_results(image))
        if not detect_multiple_faces and len(results) > 1:
            img_size = np.asarray(image.shape)[0:2]
            results = get_center_box(img_size, results)
        for result in results:
            face = Face(bounding_box=result.bounding_box, landmarks=result.landmarks)
            bb = result.bounding_box
            # BUG FIX: was 'pass', which silently disabled the size filter;
            # undersized detections must be skipped entirely.
            if bb[2] - bb[0] < self.min_face_size or bb[3] - \
                    bb[1] < self.min_face_size:
                continue
            # preprocess changes RGB -> BGR
            processed = preprocess(
                image,
                self.face_crop_height,
                self.face_crop_width,
                self.face_crop_margin,
                bb,
                result.landmarks,
                self.use_affine)
            resized = cv2.resize(
                processed, (self.face_crop_height, self.face_crop_width))
            # RGB to BGR
            if not self.is_rgb:
                resized = resized[..., ::-1]
            face.image = resized
            faces.append(face)
        return faces

    def _get_align_results(self, image: Image) -> List[AlignResult]:
        """Run MTCNN and convert its raw results into AlignResult records."""
        mtcnn_results = self.mtcnn.detect_faces(image)
        img_size = np.asarray(image.shape)[0:2]
        align_results = cast(List[AlignResult], [])
        for result in mtcnn_results:
            bb = result['box']
            # bb[x, y, dx, dy] -> bb[x1, y1, x2, y2]
            bb = fix_mtcnn_bb(
                img_size[0], img_size[1], bb)
            align_result = AlignResult(
                bounding_box=bb,
                landmarks=result['keypoints'])
            align_results.append(align_result)
        return align_results

    def close(self):
        """Release the underlying MTCNN detector.

        NOTE(review): calling __del__ directly is unusual; confirm MTCNN
        exposes no explicit close/release method before changing this.
        """
        self.mtcnn.__del__()
|
def createCityPopDict(filename='pop3.txt'):
    """Read 'rank City, ST population' lines into {city: population}.

    filename : input file (defaults to the original 'pop3.txt').
    The first token of each line (rank) is skipped, the last is the
    population; everything between is the city name.
    Fixes vs the original: the file handle is closed via 'with', the
    builtin name 'file' is no longer shadowed, and the last line is no
    longer dropped when the file lacks a trailing newline.
    """
    with open(filename, 'r') as infile:
        lines = infile.read().split('\n')
    D = {}
    for line in lines:
        tokens = line.split()
        if not tokens:
            continue  # skip blank/trailing lines
        D[' '.join(tokens[1:-1])] = int(tokens[-1])
    return D
def createCityLatLonDict(filename='latlon3.txt'):
    """Read 'lat lon City, ST' lines into {city: (lat, -lon)}.

    filename : input file (defaults to the original 'latlon3.txt').
    The longitude is negated -- presumably the data file stores western
    longitudes as positive values (TODO confirm against the data).
    Fixes vs the original: 'with' closes the handle, 'file' is not
    shadowed, and the final line survives a missing trailing newline.
    """
    with open(filename, 'r') as infile:
        lines = infile.read().split('\n')
    D = {}
    for line in lines:
        tokens = line.split()
        if not tokens:
            continue  # skip blank/trailing lines
        D[' '.join(tokens[2:])] = (float(tokens[0]), -float(tokens[1]))
    return D
def createStateColorDict(filename='stateAdj.txt'):
    """Read alternating state-list / color-index lines into a dict.

    filename : input file (defaults to the original 'stateAdj.txt').
    Line pairs look like 'NY,NJ,CT' followed by an integer; only the
    FIRST state of each list is stored (lower-cased) with that integer.
    Fixes vs the original: 'with' closes the handle, 'file' is not
    shadowed, and zip replaces the manual index counter.
    """
    with open(filename, 'r') as infile:
        lines = infile.read().split('\n')
    D = {}
    # pair every even line (states) with the following odd line (color)
    for state_line, color_line in zip(lines[0::2], lines[1::2]):
        if not state_line.strip():
            continue  # skip blank/trailing lines
        first_state = state_line.split(',')[0].lower()
        D[first_state] = int(color_line)
    return D
import cTurtle
import math
def drawLower48Map():
    """Draw a dot map of US cities with cTurtle and write 'output.txt'.

    Dot position comes from the lat/lon dictionary, dot size from the
    city population and dot color from the state color dictionary.
    A tab-aligned report of all plotted cities is saved to output.txt.
    """
    cityPopDict = createCityPopDict()
    cityLatLonDict = createCityLatLonDict()
    stateColorDict = createStateColorDict()
    colorList = ['red','green','blue','purple']
    # collect all coordinates to size the drawing window
    listLat = []
    listLon = []
    cities = cityLatLonDict.keys()
    for city in cities:
        latLon = cityLatLonDict[city]
        listLat.append(latLon[0])
        listLon.append(latLon[1])
    minLat = min(listLat)
    maxLat = max(listLat)
    minLon = min(listLon)
    maxLon = max(listLon)
    t = cTurtle.Turtle('turtle')
    t.ht()
    # world coordinates: x = longitude, y = latitude
    t.setWorldCoordinates(minLon,minLat,maxLon,maxLat)
    outputText = ''
    outputText += '{0:30}{1:15}{2:15}{3:15}{4:15}{5:15}\n'.format('cityname:','latitude:','longitude:','population:','dot size:','dot color:')
    outputText += '\n'
    for city in cities:
        latLon = cityLatLonDict[city]
        x = latLon[1]
        y = latLon[0]
        # state abbreviation is the last comma-separated field of the name
        l = city.split(',')
        state = ''.join(l[-1:])
        colorIndex = stateColorDict[state]
        color = colorList[colorIndex]
        #print(city,' (',x,',',y,')')
        # dot size grows with sqrt(population); default size 4 when the
        # city has no population entry
        if city in cityPopDict:
            dotSize = 4 + math.ceil(math.sqrt(cityPopDict[city]/50000))
            pop = cityPopDict[city]
        else:
            dotSize = 4
            pop = '-'
        t.up()
        t.setposition(x,y)
        t.down()
        t.dot(dotSize,color)
        # longitude is reported back with its original (positive-west) sign
        outputText += '{0:30}{1:<15}{2:<15}{3:<15}{4:<15}{5:15}\n'.format(city,y,(x*-1),pop,dotSize,color)
    # NOTE(review): 'file' shadows the builtin here; left unchanged
    file = open('output.txt','w')
    file.write(outputText)
    file.close()
if __name__ == '__main__':
    # Guarded so importing this module for its dictionary helpers
    # does not trigger the turtle drawing as a side effect.
    drawLower48Map()
|
997,141 | e9796e3647115d626179cd5e4dd775099379730b | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 Josep Torra
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
vebot
Discord bot to assist in "Vella Escola JDR"
Usage:
vebot [options]
Options:
-h --help Show this message
--token=TOKEN Bot security token
--version Show version
--log-level=LEVEL Level of logging to produce [default: INFO]
--log-file=PATH Specify a file to write the log
-v --verbose Verbose logging (equivalent to --log-level=DEBUG)
Log levels: DEBUG INFO WARNING ERROR CRITICAL
"""
import json
import logging
import sys
from os import path, listdir
from discord import Activity, ActivityType
from discord.ext import commands
from discord.ext.commands.errors import BotMissingPermissions, \
MissingPermissions, \
NotOwner, \
MissingRequiredArgument, \
CommandNotFound
from docopt import docopt
from lib.books import load_json_from_disk, Library
CURDIR = path.dirname(path.abspath(__file__))
TOPDIR = path.dirname(CURDIR)
class Settings(): # pylint: disable=too-many-instance-attributes
    """Application settings"""
    # settings are persisted as JSON in the working directory
    SETTINGS_FILE: str = path.abspath('.vebot.json')
    # settings users may change via bot commands
    USER_FACING_SETTINGS: list = ['language', 'opengame', 'system', 'mode',
                                  'attributes', 'score_threshold', 'monsters']
    # NOTE(review): __filename is declared but never assigned or used
    __filename: str
    token: str
    language: str
    opengame: str
    system: str
    mode: str
    attributes: str
    score_threshold: int
    monsters: str
    def __init__(self):
        # defaults, then overridden by whatever is found on disk
        self.token = ""
        self.language = "en"
        self.opengame = "yes"
        self.system = "ve" # vieja escuela
        self.mode = "default"
        self.attributes = "ve"
        self.score_threshold = 60
        self.books_path = path.join(TOPDIR, 'books')
        self.monsters = "mmbecmi"
        self.load()
    @property
    def library_paths(self) -> list:
        """Computes library paths: the base books dir plus any existing
        '<system>_<language>' / '<mode>_<language>' subdirectories."""
        result = [self.books_path]
        for key in ['system', 'mode']:
            sub_path = path.join(self.books_path, f'{self.__dict__[key]}_{self.language}')
            if path.isdir(sub_path):
                result += [sub_path]
        logging.info("Library paths:\n%s", "\n".join(result))
        return result
    # NOTE(review): 'any' below is the builtin, not typing.Any -- harmless
    # at runtime but misleading as an annotation
    def set(self, key: str, value: any, valid_values: list = None) -> bool:
        """Changes a setting value saving it on disk too"""
        if valid_values and value not in valid_values:
            return False
        self.__dict__[key] = value
        self.save()
        return True
    def get_valid_values(self, setting: str) -> list:
        """Provides setting constrains (None for unknown settings,
        an empty list when any value is accepted)"""
        if setting not in self.USER_FACING_SETTINGS:
            return None
        valid_values = []
        if setting == 'opengame':
            valid_values = ['yes', 'no']
        elif setting == 'system':
            valid_values = ['ve', 'ose']
        elif setting == 'attributes':
            valid_values = ['inorder', 'inorder+', 've', 'heroic']
        return valid_values
    def load(self):
        """Loads settings from specified file"""
        if not path.isfile(self.SETTINGS_FILE):
            return
        data = load_json_from_disk(self.SETTINGS_FILE)
        for (key, value) in data.items():
            self.__dict__[key] = value
    def save(self):
        """Saves json to disk"""
        # NOTE(review): the '__' filter never matches name-mangled private
        # attributes, and 'token' IS written to disk -- confirm intended
        with open(self.SETTINGS_FILE, 'w') as handle:
            data = dict()
            for (key, value) in self.__dict__.items():
                if not key.startswith('__'):
                    data[key] = value
            json.dump(data, handle)
    def details(self) -> str:
        """Provides a string with the settings to show at !info"""
        # NOTE(review): stray spaces around '\n' in the last entries are
        # reproduced in the bot output; kept as-is here
        return f"- **language**: [{self.language}]\n" \
               f"- **opengame**: [{self.opengame}]\n" \
               f"- **system**: [{self.system}]\n" \
               f"- **mode**: [{self.mode}]\n" \
               f"- **attributes**: [{self.attributes}]\n " \
               f"- **score_threshold**: [{self.score_threshold}]\n " \
               f"- **monsters**: [{self.monsters}]\n"
class Cogs():
    """Handles the list of Cogs"""
    __COG_PATH = path.join(CURDIR, 'cogs')
    __cogs: list

    def __init__(self):
        self.reload([])

    def __is_cog(self, file: str, cog_path: str = "") -> bool:
        """True when *file* is a loadable cog module inside *cog_path*
        (defaults to the main cogs folder)."""
        if not cog_path:
            cog_path = self.__COG_PATH
        # Only regular .py files count as cogs; this also skips
        # subdirectories and stray non-Python files in the folder.
        return file.endswith('.py') and path.isfile(path.join(cog_path, file))

    def reload(self, subdirs: list):
        """reloads the cog list with the selected mode"""
        self.__cogs = [f'cogs.{cog[:-3]}'
                       for cog in listdir(self.__COG_PATH) if self.__is_cog(cog)]
        for sub in subdirs:
            if not sub:
                continue
            sub_path = path.join(self.__COG_PATH, sub)
            if path.isdir(sub_path):
                # BUGFIX: build the dotted module path from the subdir
                # *name*, not the filesystem path (the original produced
                # invalid names like "cogs./abs/path/sub.cog"), and check
                # file existence inside the subdir rather than the main
                # cogs folder.
                self.__cogs += [f'cogs.{sub}.{cog[:-3]}'
                                for cog in listdir(sub_path)
                                if self.__is_cog(cog, sub_path)]

    def get(self) -> list:
        """returns the list of cogs to be loaded"""
        return self.__cogs
class App(commands.Bot):
    """The bot application"""
    AVATAR_PATH = path.join(TOPDIR, 'img', 'avatar.png')
    # Fingerprint of the bundled avatar; compared against the current
    # Discord avatar on startup (see on_ready).
    AVATAR_HASH = '0e2cba3d8bec4ff4db557700231b3c10'
    __settings: Settings
    __cogs: Cogs
    __activity: Activity
    library: Library
    version_number: str
    current_mode: str

    def __init__(self, settings: Settings, cogs: Cogs, version: str = '1.0', **options):
        super().__init__(**options)
        self.__settings = settings
        self.__cogs = cogs
        self.__activity = Activity(type=ActivityType.playing, name='vebot')
        self.version_number = version
        self.library = Library(settings)
        # Try to load cogs
        try:
            self.__load_cogs()
        except (commands.ExtensionNotLoaded,
                commands.ExtensionNotFound,
                commands.NoEntryPointError,
                commands.ExtensionFailed):
            # Cogs are essential: abort startup if any fails to load.
            logging.error('Error on loading cogs.')
            sys.exit(1)
        # Add mode command
        self.add_command(App.__set)

    @property
    def app_settings(self):
        """Returns __settings"""
        return self.__settings

    @property
    def app_cogs(self):
        """Returns __cogs"""
        return self.__cogs

    @property
    def app_avatar(self):
        """Reads app avatar from disk"""
        with open(self.AVATAR_PATH, 'rb') as handle:
            return handle.read()

    def reload_library(self):
        """Reload library"""
        self.library = Library(self.app_settings)

    def reload_cogs(self):
        """Reload cogs"""
        # Recompute the cog list for the current system/mode, then
        # reload every extension so the new command set takes effect.
        self.app_cogs.reload([self.app_settings.system, self.app_settings.mode])
        for cog in self.app_cogs.get():
            logging.info('loading %s', cog)
            self.reload_extension(cog)

    async def on_ready(self):
        """Handles the event triggered when bot is ready"""
        logging.info('Bot online as %s.', self.user)
        logging.info('avatar %s', self.user.avatar)
        # NOTE(review): user.avatar is an avatar hash or Asset depending
        # on the discord.py version; comparing it against this local MD5
        # string may never be equal, which would re-upload the avatar on
        # every start -- confirm against the installed library version.
        if not self.user.avatar or self.user.avatar != self.AVATAR_HASH:
            logging.info('Changing avatar.')
            await self.user.edit(avatar=self.app_avatar)
        await self.change_presence(activity=self.__activity)

    async def on_message(self, message):
        """General message handler"""
        await self.process_commands(message)

    async def on_command_error(self, context, exception):
        """Handle command errors"""
        # Map known command errors to user-friendly chat messages;
        # anything unmapped is only logged with a stack trace.
        # NOTE(review): err.missing_perms was renamed in discord.py 2.x
        # (missing_permissions) -- confirm the pinned library version.
        message = {
            BotMissingPermissions: lambda err: 'Missing Bot Permission: '
                                               f'{", ".join(err.missing_perms)}.',
            MissingPermissions: lambda err: 'Missing Permission: '
                                            f'{", ".join(err.missing_perms)}',
            NotOwner: lambda err: 'Missing Permission: You are not an owner.',
            MissingRequiredArgument: lambda err: 'Missing argument: '
                                                 'Check "!help".',
            CommandNotFound: lambda err: 'Command not found: Check "!help".',
        }
        exception_type = exception.__class__
        if exception_type in message:
            await context.send(message[exception_type](exception))
        else:
            logging.exception(exception, stack_info=True)

    def __load_cogs(self):
        """Load all cogs into bot."""
        for cog in self.__cogs.get():
            logging.info('loading %s', cog)
            self.load_extension(cog)

    @staticmethod
    @commands.command(name="set", aliases=['s'])
    @commands.is_owner()
    async def __set(ctx: commands.Context, setting: str, value: str):
        """Command to change a setting value."""
        settings = ctx.bot.app_settings
        valid_settings = settings.USER_FACING_SETTINGS
        # Accept unambiguous prefixes, e.g. "lang" for "language".
        found = [key for key in valid_settings if key.startswith(setting)]
        if len(found) == 1:
            setting = found[0]
        else:
            await ctx.send(f'Invalid setting "{setting}". Valid choices are:'
                           f' [{", ".join(valid_settings)}]')
            return
        valid_values = settings.get_valid_values(setting)
        if not settings.set(setting, value, valid_values):
            if valid_values:
                await ctx.send(f'invalid value, use [{", ".join(valid_values)}]')
            return
        # Reload library when needed
        if setting in ['language', 'system', 'mode']:
            ctx.bot.reload_library()
        # Reload cogs when needed
        if setting in ['system', 'mode']:
            try:
                logging.info('%s triggered a cogs reload.', ctx.author)
                await ctx.send(f'{ctx.message.author.mention} triggered a mode change.')
                ctx.bot.reload_cogs()
            except (commands.ExtensionNotLoaded,
                    commands.ExtensionNotFound,
                    commands.NoEntryPointError,
                    commands.ExtensionFailed):
                # Inform User that reload was not successful
                message_error = 'Error on reloading cogs.'
                logging.error(message_error)
                await ctx.send(message_error)
                return
        message_success = f'{setting} changed to "{value}".'
        logging.info(message_success)
        await ctx.send(message_success)
        return
def main():
    """Entry point: parse CLI options, configure logging, obtain the
    bot token (flag, saved settings, or interactive prompt), and run."""
    args = docopt(__doc__, version="0.1")
    if args.pop('--verbose'):
        loglevel = 'DEBUG'
    else:
        loglevel = args.pop('--log-level').upper()
    logging.basicConfig(filename=args.pop('--log-file'), filemode='w',
                        level=loglevel, format='%(levelname)s: %(message)s')
    # Check python version.
    # BUGFIX: the old `major == 3 and minor >= 6` test would wrongly
    # reject any future major version; a tuple compare is correct.
    if sys.version_info < (3, 6):
        logging.error('The bot was developed for Python 3. Please use '
                      'Version 3.6 or higher.')
        sys.exit(1)
    settings = Settings()
    if args['--token']:
        settings.token = args.pop('--token')
    if not settings.token:
        # No token on disk or CLI: ask interactively and persist it.
        settings.token = input('enter token:')
        if not settings.token:
            logging.error('no token provided')
            sys.exit(1)
        settings.save()
    app = App(settings, Cogs(), command_prefix='.', version='0.1')
    logging.info('Starting bot')
    app.run(settings.token)


if __name__ == '__main__':
    main()
|
def call(services, packet, response):
    """Debug stub: print two fixed marker values."""
    for marker in (123, 234):
        print(marker)
|
997,143 | 20aae9196c7bb24ef22072f08cf5222bd515f1b1 | # -*- coding: utf-8 -*-
"""Test module for client and server modules."""
import pytest
from server import BUFFER_LENGTH, OK_200, ERR_400, ERR_405, ERR_505
U_H = u'HTTP/1.1'
U_200 = u'{} {}'.format(U_H, OK_200.decode('utf-8'))
U_400 = u'{} {}'.format(U_H, ERR_400.decode('utf-8'))
U_405 = u'{} {}'.format(U_H, ERR_405.decode('utf-8'))
U_505 = u'{} {}'.format(U_H, ERR_505.decode('utf-8'))
TESTS = [
'aaaaaaaaaaaaaaaaaaaaaaa',
'aa',
'a' * BUFFER_LENGTH,
u'£©°',
]
GOOD_REQUEST = (b'GET /index.html HTTP/1.1\r\n'
b'Host: theempire.com\r\n'
b'\r\n')
BAD_NOT_GET = (b'POST /index.html HTTP/1.1\r\n'
b'Host: theempire.com\r\n'
b'\r\n')
BAD_NO_HOST = (b'GET /index.html HTTP/1.1\r\n'
b'\r\n')
BAD_NO_PROTO = (b'GET /index.html\r\n'
b'Host: theempire.com\r\n'
b'\r\n')
BAD_WRONG_PROTO = (b'GET /index.html HTTP/1.0\r\n'
b'Host: theempire.com\r\n'
b'\r\n')
BAD_NO_CRLF = (b'GET /index.html HTTP/1.1\r\n'
b'Host: theempire.com\r\n')
U_G_R = u'{}'.format(GOOD_REQUEST.decode('utf-8'))
U_BNG = u'{}'.format(BAD_NOT_GET.decode('utf-8'))
U_BNH = u'{}'.format(BAD_NO_HOST.decode('utf-8'))
U_BNP = u'{}'.format(BAD_NO_PROTO.decode('utf-8'))
U_BWP = u'{}'.format(BAD_WRONG_PROTO.decode('utf-8'))
U_BNC = u'{}'.format(BAD_NO_CRLF.decode('utf-8'))
TEST_PARSE = [
(GOOD_REQUEST, None, OK_200, b'/index.html'),
(BAD_NOT_GET, ValueError, ERR_405, b''),
(BAD_NO_HOST, ValueError, ERR_400, b''),
(BAD_NO_PROTO, ValueError, ERR_400, b''),
(BAD_WRONG_PROTO, ValueError, ERR_505, b''),
(BAD_NO_CRLF, ValueError, ERR_400, b''),
]
TEST_CLI_REQUEST = [
(U_G_R, U_200),
(U_BNG, U_405),
(U_BNH, U_400),
(U_BNP, U_400),
(U_BWP, U_505),
(U_BNC, U_400),
]
ERR_LIST = [
ERR_400,
ERR_405,
ERR_505,
]
# @pytest.mark.parametrize('msg', TESTS)
# def test_system(msg):
# """Test that messages to server are returned as the same message."""
# from client import client
# assert client(msg) == msg
@pytest.mark.parametrize('cli_request, msg', TEST_CLI_REQUEST)
def test_system(cli_request, msg):
    """Test that messages to server are returned as the same message."""
    from client import client
    reply_lines = client(cli_request).split('\r\n')
    assert reply_lines[0] == msg
    assert '' in reply_lines
@pytest.mark.parametrize('cli_request, error, msg, uri', TEST_PARSE)
def test_parse_request(cli_request, error, msg, uri):
    """Test that parse_request returns the URI or raises appropriate error."""
    from server import parse_request
    if error:
        with pytest.raises(error) as excinfo:
            parse_request(cli_request)
        # BUGFIX: pytest's ExceptionInfo has no `.args`; the raised
        # exception's payload lives on `.value`, and the check must run
        # after the `with` block (statements after the raising call
        # never execute inside it).
        assert excinfo.value.args[0] == msg
    else:
        assert parse_request(cli_request) == uri
def test_response_ok():
    """Test that response_ok returns '200 OK' if connection is good."""
    from server import response_ok
    status_line = response_ok().split(b'\r\n')[0]
    assert status_line == b'HTTP/1.1 %s' % OK_200
@pytest.mark.parametrize('err_msg', ERR_LIST)
def test_response_error(err_msg):
    """Test that response_error returns '500 Internal Server Error'."""
    from server import response_error
    expected = b'HTTP/1.1 %s' % err_msg
    assert response_error(err_msg).split(b'\r\n')[0] == expected
|
997,144 | cf81301339549a809a1997aaf56eae8491cc124b | import sys
# Faster line reads than the builtin input() (deliberately shadows it).
input = sys.stdin.readline
n = int(input())
N = list(sorted(map(int, input().split())))  # sort immediately after reading
m = int(input())
M = list(map(int, input().split()))
def binary(i, N, start, end) :
    """Return 1 when value *i* occurs in the sorted list *N* within
    [start, end], else 0 (iterative binary search)."""
    lo, hi = start, end
    while lo <= hi:
        mid = (lo + hi) // 2
        if N[mid] == i:
            return 1
        if i < N[mid]:
            hi = mid - 1  # continue in the lower half
        else:
            lo = mid + 1  # continue in the upper half
    return 0
# Look each query value up in the sorted list; print 1 or 0 per line.
for i in M :
    start = 0  # left boundary of the search range
    end = len(N) - 1  # right boundary of the search range
    print(binary(i, N, start, end))
997,145 | 3d80ab9bd8388c18fb54837514e566c1d0df8aef | from django.shortcuts import render, redirect
from .models import client, project, requirement, task, rol, error, comment
from .forms import client_form, project_form, requirement_form, task_form, rol_form, error_form, comment_form
from django.contrib.auth.models import User
# Login
from django.contrib.auth.forms import AuthenticationForm,UserCreationForm
from django.contrib.auth import login, logout
from django.views.generic import FormView
from django.views.generic import CreateView
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
class view_buscar_cliente(TemplateView):
    """Search view: looks for projects by name first and falls back to
    clients when no project matches."""

    def post(self, request, *args, **kwargs):
        # BUGFIX: .get avoids a 500 (KeyError) when the form field is
        # missing from the POST body.
        buscar = request.POST.get('buscalo', '')
        proyectos = project.objects.filter(name__contains=buscar)
        if proyectos:
            return render(request, 'buscar.html',
                          {'proyectos': proyectos, 'proyecto': True})
        clientes = client.objects.filter(name__contains=buscar)
        # (leftover debug print removed)
        return render(request, 'buscar.html',
                      {'clientes': clientes, 'cliente': True})
# client
@login_required
def list_client(request):
    """Table of every client."""
    return render(request, 'client/client_list.html',
                  {'data': client.objects.all()})


@login_required
def create_client(request):
    """Show the client form; persist it on a valid POST."""
    form = client_form(request.POST) if request.method == 'POST' else client_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_client')
    return render(request, 'client/client.html', {'form': form})


@login_required
def update_client(request, id):
    """Edit an existing client."""
    record = client.objects.get(id=id)
    form = client_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_client')
    return render(request, 'client/client.html', {'form': form, 'data': record})


@login_required
def delete_client(request, id):
    """Confirmation page; delete the client on POST."""
    record = client.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_client')
    return render(request, 'delete_confirm.html')


@login_required
def view_client(request, id):
    """Detail page for one client."""
    return render(request, 'client/view.html',
                  {'data': client.objects.get(id=id)})


# client projects
@login_required
def client_projects(request, id):
    """Projects belonging to one client."""
    owner = client.objects.get(id=id)
    related = project.objects.filter(client_id=id)
    return render(request, 'client/client_projects.html',
                  {'data_': owner, 'data': related})
# end client
# project
@login_required
def list_project(request):
    """Table of every project."""
    return render(request, 'project/project_list.html',
                  {'data': project.objects.all()})


@login_required
def create_project(request):
    """Show the project form; persist it on a valid POST."""
    form = project_form(request.POST) if request.method == 'POST' else project_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_project')
    return render(request, 'project/project.html', {'form': form})


@login_required
def update_project(request, id):
    """Edit an existing project."""
    record = project.objects.get(id=id)
    form = project_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_project')
    return render(request, 'project/project.html', {'form': form, 'data': record})


@login_required
def delete_project(request, id):
    """Confirmation page; delete the project on POST."""
    record = project.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_project')
    return render(request, 'delete_confirm.html')


@login_required
def view_project(request, id):
    """Detail page for one project."""
    return render(request, 'project/view.html',
                  {'data': project.objects.get(id=id)})


# project relations list
@login_required
def project_rols(request, id):
    """Roles attached to one project."""
    parent = project.objects.get(id=id)
    rols = rol.objects.filter(project_id=id)
    return render(request, 'project/project_rols.html',
                  {'data_': parent, 'data': rols})


@login_required
def project_requirements(request, id):
    """Requirements of one project, with its roles for context."""
    parent = project.objects.get(id=id)
    reqs = requirement.objects.filter(project_id=id)
    rols = rol.objects.filter(project_id=id)
    return render(request, 'project/project_requirements.html',
                  {'data_': parent, 'data': reqs, 'rol_': rols})


@login_required
def project_tasks(request, id):
    """Tasks of one project, with its roles for context."""
    parent = project.objects.get(id=id)
    tasks = task.objects.filter(project_id=id)
    rols = rol.objects.filter(project_id=id)
    return render(request, 'project/project_tasks.html',
                  {'data_': parent, 'data': tasks, 'rol_': rols})
# end project
# requirement
@login_required
def list_requirement(request):
    """Table of every requirement."""
    return render(request, 'requirement/requirement_list.html',
                  {'data': requirement.objects.all()})


@login_required
def create_requirement(request):
    """Show the requirement form; persist it on a valid POST."""
    form = requirement_form(request.POST) if request.method == 'POST' else requirement_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_requirement')
    return render(request, 'requirement/requirement.html', {'form': form})


@login_required
def update_requirement(request, id):
    """Edit an existing requirement."""
    record = requirement.objects.get(id=id)
    form = requirement_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_requirement')
    return render(request, 'requirement/requirement.html',
                  {'form': form, 'data': record})


@login_required
def delete_requirement(request, id):
    """Confirmation page; delete the requirement on POST."""
    record = requirement.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_requirement')
    return render(request, 'delete_confirm.html')


@login_required
def view_requirement(request, id):
    """Detail page for one requirement."""
    return render(request, 'requirement/view.html',
                  {'data': requirement.objects.get(id=id)})
# end requirement
# task
@login_required
def list_task(request):
    """Table of every task."""
    return render(request, 'task/task_list.html', {'data': task.objects.all()})


# tasks per state
@login_required
def list_task_not_started(request, id):
    """Tasks of one project still in the 'Not Started' state."""
    parent = project.objects.get(id=id)
    pending = task.objects.filter(state='Not Started', project_id=id)
    return render(request, 'project/project_tasks.html',
                  {'data': pending, 'data_': parent})


@login_required
def list_task_in_process(request, id):
    """Tasks of one project currently 'In Process'."""
    parent = project.objects.get(id=id)
    ongoing = task.objects.filter(state='In Process', project_id=id)
    return render(request, 'project/project_tasks.html',
                  {'data': ongoing, 'data_': parent})


@login_required
def list_task_finished(request, id):
    """Tasks of one project already 'Finished'."""
    parent = project.objects.get(id=id)
    done = task.objects.filter(state='Finished', project_id=id)
    return render(request, 'project/project_tasks.html',
                  {'data': done, 'data_': parent})
# tasks per state


@login_required
def create_task(request):
    """Show the task form; persist it on a valid POST."""
    form = task_form(request.POST) if request.method == 'POST' else task_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_task')
    return render(request, 'task/task.html', {'form': form})


@login_required
def update_task(request, id):
    """Edit an existing task."""
    record = task.objects.get(id=id)
    form = task_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_task')
    return render(request, 'task/task.html', {'form': form, 'data': record})


@login_required
def delete_task(request, id):
    """Confirmation page; delete the task on POST."""
    record = task.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_task')
    return render(request, 'delete_confirm.html')


@login_required
def view_task(request, id):
    """Detail page for one task."""
    return render(request, 'task/view.html', {'data': task.objects.get(id=id)})


# task relations list
@login_required
def task_comments(request, id):
    """Comments attached to one task."""
    parent = task.objects.get(id=id)
    notes = comment.objects.filter(task_id=id)
    return render(request, 'task/task_comments.html',
                  {'data_': parent, 'data': notes})


@login_required
def task_errors(request, id):
    """Errors attached to one task."""
    parent = task.objects.get(id=id)
    issues = error.objects.filter(task_id=id)
    return render(request, 'task/task_errors.html',
                  {'data_': parent, 'data': issues})
# end task
# error
@login_required
def list_error(request):
    """Table of every error."""
    return render(request, 'error/error_list.html',
                  {'data': error.objects.all()})


@login_required
def create_error(request):
    """Show the error form; persist it on a valid POST."""
    form = error_form(request.POST) if request.method == 'POST' else error_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_error')
    return render(request, 'error/error.html', {'form': form})


@login_required
def update_error(request, id):
    """Edit an existing error."""
    record = error.objects.get(id=id)
    form = error_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_error')
    return render(request, 'error/error.html', {'form': form, 'data': record})


@login_required
def delete_error(request, id):
    """Confirmation page; delete the error on POST."""
    record = error.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_error')
    return render(request, 'delete_confirm.html')


@login_required
def view_error(request, id):
    """Detail page for one error."""
    return render(request, 'error/view.html',
                  {'data': error.objects.get(id=id)})
# end error
# comments
@login_required
def list_comment(request):
    """Table of every comment."""
    return render(request, 'comment/comment_list.html',
                  {'data': comment.objects.all()})


@login_required
def create_comment(request):
    """Show the comment form; persist it on a valid POST."""
    form = comment_form(request.POST) if request.method == 'POST' else comment_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_comment')
    return render(request, 'comment/comment.html', {'form': form})


@login_required
def update_comment(request, id):
    """Edit an existing comment."""
    record = comment.objects.get(id=id)
    form = comment_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_comment')
    return render(request, 'comment/comment.html', {'form': form, 'data': record})


@login_required
def delete_comment(request, id):
    """Confirmation page; delete the comment on POST."""
    record = comment.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_comment')
    return render(request, 'delete_confirm.html')


@login_required
def view_comment(request, id):
    """Detail page for one comment."""
    return render(request, 'comment/view.html',
                  {'data': comment.objects.get(id=id)})
# end comment
# rol
@login_required
def list_rol(request):
    """Table of every role."""
    return render(request, 'rol/rol_list.html', {'data': rol.objects.all()})


@login_required
def create_rol(request):
    """Show the role form; persist it on a valid POST."""
    form = rol_form(request.POST) if request.method == 'POST' else rol_form()
    if form.is_bound and form.is_valid():
        form.save()
        return redirect('list_rol')
    return render(request, 'rol/rol.html', {'form': form})


@login_required
def update_rol(request, id):
    """Edit an existing role."""
    record = rol.objects.get(id=id)
    form = rol_form(request.POST or None, instance=record)
    if form.is_valid():
        form.save()
        return redirect('list_rol')
    return render(request, 'rol/rol.html', {'form': form, 'data': record})


@login_required
def delete_rol(request, id):
    """Confirmation page; delete the role on POST."""
    record = rol.objects.get(id=id)
    if request.method == 'POST':
        record.delete()
        return redirect('list_rol')
    return render(request, 'delete_confirm.html')


@login_required
def view_rol(request, id):
    """Detail page for one role."""
    return render(request, 'rol/view.html', {'data': rol.objects.get(id=id)})
# end rol
# login
class LoginView(FormView):
    """Username/password login built on Django's stock AuthenticationForm."""
    template_name = 'login.html'
    form_class = AuthenticationForm
    success_url = 'index'

    def form_valid(self, form):
        # Bind the authenticated user to the session before redirecting.
        login(self.request, form.get_user())
        return super(LoginView, self).form_valid(form)


def log_out(request):
    """Terminate the session and return to the landing page."""
    logout(request)
    return redirect('index')


class CrearUsuarioView(CreateView):
    """Sign-up view built on Django's stock UserCreationForm."""
    model = User
    template_name = 'create_user.html'
    form_class = UserCreationForm
    success_url = 'index'
# end login
|
997,146 | 3d44068f2673d1ce915e38e86a9404c824ddbd43 | import tensorflow as tf
import cv2, os
import numpy as np
from random import shuffle
import copy
#####
#Training setting
BIN, OVERLAP = 2, 0.1
NORM_H, NORM_W = 224, 224
VEHICLES = ['Car', 'Truck', 'Van', 'Tram','Pedestrian','Cyclist']
def compute_anchors(angle):
    """Map an angle (radians) to its orientation bin target(s).

    Returns a list of [bin_index, residual_angle] pairs; an angle
    within the OVERLAP margin of a bin boundary lands in both
    neighboring bins.
    """
    wedge = 2. * np.pi / BIN
    left = int(angle / wedge)
    right = left + 1
    margin = wedge / 2 * (1 + OVERLAP / 2)
    candidates = []
    if (angle - left * wedge) < margin:
        candidates.append([left, angle - left * wedge])
    if (right * wedge - angle) < margin:
        candidates.append([right % BIN, angle - right * wedge])
    return candidates
def parse_annotation(label_dir, image_dir):
    """Parse object labels into training records.

    Reads every label file in *label_dir* (column layout matches the
    KITTI object annotations -- confirm), keeps near-fully-visible
    instances of known classes, accumulates per-class mean dimensions,
    and precomputes orientation-bin targets for both the original and
    the horizontally flipped image.
    """
    all_objs = []
    # Running per-class mean of the 3 dimension values from the label.
    dims_avg = {key: np.array([0, 0, 0]) for key in VEHICLES}
    dims_cnt = {key: 0 for key in VEHICLES}

    for label_file in sorted(os.listdir(label_dir)):
        image_file = label_file.replace('txt', 'png')

        for line in open(label_dir + label_file).readlines():
            line = line.strip().split(' ')
            truncated = np.abs(float(line[1]))
            occluded = np.abs(float(line[2]))

            # Keep only essentially unobstructed instances of known classes.
            if line[0] in VEHICLES and truncated < 0.1 and occluded < 0.1:
                # Shift the observation angle by pi/2, then wrap into [0, 2*pi).
                new_alpha = float(line[3]) + np.pi/2.
                if new_alpha < 0:
                    new_alpha = new_alpha + 2.*np.pi
                new_alpha = new_alpha - int(new_alpha/(2.*np.pi))*(2.*np.pi)

                obj = {'name': line[0],
                       'image': image_file,
                       'xmin': int(float(line[4])),
                       'ymin': int(float(line[5])),
                       'xmax': int(float(line[6])),
                       'ymax': int(float(line[7])),
                       'dims': np.array([float(number) for number in line[8:11]]),
                       'new_alpha': new_alpha
                       }

                # Incremental mean: new_mean = (n*old_mean + x) / (n+1).
                dims_avg[obj['name']] = dims_cnt[obj['name']]*dims_avg[obj['name']] + obj['dims']
                dims_cnt[obj['name']] += 1
                dims_avg[obj['name']] /= dims_cnt[obj['name']]

                all_objs.append(obj)

    ###### flip data
    for obj in all_objs:
        # Fix dimensions
        # Store dimensions as the offset from the class mean.
        obj['dims'] = obj['dims'] - dims_avg[obj['name']]

        # Fix orientation and confidence for no flip
        orientation = np.zeros((BIN, 2))
        confidence = np.zeros(BIN)

        anchors = compute_anchors(obj['new_alpha'])

        for anchor in anchors:
            # Encode the in-bin residual as (cos, sin) and flag the bin.
            orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
            confidence[anchor[0]] = 1.

        confidence = confidence / np.sum(confidence)

        obj['orient'] = orientation
        obj['conf'] = confidence

        # Fix orientation and confidence for flip
        orientation = np.zeros((BIN, 2))
        confidence = np.zeros(BIN)

        # A horizontal flip mirrors the heading angle.
        anchors = compute_anchors(2.*np.pi - obj['new_alpha'])

        for anchor in anchors:
            orientation[anchor[0]] = np.array([np.cos(anchor[1]), np.sin(anchor[1])])
            confidence[anchor[0]] = 1

        confidence = confidence / np.sum(confidence)

        obj['orient_flipped'] = orientation
        obj['conf_flipped'] = confidence

    return all_objs
def prepare_input_and_output(image_dir, train_inst):
    """Crop, randomly flip, and normalize one training instance.

    Returns (image, dims, orientation, confidence), selecting the
    flipped orientation/confidence targets when the random horizontal
    flip was applied.
    """
    ### Prepare image patch
    xmin = train_inst['xmin']  #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
    ymin = train_inst['ymin']  #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
    xmax = train_inst['xmax']  #+ np.random.randint(-MAX_JIT, MAX_JIT+1)
    ymax = train_inst['ymax']  #+ np.random.randint(-MAX_JIT, MAX_JIT+1)

    img = cv2.imread(image_dir + train_inst['image'])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Deep-copy the crop so later in-place ops can't touch the source.
    img = copy.deepcopy(img[ymin:ymax+1,xmin:xmax+1]).astype(np.float32)

    # re-color the image (disabled augmentation, kept for reference)
    #img += np.random.randint(-2, 3, img.shape).astype('float32')
    #t  = [np.random.uniform()]
    #t += [np.random.uniform()]
    #t += [np.random.uniform()]
    #t = np.array(t)

    #img = img * (1 + t)
    #img = img / (255. * 2.)

    # flip the image
    # binomial(1, .5) yields 0 or 1, so "> 0.5" means "flip was drawn".
    flip = np.random.binomial(1, .5)
    if flip > 0.5: img = cv2.flip(img, 1)

    # resize the image to standard size
    img = cv2.resize(img, (NORM_H, NORM_W))
    # Mean subtraction. NOTE(review): these constants are the usual
    # VGG *BGR* channel means although the image was converted to RGB
    # above -- confirm the intended channel order.
    img = img - np.array([[[103.939, 116.779, 123.68]]])
    #img = img[:,:,::-1]

    ### Fix orientation and confidence
    if flip > 0.5:
        return img, train_inst['dims'], train_inst['orient_flipped'], train_inst['conf_flipped']
    else:
        return img, train_inst['dims'], train_inst['orient'], train_inst['conf']
def data_gen(image_dir, all_objs, batch_size):
    """Yield endless (images, [dims, orientations, confidences]) batches.

    Example order is reshuffled at every epoch boundary; the last batch
    of an epoch may be smaller than *batch_size*.
    """
    num_obj = len(all_objs)

    # BUGFIX: np.random.shuffle requires a mutable sequence; in
    # Python 3, range(...) is immutable and shuffling it raises
    # TypeError, so materialize the indices as a list first.
    keys = list(range(num_obj))
    np.random.shuffle(keys)

    l_bound = 0
    r_bound = batch_size if batch_size < num_obj else num_obj

    while True:
        if l_bound == r_bound:
            # Epoch finished: restart the window and reshuffle.
            l_bound = 0
            r_bound = batch_size if batch_size < num_obj else num_obj
            np.random.shuffle(keys)

        currt_inst = 0
        x_batch = np.zeros((r_bound - l_bound, 224, 224, 3))
        d_batch = np.zeros((r_bound - l_bound, 3))
        o_batch = np.zeros((r_bound - l_bound, BIN, 2))
        c_batch = np.zeros((r_bound - l_bound, BIN))

        for key in keys[l_bound:r_bound]:
            # augment input image and fix object's orientation and confidence
            image, dimension, orientation, confidence = \
                prepare_input_and_output(image_dir, all_objs[key])

            x_batch[currt_inst, :] = image
            d_batch[currt_inst, :] = dimension
            o_batch[currt_inst, :] = orientation
            c_batch[currt_inst, :] = confidence

            currt_inst += 1

        yield x_batch, [d_batch, o_batch, c_batch]

        # Advance the batch window, clamping at the dataset end.
        l_bound = r_bound
        r_bound = r_bound + batch_size
        if r_bound > num_obj:
            r_bound = num_obj
997,147 | 63748e51f2776a50064505c17cfec29202d58572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project: EN_Char_CNN_Text_Classification
# @File : train.py
# @Author : Origin.H
# @Date : 2018/1/11
import os
import logging
import char_cnn
import config
import hyperparameters
if __name__ == "__main__":
train_name = 'ag_news'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y %b %d %H:%M:%S',
filename=os.path.join(config.LOG_HOME, (config.LOG_FILE % train_name)),
filemode='a')
conv_pool_layers = [
(7, 256, 3),
(7, 256, 3),
(3, 256, 1),
(3, 256, 1),
(3, 256, 1),
(3, 256, 3)
]
fully_connected_layers = [
1024,
1024
]
model = char_cnn.CharConvNet(conv_pool_layers=conv_pool_layers, fully_connected_layers=fully_connected_layers,
length0=hyperparameters.LENGTH0, n_class=hyperparameters.N_CLASS,
batch_size=hyperparameters.BATCH_SIZE, learning_rate=hyperparameters.LEARNING_RATE,
decay_steps=hyperparameters.DECAY_STEPS, decay_rate=hyperparameters.DECAY_RATE,
grad_clip=hyperparameters.GRAD_CLIP, max_to_keep=config.MAX_TO_KEEP)
model.fit(config.TRAIN_TEXT_HOME, config.CHECKPOINT_HOME, keep_prob=hyperparameters.KEEP_PROB,
epochs=config.EPOCHS, display_steps=config.DISPLAY_STEPS, save_steps=config.SAVE_STEPS, name=train_name)
logging.info(config.DONE_TRAINING_MESS)
print(config.DONE_TRAINING_MESS)
|
997,148 | 0a05479d2fa6badb7214449271f38aa1e1d47341 | # encoding: utf-8
# !/usr/bin/env python
'''
@author : wersonliu
@File : forms.py
@data :
'''
from django import forms
import re
from operation.models import UserAsk
# 1.传统写法
# class UserAskForm(forms.Form):
# name = forms.CharField(required=True, min_length=2, max_length=20)
# phone = forms.CharField(required=True, min_length=11, max_length=11)
# course_name = forms.CharField(required=True, max_length=50, min_length=5)
# 2.modelform 写法
class UserAskForm(forms.ModelForm):
    """ModelForm for course enquiries (name / mobile / course) with
    mobile-number validation."""

    class Meta:
        model = UserAsk
        fields = ['name', 'mobile', 'course']

    def clean_mobile(self):
        """Validate the Chinese mobile number format.

        BUGFIX: the hook was misspelled `clead_mobile`, so Django never
        invoked it, and it read `cleaned_data['moblie']` (typo) which
        would raise KeyError; the regex was also missing its leading
        `^` anchor on the first alternative and was not a raw string.
        """
        mobile = self.cleaned_data['mobile']
        regex_mobile = r"^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
        if re.match(regex_mobile, mobile):
            return mobile
        # Error message kept verbatim (user-facing); `code` kept as-is
        # in case callers match on it.
        raise forms.ValidationError(u"手机号码非法", code="mobile_invalde")
def main():
    """Demo entry point: build a two-leaf tree and print its node count."""
    tree = Node(Leaf(1), Leaf(2))
    print(tree.nodes())
class Node:
    """Internal binary-tree node with exactly two children."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def nodes(self):
        """Total node count of the subtree rooted at this node."""
        return 1 + self.left.nodes() + self.right.nodes()
class Leaf:
    """Terminal tree node holding a value; always counts as one node."""

    def __init__(self, val):
        self.val = val

    def nodes(self):
        """A leaf contributes exactly one node."""
        return 1
997,150 | 4b404c81965bff928ef9eec8a0f36fa80599e247 |
import ConfigParser
class MyIni:
    """Thin wrapper around an INI config file (Python 2 / ConfigParser)."""

    def __init__(self, conf_path='my_ini.conf'):
        self.conf_path = conf_path
        self.cf = ConfigParser.ConfigParser()
        self.cf.read(conf_path)

    def get_kakou(self):
        """Return the [KAKOU] section as a dict (ints where numeric)."""
        conf = {}
        section = 'KAKOU'
        conf['host'] = self.cf.get(section, 'host')
        conf['port'] = self.cf.getint(section, 'port')
        conf['id_flag'] = self.cf.getint(section, 'id_flag')
        conf['id_step'] = self.cf.getint(section, 'id_step')
        conf['time_flag'] = self.cf.get(section, 'time_flag')
        conf['time_step'] = self.cf.getint(section, 'time_step')
        conf['kkdd'] = self.cf.get(section, 'kkdd')
        conf['city'] = self.cf.get(section, 'city')
        return conf

    def get_hbc(self):
        """Return the [HBC] section as a dict."""
        conf = {}
        section = 'HBC'
        conf['host'] = self.cf.get(section, 'host')
        conf['port'] = self.cf.getint(section, 'port')
        return conf

    def _write_back(self):
        # BUGFIX: the handle from open(..., 'w') was never closed;
        # `with` guarantees flush and close even on write errors.
        # (Works on Python 2.6+.)
        with open(self.conf_path, 'w') as handle:
            self.cf.write(handle)

    def set_id(self, id_flag):
        """Persist a new KAKOU/id_flag value."""
        self.cf.set('KAKOU', 'id_flag', id_flag)
        self._write_back()

    def set_time(self, time_flag):
        """Persist a new KAKOU/time_flag value."""
        self.cf.set('KAKOU', 'time_flag', time_flag)
        self._write_back()
if __name__ == '__main__':
    # Manual smoke test (note: Python 2 print-statement syntax).
    ini = MyIni()
    print ini.get_kakou()
    print ini.get_hbc()
    #ini.set_time('2015-10-01 00:10:23')
    #print hbc
|
997,151 | 5f7200b12b8a0e36a67fc78a37b34c461512cd30 | import sys
sys.path.append('./language_model/')
import msa_class
import torch
import numpy as np
import networkx as nx
from torch.autograd import Variable
from collections import defaultdict
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import math
import json
import pickle
from pycocoevalcap.bleu.bleu_scorer import BleuScorer
#from IPython import embed
from multiprocessing import Pool
import random
import time
import argparse
#from IPython import embed
if sys.version_info >= (3, 0):
from allennlp.modules.elmo import Elmo, batch_to_ids
def lattice_to_latex(tokens, lattice):
    """Render *lattice* as a TikZ automaton inside a standalone LaTeX doc.

    Node placement comes from a fresh per-sentence chain graph built
    from *tokens*; arcs and labels come from the merged *lattice*
    (word labels above each arc, threshold counts below).
    """
    G = init_graph(tokens)
    text = '''\\documentclass[tikz,border=10pt]{standalone}
\\usetikzlibrary{automata,positioning,arrows.meta}
\\begin{document}
\\begin{tikzpicture}
[
initial/.style={line width=1pt},
accepting by double/.append style={line width=1pt},
semithick,
]\n'''
    # Start state 0, then the first state of each sentence fanned out
    # around it (above / right / below) depending on sentence count.
    text += '\\node (0) [state, initial] {$0$};\n'
    k_list = sorted(G[0].keys())
    n_sents = len(G[0].keys())
    text += '\\node (%d) [state, above right=of 0] {$%d$};\n' % (k_list[1], k_list[1])
    text += '\\node (%d) [state, right=of 0, above=of %d] {$%d$};\n' % (k_list[0], k_list[1], k_list[0])
    if n_sents > 2:
        text += '\\node (%d) [state, right=of 0] {$%d$};\n' % (k_list[2], k_list[2])
    if n_sents > 3:
        text += '\\node (%d) [state, below right=of 0] {$%d$};\n' % (k_list[3], k_list[3])
    if n_sents > 4:
        text += '\\node (%d) [state, right=of 0, below=of %d] {$%d$};\n' % (k_list[4], k_list[3], k_list[4])
    # Each remaining interior node sits to the right of its (first)
    # predecessor in the chain graph.
    for n in G.nodes():
        if n == -1 or n in k_list or n == 0:
            continue
        text += '\\node (%d) [state, ' % (n)
        for a, b in G.in_edges(n):
            text += 'right=of %d] {$%d$};' % (a, n)
            break
        text += '\n'
    # The shared sink node -1, placed right of all its predecessors.
    n = -1
    text += '\\node (%d) [state, ' % (n)
    for a, b in G.in_edges(n):
        text += 'right=of %d,' % (a)
    text += '] {$%d$};\n' % (n)
    text += '\\path [-{Stealth[]}]\n\n'
    # Switch to the merged lattice for the actual arcs and labels.
    G = lattice
    thr = get_threshold(G)
    for n in G.nodes():
        text += '(%d) ' % (n)
        for a, b in G.out_edges(n):
            if len(G[a][b]['word']) == 0:
                word = ''
            else:
                # Multiple merged words on one arc are slash-separated.
                word = '/'.join(G[a][b]['word'])
            text += 'edge node [above, sloped] {$%s$} (%d)\n' % (word, b)
            text += 'edge node [below, sloped] {$%d$} (%d)\n' % (thr[a][b], b)
    text += ''';
\\end{tikzpicture}
\\end{document}
'''
    return text
def lcs(a, b, not_connect=(), base=(0, 0)):
    """Longest common subsequence of sequences *a* and *b*.

    Index pairs listed in *not_connect* (absolute coordinates, offset by
    *base*) are forbidden from matching. Returns a 3-tuple:
    (common subsequence in REVERSE order,
     [matched indices in a, matched indices in b],
     the full DP length table).

    BUGFIX: the default for *not_connect* was a shared mutable list
    ([]); it is only read (membership tests), so an immutable tuple is
    a drop-in, footgun-free replacement.
    """
    lengths = [[0 for j in range(len(b)+1)] for i in range(len(a)+1)]
    # row 0 and column 0 are initialized to 0 already
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            if x == y and (i+base[0], j+base[1]) not in not_connect:
                lengths[i+1][j+1] = lengths[i][j] + 1
            else:
                lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])
    # Walk the table back from the bottom-right corner to recover one LCS.
    result = []
    x, y = len(a), len(b)
    commons = [[], []]
    while x != 0 and y != 0:
        if lengths[x][y] == lengths[x-1][y]:
            x -= 1
        elif lengths[x][y] == lengths[x][y-1]:
            y -= 1
        else:
            assert a[x-1] == b[y-1]
            result.append(a[x-1])  # appended while walking back => reversed
            commons[0] = [x-1] + commons[0]
            commons[1] = [y-1] + commons[1]
            x -= 1
            y -= 1
    return result, commons, lengths
def init_graph(sentences):
    """Build the initial word lattice: one linear chain of edges per sentence.

    Every sentence runs from the shared start node 0 to the shared end node -1.
    Word-bearing edges carry word=[w] and idx=[(sentence_index, word_index)];
    the edges attached to 0 and -1 are empty (epsilon) connectors.
    Interior node ids are allocated from a single running counter, so chains
    of different sentences never share interior nodes.
    """
    G = nx.DiGraph()
    idx = 1  # next fresh node id; 0 and -1 are reserved for start/end
    for i, line in enumerate(sentences):
        for j, w in enumerate(line):
            if j == 0:
                # first word: epsilon edge from the global start node
                G.add_edge(0, idx, word=[], idx=[])
            else:
                # interior word: epsilon connector between word edges
                G.add_edge(idx, idx+1, word=[], idx=[])
                idx += 1
            # the edge that actually carries word w of sentence i
            G.add_edge(idx, idx+1, word=[w], idx=[(i,j)])
            idx += 1
            if j == len(sentences[i]) - 1:
                # last word: epsilon edge into the global end node
                G.add_edge(idx, -1, word=[], idx=[])
                idx += 1
    return G
def get_node(G, idx):
    """Return the edge (u, v) whose 'idx' attribute contains the given
    (sentence, word) position, or None when no edge carries it."""
    for src, dst in G.edges():
        if idx in G[src][dst]['idx']:
            return src, dst
    return None
def merge_edges(G, pair_a, pair_b):
    """Fold edge pair_b into edge pair_a.

    Bridges b's endpoints to a's with epsilon edges so every path through b
    survives, moves b's idx/word payload onto a (deduplicating words), then
    deletes edge b. No-op when the two edges are identical.
    """
    a0, a1 = pair_a
    b0, b1 = pair_b
    if (b0, b1) == (a0, a1):
        return
    # Keep paths alive: route b's source into a's source, a's target into
    # b's target.
    if b0 != a0:
        G.add_edge(b0, a0, word=[], idx=[])
    if a1 != b1:
        G.add_edge(a1, b1, word=[], idx=[])
    target = G[a0][a1]
    source = G[b0][b1]
    target['idx'] += source['idx']
    for token in source['word']:
        if token not in target['word']:
            target['word'].append(token)
    G.remove_edge(b0, b1)
def merge(G, commons, sent_a, sent_b):
    """Merge the aligned word positions of sentence sent_b into sentence
    sent_a's edges, one aligned pair at a time."""
    for pos_a, pos_b in zip(*commons):
        edge_a = get_node(G, idx=(sent_a, pos_a))
        edge_b = get_node(G, idx=(sent_b, pos_b))
        merge_edges(G, edge_a, edge_b)
def get_threshold(G):
    """Compute, per edge (v, u), how many times dfs() may traverse it.

    Word edges get the maximum number of idx entries contributed by any
    single sentence. Empty (epsilon) edges inherit the max threshold of an
    already-resolved neighboring edge; unresolved epsilon edges are re-queued
    until a neighbor resolves.

    NOTE(review): if an epsilon edge's neighborhood never resolves (e.g. a
    cycle of epsilon edges), the while loop would not terminate — presumably
    the graph construction rules this out; confirm.
    """
    thr = defaultdict(dict)
    queue = list(G.edges())
    while len(queue) != 0:
        v, u = queue.pop(0)
        if len(G[v][u]['idx']) != 0:
            # Word edge: count occurrences per source sentence, take the max.
            cnt = defaultdict(int)
            for i_sent, i_word in G[v][u]['idx']:
                cnt[i_sent] += 1
            thr[v][u] = max(cnt.values())
            continue
        # Epsilon edge: look at thresholds already assigned downstream of u
        # and upstream of v.
        tmp_a = [thr[a][b] for a, b in G.out_edges(u) if a in thr and b in thr[a]]
        tmp_b = [thr[a][b] for a, b in G.in_edges(v) if a in thr and b in thr[a]]
        if len(tmp_a) == 0 and len(tmp_b) == 0:
            # No neighbor resolved yet: retry later.
            queue.append((v, u))
            continue
        tmp_a = 0 if len(tmp_a) == 0 else max(tmp_a)
        tmp_b = 0 if len(tmp_b) == 0 else max(tmp_b)
        threshold = max(tmp_a, tmp_b)
        thr[v][u] = threshold
    return thr
def dfs(G, T, v=0, visited=None):
    """Enumerate all sentences spelled by paths from node v to the end
    node -1, traversing each edge (v, u) at most T[v][u] times.

    G maps v -> u -> {'word': [...]}; empty 'word' lists are epsilon edges.
    Returns a deduplicated list of sentence strings (order unspecified).

    Fix: `visited` previously defaulted to a module-level
    `defaultdict(int)` shared by every top-level call (the classic mutable
    default argument bug); it is now created per call.
    """
    if visited is None:
        visited = defaultdict(int)
    if v == -1:
        # Reached the end node: one empty path.
        return [[]]
    ret = []
    for u in G[v]:
        threshold = T[v][u]
        if visited[(v, u)] >= threshold:
            continue
        visited[(v, u)] += 1
        for line in dfs(G, T, u, visited):
            if line == []:
                line = ''
            if len(G[v][u]['word']) == 0:
                # Epsilon edge: pass the suffix through unchanged.
                ret.append(line)
            else:
                # Word edge: prepend each alternative word.
                for w in G[v][u]['word']:
                    ret.append(w if line == '' else w + ' ' + line)
        visited[(v, u)] -= 1  # backtrack: release the edge for sibling paths
    if len(ret) == 0:
        return []
    if ret != [[]]:
        ret = list(set(ret))  # deduplicate complete sentences
    return ret
def check_cycle(G):
    """Return True if directed graph G contains a cycle, False otherwise.

    nx.find_cycle signals "no cycle" by raising NetworkXNoCycle; catching
    exactly that (instead of a blanket `except Exception`) lets genuine
    errors propagate rather than being misread as "acyclic".
    """
    try:
        nx.find_cycle(G)
    except nx.NetworkXNoCycle:
        return False
    return True
def acyclic_lm_commons(G, sentences, hiddens, row, col, minus=0.0):
    """Soft-align sentences `row` and `col` with the LM-hidden-state matcher,
    returning aligned index pairs safe to merge without creating a cycle.

    row: sentence a; col: sentence b. Pairs of nodes already connected by a
    path in G are collected into `not_connect` and forbidden, while word
    positions whose edges are already merged (`do_connect`) split the
    sentences into independently aligned segments.
    """
    # Map each graph node touched by sentence row/col to its word position.
    row_idx = {}
    col_idx = {}
    for u, v in G.edges():
        for pair in [p for p in G[u][v]['idx'] if p[0] == row]:
            if u in row_idx or v in row_idx:
                print('Warning')
            row_idx[u] = pair[1]
            row_idx[v] = pair[1]
        for pair in [p for p in G[u][v]['idx'] if p[0] == col]:
            if u in col_idx or v in col_idx:
                print('Warning')
            col_idx[u] = pair[1]
            col_idx[v] = pair[1]
    # All-pairs reachability: a finite distance means a path already exists,
    # so merging those two word positions would create a cycle.
    ret = nx.floyd_warshall(G)
    not_connect = set()
    for r in row_idx:
        for c in col_idx:
            if ret[r][c] < float('inf') or ret[c][r] < float('inf'):
                not_connect.add((row_idx[r],col_idx[c]))
    # Positions already merged (shared nodes) anchor the segmentation.
    do_connect = set()
    for idx in set(col_idx.keys()) & set(row_idx.keys()):
        do_connect.add((row_idx[idx], col_idx[idx]))
    do_connect.add((len(sentences[row]), len(sentences[col])))
    # Split both sentences at the anchors into parallel half-open ranges.
    left_pair = (0, 0)
    ranges = []
    for a, b in sorted(list(do_connect)):
        now = ((left_pair[0], a), (left_pair[1], b))
        ranges.append(now)
        left_pair = (a+1, b+1)
    commons = [[], []]
    # Align each segment independently; offsets map back to full-sentence
    # indices. The anchor itself is re-appended between segments.
    for a, b in ranges:
        _alignment, _commons = msa_class.msa(hiddens[row][a[0]:a[1]], hiddens[col][b[0]:b[1]], sentences[row][a[0]:a[1]], sentences[col][b[0]:b[1]], not_connect, base=(a[0], b[0]), minus=minus)
        commons[0] += [_i + a[0] for _i in _commons[0]]
        commons[1] += [_i + b[0] for _i in _commons[1]]
        if a[1] < len(sentences[row]) and b[1] < len(sentences[col]):
            commons[0] += [a[1]]
            commons[1] += [b[1]]
    # Sanity check: no forbidden pair should have been produced.
    for a, b in zip(commons[0], commons[1]):
        if (a, b) in not_connect and (a, b) not in do_connect:
            print('Warning', (a,b))
    return commons
def acyclic_commons(G, sentences, row, col):
    """Hard-align (exact LCS) sentences `row` and `col`, returning aligned
    index pairs safe to merge without creating a cycle.

    row: sentence a; col: sentence b. Mirrors acyclic_lm_commons but uses
    lcs() per segment instead of the LM-based matcher.
    """
    # Map each graph node touched by sentence row/col to its word position.
    row_idx = {}
    col_idx = {}
    for u, v in G.edges():
        for pair in [p for p in G[u][v]['idx'] if p[0] == row]:
            if u in row_idx or v in row_idx:
                print('Warning')
            row_idx[u] = pair[1]
            row_idx[v] = pair[1]
        for pair in [p for p in G[u][v]['idx'] if p[0] == col]:
            if u in col_idx or v in col_idx:
                print('Warning')
            col_idx[u] = pair[1]
            col_idx[v] = pair[1]
    # A finite Floyd-Warshall distance in either direction means the nodes
    # are already connected; merging those word positions would form a cycle.
    ret = nx.floyd_warshall(G)
    not_connect = set()
    for r in row_idx:
        for c in col_idx:
            if ret[r][c] < float('inf') or ret[c][r] < float('inf'):
                not_connect.add((row_idx[r],col_idx[c]))
    # Positions already merged (nodes shared by both sentences) are anchors.
    do_connect = set()
    for idx in set(col_idx.keys()) & set(row_idx.keys()):
        do_connect.add((row_idx[idx], col_idx[idx]))
    do_connect.add((len(sentences[row]), len(sentences[col])))
    # Split both sentences at the anchors into parallel half-open ranges.
    left_pair = (0, 0)
    ranges = []
    for a, b in sorted(list(do_connect)):
        now = ((left_pair[0], a), (left_pair[1], b))
        ranges.append(now)
        left_pair = (a+1, b+1)
    commons = [[], []]
    # LCS-align each segment; offsets map local indices back to the full
    # sentences, and the anchor pair is re-appended between segments.
    for a, b in ranges:
        _result, _commons, _L = lcs(sentences[row][a[0]:a[1]], sentences[col][b[0]:b[1]], not_connect, base=(a[0], b[0]))
        commons[0] += [_i + a[0] for _i in _commons[0]]
        commons[1] += [_i + b[0] for _i in _commons[1]]
        if a[1] < len(sentences[row]) and b[1] < len(sentences[col]):
            commons[0] += [a[1]]
            commons[1] += [b[1]]
    # Sanity check: no forbidden pair should have been produced.
    for a, b in zip(commons[0], commons[1]):
        if (a, b) in not_connect and (a, b) not in do_connect:
            print('Warning', (a,b))
    return commons
def generate_lattice(sentences, hiddens, order_method, align_method, get_G=False, lm=False, minus=0.0, simi_mat=None):
    """Merge a group of sentences into a word lattice and enumerate it.

    order_method: 'hard' (TF-count cosine), 'soft' (LM hidden similarity) or
    'random' — decides which sentence pairs are merged first.
    align_method: 'hard' (LCS) or 'soft' (LM matcher); it overrides the `lm`
    parameter.
    Returns the graph itself when get_G is True (or when a cycle slips
    through in soft mode), otherwise the set of enumerated sentence strings.

    NOTE(review): for an order_method outside {'hard','soft','random'},
    `simi` is never bound and the seq computation raises NameError — confirm
    callers only pass the three documented values.
    """
    G = init_graph(sentences)
    # Pairwise sentence similarity drives the merge order.
    if order_method == 'hard':
        vectorizer = CountVectorizer()
        matrix = vectorizer.fit_transform([' '.join(line) for line in sentences])
        simi = cosine_similarity(matrix)
    elif order_method == 'soft':
        simi_mat = load_simi_mat_from_hiddens(hiddens, minus)
        simi = np.array(simi_mat)
    # align_method wins over the lm flag passed in.
    if align_method == 'hard':
        lm = False
    elif align_method == 'soft':
        lm = True
    visited = set()
    if order_method == 'random':
        seq = [(i, j) for i in range(len(sentences)) for j in range(len(sentences))]
        random.shuffle(seq)
    else:
        # Flat indices of the similarity matrix, most similar pair first.
        seq = simi.reshape(1, -1).argsort()[0][::-1]
    for i in seq:
        if order_method == 'random':
            row, col = i
        else:
            # Recover (row, col) from the flattened index.
            row = i % simi.shape[0]
            col = int(math.floor(i / simi.shape[0]))
        row, col = sorted([row, col])
        if row == col:
            continue
        # Each sentence participates in at most one merge per partner, and
        # each unordered pair is merged at most once.
        if row in visited and col in visited:
            continue
        if (col, row) in visited or (row, col) in visited:
            continue
        # Stop once similarity falls below the threshold (list is sorted).
        if order_method != 'random' and simi[row][col] < minus:
            break
        if lm:
            commons = acyclic_lm_commons(G, sentences, hiddens, row, col, minus)
            merge(G, commons, row, col)
            if check_cycle(G):
                # Should not happen; bail out with the raw graph for debugging.
                print('cycle exist')
                return G
        else:
            result, commons, L = lcs(sentences[row], sentences[col])
            merge(G, commons, row, col)
        visited.add(col)
        visited.add(row)
        visited.add((col, row))
    # Contract chains of epsilon edges until the edge count stabilizes.
    n_edges = len(G.edges())
    while True:
        G = simplify(G)
        n_edges_now = len(G.edges())
        if n_edges == n_edges_now:
            break
        n_edges = n_edges_now
    if get_G:
        return G
    # Enumerate every path 0 -> -1 under the per-edge traversal thresholds.
    T = get_threshold(G)
    ret = dfs(G, T)
    ret = set(ret)
    return ret
def simplify(G):
    """One contraction pass: bypass any node whose single in-edge and single
    out-edge are both epsilon (empty 'word'), replacing the two edges with
    one epsilon edge a -> v. The node itself is left in place (isolated).

    NOTE(review): edges are removed/added while iterating G.nodes(); the
    node set itself is never modified (a and v already exist), which is why
    this does not blow up — confirm against the installed networkx version.
    """
    for n in G.nodes():
        if len(G.in_edges(n)) == 1 and len(G.out_edges(n)) == 1:
            a, b = list(G.in_edges(n))[0]
            u, v = list(G.out_edges(n))[0]
            if len(G[a][b]['word']) == 0 and len(G[u][v]['word']) == 0:
                # Don't clobber an existing word-bearing edge a -> v.
                if (a, v) in G.edges() and len(G[a][v]['word']) != 0:
                    continue
                G.remove_edge(a, b)
                G.remove_edge(u, v)
                if a == v:
                    # Bypassing would create a self-loop; just drop the pair.
                    continue
                G.add_edge(a, v, word=[], idx=[])
    return G
def generate_refs(sentences, order_method, align_method, minus, sentid, simi_mat=None):
    """Generate pseudo-reference captions for one image.

    Builds the merged lattice from the captions, adds the originals, scores
    every candidate with BLEU against the original captions, and returns a
    list of sentence dicts (imgid/raw/tokens/sentid/bleu) with sequential
    sentid values starting at `sentid`.
    """
    tokens = [s['tokens'] for s in sentences]
    hiddens = [s['hidden'] for s in sentences] if align_method == 'soft' else None
    refs = generate_lattice(tokens, hiddens, order_method, align_method,
                            simi_mat=simi_mat, minus=minus)
    # The original captions always count as references too.
    for tok in tokens:
        refs.add(' '.join(tok))
    refs = list(refs)
    scorer = BleuScorer(n=4)
    for ref in refs:
        scorer += (ref, [' '.join(tok) for tok in tokens])
    score, scores = scorer.compute_score(option='closest', verbose=0)
    new_sentences = []
    # scores[3] holds the BLEU-4 score of each candidate, in refs order.
    for i, bleu in enumerate(scores[3]):
        new_sentences.append({
            'imgid': sentences[0]['imgid'],
            'raw': refs[i],
            'tokens': refs[i].split(' '),
            'sentid': sentid,
            'bleu': bleu,
        })
        sentid += 1
    return new_sentences
def init_elmo(gpu):
    """Construct a single-representation ELMo encoder from the local data/
    files; kept on CPU when gpu == -1, otherwise moved to that GPU."""
    options_file = "data/elmo_2x4096_512_2048cnn_2xhighway_options.json"
    weight_file = "data/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
    print('init elmo')
    model = Elmo(options_file, weight_file, 1, dropout=0)
    if gpu != -1:
        model = model.cuda(gpu)
    return model
def load_hiddens_from_elmo(j, elmo, batch_size=128, gpu=-1):
    """Run every caption in dataset `j` through ELMo in batches and attach
    the per-token hidden states to each sentence as sent['hidden'].

    Cleanup: the batch encode/collect code was duplicated three times and a
    dead trailing loop re-read a stale `output` over an already-emptied
    `txts` list (a no-op at best, wrong data at worst); the unused `mat`
    accumulator is gone too.
    """
    def _encode(batch):
        # Encode one batch and return per-sentence (len, dim) numpy arrays,
        # trimming the padding back off each output row.
        chars = batch_to_ids(batch).to(gpu)
        output = elmo(chars)['elmo_representations'][-1]
        return [output[i, :len(txt), :].cpu().data.numpy()
                for i, txt in enumerate(batch)]

    outputs = []
    txts = []
    t0 = time.time()
    for img in j['images']:
        for sent in img['sentences']:
            txts.append(sent['tokens'])
            if len(txts) >= batch_size:
                outputs.extend(_encode(txts))
                txts = []
    if txts:
        # Flush the final partial batch.
        outputs.extend(_encode(txts))
    # Re-walk the dataset in the same order to attach the hidden states.
    idx = 0
    for img in j['images']:
        for sent in img['sentences']:
            sent['hidden'] = outputs[idx]
            idx += 1
    t1 = time.time()
    print('Load hidden completed!', t1 - t0)
def load_hiddens(j, model, dictionary, batch_size=128):
    """Run every caption in dataset `j` through the RNN language model in
    batches and attach per-token hidden states as sent['hidden'].

    Cleanup: the batch encode/collect code was duplicated three times, a
    dead trailing loop iterated an emptied `txts` against a stale `output`,
    and a stray per-sentence debug `print(i, txt)` spammed stdout on every
    full batch; all removed.
    """
    def _encode(batch):
        # Pad to the batch max length with index 0, run the encoder+RNN,
        # and return per-sentence (len, dim) numpy arrays without padding.
        max_len = max(len(txt) for txt in batch)
        mat = [[dictionary[w] for w in txt] + [0] * (max_len - len(txt))
               for txt in batch]
        feature = torch.from_numpy(np.array(mat))
        feature = Variable(feature.t().cuda(model.device))
        emb = model.encoder(feature)
        output, hidden = model.rnn(emb)
        return [output[:len(txt), i, :].cpu().data.numpy()
                for i, txt in enumerate(batch)]

    outputs = []
    txts = []
    t0 = time.time()
    for img in j['images']:
        for sent in img['sentences']:
            txts.append(sent['tokens'])
            if len(txts) >= batch_size:
                outputs.extend(_encode(txts))
                txts = []
    if txts:
        # Flush the final partial batch.
        outputs.extend(_encode(txts))
    # Re-walk the dataset in the same order to attach the hidden states.
    idx = 0
    for img in j['images']:
        for sent in img['sentences']:
            sent['hidden'] = outputs[idx]
            idx += 1
    t1 = time.time()
    print('Load hidden completed!', t1 - t0)
def load_simi_mat(img, minus):
    """Pairwise similarity matrix for one image's captions.

    Symmetric with a zero diagonal; entry [a][b] is msa_class.sent_simi of
    the two captions' hidden states and tokens.
    """
    sents = img['sentences']
    n = len(sents)
    simi_mat = [[0] * n for _ in range(n)]
    for a in range(n):
        for b in range(a + 1, n):
            score = msa_class.sent_simi(sents[a]['hidden'], sents[b]['hidden'],
                                        sents[a]['tokens'], sents[b]['tokens'],
                                        minus=minus)
            simi_mat[a][b] = score
            simi_mat[b][a] = score
    return simi_mat
def load_simi_mat_from_hiddens(hiddens, minus, imgid=0):
    """Pairwise similarity matrix computed from hidden states only
    (empty token strings are passed to the matcher). Symmetric, zero
    diagonal. `imgid` is accepted for interface compatibility but unused.
    """
    n = len(hiddens)
    simi_mat = [[0] * n for _ in range(n)]
    for a in range(n):
        for b in range(a + 1, n):
            score = msa_class.sent_simi(hiddens[a], hiddens[b], '', '', minus=minus)
            simi_mat[a][b] = score
            simi_mat[b][a] = score
    return simi_mat
# Command-line interface for the pseudo-reference generation script.
parser = argparse.ArgumentParser(description='Pseudo-Ref Generation.')
parser.add_argument('-order_method', default='hard', help='Method of ordering. Options are [hard|soft|random]')
parser.add_argument('-align_method', default='hard', help='Method of alignment. Options are [hard|soft]')
parser.add_argument('-minus', default=0.6, type=float, help='Minimum threshold')
parser.add_argument('-gpuid', default=1, type=int, help='GPUid')
parser.add_argument('-save_graph', action="store_true", help='save lattice in new_j')
parser.add_argument('-multi_process', action="store_true", help='enable multi processing')
parser.add_argument('-n_cpu', default=25, type=int, help='number of threads')
parser.add_argument('-dataset', default='data/dataset_small.json', help='path of dataset')
parser.add_argument('-lm_dictionary', default='data/LM_coco.dict', help='dictionary file of language model')
parser.add_argument('-lm_model', default='data/LM_coco.pth', help='path of language model')
# Fixed user-visible typo in help text: 'wheather' -> 'whether'.
parser.add_argument('-use_elmo', action='store_true', help='whether use ELMo')
if __name__ == '__main__':
    opt = parser.parse_args()
    # Load the dataset twice: `j` is annotated in place (hidden states),
    # while `new_j` receives the generated references/lattices and is what
    # gets written back out.
    with open(opt.dataset, 'r') as f:
        j = json.load(f)
    with open(opt.dataset, 'r') as f:
        new_j = json.load(f)
    print('========= READ DATA COMPLETE ===========')
    file_name = 'data/dataset_%s_%s_%.2f.json'%(opt.order_method, opt.align_method, opt.minus)
    if opt.align_method == 'hard':
        # Hard (LCS) alignment needs no language model.
        lm_model = None
        dictionary = None
    elif opt.align_method == 'soft':
        # Soft alignment needs per-token hidden states: either ELMo or a
        # pretrained RNN language model + its dictionary.
        if opt.use_elmo:
            elmo = init_elmo(opt.gpuid)
            load_hiddens_from_elmo(j, elmo, gpu=opt.gpuid)
        else:
            with open(opt.lm_dictionary, 'rb') as f:
                dictionary = pickle.load(f)
            with open(opt.lm_model) as f:
                lm_model = torch.load(f, map_location=lambda storage, loc: storage)
            lm_model.cuda(opt.gpuid)
            lm_model.device = opt.gpuid
            load_hiddens(j, lm_model, dictionary)
    else:
        print('wrong align method')
        exit()
    print('Save to file:', file_name)
    if opt.multi_process:
        pool = Pool(opt.n_cpu)
        n_left = 0
        sentences_pool = []
        idxs = []
    t0 = time.time()
    sentid = 0
    # NOTE(review): in multi-process mode every task in a batch is launched
    # with the same `sentid`, so sentid values can repeat across images in
    # one batch — confirm whether sentid uniqueness matters downstream.
    for idx in range(len(j['images'])):
        if j['images'][idx]['split'] == 'test' or j['images'][idx]['split'] == 'val':
            # This example is in test set or validation set
            continue
        sentences = j['images'][idx]['sentences']
        simi_mat = None
        if opt.multi_process:
            # Queue work asynchronously; harvest once n_cpu tasks are queued.
            if opt.save_graph:
                tokens = [s['tokens'] for s in sentences]
                if opt.align_method == 'soft':
                    hiddens = [s['hidden'] for s in sentences]
                else:
                    hiddens = None
                sentences_pool.append(pool.apply_async(generate_lattice, (tokens, hiddens, opt.order_method, opt.align_method, True, False, opt.minus, simi_mat,)))
            else:
                sentences_pool.append(pool.apply_async(generate_refs, (sentences, opt.order_method, opt.align_method, opt.minus, sentid, simi_mat)))
            idxs.append(idx)
            if n_left < opt.n_cpu:
                n_left += 1
                continue
            else:
                # Batch full: collect results and write them into new_j.
                for idx, new_sentences in zip(idxs, [p.get(99999999) for p in sentences_pool]):
                    if opt.save_graph:
                        G = new_sentences
                        new_j['images'][idx]['lattice'] = nx.node_link_data(G)
                        t1 = time.time()
                        print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0))
                    else:
                        sentid += len(new_sentences)
                        bleus = [s['bleu'] for s in new_sentences]
                        new_j['images'][idx]['sentences'] = new_sentences
                        t1 = time.time()
                        print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0), '#refs:', len(new_sentences))
                    t0 = t1
                n_left = 0
                sentences_pool = []
                idxs = []
        else:
            # Single-process path: process the image inline.
            if opt.save_graph:
                tokens = [s['tokens'] for s in sentences]
                if opt.align_method == 'soft':
                    hiddens = [s['hidden'] for s in sentences]
                else:
                    hiddens = None
                G = generate_lattice(tokens, hiddens, opt.order_method, opt.align_method, minus=opt.minus, simi_mat=simi_mat, get_G=True)
                new_j['images'][idx]['lattice'] = nx.node_link_data(G)
                t1 = time.time()
                print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0))
            else:
                new_sentences = generate_refs(sentences, opt.order_method, opt.align_method, opt.minus, sentid, simi_mat)
                sentid += len(new_sentences)
                bleus = [s['bleu'] for s in new_sentences]
                new_j['images'][idx]['sentences'] = new_sentences
                t1 = time.time()
                print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0), '#refs:', len(new_sentences))
            t0 = t1
    # Flush any still-pending multi-process batch.
    if opt.multi_process and len(sentences_pool) > 0 and len(idxs) > 0:
        for idx, new_sentences in zip(idxs, [p.get(99999999) for p in sentences_pool]):
            if opt.save_graph:
                G = new_sentences
                new_j['images'][idx]['lattice'] = nx.node_link_data(G)
                t1 = time.time()
                print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0))
            else:
                sentid += len(new_sentences)
                bleus = [s['bleu'] for s in new_sentences]
                new_j['images'][idx]['sentences'] = new_sentences
                t1 = time.time()
                print('%d/%d'%(idx, len(j['images'])), 'time:%.3f'%(t1 - t0), '#refs:', len(new_sentences))
            t0 = t1
    with open(file_name, 'w') as f:
        json.dump(new_j, f)
|
997,152 | 2631a28babeec9faa7cbb8a753746ebc8b4cbefb | import nltk #splits a tweet in a list of words
import sys
import os
class Analyzer():
    """Implements word-list based sentiment analysis for tweets."""

    def __init__(self, positives, negatives):
        """Load positive and negative word lists from the given file paths.

        Fix: the line filter used `not a or not b`, which is always true
        (a line cannot start with both ';' and '\\n'), so comment lines and
        blank lines were loaded as "words". It now uses `and`, skipping
        ;-comment lines and blank lines as intended.
        """
        self.positives = positives
        self.negatives = negatives
        self.positives_words = []  # list kept for interface compatibility
        self.negatives_words = []
        try:  # load positive words
            with open(self.positives, 'r') as pwords:
                for lines in pwords:
                    if not lines.startswith(';') and not lines.startswith('\n'):
                        self.positives_words.append(lines.strip('\n'))
        except IOError:
            print("Impossible to open positive words text")
        try:  # load negative words
            with open(self.negatives, 'r') as nwords:
                for lines in nwords:
                    if not lines.startswith(';') and not lines.startswith('\n'):
                        self.negatives_words.append(lines.strip('\n'))
        except IOError:
            print("Impossible to open negative words text")
        # Frozen O(1) lookup sets so analyze() doesn't scan lists per token.
        self._positive_set = frozenset(self.positives_words)
        self._negative_set = frozenset(self.negatives_words)

    def analyze(self, text):
        """Return the sentiment score of `text`.

        Each token scores +1 if in the positive list, -1 if in the negative
        list, 0 otherwise; the total is returned.
        """
        score = 0
        tokenizer = nltk.tokenize.TweetTokenizer()
        for word in tokenizer.tokenize(text):
            lowered = word.lower()
            if lowered in self._positive_set:
                score += 1
            if lowered in self._negative_set:
                score -= 1
        return score
|
997,153 | b9f044b5d789a95179008980b7a529403be69a92 | from widgets.gameWidget import GameWidget
from widgets.menuWidget import MenuWidget
from widgets.intermWidget import IntermWidget
from widgets.settingsWidget import SettingsWidget
from widgets.helpDialog import HelpDialog
from widgets.conclusionWidget import ConclusionWidget
from widgets.aboutDialog import AboutDialog
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import random as rand
import sys, utils
from functools import partial
class MainWindow(QMainWindow):
    """Top-level application window.

    Hosts a QStackedWidget that pages between the menu, settings, game,
    intermediate-result and conclusion widgets. Navigation callbacks and the
    shared StageGenerator are attached directly onto the stacked widget so
    child widgets can reach them via their parent.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        self.central_widget = QStackedWidget()
        self.setObjectName("centralWidget")
        self.stageGenerator = utils.StageGenerator(utils.readSettings())
        self.setCentralWidget(self.central_widget)
        self.menu_widget = MenuWidget(self)
        self.central_widget.addWidget(self.menu_widget)
        # Expose navigation hooks to child widgets through the stack widget.
        self.central_widget.backToMenu = self.backToMenu
        self.central_widget.stageGenerator = self.stageGenerator
        self.central_widget.newGame = self.newGame
        self.central_widget.nextStage = self.nextStage
        self.central_widget.conclusionWidget = self.conclusionWidget
        self.central_widget.incScore = self.incScore
        self.central_widget.getScore = self.getScore
        self.menu_widget.trainModeBtn.clicked.connect(partial(self.settingsWidget, test=False))
        self.menu_widget.testModeBtn.clicked.connect(partial(self.settingsWidget, test=True))
        self.menu_widget.helpBtn.clicked.connect(self.showHelpDialog)
        self.menu_widget.aboutBtn.clicked.connect(self.showAboutDialog)
        self.menu_widget.exitBtn.clicked.connect(self.close)
        self.helpShortcut = QShortcut(QKeySequence("f1"), self)
        self.setWindowTitle("MaskTrainer")
        self.setWindowIcon(QIcon("assets/icon.ico"))
        self.setGeometry(200, 200, 640, 640)

    def settingsWidget(self, test):
        """Open the settings page; `test` selects test vs. training mode."""
        settings_widget = SettingsWidget(test=test, parent=self)
        self.central_widget.addWidget(settings_widget)
        self.central_widget.setCurrentWidget(settings_widget)

    def newGame(self, test):
        """Start a new game: reset counters and show the first stage.

        NOTE(review): `.connect` on helpShortcut.activated is called on
        every new game without disconnecting previous slots, so handlers
        accumulate across games (e.g. F1 may still open help in test mode)
        — confirm and consider disconnecting first.
        """
        if test:
            self.helpShortcut.activated.connect(self.nothing)
            self.question = 1
            self.stages = []
            self.total_questions = utils.readSettings()["ANSWERS_COUNT"]
        else:
            self.helpShortcut.activated.connect(self.showHelpDialog)
            self.score = 0
        stage = utils.Stage(self.central_widget.stageGenerator)
        game_widget = GameWidget(stage=stage, test=test, parent=self)
        self.central_widget.addWidget(game_widget)
        self.central_widget.setCurrentWidget(game_widget)

    def nextStage(self, test, stage=None, opts=None, last=False):
        """Advance to the next stage; in test mode record the finished stage
        and flag the final question."""
        if test:
            self.question += 1
            self.stages.append([stage, opts])
            last = (self.question == self.total_questions)
        stage = utils.Stage(self.central_widget.stageGenerator)
        game_widget = GameWidget(stage=stage, test=test, last=last, parent=self)
        self.central_widget.addWidget(game_widget)
        self.central_widget.setCurrentWidget(game_widget)

    def conclusionWidget(self, test, stage, opts):
        """Show the final summary (test mode) or the intermediate result
        page (training mode)."""
        if test:
            self.stages.append([stage, opts])
            conclusion_widget = ConclusionWidget(stages=self.stages, parent=self)
        else:
            conclusion_widget = IntermWidget(stage=stage, opts=opts, parent=self)
        self.central_widget.addWidget(conclusion_widget)
        self.central_widget.setCurrentWidget(conclusion_widget)

    def showHelpDialog(self):
        """Open the help dialog (also bound to F1 in training mode)."""
        helpDialog = HelpDialog(self)
        helpDialog.show()

    def showAboutDialog(self):
        """Open the about dialog."""
        aboutDialog = AboutDialog(self)
        aboutDialog.show()

    def backToMenu(self):
        """Return to the main menu page."""
        self.central_widget.setCurrentWidget(self.menu_widget)

    def incScore(self):
        """Increment the training-mode score."""
        self.score += 1

    def getScore(self):
        """Return the current training-mode score."""
        return self.score

    def nothing(self):
        """No-op slot used to neutralize the help shortcut in test mode."""
        pass
|
997,154 | b491b91bbe888d8de46cbbe2687180d47d60a50a | import unittest
from data.ticker import Ticker
from data.candle import Candle
from data.order_book import OrderBook
from data.trade_history import TradeHistory
from utils.time_utils import get_now_seconds_local, get_now_seconds_utc
from utils.debug_utils import set_logging_level, LOG_ALL_ERRORS
from binance.ticker_utils import get_tickers_binance
from binance.ohlc_utils import get_ohlc_binance
from binance.order_book_utils import get_order_book_binance
from binance.history_utils import get_history_binance
from binance.constants import BINANCE_CURRENCY_PAIRS
class BinancePublicApiTests(unittest.TestCase):
    """Smoke tests for the Binance public API wrappers: each retrieval
    helper should return objects of the expected data class.

    Fix: `assertEquals` is a deprecated alias of `assertEqual` (removed in
    Python 3.12); all assertions now use the canonical name. The strict
    type(x) == T comparison is kept intentionally.
    """

    def setUp(self):
        set_logging_level(LOG_ALL_ERRORS)

    def test_binance_ticker_retrieval(self):
        """Every non-empty ticker must be a Ticker instance."""
        timest = get_now_seconds_local()
        tickers = get_tickers_binance(BINANCE_CURRENCY_PAIRS, timest)
        for ticker in tickers:
            if ticker:
                self.assertEqual(type(ticker), Ticker)

    def test_binance_ohlc_retrieval(self):
        """15-minute candles over the last 900s must be Candle instances."""
        date_end = get_now_seconds_utc()
        date_start = date_end - 900
        for currency in BINANCE_CURRENCY_PAIRS:
            period = "15m"
            candles = get_ohlc_binance(currency, date_start, date_end, period)
            for candle in candles:
                if candle:
                    self.assertEqual(type(candle), Candle)

    def test_binance_order_book_retrieval(self):
        """Each retrieved order book must be an OrderBook instance."""
        timest = get_now_seconds_utc()
        for currency in BINANCE_CURRENCY_PAIRS:
            order_book = get_order_book_binance(currency, timest)
            if order_book:
                self.assertEqual(type(order_book), OrderBook)

    def test_binance_trade_history_retrieval(self):
        """Last-24h trade entries must be TradeHistory instances."""
        today = get_now_seconds_utc()
        yesterday = today - 24 * 3600
        for pair_name in BINANCE_CURRENCY_PAIRS:
            trade_history = get_history_binance(pair_name, yesterday, today)
            for entry in trade_history:
                if entry:
                    self.assertEqual(type(entry), TradeHistory)
|
997,155 | f0b572268b4b1a057e2d1937f23d788e886c5272 | input = """
p cnf 9 13
-1 -2 0
-1 2 3 0
-1 4 -5 0
-6 -1 7 0
-1 -2 0
-1 2 3 0
-1 4 -5 0
-6 -1 7 0
1 -2 0
1 -3 0
1 -4 0
8 1 2 0
-9 1 2 0
"""
output = """
SAT
"""
|
997,156 | 49cdc573df0be9f24dc012f3839bd5c18608618d | import sys
from PyQt4 import QtGui, QtCore, QtSql
import mysql_table_meta
class ExpenseModel(QtSql.QSqlTableModel):
    """SQL table model for the expense table, with header labels taken from
    mysql_table_meta.columns and edits committed on field change."""

    def __init__(self, tablename, db, parent=None):
        """Bind the model to `tablename` on connection `db` and load it."""
        QtSql.QSqlTableModel.__init__(self, parent, db)
        self.tablename = tablename
        self.setTable(tablename)
        # Commit each edit as soon as the field changes.
        self.setEditStrategy(QtSql.QSqlTableModel.OnFieldChange)
        self.select()
        # enumerate replaces the original manual `count` counter.
        for col_idx, col in enumerate(mysql_table_meta.columns):
            self.setHeaderData(col_idx, QtCore.Qt.Horizontal, col)
997,157 | e583dfc1a8fba4507f04503e41502330ca844f79 | #!/usr/bin/env python3
import sys
T = int(sys.stdin.readline())
def main():
    """Solve T 'Revenge of the Pancakes'-style cases from stdin.

    For each case, greedily flip a K-wide window at every '-' scanned left
    to right, then report the flip count, or IMPOSSIBLE when the last K
    pancakes cannot all be made happy.
    """
    for case_no in range(1, T+1):
        global flips, row, i, K
        print("Case #{}: ".format(case_no), end="")
        flips = 0
        tokens = sys.stdin.readline().split(" ")
        row = list(tokens[0])  # list so flip() can mutate in place
        K = int(tokens[1])
        # Greedy pass: only the leftmost unfixed '-' forces a flip here.
        for i in range(0, len(row) - K + 1):
            if row[i] == "-":
                flip()
        # Positions past len-K were never flippable; check the tail.
        if "".join(row[-K:]) == "+" * K:
            print(flips)
        else:
            print("IMPOSSIBLE")
def flip():
    """Flip the K pancakes starting at global index i in place, counting
    the flip. Operates entirely on the module-level globals set by main()."""
    global flips, row, i, K
    flips += 1
    for pos in range(i, i + K):
        row[pos] = "+" if row[pos] == "-" else "-"

if __name__ == "__main__":
    main()
997,158 | b8454c1f0ee2659271d2835f0177c188e00f25c9 | """Permit List module"""
import json
import falcon
import jsend
import sentry_sdk
from screendoor_sdk.screendoor import Screendoor
class PermitList():
"""Permit List class"""
scrndr = None
scrndr_proj_id = None
logger_name = ''
referred_label_map = {
'MOD - Referred' : "Mayor's Office of Disability",
'Planning - Referred' : "Planning Department",
'Fire - Referred' : "Fire Department",
'DPH - Referred' : "Department of Public Health",
'Police - Referred' : "Police Department",
'Environment - Referred' : "Department of the Environment"
}
status_map = {
'Submitted' : 'Submitted',
'Processing' : 'Processing',
'On Hold' : 'On Hold',
'Approved' : 'Approved',
'Build-out' : 'Under Construction'
}
activity_map = {
'retail' : {'text': 'retailer (medical and adult use)',
'value': 'retailer (medical and adult use)'},
'delivery' : {'text': 'delivery only retailer (medical and adult use)',
'value': 'delivery only retail (medical and adult use)'},
'mcd' : {'text': 'medicinal cannabis retailer (medical only)',
'value': 'medical retailer (medical only)'}
}
def __init__(self):
self.logger_name = self.__class__.__name__.lower()
def init_screendoor(self, key, version, host, project_id):
"""initialize screendoor"""
self.scrndr = Screendoor(key, version, host)
self.scrndr_proj_id = project_id
def get_permit_list(self, permit_type):
"""return list of permits"""
self.logger_name += '.get_permit_list.'+permit_type
params = {'per_page': 100, 'page' : 1}
# pylint: disable=line-too-long
params['advanced_search'] = '%5B%7B"name"%3A"form"%2C"placeholder"%3Anull%2C"method"%3A"is"%2C"value"%3A5804%7D%2C%7B"name"%3A"rfdd8a5g7g"%2C"placeholder"%3A"answer_to"%2C"method"%3A"is_any"%2C"value"%3A%5B"retailer+(medical+and+adult+use)"%2C"medical+retailer+(medical+only)"%2C"delivery+only+retail+(medical+and+adult+use)"%5D%7D%5D'
sd_responses = self.scrndr.get_project_responses(self.scrndr_proj_id, params, 500)
sd_responses_context = sd_responses
if isinstance(sd_responses, list):
sd_responses_context = {
'length': len(sd_responses),
'data': list(map(lambda x: x.get('sequential_id', ''), sd_responses))}
with sentry_sdk.configure_scope() as scope:
scope.set_tag('logger', self.logger_name)
scope.set_extra('get_permit_list.sd_responses', sd_responses_context)
return self.get_list_transform(sd_responses)
def get_list_transform(self, sd_responses):
"""return a transformed list from screendoor reponses """
permit_list = False
responses_missing = []
sd_fields = {
'activity' : 'dd8a5g7g',
'app_id' : 'uqqrsogr',
'biz_name' : 't00kheyd',
'dba_name' : '60w4ep9y',
'addr' : 'kbqz4189',
'parcel' : 'kvrgbqrl'
}
if isinstance(sd_responses, list):
permit_list = []
for resp in sd_responses:
if (resp.get('responses', False)
and resp['responses'].get(sd_fields['activity'], False)
and (resp['responses'].get(sd_fields['biz_name'], False)
or resp['responses'].get(sd_fields['dba_name'], False))
and (resp.get('status', '') in self.status_map.keys())
):
resp_status = self.status_map[resp.get('status')].lower()
resp_referred = self.get_referred_departments(resp.get('labels'))
item = {
'application_id':'',
'business_name':'',
'dba_name':'',
'address':'',
'parcel':'',
'status':resp_status,
'referred':", ".join(resp_referred)
}
data = resp['responses']
item['application_id'] = str(data.get(sd_fields['app_id']) or '')
if not data.get(sd_fields['app_id']):
item['application_id'] = 'P-' + str(resp['id'])
item['business_name'] = str(data.get(sd_fields['biz_name']) or '')
item['dba_name'] = str(data.get(sd_fields['dba_name']) or item['business_name'])
item['parcel'] = data.get(sd_fields['parcel'], '')
if data.get(sd_fields['addr']) and data.get(sd_fields['addr']).get('street'):
addr = data.get(sd_fields['addr'])
item['address'] = str(addr.get('street') or '')
item['address'] += ', '+str(addr.get('city') or '')
item['address'] += ', '+str(addr.get('state') or '')
item['address'] += ' '+str(addr.get('zipcode') or '')
item['address'] = item['address'].strip(' ,')
if data[sd_fields['activity']] and data[sd_fields['activity']]['checked']:
for applied_permit_type in data[sd_fields['activity']]['checked']:
item[applied_permit_type.lower()] = resp_status
permit_list.append(item)
else:
responses_missing.append(
{'id':resp['id'], 'sequential_id':resp['sequential_id']}
)
with sentry_sdk.configure_scope() as scope:
scope.set_extra('get_list_transform.permit_list_len', len(permit_list))
if responses_missing:
scope.set_extra('get_list_transform.responses_missing', responses_missing)
return permit_list
def get_legacy_list_transform(self, permit_list):
    """Convert a permit list into the legacy keyed-dict format.

    Returns a dict keyed by the upper-cased "<dba_name> <application_id>"
    string, omitting any entry whose only activity is delivery.
    """
    legacy_permit_list = {}
    for item in permit_list:
        # collect activity labels in the legacy retail/delivery/mcd order
        activity_labels = []
        for activity_kind in ('retail', 'delivery', 'mcd'):
            activity = self.activity_map[activity_kind]
            if item.get(activity['value']):
                activity_labels.append(activity['text'])
        legacy_item = {
            'application_id': item['application_id'],
            'dba_name': item['dba_name'],
            'address': item['address'],
            'parcel': item['parcel'],
            'activities': ", ".join(activity_labels),
            'referring_dept': item['referred'],
            'status': item['status'].title(),
        }
        # skip if activity only contains delivery only
        if legacy_item['activities'] == self.activity_map['delivery']['text']:
            continue
        key = (legacy_item['dba_name'] + ' ' + legacy_item['application_id']).strip().upper()
        legacy_permit_list[key] = legacy_item
    return legacy_permit_list
def get_referred_departments(self, labels):
    """Return the list of referred-to departments for the given labels.

    Args:
        labels: iterable of label strings.

    Returns:
        Department names for each label found in ``self.referred_label_map``
        (unknown labels are dropped), in input order, duplicates preserved.
    """
    # Membership test on the dict directly -- no need to materialize keys()
    # into a list; direct indexing is safe because membership was just checked.
    return [self.referred_label_map[label]
            for label in labels
            if label in self.referred_label_map]
def on_get(self, _req, resp, permit_type):
    """on GET request
    return list of permits

    Responds 200 with a jsend success payload for permit_type 'retail'
    ({'list': [...]}) or 'retail_legacy' (legacy keyed dict); any other
    permit_type yields a jsend error and 400.  Outcome is reported to
    Sentry either way.
    """
    # msg doubles as the success flag: it stays False until a permit list
    # was produced, and selects the success/error branch at the end.
    msg = False
    if permit_type in ('retail', 'retail_legacy'):
        permit_list = self.get_permit_list(permit_type)
        # Sort case-insensitively by display name (dba_name, falling back
        # to business_name) then application id.
        # NOTE(review): .sort() runs before the isinstance check below, so
        # get_permit_list is effectively assumed to always return a list
        # here -- confirm, otherwise this raises before the error branch.
        permit_list.sort(key=lambda v:
                         ((v.get('dba_name') if v.get('dba_name')
                           else v.get('business_name', ''))
                          + ' ' + v.get('application_id', '')).upper())
        if isinstance(permit_list, list):
            if permit_type == 'retail_legacy':
                data = self.get_legacy_list_transform(permit_list)
            else:
                data = {'list': permit_list}
            data_json = jsend.success(data)
            msg = 'success ('+str(len(permit_list))+')'
        else:
            # non-list result falls through to the error branch via msg=False
            pass
    if msg is not False:
        sentry_sdk.capture_message(msg, 'info')
        resp.body = json.dumps(data_json)
        resp.status = falcon.HTTP_200
    else:
        msg = 'ERROR'
        sentry_sdk.capture_message(msg, 'error')
        resp.body = json.dumps(jsend.error(msg))
        resp.status = falcon.HTTP_400
|
997,159 | f2e250ba5a2d0e2f5c559fe42fbe0926bdf15670 | """Future-returning APIs for coroutines."""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple, deque
from itertools import chain
from zmq import EVENTS, POLLOUT, POLLIN
import zmq as _zmq
# Record for one pending send/recv/poll operation queued on a socket:
#   future - the Future to resolve when the operation completes
#   kind   - operation name, e.g. 'recv', 'send_multipart', 'poll'
#   kwargs - keyword arguments for the underlying zmq call
#   msg    - outgoing message (send events only; None for recv/poll)
_FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg'))
# These are incomplete classes and need a Mixin for compatibility with an eventloop
# defining the following attributes:
#
#  _Future
#  _READ
#  _WRITE
#  _default_loop()
class _AsyncPoller(_zmq.Poller):
    """Poller that returns a Future on poll, instead of blocking."""

    def poll(self, timeout=-1):
        """Return a Future for a poll event"""
        future = self._Future()
        if timeout == 0:
            # Non-blocking poll: resolve the Future immediately with the
            # current result (or the raised exception).
            try:
                result = super(_AsyncPoller, self).poll(0)
            except Exception as e:
                future.set_exception(e)
            else:
                future.set_result(result)
            return future
        loop = self._default_loop()
        # register Future to be called as soon as any event is available on any socket
        watcher = self._Future()
        # watch raw sockets:
        raw_sockets = []
        def wake_raw(*args):
            if not watcher.done():
                watcher.set_result(None)
        # once the watcher fires (or is cancelled), stop watching raw sockets
        watcher.add_done_callback(lambda f: self._unwatch_raw_sockets(loop, *raw_sockets))
        for socket, mask in self.sockets:
            if isinstance(socket, _zmq.Socket):
                if not isinstance(socket, self._socket_class):
                    # it's a blocking zmq.Socket, wrap it in async
                    socket = self._socket_class.from_socket(socket)
                # register the shared watcher as a 'poll' event on the socket
                if mask & _zmq.POLLIN:
                    socket._add_recv_event('poll', future=watcher)
                if mask & _zmq.POLLOUT:
                    socket._add_send_event('poll', future=watcher)
            else:
                # non-zmq file descriptor: watch it via the event loop
                raw_sockets.append(socket)
                evt = 0
                if mask & _zmq.POLLIN:
                    evt |= self._READ
                if mask & _zmq.POLLOUT:
                    evt |= self._WRITE
                self._watch_raw_socket(loop, socket, evt, wake_raw)
        def on_poll_ready(f):
            # Translate the watcher's completion into the outer Future:
            # cancellation and exceptions propagate; otherwise re-poll
            # non-blockingly to collect the actual event set.
            if future.done():
                return
            if watcher.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if watcher.exception():
                future.set_exception(watcher.exception())
            else:
                try:
                    result = super(_AsyncPoller, self).poll(0)
                except Exception as e:
                    future.set_exception(e)
                else:
                    future.set_result(result)
        watcher.add_done_callback(on_poll_ready)
        if timeout is not None and timeout > 0:
            # schedule cancel to fire on poll timeout, if any
            def trigger_timeout():
                if not watcher.done():
                    watcher.set_result(None)
            timeout_handle = loop.call_later(
                1e-3 * timeout,
                trigger_timeout
            )
            def cancel_timeout(f):
                # tornado timeouts lack .cancel(); use remove_timeout instead
                if hasattr(timeout_handle, 'cancel'):
                    timeout_handle.cancel()
                else:
                    loop.remove_timeout(timeout_handle)
            future.add_done_callback(cancel_timeout)
        def cancel_watcher(f):
            # cancelling the outer Future cancels the internal watcher too
            if not watcher.done():
                watcher.cancel()
        future.add_done_callback(cancel_watcher)
        return future
class _AsyncSocket(_zmq.Socket):
    """Socket subclass whose send/recv/poll return Futures instead of blocking.

    Incomplete without an eventloop mixin supplying _Future, _READ, _WRITE
    and _default_loop() (see the module comment above).
    """
    # Warning : these class variables are only here to allow to call super().__setattr__.
    # They are overridden at instance initialization and not shared in the whole class
    _recv_futures = None      # deque of pending _FutureEvent recv/poll entries
    _send_futures = None      # deque of pending _FutureEvent send/poll entries
    _state = 0                # IO event mask currently being watched
    _shadow_sock = None       # blocking zmq.Socket sharing the same handle
    _poller_class = _AsyncPoller
    io_loop = None
    _fd = None

    def __init__(self, context=None, socket_type=-1, io_loop=None, **kwargs):
        if isinstance(context, _zmq.Socket):
            # positional-socket form: treat the first argument as the
            # socket to wrap rather than a Context
            context, from_socket = (None, context)
        else:
            from_socket = kwargs.pop('_from_socket', None)
        if from_socket is not None:
            # share the underlying handle of the existing socket
            super(_AsyncSocket, self).__init__(shadow=from_socket.underlying)
            self._shadow_sock = from_socket
        else:
            super(_AsyncSocket, self).__init__(context, socket_type, **kwargs)
            # keep a blocking shadow socket for the actual I/O calls
            self._shadow_sock = _zmq.Socket.shadow(self.underlying)
        self.io_loop = io_loop or self._default_loop()
        self._recv_futures = deque()
        self._send_futures = deque()
        self._state = 0
        self._fd = self._shadow_sock.FD
        self._init_io_state()

    @classmethod
    def from_socket(cls, socket, io_loop=None):
        """Create an async socket from an existing Socket"""
        return cls(_from_socket=socket, io_loop=io_loop)

    def close(self, linger=None):
        if not self.closed:
            # cancel every Future still pending on this socket
            for event in list(chain(self._recv_futures, self._send_futures)):
                if not event.future.done():
                    try:
                        event.future.cancel()
                    except RuntimeError:
                        # RuntimeError may be called during teardown
                        pass
            self._clear_io_state()
        super(_AsyncSocket, self).close(linger=linger)
    close.__doc__ = _zmq.Socket.close.__doc__

    def get(self, key):
        result = super(_AsyncSocket, self).get(key)
        if key == EVENTS:
            # reading EVENTS resets the edge trigger; reschedule handling
            self._schedule_remaining_events(result)
        return result
    get.__doc__ = _zmq.Socket.get.__doc__

    def recv_multipart(self, flags=0, copy=True, track=False):
        """Receive a complete multipart zmq message.
        Returns a Future whose result will be a multipart message.
        """
        return self._add_recv_event('recv_multipart',
            dict(flags=flags, copy=copy, track=track)
        )

    def recv(self, flags=0, copy=True, track=False):
        """Receive a single zmq frame.
        Returns a Future, whose result will be the received frame.
        Recommend using recv_multipart instead.
        """
        return self._add_recv_event('recv',
            dict(flags=flags, copy=copy, track=track)
        )

    def send_multipart(self, msg, flags=0, copy=True, track=False, **kwargs):
        """Send a complete multipart zmq message.
        Returns a Future that resolves when sending is complete.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        return self._add_send_event('send_multipart', msg=msg, kwargs=kwargs)

    def send(self, msg, flags=0, copy=True, track=False, **kwargs):
        """Send a single zmq frame.
        Returns a Future that resolves when sending is complete.
        Recommend using send_multipart instead.
        """
        kwargs['flags'] = flags
        kwargs['copy'] = copy
        kwargs['track'] = track
        # NOTE(review): this update duplicates the three assignments just
        # above -- one of the two forms is redundant.
        kwargs.update(dict(flags=flags, copy=copy, track=track))
        return self._add_send_event('send', msg=msg, kwargs=kwargs)

    def _deserialize(self, recvd, load):
        """Deserialize with Futures"""
        f = self._Future()
        def _chain(_):
            """Chain result through serialization to recvd"""
            if f.done():
                return
            if recvd.exception():
                f.set_exception(recvd.exception())
            else:
                buf = recvd.result()
                try:
                    loaded = load(buf)
                except Exception as e:
                    f.set_exception(e)
                else:
                    f.set_result(loaded)
        recvd.add_done_callback(_chain)
        def _chain_cancel(_):
            """Chain cancellation from f to recvd"""
            if recvd.done():
                return
            if f.cancelled():
                recvd.cancel()
        f.add_done_callback(_chain_cancel)
        return f

    def poll(self, timeout=None, flags=_zmq.POLLIN):
        """poll the socket for events
        returns a Future for the poll results.
        """
        if self.closed:
            raise _zmq.ZMQError(_zmq.ENOTSUP)
        p = self._poller_class()
        p.register(self, flags)
        f = p.poll(timeout)
        future = self._Future()
        def unwrap_result(f):
            # unwrap the poller's [(socket, events)] result into the
            # event mask for this socket only
            if future.done():
                return
            if f.cancelled():
                try:
                    future.cancel()
                except RuntimeError:
                    # RuntimeError may be called during teardown
                    pass
                return
            if f.exception():
                future.set_exception(f.exception())
            else:
                evts = dict(f.result())
                future.set_result(evts.get(self, 0))
        if f.done():
            # hook up result if the poll resolved synchronously
            unwrap_result(f)
        else:
            f.add_done_callback(unwrap_result)
        return future

    def _add_timeout(self, future, timeout):
        """Add a timeout for a send or recv Future"""
        def future_timeout():
            if future.done():
                # future already resolved, do nothing
                return
            # raise EAGAIN
            future.set_exception(_zmq.Again())
        self._call_later(timeout, future_timeout)

    def _call_later(self, delay, callback):
        """Schedule a function to be called later
        Override for different IOLoop implementations
        Tornado and asyncio happen to both have ioloop.call_later
        with the same signature.
        """
        self.io_loop.call_later(delay, callback)

    @staticmethod
    def _remove_finished_future(future, event_list):
        """Make sure that futures are removed from the event list when they resolve
        Avoids delaying cleanup until the next send/recv event,
        which may never come.
        """
        for f_idx, (f, kind, kwargs, _) in enumerate(event_list):
            if f is future:
                break
        else:
            # future not found in this list (e.g. shared poll future)
            return
        # "future" instance is shared between sockets, but each socket has its own event list.
        event_list.remove(event_list[f_idx])

    def _add_recv_event(self, kind, kwargs=None, future=None):
        """Add a recv event, returning the corresponding Future"""
        f = future or self._Future()
        if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
            # short-circuit non-blocking calls
            recv = getattr(self._shadow_sock, kind)
            try:
                r = recv(**kwargs)
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            return f
        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from recv_futures to avoid leaks
        self._recv_futures.append(
            _FutureEvent(f, kind, kwargs, msg=None)
        )
        # Don't let the Future sit in _recv_events after it's done
        f.add_done_callback(lambda f: self._remove_finished_future(f, self._recv_futures))
        if hasattr(_zmq, 'RCVTIMEO'):
            # honor the socket's receive timeout, if set
            timeout_ms = self._shadow_sock.rcvtimeo
            if timeout_ms >= 0:
                self._add_timeout(f, timeout_ms * 1e-3)
        if self._shadow_sock.get(EVENTS) & POLLIN:
            # recv immediately, if we can
            self._handle_recv()
        if self._recv_futures:
            self._add_io_state(POLLIN)
        return f

    def _add_send_event(self, kind, msg=None, kwargs=None, future=None):
        """Add a send event, returning the corresponding Future"""
        f = future or self._Future()
        # attempt send with DONTWAIT if no futures are waiting
        # short-circuit for sends that will resolve immediately
        # only call if no send Futures are waiting
        if (
            kind in ('send', 'send_multipart')
            and not self._send_futures
        ):
            flags = kwargs.get('flags', 0)
            nowait_kwargs = kwargs.copy()
            nowait_kwargs['flags'] = flags | _zmq.DONTWAIT
            # short-circuit non-blocking calls
            send = getattr(self._shadow_sock, kind)
            # track if the send resolved or not
            # (EAGAIN if DONTWAIT is not set should proceed with)
            finish_early = True
            try:
                r = send(msg, **nowait_kwargs)
            except _zmq.Again as e:
                if flags & _zmq.DONTWAIT:
                    f.set_exception(e)
                else:
                    # EAGAIN raised and DONTWAIT not requested,
                    # proceed with async send
                    finish_early = False
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            if finish_early:
                # short-circuit resolved, return finished Future
                # schedule wake for recv if there are any receivers waiting
                if self._recv_futures:
                    self._schedule_remaining_events()
                return f
        # we add it to the list of futures before we add the timeout as the
        # timeout will remove the future from recv_futures to avoid leaks
        self._send_futures.append(
            _FutureEvent(f, kind, kwargs=kwargs, msg=msg)
        )
        # Don't let the Future sit in _send_futures after it's done
        f.add_done_callback(lambda f: self._remove_finished_future(f, self._send_futures))
        if hasattr(_zmq, 'SNDTIMEO'):
            # honor the socket's send timeout, if set
            timeout_ms = self._shadow_sock.get(_zmq.SNDTIMEO)
            if timeout_ms >= 0:
                self._add_timeout(f, timeout_ms * 1e-3)
        self._add_io_state(POLLOUT)
        return f

    def _handle_recv(self):
        """Handle recv events"""
        if not self._shadow_sock.get(EVENTS) & POLLIN:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._recv_futures:
            f, kind, kwargs, _ = self._recv_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._recv_futures:
            self._drop_io_state(POLLIN)
        if f is None:
            return
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'recv_multipart':
            recv = self._shadow_sock.recv_multipart
        elif kind == 'recv':
            recv = self._shadow_sock.recv
        else:
            raise ValueError("Unhandled recv event type: %r" % kind)
        # non-blocking: EVENTS said data is ready, but don't risk blocking
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = recv(**kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    def _handle_send(self):
        """Handle send events (mirror image of _handle_recv)."""
        if not self._shadow_sock.get(EVENTS) & POLLOUT:
            # event triggered, but state may have been changed between trigger and callback
            return
        f = None
        while self._send_futures:
            f, kind, kwargs, msg = self._send_futures.popleft()
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._send_futures:
            self._drop_io_state(POLLOUT)
        if f is None:
            return
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'send_multipart':
            send = self._shadow_sock.send_multipart
        elif kind == 'send':
            send = self._shadow_sock.send
        else:
            raise ValueError("Unhandled send event type: %r" % kind)
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = send(msg, **kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    # event masking from ZMQStream
    def _handle_events(self, fd=0, events=0):
        """Dispatch IO events to _handle_recv, etc."""
        zmq_events = self._shadow_sock.get(EVENTS)
        if zmq_events & _zmq.POLLIN:
            self._handle_recv()
        if zmq_events & _zmq.POLLOUT:
            self._handle_send()
        self._schedule_remaining_events()

    def _schedule_remaining_events(self, events=None):
        """Schedule a call to handle_events next loop iteration
        If there are still events to handle.
        """
        # edge-triggered handling
        # allow passing events in, in case this is triggered by retrieving events,
        # so we don't have to retrieve it twice.
        if self._state == 0:
            # not watching for anything, nothing to schedule
            return
        if events is None:
            events = self._shadow_sock.get(EVENTS)
        if events & self._state:
            self._call_later(0, self._handle_events)

    def _add_io_state(self, state):
        """Add io_state to poller."""
        if self._state != state:
            state = self._state = self._state | state
        self._update_handler(self._state)

    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
        self._update_handler(self._state)

    def _update_handler(self, state):
        """Update IOLoop handler with state.
        zmq FD is always read-only.
        """
        self._schedule_remaining_events()

    def _init_io_state(self):
        """initialize the ioloop event handler"""
        self.io_loop.add_handler(self._shadow_sock, self._handle_events, self._READ)
        # process any events already pending at registration time
        self._call_later(0, self._handle_events)

    def _clear_io_state(self):
        """unregister the ioloop event handler
        called once during close
        """
        fd = self._shadow_sock
        if self._shadow_sock.closed:
            # socket handle gone; fall back to the raw FD for removal
            fd = self._fd
        self.io_loop.remove_handler(fd)
|
997,160 | a3551f35df02f6dd1c808a2c3a5d0dc5bcd70383 | from __future__ import print_function
import torch as t
# Demo: with requires_grad set, pytorch tracks ops and autograd computes gradients.
x = t.ones(2, 2, requires_grad=True)
print(x)
y = x.sum()
#y = x[0, 0]*x[0, 0] + x[0,1] + 3*x[1,0] + 8
print(y.grad_fn)
y.backward()
# y = x.sum() = (x[0][0] + x[0][1] + x[1][0] + x[1][1])
# every gradient is 1
print(x.grad)
# grad is accumulated: every call to backward() adds to .grad, so zero it
# (x.grad.data.zero_()) before the next backward -- demonstrated below.
#y.backward()
#print(x.grad)
#y.backward()
#print(x.grad)
#set zeros
#x.grad.data.zero_()
#print(x)
#y.backward()
#print(x.grad)
997,161 | 9abe68eb1a9b1cb7c72c125c65d11b0352b473f8 | /home/joseu/miniconda2/lib/python2.7/warnings.py |
997,162 | 2195dd13acb266e825fea711b81e5d1cef831871 | from slackeventsapi import SlackEventAdapter
from slackclient import SlackClient
import json
import os
import pprint
# Our app's Slack Event Adapter for receiving actions via the Events API
SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"]
slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN, "/slack/events")
# Create a SlackClient for your bot to use for Web API requests
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
CLIENT = SlackClient(SLACK_BOT_TOKEN)
# Running total (bytes) of JSON payloads received from Slack events
total_size = 0
# Example responder to greetings (kept for reference, disabled)
# @slack_events_adapter.on("message")
# def handle_message(event_data):
#     message = event_data["event"]
#     # If the incoming message contains "hi", then respond with a "Hello" message
#     if message.get("subtype") is None and "hi" in message.get('text'):
#         channel = message["channel"]
#         message = "Hello <@%s>! :tada:" % message["user"]
#         CLIENT.api_call("chat.postMessage", channel=channel, text=message)
#
#
# Example reaction emoji echo (kept for reference, disabled)
# @slack_events_adapter.on("reaction_added")
# def reaction_added(event_data):
#     event = event_data["event"]
#     emoji = event["reaction"]
#     channel = event["item"]["channel"]
#     text = ":%s:" % emoji
#     CLIENT.api_call("chat.postMessage", channel=channel, text=text)
def display_total(size):
    # Print the running total of Slack-event payload bytes, in bytes and MB.
    # NOTE: Python 2 print statements -- this module targets Python 2;
    # integer division yields whole megabytes.
    print "-------------------------------------------"
    print size, " bytes sent from slack events so far"
    print "(", size / 1024 / 1024, " MB so far)"
    print "-------------------------------------------"
# Count the cumulative JSON payload size of every reaction_added event.
@slack_events_adapter.on("reaction_added")
def channel_created(event_data):
    # NOTE(review): the function is named channel_created but is bound to
    # "reaction_added" -- likely a leftover name from an earlier experiment.
    global total_size
    json_obj = json.dumps(event_data)
    json_size = len(json_obj)
    total_size = total_size + json_size
    print "The size of this object is: ", len(json_obj)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(event_data)
    display_total(total_size)
    #event = event_data["event"]
    #emoji = event["reaction"]
    #channel = event["item"]["channel"]
    #text = ":%s:" % emoji
    #CLIENT.api_call("chat.postMessage", channel=channel, text=event_data)
# Once we have our event listeners configured, we can start the
# Flask server with the default `/events` endpoint on port 3000
slack_events_adapter.start(port=3000)
|
997,163 | a77dc5a4a63fe3bf70ea2e369e075c7056914055 | # this this imple DDPG method
# http://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
|
997,164 | 96714e75f84d8ec441f81c2f61ad292088a95666 | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class Seq2seq(nn.Module):
    """ Standard sequence-to-sequence architecture with configurable encoder
    and decoder.
    Args:
        encoder (EncoderRNN): object of EncoderRNN
        decoder (DecoderRNN): object of DecoderRNN
        decode_function (func, optional): function to generate symbols from output hidden states (default: F.log_softmax)
    Inputs: input_variable, input_lengths, target_variable, teacher_forcing_ratio
        - **input_variable** (list, option): list of sequences, whose length is the batch size and within which
          each sequence is a list of token IDs. This information is forwarded to the encoder.
        - **input_lengths** (list of int, optional): A list that contains the lengths of sequences
          in the mini-batch, it must be provided when using variable length RNN (default: `None`)
        - **target_variable** (list, optional): list of sequences, whose length is the batch size and within which
          each sequence is a list of token IDs. This information is forwarded to the decoder.
        - **teacher_forcing_ratio** (int, optional): The probability that teacher forcing will be used. A random number
          is drawn uniformly from 0-1 for every decoding token, and if the sample is smaller than the given value,
          teacher forcing would be used (default is 0)
    Outputs: decoder_outputs, decoder_hidden, ret_dict
        - **decoder_outputs** (batch): batch-length list of tensors with size (max_length, hidden_size) containing the
          outputs of the decoder.
        - **decoder_hidden** (num_layers * num_directions, batch, hidden_size): tensor containing the last hidden
          state of the decoder.
        - **ret_dict**: dictionary containing additional information as follows {*KEY_LENGTH* : list of integers
          representing lengths of output sequences, *KEY_SEQUENCE* : list of sequences, where each sequence is a list of
          predicted token IDs, *KEY_INPUT* : target outputs if provided for decoding, *KEY_ATTN_SCORE* : list of
          sequences, where each list is of attention weights }.
    """
    def __init__(self, encoder, decoder, dialog_encoder=None, decode_function=F.log_softmax, cpt_vocab=None,
                 hidden_size=128,
                 mid_size=64, dialog_hidden=128):
        super(Seq2seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.dialog_encoder = dialog_encoder
        self.decode_function = decode_function
        self.cpt_vocab = cpt_vocab
        if self.cpt_vocab:
            # concept-tracking sub-network: concept embeddings plus the
            # attention (layer_*) and forget-gate (forget_*) projections
            self.cpt_embedding = nn.Embedding(len(cpt_vocab.itos), hidden_size)
            self.layer_u = torch.nn.Linear(hidden_size * 2, mid_size)
            self.layer_c = torch.nn.Linear(dialog_hidden, mid_size)
            self.layer_e = torch.nn.Linear(hidden_size, mid_size)
            self.layer_att = torch.nn.Linear(mid_size, 1)
            self.softmax = torch.nn.Softmax(dim=-1)
            self.forget_u = torch.nn.Linear(hidden_size * 2, mid_size, bias=False)
            self.forget_c = torch.nn.Linear(dialog_hidden, mid_size, bias=False)
            self.forget_o = torch.nn.Linear(hidden_size, mid_size, bias=False)
            self.forget = torch.nn.Linear(mid_size, 1, bias=False)
            self.sigmoid = torch.nn.Sigmoid()
        self.hidden = hidden_size

    def flatten_parameters(self):
        # compact RNN weights in memory (needed after moving across devices)
        self.encoder.rnn.flatten_parameters()
        self.decoder.rnn.flatten_parameters()

    def extract_per_utt(self, input_variable, encoder_outputs, eou_index):
        """Gather the encoder state at each <EOU> position, one row per
        utterance, padded with zeros to the max utterance count in the batch.

        Returns (batch x max_num_utt x enc_dim tensor, per-sample <EOU> positions).
        Assumes every sample contains at least one eou_index token -- TODO confirm.
        """
        input_index = input_variable.numpy() if not torch.cuda.is_available() else input_variable.cpu().numpy()
        eou_pos = [np.where(line == eou_index)[0] for line in input_index]
        utt_hidden = [torch.cat([encoder_outputs[j][i].unsqueeze(0) for i in eou_pos[j]], 0) for j in
                      range(input_variable.shape[0])]
        max_num_utt = max([len(line) for line in utt_hidden])
        for i in range(input_variable.shape[0]):
            if torch.cuda.is_available():
                utt_hidden[i] = torch.cat(
                    [utt_hidden[i], torch.zeros([max_num_utt - len(utt_hidden[i]), len(utt_hidden[0][0])]).cuda()])
            else:
                utt_hidden[i] = torch.cat(
                    [utt_hidden[i], torch.zeros([max_num_utt - len(utt_hidden[i]), len(utt_hidden[0][0])])])
        utt_hidden = [line.unsqueeze(0) for line in utt_hidden]
        return torch.cat(utt_hidden, 0), eou_pos

    # return size: batch * num_sentence * num_concept_per_sentence * embedding
    def concept_mapping(self, concept, vocab):
        """Split each sample's concept-id sequence into per-utterance groups
        (delimited by <EOU>, truncated at the first <pad>) and embed them."""
        pad_index = vocab.stoi['<pad>']
        eou_index = vocab.stoi['<EOU>']
        np_concept = concept.numpy() if not torch.cuda.is_available() else concept.cpu().numpy()
        end_pos = []
        for line in np_concept:
            pos = np.where(line == pad_index)[0]
            if len(pos):
                end_pos.append(pos[0])
            else:
                end_pos.append(len(line))
        np_concept = [np_concept[i][:end_pos[i]] for i in range(len(np_concept))]
        concept_batch = []
        embedding_batch = []
        for i in range(len(np_concept)):
            concept_d = []
            utt_pos = np.where(np_concept[i] == eou_index)[0]
            # prepend -1 so the first slice starts at index 0
            utt_pos = np.concatenate([[-1], utt_pos])
            for j in range(1, len(utt_pos)):
                concept_d.append(np_concept[i][utt_pos[j - 1] + 1:utt_pos[j]])
            if torch.cuda.is_available():
                concept_mapped = [self.cpt_embedding(torch.tensor(line).cuda()) for line in concept_d]
            else:
                concept_mapped = [self.cpt_embedding(torch.tensor(line)) for line in concept_d]
            concept_batch.append(concept_d)
            embedding_batch.append(concept_mapped)
        return concept_batch, embedding_batch

    def state_track(self, concept, embedding, dialog, utterance):
        """Multi-turn concept state tracking: per utterance, attend over that
        turn's concepts and blend with the running state via a forget gate.

        Returns (per-sample state vectors, unique concepts, their embeddings,
        state-weighted concept representation per sample).
        """
        max_sentence = max([len(line) for line in embedding])
        one = torch.ones((1, self.hidden))
        batch_size = len(concept)
        g = torch.ones([batch_size, 1])
        if torch.cuda.is_available():
            one = one.cuda()
            g = g.cuda()
        concept = [[list(line) for line in sample] for sample in concept]
        # batch padding: equalize the number of utterances per sample
        for j in range(batch_size):
            if len(embedding[j]) < max_sentence:
                embedding[j].extend((max_sentence - len(embedding[j])) * [one])
            for k in range(max_sentence - len(concept[j])):
                concept[j].append(['<pad>'])
        # equalize the number of concepts per utterance across the batch
        for i in range(max_sentence):
            max_concepts = 0
            for j in range(batch_size):
                num = embedding[j][i].shape[0]
                max_concepts = max_concepts if max_concepts >= num else num
            for j in range(batch_size):
                num = embedding[j][i].shape[0]
                if num < max_concepts:
                    embedding[j][i] = torch.cat([embedding[j][i], torch.cat([one] * (max_concepts - num))])
                    concept[j][i].extend((max_concepts - num) * ['<pad>'])
        embedding_per_step = []
        for i in range(max_sentence):
            emb = []
            for j in range(batch_size):
                emb.append(embedding[j][i].unsqueeze(0))
            embedding_per_step.append(torch.cat(emb, 0))
        # calculating state
        for i in range(max_sentence):
            # dialog context of the previous turn (zeros for the first turn)
            c = dialog[:, i-1] if i != 0 else torch.zeros_like(dialog[:, 0])
            u = utterance[:, i]
            cpt = embedding_per_step[i]
            res_u = self.layer_u(u).unsqueeze(1)
            res_c = self.layer_c(c).unsqueeze(1)
            res_e = self.layer_e(cpt)
            distribution = self.softmax(self.layer_att(res_u + res_c + res_e).reshape(batch_size, -1))
            o = torch.bmm(distribution.unsqueeze(1), cpt).squeeze()
            res_f_u = self.forget_u(u)
            res_f_c = self.forget_c(c)
            res_f_o = self.forget_o(o)
            if i != 0:
                # forget gate g mixes the old state with this turn's distribution
                g = self.sigmoid(self.forget(res_f_c + res_f_u + res_f_o))
                state = torch.cat([state * g, distribution * (1 - g)], 1)
            else:
                state = distribution
        # filtered state template: unique concepts per sample
        concept_linear = []
        embedding_linear = []
        dict_linear = []
        states = []
        for k in range(batch_size):
            i_to_concept = []
            i_to_embedding = []
            concept_to_i = {}
            index = 0
            for i in range(len(concept[k])):
                # NOTE(review): this guard tests concept[i] while iterating
                # concept[k] -- concept[k][i] looks intended; verify.
                if not len(concept[i]):
                    continue
                for cnt, cpt in enumerate(concept[k][i]):
                    if cpt not in i_to_concept:
                        i_to_concept.append(cpt)
                        i_to_embedding.append(embedding[k][i][cnt].unsqueeze(0))
                        concept_to_i[cpt] = index
                        index += 1
            i_to_embedding = torch.cat(i_to_embedding, 0)
            concept_linear.append(i_to_concept)
            embedding_linear.append(i_to_embedding)
            dict_linear.append(concept_to_i)
            prob_dist = torch.zeros((len(i_to_concept)))
            if torch.cuda.is_available():
                prob_dist = prob_dist.cuda()
            states.append(prob_dist)
        # generate final state: sum per-position mass into unique concepts
        for i in range(batch_size):
            cnt = 0
            cpt_dict = dict_linear[i]
            for j in range(len(concept[i])):
                for k, cpt in enumerate(concept[i][j]):
                    if cpt != '<pad>':
                        states[i][cpt_dict[cpt]] += state[i][cnt]
                    cnt += 1
            assert cnt == state.shape[1]
        concept_rep = []
        for i in range(batch_size):
            concept_rep.append(torch.mm(states[i].unsqueeze(0), embedding_linear[i]))
        return states, concept_linear, embedding_linear, concept_rep

    def single_turn_state_track(self, concept_batch, embedding_batch, dialog):
        """Single-turn variant: attend once over all of a sample's concepts
        using the final dialog state; returns the weighted concept vector."""
        concept_linear = []
        emb_linear = []
        zero = torch.zeros((1, self.hidden))
        if torch.cuda.is_available():
            zero = zero.cuda()
        for i in range(len(concept_batch)):
            res = []
            emb = []
            for j in range(len(concept_batch[i])):
                res.extend(list(concept_batch[i][j]))
                emb.extend(embedding_batch[i][j].unsqueeze(0))
            if len(emb) != 0:
                emb = torch.cat(emb, 0)
            else:
                # sample with no concepts: use a zero placeholder row
                emb = zero
            concept_linear.append(res)
            emb_linear.append(emb)
        max_len = max([len(line) for line in concept_linear])
        for i in range(len(concept_batch)):
            if len(emb_linear[i]) < max_len:
                tmp = torch.cat(((max_len - len(emb_linear[i])) * [zero]))
                # print(tmp.shape)
                # print(emb_linear[i].shape)
                emb_linear[i] = torch.cat([emb_linear[i], tmp]).unsqueeze(0)
            else:
                emb_linear[i] = emb_linear[i].unsqueeze(0)
        emb_linear = torch.cat(emb_linear, 0)
        c = dialog[:, -1]
        res_c = self.layer_c(c)
        res_c = res_c.reshape(res_c.shape[0], 1, res_c.shape[-1])
        res_e = self.layer_e(emb_linear)
        #res = self.softmax(self.layer_att(res_e + res_c).reshape(emb_linear.shape[0], emb_linear.shape[1]))
        # NOTE(review): scores are normalized by their raw sum, not softmax;
        # negative scores would make this an unusual "distribution" -- verify.
        res = self.layer_att(res_e + res_c).squeeze()
        s = torch.sum(res, dim=-1).reshape((-1, 1))
        res /= s
        o = torch.bmm(res.unsqueeze(1), emb_linear).squeeze()
        """
        replace = torch.zeros_like(res)
        states = []
        i_to_concept = []
        i_to_embedding = []
        for i in range(len(concept_batch)):
            state = {}
            tmp_concept = []
            tmp_embedding = []
            for j in range(len(concept_linear[i])):
                concept = concept_linear[i][j]
                if concept not in state:
                    state[concept] = res[i][j]
                else:
                    state[concept] = state[concept] + res[i][j]
                if concept not in tmp_concept:
                    tmp_concept.append(concept)
                    tmp_embedding.append(emb_linear[i][j].unsqueeze(0))
            tmp_embedding = torch.cat(tmp_embedding)
            i_to_concept.append(tmp_concept)
            i_to_embedding.append(tmp_embedding)
            states.append(state)
        """
        #return states, i_to_concept, i_to_embedding, o
        return o

    def forward(self, input_variable, input_lengths=None, target_variable=None,
                teacher_forcing_ratio=0, concept=None, vocabs=None, use_concept=False):
        encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)
        if use_concept:
            # concept-augmented path: track concept state across utterances
            # and hand the decoder the copy-mechanism inputs
            src_vocab = vocabs.src_vocab
            tgt_vocab = vocabs.tgt_vocab
            cpt_vocab = vocabs.cpt_vocab
            eou_index = src_vocab.stoi['<EOU>']
            utt_hidden, eou_pos = self.extract_per_utt(input_variable, encoder_outputs, eou_index)
            dialog_output, (context, _) = self.dialog_encoder(utt_hidden)
            concept_batch, embedding_batch = self.concept_mapping(concept, cpt_vocab)
            batch_state, batch_concepts, batch_embeddings, o = self.state_track(concept_batch, embedding_batch, dialog_output, utt_hidden)
            #batch_state, batch_concepts, batch_embeddings, o = self.single_turn_state_track(concept_batch, embedding_batch, dialog_output)
            #o = self.single_turn_state_track(concept_batch, embedding_batch, dialog_output)
            o = torch.cat(o).unsqueeze(1)
            result = self.decoder(inputs=target_variable,
                                  encoder_hidden=encoder_hidden,
                                  encoder_outputs=encoder_outputs,
                                  function=self.decode_function,
                                  teacher_forcing_ratio=teacher_forcing_ratio,
                                  batch_state=batch_state,
                                  batch_concepts=batch_concepts,
                                  batch_embeddings=batch_embeddings,
                                  context=context.squeeze(),
                                  cpt_vocab=cpt_vocab,
                                  tgt_vocab=tgt_vocab,
                                  use_copy=use_concept,
                                  concept_rep=o)
        else:
            result = self.decoder(inputs=target_variable,
                                  encoder_hidden=encoder_hidden,
                                  encoder_outputs=encoder_outputs,
                                  function=self.decode_function,
                                  teacher_forcing_ratio=teacher_forcing_ratio)
        return result
|
997,165 | ce8cda1af230519d4f7f1eede844cfee92061463 |
from muntjac.api import Window
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
class JSApi(Feature):
    """Sampler feature entry describing Muntjac's server-side JavaScript API."""

    def getSinceVersion(self):
        """First Muntjac version providing this feature."""
        return Version.V62

    def getName(self):
        """Display name of the feature."""
        return 'JavaScript API'

    def getDescription(self):
        """HTML description shown in the sampler."""
        return ('<p>You can inject JavaScript in a Muntjac application page'
                ' using the server-side JavaScript API.'
                ' This is especially useful for integration with '
                'third-party libraries and components.</p>')

    def getRelatedAPI(self):
        """API resources linked from this feature."""
        return [APIResource(Window)]

    def getRelatedFeatures(self):
        """No related sampler features."""
        return None

    def getRelatedResources(self):
        """No external resources."""
        return None
|
997,166 | 52eb0a3e6f33c4609cacd056693a663e46bba391 | reg = {}
instru = []
def initReg(r):
if not r in reg:
reg[r] = 0
return r
def runCon(g, h, c):
    """Evaluate the comparison named by *c* between *g* and *h*."""
    outcomes = {
        "==": g == h,
        ">": g > h,
        "<": g < h,
        ">=": g >= h,
        "<=": g <= h,
        "!=": g != h,
    }
    if c in outcomes:
        return outcomes[c]
    # unknown operator: same debug stop as before (prints, waits, returns None)
    print("condition failed")
    input()
# Interpreter for Advent-of-Code day 8 style instructions of the form:
#   <reg> <inc|dec> <amount> if <reg> <cond> <value>
# Tracks the maximum register value at the end and at any point during the run.
with open("input8.txt") as fp:
    for line in fp:
        instru.append(line.strip())
ptr = 0
maxmem = 0
while ptr < len(instru):
    rd = instru[ptr]
    brk = rd.split(' ')
    # he doesnt use registers and ints in the same place in this problem
    # so checking if it is a reg or an int isn't required
    a = initReg(brk[0])
    b = initReg(brk[4])
    op = brk[1]
    x = int(brk[2])
    con = brk[5]
    y = int(brk[6])
    # NOTE(review): the 'cpy' and 'jnz' branches below look carried over
    # from a different puzzle (day 12 assembunny); with the 7-field format
    # parsed above (int(brk[2]), brk[4..6]) they appear unreachable -- verify.
    if op == 'cpy':
        t = brk[1]
        if t.isdigit():
            t = int(t)
        else:
            t = reg[t]
        reg[brk[2]] = t
    if op == 'inc':
        if runCon(reg[b], y, con):
            reg[a] = reg[a] + x
    if op == 'dec':
        if runCon(reg[b], y, con):
            reg[a] = reg[a] - x
    if brk[0] == 'jnz':
        t = brk[1]
        if t.isdigit():
            t = int(t)
        else:
            t = reg[t]
        if t != 0:
            ptr = ptr + int(brk[2])
        else:
            ptr += 1
    # NOTE(review): a taken jnz would also fall through to this ptr += 1,
    # double-advancing -- harmless only because jnz never fires here.
    ptr += 1
    if max(reg.values()) > maxmem:
        maxmem = max(reg.values())
print("Max value in a reg at the end:", max(reg.values()))
print("Max value ever in a reg:", maxmem)
997,167 | 8234e61472b986ba819f1f58b47bc552967abfa0 | from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def find_keyword_sentence(str):
    """Return a keyword for the sentence *str*.

    A sentence containing exactly one space is returned whole; otherwise
    the top TF-IDF term (after stopword removal) is returned.
    """
    # NOTE(review): the one-space shortcut reproduces the original
    # icnt == 1 branch exactly -- confirm that was intended (a zero-space
    # single word still goes through TF-IDF).
    keyword = ''
    space_count = str.count(' ')
    if space_count == 1:
        keyword = str
    else:
        text_tokens = word_tokenize(str)
        lectures = [word for word in text_tokens if not word in stopwords.words()]
        vectorizer = TfidfVectorizer()
        vectorizer.fit_transform(lectures)
        # rank features by inverse document frequency, highest first
        indices = np.argsort(vectorizer.idf_)[::-1]
        features = vectorizer.get_feature_names()
        top_n = 1
        top_features = [features[i] for i in reversed(indices[:top_n])]
        keyword = ''.join(top_features)
    return keyword
def main():
    """Prompt for a sentence and print its extracted keyword."""
    print("Enter the sentence:")
    sentence = input()
    print(find_keyword_sentence(sentence))


if __name__ == '__main__':
    main()
|
997,168 | e94f94e1ba248eb8959e96bdd658d9ad2367d7d5 | #
# Roli Tweet printer
# Written by Andrej Rolih, www.r00li.com
#
import html
import unicodedata
from unidecode import unidecode
def deEmojify(inputString):
    """Reduce *inputString* to printable ASCII.

    ASCII characters pass through unchanged.  Non-ASCII characters are
    transliterated with unidecode when possible; otherwise they become
    "[UNICODE NAME]", with an immediate repeat collapsed to "[=]" and
    unnameable characters rendered as "[x]".
    """
    returnString = ""
    lastAdded = None  # last token appended; used to collapse repeats
    for character in inputString:
        try:
            character.encode("ascii")
            returnString += character
            lastAdded = character
        except UnicodeEncodeError:
            replaced = unidecode(str(character))
            if replaced != '':
                returnString += replaced
                # BUG FIX: record the transliteration too.  Previously
                # lastAdded kept its stale value here, so a later identical
                # "[NAME]" token could wrongly collapse to "[=]" even though
                # other text intervened.
                lastAdded = replaced
            else:
                try:
                    toAdd = "[" + unicodedata.name(character) + "]"
                    if toAdd == lastAdded:
                        returnString += "[=]"
                    else:
                        returnString += toAdd
                        lastAdded = toAdd
                except ValueError:
                    returnString += "[x]"
                    lastAdded = "[x]"
    return returnString
def removeHTMLEscaping(inputString):
    """Decode HTML entities (e.g. "&amp;amp;" -> "&amp;") in *inputString*."""
    unescaped = html.unescape(inputString)
    return unescaped
997,169 | 30cfa6ee1f08fda1b2b995feb85685116d13b4bf | import random
import os
import itertools
# Start from an empty 9x9 grid; '.' marks an unfilled cell.
sudoko = [['.' for _ in range(9)] for _ in range(9)]
def ransudo():
    """Generate a random fully-solved 9x9 X-sudoku via the minisat solver.

    Builds CNF clauses for the standard sudoku rules plus both main
    diagonals, seeds one random cell with a random digit, shuffles the
    clause order (so minisat branches differently each run), solves with
    minisat and decodes the model back into a 9x9 grid of ints.

    Side effects: writes input.txt / output.txt and shells out to minisat.
    Returns: 9x9 list of lists with the solved digits.
    """
    grid = [['.' for _ in range(9)] for _ in range(9)]
    clauses = []

    def number(i, j, k):
        # Literal encoding: digit k at row i, column j -> the integer ijk.
        return 100 * i + 10 * j + k

    def once(prep):
        # Exactly-one constraint: at least one literal, and no two together.
        clauses.append(list(prep))
        for p in itertools.combinations(prep, 2):
            clauses.append([-l for l in p])

    rng = range(1, 10)
    for i in rng:                      # exactly one digit per cell
        for j in rng:
            once([number(i, j, k) for k in rng])
    for i in rng:                      # each digit exactly once per row i
        for k in rng:
            once([number(i, j, k) for j in rng])
    for j in rng:                      # each digit exactly once per column j
        for k in rng:
            once([number(i, j, k) for i in rng])
    for i in range(1, 10, 3):          # each digit exactly once per 3x3 block
        for j in range(1, 10, 3):
            for k in rng:
                once([number(i + di, j + dj, k)
                      for (di, dj) in itertools.product(range(3), repeat=2)])
    for j in rng:                      # anti-diagonal: each digit at most once
        once([number(j, 10 - j, k) for k in rng])
    for a in rng:
        for b in rng:
            for k in rng:
                if a != b:
                    clauses.append([-number(a, 10 - a, k), -number(b, 10 - b, k)])
    for a in rng:                      # main diagonal pairwise exclusions
        for b in rng:
            for k in rng:
                if a != b:
                    clauses.append([-number(a, a, k), -number(b, b, k)])
    # Seed a random digit in a random cell so each run yields a new grid.
    clauses.append(number(random.randint(1, 9), random.randint(1, 9),
                          random.randint(1, 9)))
    # One proper shuffle replaces the nine duplicated manual Fisher-Yates
    # passes of the original -- the point is only to vary clause order.
    random.shuffle(clauses)
    # BUG FIX: the original wrote input.txt, ran minisat on input1.txt ->
    # output1.txt, then read output.txt; the file names now agree end to end.
    with open('input.txt', 'w') as f:
        f.write("p cnf {} {} \n".format(999, len(clauses)))
        for c in clauses:
            lits = c if isinstance(c, list) else [c]   # unit clauses are bare ints
            f.write(" ".join(str(l) for l in lits))
            f.write(" 0\n")
    os.system("minisat input.txt output.txt")
    with open('output.txt', 'r') as f1:
        f2 = ''
        for c in f1:
            if c == 'SAT\n':
                continue
            f2 = f2 + c + "\n"
    # Walk the model string: skip negative literals, collect the digits of
    # positive ones (each positive literal is exactly three digits).
    # NOTE(review): this parser assumes the model line starts with a
    # negative literal (variable 1 is unconstrained) -- same as the
    # original; confirm against actual minisat output.
    flag = 0
    final = ''
    for i in range(0, len(f2) - 3):
        if f2[i] == '-':
            flag = 1
        if f2[i] == ' ':
            flag = 0
        if flag == 0 and f2[i + 1] != '-':
            final = final + f2[i + 1]
    for i in range(0, len(final) - 2, 4):
        lit = int(final[i]) * 100 + int(final[i + 1]) * 10 + int(final[i + 2])
        row, col, digit = lit // 100, (lit // 10) % 10, lit % 10
        grid[row - 1][col - 1] = digit
    return grid
def clausesol(clauses):
    """Write *clauses* as DIMACS CNF, run minisat, return 1 if SAT else 0.

    NOTE(review): only the first line of the minisat output is inspected
    (minisat prints SAT/UNSAT there); an empty output file would make the
    function fall through and return None -- confirm that cannot happen.
    """
    with open('inputtemp.txt','w') as f:
        # 999 is the declared variable count (largest literal is 999).
        f.write("p cnf {} {} \n".format(999,len(clauses)))
        for c in (clauses):
            # Clauses may be lists or bare int literals; stringifying and
            # filtering '[', ']' and ',' leaves space-separated literals
            # in both cases.
            c=str(c)
            for p in c:
                if (p!='[' and p!=']' and p!=','):
                    f.write("".join(p))
            f.write(" 0\n")
    os.system("minisat inputtemp.txt outputtemp.txt")
    with open('outputtemp.txt','r') as f1:
        f2=''
        for c in f1:
            if(c=='SAT\n'):
                return 1 #1 means SAT
            else:
                return 0
def solution(sudoko):
    """Solve the X-sudoku *sudoko* ('.' = empty) with minisat.

    Returns the solved 9x9 grid of ints, or 0 when unsatisfiable.
    Side effects: writes inputtemp1.txt / outputtemp1.txt, shells out
    to minisat.
    """
    clauses=[]
    def number(i,j,k):
        # Literal encoding: digit k at row i, column j -> the integer ijk.
        return 100*i+10*j+k
    def once(prep):
        # Exactly-one: at least one literal, and no two literals together.
        clauses.append([l for l in prep])
        for p in itertools.combinations(prep,2):
            clauses.append([-l for l in p])
    # Exactly one digit per cell.
    for i in range(1,10):
        for j in range(1,10):
            prep=[]
            for k in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per row i.
    for i in range(1,10):
        for k in range(1,10):
            prep=[]
            for j in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per column j.
    for j in range(1,10):
        for k in range(1,10):
            prep=[]
            for i in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per 3x3 block.
    for i in range(1,10,3):
        for j in range(1,10,3):
            for k in range (1,10):
                once([number(i+i1,j+j1,k) for (i1, j1) in itertools.product (range(3),repeat=2)])
    # Anti-diagonal: each digit exactly once.
    for j in range(1,10):
        prep=[]
        for k in range(1,10):
            prep.append(number(j,10-j,k))
        once(prep)
    pp=[]
    for a in range(1,10):
        for b in range(1,10):
            for k in range(1,10):
                if(a!=b):
                    x=number(a,10-a,k)
                    y=number(b,10-b,k)
                    clauses.append([-x,-y])
    # Main diagonal pairwise exclusions.
    pp2=[]
    for a in range(1,10):
        for b in range(1,10):
            for k in range(1,10):
                if(a!=b):
                    x=number(a,a,k)
                    y=number(b,b,k)
                    clauses.append([-x,-y])
    # Pin the given clues as unit clauses.
    for i in range(0,9):
        for j in range(0,9):
            if(sudoko[i][j]!='.'):
                clauses.append(number(i+1,j+1,int(sudoko[i][j])))
    with open('inputtemp1.txt','w') as f:
        f.write("p cnf {} {} \n".format(999,len(clauses)))
        for c in (clauses):
            # Lists and bare int literals both serialize to space-separated
            # literals once '[', ']' and ',' are filtered out.
            c=str(c)
            for p in c:
                if (p!='[' and p!=']' and p!=','):
                    f.write("".join(p))
            f.write(" 0\n")
    os.system("minisat inputtemp1.txt outputtemp1.txt")
    with open('outputtemp1.txt','r') as f1:
        f2=''
        for c in f1:
            if(c=='UNSAT\n'):
                return 0
            if(c=='SAT\n'):
                continue
            else:
                for c1 in c:
                    f2=f2+("".join(c1))
            f2=f2+("\n")
    # Walk the model string: skip negative literals, keep the digit
    # characters of positive ones (each positive literal is 3 digits).
    flag=0
    final=''
    for i in range(0,len(f2)-3):
        if(f2[i]=='-'):
            flag=1
        if(f2[i]==' '):
            flag=0
        if(flag==0 and f2[i+1]!='-'):
            final=final+f2[i+1]
    fin2=[]
    sudoko1=[]
    for i in range(9):
        l=[]
        for j in range(9):
            l.append('.')
        sudoko1.append(l)
    for i in range(0,len(final)-2,4):
        fin2.append(int(final[i])*100+int(final[i+1])*10+int(final[i+2]))
    for i in fin2:
        # First two digits give the cell, the last digit the value.
        first=int((str(i))[0])
        second=int((str(i))[1])
        third=int((str(i))[2])
        sudoko1[first-1][second-1]=third
    return sudoko1
def numsol(sudoko):
    """Return 1 if *sudoko* has a unique solution, 2 if it has several.

    Solves the puzzle once, then adds a blocking clause forbidding that
    exact assignment of the originally-empty cells; if the CNF is still
    satisfiable a second, different solution exists.
    NOTE(review): assumes solution(sudoko) succeeds -- if the puzzle were
    unsatisfiable it returns 0 and the indexing below would fail.
    """
    solved=solution(sudoko)
    # Blocking clause: negate the solved digits of the empty cells.
    neg=[]
    for i in range(9):
        for j in range(9):
            if (sudoko[i][j]=='.'):
                neg.append(-1*((i+1)*100+(j+1)*10+solved[i][j]))
    clauses=[]
    def number(i,j,k):
        # Literal encoding: digit k at row i, column j -> the integer ijk.
        return 100*i+10*j+k
    def once(prep):
        # Exactly-one: at least one literal, and no two literals together.
        clauses.append([l for l in prep])
        for p in itertools.combinations(prep,2):
            clauses.append([-l for l in p])
    # Exactly one digit per cell.
    for i in range(1,10):
        for j in range(1,10):
            prep=[]
            for k in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per row.
    for i in range(1,10):
        for k in range(1,10):
            prep=[]
            for j in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per column.
    for j in range(1,10):
        for k in range(1,10):
            prep=[]
            for i in range(1,10):
                prep.append(number(i,j,k))
            once(prep)
    # Each digit exactly once per 3x3 block.
    for i in range(1,10,3):
        for j in range(1,10,3):
            for k in range (1,10):
                once([number(i+i1,j+j1,k) for (i1, j1) in itertools.product (range(3),repeat=2)])
    # Anti-diagonal exactly-one.
    for j in range(1,10):
        prep=[]
        for k in range(1,10):
            prep.append(number(j,10-j,k))
        once(prep)
    pp=[]
    for a in range(1,10):
        for b in range(1,10):
            for k in range(1,10):
                if(a!=b):
                    x=number(a,10-a,k)
                    y=number(b,10-b,k)
                    clauses.append([-x,-y])
    # Main diagonal pairwise exclusions.
    pp2=[]
    for a in range(1,10):
        for b in range(1,10):
            for k in range(1,10):
                if(a!=b):
                    x=number(a,a,k)
                    y=number(b,b,k)
                    clauses.append([-x,-y])
    # Pin the given clues as unit clauses.
    for i in range(0,9):
        for j in range(0,9):
            if(sudoko[i][j]!='.'):
                clauses.append(number(i+1,j+1,int(sudoko[i][j])))
    clauses.append(neg)
    if(clausesol(clauses)==1): # still SAT with the blocking clause
        return 2               # 2 means multiple solutions
    else:
        return 1               # 1 means a single solution
# Start from a random solved grid and empty cells one by one, keeping the
# puzzle uniquely solvable, until a (locally) minimal puzzle remains.
matrix=ransudo()
while(1):
    count=0
    start=0
    # Pick a random still-filled cell and blank it.
    x=random.randint(0,8)
    y=random.randint(0,8)
    while(matrix[x][y]=='.'):
        x=random.randint(0,8)
        y=random.randint(0,8)
    cell=matrix[x][y]
    matrix[x][y]='.'
    if(numsol(matrix)==2):
        # Removal broke uniqueness: restore and try another cell.
        matrix[x][y]=cell
        continue
    else:
        # Unique: check minimality -- can any other filled cell also be
        # removed while staying unique?
        for i in range(9):
            for j in range(9):
                if(matrix[i][j]!='.'):
                    celltemp=matrix[i][j]
                    matrix[i][j]='.'
                    if(numsol(matrix)==1):
                        # Another removable cell exists -> not minimal yet.
                        matrix[i][j]=celltemp
                        start=1
                        break
                    else:
                        matrix[i][j]=celltemp
                        count=count+1
                        continue
                else:
                    count=count+1
            if(start==1):
                break
        # NOTE(review): this break exits the outer while as soon as a
        # removable cell was found, which looks like it may end the
        # thinning early -- confirm the intended control flow.
        if(start==1):
            break
    if(count==81):
        # Every cell was examined without finding a removable one: minimal.
        break
# Serialize the final minimal puzzle row-major ('.' = empty).
string=''
for i in range(9):
    for j in range(9):
        if (matrix[i][j]=='.'):
            string=string+'.'
        else:
            string=string+str(matrix[i][j])
with open('solution.txt','w') as f:
    for i in range(0,81,9):
        f.write(string[i:i+9])
        f.write("\n")
|
997,170 | 2ecc7ed1e1c2eb3ea116d08c1a0d9769c6d4b2ab | # !/usr/bin/python
# -*-coding:UTF-8 -*-
try:
import thread
except ImportError:
import _thread as thread
from collections import deque
import websocket
from okex_utils import cal_rate, timestamp2string
import codecs
from trade import buyin_less, buyin_more, json, ensure_buyin_less, \
ensure_buyin_more,okFuture, cancel_uncompleted_order, gen_orders_data, send_email
from entity import Coin, Indicator, DealEntity, IndexEntity, IndexIndicator
import time
# 默认币种
coin = Coin("eth", "usdt")
time_type = "quarter"
latest_price = 210
file_transaction, file_deal = coin.gen_future_file_name()
btc_weight = 0.5
eth_weight = 0.3
ltc_weight = 0.2
btc_deque_1s = deque()
btc_deque_1min = deque()
btc_deque_5min = deque()
eth_deque_1s = deque()
eth_deque_1min = deque()
eth_deque_5min = deque()
ltc_deque_1s = deque()
ltc_deque_1min = deque()
ltc_deque_5min = deque()
btc_ind_1s = IndexIndicator("btc", 1)
btc_ind_1min = IndexIndicator("btc", 60)
btc_ind_5min = IndexIndicator("btc", 300)
eth_ind_1s = IndexIndicator("eth", 1)
eth_ind_1min = IndexIndicator("eth", 60)
eth_ind_5min = IndexIndicator("eth", 300)
ltc_ind_1s = IndexIndicator("ltc", 1)
ltc_ind_1min = IndexIndicator("ltc", 60)
ltc_ind_5min = IndexIndicator("ltc", 300)
more = 0
less = 0
buy_price = 0
incr_5m_rate = 0.5
incr_1m_rate = 0.3
write_lines = []
processing = False
def handle_deque(deq, entity, ind):
    """Append *entity* to the sliding-window deque and update the indicator.

    Entries older than ``ind.interval`` seconds relative to the new
    entity's millisecond timestamp are evicted from the left and
    subtracted from the rolling indicator before the entity is added.
    """
    cutoff = entity.timestamp
    while deq:
        oldest = deq.popleft()
        if oldest.timestamp + ind.interval * 1000 > cutoff:
            deq.appendleft(oldest)   # still inside the window -- keep it
            break
        ind.minus_index(oldest)      # expired -- drop from the rolling stats
    deq.append(entity)
    ind.add_index(entity)
def sell_more_suc():
    """Log a successful close of the long position and clear the flag."""
    global more
    now_time = timestamp2string(time.time())
    info = u'发出卖出信号!!!卖出价格:' + str(latest_price) + u', 收益: ' + str(latest_price - buy_price) \
           + ', ' + now_time
    with codecs.open(file_transaction, 'a+', 'utf-8') as f:
        f.writelines(info + '\n')
    more = 0
def sell_more_batch(coin_name, time_type, latest_price, lever_rate = 20):
    """Close the open long position, retrying until no holdings remain.

    First pass places a laddered batch close via future_batchTrade; later
    passes close whatever is still available with single orders.  On each
    successful submission the position is re-read after a short pause.
    Runs in its own thread; *processing* gates re-entry from on_message.
    """
    global processing
    processing = True
    jRet = json.loads(okFuture.future_position_4fix(coin_name+"_usd", time_type, "1"))
    print(jRet)
    flag = True
    ret = u'没有做多订单'
    while len(jRet["holding"]) > 0:
        # Clear stale open orders before (re)submitting the close.
        cancel_uncompleted_order(coin_name, time_type)
        if flag:
            flag = False
            amount = jRet["holding"][0]["buy_available"]
            # type 3 = close long; 5-step price ladder around latest_price.
            order_data = gen_orders_data(latest_price, amount, 3, 5)
            ret = okFuture.future_batchTrade(coin_name + "_usd", time_type, order_data, lever_rate)
        else:
            buy_available = jRet["holding"][0]["buy_available"]
            ret = okFuture.future_trade(coin_name + "_usd", time_type, '', buy_available, 3, 1, lever_rate)
        if 'true' in ret:
            # Give the exchange a moment, then refresh the position.
            time.sleep(2)
            jRet = json.loads(okFuture.future_position_4fix(coin_name + "_usd", time_type, "1"))
    sell_more_suc()
    email_msg = "做多%s批量卖出成交, 时间: %s, 成交结果: %s" \
                % (coin_name, timestamp2string(time.time()), ret)
    thread.start_new_thread(send_email, (email_msg,))
    processing = False
    return True
def sell_less_suc():
    """Log a successful close of the short position and clear the flag."""
    global less
    now_time = timestamp2string(time.time())
    info = u'发出卖出信号!!!卖出价格:' + str(latest_price) + u', 收益: ' + str(buy_price - latest_price) \
           + ', ' + now_time
    with codecs.open(file_transaction, 'a+', 'utf-8') as f:
        f.writelines(info + '\n')
    less = 0
def sell_less_batch(coin_name, time_type, latest_price, lever_rate = 20):
    """Close the open short position, retrying until no holdings remain.

    Mirrors sell_more_batch but uses order type 4 (close short) and the
    sell_available quantity.  Runs in its own thread; *processing* gates
    re-entry from on_message.
    """
    global processing
    processing = True
    jRet = json.loads(okFuture.future_position_4fix(coin_name + "_usd", time_type, "1"))
    flag = True
    ret = u'没有做空订单'
    while len(jRet["holding"]) > 0:
        # Clear stale open orders before (re)submitting the close.
        cancel_uncompleted_order(coin_name, time_type)
        if flag:
            amount = jRet["holding"][0]["sell_available"]
            # type 4 = close short; 5-step price ladder around latest_price.
            order_data = gen_orders_data(latest_price, amount, 4, 5)
            ret = okFuture.future_batchTrade(coin_name + "_usd", time_type, order_data, lever_rate)
            flag = False
        else:
            sell_available = jRet["holding"][0]["sell_available"]
            ret = okFuture.future_trade(coin_name + "_usd", time_type, '', sell_available, 4, 1, lever_rate)
        if 'true' in ret:
            # Give the exchange a moment, then refresh the position.
            time.sleep(2)
            jRet = json.loads(okFuture.future_position_4fix(coin_name + "_usd", time_type, "1"))
    sell_less_suc()
    email_msg = "做空%s批量卖出成交, 时间: %s, 成交结果: %s" \
                % (coin_name, timestamp2string(time.time()), ret)
    thread.start_new_thread(send_email, (email_msg,))
    processing = False
    return True
def connect():
    """Open a raw websocket to OKEx through the configured proxy.

    NOTE(review): proxy host "websocket" and port 10441 look like
    placeholders, and this helper appears unused -- confirm before use.
    """
    sock = websocket.WebSocket()
    target = "wss://real.okex.com"
    sock.connect(target, http_proxy_host="websocket", http_proxy_port=10441)
def on_message(ws, message):
    """Websocket tick handler: update rolling index stats, trade on momentum."""
    # Ignore keepalives and subscription acknowledgements.
    if 'pong' in message or 'addChannel' in message:
        return
    global more, less, buy_price, write_lines
    jmessage = json.loads(message)
    # Channel name looks like ok_sub_futureusd_<coin>_index.
    coin_name = jmessage[0]['channel'].split('_')[3]
    data = jmessage[0]['data']
    index = float(data['futureIndex'])
    timestamp = int(data['timestamp'])
    now_time = timestamp2string(timestamp)
    index_entity = IndexEntity(coin_name, index, timestamp)
    coin_info = "coin: %s, index: %.2f, time: %s" % (coin_name, index, now_time)
    # Feed the tick into the 1s/1min/5min sliding windows for its coin.
    if coin_name == 'btc':
        handle_deque(btc_deque_1s, index_entity, btc_ind_1s)
        handle_deque(btc_deque_1min, index_entity, btc_ind_1min)
        handle_deque(btc_deque_5min, index_entity, btc_ind_5min)
    elif coin_name == 'eth':
        handle_deque(eth_deque_1s, index_entity, eth_ind_1s)
        handle_deque(eth_deque_1min, index_entity, eth_ind_1min)
        handle_deque(eth_deque_5min, index_entity, eth_ind_5min)
    elif coin_name == 'ltc':
        handle_deque(ltc_deque_1s, index_entity, ltc_ind_1s)
        handle_deque(ltc_deque_1min, index_entity, ltc_ind_1min)
        handle_deque(ltc_deque_5min, index_entity, ltc_ind_5min)
    # Rolling average prices per window.
    btc_avg_1s_price = btc_ind_1s.cal_avg_price()
    btc_avg_min_price = btc_ind_1min.cal_avg_price()
    btc_avg_5m_price = btc_ind_5min.cal_avg_price()
    eth_avg_1s_price = eth_ind_1s.cal_avg_price()
    eth_avg_min_price = eth_ind_1min.cal_avg_price()
    eth_avg_5m_price = eth_ind_5min.cal_avg_price()
    ltc_avg_1s_price = ltc_ind_1s.cal_avg_price()
    ltc_avg_min_price = ltc_ind_1min.cal_avg_price()
    ltc_avg_5m_price = ltc_ind_5min.cal_avg_price()
    # Momentum: change of the 1s average versus the 1min / 5min averages.
    btc_1m_change = cal_rate(btc_avg_1s_price, btc_avg_min_price)
    btc_5m_change = cal_rate(btc_avg_1s_price, btc_avg_5m_price)
    eth_1m_change = cal_rate(eth_avg_1s_price, eth_avg_min_price)
    eth_5m_change = cal_rate(eth_avg_1s_price, eth_avg_5m_price)
    ltc_1m_change = cal_rate(ltc_avg_1s_price, ltc_avg_min_price)
    ltc_5m_change = cal_rate(ltc_avg_1s_price, ltc_avg_5m_price)
    # Basket momentum weighted by btc/eth/ltc weights.
    weighted_1min_rate = btc_1m_change * btc_weight + eth_1m_change * eth_weight + ltc_1m_change * ltc_weight
    weighted_5min_rate = btc_5m_change * btc_weight + eth_5m_change * eth_weight + ltc_5m_change * ltc_weight
    # NOTE(review): latest_price is the module-level seed value and is not
    # updated from these index ticks -- confirm order pricing is intended
    # to use it.
    if more == 1 and not processing:
        # Long open: close once 5-min basket momentum turns non-positive.
        if weighted_5min_rate <= 0:
            thread.start_new_thread(sell_more_batch, (coin.name, time_type, latest_price,))
    elif less == 1 and not processing:
        # Short open: close once 5-min basket momentum turns non-negative.
        if weighted_5min_rate >= 0:
            thread.start_new_thread(sell_less_batch, (coin.name, time_type, latest_price,))
    elif weighted_1min_rate >= incr_1m_rate and weighted_5min_rate >= incr_5m_rate \
            and btc_5m_change >= incr_1m_rate and eth_5m_change >= incr_1m_rate and ltc_5m_change >= incr_1m_rate:
        # Broad upward momentum across all three coins: open a long.
        if buyin_more(coin.name, time_type, latest_price):
            more = 1
            thread.start_new_thread(ensure_buyin_more, (coin.name, time_type, latest_price,))
            buy_price = latest_price
            info = u'发出做多信号!!!买入价格:' + str(buy_price) + u', ' + now_time
            with codecs.open(file_transaction, 'a+', 'utf-8') as f:
                f.writelines(info + '\n')
    elif weighted_1min_rate <= -incr_1m_rate and weighted_5min_rate <= -incr_5m_rate \
            and btc_5m_change < -incr_1m_rate and eth_5m_change < -incr_1m_rate and ltc_5m_change < -incr_1m_rate:
        # Broad downward momentum across all three coins: open a short.
        if buyin_less(coin.name, time_type, latest_price):
            less = 1
            thread.start_new_thread(ensure_buyin_less, (coin.name, time_type, latest_price,))
            buy_price = latest_price
            info = u'发出做空信号!!!买入价格:' + str(buy_price) + u', ' + now_time
            with codecs.open(file_transaction, 'a+', 'utf-8') as f:
                f.writelines(info + '\n')
    btc_rate_info = u'btc 1min_rate: %.2f%%, 5min_rate: %.2f%%' % (btc_1m_change, btc_5m_change)
    eth_rate_info = u'eth 1min_rate: %.2f%%, 5min_rate: %.2f%%' % (eth_1m_change, eth_5m_change)
    ltc_rate_info = u'ltc 1min_rate: %.2f%%, 5min_rate: %.2f%%' % (ltc_1m_change, ltc_5m_change)
    weighted_rate_info = u'weighted 1min_rate: %.3f%%, 5min_rate: %.3f%%' % (weighted_1min_rate, weighted_5min_rate)
    print(coin_info)
    print(btc_rate_info)
    print(eth_rate_info)
    print(ltc_rate_info)
    print(weighted_rate_info)
    write_info = coin_info + '\n' + btc_rate_info + '\n' + eth_rate_info + '\n' + ltc_rate_info + '\n' \
                 + weighted_rate_info + '\r\n'
    write_lines.append(write_info)
    # Flush the log buffer every 10 ticks.
    if len(write_lines) >= 10:
        with codecs.open(file_deal, 'a+', 'UTF-8') as f:
            f.writelines(write_lines)
        write_lines = []
def on_error(ws, error):
    """Websocket error callback: log the error to stdout."""
    print(error)
def on_close(ws):
    """Websocket close callback: note the disconnect on stdout."""
    print("### closed ###")
def on_open(ws):
    """On connect, subscribe to the btc/eth/ltc futures index channels.

    The subscription is sent from a short-lived helper thread so the
    websocket callback returns immediately.
    """
    def run(*args):
        ws.send("[{'event':'addChannel','channel':'ok_sub_futureusd_btc_index'},"
                "{'event':'addChannel','channel':'ok_sub_futureusd_eth_index'},"
                "{'event':'addChannel','channel':'ok_sub_futureusd_ltc_index'}]")
        print("thread starting...")
    thread.start_new_thread(run, ())
if __name__ == '__main__':
    websocket.enableTrace(True)
    ws = websocket.WebSocketApp("wss://real.okex.com:10441/websocket",
                                on_message=on_message,
                                on_error=on_error,
                                on_close=on_close)
    ws.on_open = on_open
    # run_forever returns when the connection drops; flush any buffered
    # log lines and reconnect in a loop.
    while True:
        ws.run_forever(ping_interval=20, ping_timeout=10)
        print("write left lines into file...")
        with codecs.open(file_deal, 'a+', 'UTF-8') as f:
            f.writelines(write_lines)
        write_lines = []
997,171 | dbfae1941cdfd84ba0dd6fb5172adc081e579d4c | from app import db
from models import User
# Seed two default accounts; "sesamo" is the shared dev placeholder password.
for username in ("pierre", "paris"):
    db.session.add(User(username, username + "@libert.xyz", "sesamo"))
db.session.commit()
|
997,172 | 1a02e011d18cd322dc70d2cfd5fcb25b2a830c68 | from pathlib import Path
import pickle
import logging
import numpy as np
from scipy import sparse
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import data.MLP400AV.mlpapi as mlpapi
def load(schema='A2', path_to_ds='../data/MLP400AV/'):
    """Load the MLP400AV train/validation/test splits as pandas frames."""
    loader = mlpapi.MLPVLoader(schema, fileformat='pandas', directory=path_to_ds)
    return loader.get_mlpv()
def load_dataframe():
    """Return ((train_X, train_y), (val_X, val_y), (test_X, test_y)).

    The first call builds the splits from the raw dataset and caches them
    in av400tuple.pickle; later calls just unpickle that cache.
    NOTE: unpickling trusts the cache file -- only use a trusted disk.
    """
    data_pickle = Path("av400tuple.pickle")
    if not data_pickle.exists():
        logging.info("loading data structure from RAW")
        train_data, val_data, test_data = load()
        train_y = train_data['label'].tolist()
        val_y = val_data['label'].tolist()
        test_y = test_data['label'].tolist()
        # Keep only the document columns; labels were extracted above.
        for frame in (train_data, val_data, test_data):
            frame.drop(['k_author', 'u_author', 'label'], axis=1, inplace=True)
        logging.info("load data structure completed")
        # BUG FIX: the file objects handed to pickle were never closed;
        # context managers release the handles deterministically.
        with open(data_pickle, mode="wb") as fh:
            pickle.dump([train_data, val_data, test_data, train_y, val_y, test_y], fh)
        logging.info("dumped all data structure in " + str(data_pickle))
    else:
        logging.info("loading data structure from PICKLE")
        with open(data_pickle, mode="rb") as fh:
            [train_data, val_data, test_data, train_y, val_y, test_y] = pickle.load(fh)
        logging.info("load data structure completed")
    return (train_data, train_y), (val_data, val_y), (test_data, test_y)
def transform_tuple(X_train, X_val, X_test, vectorizer: 'CountVectorizer'):
    """Fit *vectorizer* on the training documents and vectorize all splits.

    The vectorizer is fitted on the concatenation of the known- and
    unknown-author document columns of the training frame, then applied
    column-wise to each split.  Returns the three transformed frames.
    """
    # BUG FIX: Series.append() was removed in pandas 2.0; build the fit
    # corpus with pd.concat instead.
    corpus = pd.concat([X_train['k_doc'], X_train['u_doc']])
    vectorizer.fit(corpus)
    train = X_train.apply(vectorizer.transform)
    val = X_val.apply(vectorizer.transform)
    test = X_test.apply(vectorizer.transform)
    return train, val, test
def data_vector_sbs(vectorizer):
    """Vectorize each split and stack known/unknown doc vectors side by side."""
    (train_data, train_y), (val_data, val_y), (test_data, test_y) = load_dataframe()
    vecs = transform_tuple(train_data, val_data, test_data, vectorizer)
    stacked = [sparse.hstack((v['k_doc'], v['u_doc'])).tocsr() for v in vecs]
    return (stacked[0], train_y), (stacked[1], val_y), (stacked[2], test_y)
def data_vector_diff(vectorizer):
    """Vectorize each split and take the known-minus-unknown doc difference."""
    (train_data, train_y), (val_data, val_y), (test_data, test_y) = load_dataframe()
    vecs = transform_tuple(train_data, val_data, test_data, vectorizer)
    diffed = [(v['k_doc'] - v['u_doc']).tocsr() for v in vecs]
    return (diffed[0], train_y), (diffed[1], val_y), (diffed[2], test_y)
def main():
    """CLI entry point -- intentionally a no-op for now."""
    return None


if __name__ == '__main__':
    main()
|
997,173 | fb7d3884efe999a946fc3b8c885cc2f263061be2 | import numpy
import scipy
import scipy.stats
import matplotlib.pyplot as plt
def mean_confidence_interval_uncorrelation_student(data, a=0.05):
    """Mean and half-width of the (1 - a) Student-t confidence interval
    for an uncorrelated sample *data*."""
    size = len(data)
    avg = numpy.mean(data)
    std_err = scipy.stats.sem(data)          # standard error of the mean
    half = std_err * scipy.stats.t.ppf(1 - a / 2., size - 1)
    return avg, half
def mean_confidence_interval_uncorrelation_normal(data, a=0.05):
    """Mean and half-width of the (1 - a) normal-quantile confidence
    interval for an uncorrelated sample *data*.

    NOTE(review): the sqrt(sem * sqrt(n)) scaling is unusual -- confirm it
    matches the lab methodology it was transcribed from.
    """
    size = len(data)
    avg = numpy.mean(data)
    std_err = scipy.stats.sem(data)
    half = numpy.sqrt(std_err * numpy.sqrt(size)) * scipy.stats.norm.ppf(1 - a / 2.) / numpy.sqrt(size)
    return avg, half
def mean_confidence_interval_correlation_noise_student(data, a=0.05, T=50):
    """Mean and Student-t interval half-width for exponentially correlated
    noise with correlation time *T* (formula 6 of the course notes).

    NOTE(review): *a* is used both as the significance level and as a
    multiplier inside the variance sum -- confirm against the methodology.
    """
    size = len(data)
    avg = numpy.mean(data)
    # Weighted sum of the exponential autocorrelation terms.
    acc = sum((1 - (p + 1) / size) * numpy.exp(-(p + 1) / T) * a
              for p in range(size - 1))
    acc = acc * 2 / size + 1 / size
    half = acc * scipy.stats.t.ppf(1 - a / 2., size - 1)
    return avg, half
def mean_confidence_interval_correlation_noise_normal(data, a=0.05, T=50):
    """Mean and normal-quantile interval half-width for exponentially
    correlated noise with correlation time *T* (formula 6 of the notes).

    NOTE(review): *a* doubles as significance level and variance multiplier,
    and the sqrt scaling mirrors the uncorrelated normal variant -- confirm.
    """
    size = len(data)
    avg = numpy.mean(data)
    # Weighted sum of the exponential autocorrelation terms.
    acc = sum((1 - (p + 1) / size) * numpy.exp(-(p + 1) / T) * a
              for p in range(size - 1))
    acc = acc * 2 / size + 1 / size
    half = numpy.sqrt(acc * numpy.sqrt(size)) * scipy.stats.norm.ppf(1 - a / 2.) / numpy.sqrt(size)
    return avg, half
def plot(mx, sx, color1, color2, loc, title):
    """Draw the running mean (color1) with its confidence band (color2)
    in the 2x2 subplot cell *loc*."""
    plt.subplot2grid((2, 2), loc, rowspan=1, colspan=1)
    plt.title(title)
    plt.xlabel('N')
    plt.ylabel('M[x]')
    plt.grid(True)
    for series, col in ((mx, color1), (mx + sx, color2), (mx - sx, color2)):
        plt.plot(series, color=col)
def run(data_white, data_color, T):
    """Plot running confidence intervals for the mean of two noise records.

    Args:
        data_white: uncorrelated ("white") noise samples.
        data_color: correlated ("colored") noise samples.
        T: observation/correlation interval for the correlated estimators.

    Returns:
        Four (mean_series, half_width_series) pairs in the order:
        uncorrelated/Student, correlated/Student,
        uncorrelated/Normal, correlated/Normal.
    Side effects: saves confidence_intervals_for_the_average.png and shows
    the figure non-blocking.
    """
    points = 10  # minimum window length for the running estimates
    n = len(data_white)
    # BUG FIX: `mx1 = mx2 = mx3 = mx4 = numpy.zeros(...)` bound all four
    # names to the SAME array (likewise sx*), so every pass overwrote the
    # previous curves.  Each series now gets its own buffer.
    mx1, mx2, mx3, mx4 = (numpy.zeros(n) for _ in range(4))
    sx1, sx2, sx3, sx4 = (numpy.zeros(n) for _ in range(4))
    fig = plt.figure(figsize=(8, 8))
    fig.suptitle("Доверительные интервалы для среднего\n", fontsize=18, weight='bold')
    fig.patch.set_facecolor('#8DCF91')
    # NOTE(review): the loops below mix len(data_white) and len(data_color)
    # as bounds -- confirm both records are the same length.
    for i in range(len(data_white)):
        mx1[i], sx1[i] = mean_confidence_interval_uncorrelation_student(data_white[:i + points])
    plot(mx1, sx1, 'green', 'magenta', (0, 0), 'некорр., Стьюдент')
    for i in range(len(data_color)):
        mx2[i], sx2[i] = mean_confidence_interval_correlation_noise_student(data_color[:i + points], T=T)
    plot(mx2, sx2, 'green', 'red', (1, 0), 'корр., Стьюдент')
    for i in range(len(data_color)):
        mx3[i], sx3[i] = mean_confidence_interval_uncorrelation_normal(data_white[:i + points])
    plot(mx3, sx3, 'green', 'cyan', (0, 1), 'некорр., Нормальн.')
    for i in range(len(data_color)):
        mx4[i], sx4[i] = mean_confidence_interval_correlation_noise_normal(data_color[:i + points], T=T)
    plot(mx4, sx4, 'green', 'blue', (1, 1), 'корр., Нормальн.')
    plt.tight_layout()
    plt.savefig("confidence_intervals_for_the_average.png", dpi=100)
    plt.show(block=False)
    return (mx1, sx1), (mx2, sx2), (mx3, sx3), (mx4, sx4)
|
997,174 | d8c205837a3f972549fd6abd2f19de3de81c87ac | c = int(input())
print(int(c * 9 / 5 + 32))
|
997,175 | 45434c593e8f88fbdbd68242ba2bf065c1d66f4e | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from person.templates import *
urlpatterns = [
    # Landing page rendered straight from the template -- no view logic.
    url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
    url(r'^polls/', include('polls.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^custom/', include('person.urls')),
    url(r'^users/', include('users.urls')),
]
997,176 | d2419a6ffaaead852f4b1f0baabbe80dc62b89ef | from pkg_resources import DistributionNotFound, get_distribution
from mpirical.decorator import mpirun # noqa: F401
try:
    # Resolve the installed distribution's version at import time.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed (e.g. running from a source checkout);
    # leave __version__ unset rather than guessing.
    pass
|
997,177 | aa6d95423d68b34f1ca1865749b7758ae3808ceb | """
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <http://code.google.com/p/flowblade>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module contains utility methods for creating GUI objects.
"""
import pygtk
pygtk.require('2.0');
import gtk
import time
import threading
import appconsts
import respaths
import translations
TWO_COLUMN_BOX_HEIGHT = 20
def bold_label(str):
    """Return a gtk.Label whose text is rendered bold via Pango markup."""
    lbl = gtk.Label(bold_text(str))
    lbl.set_use_markup(True)
    return lbl
def bold_text(str):
    """Wrap *str* in Pango bold markup."""
    return "".join(["<b>", str, "</b>"])
def get_left_justified_box(widgets):
    """Pack *widgets* at the left edge; a stretchy empty label fills the rest."""
    box = gtk.HBox()
    for child in widgets:
        box.pack_start(child, False, False, 0)
    box.pack_start(gtk.Label(), True, True, 0)
    return box
def get_right_justified_box(widgets):
    """Pack *widgets* against the right edge of a new HBox."""
    box = gtk.HBox()
    box.pack_start(gtk.Label(), True, True, 0)  # stretchy filler pushes widgets right
    for child in widgets:
        box.pack_start(child, False, False, 0)
    return box
def get_sides_justified_box(widgets, count_of_widgets_on_the_left=1):
    """Pack the first *count_of_widgets_on_the_left* widgets at the left
    edge and the remainder at the right, separated by a stretchy filler."""
    box = gtk.HBox()
    for position, child in enumerate(widgets, start=1):
        box.pack_start(child, False, False, 0)
        if position == count_of_widgets_on_the_left:
            box.pack_start(gtk.Label(), True, True, 0)
    return box
def get_centered_box(widgets):
    """Pack *widgets* horizontally centered between two stretchy fillers."""
    box = gtk.HBox()
    box.pack_start(gtk.Label(), True, True, 0)
    for child in widgets:
        box.pack_start(child, False, False, 0)
    box.pack_start(gtk.Label(), True, True, 0)
    return box
def get_single_column_box(widgets):
    """Stack *widgets* vertically, each left-justified in its own row."""
    column = gtk.VBox()
    for child in widgets:
        column.pack_start(get_left_justified_box([child]), False, False, 0)
    column.pack_start(gtk.Label(), True, True, 0)
    return column
def get_two_column_box(widget1, widget2, left_width):
    """Row with a fixed-width left cell (*widget1*) and a stretchy right
    cell (*widget2*)."""
    left_cell = get_left_justified_box([widget1])
    left_cell.set_size_request(left_width, TWO_COLUMN_BOX_HEIGHT)
    row = gtk.HBox()
    row.pack_start(left_cell, False, True, 0)
    row.pack_start(widget2, True, True, 0)
    return row
def get_two_column_box_right_pad(widget1, widget2, left_width, right_pad):
    """Like get_two_column_box, but with *right_pad* pixels of fixed
    padding after the right-hand widget."""
    left_cell = get_left_justified_box([widget1])
    left_cell.set_size_request(left_width, TWO_COLUMN_BOX_HEIGHT)
    right_cell = gtk.HBox()
    right_cell.pack_start(get_left_justified_box([widget2]), True, True, 0)
    right_cell.pack_start(get_pad_label(right_pad, 5), False, False, 0)
    row = gtk.HBox()
    row.pack_start(left_cell, False, True, 0)
    row.pack_start(right_cell, True, True, 0)
    return row
def get_checkbox_row_box(checkbox, widget2):
    """Left-justified row holding a checkbox and its companion widget."""
    row = gtk.HBox()
    row.pack_start(checkbox, False, False, 0)
    row.pack_start(widget2, False, False, 0)
    row.pack_start(gtk.Label(), True, True, 0)  # filler keeps the pair left
    return row
def get_two_row_box(widget1, widget2):
    """Two stacked rows: *widget1* left-justified on top, *widget2* below."""
    column = gtk.VBox(False, 2)
    column.pack_start(get_left_justified_box([widget1]), False, False, 4)
    column.pack_start(widget2, False, False, 0)
    return column
def get_image_button(img_file_name, width, height):
    """Fixed-size button showing *img_file_name* from the resources dir."""
    icon = gtk.image_new_from_file(respaths.IMAGE_PATH + img_file_name)
    inner = gtk.HBox()
    inner.pack_start(icon, False, False, 0)
    btn = gtk.Button()
    btn.add(inner)
    btn.set_size_request(width, height)
    return btn
def get_pad_label(w, h):
    """Empty label used as a fixed w x h spacer."""
    label = gtk.Label()
    label.set_size_request(w, h)
    return label
def get_multiplied_color(color, m):
    """
    Used to create lighter and darker hues of colors.

    Returns a new (r, g, b) tuple with the first three channels of
    *color* each scaled by factor *m*.
    """
    red, green, blue = color[0], color[1], color[2]
    return (red * m, green * m, blue * m)
def get_slider_row(editable_property, listener, slider_name=None):
    """Build a "<name>: [slider][spinbox]" editor row for an editable property.

    Returns (row_widget, hslider). The adjustment and its handler id are
    stashed on editable_property so callers can disconnect the listener later.
    """
    adjustment = editable_property.get_input_range_adjustment()
    editable_property.value_changed_ID = adjustment.connect("value-changed", listener) # patching in to make available for disconnect
    editable_property.adjustment = adjustment # patching in to make available for disconnect
    hslider = gtk.HScale()
    hslider.set_adjustment(adjustment)
    hslider.set_draw_value(False)
    spin = gtk.SpinButton()
    spin.set_numeric(True)
    spin.set_adjustment(adjustment)
    hbox = gtk.HBox(False, 4)
    hbox.pack_start(hslider, True, True, 0)
    hbox.pack_start(spin, False, False, 4)
    # Display name may be overridden, then run through the translation table.
    if slider_name == None:
        name = editable_property.get_display_name()
    else:
        name = slider_name
    name = translations.get_param_name(name)
    return (get_two_column_editor_row(name, hbox), hslider)
def get_non_property_slider_row(lower, upper, step, value=0, listener=None):
    """Build a slider+spinbox row for a plain value range (no editable property).

    Returns (hbox, hslider); both widgets share one adjustment.
    """
    hslider = gtk.HScale()
    hslider.set_draw_value(False)
    adjustment = hslider.get_adjustment()
    adjustment.set_lower(lower)
    adjustment.set_upper(upper)
    adjustment.set_step_increment(step)
    adjustment.set_value(value)
    if listener != None:
        adjustment.connect("value-changed", listener) # patching in to make available for disconnect
    spin = gtk.SpinButton()
    spin.set_numeric(True)
    spin.set_adjustment(adjustment)
    hbox = gtk.HBox(False, 4)
    hbox.pack_start(hslider, True, True, 0)
    hbox.pack_start(spin, False, False, 4)
    return (hbox, hslider)
def get_two_column_editor_row(name, editor_widget):
    """Row with a fixed-width "name:" label column and an expanding editor column."""
    label = gtk.Label(name + ":")
    label_box = gtk.HBox()
    label_box.pack_start(label, False, False, 0)
    label_box.pack_start(gtk.Label(), True, True, 0)
    label_box.set_size_request(appconsts.PROPERTY_NAME_WIDTH, appconsts.PROPERTY_ROW_HEIGHT)
    hbox = gtk.HBox(False, 2)
    hbox.pack_start(label_box, False, False, 4)
    hbox.pack_start(editor_widget, True, True, 0)
    return hbox
def get_no_pad_named_frame(name, panel):
    """get_named_frame with all paddings collapsed to zero."""
    return get_named_frame(name, panel, 0, 0, 0)
def get_named_frame_with_vbox(name, widgets, left_padding=12, right_padding=6, right_out_padding=4):
    """Stack widgets in a VBox and wrap them in a named frame."""
    vbox = gtk.VBox()
    for widget in widgets:
        vbox.pack_start(widget, False, False, 0)
    return get_named_frame(name, vbox, left_padding, right_padding, right_out_padding)
def get_named_frame(name, widget, left_padding=12, right_padding=6, right_out_padding=4):
    """
    Gnome style named panel: a bold title label above an indented content
    area. Pass name=None to get just the padded content with no title.
    """
    if name != None:
        label = bold_label(name)
        label.set_justify(gtk.JUSTIFY_LEFT)
        label_box = gtk.HBox()
        label_box.pack_start(label, False, False, 0)
        label_box.pack_start(gtk.Label(), True, True, 0)
    # Inner alignment indents the content under the title.
    alignment = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    alignment.set_padding(right_padding, 0, left_padding, 0)
    alignment.add(widget)
    frame = gtk.VBox()
    if name != None:
        frame.pack_start(label_box, False, False, 0)
    frame.pack_start(alignment, True, True, 0)
    # Outer alignment adds spacing around the whole frame.
    out_align = gtk.Alignment(0.5, 0.5, 1.0, 1.0)
    out_align.set_padding(4, 4, 0, right_out_padding)
    out_align.add(frame)
    return out_align
def get_in_centering_alignment(widget, xsc=0.0, ysc=0.0):
    """Center widget inside an Alignment; xsc/ysc control how much it scales."""
    align = gtk.Alignment(xalign=0.5, yalign=0.5, xscale=xsc, yscale=ysc)
    align.add(widget)
    return align
def pad_label(w, h):
    """Empty label used as a fixed w x h spacer.

    NOTE(review): duplicate of get_pad_label; kept because both names
    may be in use by callers.
    """
    pad_label = gtk.Label()
    pad_label.set_size_request(w, h)
    return pad_label
def get_sized_button(lable, w, h, clicked_listener=None):
    """Create a fixed-size button, optionally wiring a zero-argument callback.

    Bug fix: GTK's "clicked" signal invokes its handler with the widget
    only (no event argument), so the original two-parameter lambda
    (w, e) raised a TypeError on every click.
    """
    b = gtk.Button(lable)
    if clicked_listener != None:
        b.connect("clicked", lambda w: clicked_listener())
    b.set_size_request(w, h)
    return b
def get_render_button():
    """Button with a record-stock icon and a translated "Render" label.

    NOTE(review): `_` is assumed to be the gettext translation function
    installed globally elsewhere in the application.
    """
    render_button = gtk.Button()
    render_icon = gtk.image_new_from_stock(gtk.STOCK_MEDIA_RECORD,
                                           gtk.ICON_SIZE_BUTTON)
    render_button_box = gtk.HBox()
    render_button_box.pack_start(get_pad_label(10, 10), False, False, 0)
    render_button_box.pack_start(render_icon, False, False, 0)
    render_button_box.pack_start(get_pad_label(5, 10), False, False, 0)
    render_button_box.pack_start(gtk.Label(_("Render")), False, False, 0)
    render_button_box.pack_start(get_pad_label(10, 10), False, False, 0)
    render_button.add(render_button_box)
    return render_button
def get_menu_item(text, callback, data, sensitive=True):
    """Visible menu item whose "activate" signal calls callback(item, data)."""
    item = gtk.MenuItem(text)
    item.connect("activate", callback, data)
    item.show()
    item.set_sensitive(sensitive)
    return item
def add_separetor(menu):
    """Append a visible separator to menu.

    NOTE(review): name is a typo for "add_separator" but is kept because
    callers elsewhere use this spelling.
    """
    sep = gtk.SeparatorMenuItem()
    sep.show()
    menu.add(sep)
def get_gtk_image_from_file(source_path, image_height):
    """Load source_path as a gtk.Image scaled to image_height (width keeps aspect)."""
    img = gtk.Image()
    p_map = get_pixmap_from_file(source_path, image_height)
    img.set_from_pixmap(p_map, None)
    return img
def get_pixmap_from_file(source_path, image_height):
    """Load source_path and return a pixmap scaled to image_height,
    preserving the image's aspect ratio (mask is discarded)."""
    pixbuf = gtk.gdk.pixbuf_new_from_file(source_path)
    icon_width = int((float(pixbuf.get_width()) / float(pixbuf.get_height())) * image_height)
    s_pbuf = pixbuf.scale_simple(icon_width, image_height, gtk.gdk.INTERP_BILINEAR)
    p_map, mask = s_pbuf.render_pixmap_and_mask()
    return p_map
class PulseThread(threading.Thread):
    """Background thread pulsing a progress bar ~10x/second.

    Stop it by setting `running` to False; `exited` flips True once the
    loop has finished.
    """

    def __init__(self, proress_bar):
        # NOTE(review): attribute name kept as-is ("proress_bar", sic) in
        # case external code references it.
        threading.Thread.__init__(self)
        self.proress_bar = proress_bar
        # Bug fix: initialize the flags here so readers never hit an
        # AttributeError by checking them before run() has started.
        self.running = False
        self.exited = False

    def run(self):
        self.exited = False
        self.running = True
        while self.running:
            # pulse() must run with the GTK lock held (non-GUI thread).
            gtk.gdk.threads_enter()
            self.proress_bar.pulse()
            gtk.gdk.threads_leave()
            time.sleep(0.1)
        self.exited = True
|
997,178 | 847d48f395668317ab9dde29c2b3a3382a178f64 | import math
# does input contain a 3.4.5 triangle?
def is_triangle(input):
    """Return True if any three entries of *input* satisfy a^2 + b^2 == c^2.

    Fix: the original compared raw values in list order, so it missed
    triples whose hypotenuse did not appear after the two legs
    (e.g. [5, 3, 4] returned False). Working on sorted squares makes
    detection order-independent. (Parameter name kept for compatibility,
    though it shadows the builtin.)
    """
    squares = sorted(v * v for v in input)
    n = len(squares)
    for i in range(n):
        for j in range(i + 1, n):
            for k in range(j + 1, n):
                if squares[i] + squares[j] == squares[k]:
                    return True
    return False
# print(is_triangle([1, 3, 4, 5, 6, 7, 8]))
def find_missing_number(array):
    """Return every value missing from the ascending run *array*.

    Fixes two defects: the original appended only the FIRST missing value
    of each gap (e.g. [1, 4] -> [2], not [2, 3]) and used a bare
    try/except to paper over the end-of-list index error.
    """
    missing = []
    for current, nxt in zip(array, array[1:]):
        # fill the whole gap between consecutive entries
        missing.extend(range(current + 1, nxt))
    return missing
# print(find_missing_number([1, 2, 3, 5, 6, 7, 8, 10, 11, 12]))
def find_single_dupe(array):
    """Return the distinct elements occurring more than once in *array*.

    Single pass with a seen-set instead of the original O(n^2)
    list.count scan; result order remains unspecified (as before).
    """
    seen = set()
    dupes = set()
    for item in array:
        if item in seen:
            dupes.add(item)
        else:
            seen.add(item)
    return list(dupes)
# print(find_single_dupe([1,2,3,4,4,5,6,7,8,8,9,10]))
# [8, 4]
# Given two arrays, 1,2,3,4,5 and 2,3,1,0,5 find which number is not present in the second array.
def find_common_numbers(array1, array2):
    """Return the distinct elements of array1 that also appear in array2.

    Uses a set for O(1) membership instead of the original O(len(array2))
    list scan per element; result order remains unspecified (as before).
    """
    lookup = set(array2)
    return list({value for value in array1 if value in lookup})
# print(find_common_numbers(array1=[1,2,3,4,5], array2=[2,3,1,0,5]))
# 2,3,4
# How do you find the second highest number in an integer array?
def find_second_higest_number(array):
    """Return the second-highest entry of *array*.

    Fix: sorted() replaces the in-place sort, so the caller's list is no
    longer mutated. Duplicates still count separately (as in the
    original): the second element of the ascending sort is returned even
    if it equals the maximum.
    """
    return sorted(array)[-2]
# print(find_second_higest_number([14,200,33,4,12,6,70,44,23,10]))
# How to find all pairs in an array of integers whose sum is equal to the given number?
def find_pair_sets(array, number):
    """Return every index-ordered pair [a, b] from *array* with a + b == number."""
    return [
        [first, second]
        for idx, first in enumerate(array)
        for second in array[idx + 1:]
        if first + second == number
    ]
# print(find_pair_sets(array=[2,4,6,8,10], number=10))
def format_date(date):
    """Convert a 'DD/MM/YYYY' string to 'YYYYMMDD'."""
    parts = date.split("/")
    return "".join((parts[2], parts[1], parts[0]))
# print(format_date("11/12/2019"))
def atbash(txt):
    """Apply the Atbash cipher (a<->z, b<->y, ...) to *txt*.

    Case is preserved and non-letters pass through unchanged. A single
    str.translate pass replaces the original per-character list.index
    lookups, which were O(26) each and raised ValueError on accented
    letters such as 'É' (isupper but not in the ASCII alphabet);
    those characters are now left untouched.
    """
    forward = 'abcdefghijklmnopqrstuvwxyz'
    backward = forward[::-1]
    table = str.maketrans(forward + forward.upper(),
                          backward + backward.upper())
    return txt.translate(table)
# print(atbash("Christmas is the 25th of December"))
def is_ascending(string):
    """Scan 3-character windows of *string*; on the first window where
    the shifted-index pattern matches, print the characters and return False.

    NOTE(review): this looks unfinished — the comparisons use j+1/k+1
    rather than j/k (apparent off-by-one), and the function falls off the
    end returning None (not True) when no match is found. Left
    byte-identical pending clarification of the intended behavior.
    """
    string_array = list(string)
    for i in range(len(string_array)):
        # sliding window of at most 3 characters
        array = string_array[i: i + 3]
        for j in range(len(array)):
            for k in range(j + 1, len(array)):
                for l in range(k + 1, len(array)):
                    if array[j + 1] == array[k] and array[k + 1] == array[l]:
                        print(array[j + 1], array[k + 1], array[l])
                        return False
# print(is_ascending("32332432536"))
def fizz_buzz(n):
    """Print the HackerRank 'Weird' classification of n and return None.

    Despite the name this is not classic FizzBuzz: even n in [2, 5] or
    even n above 20 prints 'Not Weird'; everything else prints 'Weird'.
    """
    even = n % 2 == 0
    if even and 2 <= n <= 5:
        verdict = "Not Weird"
    elif even and 6 <= n <= 20:
        verdict = "Weird"
    elif even and n > 20:
        verdict = "Not Weird"
    else:
        verdict = "Weird"
    print(verdict)
# print(fizz_buzz(15))
def is_square(n):
    """Return True iff n is a perfect square, else False.

    Fixes two defects: the original returned None (not False) for
    positive non-squares, and float math.sqrt misclassifies very large
    integers; math.isqrt is exact for all ints.
    """
    return n >= 0 and math.isqrt(n) ** 2 == n
# is_square(26)
def song_decoder(song):
    """Recover the words of a WUB-remixed song string.

    Every run of 'WUB' separators collapses to a single space, with no
    leading or trailing space in the result.
    """
    return " ".join(word for word in song.split("WUB") if word)
# return " ".join(song.replace("WUB", " ").split())
# print(song_decoder("WUBWEWUBAREWUBWUBTHEWUBCHAMPIONSWUBMYWUBFRIENDWUB"))
# print(song_decoder("AWUBBWUBC"))
def find_short(s):
    """Return the length of the shortest whitespace-separated word in s."""
    lengths = [len(word) for word in s.split()]
    return min(lengths)
# print(find_short("bitcoin take over the world maybe who knows perhaps"))
def likes(names):
    """Format the 'X likes this' summary for a list of names.

    Output matches the original exactly, including its double space in
    the three-name case ("A, B,  and C like this").
    """
    count = len(names)
    if count == 0:
        return "no one likes this"
    if count == 1:
        return f"{names[0]} likes this"
    if count == 2:
        return f"{names[0]} and {names[1]} like this"
    if count == 3:
        return f"{names[0]}, {names[1]},  and {names[2]} like this"
    return f"{names[0]}, {names[1]} and {count - 2} others like this"
# print(likes(['Alex', 'Jacob']))
def alphabet_position(text):
    """Return the 1-based alphabet positions of the ASCII letters in
    *text*, space-separated; all other characters are skipped.

    Replaces the original's bare except and per-character rebuilt-list
    index lookup with a direct ord() computation. The explicit 'a'-'z'
    range check preserves the original behavior of skipping non-ASCII
    letters (which previously fell through via ValueError).
    """
    return ' '.join(
        str(ord(ch) - ord('a') + 1)
        for ch in text.lower()
        if 'a' <= ch <= 'z'
    )
# print(alphabet_position("The sunset sets at twelve o' clock."))
import operator
def high(x):
    """Return the word of *x* with the highest letter score (a=1 ... z=26).

    Ties go to the earliest such word, as with the original dict-based max.
    """
    def score(word):
        return sum(ord(ch) - 96 for ch in word)

    scores = {word: score(word) for word in x.split(' ')}
    return max(scores, key=scores.get)
# print(high('man i need a taxi up to ubud'))
x = 'man i need a taxi up to ubud'  # scratch value left over from testing high()
# print(ord("b")-96)
def to_camel_case(text):
    """Camel-case *text*: every non-alphabetic character becomes a word
    break, the first word keeps its original casing, and each following
    word is capitalized (e.g. 'the-Stealth-Warrior' -> 'theStealthWarrior').

    Cleanups vs the original: the if/else whose branches both appended
    the character is collapsed, and the no-op `word.capitalize()` loop
    (results were discarded) is removed. Behavior is unchanged,
    including digits acting as separators.
    """
    if not text:
        return text
    # Replace every non-letter with a space, then split on single spaces
    # (consecutive separators yield empty words, which capitalize to '').
    words = ''.join(ch if ch.isalpha() else ' ' for ch in text).split(' ')
    head, *rest = words
    return head + ''.join(word.capitalize() for word in rest)
# print(to_camel_case('the-Stealth-Warrior'))
# print(to_camel_case("A-B-C"))
import string
def is_pangram(s):
    """Return True iff *s* contains every letter a-z (case-insensitive)."""
    lowered = s.lower()
    return all(letter in lowered for letter in 'abcdefghijklmnopqrstuvwxyz')
# pangram = "ABCD45EFGH,IJK,LMNOPQR56STUVW3XYZ"
# print(is_pangram(pangram))
def unique_in_order(iterable):
    """Collapse runs of equal consecutive elements of *iterable* into one,
    returning a list in first-seen order.

    (Matches the original exactly: a leading None element would be
    dropped, because the run tracker starts out as None.)
    """
    collapsed = []
    last_seen = None
    for item in iterable:
        if item == last_seen:
            continue
        collapsed.append(item)
        last_seen = item
    return collapsed
# print(unique_in_order('AAAABBBCCDAABBB'))
from collections import Counter
def duplicate_encode(word):
    """Encode each character of *word* as '(' if it occurs once
    (case-insensitive) and ')' otherwise."""
    lowered = word.lower()
    counts = Counter(lowered)
    encoded = []
    for ch in lowered:
        encoded.append('(' if counts[ch] == 1 else ')')
    return ''.join(encoded)
# print(duplicate_encode("recede"))
# print(duplicate_encode("Success"))
def duplicate_count(text):
    """Count the distinct characters (case-insensitive) that occur more
    than once in *text*."""
    return sum(1 for occurrences in Counter(text.lower()).values()
               if occurrences > 1)
# print(duplicate_count("indivisibility"))
# print(duplicate_count("abcdea"))
def iq_test(numbers):
    """Return the 1-based position of the number whose parity differs
    from all the others in the space-separated string *numbers*."""
    parities = [int(token) % 2 for token in numbers.split(" ") if token.isnumeric()]
    # majority even -> the lone odd stands out, and vice versa
    if parities.count(0) > 1:
        return parities.index(1) + 1
    return parities.index(0) + 1
# print(iq_test("2 4 7 8 10"))
def tower_builder(n_floors):
    """Print an n_floors-line tower of asterisks and return None.

    NOTE(review): floor 0 prints no stars, and the inner countdown makes
    the padding grow quadratically with n_floors, so this is not the
    classic centered pyramid — confirm the intended shape before reuse.
    """
    result = []  # NOTE(review): never used
    count = 0
    while count < n_floors:
        star = "*" * count
        space = ''
        space_index = n_floors
        while space_index > 0:
            space += " " * (n_floors - (space_index - 1))
            space_index -= 1
        print(space + star + space)
        count += 1
# print(tower_builder(5))
def reverse(x):
    """Reverse the digits of the integer x, preserving its sign.

    Fix: the original sliced x like a sequence (so reverse(-123) raised
    TypeError — ints aren't subscriptable) and discarded the result of
    int(y), returning a string with a trailing '-' for negatives.
    """
    sign = -1 if x < 0 else 1
    return sign * int(str(abs(x))[::-1])
# print(reverse(-123))
def solution(n):
    """Convert a positive integer to its Roman-numeral string."""
    values = [
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    ]
    pieces = []
    for value, numeral in values:
        repeats, n = divmod(n, value)
        pieces.append(numeral * repeats)
    return ''.join(pieces)
# print(solution(89))
import re
def pig_it(text):
    """Pig-latinise each purely alphabetic word of *text* (first letter
    moves to the end, then 'ay'); other tokens pass through unchanged."""
    def convert(word):
        return word[1:] + word[0] + 'ay' if word.isalpha() else word

    return " ".join(convert(word) for word in text.split(" "))
pig_it('Pig latin is cool!')
def to_weird_case(string):
    """Upper-case every even-indexed character of each word, with indices
    restarting at 0 for every word."""
    weird_words = []
    for word in string.split(" "):
        weird_words.append(''.join(
            ch.upper() if pos % 2 == 0 else ch
            for pos, ch in enumerate(word)
        ))
    return " ".join(weird_words)
# print(to_weird_case('This is a test'))
def increment_string(strng):
    """Increment the trailing number of *strng*, keeping its zero padding;
    append '1' when there is no trailing number."""
    stem = strng.rstrip('0123456789')
    digits = strng[len(stem):]
    if not digits:
        return stem + "1"
    incremented = int(digits) + 1
    return stem + str(incremented).zfill(len(digits))
#print(increment_string("foobar00"))
def make_readable(seconds):
    """Format a second count as zero-padded 'HH:MM:SS' (hours may exceed 23)."""
    hours = int(seconds / 3600)
    minutes = int(seconds % 3600 / 60)
    secs = int(seconds % 3600 % 60)
    return f"{hours:0>2}:{minutes:0>2}:{secs:0>2}"
#print(make_readable(86399))
|
997,179 | daf7cf68445703f157ca63a7590b4c6bbc57398f | #EXERCISE 1
class Cat:
    """A cat with a name and an age; `species` is shared class state."""

    species = 'mammal'

    def __init__(self, name, age):
        self.name = name
        self.age = age
        print(f"{name} Initialized!")

    def oldest(self, *args):
        """Print and return the maximum age among the cats passed in *args."""
        top_age = max(cat.age for cat in args)
        print(f"The oldest cat is {top_age} years old")
        return top_age
#Instantiate the Cat object with 3 cats
cat1 = Cat("Garfield", 50)
cat2 = Cat("Leono", 72)
cat3 = Cat("Felix", 45)
#Some checks...
print(cat1.name)
print(cat2.age)
print(cat3.name)
#Create a function that finds the oldest cat
# NOTE: oldest() ignores its receiver, so both calls below print the
# same result (72) — the oldest of the three cats passed in.
cat1.oldest(cat1,cat2,cat3)
cat2.oldest(cat1,cat2,cat3)
#EXERCISE 2
class Dog:
    """A dog that can bark and jump twice its own height."""

    def __init__(self, nameDog, heightDog):
        self.nameDog = nameDog
        self.heightDog = heightDog
        print(f"{nameDog} Initialized")

    def talk(self):
        """Bark."""
        print("WOUAF!!")

    def jump(self):
        """Print and return the jump height in cm (double the dog's height)."""
        jump_height = self.heightDog * 2
        print(f"{self.nameDog} jumps {jump_height}cm")
        return jump_height
davids_dog = Dog("Rex",50)
print(davids_dog.nameDog)
print(davids_dog.heightDog)
sarahs_dog = Dog("Teacup",20)
print(sarahs_dog.nameDog)
print(sarahs_dog.heightDog)
# Compare heights and tag each dog with a `winner` flag; note the
# attribute is added dynamically here, not declared on the Dog class.
if davids_dog.heightDog > sarahs_dog.heightDog:
    davids_dog.winner=True
    sarahs_dog.winner=False
    print(f"The winner is {davids_dog.nameDog}")
else:
    davids_dog.winner=False
    sarahs_dog.winner=True
    print(f"The winner is {sarahs_dog.nameDog}")
print(davids_dog.winner)
print(sarahs_dog.winner)
#EXERCISE 3
class Zoo:
    """A named zoo holding a list of animal names."""

    def __init__(self, zooName):
        self.zooName = zooName
        # Bug fix: `animals` was a class attribute, so every Zoo instance
        # shared one mutable list; it is now per-instance state.
        self.animals = []
        print(f"{zooName} Initialized!")
        print(self.animals)

    def addAnimal(self, newAnimal):
        """Add an animal unless it is already in the zoo."""
        if newAnimal in self.animals:
            print("Already included")
        else:
            self.animals.append(newAnimal)
            print(f"{newAnimal} included!")

    def getAnimals(self):
        """Print the current animal list."""
        print(self.animals)

    def sellAnimal(self, animalSold):
        """Remove an animal if present, otherwise report failure."""
        if animalSold in self.animals:
            self.animals.remove(animalSold)
            print(f"Goodbye dear {animalSold}")
        else:
            print("Not possible to sell!!")

    def sortAnimal(self):
        """Print (first-letter, name) pairs sorted alphabetically."""
        pen = sorted((name[0], name) for name in self.animals)
        print(pen)
ramatGanSafari = Zoo("Ramat Gan Safari")
ramatGanSafari.addAnimal("Lion")
ramatGanSafari.addAnimal("Leopard")
ramatGanSafari.addAnimal("Bear")
ramatGanSafari.addAnimal("Penguin")
ramatGanSafari.addAnimal("Elephant")
ramatGanSafari.addAnimal("Lion")  # duplicate: prints "Already included"
ramatGanSafari.getAnimals()
ramatGanSafari.sellAnimal("Bear")
ramatGanSafari.getAnimals()
ramatGanSafari.sortAnimal()
|
997,180 | f47fa202b4e832cd7dbb5db046b8248ffed9d3a3 | from django.conf.urls import include, url
from asgc_resource import views
urlpatterns = [
url(r'^primary_info/$', views.primaryinfo_list),
url(r'^primary_info/(?P<hostname>[a-z0-9\-]+)/$', views.primaryinfo_list_detail)
]
|
997,181 | 9a9ae6903e0244ece90cb354ed3baa61b8fb9aac | # -*-coding:utf-8-*-
# @Author: Damon0626
# @Time : 19-4-15 下午11:03
# @Email : wwymsn@163.com
# @Software: PyCharm
'''
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
'''
# Determine whether a linked list is a palindrome.
# Starting from the middle, reverse the first or second half, then compare
# the two halves element by element; if they all match, it is a palindrome.
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly linked list."""
    def __init__(self, x):
        self.val = x        # payload value
        self.next = None    # next node, or None at the tail
class Solution(object):
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        O(n) time / O(1) space palindrome check: locate the middle with
        fast/slow pointers, reverse the second half in place, then
        compare it node-by-node with the front half.
        NOTE: the list is left with its second half reversed.
        """
        # 1. Find the middle node: fast moves two steps per slow's one,
        #    so slow lands on the middle when fast runs out.
        fast = head
        slow = head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        # 2. Reverse the second half (from slow onward); `node` ends up
        #    as the head of the reversed half.
        node = None
        while slow:
            # nxt = slow.next
            # slow.next = node
            # node=slow
            # slow = nxt
            # The tuple assignment below performs the same rotation as the
            # commented steps: the right-hand side is fully evaluated
            # before any name is rebound.
            slow.next, node, slow = node, slow, slow.next
        # 3. Compare the reversed back half against the front half.
        while node and head:
            if node.val != head.val:
                return False
            node = node.next
            head = head.next
        return True
|
997,182 | f09f4084de667bc962c00040ba2600d47978737e | # -*- coding: utf-8 -*-
# ---------- Libraries --------------
# base library for dash
import dash
import dash_core_components as dcc #contains base dash components
import dash_html_components as html #Contains the html tags
import plotly.express as px
import plotly.graph_objs as go
import pandas as pd #For data management
from dash.dependencies import Input, Output #For callbacks
import numpy as np #For data processing. ex: To pass data from matlab to pandas
from scipy.io import loadmat as lm #To load matlab data
import calendar #For handling dates
import psycopg2 #For connection to the database
import psycopg2.extras
import getpass #For entering passwords
# ---------- EXTERNAL CSS --------------
# external css
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
#------------ LOADING DATA-------------
#The data that will be processed with pandas is loaded
#mat = lm('/home/braulio/Documentos/workspace/python/pruebasDash/d_08_2_1_1.mat')
#df = pd.DataFrame(np.stack((mat['Data'])))
#df['time']= pd.to_datetime(df['time'], format='%Y%m%d%H%M%S')
# connection to the database
pw = getpass.getpass();
# establish connection
conn = psycopg2.connect(dbname='tutorial', user='postgres', password=pw, host='localhost')
# cursor object allows querying of database
# server-side cursor is created to prevent records to be downloaded until explicitly fetched
cursor_datasetBridge = conn.cursor('datasetBridge', cursor_factory=psycopg2.extras.DictCursor)
cursor_datasetBridge.execute("SELECT * FROM datasetPuente WHERE time BETWEEN '2008-03-31 23:00' and '2008-03-31 23:59'")
# fetch records from database
ride_length_datasetBridge = cursor_datasetBridge.fetchall()
df = pd.DataFrame(np.array(ride_length_datasetBridge),
columns = ['time', 'sensor_01','sensor_02','sensor_03','sensor_04','sensor_05','sensor_06','sensor_07','sensor_08','sensor_09','sensor_10','sensor_11','sensor_12','sensor_13','sensor_14','sensor_15','sensor_16'])
daterange = pd.date_range(start=df['time'].min(),end=df['time'].max(),freq='T')
sensor=list(df)[1:]
time_zone = 'Etc/GMT+4'
#------------- INIT APP ----------------
# The app is initialized
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
#---------- LOCAL FUNCTIONS ------------
#general functions
def unixTimeMillis(dt):
    ''' Convert a naive datetime (treated as UTC) to a unix timestamp
        in whole seconds (the name says millis, but callers rely on
        seconds). '''
    seconds = calendar.timegm(dt.timetuple())
    return seconds
def unixToDatetime(unix):
    ''' Convert a unix timestamp in seconds to a pandas Timestamp. '''
    timestamp = pd.to_datetime(unix, unit='s', origin='unix')
    return timestamp
def getMarks(start, end, Nth=100):
    ''' Returns the marks for labeling.
        Every Nth value will be used.

        NOTE(review): `start` and `end` are ignored — the function always
        iterates the module-level `daterange`; keys are unix seconds,
        values are 'YYYY-MM-DD' strings.
    '''
    result = {}
    for i, date in enumerate(daterange):
        if(i%Nth == 1):
            # Append value to dict
            result[unixTimeMillis(date)] = str(date.strftime('%Y-%m-%d'))
    return result
# The component tree is defined using app.layout
app.layout = html.Div(children=[
html.H1(children='Bridge DashBoard'),
#A Div is generated for each view
html.Div([
html.H2(children='''
Accelerometer
'''),
html.Div([
#A div is generated for the card card-1(contains the card and associated widget)
html.Div([
# html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-card-1',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-sensor-card-1',
className='sensor'),
],
id='div-group-card-1',
className='group'),
html.Div([
html.H3(['MAX']),
html.P(['']),
],id='card-1',className='card'),
# ],className='Card'),
],
className='column',
id='div-card-1'),
#A div is generated for the card card-2(contains the card and associated widget)
html.Div([
# html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-card-2',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-sensor-card-2',
className='sensor'),
],
id='div-group-card-2',
className='group'),
html.Div([
html.H3(['MIN']),
html.P(['']),
],id='card-2',className='card'),
# ],className='Card'),
],
className='column',
id='div-card-2'),
#A div is generated for the card card-3(contains the card and associated widget)
html.Div([
# html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-card-3',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-sensor-card-3',
className='sensor'),
],
id='div-group-card-3',
className='group'),
html.Div([
html.H3(['AVERAGE']),
html.P(['']),
],id='card-3',className='card'),
# ],className='Card'),
],
className='column',
id='div-card-3'),
#A div is generated for the card card-7(contains the card and associated widget)
html.Div([
# html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-card-7',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-sensor-card-7',
className='sensor'),
],
id='div-group-card-7',
className='group'),
html.Div([
html.H3(['AVERAGE']),
html.P(['']),
],id='card-7',className='card'),
# ],className='Card'),
],
className='column',
id='div-card-7'),
],
className='view-card row',
id='card-view-14'),
html.Div([
#A div is generated for the chart histogram-18(contains the chart and associated widget)
html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-accelerometer-26',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-accelerometer-26',
className='sensor'),
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-accelerometer-27',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_02'
),
],
id='div-accelerometer-27',
className='sensor'),
],
id='div-group-25',
className='group'),
html.Div([
html.Div([
dcc.Slider(
id='bin-slider-histogram-18',
min=1,
max=40,
step=1,
value=12,
updatemode='drag',
marks={
10:{'label': '10'},
20:{'label': '20'},
30:{'label': '30'},
40:{'label': '40'},
},
),
]),
html.Div([
dcc.Checklist(
id='bin-auto-histogram-18',
options=[
{'label':'Auto','value':'Auto'}
],
value=['Auto'],
inputClassName='auto__checkbox',
labelClassName='auto__label',
),
html.P(
'# of Bins: Auto',
id='bin-size-histogram-18',
className='auto__p',
),
],style={'display':'inline'}),
html.Div([
dcc.Graph(
id='histogram-18',
config=dict(responsive=False),
),
dcc.RangeSlider(
count=1,
min = unixTimeMillis(daterange.min()),#unix_time_millis(time.min()),
max = unixTimeMillis(daterange.max()),#unix_time_millis(time.max()),
value=[unixTimeMillis(daterange.min()),unixTimeMillis(daterange.max())],
marks=getMarks(daterange.min(),daterange.max()),
id='rangeSlider-histogram-18'
),
]),
],className='Div-slider',
style={'display':'block'}),
],
className='Histogram graph',
id='div-histogram-18'),
#A div is generated for the chart lineChart-6(contains the chart and associated widget)
html.Div([
# A Div is generated for the selection of sensors
html.Div([
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-accelerometer-3',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_01'
),
],
id='div-accelerometer-3',
className='sensor'),
#Se genera un Dropdown por cada sensor
html.Div([
dcc.Dropdown(
id='dropdown-accelerometer-4',
options=[{'label':str(i),'value': i} for i in sensor],
value='sensor_02'
),
],
id='div-accelerometer-4',
className='sensor'),
],
id='div-group-2',
className='group'),
# Div to generate chart and range selection bar
html.Div([
dcc.Graph(
id='lineChart-6',
config=dict(responsive=False),
),
dcc.RangeSlider(
count=1,
min = unixTimeMillis(daterange.min()),#unix_time_millis(time.min()),
max = unixTimeMillis(daterange.max()),#unix_time_millis(time.max()),
value=[unixTimeMillis(daterange.min()),unixTimeMillis(daterange.max())],
marks=getMarks(daterange.min(),daterange.max()),
id='rangeSlider-lineChart-6'
),
]),
],
className='LineChart graph',
id='div-lineChart-6'),
],
className='view-graph',
id='graph-view-14'),
],
className='view',
id='div-view-14'),
],
id='visualization')
@app.callback(
    Output(component_id='card-1',component_property='children'),
    [
        Input(component_id='dropdown-card-1', component_property='value')
    ]
)
def update_card(_sensor_card_1):
    """Refresh the MAX card with the maximum of the selected sensor column.

    NOTE(review): the mask spans the dataframe's full time range, so it
    never filters anything; xaxis/y1axis are computed but unused.
    """
    #mask=(df['time']>= unixToDatetime(df['time'].min().tz_localize(time_zone)) & (df['time']<=unixToDatetime(df['time'].max().tz_localize(time_zone)))
    mask=(df['time']>=df['time'].min())& (df['time']<=df['time'].max())
    myData=df.loc[mask]
    xaxis=myData['time']
    y1axis=myData[_sensor_card_1]
    my_max=max(myData[_sensor_card_1])
    return [
        html.H3('MAX'),
        html.P(my_max),
    ]
@app.callback(
Output(component_id='card-2',component_property='children'),
[
Input(component_id='dropdown-card-2', component_property='value')
]
)
def update_card(_sensor_card_2):
#mask=(df['time']>= unixToDatetime(df['time'].min().tz_localize(time_zone)) & (df['time']<=unixToDatetime(df['time'].max().tz_localize(time_zone)))
mask=(df['time']>=df['time'].min())& (df['time']<=df['time'].max())
myData=df.loc[mask]
xaxis=myData['time']
y1axis=myData[_sensor_card_2]
my_min=min(myData[_sensor_card_2])
return [
html.H3('MIN'),
html.P(my_min),
]
@app.callback(
Output(component_id='card-3',component_property='children'),
[
Input(component_id='dropdown-card-3', component_property='value')
]
)
def update_card(_sensor_card_3):
#mask=(df['time']>= unixToDatetime(df['time'].min().tz_localize(time_zone)) & (df['time']<=unixToDatetime(df['time'].max().tz_localize(time_zone)))
mask=(df['time']>=df['time'].min())& (df['time']<=df['time'].max())
myData=df.loc[mask]
xaxis=myData['time']
y1axis=myData[_sensor_card_3]
my_average=myData[_sensor_card_3].mean()
return [
html.H3('MEAN'),
html.P(my_average),
]
@app.callback(
Output(component_id='card-7',component_property='children'),
[
Input(component_id='dropdown-card-7', component_property='value')
]
)
def update_card(_sensor_card_7):
#mask=(df['time']>= unixToDatetime(df['time'].min().tz_localize(time_zone)) & (df['time']<=unixToDatetime(df['time'].max().tz_localize(time_zone)))
mask=(df['time']>=df['time'].min())& (df['time']<=df['time'].max())
myData=df.loc[mask]
xaxis=myData['time']
y1axis=myData[_sensor_card_7]
my_average=myData[_sensor_card_7].mean()
return [
html.H3('MEAN'),
html.P(my_average),
]
@app.callback(
Output(component_id='histogram-18', component_property='figure'),
[
Input(component_id='dropdown-accelerometer-26', component_property='value'),
Input(component_id='dropdown-accelerometer-27', component_property='value'),
Input(component_id='rangeSlider-histogram-18', component_property='value'),
Input(component_id='bin-slider-histogram-18', component_property='value'),
Input(component_id='bin-auto-histogram-18', component_property='value'),]
)
def update_graph(_accelerometer_26,_accelerometer_27,_histogram_18,bin_value,auto_state):
mask=(df['time']>=unixToDatetime(_histogram_18[0]).tz_localize(time_zone)) & (df['time']<=unixToDatetime(_histogram_18[1]).tz_localize(time_zone))
myData=df.loc[mask]
xaxis=myData['time']
y1axis=myData[_accelerometer_26]
if "Auto" in auto_state:
hist1,bins1=np.histogram(myData[_accelerometer_26],range(int(round(min(myData[_accelerometer_26]))), int(round(max(myData[_accelerometer_26])))))
else:
hist1,bins1=np.histogram(myData[_accelerometer_26], bins=bin_value)
y2axis=myData[_accelerometer_27]
if "Auto" in auto_state:
hist2,bins2=np.histogram(myData[_accelerometer_27],range(int(round(min(myData[_accelerometer_27]))), int(round(max(myData[_accelerometer_27])))))
else:
hist2,bins2=np.histogram(myData[_accelerometer_27], bins=bin_value)
return dict(
data=[
dict(
x=bins1,
y=hist1,
name=str(_accelerometer_26),
type='bar',
),
dict(
x=bins2,
y=hist2,
name=str(_accelerometer_27),
type='bar',
),
],
layout=dict(
title='Histogram1',
barmode='stack',
showlegend=True,
legend=dict(
x=0,
y=1.0
),
margin=dict(l=40, r=0, t=40, b=30),
transition = {'duration': 500},
)
)
@app.callback(
Output(component_id='lineChart-6', component_property='figure'),
[
Input(component_id='dropdown-accelerometer-3', component_property='value'),
Input(component_id='dropdown-accelerometer-4', component_property='value'),
Input(component_id='rangeSlider-lineChart-6', component_property='value')]
)
def update_grahp( _accelerometer_3,_accelerometer_4,_lineChart_6):
mask=(df['time']>=unixToDatetime(_lineChart_6[0]).tz_localize(time_zone)) & (df['time']<=unixToDatetime(_lineChart_6[1]).tz_localize(time_zone))
myData=df.loc[mask]
xaxis=myData['time']
y1axis=myData[_accelerometer_3]
y2axis=myData[_accelerometer_4]
return dict(
data=[
dict(
x=xaxis,
y=y1axis,
name=str(_accelerometer_3),
type='line',
),
dict(
x=xaxis,
y=y2axis,
name=str(_accelerometer_4),
type='line',
),
],
layout=dict(
title='LineChart1',
showlegend=True,
legend=dict(
x=0,
y=1.0
),
margin=dict(l=40, r=0, t=40, b=30),
transition = {'duration': 500},
)
)
# Allows to run the application
if __name__ == '__main__':
    # Dash development server; disable debug for production deployments.
    app.run_server(debug=True)
|
# For each of `cases` input numbers, count how many of its decimal digits
# divide the number evenly, and print that count.
# Fixes vs the original: input() was never converted to int, `raw_input`
# and `print count` were Python 2, the loop read the undefined names `s`
# and `num`, and `digit != 0` compared a str to an int (always true,
# allowing division by zero). Zero digits are now skipped explicitly.
cases = int(input())
for _ in range(cases):
    number = input().strip()
    count = 0
    for digit in number:
        if digit != '0' and int(number) % int(digit) == 0:
            count += 1
    print(count)
997,184 | 819f6ca176e2803737a9e54653b5148acc954eb3 | import os
def list_all_files(root, filenames):
    """Recursively append every file path under *root* to the list *filenames*.

    A non-directory *root* is treated as a file and appended directly.
    """
    if not os.path.isdir(root):
        filenames.append(root)
        return
    for entry in os.listdir(root):
        list_all_files(os.path.join(root, entry), filenames)
|
import json  # kept from the original; useful for dumping `dataset`
# import cv2          # NOTE(review): unused scratch import, left disabled
# import numpy as np  # original said "numoy" (typo); unused, left disabled
import os
import datetime

# NOTE(review): reconstructed from non-compiling scratch code (bare
# `license=`, undefined names like External_ID/obect). The intent appears
# to be building a COCO-style annotation document; image sizes, category
# ids and the bbox source still need to be confirmed.
info = {
    "description": "Vdot_Assets",
    "url": "",
    "version": "",
    "year": 2020,
    "contributor": "leidos",
    "date_created": datetime.datetime.utcnow().isoformat(' '),
}
images = []
annotations = []
licenses = []


def add_image(image_id, width, height, filename):
    """Append one COCO 'images' record."""
    images.append({"id": image_id, "width": width, "height": height,
                   "filename": filename})


def add_annotation(annotation_id, image_id, category_id, bbox):
    """Append one COCO 'annotations' record; bbox is [x, y, w, h]."""
    x, y, w, h = bbox
    annotations.append({"id": annotation_id, "image_id": image_id,
                        "category_id": category_id, "area": w * h,
                        "bbox": bbox, "iscrowd": 0})


# Top-level document tying the sections together; the lists above are
# shared by reference, so records added later are reflected here.
dataset = {"info": info, "images": images,
           "annotations": annotations, "licenses": licenses}
|
997,186 | 2f656dae0f4db58ba1624498bb9a4af4d1e16f7a | #!/usr/bin/python
from math import sin , log10,pow,pi,sqrt
import matplotlib.pyplot as plt
import numpy as np
def plot_model(f_m):
    """Plot each series from the iterable *f_m* on one fresh figure.

    Bug fix: the loop plotted the undefined name `m` instead of the loop
    variable, which raised NameError on the first iteration.
    """
    plt.close('all')
    for series in f_m:
        plt.plot(series)
    #plt.ylabel('DB(Z)')
    #plt.xlabel('Frequency')
    #plt.xscale('log')
    plt.show()
def main(plot_file):
    """Load *plot_file* and plot its contents.

    NOTE(review): `load_data` is not defined or imported anywhere in this
    file — confirm where it is meant to come from.
    """
    plot_model(load_data(plot_file))
if __name__=='__main__':
    import sys
    # Bug fix: the original called main(argv[0]) with `argv` never
    # imported (NameError); and argv[0] is the script name — the data
    # file is the first real argument.
    main(sys.argv[1])
|
997,187 | a5fd65a7ac2b82e3993cb7c1c597db8763505d84 | """
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
from problems.problem_3 import is_prime
def next_prime(n):
    """Yield all primes strictly below n, in increasing order.

    Replaces the per-candidate trial division (flagged "# TODO: slow as a
    snail", and starting its scan at 1) with a sieve of Eratosthenes:
    O(n log log n) total instead of roughly O(n * sqrt(n)). No longer
    depends on the external is_prime helper.
    """
    if n <= 2:
        return
    sieve = bytearray([1]) * n          # sieve[i] == 1 means "i may be prime"
    sieve[0:2] = b"\x00\x00"            # 0 and 1 are not prime
    for p in range(2, int(n ** 0.5) + 1):
        if sieve[p]:
            # knock out multiples starting at p*p; smaller ones are done
            sieve[p * p::p] = bytearray(len(range(p * p, n, p)))
    for p in range(2, n):
        if sieve[p]:
            yield p
# TODO: slow as a snail
if __name__ == '__main__':
    # Project Euler #10: print the sum of all primes below two million.
    np = next_prime(2000000)
    print(sum(np))
|
997,188 | 0e6b7ab56d7a88b776b426e63a22df61d4906b67 | # encoding=utf-8
from admin.route import custom_url
from admin.article.views import article
from admin.article.views import article_classify
from admin.article.views import article_comment
# Admin routes for the article module: article CRUD + list data, category
# management, and comment management.  alias_name carries the human-readable
# (Chinese) menu label for each route.
urlpatterns = [
    custom_url('^article_list$', article.article_lists, name='article_list', alias_name=u'文章管理'),
    custom_url('^article_add$', article.article_add, name='article_add', alias_name=u'添加文章'),
    custom_url('^article_edit$', article.article_edit, name='article_edit', alias_name=u'编辑文章'),
    custom_url('^article_delete$', article.article_delete, name='article_delete', alias_name=u'删除文章'),
    custom_url('^article_lists_data$', article.article_lists_data, name='article_lists_data', alias_name=u'文章列表'),
    custom_url('^article_category$', article_classify.article_category, name='article_category', alias_name=u'文章分类'),
    custom_url('^add_category$', article_classify.add_category, name='add_category', alias_name=u'添加分类'),
    custom_url('^category_delete$', article_classify.category_delete, name='category_delete', alias_name=u'删除分类'),
    custom_url('^article_comment_list$', article_comment.article_comment_list, name='article_comment_list',
               alias_name=u'评论列表'),
    custom_url('^article_comment_list_data$', article_comment.article_comment_list_data,
               name='article_comment_list_data', alias_name=u'评论列表数据'),
    custom_url('^article_comment_delete$', article_comment.article_comment_delete, name='article_comment_delete',
               alias_name=u'删除评论'),
    custom_url('^add_comment$', article_comment.add_comment, name='add_comment',
               alias_name=u'回复评论'),
]
|
997,189 | 268abf34cbc3cfa9a8a9ef704f6eceb3167c0ae6 | from django.conf.urls import url, include
from .views import TestView, PersonViewSet
from rest_framework.routers import DefaultRouter
# NOTE(review): 'touter' looks like a typo for 'router' and r'prson' for
# r'person'.  Left unchanged here because the URL prefix is part of the
# public API and renaming it would break existing clients -- confirm intent
# before fixing.
touter = DefaultRouter()
touter.register(r'prson',PersonViewSet)
urlpatterns = [
    url(r'^test/$', TestView.as_view()),
    url(r'^', include(touter.urls))
]
997,190 | 9d95b718589e02920e3af20037fcd92287d8e2c1 | def is_power_of_two(num):
return num > 0 and (num & (num - 1)) == 0
def is_power_of_three(num):
    """Return True iff *num* is a positive power of three.

    BUG FIXES over the original:
    - num == 0 looped forever (0 % 3 == 0 and 0 / 3 == 0);
      non-positive inputs now return False immediately.
    - true division (/=) produced floats; integer floor division (//=)
      keeps the arithmetic exact and the final comparison an int one.
    """
    if num < 1:
        return False
    while num % 3 == 0:
        num //= 3
    return num == 1
# Demo: 64 is 2**6, so this prints True.
print(is_power_of_two(64))
997,191 | 6003a985d89040dea3a4905a4dc4e7c9a749d888 | #descobrir o tipo do triângulo
def Q1():
    """Read three side lengths and report the triangle type (in Portuguese)."""
    A = int(input("Insira o lado A: "))
    B = int(input("Insira o lado B: "))
    C = int(input("Insira o lado C: "))
    # Triangle inequality: each side must be shorter than the sum of the others.
    if not (A + B > C and A + C > B and B + C > A):
        print("ABC não formam um triângulo")
        return
    if A == B == C:
        print("ABC é um triângulo equilátero")
    elif A == B or A == C or B == C:
        # Exactly two sides equal (all-equal was handled above).
        print("ABC é um triângulo isósceles")
    else:
        print("ABC é um triângulo escaleno")
def main():
    """Entry point: run exercise Q1."""
    Q1()


main()
|
997,192 | 3e720f6f48c359cc1e0c835ffe1fb378884a8c20 | #-
# ==========================================================================
# Copyright 1995,2006,2008 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk
# license agreement provided at the time of installation or download,
# or which otherwise accompanies this software in either electronic
# or hard copy form.
# ==========================================================================
#+
import os
import os.path
import getopt
import sys
import xml.dom.minidom
import string
import re
import array
"""
This example shows how to convert float channels found in cache files in Maya 8.5 and later to
double channels, so that the cache file would then be compatible with the
geometry cache in Maya 8.0. It parses the XML file in addition to the cache data files and
handles caches that are one file per frame as well as one file.
To use:
python cacheFileConverter.py -f mayaCacheFile.xml -o outputFileName
Overview of Maya Caches:
========================
Conceptually, a Maya cache consists of 1 or more channels of data.
Each channel has a number of properties, such as:
- start/end time
- data type of the channel (eg. "DoubleVectorArray" to represents a point array)
- interpretation (eg. "positions" the vector array represents position data, as opposed to per vertex normals, for example)
- sampling type (eg. "regular" or "irregular")
- sampling rate (meaningful only if sampling type is "regular")
Each channel has a number of data points in time, not necessarily regularly spaced,
and not necessarily co-incident in time with data in other channels.
At the highest level, a Maya cache is simply made up of channels and their data in time.
On disk, the Maya cache is made up of a XML description file, and 1 or more data files.
The description file provides a high level overview of what the cache contains,
such as the cache type (one file, or one file per frame), channel names, interpretation, etc.
The data files contain the actual data for the channels.
In the case of one file per frame, a naming convention is used so the cache can check its
available data at runtime.
Here is a visualization of the data format of the OneFile case:
// |---CACH (Group) // Header
// | |---VRSN // Version Number (char*)
// | |---STIM // Start Time of the Cache File (int)
// | |---ETIM // End Time of the Cache File (int)
// |
// |---MYCH (Group) // 1st Time
// | |---TIME // Time (int)
// | |---CHNM // 1st Channel Name (char*)
// | |---SIZE // 1st Channel Size
// | |---DVCA // 1st Channel Data (Double Vector Array)
// | |---CHNM // n-th Channel Name
// | |---SIZE // n-th Channel Size
// | |---DVCA // n-th Channel Data (Double Vector Array)
// | |..
// |
// |---MYCH (Group) // 2nd Time
// | |---TIME // Time
// | |---CHNM // 1st Channel Name
// | |---SIZE // 1st Channel Size
// | |---DVCA // 1st Channel Data (Double Vector Array)
// | |---CHNM // n-th Channel Name
// | |---SIZE // n-th Channel Size
// | |---DVCA // n-th Channel Data (Double Vector Array)
// | |..
// |
// |---..
// |
//
In a multiple file caches, the only difference is that after the
header "CACH" group, there is only one MYCH group and there is no
TIME chunk. In the case of one file per frame, the time is part of
the file name - allowing Maya to scan at run time to see what data
is actually available, and it allows users to move data in time by
manipulating the file name.
!Note that it's not necessary to have data for every channel at every time.
"""
class CacheChannel:
    """Value object describing one channel of a Maya geometry cache.

    Mirrors the attributes of a <channel> element in the cache's XML
    description file (name, data type, interpretation, sampling, range).
    """

    # Class-level defaults, preserved for any code that reads them off the
    # class itself; instances always overwrite them in __init__.
    m_channelName = ""
    m_channelType = ""
    m_channelInterp = ""
    m_sampleType = ""
    m_sampleRate = 0
    m_startTime = 0
    m_endTime = 0

    def __init__(self, channelName, channelType, interpretation,
                 samplingType, samplingRate, startTime, endTime):
        (self.m_channelName,
         self.m_channelType,
         self.m_channelInterp,
         self.m_sampleType,
         self.m_sampleRate,
         self.m_startTime,
         self.m_endTime) = (channelName, channelType, interpretation,
                            samplingType, samplingRate, startTime, endTime)
class CacheFile:
    """Parsed representation of a Maya cache .xml description file.

    Holds cache-wide metadata (type, time range, ticks per frame, version)
    plus one CacheChannel per <channel...> element found in the file.
    """

    # Class-level defaults, preserved for any code that reads them off the
    # class; each instance re-initialises its own values in __init__.
    m_baseFileName = ""
    m_directory = ""
    m_fullPath = ""
    m_cacheType = ""
    m_cacheStartTime = 0
    m_cacheEndTime = 0
    m_timePerFrame = 0
    m_version = 0.0
    m_channels = []

    ########################################################################
    # Description:
    #     Class constructor - tries to figure out full path to cache
    #     xml description file before calling parseDescriptionFile()
    #
    def __init__(self, fileName):
        # BUG FIX: m_channels was only ever a *class* attribute, so every
        # CacheFile instance appended to the same shared list and channels
        # accumulated across instances.  Give each instance its own list.
        self.m_channels = []
        # fileName can be the full path to the .xml description file,
        # or just the filename of the .xml file, with or without extension
        # if it is in the current directory
        dir = os.path.dirname(fileName)
        fullPath = ""
        if dir == "":
            currDir = os.getcwd()
            fullPath = os.path.join(currDir, fileName)
            if not os.path.exists(fullPath):
                fileName = fileName + '.xml'
                fullPath = os.path.join(currDir, fileName)
                if not os.path.exists(fullPath):
                    # Parenthesised print works on Python 2 and 3 alike.
                    print("Sorry, can't find the file %s to be opened\n" % fullPath)
                    sys.exit(2)
        else:
            fullPath = fileName
        self.m_baseFileName = os.path.basename(fileName).split('.')[0]
        self.m_directory = os.path.dirname(fullPath)
        self.m_fullPath = fullPath
        self.parseDescriptionFile(fullPath)

    ########################################################################
    # Description:
    #     Writes a converted description file, where all instances of
    #     "FloatVectorArray" are replaced with "DoubleVectorArray"
    #
    def writeConvertedDescriptionFile(self, outputFileName):
        newXmlFileName = outputFileName + ".xml"
        newXmlFullPath = os.path.join(self.m_directory, newXmlFileName)
        # BUG FIX: the original leaked both file descriptors; context
        # managers close them even if an exception is raised mid-copy.
        with open(self.m_fullPath, "r") as fd, open(newXmlFullPath, "w") as fdOut:
            for line in fd:
                if line.find("FloatVectorArray") >= 0:
                    line = line.replace("FloatVectorArray", "DoubleVectorArray")
                fdOut.write(line)

    ########################################################################
    # Description:
    #     Given the full path to the xml cache description file, this
    #     method parses its contents and sets the relevant member variables
    #
    def parseDescriptionFile(self, fullPath):
        dom = xml.dom.minidom.parse(fullPath)
        root = dom.getElementsByTagName("Autodesk_Cache_File")
        allNodes = root[0].childNodes
        for node in allNodes:
            if node.nodeName == "cacheType":
                self.m_cacheType = node.attributes.item(0).nodeValue
            if node.nodeName == "time":
                # Attribute value is "start-end" in ticks (1 tick = 1/6000 s).
                timeRange = node.attributes.item(0).nodeValue.split('-')
                self.m_cacheStartTime = int(timeRange[0])
                self.m_cacheEndTime = int(timeRange[1])
            if node.nodeName == "cacheTimePerFrame":
                self.m_timePerFrame = int(node.attributes.item(0).nodeValue)
            if node.nodeName == "cacheVersion":
                self.m_version = float(node.attributes.item(0).nodeValue)
            if node.nodeName == "Channels":
                self.parseChannels(node.childNodes)

    ########################################################################
    # Description:
    #     helper method to extract channel information
    #
    def parseChannels(self, channels):
        for channel in channels:
            # Element names look like "channel0", "channel1", ... so match
            # by prefix rather than exact name.
            if re.compile("channel").match(channel.nodeName) != None:
                channelName = ""
                channelType = ""
                channelInterp = ""
                sampleType = ""
                sampleRate = 0
                startTime = 0
                endTime = 0
                for index in range(0, channel.attributes.length):
                    attrName = channel.attributes.item(index).nodeName
                    if attrName == "ChannelName":
                        channelName = channel.attributes.item(index).nodeValue
                    if attrName == "ChannelInterpretation":
                        channelInterp = channel.attributes.item(index).nodeValue
                    if attrName == "EndTime":
                        endTime = int(channel.attributes.item(index).nodeValue)
                    if attrName == "StartTime":
                        startTime = int(channel.attributes.item(index).nodeValue)
                    if attrName == "SamplingRate":
                        sampleRate = int(channel.attributes.item(index).nodeValue)
                    if attrName == "SamplingType":
                        sampleType = channel.attributes.item(index).nodeValue
                    if attrName == "ChannelType":
                        channelType = channel.attributes.item(index).nodeValue
                channelObj = CacheChannel(channelName, channelType, channelInterp,
                                          sampleType, sampleRate, startTime, endTime)
                self.m_channels.append(channelObj)
def fileFormatError():
    """Report an unreadable/corrupt cache data file and abort (exit code 2).

    BUG FIX: the original used the Python-2-only print statement; the
    parenthesised form below prints the same text on Python 2 and 3.
    """
    print("Error: unable to read cache format\n")
    sys.exit(2)
def readInt(fd, needSwap):
    """Read one 32-bit integer from file object *fd*.

    needSwap -- byteswap the value (file endianness differs from native).

    BUG FIX: the original used array type code 'l', which is 8 bytes on
    64-bit Unix platforms and therefore mis-framed the 4-byte fields of
    the cache format there.  Type code 'i' is 4 bytes on all mainstream
    platforms.
    """
    intArray = array.array('i')
    intArray.fromfile(fd, 1)
    if needSwap:
        intArray.byteswap()
    return intArray[0]
def writeInt(fd, outInt, needSwap):
    """Write *outInt* to file object *fd* as one 32-bit integer.

    needSwap -- byteswap before writing (file endianness differs from native).

    BUG FIX: same as readInt -- type code 'i' (4 bytes) replaces 'l',
    which is 8 bytes on 64-bit Unix and corrupted the output stream there.
    """
    intArray = array.array('i')
    intArray.insert(0, outInt)
    if needSwap:
        intArray.byteswap()
    intArray.tofile(fd)
########################################################################
# Description:
#     method to parse and convert the contents of the data file, for the
#     One large file case ("OneFile")
def parseDataOneFile(cacheFile,outFileName):
    # Converts <base>.mc next to the description file into <outFileName>.mc,
    # promoting every FVCA (float vector array) channel to DVCA (double),
    # and recomputing each time block's size accordingly.
    # NOTE(review): this is Python-2-only code (print statements, str/bytes
    # comparisons against fd.read()); it will not run unmodified on Python 3.
    dataFilePath = os.path.join(cacheFile.m_directory,cacheFile.m_baseFileName)
    dataFileNameOut = outFileName + ".mc"
    dataFilePathOut = os.path.join(cacheFile.m_directory,dataFileNameOut)
    dataFilePath = dataFilePath + ".mc"
    if not os.path.exists(dataFilePath):
        print "Error: unable to open cache data file at %s\n" % dataFilePath
        sys.exit(2)
    fd = open(dataFilePath,"rb")
    fdOut = open(dataFilePathOut,"wb")
    blockTag = fd.read(4)
    fdOut.write(blockTag)
    #blockTag must be FOR4
    if blockTag != "FOR4":
        fileFormatError()
    # The cache is big-endian on disk, so byteswap on little-endian hosts
    # (Windows/Linux per the original author's platform checks).
    platform = sys.platform
    needSwap = False
    if re.compile("win").match(platform) != None :
        needSwap = True
    if re.compile("linux").match(platform) != None :
        needSwap = True
    offset = readInt(fd,needSwap)
    writeInt(fdOut,offset,needSwap)
    #The 1st block is the header, not used.
    #just write out as is
    header = fd.read(offset)
    fdOut.write(header)
    while True:
        #From now on the file is organized in blocks of time
        #Each block holds the data for all the channels at that
        #time
        blockTag = fd.read(4)
        fdOut.write(blockTag)
        if blockTag == "":
            #EOF condition...we are done
            return
        if blockTag != "FOR4":
            fileFormatError()
        blockSize = readInt(fd,needSwap)
        #We cannot just write out the old block size, since we are potentially converting
        #Float channels to doubles, the block size may increase.
        newBlockSize = 0
        bytesRead = 0
        #Since we don't know the size of the block yet, we will cache everything in a dictionary,
        #and write everything out in the end.
        blockContents = {}
        mychTag = fd.read(4)
        if mychTag != "MYCH":
            fileFormatError()
        bytesRead += 4
        blockContents['mychTag'] = mychTag
        timeTag = fd.read(4)
        if timeTag != "TIME":
            fileFormatError()
        bytesRead += 4
        blockContents['timeTag']= timeTag
        #Next 32 bit int is the size of the time variable,
        #this is always 4
        timeVarSize = readInt(fd,needSwap)
        bytesRead += 4
        blockContents['timeVarSize']= timeVarSize
        #Next 32 bit int is the time itself, in ticks
        #1 tick = 1/6000 of a second
        time = readInt(fd,needSwap)
        bytesRead += 4
        blockContents['time']= time
        newBlockSize = bytesRead
        channels = []
        blockContents['channels'] = channels
        print "Converting Data found at time %f seconds...\n"%(time/6000.0)
        while bytesRead < blockSize:
            channelContents = {}
            #channel name is next.
            #the tag for this must be CHNM
            chnmTag = fd.read(4)
            if chnmTag != "CHNM":
                fileFormatError()
            bytesRead += 4
            newBlockSize += 4
            channelContents['chnmTag'] = chnmTag
            #Next comes a 32 bit int that tells us how long the
            #channel name is
            chnmSize = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['chnmSize'] = chnmSize
            #The string is padded out to 32 bit boundaries,
            #so we may need to read more than chnmSize
            mask = 3
            chnmSizeToRead = (chnmSize + mask) & (~mask)
            channelName = fd.read(chnmSize)
            paddingSize = chnmSizeToRead-chnmSize
            channelContents['channelName'] = channelName
            channelContents['paddingSize'] = paddingSize
            if paddingSize > 0:
                padding = fd.read(paddingSize)
                channelContents['padding'] = padding
            bytesRead += chnmSizeToRead
            newBlockSize += chnmSizeToRead
            #Next is the SIZE field, which tells us the length
            #of the data array
            sizeTag = fd.read(4)
            channelContents['sizeTag'] = sizeTag
            if sizeTag != "SIZE":
                fileFormatError()
            bytesRead += 4
            newBlockSize += 4
            #Next 32 bit int is the size of the array size variable,
            #this is always 4, so we'll ignore it for now
            #though we could use it as a sanity check.
            arrayVarSize = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['arrayVarSize'] = arrayVarSize
            #finally the actual size of the array:
            arrayLength = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['arrayLength'] = arrayLength
            #data format tag:
            dataFormatTag = fd.read(4)
            #buffer length - how many bytes is the actual data
            bufferLength = readInt(fd,needSwap)
            bytesRead += 8
            newBlockSize += 8
            numPointsToPrint = 5
            if dataFormatTag == "FVCA":
                #FVCA == Float Vector Array: convert to doubles (2x bytes).
                outDataTag = "DVCA"
                channelContents['dataFormatTag'] = outDataTag
                if bufferLength != arrayLength*3*4:
                    fileFormatError()
                outBufLength = bufferLength*2
                channelContents['bufferLength'] = outBufLength
                floatArray = array.array('f')
                floatArray.fromfile(fd,arrayLength*3)
                doubleArray = array.array('d')
                bytesRead += arrayLength*3*4
                newBlockSize += arrayLength*3*8
                # Swap to native order before widening, then back for output.
                if needSwap:
                    floatArray.byteswap()
                for index in range(0,arrayLength*3):
                    doubleArray.append(floatArray[index])
                if needSwap:
                    doubleArray.byteswap()
                channelContents['doubleArray'] = doubleArray
                channels.append(channelContents)
            elif dataFormatTag == "DVCA":
                #DVCA == Double Vector Array: pass through unchanged.
                channelContents['dataFormatTag'] = dataFormatTag
                if bufferLength != arrayLength*3*8:
                    fileFormatError()
                channelContents['bufferLength'] = bufferLength
                doubleArray = array.array('d')
                doubleArray.fromfile(fd,arrayLength*3)
                channelContents['doubleArray'] = doubleArray
                channels.append(channelContents)
                bytesRead += arrayLength*3*8
                newBlockSize += arrayLength*3*8
            else:
                fileFormatError()
        #Now that we have completely parsed this block, we are ready to output it
        writeInt(fdOut,newBlockSize,needSwap)
        fdOut.write(blockContents['mychTag'])
        fdOut.write(blockContents['timeTag'])
        writeInt(fdOut,blockContents['timeVarSize'],needSwap)
        writeInt(fdOut,blockContents['time'],needSwap)
        for channelContents in channels:
            fdOut.write(channelContents['chnmTag'])
            writeInt(fdOut,channelContents['chnmSize'],needSwap)
            fdOut.write(channelContents['channelName'])
            if channelContents['paddingSize'] > 0:
                fdOut.write(channelContents['padding'])
            fdOut.write(channelContents['sizeTag'])
            writeInt(fdOut,channelContents['arrayVarSize'],needSwap)
            writeInt(fdOut,channelContents['arrayLength'],needSwap)
            fdOut.write(channelContents['dataFormatTag'])
            writeInt(fdOut,channelContents['bufferLength'],needSwap)
            channelContents['doubleArray'].tofile(fdOut)
########################################################################
# Description:
#     method to parse and convert the contents of the data file, for the
#     file per frame case ("OneFilePerFrame")
def parseDataFilePerFrame(cacheFile,outFileName):
    # Same conversion as parseDataOneFile, but each frame lives in its own
    # <base>Frame<N>[Tick<M>].mc file; the time comes from the file name,
    # so there is no TIME chunk inside the data.
    # NOTE(review): Python-2-only code, like parseDataOneFile.
    allFilesInDir = os.listdir(cacheFile.m_directory)
    matcher = re.compile(cacheFile.m_baseFileName)
    dataFiles = []
    for afile in allFilesInDir:
        if os.path.splitext(afile)[1] == ".mc" and matcher.match(afile) != None:
            dataFiles.append(afile)
    for dataFile in dataFiles:
        # Recover frame and tick numbers from the file name; time in ticks
        # = frame * ticksPerFrame + tick (1 tick = 1/6000 s).
        fileName = os.path.split(dataFile)[1]
        baseName = os.path.splitext(fileName)[0]
        frameAndTickNumberStr = baseName.split("Frame")[1]
        frameAndTickNumber = frameAndTickNumberStr.split("Tick")
        frameNumber = int(frameAndTickNumber[0])
        tickNumber = 0
        if len(frameAndTickNumber) > 1:
            tickNumber = int(frameAndTickNumber[1])
        timeInTicks = frameNumber*cacheFile.m_timePerFrame + tickNumber
        print "--------------------------------------------------------------\n"
        print "Converting data at time %f seconds:\n"%(timeInTicks/6000.0)
        fd = open(dataFile,"rb")
        dataFileOut = outFileName + "Frame" + frameAndTickNumberStr + ".mc"
        dataFileOutPath = os.path.join(cacheFile.m_directory,dataFileOut)
        fdOut = open(dataFileOutPath,"wb")
        blockTag = fd.read(4)
        #blockTag must be FOR4
        if blockTag != "FOR4":
            fileFormatError()
        fdOut.write(blockTag)
        platform = sys.platform
        needSwap = False
        if re.compile("win").match(platform) != None :
            needSwap = True
        if re.compile("linux").match(platform) != None :
            needSwap = True
        offset = readInt(fd,needSwap)
        writeInt(fdOut,offset,needSwap)
        #The 1st block is the header, not used.
        #write out as is.
        header = fd.read(offset)
        fdOut.write(header)
        blockTag = fd.read(4)
        if blockTag != "FOR4":
            fileFormatError()
        fdOut.write(blockTag)
        blockSize = readInt(fd,needSwap)
        #We cannot just write out the old block size, since we are potentially converting
        #Float channels to doubles, the block size may increase.
        newBlockSize = 0
        bytesRead = 0
        #Since we don't know the size of the block yet, we will cache everything in a dictionary,
        #and write everything out in the end.
        blockContents = {}
        mychTag = fd.read(4)
        blockContents['mychTag'] = mychTag
        if mychTag != "MYCH":
            fileFormatError()
        bytesRead += 4
        #Note that unlike the oneFile case, for file per frame there is no
        #TIME tag at this point. The time of the data is embedded in the
        #file name itself.
        newBlockSize = bytesRead
        channels = []
        blockContents['channels'] = channels
        while bytesRead < blockSize:
            channelContents = {}
            #channel name is next.
            #the tag for this must be CHNM
            chnmTag = fd.read(4)
            if chnmTag != "CHNM":
                fileFormatError()
            bytesRead += 4
            newBlockSize += 4
            channelContents['chnmTag'] = chnmTag
            #Next comes a 32 bit int that tells us how long the
            #channel name is
            chnmSize = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['chnmSize'] = chnmSize
            #The string is padded out to 32 bit boundaries,
            #so we may need to read more than chnmSize
            mask = 3
            chnmSizeToRead = (chnmSize + mask) & (~mask)
            channelName = fd.read(chnmSize)
            paddingSize = chnmSizeToRead-chnmSize
            channelContents['channelName'] = channelName
            channelContents['paddingSize'] = paddingSize
            if paddingSize > 0:
                padding = fd.read(paddingSize)
                channelContents['padding'] = padding
            bytesRead += chnmSizeToRead
            newBlockSize += chnmSizeToRead
            #Next is the SIZE field, which tells us the length
            #of the data array
            sizeTag = fd.read(4)
            channelContents['sizeTag'] = sizeTag
            if sizeTag != "SIZE":
                fileFormatError()
            bytesRead += 4
            newBlockSize += 4
            #Next 32 bit int is the size of the array size variable,
            #this is always 4, so we'll ignore it for now
            #though we could use it as a sanity check.
            arrayVarSize = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['arrayVarSize'] = arrayVarSize
            #finally the actual size of the array:
            arrayLength = readInt(fd,needSwap)
            bytesRead += 4
            newBlockSize += 4
            channelContents['arrayLength'] = arrayLength
            #data format tag:
            dataFormatTag = fd.read(4)
            #buffer length - how many bytes is the actual data
            bufferLength = readInt(fd,needSwap)
            bytesRead += 8
            newBlockSize += 8
            numPointsToPrint = 5
            if dataFormatTag == "FVCA":
                #FVCA == Float Vector Array: convert to doubles (2x bytes).
                outDataTag = "DVCA"
                channelContents['dataFormatTag'] = outDataTag
                if bufferLength != arrayLength*3*4:
                    fileFormatError()
                outBufLength = bufferLength*2
                channelContents['bufferLength'] = outBufLength
                floatArray = array.array('f')
                floatArray.fromfile(fd,arrayLength*3)
                bytesRead += arrayLength*3*4
                newBlockSize += arrayLength*3*8
                doubleArray = array.array('d')
                # Swap to native order before widening, then back for output.
                if needSwap:
                    floatArray.byteswap()
                for index in range(0,arrayLength*3):
                    doubleArray.append(floatArray[index])
                if needSwap:
                    doubleArray.byteswap()
                channelContents['doubleArray'] = doubleArray
                channels.append(channelContents)
            elif dataFormatTag == "DVCA":
                #DVCA == Double Vector Array: pass through unchanged.
                channelContents['dataFormatTag'] = dataFormatTag
                if bufferLength != arrayLength*3*8:
                    fileFormatError()
                channelContents['bufferLength'] = bufferLength
                doubleArray = array.array('d')
                doubleArray.fromfile(fd,arrayLength*3)
                channelContents['doubleArray'] = doubleArray
                channels.append(channelContents)
                bytesRead += arrayLength*3*8
                newBlockSize += arrayLength*3*8
            else:
                fileFormatError()
        #Now that we have completely parsed this block, we are ready to output it
        writeInt(fdOut,newBlockSize,needSwap)
        fdOut.write(blockContents['mychTag'])
        for channelContents in channels:
            fdOut.write(channelContents['chnmTag'])
            writeInt(fdOut,channelContents['chnmSize'],needSwap)
            fdOut.write(channelContents['channelName'])
            if channelContents['paddingSize'] > 0:
                fdOut.write(channelContents['padding'])
            fdOut.write(channelContents['sizeTag'])
            writeInt(fdOut,channelContents['arrayVarSize'],needSwap)
            writeInt(fdOut,channelContents['arrayLength'],needSwap)
            fdOut.write(channelContents['dataFormatTag'])
            writeInt(fdOut,channelContents['bufferLength'],needSwap)
            channelContents['doubleArray'].tofile(fdOut)
def usage():
    """Print command-line usage for this cache converter script.

    BUG FIX: parenthesised print so the message prints identically on
    Python 2 and Python 3 (the original used the py2-only print statement).
    """
    print("Use -f to indicate the cache description file (.xml) you wish to convert\nUse -o to indicate the output filename")
# ---------------------------------------------------------------------
# Script entry point: parse -f (input .xml description) and -o (output
# base name), convert the description file, then convert the data file(s)
# according to the cache type declared in the description.
try:
    (opts, args) = getopt.getopt(sys.argv[1:], "f:o:")
except getopt.error:
    # print help information and exit:
    usage()
    sys.exit(2)
# Both -f and -o are mandatory.
if len(opts) != 2:
    usage()
    sys.exit(2)
fileName = ""
outFileName = ""
for o,a in opts:
    if o == "-f":
        fileName = a
    if o == "-o":
        outFileName = a
cacheFile = CacheFile(fileName)
# Caches newer than version 2 use a format this script does not understand.
if cacheFile.m_version > 2.0:
    print "Error: this script can only parse cache files of version 2 or lower\n"
    sys.exit(2)
print "Outputing new description file...\n"
cacheFile.writeConvertedDescriptionFile(outFileName)
print "Beginning Conversion of data files...\n"
if cacheFile.m_cacheType == "OneFilePerFrame":
    parseDataFilePerFrame(cacheFile,outFileName)
elif cacheFile.m_cacheType == "OneFile":
    parseDataOneFile(cacheFile,outFileName)
else:
    print "unknown cache type!\n"
|
997,193 | 4de43372be7aaaa10f75281f274495d7d6fc624e | from django.conf.urls import url
from django.conf.urls import include
from . import views
# Routes for the site's three static-style pages (index, generic, elements).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^generic/$', views.generic, name='generic'),
    url(r'^elements/$', views.elements, name='elements'),
]
|
997,194 | 872589edff72922461aabc73166dabc8755816ad | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 18:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Party.current_user a nullable FK to api.User."""

    dependencies = [
        ('api', '0002_remove_user_expiredatetokenspotify'),
    ]

    operations = [
        migrations.AlterField(
            model_name='party',
            name='current_user',
            # BUG FIX: null was the *string* 'True' (any non-empty string is
            # truthy, so it happened to behave the same) -- pass the boolean
            # True that Django's field option actually expects.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='current_user', to='api.User'),
        ),
    ]
|
997,195 | 1627e55715441135088c5ce89fc555e884ce9497 | import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
def create_cloud_page(Scrapedreviews,freq):
    """Build the Dash layout for the word-cloud page.

    Scrapedreviews -- iterable of review labels; each becomes one dropdown
                      option (label == value).
    freq           -- content rendered inside the scrolling marquee header.
    Returns the assembled html.Div layout.
    """
    layout=html.Div(children=[
        # Scrolling banner across the top of the page.
        html.Div(className='cloud-page-bg',children=[
            html.Marquee(className='cloud-page-marquee',children=[
                html.H3(children=freq,className='cloud-page-h3')
            ])
        ]),
        # Review selector, placeholder gif, and prompt text; the ids here
        # ('review-dropdown', 'review-dropdown-gif', 'review-type') are
        # callback targets elsewhere in the app.
        html.Div(className='cloud-page-Div',children=[
            dcc.Dropdown(id='review-dropdown',options=[{'label':i, 'value':i} for i in Scrapedreviews],style={"width": "100%",'height':'70px'},multi=False,optionHeight = 100),
            html.Img(src='https://media.giphy.com/media/KGSxFwJJHQPsKzzFba/giphy.gif',width=300,height=300,className='cloud-page-img',id='review-dropdown-gif'),
            html.H4('Choose something',className='cloud-page-h4',id='review-type')
        ])
    ])
    return layout
997,196 | 6e741fcd66e8095917d54dfc5beccedc0e6beec8 | from __future__ import absolute_import
import json
import pytest
from django.urls import reverse_lazy
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from .base import BaseAPITestCase
from contentcuration.models import AssessmentItem
from contentcuration.models import ContentNode
from contentcuration.models import File
from contentcuration.models import User
pytestmark = pytest.mark.django_db
class ChannelTestCase(BaseAPITestCase):
    """Channel endpoint tests: editor authorisation and read-only fields."""

    def setUp(self):
        # Make the test user an editor of the channel so the authorised
        # requests below are expected to succeed.
        super(ChannelTestCase, self).setUp()
        self.channel.editors.add(self.user)
        self.channel.save()

    def test_authorized_get(self):
        # An editor can fetch the channel detail (200).
        url = reverse_lazy("channel-list") + "/" + self.channel.pk
        response = self.get(url)
        self.assertEqual(response.status_code, 200)

    def test_unauthorized_get(self):
        # A freshly created non-editor user gets a 404 for the same channel.
        newuser = User.objects.create(email="unauthorized@test.com")
        newuser.set_password("password")
        newuser.save()
        self.client.force_authenticate(newuser)
        url = reverse_lazy("channel-list") + "/" + self.channel.pk
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_readonly_fields(self):
        # PUTting a bumped version must leave the stored version unchanged.
        original_version = self.channel.version
        url = reverse_lazy("channel-list") + "/" + self.channel.pk
        response = self.put(
            url,
            {
                "version": original_version + 1,
                "content_defaults": {},
                "pending_editors": [],
            },
        )
        self.channel.refresh_from_db()
        self.assertEqual(original_version, self.channel.version)
# TODO: rtibbles - update tests to test sync behaviour.
@pytest.mark.skip
class AssessmentItemTestCase(BaseAPITestCase):
    """Bulk-update tests for the assessmentitem endpoint (currently skipped,
    pending the sync-behaviour rework noted above)."""

    def test_bulk_update(self):
        # Three items on one exercise; a bulk PUT updates two of them and
        # must leave the third untouched.
        exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        item1 = AssessmentItem.objects.create(contentnode=exercise)
        item2 = AssessmentItem.objects.create(contentnode=exercise)
        item3 = AssessmentItem.objects.create(contentnode=exercise)
        item1dict = {}
        item2dict = {}
        item3dict = {}
        # Serialise each item field-by-field; the FK column "contentnode_id"
        # is sent under the key "contentnode".
        for field in AssessmentItem._meta.fields:
            attname = field.attname
            set_attname = attname
            if attname == "contentnode_id":
                set_attname = "contentnode"
            item1dict[set_attname] = getattr(item1, attname)
            item2dict[set_attname] = getattr(item2, attname)
            item3dict[set_attname] = getattr(item3, attname)
        item1dict["question"] = "test"
        item2dict["type"] = "test"
        self.client.put(
            reverse_lazy("assessmentitem-list"),
            json.dumps([item1dict, item2dict, item3dict]),
            content_type="application/json",
        )
        item1.refresh_from_db()
        self.assertEqual(item1.question, "test")
        item2.refresh_from_db()
        self.assertEqual(item2.type, "test")
        item3.refresh_from_db()
        self.assertEqual(item3.question, item3dict["question"])

    def test_bulk_update_non_existent_item(self):
        # IDs 10000/10001 do not exist, so the whole bulk PUT must 400.
        exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        item1 = AssessmentItem.objects.create(contentnode=exercise)
        item1dict = {}
        item2dict = {}
        item3dict = {}
        for field in AssessmentItem._meta.fields:
            attname = field.attname
            set_attname = attname
            if attname == "contentnode_id":
                set_attname = "contentnode"
            item1dict[set_attname] = getattr(item1, attname)
            item2dict[set_attname] = getattr(item1, attname)
            item3dict[set_attname] = getattr(item1, attname)
        item2dict["id"] = 10000
        item3dict["id"] = 10001
        item1dict["question"] = "test"
        response = self.client.put(
            reverse_lazy("assessmentitem-list"),
            json.dumps([item1dict, item2dict, item3dict]),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 400)

    def test_bulk_update_checksum_file_not_associated_create_new_file_object(self):
        # A checksum referenced by the question but not yet associated with
        # the item should result in a second File row for that checksum.
        exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        item1 = AssessmentItem.objects.create(contentnode=exercise)
        item1dict = {}
        for field in AssessmentItem._meta.fields:
            attname = field.attname
            set_attname = attname
            if attname == "contentnode_id":
                set_attname = "contentnode"
            item1dict[set_attname] = getattr(item1, attname)
        checksum = "b6d83d66859b0cf095ef81120ef98e1f"
        # NOTE(review): "".format(checksum=checksum) always yields "" -- the
        # original template string that embedded the checksum in the
        # question markdown appears to have been lost; confirm upstream.
        item1dict["question"] = (
            "".format(checksum=checksum)
        )
        File.objects.create(checksum=checksum)
        self.client.put(
            reverse_lazy("assessmentitem-list"),
            json.dumps([item1dict]),
            content_type="application/json",
        )
        self.assertEqual(File.objects.filter(checksum=checksum).count(), 2)

    def test_bulk_update_checksum_file_associated_use_existing_file_object(self):
        # A checksum whose File is already attached to the item should be
        # reused rather than duplicated.
        exercise = ContentNode.objects.filter(kind=content_kinds.EXERCISE).first()
        item1 = AssessmentItem.objects.create(contentnode=exercise)
        item1dict = {}
        for field in AssessmentItem._meta.fields:
            attname = field.attname
            set_attname = attname
            if attname == "contentnode_id":
                set_attname = "contentnode"
            item1dict[set_attname] = getattr(item1, attname)
        checksum = "b6d83d66859b0cf095ef81120ef98e1f"
        # NOTE(review): same lost-template concern as the previous test.
        item1dict["question"] = (
            "".format(checksum=checksum)
        )
        File.objects.create(checksum=checksum, assessment_item=item1)
        self.client.put(
            reverse_lazy("assessmentitem-list"),
            json.dumps([item1dict]),
            content_type="application/json",
        )
        self.assertEqual(File.objects.filter(checksum=checksum).count(), 1)
|
997,197 | b349116bb6bbb1ccb84114626da396c8b6a93694 | from typing import List
def cons(it, elem):
    """Yield every item of *it*, then yield *elem* (lazy append-one)."""
    for item in it:
        yield item
    yield elem
class Cosets:
    """Flat coset table for coset enumeration.

    Row = coset index, column = generator index; entries live in one flat
    list, -1 meaning "not yet known".  Writing an edge also records the
    inverse edge (generators are involutions here).
    """

    def __init__(self, ngens, data=()):
        assert len(data) % ngens == 0, 'invalid length starting row'
        self.ngens = ngens
        self.data = list(data)
        self.len = len(self.data) // ngens

    def add_row(self):
        """Append one coset whose edges are all unknown (-1)."""
        self.len += 1
        self.data.extend([-1] * self.ngens)

    def put(self, idx, target):
        """Set the edge at flat index *idx* to *target*, plus the inverse edge."""
        row, col = divmod(idx, self.ngens)
        self.data[idx] = target
        self.data[target * self.ngens + col] = row

    def get(self, idx):
        """Return the entry at flat index *idx*."""
        return self.data[idx]

    def __getitem__(self, key):
        row, col = key
        return self.data[row * self.ngens + col]

    def __setitem__(self, key, target):
        # Record coset[row]·gen[col] -> target and the inverse edge back.
        row, col = key
        self.data[row * self.ngens + col] = target
        self.data[target * self.ngens + col] = row

    def __len__(self):
        return self.len

    def __repr__(self):
        rows = []
        for start in range(0, len(self.data), self.ngens):
            cells = ' '.join(
                f'{e:>3}' for e in self.data[start:start + self.ngens])
            rows.append(f'{start // self.ngens:>3} | ' + cells)
        return '\n'.join(rows)
class RelTable:
    """Per-relation bookkeeping used during coset enumeration.

    gens -- the pair of generator indices the relation involves.
    mul  -- the order of their product.
    fam/gen/lst run parallel to the coset table, one entry per coset,
    initialised to -1 (unknown).
    """

    def __init__(self, gens: List[int], mul: int):
        self.gens = gens
        self.mul = mul
        self.fam = []
        self.gen = []
        self.lst = []

    def add_row(self):
        """Append an unknown (-1) entry for a new coset; return its index."""
        new_index = len(self.fam)
        for column in (self.fam, self.gen, self.lst):
            column.append(-1)
        return new_index
class Group:
    """A group presentation with involutive generators and pairwise
    product orders m(i, j) (Coxeter-matrix style).

    _mults[i][j] stores the order of the product g_i * g_j; unspecified
    pairs default to 2, i.e. those generators commute.
    """

    def __init__(self, ngens, rels=()):
        """ngens -- number of generators; rels -- iterable of ((i, j), m)."""
        self._mults = [[2] * ngens for _ in range(ngens)]
        for (f, t), m in rels:
            # Keep the order matrix symmetric.
            self._mults[f][t] = m
            self._mults[t][f] = m
        self.ngens = ngens

    @property
    def mults(self):
        """Yield ((i, j), m) for every generator pair with i < j."""
        for i in range(self.ngens - 1):
            for j in range(i + 1, self.ngens):
                yield ((i, j), self._mults[i][j])

    def __setitem__(self, key, value):
        """Set the product order of a generator pair, symmetrically."""
        f, t = key
        self._mults[f][t] = value
        self._mults[t][f] = value

    def __mul__(self, other):
        """Direct product: generator sets are concatenated; cross pairs
        keep the commuting default of 2."""
        assert isinstance(other, Group)
        off = self.ngens
        g = Group(self.ngens + other.ngens)
        for (i, j), m in self.mults:
            g[i, j] = m
        for (i, j), m in other.mults:
            g[off + i, off + j] = m
        return g

    def __pow__(self, p, modulo=None):
        """p-fold direct product of the group with itself."""
        if modulo is not None:
            # BUG FIX: the original did `raise NotImplemented`, which is a
            # TypeError at runtime (NotImplemented is a sentinel value, not
            # an exception class); raise the proper exception instead.
            raise NotImplementedError
        assert isinstance(p, int), 'p must be an integer'
        assert p >= 0, 'p must be a nonnegative integer'
        g = Group(self.ngens * p)
        for (i, j), m in self.mults:
            for off in range(0, g.ngens, self.ngens):
                g[off + i, off + j] = m
        return g

    @classmethod
    def schlafli(cls, *mults):
        """Build the linear (chain) group for a Schlafli symbol: consecutive
        generators i, i+1 get product order mults[i]."""
        ngens = len(mults) + 1
        return Group(ngens, (((i, i + 1), mult) for i, mult in enumerate(mults)))

    def solve(self, sub_gens=()):
        """Enumerate cosets of the subgroup generated by *sub_gens*
        (Todd-Coxeter style) and return the completed Cosets table.

        NOTE(review): depends on the module-level Cosets and RelTable
        helper classes defined alongside this class.
        """
        # Row 0 is the subgroup itself: its own generators fix coset 0.
        initial_row = [-1] * self.ngens
        for s in sub_gens:
            initial_row[s] = 0
        cosets = Cosets(self.ngens, initial_row)
        rel_tables = [RelTable(*a) for a in self.mults]
        # Seed every relation table for coset 0.
        for rel in rel_tables:
            idx = rel.add_row()
            count = 0
            for g in rel.gens:
                if cosets.get(g) == 0:
                    count += 1
                    rel.fam[idx] = 0
                    rel.gen[idx] = 0
                    rel.lst[idx] = 0
            if count == 1:
                # Exactly one generator of this relation fixes coset 0;
                # mark the row with the negative "stationary" counter.
                rel.gen[idx] = -1
        idx = 0
        while True:
            # Scan for the first still-unknown table entry; done when none.
            while idx < len(cosets.data) and cosets.get(idx) >= 0:
                idx += 1
            if idx == len(cosets.data):
                break
            coset, gen = divmod(idx, cosets.ngens)
            # Define a brand-new coset as the image of (coset, gen).
            target = len(cosets)
            cosets.add_row()
            for rel in rel_tables:
                rel.add_row()
            # Propagate every equality the relations force onto `target`.
            facts = [(coset, gen)]
            while facts:
                coset, gen = facts.pop()
                cosets[coset, gen] = target
                for rel in rel_tables:
                    if gen in rel.gens and rel.fam[target] == -1:
                        rel.fam[target] = rel.fam[coset]
                        rel.gen[target] = rel.gen[coset] + 1
                        if rel.gen[coset] < 0:
                            rel.gen[target] -= 2
                        if rel.gen[target] == rel.mul:  # forward learn
                            lst = rel.lst[rel.fam[target]]
                            # rel.gens[rel.gens[0] == gen]: the *other*
                            # generator of the pair.
                            gen_ = rel.gens[rel.gens[0] == gen]
                            facts.append((lst, gen_))
                        elif rel.gen[target] == -rel.mul:  # stationary learn
                            gen_ = rel.gens[rel.gens[0] == gen]
                            facts.append((target, gen_))
                        elif rel.gen[target] == rel.mul - 1:
                            rel.lst[rel.fam[target]] = target
                facts.sort(reverse=True)
            # Any relation family the new coset did not join starts its own.
            for rel in rel_tables:
                if rel.fam[target] == -1:
                    rel.fam[target] = target
                    rel.gen[target] = 0
                    count = 0
                    for g in rel.gens:
                        if cosets[target, g] == target:
                            count += 1
                    if count == 1:
                        rel.gen[target] = -1
        return cosets
|
997,198 | 52cc06c8b2f3337305e337cf6b289c7cd03c7728 | #!/usr/bin/env python
# coding: utf-8
# In[65]:
import numpy as ny
import pandas as pd
from matplotlib import pyplot as plt

# BUG FIX: the original called plt.plot("dataR2.csv") and
# plt.show("dataR2.csv") before any data was loaded -- plt.plot cannot
# plot a filename string and plt.show takes no filename argument.
# Load the CSV first, then work with the DataFrame.
data = pd.read_csv("dataR2.csv")

# Features are every column except the target; the target is the
# "Classification" column.
# BUG FIX: drop("Classification", 1) relies on the positional `axis`
# argument, deprecated in pandas 1.x and a TypeError in pandas >= 2.0.
x = data.drop("Classification", axis=1)
y = data["Classification"]

from sklearn.model_selection import train_test_split
# Hold out 20% of the rows for evaluation.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

from sklearn.neural_network import MLPClassifier
model = MLPClassifier(hidden_layer_sizes=(8, 8, 8), activation="logistic",
                      learning_rate="constant", learning_rate_init=0.1)
model.fit(x_train, y_train)
predict = model.predict(x_test)

from sklearn.metrics import accuracy_score
# BUG FIX: sklearn metrics expect (y_true, y_pred); the original passed
# the predictions first.  Accuracy is symmetric, but the confusion
# matrix below would have been transposed.
acc = accuracy_score(y_test, predict)
print(acc)

from sklearn.metrics import confusion_matrix
conf = confusion_matrix(y_test, predict)
print(conf)
# In[ ]:
|
997,199 | 25411849e7250b98cad4dbfc64391daaedb8b8f4 | import time
# Two equivalent ways of printing the same line 45 times are sketched
# below (commented out): a while loop and a for loop.  To find out which
# one runs faster, we time each of them with the time module.
#
# k = 0
# while (k < 45):
#     print("This is harry bhai")
#     k += 1
#
# for i in range(45):
#     print("This is harry bhai")

start_while = time.time()  # seconds since the epoch at this point
print(start_while)
count = 0
while count < 10:
    print("This is harry bhai")
    count += 1
print('time took by while loop to run was', time.time() - start_while)

start_for = time.time()  # seconds since the epoch at this point
for _ in range(10):
    print("This is harry bhai")
print('time took by for loop to run was', time.time() - start_for)

# With only 10 iterations the elapsed time is so short it may print as
# 0.0 seconds -- the code is not wrong.  Raising the loop count to
# 1000000 shows a measurable time (about 11.19 seconds when measured).

now = time.asctime(time.localtime(time.time()))  # human-readable local time
print(now)

# Now demonstrate the sleep function: the loop prints its message, waits
# one second, then continues with the next iteration.
sleep_start = time.time()
count = 0
while count < 10:
    print("This is harry bhai")
    time.sleep(1)
    count += 1
print('time took by while loop to run was', time.time() - sleep_start)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.