index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,300 | 2c4fcb3c8e45a47fc1f6fac7f81d1bbb2a83917a | def multiples_of(start, end, *multiples):
return sum(j for j in xrange(start, end + 1) if any(j % multiple == 0 for multiple in multiples))
def main():
return multiples_of(1, 999, 3, 5) |
20,301 | b1e5e20b7f883b4e93e4637312e451f68c4cba1c | from django.contrib import admin
from basic_app.models import Class_Model_Model1
# Register your models here.
admin.site.register(Class_Model_Model1)
|
20,302 | 8ff3bc958c34324e708e3d54126aca7ae55f6231 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 00:31:37 2019
@author: aayush
"""
import resnet
import Searcher
import argparse
import cv2
# creating the argument parser and parsing the arguments
#intializing the color descriptor
cd = resnet.ResNetFeat()
#loading the query image and describe it
queryFeatures = cd.getFeatureQuery('queries/test4.jpg')
#performing the search
s1 = Searcher.Searcher('output.csv')
results = s1.search(queryFeatures)
query = cv2.imread('queries/test4.jpg')
query2 = cv2.resize(query,(300,300))
cv2.imshow("query",query2)
#displaying the query
#loop over the results
label = []
for i in range(4):
label.append('rs'+str(i))
i = 0
for (score, resultID) in results:
#load the result image and display it
print(resultID)
result1 = cv2.imread(resultID)
result = cv2.resize(result1,(300,300))
cv2.imshow(label[i],result)
i = i+1
cv2.waitKey(0)
|
20,303 | ea6ef2eb6c287dc161975bce5920de5c088788c5 | import argparse
TF = (False, True)
def job_name(nodes, c, i, m):
    """Build the batch-job name for one (nodes, c) configuration.

    Optional "_i" and "_m" suffixes mark the i- and m-flag variants.
    """
    parts = [f"matrixmul_{nodes}_{c}"]
    if i:
        parts.append("i")
    if m:
        parts.append("m")
    return "_".join(parts)
def get_command(f, c, i, m, exp):
    """Assemble the srun command line for one matrixmul run."""
    flags = ""
    if i:
        flags += " -i"
    if m:
        flags += " -m"
    return f"srun ../build/matrixmul -f {f} -s 42 -c {c} -e {exp} -g 0" + flags
def create_job(path, f, nodes, c, i, m):
    """Write a SLURM batch script for one matrixmul configuration.

    :param path: directory the job script is written into
    :param f: path to the input matrix file
    :param nodes: number of cluster nodes to request
    :param c: replication factor passed as -c (also part of the job name)
    :param i: whether to add the -i flag (and "_i" name suffix)
    :param m: whether to add the -m flag (and "_m" name suffix)
    """
    name = job_name(nodes, c, i, m)
    exp = nodes * 5  # exponent parameter scales with node count
    cmd = get_command(f, c, i, m, exp)
    time = 10  # wall-clock limit in minutes
    # Use a context manager so the script file is flushed and closed even if
    # a write fails -- the original implementation leaked the file handle.
    with open(f"{path}/{name}", "w") as job:
        job.write("#!/bin/bash -l\n")
        job.write(f"#SBATCH --job-name {name}\n")
        job.write(f"#SBATCH --output out/{name}.out\n")
        job.write(f"#SBATCH --error out/{name}.err\n")
        job.write(f"#SBATCH --account \"GC72-18\"\n")
        job.write(f"#SBATCH --nodes {nodes}\n")
        job.write(f"#SBATCH --tasks-per-node 24\n")
        job.write(f"#SBATCH --time 0:{time}:00\n")
        job.write(cmd)
if __name__ == "__main__":
    # Input matrix file and fixed -m setting for this parameter sweep.
    f = "../../examples/sample_20000_1000"
    m = True
    # Emit one job script per (nodes, c, i) combination, but only where the
    # total rank count (nodes * 24) is divisible by c*c, i.e. the replication
    # groups tile the process grid evenly.  TF = (False, True) toggles -i.
    for nodes in (1, 2, 4, 8, 16, 32):
        for c in (1, 2, 4, 8, 16):
            for i in TF:
                if ((nodes * 24) % (c * c) == 0):
                    create_job("jobs", f, nodes, c, i, m)
|
20,304 | 1900d8cb977d706d2c3269dff3693af8f6eaecac | #!/usr/bin/python3
from tkinter import *
import cv2 as cv
import numpy as np
import math
from nanpy import (ArduinoApi, SerialManager)
import time
import picamera
import picamera.array
camangle = (62.2/(180/math.pi))
camangled=(28.9/(180/math.pi))
Bconst = (58.9/(180/math.pi))
cosB = math.cos(Bconst)
connection = SerialManager(device='/dev/ttyACM0')
a = ArduinoApi(connection=connection)
maxvalue=[]
Waittime = 0.005
yWaittime = 0.000005
yreturn = 915
dirstp1 = 25
stpstp1 = 3
ms3stp1 = 31
ms2stp1 = 29
ms1stp1 = 27
dirstp2 = 24
stpstp2 = 2
ms3stp2 = 26
ms2stp2 = 28
ms1stp2 = 30
lsrctrl = 32
endstop = 49
ydist = []
xdist = []
zdist=[]
rpix=[]
gpix=[]
bpix=[]
a.pinMode(dirstp1, a.OUTPUT)
a.pinMode(stpstp1, a.OUTPUT)
a.pinMode(ms3stp1, a.OUTPUT)
a.pinMode(ms2stp1, a.OUTPUT)
a.pinMode(ms1stp1, a.OUTPUT)
a.pinMode(dirstp2, a.OUTPUT)
a.pinMode(stpstp2, a.OUTPUT)
a.pinMode(ms3stp2, a.OUTPUT)
a.pinMode(ms2stp2, a.OUTPUT)
a.pinMode(ms1stp2, a.OUTPUT)
a.pinMode(lsrctrl, a.OUTPUT)
a.pinMode(endstop, a.INPUT)
a.digitalWrite(ms3stp1, a.HIGH)
a.digitalWrite(ms2stp1, a.HIGH)
a.digitalWrite(ms1stp1, a.HIGH)
a.digitalWrite(ms3stp2, a.HIGH)
a.digitalWrite(ms2stp2, a.HIGH)
a.digitalWrite(ms1stp2, a.HIGH)
a.digitalWrite(dirstp1, a.LOW)
scanangle=90
scanres=1.2
xresolution=1080
class Application(Frame):
def rotation_angle90(self):
global scanangle
scanangle=90
self.r90["fg"]="red"
self.r180["fg"]="black"
self.r270["fg"]="black"
self.r360["fg"]="black"
def rotation_angle180(self):
global scanangle
scanangle=180
self.r90["fg"]="black"
self.r180["fg"]="red"
self.r270["fg"]="black"
self.r360["fg"]="black"
def rotation_angle270(self):
global scanangle
scanangle=270
self.r90["fg"]="black"
self.r180["fg"]="black"
self.r270["fg"]="red"
self.r360["fg"]="black"
def rotation_angle360(self):
global scanangle
scanangle=360
self.r90["fg"]="black"
self.r180["fg"]="black"
self.r270["fg"]="black"
self.r360["fg"]="red"
def resolution03(self):
global scanres
scanres=0.3
self.res03["fg"]="red"
self.res06["fg"]="black"
self.res12["fg"]="black"
def resolution06(self):
global scanres
scanres=0.6
self.res03["fg"]="black"
self.res06["fg"]="red"
self.res12["fg"]="black"
def resolution12(self):
global scanres
scanres=1.2
self.res03["fg"]="black"
self.res06["fg"]="black"
self.res12["fg"]="red"
def camresmax(self):
global xresolution
xresolution=3280
self.camresmax["fg"]="red"
self.camresmed["fg"]="black"
self.camreslow["fg"]="black"
def camresmed(self):
global xresolution
xresolution=1920
self.camresmax["fg"]="black"
self.camresmed["fg"]="red"
self.camreslow["fg"]="black"
def camreslow(self):
global xresolution
xresolution=1080
self.camresmax["fg"]="black"
self.camresmed["fg"]="black"
self.camreslow["fg"]="red"
def ycalibrate(self):
a.digitalWrite(dirstp2, a.HIGH)
endstate = a.LOW
while endstate == a.LOW:
endstate = a.digitalRead(endstop)
a.digitalWrite(stpstp2, a.HIGH)
time.sleep(0.000001)
a.digitalWrite(stpstp2, a.LOW)
time.sleep(0.000001)
steps = 0
a.digitalWrite(dirstp2, a.LOW)
for steps in range (yreturn):
a.digitalWrite(stpstp2, a.HIGH)
time.sleep(0.000001)
a.digitalWrite(stpstp2, a.LOW)
time.sleep(0.000001)
def startscan(self):
fileoutput = "Example.pts"
fileoutput = '/media/pi/usbdrive/' + fileoutput
fileoutputtype = "w"
yresolution=int(xresolution/(3280/2464))
adjforz=((yresolution/2)/(math.tan((24.4)/(180/math.pi))))
scanresz=1
threshinput=0.2
radius=5
aconst = (xresolution*(math.sin(Bconst)))/(math.sin(camangle))
aconstsqrd = math.pow(aconst,2)
r=0
file_object = open(fileoutput,fileoutputtype)
scansteps = round(scanangle/scanres)
with picamera.PiCamera() as camera:
camera.start_preview(fullscreen=False,window=(0,0,640,480))
camera.resolution=(xresolution,yresolution)
camera.meter_mode='backlit'
camera.saturation=50
time.sleep(2)
print ("Scan start")
starttime=time.time()
for revolutions in range(scansteps):
scanstepsstring=str(revolutions)
expt=camera.exposure_speed
if expt < 4000:
camera.shutter_speed=4000
else:
camera.shutter_speed=0
camera.awb_gains=(0,0)
camera.capture('loff.jpg','jpeg',use_video_port=True)
loff = cv.imread('loff.jpg')
a.digitalWrite(lsrctrl, a.HIGH)
camera.awb_gains=(8,0)
camera.capture('lon.jpg','jpeg',use_video_port=True)
camera.shutter_speed=0
calcustep = (scanres/1.8)*3*16
calcustep= int(calcustep)
steps = 0
for steps in range(calcustep):
a.digitalWrite(stpstp1, a.HIGH)
time.sleep(0.01)
a.digitalWrite(stpstp1, a.LOW)
time.sleep(0.01)
steps +=1
a.digitalWrite(dirstp1, a.HIGH)
a.digitalWrite(stpstp1, a.HIGH)
time.sleep(0.01)
a.digitalWrite(stpstp1, a.LOW)
time.sleep(0.01)
a.digitalWrite(dirstp1, a.LOW)
a.digitalWrite(stpstp1, a.HIGH)
time.sleep(0.0001)
a.digitalWrite(stpstp1, a.LOW)
time.sleep(0.0001)
lon = cv.imread('lon.jpg')
a.digitalWrite(lsrctrl, a.LOW)
src=cv.subtract(lon,loff)
red=cv.cvtColor(src,cv.COLOR_BGR2GRAY)
ysize = red.shape[0]
xsize = red.shape[1]
blur=cv.GaussianBlur(red,(radius,radius),0)
(minVal, maxVal, MinLoc, maxLoc) = cv.minMaxLoc(blur)
threshamount = maxVal*threshinput
retval, threshold = cv.threshold(red, threshamount, 255, cv.THRESH_TOZERO);
(minVal, maxVal, MinLoc, maxLoc) = cv.minMaxLoc(threshold)
maxvalue = np.argmax(threshold,axis=1)
yint=0
yav=0
xav=0
angleav=0
ynum=0
hypav=0
while yint < ysize:
xcolumn=maxvalue[yint]
pxminus=threshold[yint,((maxvalue[yint])-1)]
if yint > 0:
pyminus=threshold[(yint-1),(maxvalue[yint])]
else:
pyminus=threshold[yint,(maxvalue[yint])]
if yint == (ysize-1):
pyplus=threshold[yint,(maxvalue[yint])]
else:
pyplus=threshold[(yint+1),(maxvalue[yint])]
if xcolumn < (xsize-1):
pxadd=threshold[yint,((maxvalue[yint])+1)]
else:
pxadd=threshold[yint,((maxvalue[yint]))]
if xcolumn > 0 and pxminus !=0 and pxadd !=0 and pyplus!=0 and pyminus !=0:
cosC=((2*aconstsqrd)-(2*aconst*(xcolumn+1)*cosB))/((2*aconst*(math.sqrt((aconstsqrd+((xcolumn+1)*(xcolumn+1))-(2*aconst*(xcolumn+1)*cosB))))))
angle=math.acos(cosC)
totalangle=angle+camangled
oppcalc=400*(math.tan(totalangle))
hypcalc=math.hypot(oppcalc,200)
calc2=math.asin(oppcalc/hypcalc)
rrad= r/(180/math.pi)
rcalc = calc2 + rrad
if hypcalc < 10000:
redp=loff[yint,xcolumn,2]
rpix.append(redp)
greenp=loff[yint,xcolumn,1]
gpix.append(greenp)
bluep=loff[yint,xcolumn,0]
bpix.append(bluep)
xdistance = -(hypcalc*(math.cos(rcalc)))
xdistance = round(xdistance,1)
ydistance = hypcalc*(math.sin(rcalc))
ydistance = round(ydistance,1)
ydist.append(ydistance)
xdist.append(xdistance)
if yint > (ysize/2):
angle = math.atan(((yint+1)-(yresolution/2))/adjforz)
tancalc=-(oppcalc*(math.tan(angle)))
else:
angle = math.atan(((yresolution/2)-(yint+1))/adjforz)
tancalc=(oppcalc*(math.tan(angle)))
tancalc = round(tancalc, 1)
zdist.append(tancalc)
yav=yav+ydistance
xav=xav+xdistance
ynum=ynum+1
hypav=hypav+hypcalc
angleav=angleav+calc2
yint=yint+1
r=r+scanres
if ynum >0:
yav=yav/ynum
xav=xav/ynum
hypav=hypav/ynum
angleav=angleav/ynum
rshort=round(r,1)
yav=round(yav,1)
xav=round(xav,1)
hypav=round(hypav,1)
angleav=angleav*(180/math.pi)
angleav=round(angleav,1)
else:
yav=0
xav=0
hypav=0
angleav=0
print ('Current angle: [%f] Av Distance [%f] at angle [%f] points rec: [%d] Average X: [%f] Y: [%f]\r'%(rshort,hypav,angleav,ynum,xav,yav),end="")
print ("Scan ended, saving")
endtime=time.time()
print("Completed in: %f minutes" % (int((endtime-starttime)/60)))
exportint = len(xdist)
for export in range (exportint):
xout = str(xdist[export])
yout = str(ydist[export])
zout = str(zdist[export])
rout = str(rpix[export])
gout=str(gpix[export])
bout=str(bpix[export])
file_object.write(xout + " " + yout + " " + zout + " " + rout + " " + gout + " " + bout + "\n")
file_object.close()
camera.close
def createWidgets(self):
topLabel=Frame(root)
topLabel.pack()
RotButtons=Frame(root)
RotButtons.pack(side=BOTTOM)
ResLabel=Frame(root)
ResLabel.pack(side=BOTTOM)
ResButtons=Frame(root)
ResButtons.pack(side=BOTTOM)
CamResLabel=Frame(root)
CamResLabel.pack(side=BOTTOM)
CamResButtons=Frame(root)
CamResButtons.pack(side=BOTTOM)
CalibButton=Frame(root)
CalibButton.pack(side=BOTTOM)
GoButton=Frame(root)
GoButton.pack(side=BOTTOM)
Labelrot = Label(topLabel, text="Rotation angle")
Labelrot.pack()
self.r90 = Button(RotButtons, text="90", command=self.rotation_angle90)
self.r90.pack(side=LEFT)
self.r180 = Button(RotButtons, text="180", command=self.rotation_angle180)
self.r180.pack(side=LEFT)
self.r270 = Button(RotButtons, text="270", command=self.rotation_angle270)
self.r270.pack(side=LEFT)
self.r360 = Button(RotButtons, text="360", command=self.rotation_angle360)
self.r360.pack(side=LEFT)
Labelrot = Label(ResLabel, text="Rotational Resolution")
Labelrot.pack()
self.res03 = Button(ResButtons, text="0.3 Deg", command=self.resolution03)
self.res03.pack(side=LEFT)
self.res06 = Button(ResButtons, text="0.6 Deg", command=self.resolution06)
self.res06.pack(side=LEFT)
self.res12 = Button(ResButtons, text="1.2 Deg", command=self.resolution12)
self.res12.pack(side=LEFT)
CamLabel = Label(CamResLabel, text="Camera Resolution")
CamLabel.pack()
self.camresmax = Button(CamResButtons, text="3280x2464 (Maximum)", command=self.camresmax)
self.camresmax.pack(side=LEFT)
self.camresmed = Button(CamResButtons, text="1920x1442 (Medium)", command=self.camresmed)
self.camresmed.pack(side=LEFT)
self.camreslow = Button(CamResButtons, text="1080x811 (Low)", command=self.camreslow)
self.camreslow.pack(side=LEFT)
self.calibbutton=Button(CalibButton, text="Calibrate Y Axis", command=self.ycalibrate)
self.calibbutton.pack(side=LEFT)
self.startscan = Button(GoButton, text="Start Scan with applied settings", command=self.startscan)
self.startscan.pack(side=LEFT)
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
self.createWidgets()
root = Tk()
app = Application(master=root)
app.mainloop()
|
20,305 | 2a48e66a54770c076cefe8b5c3f9be4520f14fa1 | from datetime import datetime
# time stamp
# --- console helpers with aligned timestamp prompts --------------------
def timeStamp():
    """Return the current wall-clock time formatted as HH:MM:SS."""
    now = datetime.now()
    return now.strftime("%H:%M:%S")


def comment(commentContent, empty=False):
    """Print commentContent behind a timestamped " &> " prompt.

    With empty=True the timestamp is replaced by same-width blanks so
    consecutive lines stay visually aligned.
    """
    stamp = timeStamp()
    prefix = " " * len(stamp) if empty else stamp
    print(f"{prefix} &> {commentContent}")
def get(msg=''):
    """Read a line from stdin behind an 8-space-indented " $> " prompt."""
    return input(f"{8*' '} $> {msg}")
if __name__ == "__main__":
print("-- utility file for Saaty project --")
|
20,306 | e28b1f7616781162d6321870958f8e8f52a7b2f8 | # Streams within 100 feet of roads script
# Created 7/8/2015 by Eddie Anderson
# GIS671A
# Scenario:
# The State EPA is concerned about potential hydrocarbon and road salt runoff into
# the state's streams and rivers. They would like to explore the possibility of installing
# water quality monitoring stations on streams and rivers that flow within 100' of any roadway.
# Methodology:
# create a 100 foot buffer of all roads
# clip all streams within this buffer
# explode multipart features of streams
# Note:
# this script assumes the user is storing their data in the following location:
# C:\temp2
# Data for this script was obtained from the MN Geospatial Commons
# The data was clipped to Rock County, Minnesota for brevity as the actual file size for the entire state would have been 60mb
# load the arcpy site package
import arcpy
arcpy.env.overwriteOutput = True
# assign shapefiles to variables
roads = "C:/temp2/roads.shp"
streams = "C:/temp2/streams.shp"
roads_buff = "C:/temp2/output/roads_buff.shp"
streams_c = "C:/temp2/output/streams_c.shp"
streams_expl = "C:/temp2/output/streams_expl.shp"
buffer_dist = "100 feet"
# first, buffer the roads 100'
# then clip the streams using the roads buffer
# finally, explode multipart features of the clipped streams
# printing messages will allow us to see messages generated from running the script
arcpy.MakeFeatureLayer_management(roads,"feature_roads")
arcpy.Buffer_analysis("feature_roads",roads_buff,buffer_dist)
arcpy.Clip_analysis(streams,roads_buff,streams_c)
arcpy.MultipartToSinglepart_management(streams_c,streams_expl)
print arcpy.GetMessages()
|
20,307 | c416d358e677d5730437505702b90f4459296907 | from datetime import datetime,timedelta
# Demonstrate basic datetime/timedelta arithmetic by printing dates
# relative to "now".
n=datetime.now()
t=n.time()
print("Current Time: ",t)
today=datetime.today()
# timedelta(5) is five days.
day=today-timedelta(5)
print("Five days before today is : ",day)
print("Yesterday : ",today-timedelta(1))
print("Current date : ",today)
print("Tomorrow : ",today+timedelta(1))
print("Next 5 days: ")
for i in range(1,6):
    t=today+timedelta(i)
    print(t)
# timedelta(days, seconds): shift the captured "now" forward 5 seconds.
sec=n+timedelta(0,5)
print("After 5 Seconds:",sec.time())
|
20,308 | 5d2b7dd9a75cf92daee15e54b187d35964cc87e6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
#from django_cron import CronJobBase, Schedule
# Create your models here.
estados=(
('rn','Rio Grande do Norte'),
('pe','Pernabuco'),
('pb','Paraiba'),
)
esporte=(
('boxe', 'Boxe'),
('capoeira','Capoeira'),
('judo',u"Judô"),
('taekwond','Taekwond'),
('mma','MMA'),
('jiujitsu','Jiu Jitsu'),
('karate',u'Karatê'),
('zumba','Zumba'),
('outra','Outra')
)
pagamentos=(
('dinheiro', 'Dinheiro(R$)'),
('Cartão', 'Cartão'),
('Tranferencia', 'Tranferência'),
)
meses= {
('1', 'JANEIRO'),
('2', 'FEVEREIRO'),
('3', 'MARÇO'),
('4', 'ABRIL'),
('5', 'MAIO'),
('6', 'JUNHO'),
('7', 'JULHO'),
('8', 'AGOSTO'),
('9', 'SETEMBRRO'),
('10', 'OUTUBRO'),
('11', 'NOVEMBRO'),
('12', 'DEZEMBRO'),
}
class aluno(models.Model):
    """Student record: personal data, address, contact info and photo."""
    nome = models.CharField(max_length=100)
    data_nasc = models.DateField()
    cpf = models.CharField(max_length=14)
    email = models.EmailField()
    endereco = models.CharField(max_length=80)
    numero = models.CharField(max_length=5)
    complemento = models.CharField(max_length=30, blank=True)
    bairro = models.CharField(max_length=40)
    # NOTE(review): the default looks like a display label, but the field is
    # 2 chars and the choice keys are 'rn'/'pe'/'pb' -- confirm intended
    # default (probably 'rn').
    uf = models.CharField(max_length=2, choices=estados, default='Rio Grande do Norte')
    telefone = models.CharField(max_length=13)
    data_cadastro = models.DateTimeField(auto_now_add=True)
    foto_perfil = models.ImageField(blank=True)
    pass  # no-op leftover; harmless
    def __unicode__(self):
        # Python 2 string representation (file imports unicode_literals).
        return self.nome
    def imagem_img(self):
        # HTML thumbnail for the admin list view, or placeholder text when
        # no photo was uploaded.
        if self.foto_perfil:
            return u'<img src="%s" width=50 />' % self.foto_perfil.url
        else:
            return u'Sem imagem'
    imagem_img.short_description = "Imagem Perfil"
    imagem_img.allow_tags = True
class professor(models.Model):
nome = models.CharField(max_length=100)
nascimento = models.DateField()
cpf = models.CharField(max_length=14)
endereco = models.CharField(max_length=80)
numero = models.CharField(max_length=5)
complemento = models.CharField(max_length=30, blank=True)
bairro = models.CharField(max_length=40)
uf = models.CharField(max_length=2, choices=estados, default='Rio Grande do Norte')
email = models.EmailField()
telefone = models.CharField(max_length=13)
def __unicode__(self):
return self.nome
class turma(models.Model):
nome = models.CharField(max_length=20)
modalidade = models.CharField(max_length=13, choices=esporte)
data_cadastro = models.DateTimeField(auto_now_add=True)
valor = models.DecimalField(max_digits=8, decimal_places=2)
#dia_vencimento = models.CharField(max_length=2)
professor = models.ForeignKey(professor, on_delete=models.CASCADE)
turma_alunos = models.ManyToManyField(aluno, through='turma_aluno')
def __unicode__(self):
return '%s - %s' % (self.nome,self.modalidade)
def rtValor(self):
return self.valor
class despesa(models.Model):
referencia = models.CharField(max_length=30)
valor = models.DecimalField(max_digits=8, decimal_places=2)
data_pagamento = models.DateField()
Pago = models.BooleanField()
def statusDs(self):
return self.Pago
class anotacoes_aluno(models.Model):
data_cadastro = models.DateTimeField(auto_now_add=True)
anotacao = models.TextField()
aluno = models.ForeignKey(aluno, on_delete=models.CASCADE)
class turma_aluno(models.Model):
alunos = models.ForeignKey(aluno, on_delete=models.CASCADE)
turma = models.ForeignKey(turma, on_delete=models.CASCADE)
data_matricula = models.DateField(auto_now_add=True)
dia_vencimento = models.CharField(max_length=2, default='02')
def __unicode__(self):
return ('%s %s' % (unicode(self.turma), unicode(self.alunos)))
#return unicode(self.turma.nome)
class recibo(models.Model):
turma_aluno=models.ForeignKey(turma_aluno, on_delete=models.CASCADE)
forma_pagamento=models.CharField(max_length=20, choices=pagamentos, blank=True)
observacao=models.TextField(blank=True)
pago = models.BooleanField()
mes = models.CharField(max_length=2,choices=meses)
juros = models.DecimalField(max_digits=8, decimal_places=2, default=0, blank=True,editable=False)
#def __unicode__(self):
#return self.turma_aluno.turma.valor
def valor(self):
return self.turma_aluno.turma.valor
def vencimento(self):
return self.turma_aluno.dia_vencimento
def total(self):
return self.valor()+self.juros |
20,309 | d4bcd83c2d95ef1f3dcedef278f4fb2999c580ca | from mysql import connector
class MySqlConnector:
    """Thin convenience wrapper around mysql.connector.

    Holds one connection and one reusable cursor for the object's lifetime.

    NOTE(review): the options default is a mutable dict shared across calls;
    it is only read here, but a None default resolved inside __init__ would
    be the safer idiom.
    """
    def __init__(self, options={"host": "localhost", "port":'3306', "user": "root", "password": "", "database": ""}):
        # Open the connection eagerly; failures propagate to the caller.
        self.conector = connector.connect(
            host=options['host'],
            port=options['port'],
            user=options['user'],
            password=options['password'],
            database=options['database']
        )
        self.cursor = self.conector.cursor()
    def NoQuery(self, sql):
        # Execute a statement whose result set is not needed (DDL/DML).
        self.cursor.execute(sql)
    def Query(self, sql):
        # Execute a statement and return all result rows.
        self.cursor.execute(sql)
        return self.cursor.fetchall()
    def ListTabels(self):
        # Table names of the current database.
        self.cursor.execute('SHOW TABLES;')
        return [i[0] for i in self.cursor.fetchall()]
    def ListDatabases(self):
        # Names of all databases visible to the connected user.
        self.cursor.execute('SHOW DATABASES;')
        return [i[0] for i in self.cursor.fetchall()]
    def SetDatabase(self,db):
        # NOTE(review): db is interpolated into the SQL string -- do not pass
        # untrusted input here.
        self.cursor.execute(f'Use `{db}`;')
    def GetColumns(self, x):
        # Column names of table x via INFORMATION_SCHEMA.
        # NOTE(review): x is interpolated into the SQL (injection risk with
        # untrusted input), and on error the exception object is *returned*
        # rather than raised -- callers must type-check the result.
        sql = f"""SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'{x}' """
        try:
            self.cursor.execute(sql)
            return self.cursor.fetchall()
        except Exception as e:
            print(e)
            return e |
20,310 | c199da338f3813635755675d37025250cbc540f5 | """
leetcode 938: range sum of BST
Given the root node of a binary search tree and two integers low and high,
return the sumb of the values of all nodes with a value in the inclusive range
[low, high].
"""
class Node:
    """A binary-tree node holding a value and optional children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def range_sum(root, low, high):
    """Return the sum of all node values in [low, high], inclusive.

    Recurses over the whole tree rooted at root; an empty subtree
    contributes 0.
    """
    if root is None:
        return 0
    total = root.val if low <= root.val <= high else 0
    total += range_sum(root.left, low, high)
    total += range_sum(root.right, low, high)
    return total
def test1():
    """Smoke test: 7 + 10 + 15 fall inside [7, 15] -> 32."""
    root = Node(10, Node(5, Node(3), Node(7)), Node(15, None, Node(18)))
    assert range_sum(root, 7, 15) == 32
    print("test 1 successful")


def test2():
    """Smoke test on a deeper tree: 6 + 7 + 10 fall inside [6, 10] -> 23."""
    root = Node(
        10,
        Node(
            5,
            Node(3, Node(1)),
            Node(7, Node(6))
        ),
        Node(15, Node(13), Node(18))
    )
    assert range_sum(root, 6, 10) == 23
    print("test 2 successful")


def main():
    """Run both range_sum smoke tests."""
    test1()
    test2()


if __name__ == "__main__":
    main()
|
20,311 | a0a22b7abe53a78f26beaba7dfd029e87826ee0a | import threading
def small(s):
    """Print how many characters of s are lowercase letters."""
    total = sum(1 for ch in s if ch.islower())
    print("small is {}".format(total))


def capital(s):
    """Print how many characters of s are uppercase letters."""
    total = sum(1 for ch in s if ch.isupper())
    print("capital is {}".format(total))


def digit(s):
    """Print how many characters of s are decimal digits."""
    total = sum(1 for ch in s if ch.isdigit())
    print("digit is {}".format(total))
if __name__=="__main__":
    #n=input("Enter no?")
    # Sample text whose lowercase/uppercase/digit characters get counted.
    s="My Name is 1234567"
    #w=factset(n)
    # NOTE(review): each thread variable below shadows the function of the
    # same name.  This works only because target= is evaluated before the
    # rebinding; renaming the thread objects would be clearer.
    small =threading.Thread(name="SMALL",target=small,args=(s,))
    #w=factset(n)
    capital =threading.Thread(name="CAPITAL",target=capital,args=(s,))
    digit =threading.Thread(name="DIGIT",target=digit,args=(s,))
    # Run all three counters concurrently, then wait for completion.
    small.start()
    capital.start()
    digit.start()
    small.join()
    capital.join()
    digit.join()
    print("Small Thread name is: {} and id is: {}".format(small.name,small.ident))
    print("Caital Thread name is: {} and id is: {}".format(capital.name,capital.ident))
    print("Digit Thread name is: {} and id is: {}".format(digit.name,digit.ident))
|
20,312 | 47b565cffdaa65aa24279eeeb6c7c238683387c3 | from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('', views.portfolio, name = 'portfolio'),
] |
20,313 | d36228d180359fefcefaaf6a8e14290ec551b604 | from math import pi, acos, sin, cos
import heapq
''' Maps a Node ID to a tuple of lat/lng coordinates '''
nodeToCoordinates = {}
''' Maps a Node ID to a set of all neighboring nodes '''
nodeToNeighbors = {}
''' Maps the two path nodes back to their original path ID '''
nodesToPath = {}
def run(source, destination, paths, nodes):
    """Build the graph from paths/nodes, then route with both algorithms.

    Returns ((astar_route, astar_checked), (dijkstra_route, dijkstra_checked)).
    """
    _init(paths, nodes)
    astar_route, astar_checked = astar(source, destination)
    dijkstra_route, dijkstra_checked = dijkstra(source, destination)
    return ((astar_route, astar_checked), (dijkstra_route, dijkstra_checked))
def _init(paths, nodes):
    '''
    Initializes the nodeToCoordinates and nodeToNeighbors dictionaries.

    param paths - iterable of dicts with 'id', 'source', 'destination' keys
    param nodes - iterable of dicts with 'id', 'lat', 'lng' keys

    NOTE(review): nodesToPath is keyed on string concatenation of two node
    IDs, which can collide for distinct pairs (e.g. "ab"+"c" vs "a"+"bc");
    a tuple key would be unambiguous.
    '''
    for p in paths:
        # Record the path's endpoints as mutual neighbors.
        if not p['source'] in nodeToNeighbors:
            nodeToNeighbors[p['source']] = set()
        nodeToNeighbors[p['source']].add(p['destination'])
        if not p['destination'] in nodeToNeighbors:
            nodeToNeighbors[p['destination']] = set()
        nodeToNeighbors[p['destination']].add(p['source'])
        # Map both orderings of the endpoints back to the path ID.
        nodesToPath[p['source'] + p['destination']] = p['id']
        nodesToPath[p['destination'] + p['source']] = p['id']
    for n in nodes:
        nodeToCoordinates[n['id']] = (n['lat'], n['lng'])
def astar(source, destination):
    '''
    Performs an A* graph search from source to destination.

    Returns (node_path, checked_paths): the list of node IDs on the route
    and the set of path IDs examined.  Falls off the loop (implicitly
    returning None) when destination is unreachable.

    Priority-queue entries are tuples (f, g, h, node_path) where:
        g = distance travelled along this candidate route so far
        h = heuristic (great-circle) estimate of the remaining distance
        f = g + h, the key the heap orders on
        node_path = list of node IDs on the candidate route
    '''
    checked_paths = set()
    popped_nodes = set()  # closed set: nodes already expanded
    h_initial = distance(source, destination)
    p_initial = (h_initial, 0, h_initial, [source])
    pq = [p_initial]
    while pq:
        f, g, h, node_path = heapq.heappop(pq)
        last_node = node_path[-1]
        if len(node_path) > 1:
            # Record the segment we just traversed as "checked".
            checked_paths.add(nodesToPath[last_node + node_path[-2]])
        if last_node == destination:
            return (node_path, checked_paths)
        if last_node not in popped_nodes:
            for neighbor in nodeToNeighbors[last_node]:
                new_g = g + distance(last_node, neighbor)
                new_h = distance(neighbor, destination)
                new_f = new_g + new_h
                new_path = list(node_path)
                new_path.append(neighbor)
                pq_node = (new_f, new_g, new_h, new_path)
                heapq.heappush(pq, pq_node)
            popped_nodes.add(last_node)
def dijkstra(source, destination):
    '''
    Performs Dijkstra's algorithm to find the shortest path between two
    nodes in the graph.

    Returns (node_path, checked_paths) like astar(); implicitly returns
    None when destination is unreachable.  Priority-queue entries are
    tuples (g, node_path) where:
        g = distance travelled so far
        node_path = list of node IDs on the candidate route
    '''
    pq = [(0, [source])]
    checked_paths = set()
    popped_nodes = set()  # closed set: nodes already expanded
    while pq:
        g, node_path = heapq.heappop(pq)
        last_node = node_path[-1]
        if len(node_path) > 1:
            # Record the segment we just traversed as "checked".
            checked_paths.add(nodesToPath[last_node + node_path[-2]])
        if last_node == destination:
            return (node_path, checked_paths)
        if last_node not in popped_nodes:
            for neighbor in nodeToNeighbors[last_node]:
                new_g = g + distance(last_node, neighbor)
                new_path = list(node_path)
                new_path.append(neighbor)
                heapq.heappush(pq, (new_g, new_path))
            popped_nodes.add(last_node)
distances = {}  # memo: order-independent (id, id) tuple -> miles
R = 3958.76  # Radius of Earth, used in great circle distance calculation.
def distance(source, destination):
    '''
    Lazily calculates the great-circle distance between two nodes and
    memoizes the result.

    param source - string ID of source node
    param destination - string ID of destination node

    The cache key is an order-independent tuple of the two IDs.  The
    original implementation keyed the cache on string concatenation
    (source + destination), which can collide for distinct pairs
    (e.g. "ab" + "c" vs "a" + "bc") and return a wrong cached distance.
    '''
    if source == destination:
        return 0
    key = (source, destination) if source <= destination else (destination, source)
    if key in distances:
        return distances[key]
    x1, y1 = nodeToCoordinates[source]
    x2, y2 = nodeToCoordinates[destination]
    # Convert degrees to radians (x = latitude, y = longitude).
    x1 *= pi / 180
    y1 *= pi / 180
    x2 *= pi / 180
    y2 *= pi / 180
    # Spherical law of cosines.
    d = acos(sin(x1) * sin(x2) + cos(x1) * cos(x2) * cos(y2 - y1)) * R
    distances[key] = d
    return d
|
20,314 | 0f5497080456abbe895d2c07ee7cad6f74372464 | import numpy as np
import statsmodels.formula.api as smf
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(414) # create seed based on 414 starting point
# create 1000 test data sets
X = np.linspace(0, 15, 1000)
y = 3 * np.sin(X) + np.random.normal(1 + X, .2, 1000)
# create training and testing data sets
train_X, train_y = X[:700], y[:700]
test_X, test_y = X[700:], y[700:]
# create training and testing DataFrame
train_df = pd.DataFrame({'X': train_X, 'y': train_y})
test_df = pd.DataFrame({'X': test_X, 'y': test_y})
# plot training data with linear regression
m, b = np.polyfit(train_df['X'], train_df['y'], 1)
plt.plot(train_df['X'], train_df['y'], '.')
plt.plot(train_df['X'], m*train_df['X'] + b, '-')
plt.show()
# plot testing data with linear regression
m, b = np.polyfit(test_df['X'], test_df['y'], 1)
plt.plot(test_df['X'], test_df['y'], '.')
plt.plot(test_df['X'], m*test_df['X'] + b, '-')
plt.show()
# linear fit
poly_1 = smf.ols(formula='y ~ 1 + X', data=train_df).fit()
# quadratic fit
# BUG FIX: the quadratic model previously rebound poly_1, silently
# discarding the linear fit; bind it to its own name so the two models
# can actually be compared (e.g. by MSE on train/test).
poly_2 = smf.ols(formula='y ~ 1 + X + I(X**2)', data=train_df).fit()
'''
Using mean squared error as a metric, compare the performance of different polynomial curves in the training set and in the testing set. Submit your project as overfitting.py.
'''
|
20,315 | d5fa80327151a02d4f937a5959892d1b92a18ab3 | ##########
# Listas #
##########
lista_numeros= [1, 2, 3]
lista_strings= ["Uno", "Dos", "Tres"]
lista_mixta= [1, "Dos", 3.0]
print("Lista de números")
for item in lista_numeros:
print(item)
print("\nLista de strings")
for item in lista_strings:
print(item)
print("\nLista mixta")
for item in lista_mixta:
print(item)
print("\nLos números son: ", end='', flush=True)
for item in lista_mixta:
print(item, end=' ', flush=True)
ultimo_lista_numeros=lista_numeros[-1]
ultimo_lista_strings=lista_strings[-1]
ultimo_lista_mixta=lista_mixta[-1]
print("\n\nLos últimos valores de las listas son: " + str(ultimo_lista_numeros) + ", " + str(ultimo_lista_strings) + ", " + str(ultimo_lista_mixta))
################
# Diccionarios #
################
diccionario_peliculas= {"Martin Scorsese":"Infiltrados","Francis Ford Coppola":"El Padrino","Javier Fesser":"Campeones"}
print("\nDiccionario de películas")
print(diccionario_peliculas)
print("\nLas claves del diccionario de películas son:")
for item in diccionario_peliculas:
print(item)
print("\nLos valores del diccionario de películas son:")
for item in diccionario_peliculas:
print(diccionario_peliculas[item])
|
20,316 | 417d2310c880c092ed9afe804cb06ac9ba8866a7 | import cv2
import argparse
import orien_lines
import datetime
from imutils.video import VideoStream
from utils import detector_utils as detector_utils
import pandas as pd
from datetime import date
import xlrd
from xlwt import Workbook
from xlutils.copy import copy
import numpy as np
from truck_lpr_demo import detect_license_plate
lst1 = []
lst2 = []
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--display', dest='display', type=int,
default=1, help='Display the detected images using OpenCV. This reduces FPS')
args = vars(ap.parse_args())
detection_graph, sess = detector_utils.load_inference_graph()
if __name__ == '__main__':
# Detection confidence threshold to draw bounding box
score_thresh = 0.80
# vs = cv2.VideoCapture('rtsp://192.168.1.64')
#vs = VideoStream(0).start()
# Oriendtation of machine
dir=['lr','rl','bt','tb']
for i in dir:
Orientation = i
# input("Enter the orientation of hand progression ~ lr,rl,bt,tb :")
# For Machine
# Line_Perc1=float(input("Enter the percent of screen the line of machine :"))
Line_Perc1 = float(15)
# For Safety
# Line_Perc2=float(input("Enter the percent of screen for the line of safety :"))
Line_Perc2 = float(30)
# max number of hands we want to detect/track
num_hands_detect = 2
# Used to calculate fps
start_time = datetime.datetime.now()
num_frames = 0
im_height, im_width = (None, None)
cv2.namedWindow('Detection', cv2.WINDOW_NORMAL)
def count_no_of_times(lst):
    """Count rising edges (0 -> 1 transitions) in a 0/1 sequence.

    The state before the sequence is treated as 0, so a leading 1 counts
    as one transition.
    """
    edges = 0
    prev = 0
    for curr in lst:
        if prev == 0 and curr == 1:
            edges += 1
        prev = curr
    return edges
try:
while True:
mypath = "D:\\BMS_final\\test_images\\baby_sleeping.jpg"
img = cv2.imread(mypath, 1)
#frame = vs.read()
frame = np.array(img)
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if im_height == None:
im_height, im_width = frame.shape[:2]
# Convert image to rgb since opencv loads images in bgr, if not accuracy will decrease
try:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
except:
print("Error converting to RGB")
# cv2.line(img=frame, pt1=(0, Line_Position1), pt2=(frame.shape[1], Line_Position1), color=(255, 0, 0), thickness=2, lineType=8, shift=0)
# cv2.line(img=frame, pt1=(0, Line_Position2), pt2=(frame.shape[1], Line_Position2), color=(255, 0, 0), thickness=2, lineType=8, shift=0)
# Run image through tensorflow graph
boxes, scores, classes = detector_utils.detect_objects(
frame, detection_graph, sess)
Line_Position2 = orien_lines.drawsafelines(frame, Orientation, Line_Perc1, Line_Perc2)
# Draw bounding boxeses and text
for i in range(len(dir)):
a, b = detector_utils.draw_box_on_image(
num_hands_detect, score_thresh, scores, boxes, classes, im_width, im_height, frame, Line_Position2,
Orientation)
lst1.append(a)
lst2.append(b)
no_of_time_hand_detected = no_of_time_hand_crossed = 0
# Calculate Frames per second (FPS)
num_frames += 1
elapsed_time = (datetime.datetime.now() -
start_time).total_seconds()
fps = num_frames / elapsed_time
if args['display']:
# Display FPS on frame
detector_utils.draw_text_on_image("FPS : " + str("{0:.2f}".format(fps)), frame)
cv2.imshow('Detection', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
img.stop()
break
no_of_time_baby_detected = count_no_of_times(lst2)
no_of_time_baby_crossed = count_no_of_times(lst1)
except KeyboardInterrupt:
no_of_time_baby_detected = count_no_of_times(lst2)
no_of_time_baby_crossed = count_no_of_times(lst1)
today = date.today()
|
20,317 | 682a3086883558c34e8c0d2d591a1ee6600ed2eb | import configparser
from .utils import readable_dir
class ErrorLoadingConfig(Exception):
    """Raised when a configuration file cannot be read or parsed."""

    def __init__(self, config_file, message=None):
        # Fold the original error text into a single descriptive message.
        detail = ('Failed in reading config file %s. Original message: %s'
                  % (config_file, message))
        Exception.__init__(self, detail)
class Config(object):
    """A class that represents the parsed config.

    Holds the datastore connection settings and the repository path; values
    can be overridden from an ini-style file via :meth:`load_from_file`.

    :param project_name: name of the project being analysed
    :param db_driver: name of the datastore driver (e.g. **mongo**)
    :param db_database: database of the datastore
    :param db_hostname: hostname where the datastore runs on
    :param db_port: port where the datastore is listening on
    :param path: path to the repository
    """

    def __init__(self,
                 project_name,
                 db_driver="mongo",
                 db_database="vcsSHARK",
                 db_hostname="localhost",
                 db_port=27017,
                 path="."
                 ):
        self.project_name = project_name
        self.db_driver = db_driver
        self.db_database = db_database
        self.db_hostname = db_hostname
        self.db_port = int(db_port)
        # Defaults for options that previously only existed after a config
        # file was loaded; without them the getattr() fallback in
        # _readConfigOption raised AttributeError for empty config values.
        self.db_user = ""
        self.db_password = ""
        self.db_authentication = ""
        # Backward-compatible alias for the historical misspelling.
        self.db_authentiacation = self.db_authentication
        self.uri = path.rstrip('/')

    def _str2bool(self, v):
        """Return True if *v* is one of the accepted truthy strings.

        :param v: string that should be checked (yes/true/t/1, any case)
        """
        return v.lower() in ("yes", "true", "t", "1")

    def _readConfigOption(self, section, option, returnBool=False, returnList=False):
        """Read one option from the parsed config file.

        Falls back to the attribute of the same name on this object when the
        option is missing or empty.

        :param section: section of the configuration the option is in
        :param option: option from which the value should be read
        :param returnBool: interpret the value as a boolean
        :param returnList: split the value on commas and return a list
        """
        value = self.configParser.get(section, option)
        if value:  # None and "" both fall through to the attribute default
            if returnBool:
                return self._str2bool(value)
            elif returnList:
                return value.split(",")
            return value
        return getattr(self, option)

    def load_from_file(self, config_file):
        """Load the configuration from the specified file.

        :param config_file: path to configuration file
        :raises ErrorLoadingConfig: if the file cannot be read or parsed
        """
        try:
            self.configParser = configparser.ConfigParser(allow_no_value=True)
            self.configParser.read(config_file)
            self.uri = self._readConfigOption("RepositoryConfiguration", "uri").rstrip('/')
            self.db_driver = self._readConfigOption("Database", "db_driver")
            self.db_user = self._readConfigOption("Database", "db_user")
            self.db_password = self._readConfigOption("Database", "db_password")
            self.db_database = self._readConfigOption("Database", "db_database")
            self.db_hostname = self._readConfigOption("Database", "db_hostname")
            self.db_port = int(self._readConfigOption("Database", "db_port"))
            # Fixed typo: store under the correct name, keep old misspelled
            # attribute as an alias so existing callers still work.
            self.db_authentication = self._readConfigOption("Database", "db_authentication")
            self.db_authentiacation = self.db_authentication
            # Check if dirs are readable
            readable_dir(self.uri)
        except Exception as e:
            # Use the dedicated exception type defined for this purpose
            # instead of a bare Exception with a hand-built message.
            raise ErrorLoadingConfig(config_file, e) from e
|
20,318 | b0fef996b72195a10a41fd23ba25dc566764b713 | # 01. 세금징수(그리디)
t = int(input())
for i in range(t):
n = int(input())
coins = [50000, 10000, 5000, 1000, 500, 100]
coinnum = 0
for coin in coins:
coinnum += n // coin
n %= coin
print(coinnum) |
20,319 | ca691815f4d336d4ad0b5e74e617bfba8e2c923a |
import sys
from essentia import INFO
import essentia
from essentia.standard import *
import numpy as np
import conf
# Analysis parameters for the pitch-variation detector below.
opts={'name' : 'pitch',
      'maxFreq':2000,      # upper pitch bound passed to PitchYinFFT (Hz)
      'minFreq':100,       # lower pitch bound passed to PitchYinFFT (Hz)
      "confThresh":0.01,   # minimum YIN confidence to accept a pitch value
      "smoothTime":1}      # moving-average smoothing window (seconds)
def compute(audio):
    """Compute a smoothed pitch-variation signal for *audio*.

    Runs frame-wise YIN pitch detection on the magnitude spectrum, keeps a
    pitch value only when the current and previous frames are both confident
    (octave-correcting so consecutive values stay within a factor of two),
    differentiates the resulting pitch track and smooths the absolute
    differences with a moving average.

    :param audio: 1-D audio samples (converted to an essentia array)
    :return: per-frame smoothed pitch-variation signal
    """
    audio = essentia.array(audio)
    # Global framing parameters come from the shared conf module.
    sampleRate = int(conf.opts['sampleRate'])
    frameSize = int(conf.opts['frameSize'])
    hopSize = int(conf.opts['hopSize'])
    zeroPadding = int(conf.opts['zeroPadding'])
    windowType = conf.opts['windowType']
    frameRate = float(sampleRate)/float(hopSize)
    frames = FrameGenerator(audio = audio, frameSize = frameSize, hopSize = hopSize)
    window = Windowing(size = frameSize, zeroPadding = zeroPadding, type = windowType)
    spectrumf = Spectrum()
    yinf = PitchYinFFT(frameSize = frameSize,minFrequency = opts["minFreq"],maxFrequency = opts["maxFreq"])
    avgf = MovingAverage(size=int(opts["smoothTime"]*frameRate))
    total_frames = frames.num_frames()
    n_frames = 0
    start_of_frame = -frameSize*0.5
    # resp: pitch track, resc: confidence track (both seeded with 0).
    resp,resc = [0],[0]
    for frame in frames:
        windowed_frame = window(frame)
        sp = spectrumf(windowed_frame)
        pitch,confi = yinf(sp)
        k=0  # octave shift exponent for the current frame
        if n_frames:
            """
            here is legato
            """
            if confi>opts["confThresh"] and resc[-1]>opts["confThresh"] :
                if resp[-1]>0:
                    # Shift pitch by octaves until it lies within a factor
                    # of two of the previous accepted pitch.
                    while(True):
                        if pitch*pow(2,k)>2.*resp[-1]:
                            k-=1
                        elif pitch*pow(2,k)<resp[-1]/2.:
                            k+=1
                        else:
                            break
                """
                here is attack
                """
            else :
                pitch = 0
        resc+=[confi]
        resp += [pitch*pow(2,k)]
        n_frames += 1
        start_of_frame += hopSize
    # The onset rate is defined as the number of onsets per seconds
    resp = np.diff(resp)*1000./frameRate
    res = np.zeros(n_frames)
    # NOTE(review): abs(np.diff(resp)) has n_frames-1 elements while res[2:]
    # holds n_frames-2 -- possible off-by-one (res[1:]?); verify at runtime.
    res[2:] = abs(np.diff(resp))*1000./frameRate
    res = avgf(essentia.array(res))
    return res
if __name__ == "__main__":
    # Demo: compute and plot the pitch-variation signal for a sample file.
    import matplotlib.pyplot as plt
    import essentia.standard
    path = '/Users/mhermant/Documents/Work/Datasets/jku/onsets/audio/flac/gs_mix2_0dB.wav'
    l = essentia.standard.MonoLoader(filename = path)
    ons = compute(l())
    # print as a function call so the demo also runs under Python 3
    # (a single-argument print(...) behaves identically on Python 2).
    print(ons)
    plt.plot(ons)
    plt.show()
20,320 | fb74de44aa6ff20f42fca9f6da1acf0d34047a8b | import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.datasets import mnist
import sys
# import tensorflow as tf
# config = tf.compat.v1.ConfigProto(gpu_options =
# tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
# # device_count = {'GPU': 1}
# )
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
# tf.compat.v1.keras.backend.set_session(session)
def one_hot(data, num_categories):
    """Return a one-hot encoded float matrix for integer class labels.

    :param data: sequence/array of integer labels in [0, num_categories)
    :param num_categories: number of columns in the result
    :return: float array of shape (len(data), num_categories) with exactly
        one 1.0 per row
    """
    data = np.asarray(data, dtype=int)
    oh = np.zeros((len(data), num_categories))
    # Vectorized fancy indexing replaces the per-row Python loop; also
    # handles the empty-input case (arange(0) selects nothing).
    oh[np.arange(len(data)), data] = 1
    return oh
# import data (downloads MNIST on first use via keras.datasets)
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# preprocess data: add a trailing channel axis and scale pixels into [0, 1)
x_train = x_train.reshape( (60000,28,28,1) ) / 256
x_test = x_test.reshape( (10000,28,28,1) ) / 256
y_train = one_hot(y_train, 10)
y_test = one_hot(y_test, 10)

# build the model: two conv/pool stages followed by a dropout-regularized
# dense classifier with a 10-way softmax output
model = Sequential()
input_shape=(28,28,1)
model.add(Conv2D(filters=32,
                 kernel_size=(3,3),
                 activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2,2),
                       strides=(2,2)))
model.add(Conv2D(filters=32,
                 kernel_size=(3,3),
                 activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2),
                       strides=(2,2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(units=256,
                activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10,
                activation='softmax'))
# load model weight
# compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

# train (set num_epochs to 0 to skip training and only evaluate)
num_epochs = 1
if num_epochs != 0:
    # train the model
    model.fit(x_train, y_train,
              batch_size=4,
              epochs=num_epochs)

# evaluate model on the held-out test set ([loss, accuracy])
score = model.evaluate(x_test, y_test)
print('\nScore: ', score)
20,321 | c30b32d71148001a5bd29126d25317af0fda2845 | #!/usr/bin/env python
import glob
from setuptools import find_packages, setup
# Package metadata for argcomplete. Static fields (name, version, deps, ...)
# are expected to come from declarative config; only the dynamic bits --
# script discovery and package data -- are declared here.
setup(
    url="https://github.com/kislyuk/argcomplete",
    packages=find_packages(exclude=["test", "test.*"]),
    scripts=glob.glob("scripts/*"),  # install everything under scripts/
    package_data={"argcomplete": ["bash_completion.d/_python-argcomplete", "py.typed"]},
    zip_safe=False,
    include_package_data=True,
    platforms=["MacOS X", "Posix"],
)
|
20,322 | cee6cdc8d56e5e9661fc3c1b6e7565b1e464539e | import pyautogui
import requests
import time
from PIL import Image
# Poll loop: once a minute, push the tail of the local "log" file to the
# remote datastore endpoint. The screenshot-upload variant is kept below,
# commented out.
# NOTE(review): the API token is hard-coded in this file -- move it to an
# environment variable or config file before sharing/committing.
while True:
    # ~ print("capturing.. ")
    # ~ s = pyautogui.screenshot(region=(120*2,150*2,700*2,500*2))
    # ~ s = s.convert("L")
    # ~ s.thumbnail((200,200))
    # ~ s.save("one.jpg", "JPEG")
    # ~ r = requests.post(
    # ~ "https://www.izzulmakin.com/datastorage/file/",
    # ~ files={"storagefile":open("one.jpg","rb")},
    # ~ headers={"Authorization": "Token e0cf9f5e5a443092a7037bebbb237a22d9be9c03"},
    # ~ timeout=3
    # ~ )
    # ~ print(r.text)
    with open("log", "r") as f:
        # Only the last 1000 characters of the log are uploaded.
        log = f.read()[-1000:]
    try:
        r = requests.post(
            "https://www.izzulmakin.com/datastorage/",
            json={"content":log},
            headers={"Authorization": "Token e0cf9f5e5a443092a7037bebbb237a22d9be9c03"},
            timeout=10
        )
        print("sukses")
    except Exception as e:
        # Best-effort: log the failure and retry on the next cycle.
        print("error")
        print(e)
        #raise e
    print("sent.")
    time.sleep(60)
|
20,323 | be86ac322b8143b33630147413e49a230bd1439a | '''
Created on 2016-3-27
@author: admin
'''
import urllib.request
import os
import re
# Base URL for numbered listing pages ("index_2.html" ... "index_23.html").
base_url="http://1616lu.com/html/tupian/qingchun/index_"
# Keep the old name bound as well so any later code using `url` still works;
# the inner loop below no longer clobbers the listing base (bug fix).
url=base_url
index=1  # running 1-based count of downloaded images
for i in range(1,24):
    # The first listing page has no numeric suffix.
    openurl=(i==1)and "http://1616lu.com/html/tupian/qingchun/index.html" or base_url+str(i)+".html"
    imgHtml=urllib.request.urlopen(openurl).read().decode('utf-8')
    urls=re.findall(r'<li><a href="/html/tupian/qingchun/(.*)" target="_blank">',imgHtml)
    print("分析网页....")
    downurl="http://1616lu.com/html/tupian/qingchun/"
    # BUG FIX: the inner loop used to reuse the name `url`, overwriting the
    # listing base so every page after the first built a broken URL.
    for page_url in urls:
        # If the article page cannot be fetched, handle the exception below.
        try:
            imgHtml=urllib.request.urlopen(downurl+page_url).read().decode('utf-8')
            imgurls=re.findall(r'<p><img src="http://pic.1100lu.net/(.*?)" alt="" /></p>',imgHtml)
            for imgurl in imgurls:
                res=urllib.request.urlopen("http://pic.1100lu.net/"+imgurl)
                if str(res.status)!='200':
                    print('未下载成功:')
                    continue
                filepath,name=os.path.split(imgurl)
                print("下载:",name)
                filename=os.path.join(os.getcwd(),name)
                if os.path.exists(filename):
                    print("文件已存在",name,"跳过")
                    continue
                with open(filename,'wb') as f:
                    f.write(res.read())
                print('下载完成\n',name)
                index+=1
        except Exception as e:
            print('未下载成功:',page_url)
print("下载结束,一共下载了 %s 张图片"% (index-1))
20,324 | 86d81454e650b7e99e11d921ae9ae63cb3f35b3d | import os.path
import pkgutil
from pathlib import Path
from ._metadata import Specification
from ._mksdist import sdist_path
from ._mksdist import SdistBuilder
from ._mkwheel import wheel_name
from ._mkwheel import WheelBuilder
from ._mkwheel import write_src_to_whl
def build_wheel(spec: Specification, distdir: Path):
    """Build a wheel for *spec* into *distdir* and return the wheel path."""
    target = wheel_name(distdir, spec)
    with WheelBuilder.for_target(target, spec) as builder:
        write_src_to_whl(builder, spec)
    return target
def bootstraper(spec):
    """Render the bootstrap shim module for an editable install of *spec*.

    Fills the packaged template with the absolute path of the project's
    ``src`` directory and returns the result as UTF-8 bytes.
    """
    source_file = os.path.abspath(spec.source_file)
    src_folder = os.path.join(os.path.dirname(source_file), "src")
    raw = pkgutil.get_data(__name__, "bootstrap_template.py.txt")
    return raw.decode("utf-8").format(srcfolder=src_folder).encode("utf-8")
def build_editable(spec, distdir):
    """Build an editable wheel containing only a bootstrap shim module."""
    target = wheel_name(distdir, spec)
    with WheelBuilder.for_target(target, spec) as builder:
        # The wheel ships a single <package>.py that redirects to the source tree.
        builder.add_file(name=spec.package + ".py", data=bootstraper(spec))
    return target
def build_sdist(spec: Specification, sdist_dir: Path) -> Path:
    """Build a source distribution for *spec* into *sdist_dir*."""
    target = sdist_path(sdist_dir, spec)
    with SdistBuilder.for_target(target, spec) as sdist:
        # todo: better config
        from setuptools_scm.integration import find_files

        # Ship every SCM-tracked file from the project root.
        for tracked in find_files(""):
            with open(tracked, "rb") as handle:
                sdist.add_file(tracked, handle.read())
    return target
|
20,325 | 3aeddf0045beceffdb60e69ec2e61ef6421a1209 | import logging
from argparse import Namespace
from enum import Enum
import toml
class LogLevels(Enum):
    """Symbolic names for the standard :mod:`logging` level strings."""

    debug = "DEBUG"
    info = "INFO"
    warning = "WARNING"
    error = "ERROR"
    critical = "CRITICAL"
def check(filepath: str) -> bool:
    """Check a single file: return True iff it contains parseable TOML."""
    logging.debug(filepath, extra=dict(status="checking"))
    try:
        with open(filepath) as handle:
            toml.load(handle)
    except toml.TomlDecodeError as err:
        # Record the parser's message and report failure for this file.
        logging.error(filepath, extra=dict(status=err.msg))
        return False
    else:
        logging.info(filepath, extra=dict(status="ok"))
        return True
def main(args: Namespace) -> int:
    """Check every file in ``args.files``; return 0 iff all are valid."""
    # Materialize the results first so every file is checked (and logged)
    # even after a failure -- all() over a lazy iterator would short-circuit.
    results = [check(path) for path in args.files]
    return 0 if all(results) else 1
|
20,326 | 6a2ca5e32ee8244ea2308c06793cdf3daef28fc1 | from django.urls import path
from .views import CreateContactUsObject, about_us
# Contact/about routes for the site.
urlpatterns = [
    path('contact_us/', CreateContactUsObject.as_view(), name='contact_us'),  # class-based view
    path('about_us/', about_us, name='about_us'),  # function view
]
|
20,327 | 321d364b414bb18008e452de051238f629133cb7 | import scrapy
import time
import subprocess
# Module-level polling state shared across parse() invocations.
count=1          # poll counter; 1 => first run, show the match menu
prev_score1=-1   # last seen innings-1 score string (for change detection)
prev_score2=-1   # last seen innings-2 score string
index=0          # 1-based index of the match the user selected
teams1=[]        # innings-1 team names, in page order
teams2=[]        # innings-2 team names, in page order
class QuotesSpider(scrapy.Spider):
    """Polls ESPNcricinfo's live-matches page and raises desktop
    notifications on score changes for one user-selected match.

    NOTE(review): Python 2 code (print statements) that keeps its state in
    module-level globals; parse() re-schedules itself every ~10 seconds.
    """
    name = "quotes"
    start_urls = [
        'http://www.espncricinfo.com/ci/engine/match/index.html'
    ]

    def sendmessage(self, message):
        # Fire-and-forget Linux desktop notification via notify-send.
        subprocess.Popen(['notify-send', message])
        return

    # count=1
    def parse(self, response):
        # Score containers on the live-matches page.
        li=response.css('div.matches-wrapper')
        li1=li[0].css('div.matches-container')
        li2=li1[0].css('section.matches-day-block')
        li3=li1[0].css('div.innings-info-1')
        li4=li1[0].css('div.innings-info-2')
        #print count
        global count
        global index
        global teams1
        global teams2
        global prev_score1
        global prev_score2
        if(count==1):
            # First crawl: collect team names and let the user pick a match.
            for match in li3:
                score=match.css('div.innings-info-1::text').extract()
                st=""
                flag=0
                for i in range(0,len(score)-1):
                    st+=score[i]
                    #print score[i]
                teams1.append(score[0].strip())
            for match in li4:
                score=match.css('div.innings-info-2::text').extract()
                st=""
                flag=0
                for i in range(0,len(score)-1):
                    st+=score[i]
                    #print score[i]
                teams2.append(score[0].strip())
            for i in range(0,len(teams2)):
                print ("%s %s vs %s" %(i+1, teams1[i], teams2[i]))
            """for match in li4:
            print match.css('span.bold::text').extract_first().strip()"""
            print "Enter the match index you wanted to see"
            index=int(input(""))
        exit=False
        while exit==False:
            print ("%s %s" %(teams1[index-1], li3[index-1].css('span.bold::text').extract_first().strip()))
            score1=li3[index-1].css('span.bold::text').extract_first().strip()
            print ("%s %s" %(teams2[index-1], li4[index-1].css('span.bold::text').extract_first().strip()))
            score2=li4[index-1].css('span.bold::text').extract_first().strip()
            if str(prev_score1)!=score1 and count!=1:
                # Wickets are the token after '/' in "runs/wickets (overs)".
                wick=score1.split("/")
                wick=wick[1].split(" ")
                wick=wick[0]
                prev_wick=prev_score1.split("/")
                prev_wick=prev_wick[1].split(" ")
                prev_wick=prev_wick[0]
                if prev_wick!=wick:
                    #q=QuotesSpider()
                    st="Wickets have fallen!\nCurrent Score is " + score1
                    self.sendmessage(st)
            if str(prev_score2)!=score2 and count!=1:
                wick=score2.split("/")
                wick=wick[1].split(" ")
                wick=wick[0]
                prev_wick=prev_score2.split("/")
                prev_wick=prev_wick[1].split(" ")
                prev_wick=prev_wick[0]
                if prev_wick!=wick:
                    #q=QuotesSpider()
                    st="Wickets have fallen!\nCurrent Score is " + score2
                    self.sendmessage(st)
            #if prev_score2!=score2:
            prev_score1=score1
            prev_score2=score2
            # Busy-wait ~10 seconds before notifying and re-crawling.
            t_end = time.time() + 10
            while time.time() < t_end:
                i=0
            st=teams1[index-1] + " " + li3[index-1].css('span.bold::text').extract_first().strip() + "\n" + teams2[index-1] + " " + li4[index-1].css('span.bold::text').extract_first().strip()
            self.sendmessage(st)
            count=count+1
            # Re-schedule the same page; dont_filter bypasses deduplication.
            next_page='http://www.espncricinfo.com/ci/engine/match/index.html'
            if next_page is not None:
                next_page = response.urljoin(next_page)
                yield scrapy.Request(next_page, callback=self.parse,dont_filter=True)
            exit=True
|
20,328 | 0219edc46c0266177bdce700ab4096cc81fbbe9f | import pickle
class TokenDict:
    """Bidirectional mapping between tokens and sequential numerical ids.

    Both tokens and ids are stored as strings; ids are assigned in insertion
    order starting at "0".  The whole mapping can be pickled to disk and
    restored later.
    """

    def __init__(self, path=None):
        # token -> numerical id:
        # string -> string
        if path is None:
            self.token2id = dict()
            self.id2token = dict()
            self.cnt = 0
        else:
            self.load(path)

    def save(self, output_path):
        """Pickle this dictionary to *output_path*."""
        with open(output_path, "wb") as f:
            pickle.dump(self, f)

    def load(self, input_path):
        """Restore state from a pickle previously written by save().

        NOTE(review): pickle.load can execute arbitrary code -- only load
        files this application wrote itself.
        """
        with open(input_path, "rb") as f:
            tmp = pickle.load(f)
        self.cnt = tmp.cnt
        self.token2id = tmp.token2id
        self.id2token = tmp.id2token

    def display(self):
        """Print a short debug dump of the mapping."""
        print("cnt", self.cnt)
        print("token2id", self.token2id)

    def put(self, token):
        """Insert *token* if new and return its id (always a string)."""
        token = str(token)
        if token not in self.token2id:  # direct membership, no .keys()
            new_id = str(self.cnt)
            self.token2id[token] = new_id
            self.id2token[new_id] = token
            self.cnt += 1
        return self.token2id[token]

    def getNumForToken(self, token):
        """Return the id for *token*, or None if the token is unknown."""
        return self.token2id.get(str(token))

    def getTokenForNum(self, num):
        """Return the token for id *num*, or None if the id is unknown.

        The leftover ``pdb.set_trace()`` debugging hook that used to fire on
        a miss has been removed.
        """
        return self.id2token.get(str(num))

    def getTokenForNums(self, lst):
        """Map a list of ids to their tokens (None entries for unknown ids)."""
        return [self.getTokenForNum(x) for x in lst]

    def getAllTokensWith(self, token):
        """Return all stored tokens that contain *token* as a substring."""
        token = str(token)
        return [key for key in self.token2id if token in key]
|
20,329 | 6ea66e1067a4ecc7d8cb9375877f24c40340be28 | #!/usr/bin/env python
# This simple example shows how to do basic rendering and pipeline
# creation.
import datetime
from datetime import timedelta
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QSlider, QGridLayout, QLabel, QPushButton, QTextEdit
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import Qt
import vtk
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from argparse import ArgumentParser
import sys
frame_counter = 0
initial_date = datetime.date(2020, 1, 22)
curr_date = datetime.datetime.now().date()
def main():
    """Build the VTK pipeline and PyQt5 UI for the density/temperature map.

    Command line: density TIFF, climate TIFF prefix (per-month files named
    "<prefix>-MM.tif"), satellite JPEG, plus an optional --camera settings
    file previously written by the screenshot button.
    """
    # Initialize argument and constant variables
    parser = ArgumentParser("Create isosurfacing of object")
    parser.add_argument("density")
    parser.add_argument("climate")
    parser.add_argument("sat")
    parser.add_argument("--camera", type = str, help = "Optional camera settings file")
    args = parser.parse_args()
    # Create reader for ct scan
    density_reader = vtk.vtkTIFFReader()
    density_reader.SetFileName(args.density)
    density_reader.Update()
    print(density_reader.GetOutput().GetScalarRange()[1])
    # Log-scale the density data to compress its dynamic range.
    density_log = vtk.vtkImageLogarithmicScale()
    density_log.SetInputConnection(density_reader.GetOutputPort())
    density_log.SetConstant(0.435)
    density_log.Update()
    density_range = density_log.GetOutput().GetScalarRange()
    # Climate data: one TIFF per month, starting with the initial date's month.
    climate_reader = vtk.vtkTIFFReader()
    climate_reader.SetFileName(args.climate + "-" + str(initial_date.month.real).zfill(2) + ".tif")
    climate_reader.Update()
    climate_range = climate_reader.GetOutput().GetScalarRange()
    sat_reader = vtk.vtkJPEGReader()
    sat_reader.SetFileName(args.sat)
    max_val = 100
    color_count = 1000
    # Density color map: black -> blue -> cyan -> yellow -> orange -> red.
    density_ctf = vtk.vtkColorTransferFunction()
    density_ctf.AddRGBPoint(0, 0, 0, 0)
    density_ctf.AddRGBPoint(10, 0, 0, 1)
    density_ctf.AddRGBPoint(30, 0, 1, 1)
    density_ctf.AddRGBPoint(50, 1, 1, 0)
    density_ctf.AddRGBPoint(65, 1, 0.5, 0)
    density_ctf.AddRGBPoint(80, 1, 0, 0)
    density_lut = vtk.vtkLookupTable()
    density_lut.SetNumberOfTableValues(color_count)
    density_lut.Build()
    # First table entry is fully transparent so zero-density areas vanish.
    rgb = list(density_ctf.GetColor(0))+[0]
    density_lut.SetTableValue(0, rgb)
    for i in range(1, color_count):
        rgb = list(density_ctf.GetColor(max_val * float(i)/color_count))+[1]
        density_lut.SetTableValue(i, rgb)
    # Temperature color map: blue -> cyan -> yellow -> red.
    climate_ctf = vtk.vtkColorTransferFunction()
    climate_ctf.AddRGBPoint(5, 0, 0, 1)
    climate_ctf.AddRGBPoint(35, 0, 1, 1)
    climate_ctf.AddRGBPoint(65, 1, 1, 0)
    climate_ctf.AddRGBPoint(95, 1, 0, 0)
    climate_lut = vtk.vtkLookupTable()
    climate_lut.SetNumberOfTableValues(color_count)
    climate_lut.Build()
    for i in range(0, color_count):
        rgb = list(climate_ctf.GetColor(max_val * float(i)/color_count))+[1]
        climate_lut.SetTableValue(i, rgb)
    density_mapper = vtk.vtkDataSetMapper()
    density_mapper.SetInputConnection(density_log.GetOutputPort())
    density_mapper.SetLookupTable(density_lut)
    density_mapper.SetScalarRange([0, density_range[1]])
    density_mapper.Update()
    climate_mapper = vtk.vtkDataSetMapper()
    climate_mapper.SetInputConnection(climate_reader.GetOutputPort())
    climate_mapper.SetLookupTable(climate_lut)
    climate_mapper.SetScalarRange(climate_range)
    climate_mapper.Update()
    sat_mapper = vtk.vtkDataSetMapper()
    sat_mapper.SetInputConnection(sat_reader.GetOutputPort())
    # Semi-transparent layered actors: density over temperature over satellite.
    density_actor = vtk.vtkActor()
    density_actor.SetMapper(density_mapper)
    density_actor.GetProperty().SetOpacity(0.99)
    density_actor.VisibilityOn()
    climate_actor = vtk.vtkActor()
    climate_actor.SetMapper(climate_mapper)
    climate_actor.GetProperty().SetOpacity(0.6)
    climate_actor.VisibilityOff()
    sat_actor = vtk.vtkActor()
    sat_actor.SetMapper(sat_mapper)
    sat_actor.GetProperty().SetOpacity(0.7)
    # Make satellite image same size as contour map
    crange = sat_actor.GetXRange()[0] - sat_actor.GetXRange()[1]
    mrange = density_actor.GetXRange()[0] - density_actor.GetXRange()[1]
    density_actor.SetScale(crange/mrange)
    crange = sat_actor.GetXRange()[0] - sat_actor.GetXRange()[1]
    mrange = climate_actor.GetXRange()[0] - climate_actor.GetXRange()[1]
    climate_actor.SetScale(crange/mrange)
    # Initialize renderer and place actors
    ren = vtk.vtkRenderer()
    ren.AddActor(density_actor)
    ren.AddActor(climate_actor)
    ren.AddActor(sat_actor)
    ren.ResetCamera()
    ren.SetBackground(0, 0, 0)
    # Initialize camera settings
    cam1 = ren.GetActiveCamera()
    cam1.Azimuth(0)
    cam1.Elevation(0)
    cam1.Roll(360)
    cam1.Zoom(1)
    ren.ResetCameraClippingRange()
    # Optionally restore a camera saved by save_frame() (one value set per line).
    if args.camera:
        reader = open(args.camera, "r")
        line = reader.readline().split(",")
        cam1.SetPosition(float(line[0]), float(line[1]), float(line[2]))
        line = reader.readline().split(",")
        cam1.SetFocalPoint(float(line[0]), float(line[1]), float(line[2]))
        line = reader.readline().split(",")
        cam1.SetViewUp(float(line[0]), float(line[1]), float(line[2]))
        line = reader.readline().split(",")
        cam1.SetClippingRange(float(line[0]), float(line[1]))
        line = reader.readline().split(",")
        cam1.SetViewAngle(float(line[0]))
        line = reader.readline().split(",")
        cam1.SetParallelScale(float(line[0]))
    # Initialize PyQT5 UI and link to renderer
    app = QApplication([])
    window = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    ui.vtkWidget.GetRenderWindow().AddRenderer(ren)
    ui.vtkWidget.GetRenderWindow().SetSize(1280, 720)
    ui.vtkWidget.GetRenderWindow().AddRenderer(ren)
    ui.vtkWidget.GetRenderWindow().SetAlphaBitPlanes(True)
    ui.vtkWidget.GetRenderWindow().SetMultiSamples(False)
    iren = ui.vtkWidget.GetRenderWindow().GetInteractor()
    # create the scalar_bar
    density_scalar_bar = vtk.vtkScalarBarActor()
    density_scalar_bar.SetOrientationToHorizontal()
    density_scalar_bar.SetMaximumNumberOfColors(color_count)
    density_scalar_bar.SetLookupTable(density_lut)
    density_scalar_bar.SetTitle("Density (Log 10)")
    # create the scalar_bar_widget
    density_scalar_bar_widget = vtk.vtkScalarBarWidget()
    density_scalar_bar_widget.SetInteractor(iren)
    density_scalar_bar_widget.SetScalarBarActor(density_scalar_bar)
    density_scalar_bar_widget.On()
    # create the scalar_bar
    climate_scalar_bar = vtk.vtkScalarBarActor()
    climate_scalar_bar.SetOrientationToHorizontal()
    climate_scalar_bar.SetMaximumNumberOfColors(color_count)
    climate_scalar_bar.SetLookupTable(climate_lut)
    climate_scalar_bar.SetTitle("Temparature (Celsius)")
    # create the scalar_bar_widget
    climate_scalar_bar_widget = vtk.vtkScalarBarWidget()
    climate_scalar_bar_widget.SetInteractor(iren)
    climate_scalar_bar_widget.SetScalarBarActor(climate_scalar_bar)
    climate_scalar_bar_widget.Off()
    # Function to initialize slider settings
    def slider_setup(slider, val, bounds, interv):
        slider.setOrientation(QtCore.Qt.Horizontal)
        slider.setValue(float(val))
        slider.setSliderPosition(val)
        slider.setTracking(False)
        slider.setTickInterval(interv)
        slider.setTickPosition(QSlider.TicksAbove)
        slider.setRange(bounds[0], bounds[1])
    # One slider step per day between the initial date and today.
    slider_setup(ui.time_slider, 0, [0, (curr_date - initial_date).days], 1)
    window.show()
    window.setWindowState(Qt.WindowMaximized)
    iren.Initialize()
    def time_slider_callback(val):
        # Swap in the climate TIFF for the newly selected month (if changed).
        new_date = initial_date + timedelta(val)
        if new_date.month.real != ui.curr_month:
            ui.curr_month = new_date.month.real
            climate_reader.SetFileName(args.climate + "-" + str(ui.curr_month).zfill(2) + ".tif")
            climate_reader.Update()
            new_range = climate_reader.GetOutput().GetScalarRange()
            climate_mapper.SetScalarRange(new_range)
        ui.date_label.setText("Date (" + new_date.strftime('%m/%d/%Y') + "):")
    def density_callback():
        # Toggle the density layer and its scalar bar.
        isOn = density_actor.GetVisibility()
        if isOn:
            density_actor.VisibilityOff()
            density_scalar_bar_widget.Off()
            ui.vtkWidget.GetRenderWindow().Render()
            ui.push_density.setText('Enable Density')
        else:
            density_actor.VisibilityOn()
            density_scalar_bar_widget.On()
            ui.vtkWidget.GetRenderWindow().Render()
            ui.push_density.setText('Disable Density')
    def climate_callback():
        # Toggle the temperature layer and its scalar bar.
        isOn = climate_actor.GetVisibility()
        if isOn:
            climate_actor.VisibilityOff()
            climate_scalar_bar_widget.Off()
            ui.vtkWidget.GetRenderWindow().Render()
            ui.push_climate.setText('Enable Temperature')
        else:
            climate_actor.VisibilityOn()
            climate_scalar_bar_widget.On()
            ui.vtkWidget.GetRenderWindow().Render()
            ui.push_climate.setText('Disable Temperature')
    # Handle screenshot button event
    def screenshot_callback():
        save_frame(ren.GetActiveCamera(), ui.vtkWidget.GetRenderWindow(), ui.log)
    # Handle show camera settings button event
    def camera_callback():
        print_camera_settings(ren.GetActiveCamera(), ui.camera_info, ui.log)
    # Handle quit button event
    def quit_callback():
        sys.exit()
    # Register callbacks to UI
    ui.time_slider.valueChanged.connect(time_slider_callback)
    ui.push_screenshot.clicked.connect(screenshot_callback)
    ui.push_camera.clicked.connect(camera_callback)
    ui.push_quit.clicked.connect(quit_callback)
    ui.push_density.clicked.connect(density_callback)
    ui.push_climate.clicked.connect(climate_callback)
    # Terminate setup for PyQT5 interface
    sys.exit(app.exec_())
class Ui_MainWindow(object):
    """Hand-written Qt Designer-style UI: VTK viewport plus controls."""

    def setupUi(self, MainWindow):
        """Create widgets and lay them out in *MainWindow*'s grid."""
        MainWindow.setObjectName('The Main Window')
        MainWindow.setWindowTitle('Vector Planes Visualization')
        self.centralWidget = QWidget(MainWindow)
        self.gridlayout = QGridLayout(self.centralWidget)
        self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)
        self.time_slider = QSlider()
        self.push_screenshot = QPushButton()
        self.push_screenshot.setText('Save Screenshot')
        self.push_camera = QPushButton()
        self.push_camera.setText('Update Camera Info')
        self.push_quit = QPushButton()
        self.push_quit.setText('Quit')
        self.push_density = QPushButton()
        self.push_density.setText('Disable Density')
        self.push_density.size
        self.push_climate = QPushButton()
        self.push_climate.setText('Enable Temperature')
        # Read-only panes: camera settings summary and an activity log.
        self.camera_info = QTextEdit()
        self.camera_info.setReadOnly(True)
        self.camera_info.setAcceptRichText(True)
        self.camera_info.setHtml("<div style='font-weight: bold'>Camera Settings</div>")
        self.log = QTextEdit()
        self.log.setReadOnly(True)
        self.date_label = QLabel("Date (" + initial_date.strftime('%m/%d/%Y') + "):")
        # Tracks which climate month is currently loaded (see time slider).
        self.curr_month = initial_date.month.real
        # Layout: VTK view fills the left 4x4 area; controls on the right/bottom.
        self.gridlayout.addWidget(self.vtkWidget, 0, 0, 4, 4)
        self.gridlayout.addWidget(self.date_label, 4, 0, 1, 1)
        self.gridlayout.addWidget(self.time_slider, 4, 1, 1, 1)
        self.gridlayout.addWidget(self.push_density, 4, 4, 1, 1)
        self.gridlayout.addWidget(self.push_climate, 4, 5, 1, 1)
        self.gridlayout.addWidget(self.push_screenshot, 0, 4, 1, 1)
        self.gridlayout.addWidget(self.push_camera, 0, 5, 1, 1)
        self.gridlayout.addWidget(self.camera_info, 2, 4, 1, 2)
        self.gridlayout.addWidget(self.log, 3, 4, 1, 2)
        MainWindow.setCentralWidget(self.centralWidget)
def save_frame(camera, window, log):
    """Save the render window to a numbered PNG plus a camera-settings CSV.

    The CSV uses one setting per line (position, focal point, view up,
    clipping range, view angle, parallel scale) and can be restored via the
    --camera command-line option.

    :param camera: active vtkCamera whose state is recorded
    :param window: vtkRenderWindow to capture
    :param log: QTextEdit-style widget for the activity log
    """
    global frame_counter
    # ---------------------------------------------------------------
    # Save current contents of render window to PNG file
    # ---------------------------------------------------------------
    file_name = "three_planes-" + str(frame_counter).zfill(2) + ".png"
    file_name2 = "three_planes_cam-" + str(frame_counter).zfill(2) + ".csv"
    image = vtk.vtkWindowToImageFilter()
    image.SetInput(window)
    png_writer = vtk.vtkPNGWriter()
    png_writer.SetInputConnection(image.GetOutputPort())
    png_writer.SetFileName(file_name)
    window.Render()
    png_writer.Write()
    # BUG FIX: the CSV handle was previously never closed; the context
    # manager guarantees the file is flushed and closed.
    with open(file_name2, "w") as cam:
        cam.write(str(camera.GetPosition()[0]) + "," + str(camera.GetPosition()[1]) + "," + str(camera.GetPosition()[2]) + "\n")
        cam.write(str(camera.GetFocalPoint()[0]) + "," + str(camera.GetFocalPoint()[1]) + "," + str(camera.GetFocalPoint()[2]) + "\n")
        cam.write(str(camera.GetViewUp()[0]) + "," + str(camera.GetViewUp()[1]) + "," + str(camera.GetViewUp()[2]) + "\n")
        cam.write(str(camera.GetClippingRange()[0]) + "," + str(camera.GetClippingRange()[1]) + "\n")
        cam.write(str(camera.GetViewAngle()) + "\n")
        cam.write(str(camera.GetParallelScale()) + "\n")
    frame_counter += 1
    log.insertPlainText('Exported {}\n'.format(file_name))
def print_camera_settings(camera, text_window, log):
    # ---------------------------------------------------------------
    # Print out the current settings of the camera
    # ---------------------------------------------------------------
    # Renders the camera state (position, focal point, view up, clipping
    # range, view angle, parallel scale, view plane normal) as an HTML list
    # in the camera-info pane and notes the refresh in the activity log.
    text_window.setHtml("""<div style='font-weight:bold'>Camera settings:</div><p><ul><li><div style='font-weight:bold'>
    Position:</div> {0}</li><li><div style='font-weight:bold'>Focal Point:</div> {1}</li><li><div style='font-weight:bold'>
    Up Vector:</div> {2}</li><li><div style='font-weight:bold'>Clipping Range:</div> {3}</li><li><div style='font-weight:bold'>
    View Angle:</div> {4}</li></li><li><div style='font-weight:bold'>Parallel Scale:</div> {5}</li><li><div style='font-weight:bold'>
    View Plane Normal:</div> {6}</li>""".format(camera.GetPosition(), camera.GetFocalPoint(),camera.GetViewUp(),camera.GetClippingRange(), camera.GetViewAngle(), camera.GetParallelScale(), camera.GetViewPlaneNormal()))
    log.insertPlainText('Updated camera info\n')
# Script entry point.
if __name__ == '__main__':
    main()
20,330 | 61bc2167db64cf43e90e9c886e0cfdb1788b20b4 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, earthians and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
class PatientConsentTemplate(Document):
    """DocType controller for the Patient Consent Template document."""

    def validate(self):
        if self.active:
            # Get all Consent Template
            # Set active = 0 for all other Template
            # TODO(review): deactivating the other templates is not
            # implemented yet -- the branch is currently a no-op.
            pass
@frappe.whitelist()
def render_consent_template(template_name, doc):
    """Render a Patient Consent Template against *doc*'s fields.

    *doc* may arrive as a JSON string (whitelisted HTTP call) or as a
    document object; returns None when the template has no body.
    """
    context = json.loads(doc) if isinstance(doc, str) else doc.as_dict()
    consent_template = frappe.get_doc("Patient Consent Template", template_name)
    if consent_template.template:
        return frappe.render_template(consent_template.template, context)
|
20,331 | 945d0dd7c2c99368589f4ee04346e9d7620fa891 | from django.urls import path, include
from . import views
# Theme browsing/cart API routes (all class-based views).
urlpatterns = [
    path('theme/', views.ThemeFeed.as_view()),
    path('theme/details/<int:id>/', views.ThemeNameFilter.as_view()),
    path('theme/cart/<int:id>/', views.ThemeCart.as_view()),
    path('theme/category/',views.CategoryView.as_view()),
]
20,332 | 774b3ab1ad556be1e725b8ae7d1ceba3e9210237 | #!/usr/bin/env python
# $Id: astrometer.py,v 1.8 2006/05/22 17:38:53 meurer Exp $
# ---------------------------------------------------------------------
__version__ = '$Revision: 1.8 $ '[11:-3]
__version_date__ = '$Date: 2006/05/22 17:38:53 $ '[7:-3]
__author__ = 'J. Blakeslee, R. White, K. Anderson'
import sys,os,string,popen2,cStringIO,math,types,urllib
import wcsclass,pyfits
from msg import pMessage
import xmlUtil
import time
import subprocess
pyversion = sys.version
# Host and URL information for search will need to be changed when
# new catalog version is released.
_HOST_ = "gsss.stsci.edu"                      # GSC cone-search host
_URL_ = "/webservices/vo/ConeSearch.aspx"      # VO cone-search endpoint
_CATALOG_ = "GSC23"                            # catalog identifier sent in queries
_minSig_ = 0.2    # don't believe rms less than this (arcsec)
# Sets maximum number of queries to try before bailing out.
MAXWQtry=10
class WebQueryError(IOError):
    """Exception raised for catalog web-query failures and timeouts."""
class gscMatchup:
""" This class provides various methods for searching and matching
against extended version of GSC2 catalog, determining the pointing
error, and correcting the image header CRVAL1,CRVAL2 keywords.
The methods have mostly been cobbled together from Rick's scripts and
wfp align module functions.
Pipeline should run it like this:
mm=astrometer.gscMatchup(obs)
if (mm.findAstromCorrs() != 0):
# then it didn't work!
# note this in long and move on to next module
else:
mm.applyCorrs()
The mkMsg method is remains callable, though deprecated for pipeline
purposes as this module and class is used by the combDither module rather
than called directly by the pipeline.
"""
def __init__(self,obs):
""" constructor gets Observation object (obs) from pipeline. """
self.modName = string.split(string.split(str(self))[0],'.')[0][1:]
self.root = obs.newobspath # root path of the observation dir
self.obsName = obs.newobs
self.pardir = obs.newpardir
self.Imagedir = obs.fitsdir # path to fits Images/ dir
self.Imagedir = obs.fitsdir # path to fits Images/ dir
self.messagedir = obs.messagedir # where the module message will go.
self.configdir = obs.configdir
self.logfile = obs.logfile
self.paramFile = os.path.join(self.pardir,self.modName + '.param')
self.inparFile = os.path.join(self.pardir,self.modName + '.inpar')
self.sciImlist = obs.sciImageList
self.ctxImlist = obs.contextImageList
self.wgtImlist = obs.weightImageList
self.rmsImlist = obs.rmsImageList
# make astrom working dir
self.astromdir = obs.astromdir = os.path.join(self.root,'astrom')
if not os.path.isdir(self.astromdir):
os.mkdir(self.astromdir)
self.errorList = []
self.inputList = []
self.outputList = {}
self.logfile.write('****************************************')
self.logfile.write(self.modName+": Instantiated astrometer object.")
# basic input check
self.Nim=Nim = len(self.sciImlist)
if Nim < 1 or Nim!=len(self.rmsImlist):
errtxt="Image Lists of zero or differing lenths."
self.errorList.append((self.modName,errtxt))
self.logfile.write('Error: '+errtxt)
raise IOError, errtxt
# init the input list
for i in self.sciImlist:
self.inputList.append(i)
def applyCorrs(self):
""" Apply the astrometric corrections:
update sci,context,weight,RMS image header CRVAL1,CRVAL2 values.
"""
curdir = os.getcwd()
os.chdir(self.Imagedir)
for imlist in [self.sciImlist, self.ctxImlist, self.wgtImlist, self.rmsImlist]:
for im in imlist:
self.logfile.write("Applying header updates: "+im)
ff = pyfits.open(im,'update')
ff[0].header.update('CRVAL1',self.CRVAL1new)
ff[0].header.update('CRVAL2',self.CRVAL2new)
ff[0].header.update('AMDRA',self.bestrans['dx'])
ff[0].header.update('AMDDEC',self.bestrans['dy'])
ff[0].header.update('AMNTCH',self.bestrans['Nmed'])
ff[0].header.update('AMSGRA',self.bestrans['sigx'])
ff[0].header.update('AMSGDEC',self.bestrans['sigy'])
# document with comments
ff[0].header.add_comment("astrom: CRVAL1,CRVAL2 corrected by wfp astrometer module.")
ff[0].header.add_comment("astrom: Old CRVAL1,CRVAL2: %.8f %.8f" \
%(self.crval1in,self.crval2in))
ff[0].header.add_comment("astrom: Applied corrections (arcsec) dRA,dDec: %.4f %.4f"\
%(self.bestrans['dx'],self.bestrans['dy']))
ff[0].header.add_comment("astrom: Correction errors (arcsec) e_dRA,e_dDec: %.4f %.4f"\
%(self.bestrans['dx_err'],self.bestrans['dy_err']))
ff[0].header.add_comment("astrom: Nmatch,sigx,sigy (arcsec): %d %.4f %.4f"\
%(self.bestrans['Nmed'],self.bestrans['sigx'],self.bestrans['sigy']))
ff[0].header.add_comment("APLUS: Properietary data by CLASH pipeline")
ff.close()
self.logfile.write("Image: " + im)
self.logfile.write("********************")
self.logfile.write("Old CRVAL1: " + str(self.crval1in) + " Old CRVAL2: " + str(self.crval2in))
self.logfile.write("New CRVAL1: " + str(self.CRVAL1new) + " New CRVAL2: " + str(self.CRVAL2new))
self.logfile.write("RA Shift : " + str(self.bestrans['dx']) + " Dec Shift : " + str(self.bestrans['dy']))
self.logfile.write("Sigma RA : " + str(self.bestrans['sigx']) + " Sigma Dec : " + str(self.bestrans['sigy']))
self.logfile.write("No. match : " + str(self.bestrans['Nmed']))
os.chdir(curdir)
self.logfile.write("Updated image headers with new CRVAL's.")
self.logfile.write("****************************************")
return
def findAstromCorrs(self):
""" This 'meta-method' runs the methods necessary for determining
astrometric zeropoint corrections.
"""
self.logfile.write("Entered findAstromCorrs - will run: "+\
"makeGSCcat(), makeObjcats(), doMatching().")
if self.makeGSCcat() != 0:
return -1
if self.makeObjcats()!= 0:
return -1
if self.doMatching() != 0:
# here we want to remake the GSCcat using a "chopSpur" flag,
# if the cat has a goodly number of objects
if(self.Ncull >= 10):
print "Retrying matchup with only GSC objs detected in 2 bands..."
self.logfile.write("Retrying matchup with only GSC objs detected in 2 bands...")
if self.makeGSCcat(chopSpur=1) != 0:
return -1
if self.makeObjcats()!= 0:
return -1
if self.doMatching() != 0:
return -1
return 0
def doMatching(self):
""" Match object .proj catalogs against GSC proj catalog. """
self.alltrans = []
curdir = os.getcwd()
os.chdir(self.astromdir)
for cat in self.projcats:
self.logfile.write("Beginning match attempt for %s" %cat)
nobj = max(80,self.Nrefobjs)
base = 'match %s 1 2 3 %s 1 2 3 identity recalc nobj=%d medtf medsigclip=2.7 '\
%(cat,self.GSCmatchin,nobj)
converged=0
mrad=4.5
xsh=0.
ysh=0.
Retry=0
while not converged and mrad<10 and Retry<11:
ferr,mdict=self._runmatch(base,mrad,xsh,ysh)
if ferr:
# match not found, make radius bigger and retry
mrad += 0.5
Retry += 1
self.logfile.write("Retry: %d ... set mrad: %s" %(Retry,mrad))
continue
dx = mdict['dx']
dy = mdict['dy']
sigx = mdict['sigx']
sigy = mdict['sigy']
# if the extra shift is too big compared to the
# match radius, update input shift and retry
if max(abs(dx-xsh),abs(dy-ysh)) > min(0.4, mrad/10.):
xsh=dx
ysh=dy
Retry += 1
continue
# if sigma seems too big, shrink mrad
if (max(sigx,sigy) > 0.7 and mrad>2.9):
mrad -= 0.5
Retry += 1
continue
# otherwise, it looks like we have a good fit
# but we want to tune it up with smaller matchrad
# Don't increment retry in this final case.
if mrad > 3.9 or (mrad>2.9 and max(sigx,sigy)>0.4):
mrad = max(mrad-1,2.5)
continue
self.alltrans.append(mdict)
converged=1
self.logfile.write("""%s trans:
mdx,mdy = %.4f %.4f
edx,edy = %.4f %.4f
sigx,sigy = %.4f %.4f
"""%(cat, mdict['dx'],mdict['dy'],mdict['dx_err'],mdict['dy_err'],\
mdict['sigx'],mdict['sigy']))
if len(self.alltrans)<1:
errtxt = "WARNING: No Image successfully matched against extended GSC!"
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
return -1
os.chdir(curdir)
# finally, choose best match transform and calculate
# the new CRVAL's
self._pickBestrans()
return 0
    def _pickBestrans(self):
        """Select the best transform from self.alltrans and derive new CRVALs.

        "Best" means smallest combined shift uncertainty
        sqrt(dx_err^2 + dy_err^2).  The chosen RA/Dec offsets (arcsec) are
        converted to degrees -- the RA offset de-projected by cos(Dec) --
        and added to the input CRVAL1/CRVAL2.

        NOTE(review): assumes self.alltrans is non-empty (doMatching checks
        this before calling); an empty list would leave self.bestrans unset
        and make 'del rerr' raise.
        """
        ###
        ### CHECK OVER THIS PLEASE!!!!
        ###
        min_uncty = 999.  # sentinel, larger than any plausible error (arcsec)
        for trans in self.alltrans:
            # maxerr = max(trans['dx_err'],trans['dy_err'])
            rerr = math.sqrt(trans['dx_err']**2 + trans['dy_err']**2)
            if rerr < min_uncty:
                min_uncty = rerr
                self.bestrans = trans
        del rerr
        self.logfile.write("Pick dx,dy: %.4f %.4f edx,edy: %.4f %.4f sigx,sigy: %.4f %.4f rerr: %.4f"\
                           %(self.bestrans['dx'],self.bestrans['dy'],self.bestrans['dx_err'],\
                             self.bestrans['dy_err'],self.bestrans['sigx'],self.bestrans['sigy'],
                             min_uncty))
        # arcsec -> deg; RA correction de-projected by cos(Dec)
        crval1_corr = (self.bestrans['dx']/3600.) / math.cos(self.crval2in * math.pi/180.)
        crval2_corr = (self.bestrans['dy']/3600.)
        self.CRVAL1new = self.crval1in + crval1_corr
        self.CRVAL2new = self.crval2in + crval2_corr
        self.logfile.write('CRVAL1new,CRVAL2new: '+str((self.CRVAL1new,self.CRVAL2new)))
        return
def _runmatch(self,base,mrad,xsh,ysh):
"""Run match and parse result. """
cmd = base+'matchrad='+str(mrad)+' xsh=%.2f ysh=%.2f' %(xsh,ysh)
sproc = popen2.Popen3(cmd,1)
output = sproc.fromchild.readlines()
errs = sproc.childerr.readlines()
sproc.fromchild.close()
sproc.childerr.close()
if errs and not (len(errs) == 1 and errs[0][0:27] == 'atFindMedtf: RMS <= 0.0, so'):
self.logfile.write('match produced the following message(s): ')
for f in errs:
self.logfile.write(string.strip(f))
self.errorList.append(('match',string.strip(f)))
matchline = ''
medianline = ''
for line in output:
if line[0:6] == 'TRANS:':
matchline = line
elif line[0:6] == 'MEDTF:':
medianline = line
else:
self.logfile.write("ERROR:. Unexpected output from match program:\n\t\t\t"+line)
return (-1,None)
if (not matchline) or (not medianline):
self.logfile.write("Match program failed.")
return (-1,None)
mdict = self._process_median(medianline,mrad,xsh,ysh)
if not mdict:
return (-1,None)
return (0,mdict)
    def _process_median(self, medianline,mrad,xsh,ysh):
        """Parse a 'match' MEDTF output line into a transform dict.

        medianline -- the raw 'MEDTF: ...' line emitted by the match program
        mrad, xsh, ysh -- match radius and input shifts used for this run
                          (logged for traceability only)

        Returns a dict with keys Nmed, sigx, sigy, dx, dy, dx_err, dy_err,
        or None when the line cannot be parsed or the fit is too poor
        (x or y rms above 1.2 arcsec).
        """
        fields = string.split(medianline)
        mdict={}
        if len(fields) != 8 or fields[0] != 'MEDTF:':
            errtxt = "ERROR: parsing match TRANS output: \n"+str(fields)
            self.logfile.write(errtxt)
            return None
        # read the fit parameters into variables
        # (each field is 'name=value'; keep only the value part)
        mdx = float(fields[1].split('=')[1])
        mdy = float(fields[2].split('=')[1])
        adx = float(fields[3].split('=')[1])   # parsed but not used below
        ady = float(fields[4].split('=')[1])   # parsed but not used below
        sigx = float(fields[5].split('=')[1])
        sigy = float(fields[6].split('=')[1])
        Nmed = int(fields[7].split('=')[1])
        # check to see if it's a reasonable transformation
        if max(sigx,sigy) > 1.2:
            errtxt = 'Poor median trans: x,y rms = '+str(sigx)+' '+str(sigy)
            self.logfile.write(errtxt)
            self.errorList.append((self.modName,errtxt))
            return None
        # finally, populate MatchDict median keywords:
        mdict['Nmed'] = Nmed
        mdict['sigx'] = sigx
        mdict['sigy'] = sigy
        mdict['dx'] = mdx
        mdict['dy'] = mdy
        # standard errors of the median shifts, with an rms floor of _minSig_
        mdict['dx_err'] = max(sigx,_minSig_)/math.sqrt(Nmed-1.0)
        mdict['dy_err'] = max(sigy,_minSig_)/math.sqrt(Nmed-1.0)
        self.logfile.write(("rad,xsh,ysh: (%.1f,%.2f,%.2f)"%(mrad,xsh,ysh))+\
                           " med x,y shifts for "+str(Nmed)+" objects; "+\
                           str(mdx)+" "+str(mdy)+" rms: "+str(sigx)+" "+str(sigy))
        return mdict
def makeObjcats(self):
""" Make object catalogs for all input science images. """
curdir = os.getcwd()
os.chdir(self.astromdir)
# make the sxtr paramfiles and run with them
self._setupSxtrFiles()
self.objcats = []
self.projcats = []
for imsci in self.sciImlist:
rmsfile = imsci.split("_sci")[0]+'_RMS.fits'
if rmsfile not in self.rmsImlist:
errtxt = "Expected file %s not in rmsImlist!" %rmsfile
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
return -1
tmpcat = imsci.split("_sci")[0]+'.tmp'
cmd = 'sex %s -c %s -CATALOG_NAME %s -WEIGHT_IMAGE %s' \
%(os.path.join(self.Imagedir,imsci), self.inparFile, \
tmpcat, os.path.join(self.Imagedir,rmsfile))
self.logfile.write(' '+cmd)
sproc = popen2.Popen4(cmd, 1)
errs = sproc.fromchild.readlines()
sproc.fromchild.close()
if errs:
self.logfile.write("Sxtr seems to have choked a bit:")
for line in errs:
self.logfile.write(line)
self.errorList.append((self.modName,line))
try:
sxtlines = open(tmpcat).readlines()
except:
sxtlines = []
del cmd,sproc,rmsfile,errs,tmpcat
# refine that catalog, please
objcat = imsci.split("_sci")[0]+'.obj'
projcat = imsci.split("_sci")[0]+'.proj'
fp = open(objcat,'w')
ngood=0
# the following seems very contrived, but helps in some cases
if len(sxtlines)<250: bigMagerr = 0.4
else: bigMagerr = 0.35
self.logfile.write('cat: %s Magerr_lim: %.2f' %(objcat,bigMagerr))
for line in sxtlines:
flds = line.split()
if len(flds)<9 or flds[0][0] == '#':
continue
mag = float(flds[3])
magerr = float(flds[4])
ellip = float(flds[5])
fwhm = float(flds[6])
a_im = float(flds[7])
b_im = float(flds[8])
if(magerr>bigMagerr or ellip>0.7 or b_im < 0.6 or max(a_im,fwhm/2)>400):
continue
fp.write(line)
ngood += 1
fp.close()
if ngood>5:
self.objcats.append(objcat)
else:
continue
# finally, write the file with projected coord offsets
cmd = 'project_coords %s 1 2 %.6f %.6f outfile=%s asec' \
%(objcat,self.RActr,self.Decctr,projcat)
self.logfile.write("**> "+cmd)
projcoords = popen2.Popen4(cmd)
_outlines = projcoords.fromchild.readlines()
projcoords.fromchild.close()
if len(_outlines) > 0:
# I've never seen this happen, but better check...
errtxt = "ERROR: project_coords mysteriously failed!"
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
for _line in _outlines:
self.logfile.write(_line)
print _line
return -1
self.projcats.append(projcat)
self.logfile.write("Object astrometric catalog %s constructed."%projcat)
del projcoords,cmd,objcat,projcat
# ok, all done making catalogs for this image
os.chdir(curdir)
if len(self.objcats) < 1:
self.logfile.write("makeObjCats: No catalogs made for matching?")
return -1
self.logfile.write("Made %d object catalogs for matching."%len(self.objcats))
return 0
def _setupSxtrFiles(self):
"Make basic input files for running sextractor."
# first the output parameter list file
fp = open(self.paramFile,'w')
fp.write("NUMBER \nALPHA_J2000 \nDELTA_J2000 \nMAG_ISO \nMAGERR_ISO\n"+\
"ELLIPTICITY \nFWHM_IMAGE \nA_IMAGE \nB_IMAGE \n"+\
"MAG_AUTO \nMAGERR_AUTO \nMAG_APER(1) \nMAGERR_APER")
fp.close()
# now for the input configuration file
fp = open(self.inparFile,'w')
fp.write("CATALOG_NAME ztemp.cat\n")
fp.write("CATALOG_TYPE ASCII_HEAD\n")
fp.write("PARAMETERS_NAME %s\n" %self.paramFile)
fp.write("DETECT_TYPE CCD\n")
fp.write("DETECT_MINAREA 9\n")
fp.write("DETECT_THRESH 6.5\n")
fp.write("ANALYSIS_THRESH 6.5\n")
fp.write("FILTER N\n")
fp.write("FILTER_NAME $PIPELINE/configs/gauss_1.5_3x3.conv \n")
fp.write("DEBLEND_NTHRESH 12\n")
fp.write("DEBLEND_MINCONT 0.05\n")
fp.write("CLEAN Y\n")
fp.write("CLEAN_PARAM 1.2\n")
fp.write("MASK_TYPE CORRECT\n")
fp.write("PHOT_APERTURES 10\n")
fp.write("PHOT_AUTOPARAMS 2.5,3.5\n")
fp.write("SATUR_LEVEL 128000.0\n")
fp.write("MAG_ZEROPOINT 0.0\n")
fp.write("MAG_GAMMA 4.0\n")
fp.write("GAIN 1.0\n")
fp.write("PIXEL_SCALE 1.0\n")
fp.write("SEEING_FWHM 2.0\n")
fp.write("STARNNW_NAME $PIPELINE/configs/default.nnw\n")
fp.write("BACK_SIZE 128\n")
fp.write("BACK_FILTERSIZE 3\n")
fp.write("BACKPHOTO_TYPE LOCAL\n")
fp.write("BACKPHOTO_THICK 26\n")
fp.write("CHECKIMAGE_TYPE NONE \n")
fp.write("CHECKIMAGE_NAME NONE\n")
fp.write("MEMORY_OBJSTACK 10000\n")
fp.write("MEMORY_PIXSTACK 2600000 \n")
fp.write("MEMORY_BUFSIZE 2048\n")
fp.write("VERBOSE_TYPE QUIET\n")
fp.write("WEIGHT_TYPE MAP_RMS\n")
fp.write("WEIGHT_IMAGE rms.fits\n")
fp.write("WEIGHT_THRESH 0,9.9e29\n")
fp.write("INTERP_TYPE NONE\n")
def makeGSCcat(self,chopSpur=0):
""" Constructs the GSC catalog file to be used in astrometric matching
for this field. Name of catalog stored as parameter 'self.GSCmatchin'.
Returns 0 if successful, -1 if not.
"""
curdir = os.getcwd()
os.chdir(self.astromdir)
self.logfile.write("Entered 'makeGSCcat' to make astrometric ref catalog...")
# get input wcs information from first image in sci list
ff = pyfits.open(os.path.join(self.Imagedir,self.sciImlist[0]))
inwcs = wcsclass.BasicWCS(ff[0].header)
ff.close()
del ff
self.crpix1,self.crpix2 = (inwcs.wcs['CRPIX1'],inwcs.wcs['CRPIX2'])
self.crval1in = inwcs.wcs['CRVAL1']
self.crval2in = inwcs.wcs['CRVAL2']
NX=self.NX = inwcs.wcs['NAXIS1']
NY=self.NY = inwcs.wcs['NAXIS2']
# get RA,Dec of central pixel and the search radius
Xctr = (self.NX+1)/2
Yctr = (self.NY+1)/2
self.RActr,self.Decctr = inwcs.xy2rd((Xctr,Yctr)) # both in deg
((rahh,ramm,rass), (decdd,decmm,decss)) = self._deg2hms(self.RActr,self.Decctr)
rad_amin = 0.5*math.sqrt(NX*NX + NY*NY) * inwcs.wcs['PSCALE']/60.
rad_amin = round(1000*(0.1+rad_amin))/1000.
self.logfile.write('Input WCS: CRVAL1,2: '+str((self.crval1in,self.crval2in))+\
' crpix1,2: '+str((self.crpix1,self.crpix2)))
self.logfile.write('Xctr,Yctr: '+str((Xctr,Yctr))+' RA_ctr,Dec_ctr: '+str((self.RActr,self.Decctr)))
self.logfile.write('Making query to '+_HOST_+' RA,Dec: '+\
str(((rahh,ramm,rass),(decdd,decmm,decss)))+' rad_amin = '+str(rad_amin))
fh = cStringIO.StringIO()
# the STScI query fails a lot, randomly. This loop actually helps sometimes.
for iwqtry in range(MAXWQtry):
try:
self.webquery(host=_HOST_, url=_URL_, method="GET", file=fh,
RA = "%s" % self.RActr,
DEC = "%s" % self.Decctr,
SR=rad_amin/60.0,
CAT=_CATALOG_,
FORMAT="CSV")
except IOError,err:
errtxt = str(err)
self.logfile.write("WARNING: webquery "+str(iwqtry+1)+" failed...\n ")
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
time.sleep(2)
if (iwqtry<MAXWQtry-1):
sys.stderr.write(" trying again...\n")
else:
sys.stderr.write(" sorry, no luck.\n")
print "Web qeury on ",_HOST_,"failed."
print errtxt
raise WebQueryError,err
continue
break
# read and format the output, first line is a header so we will ignore it
output = string.split(fh.getvalue(),'\n')[2:]
fh.close()
gsclines = [i.replace(',', ' ') for i in output if i != '']
self.Nrefobjs = len(gsclines)
#
# AT THIS POINT WE NEED TO BAIL OUT IF THERE AREN'T ANY GSC OBJS FOUND!
#
if(self.Nrefobjs<6):
errtxt = "WARNING: Too few (%d) GSC objects: no astrometric recalibration possible."%(self.Nrefobjs)
self.logfile.write("WARNING: NOT ENUFF GSC OBJS TO CONTINUE!")
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
return -1
# write stuff to data files
fdump = open(os.path.split(_URL_)[1].split('.')[0]+'.scat', 'w')
culledfile = os.path.split(_URL_)[1].split('.')[0]+'.cull'
fcull = open(culledfile, 'w')
self.GSCmatchin = os.path.split(_URL_)[1].split('.')[0]+'.proj'
self.Ncull=0
for line in gsclines:
fdump.write(line+'\n')
flds = line.split()
# check Ra
if flds[1][0] not in "1234567890.":
continue
# FpgMag, JpgMag, Vmag
# FpgMagErr, JpgMagErr, VmagErr
_mlist = [float(flds[7]),float(flds[8]),float(flds[12])]
_elist = [float(flds[23]),float(flds[24]),float(flds[28])]
_mlist.sort()
_elist.sort()
if _mlist[0] > 88.: continue
if chopSpur:
if _mlist[1] > 88.: continue
mag = _mlist[0]
magerr = _elist[0]
del _mlist,_elist
self.Ncull += 1
# hstID, ra, dec, mag, raEpsilon, decEpsilon, magerr, epoch
oline = '%-14s %s %s %6.2f %s %s %5.2f %s\n' \
%(flds[0],flds[1],flds[2],mag,flds[4],flds[5],magerr,flds[6])
fcull.write(oline)
fdump.close()
fcull.close()
del fdump,fcull,line,oline,flds,mag,magerr
self.logfile.write("Culling: kept %d GSC objects."%self.Ncull)
print "Culling: kept %d GSC objects."%self.Ncull
# finally, write the file with projected coord offsets
cmd = 'project_coords %s 1 2 %.6f %.6f outfile=%s asec' \
%(culledfile,self.RActr,self.Decctr,self.GSCmatchin)
self.logfile.write("**> "+cmd)
projcoords = popen2.Popen4(cmd)
_outlines = projcoords.fromchild.readlines()
projcoords.fromchild.close()
if len(_outlines) > 0:
# I've never seen this happen, but better check...
errtxt = "Error: project_coords mysteriously failed!"
self.logfile.write(errtxt)
self.errorList.append((self.modName,errtxt))
for _line in _outlines:
self.logfile.write(_line)
print _line
return -1
del projcoords
self.logfile.write("Astrometric reference catalog %s constructed."%self.GSCmatchin)
os.chdir(curdir)
return 0
    def _gscReform(self,lines):
        """Reformat raw GSC2 query output into a list of CSV data lines.

        Strips any leading HTTP header block and the '[EOD]' trailer, skips
        the column-name row, keeps only comma-separated lines with more than
        10 fields, and records the object count in self.Nrefobjs.
        """
        newlines =[]
        Nobj=0
        if lines[0][:4] == "HTTP":
            # remove HTTP header: skip up to the first blank line
            for i in xrange(len(lines)):
                line = string.rstrip(lines[i])
                if line == '': break
            else:
                # no null line to end HTTP header -- just print it
                i = -1
            lines = lines[i+1:]
        hdr = []
        # lines[0] is the column-header row; data starts at lines[1]
        for line in lines[1:]:
            line = line.rstrip()
            if line and (line[:5] != "[EOD]"):
                fields = line.split(',')
                if len(fields) > 10:
                    newlines.append(line)
                    Nobj+=1
        if Nobj == 0:
            self.logfile.write("WARNING: No GSC objects returned!")
        self.logfile.write("GSC query returned %d objects."%Nobj)
        print "GSC query returned %d objects."%Nobj
        self.Nrefobjs = Nobj
        return newlines
def _deg2hms(self, ra, dec):
try:
ra = float(ra)
dec = float(dec)
except ValueError:
raise ValueError("deg2hrms: BAD input: %s %s" % (ra,dec))
# convert to hms, dms format
if dec > 90 or dec < -90:
raise ValueError("deg2hrms: BAD input ra,dec: %s %s" % (ra,dec))
ra = ra/15
while ra>24:
ra = ra-24
while ra<0:
ra = ra+24
h = int(ra)
m = int(60*(ra-h))
s = 60*(60*(ra-h)-m)
if s+0.0005 > 60:
s = 0.0
m = m+1
if m==60:
m = 0
h = h+1
if h==24:
h = 0
h = "%02d" % h
m = "%02d" % m
s = "%06.3f" % s
if dec<0:
dsign = "-"
dec = -dec
else:
dsign = "+"
dd = int(dec)
dm = int(60*(dec-dd))
ds = 60*(60*(dec-dd)-dm)
if ds+0.0005 > 60:
ds = 0.0
dm = dm+1
if dm==60:
dm = 0
dd = dd+1
dd = "%s%02d" % (dsign,dd)
dm = "%02d" % dm
ds = "%06.3f" % ds
return ((h,m,s),(dd,dm,ds))
def webquery(self, args=(), **kw):
"""Write output of a specified URL to stdout or file.
Keywords for query may be specified as a sequence of pairs in
args or as keywords. Special keywords that define the URL include:
host (default 'localhost'), url (default null), method (default 'POST'),
and port (default 80). The file keyword specifies an output filename
or file handle (default sys.stdout). Additional keywords are passed
as parameters to the query.
This method swiped in toto from Rick.
"""
args = list(args)
for key, value in kw.items():
args.append((key,value))
port = 80
method = "POST"
url = ""
host = urllib.localhost()
outfile = sys.stdout
query = []
for key, value in args:
if key == "port":
port = int(value)
elif key == "method":
method = value.upper()
elif key == "url":
url = value
elif key == "host":
host = value
elif key == "file":
outfile = value
elif value is None:
query.append(urllib.quote(key))
else:
query.append('%s=%s' % (urllib.quote(key),urllib.quote_plus(str(value))))
query = '&'.join(query)
if isinstance(outfile, types.StringType):
outfile = open(outfile,"w")
if url[:1] == "/":
# don't add an extra slash (purely for aesthetic purposes)
url = "http://%s:%d%s" % (host,port,url)
else:
url = "http://%s:%d/%s" % (host,port,url)
if not query:
query = None
elif method == "GET":
url = "%s?%s" % (url,query)
query = None
inurl = urllib.urlopen(url,query)
print url,query
s = inurl.read(102400)
while s:
outfile.write(s)
s = inurl.read(102400)
    def writeXml(self):
        """Re-mark all output image headers with WFP xml.

        Iterates every sci/context/weight/RMS image and runs
        xmlUtil.markupImage on each; this module produces no new files of
        its own, so nothing is added to outputList.
        """
        curdir = os.getcwd()
        os.chdir(self.Imagedir)
        allImageLists = [self.sciImlist, self.ctxImlist, self.wgtImlist, self.rmsImlist]
        for imlist in allImageLists:
            for im in imlist:
                file = xmlUtil.markupImage(im,dataset=self.obsName)
                # Don't write these images as output of this module, which
                # really doesn't have any.
                #if file not in self.outputList.keys():
                #    self.outputList[file] = [im]
        os.chdir(curdir)
        return
def mkMsg(self):
"""Create and write module level message for this class.
Most of this is just compiling the info. meta is a dictionary
of lists where each list is a list of tuples describing the
tag lines for the particular section of the message. This tuple
format conforms to that used by the xmlMessage class which is
modeled on basic python argument passing, i.e. (key,*value,**attr).
"""
# getting the version of project_coords
project_coords_cmd = 'project_coords --version'
outp = popen2.Popen4(project_coords_cmd)
outpline = outp.fromchild.readlines()
pcoorVer = outpline[0].split()[-1]
self.meta = {}
self.meta['module']= []
self.meta['meta'] = []
self.meta['input'] = []
self.meta['output']= []
self.meta['errorlist'] = []
self.meta['module'].append(('module','name='+self.modName,'version='+__version__,'dataset='+self.obsName))
self.meta['module'].append(('root',self.root))
self.meta['meta'].append(('meta',))
self.meta['meta'].append(('depend',))
self.meta['meta'].append(('pkg',))
self.meta['meta'].append(('name','python'))
self.meta['meta'].append(('version',pyversion.split()[0]))
self.meta['meta'].append(('pkg',))
self.meta['meta'].append(('name','pyfits'))
self.meta['meta'].append(('version',pyfits.__version__.split()[0]))
self.meta['meta'].append(('pkg',))
self.meta['meta'].append(('name','project_coords'))
self.meta['meta'].append(('version',pcoorVer))
self.meta['meta'].append(('pkg',))
self.meta['meta'].append(('name','Guide Star Catalog'))
self.meta['meta'].append(('version',_URL_.split("/")[-1].split("q")[0]))
# SExtractor info
sub = subprocess.Popen(['sex', '--version'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)
outp = sub.stdout.readlines()
name = outp[0].split()[0]
ver = outp[0].split()[2]
self.meta['meta'].append(('pkg',))
self.meta['meta'].append(('name',name))
self.meta['meta'].append(('version',ver))
cmdline1 = 'sex fitsfile -c self.InParFileName'
self.meta['meta'].append(('commandline',cmdline1))
del outp,sub,name,ver
if self.errorList:
self.meta['errorlist'].append(('errorlist',))
for pkg,err in self.errorList:
self.meta['errorlist'].append(('erroritem',err,'frompkg='+pkg))
# input section
self.meta['input'].append(('input',))
for f in self.inputList:
if string.find(f,"_asn") == -1:
self.meta['input'].append(('file','type=image/x-fits'))
self.meta['input'].append(('name',os.path.join("Images",f)))
else:
self.meta['input'].append(('file','type=image/x-fits'))
self.meta['input'].append(('name',os.path.join("Images",f)))
# output section
if self.outputList:
self.meta['output'].append(('output',))
for f in self.outputList.keys():
if string.find(f,".xml") == -1:
self.meta['output'].append(('file','type=image/x-fits'))
self.meta['output'].append(('name',os.path.join("Images",f)))
for pred in self.outputList[f]:
self.meta['output'].append(('predecessor',os.path.join("Images",pred)))
else:
self.meta['output'].append(('file','type=text/xml'))
self.meta['output'].append(('name',os.path.join("Images",f)))
for pred in self.outputList[f]:
self.meta['output'].append(('predecessor',os.path.join("Images",pred)))
# pass this dictionary to the class pMessage...
msgFile = os.path.join(self.messagedir,self.modName+"_module.xml")
mmsg = pMessage(self.meta)
mmsg.writeMsg(msgFile)
return
|
20,333 | 694c911a4e827600799cc44987daf6a14c5634e5 | from django import forms
from .models import Book
class BookCreateForm(forms.ModelForm):
    """ModelForm for creating Book instances; exposes all model fields."""
    # Override the default widget so the name input picks up Bootstrap's
    # 'form-control' styling.
    name = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    class Meta:
        model = Book
        fields = '__all__'
20,334 | 627693c1998b63f1ff11c693f41558e099b43a4e | time = input().strip()
time_arr = time.strip('AMP').split(':')
hh, mm, ss = map(int, time_arr)
if ("PM" in time) and hh != 12:
hh += 12
if ("AM" in time) and (hh == 12):
hh = 0
print('%(hh)02d:%(mm)02d:%(ss)02d' % {"hh": hh, "mm": mm, "ss": ss})
|
20,335 | 6944468bdaeaf3caba6a21b0f9835d3ccde87cc2 | import pandas as pd
def nested_dict_get(d, key):
    """Look up a value in nested dicts via a colon-separated key path.

    Example: nested_dict_get({'a': {'b': 1}}, 'a:b') -> 1

    Raises KeyError if any component of the path is missing.
    """
    value = d
    # 'part' instead of reusing 'key': the original shadowed the parameter.
    for part in key.split(':'):
        value = value[part]
    return value
# Supported row-marking reducers; add entries here instead of using eval().
_MARK_FUNCS = {'max': max, 'min': min}


def data_frame2md_table(df, value_columns=None, mark=None, header=True):
    """Render a DataFrame as a Markdown table string.

    df            -- the DataFrame to render
    value_columns -- columns holding numeric values (formatted to 4 decimals)
    mark          -- 'max' or 'min'; per row, the value among value_columns
                     equal to that reduction is emphasised with **bold**
    header        -- include the Markdown header / separator rows

    Raises KeyError for an unsupported mark name.
    """
    if value_columns is None:
        # Robustness fix: the original crashed on 'col in None' when
        # value_columns was left at its default.
        value_columns = ()
    table = ""
    if header:
        table += f"| {' | '.join(df.columns)} |\n"
        table += f"{'| --- ' * df.shape[1]}|\n"
    for i in range(df.shape[0]):
        mark_value = None
        if mark is not None:
            # Explicit dispatch instead of eval(mark): identical behaviour
            # for 'max'/'min' without executing arbitrary strings.
            mark_value = _MARK_FUNCS[mark](df.iloc[i].loc[value_columns])
        for j in range(df.shape[1]):
            if df.columns[j] in value_columns:
                if df.iloc[i, j] == mark_value:
                    table = table + f"| **{df.iloc[i, j]:.4f}** "
                else:
                    table = table + f"| {df.iloc[i, j]:.4f} "
            else:
                table = table + f"| {df.iloc[i, j]} "
        table = table + f"|\n"
    return table
def get_report(data_set_info, records_list, file_type="md"):
    """Build a report table comparing final-epoch performance across runs.

    data_set_info -- dict with a "test_param" entry naming a colon-separated
                     path (resolved by nested_dict_get) whose values form
                     the left-hand parameter column
    records_list  -- one DataFrame of per-epoch metric records per run
    file_type     -- only "md" is implemented; any other value returns None
    """
    # One column of swept-parameter values, one row per run.
    test_params = pd.DataFrame(
        {data_set_info["test_param"]: nested_dict_get(data_set_info, data_set_info["test_param"])}
    )
    # Final-epoch metrics: the last row of each run's records.
    final_perf = pd.concat([records.iloc[-1:] for records in records_list],
                           ignore_index=True)
    # NOTE(review): instant_perf (per-run column means) is computed but never
    # used below -- confirm whether a non-md report type was planned.
    instant_perf = pd.concat([pd.DataFrame(records.mean()).T for records in records_list],
                             ignore_index=True)
    if file_type == "md":
        # Bold the best (max) metric value within each row.
        table = data_frame2md_table(pd.concat([test_params, final_perf], axis=1),
                                    value_columns=records_list[0].columns,
                                    mark="max")
        return table
|
20,336 | cbcd19ae3dccd1b8d38b9900c8a55fd96ddd8c85 | import time
from selenium.common.exceptions import NoSuchElementException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from config import target
def single_task(driver, url, wait):
    """
    Process a single page given its URL.

    Runs the pre-flight check, then tries both comment channels; the task
    counts as a success if either channel goes through.
    :param driver: selenium webdriver
    :param url: URL to process
    :param wait: WebDriverWait bound to the driver
    :return: result dict -- {'status': 'success'} or {'status': 'error', ...}
    """
    result = pre_handle(driver, url)
    if result['status'] == 'error':
        return result
    result1 = comment_for_later_call(wait)
    result2 = comment_for_immediate_call(wait)
    # Either channel succeeding is good enough.
    if result1['status'] == 'success' or result2['status'] == 'success':
        result = {'status': 'success'}
        return result
    return result2
def comment_for_immediate_call(wait):
    """
    Phone call-back widget (real-time channel).

    Types the target phone number into the call-back input and clicks the
    submit button.
    :param wait: WebDriverWait bound to the driver
    :return: result dict -- {'status': 'success'} or
             {'status': 'error', 'message': Exception}
    """
    result = {'status': 'success'}
    try:
        phone_input = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "lxb-cb-input")))
        phone_input.send_keys(target['phone'])
        button = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "lxb-cb-input-btn")))
        time.sleep(1)  # give the widget a moment before clicking
        button.click()
    except Exception as e:
        result['message'] = e
        result['status'] = 'error'
    return result
def comment_for_later_call(wait):
    """
    Message-board form (non-real-time channel).

    Fills the site's leave-a-message form, matching each input to a target
    field by its placeholder text, then submits and waits for the success
    banner to appear.
    :param wait: WebDriverWait bound to the driver
    :return: result dict -- {'status': 'success'} or
             {'status': 'error', 'message': Exception}
    """
    result = {'status': 'success'}
    try:
        textarea = wait.until(EC.visibility_of_element_located((By.ID, "nb-nodeboard-set-content-js")))
        textarea.send_keys(target['comment'])
        input_list = wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'nb-nodeboard-input')))
        for text in input_list:
            placeholder = str(text.get_attribute('placeholder'))
            # Route each field by its (Chinese) placeholder label.
            if '电话' in placeholder:  # phone
                text.send_keys(target['phone'])
            elif '姓' in placeholder:  # name
                text.send_keys(target['name'])
            elif '邮' in placeholder:  # e-mail
                text.send_keys(target['email'])
            elif '地址' in placeholder:  # address
                text.send_keys(target['address'])
            elif '必填' in placeholder:  # generic "required" field
                text.send_keys(target['phone'])
        time.sleep(0.5)  # let the form settle before submitting
        button = wait.until(EC.presence_of_element_located((By.ID, 'nb_nodeboard_send')))
        button.click()
        wait.until(EC.visibility_of_element_located((By.ID, 'nb_nodeboard_success')))
    except Exception as e:
        result['message'] = e
        result['status'] = 'error'
    return result
def pre_handle(driver, url):
    """
    Pre-flight check for 404 / 502 / missing-page problems.

    Loads the URL, then scans the rendered page for text fragments that
    indicate a dead page.
    :param driver: selenium webdriver
    :param url: URL to load
    :return: result dict -- {'status': 'success'} or
             {'status': 'error', 'message': Exception}
    """
    result = {'status': 'success'}
    try:
        driver.get(url)
    except WebDriverException as e:
        if 'NotFound' in e.msg:
            result['message'] = Exception("404 Not Found")
            result['status'] = 'error'
            return result
        else:
            # Page is reachable but still loading; stop the load so the
            # element checks below can run.
            driver.execute_script("window.stop()")
    except Exception as e:
        result['message'] = e
        result['status'] = 'error'
        return result
    # Each entry: (text fragment that signals a dead page, error raised
    # for it).  A table-driven loop replaces four near-identical
    # try/except blocks from the original.
    error_markers = (
        ('502', "502 Bad Gateway"),
        ('无法进行访问', "502 Bad Gateway"),
        ('无法访问', "502 Bad Gateway"),
        ('404 Not Found', "404 Not Found"),
    )
    for marker, message in error_markers:
        try:
            if driver.find_element_by_xpath("//*[contains(text(),'%s')]" % marker) is not None:
                # fixed typo: the original error text read "502 Bad GatWay"
                raise Exception(message)
        except NoSuchElementException:
            pass  # marker absent -- page looks healthy for this check
        except Exception as e:
            result['message'] = e
            result['status'] = 'error'
            return result
    return result
|
20,337 | 09ec35168849522ad66ada4c5d13ce73d290eb51 | #!/usr/bin/python
# Python Standard Library Imports
try:
import smbus
except ImportError:
class smbus(object):
@staticmethod
def SMBus(channel):
raise Exception("smbus module not found!")
# ===========================================================================
# PyComms I2C Base Class (an rewriten Adafruit_I2C pythone class clone)
# ===========================================================================
class PyComms:
def __init__(self, address, channel = 0):
self.address = address
self.bus = smbus.SMBus(channel)
    def reverseByteOrder(self, data):
        """Reverse the byte order of a 16-bit or 32-bit value.

        NOTE(review): the byte count is inferred from the number of hex
        digits in *data*, so a value whose high byte is zero
        (e.g. 0x00123456) is reversed as a narrower word -- confirm callers
        never rely on fixed-width reversal.
        """
        # Reverses the byte order of an int (16-bit) or long (32-bit) value
        # Courtesy Vishal Sapre
        dstr = hex(data)[2:].replace('L','')  # hex digits, minus '0x' and the py2 long suffix
        byteCount = len(dstr[::2])            # ceil(hex-digit count / 2) bytes
        val = 0
        for i, n in enumerate(range(byteCount)):
            d = data & 0xFF
            # place the current lowest byte into the mirrored slot
            val |= (d << (8 * (byteCount - i - 1)))
            data >>= 8
        return val
    def readBit(self, reg, bitNum):
        """Return bit *bitNum* of register *reg*, still in its bit position.

        NOTE: the result is the masked value (0 or 1 << bitNum), not a
        normalised 0/1 flag -- callers should test truthiness.
        """
        b = self.readU8(reg)
        data = b & (1 << bitNum)
        return data
    def writeBit(self, reg, bitNum, data):
        """Set (data truthy) or clear (data falsy) bit *bitNum* of *reg*.

        Performs a read-modify-write of the whole byte and returns write8's
        result (None on success, -1 on I2C error).
        """
        b = self.readU8(reg)
        if data != 0:
            b = (b | (1 << bitNum))
        else:
            b = (b & ~(1 << bitNum))
        return self.write8(reg, b)
def readBits(self, reg, bitStart, length):
# 01101001 read byte
# 76543210 bit numbers
# xxx args: bitStart=4, length=3
# 010 masked
# -> 010 shifted
b = self.readU8(reg)
mask = ((1 << length) - 1) << (bitStart - length + 1)
b &= mask
b >>= (bitStart - length + 1)
return b
def writeBits(self, reg, bitStart, length, data):
# 010 value to write
# 76543210 bit numbers
# xxx args: bitStart=4, length=3
# 00011100 mask byte
# 10101111 original value (sample)
# 10100011 original & ~mask
# 10101011 masked | value
b = self.readU8(reg)
mask = ((1 << length) - 1) << (bitStart - length + 1)
data <<= (bitStart - length + 1)
data &= mask
b &= ~(mask)
b |= data
return self.write8(reg, b)
def readBytes(self, reg, length):
output = []
i = 0
while i < length:
output.append(self.readU8(reg))
i += 1
return output
def readBytesListU(self, reg, length):
output = []
i = 0
while i < length:
output.append(self.readU8(reg + i))
i += 1
return output
def readBytesListS(self, reg, length):
output = []
i = 0
while i < length:
output.append(self.readS8(reg + i))
i += 1
return output
def writeList(self, reg, list):
# Writes an array of bytes using I2C format"
try:
self.bus.write_i2c_block_data(self.address, reg, list)
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readBlock(self, reg):
return self.bus.read_i2c_block_data(self.address, reg)
def write8(self, reg, value):
# Writes an 8-bit value to the specified register/address
try:
self.bus.write_byte_data(self.address, reg, value)
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readU8(self, reg):
# Read an unsigned byte from the I2C device
try:
result = self.bus.read_byte_data(self.address, reg)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readS8(self, reg):
# Reads a signed byte from the I2C device
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127:
return result - 256
else:
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readU16(self, reg):
# Reads an unsigned 16-bit value from the I2C device
try:
hibyte = self.bus.read_byte_data(self.address, reg)
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg + 1)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def readS16(self, reg):
# Reads a signed 16-bit value from the I2C device
try:
hibyte = self.bus.read_byte_data(self.address, reg)
if hibyte > 127:
hibyte -= 256
result = (hibyte << 8) + self.bus.read_byte_data(self.address, reg + 1)
return result
except (IOError):
print ("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1 |
20,338 | 13317aba379903551a8727be20bf47658f76fa8e | import hashlib
def find_low_hashes(base="H2thereum", limit=10000000, prefix="000"):
    """Brute-force nonces i in [0, limit) with sha256(base + str(i)) starting with *prefix*.

    Returns a list of (nonce, hexdigest) pairs for every match.
    The original script was Python-2 only (``xrange``, print statement) and
    carried a dead ``i = 0`` before the loop; this version is Python 3 and
    parameterised so the search size and difficulty can be varied.
    """
    matches = []
    for i in range(limit):
        h = hashlib.sha256((base + str(i)).encode("ascii")).hexdigest()
        if h.startswith(prefix):
            matches.append((i, h))
    return matches


if __name__ == "__main__":
    # Original behaviour: print every nonce whose hash has three leading zeros.
    for i, h in find_low_hashes():
        print("sha256 (\"H2thereum\" + \"{}\") == 0x{}".format(i, h))
|
20,339 | 38998be88130806ba3c2cd4fa8ec8c87e4417693 | # 반례
# [0 0 0 0 0 0 0 0 0] * 9
# Sudoku solver: DFS backtracking over the global 9x9 grid `arr` (0 = empty cell).
# `ans` flips to 1 once a completed grid has been printed.
ans = 0
def dfs(x, y, cnt_dict):
    """Try to fill the next empty cell; (x, y) is the cell just assigned.

    cnt_dict maps digit -> 1 when that digit is already used in the current
    cell's row, column, or 3x3 box.
    NOTE(review): the debug print below and the reuse of cnt_dict across
    recursion levels look unfinished -- the (8, 8) completion test also fires
    on coordinates, not on the grid actually being full; confirm intent.
    """
    print(x, y)
    global ans
    if ans == 1:
        return
    # the digit just placed clashes with its row/column/box
    if cnt_dict.get(arr[x][y]) == 1:
        return
    if x == 8 and y == 8:
        # solution found: marker line, then the grid, then stop all branches
        print(111111111111111111111111)
        for i in arr:
            print(*i)
        ans = 1
        return
    # if every digit 1..9 is already used around (x, y) this branch is dead
    flag = 0
    for i in range(1, 10):
        if cnt_dict[i] == 0:
            flag = 1
    if flag == 0:
        return
    else:
        for i in range(9):
            for j in range(9):
                if arr[i][j] != 0:
                    continue
                else:
                    cnt_dict = {i + 1: 0 for i in range(9)}
                    # mark digits used in row i, column j, and the 3x3 box
                    for cnt in range(9):
                        if arr[i][cnt] != 0:
                            cnt_dict[arr[i][cnt]] = 1
                        if arr[cnt][j] != 0:
                            cnt_dict[arr[cnt][j]] = 1
                    first_x = i // 3
                    first_y = j // 3
                    first_x *= 3
                    first_y *= 3
                    for row in range(first_x, first_x+3):
                        for col in range(first_y, first_y+3):
                            if arr[row][col] != 0:
                                cnt_dict[arr[row][col]] = 1
                    for cnt in range(1, 10):
                        arr[i][j] = cnt
                        dfs(i, j, cnt_dict)
                        # on return, reset the cell to 0 (backtrack)
                        arr[i][j] = 0
    # if j > 0:
    #     dfs(i, j-1, cnt_dict)
    # else:
    #     dfs(i-1, 8, cnt_dict)
# arr = [list(map(int, input().split())) for _ in range(9)]
# grid is read as 9 lines of 9 contiguous digits from stdin
arr = [list(map(int, input())) for _ in range(9)]
cnt_dict = {i + 1: 0 for i in range(9)}
dfs(0, 0, cnt_dict)
print(arr)
|
20,340 | ee5be0d3bc88b7ac38bef822b7d18feeebf7516c | # -*- coding: utf-8 -*-
# utils Functions
#import MySQLdb
import sqlite3,datetime
import pymongo
#def findone_mongo():
def get_fulltext_cyb_corpus():
    """Return [[reference_id, full_text], ...] for every Cybergeo reference.

    Joins the csv mapping (column 2 -> column 1: reference id -> raw text id)
    with the ids stored in the mysql ``cybergeo`` table, then loads each
    article's text file from the raw data directory.
    """
    cybids = read_csv_as_dico('../../../Data/raw/cybergeo.csv',",",2,1)
    schids = get_data("SELECT id FROM cybergeo",'mysql')
    res = []
    for ref in schids:
        # ref is a DB row tuple; ref[0] is the reference id
        res.append([ref[0],read_file('../../../Data/raw/texts/'+str(cybids[ref[0]])+"_text.txt")])
    return(res)
def read_file(f):
    """Read text file *f* and return its lines joined with a leading space each.

    Reading stops at the first empty line (historical behaviour of this module).
    """
    res = ""
    with open(f, 'r') as data:  # close the handle even on error
        currentLine = data.readline().replace('\n', '')
        while currentLine != '':
            res = res + " " + currentLine
            # BUG FIX: the original read from the undefined name ``conf`` here,
            # which raised NameError after the first line.
            currentLine = data.readline().replace('\n', '')
    return(res)
def read_csv_as_dico(f,delimiter,key_column,value_column):
    """Read csv file *f* into a dict of key_column -> value_column.

    Double quotes are stripped from both fields; reading stops at the first
    empty line.
    """
    res = dict()
    with open(f, 'r') as data:  # close the handle even on error
        currentLine = data.readline().replace('\n', '')
        while currentLine != '':
            t = str.split(currentLine, delimiter)
            res[t[key_column].replace("\"", "")] = t[value_column].replace("\"", "")
            # BUG FIX: the original read from the undefined name ``conf`` here,
            # which raised NameError after the first line.
            currentLine = data.readline().replace('\n', '')
    return(res)
# read a conf file under the format key:value
# , returns a dictionary
def read_conf(file):
    """Parse *file* of ``key:value`` lines into a dict.

    Reading stops at the first empty line.  Raises Exception when a line does
    not contain exactly one ':'.
    """
    res = dict()
    with open(file, 'r') as conf:  # the original leaked the handle on error
        currentLine = conf.readline().replace('\n', '')
        while currentLine != '':
            t = str.split(currentLine, ':')
            if len(t) != 2 : raise Exception('error in conf file')
            res[t[0]] = t[1]
            currentLine = conf.readline().replace('\n', '')
    return(res)
# return the mysql connection
def configure_sql():
    """Open a MySQLdb connection to the local ``cybergeo`` database.

    Credentials come from conf/mysql.conf (keys ``user`` and ``password``).
    NOTE(review): the ``import MySQLdb`` at the top of this module is
    commented out, so calling this currently raises NameError -- confirm
    whether the mysql path is still in use.
    """
    # conf mysql
    conf=read_conf('conf/mysql.conf')
    user = conf['user']
    password = conf['password']
    conn = MySQLdb.connect("localhost",user,password,"cybergeo",charset="utf8")
    return(conn)
# returns sqlite connection
def configure_sqlite(database):
    """Open *database* with a generous 600 s busy timeout and return the connection."""
    busy_timeout_seconds = 600
    return sqlite3.connect(database, busy_timeout_seconds)
def get_ids(database,collection):
    """Return the list of 'id' values stored in the given mongo collection."""
    d = get_data_mongo(database,collection,{'id':{'$gt':'0'}},{'id':1})
    ids = []
    for row in d :
        ids.append(row['id'])
    print('Raw data : '+str(len(ids)))
    return(ids)
def get_data_mongo(database,collection,query,filt):
    """Run *query* (with projection *filt*) against a local mongod and return the cursor."""
    mongo = pymongo.MongoClient('localhost', 27017)
    database = mongo[database]
    col = database[collection]
    data = col.find(query,filt)
    return(data)
def get_data(query,source):
    """Fetch all rows for *query*; *source* is 'mysql' or a sqlite file path."""
    if source=='mysql' :
        conn = configure_sql()
    else :
        conn = configure_sqlite(source)
        # return raw bytes/str instead of decoding, to tolerate mixed encodings
        conn.text_factory = str
    cursor = conn.cursor()
    cursor.execute(query)
    data=cursor.fetchall()
    return(data)
def fetchone_sqlite(query,database):
    """Execute *query* on the sqlite *database* and return the first row (or None)."""
    conn = configure_sqlite(database)
    cursor = conn.cursor()
    cursor.execute(query)
    res = cursor.fetchone()
    conn.commit()
    conn.close()
    return(res)
def insert_sqlite(query,database):
    """Execute one write statement against the sqlite *database* and commit."""
    conn = configure_sqlite(database)
    cursor = conn.cursor()
    cursor.execute(query)
    conn.commit()
    conn.close()
def query_mysql(query):
    """Execute one write statement against the mysql connection and commit."""
    conn = configure_sql()
    cursor = conn.cursor()
    cursor.execute(query)
    conn.commit()
    conn.close()
##
# query formatted with ?
# 'INSERT INTO table VALUES (?,?,?,?,?)'
def insertmany_sqlite(query,values,database):
    """Execute *query* once per parameter row in *values* against the sqlite *database*.

    *query* uses ``?`` placeholders, e.g. ``'INSERT INTO t VALUES (?,?)'``.
    """
    conn = configure_sqlite(database)
    cursor = conn.cursor()
    # BUG FIX: the original called executemany(query) without the parameter
    # rows, which raises TypeError; the values sequence must be passed too.
    cursor.executemany(query, values)
    conn.commit()
    conn.close()
def implode(l,delimiter):
    """Join the elements of *l* into one string separated by *delimiter*.

    Equivalent to ``delimiter.join(map(str, l))``.
    BUG FIX: the original never incremented its position counter, so the
    delimiter was also appended after the last element.
    """
    return delimiter.join(str(k) for k in l)
def import_kw_dico(database,collection):
    """Build keyword dictionaries from a local mongo collection.

    Returns ``[ref_kw_dico, kw_ref_dico]`` where ref_kw_dico maps a reference
    id to its keyword list and kw_ref_dico maps a keyword to the ids of the
    references that carry it.
    """
    mongo = pymongo.MongoClient('localhost', 27017)
    database = mongo[database]
    col = database[collection]
    data = col.find()
    ref_kw_dico={}
    kw_ref_dico={}
    for row in data:
        keywords = row['keywords'];ref_id=row['id']
        ref_kw_dico[ref_id] = keywords
        for kw in keywords :
            if kw not in kw_ref_dico : kw_ref_dico[kw] = []
            # BUG FIX: the original appended the keyword to its own entry;
            # the reverse index must record the reference id instead
            # (same convention as import_kw_dico_req below).
            kw_ref_dico[kw].append(ref_id)
    print('dicos : '+str(len(ref_kw_dico))+' ; '+str(len(kw_ref_dico)))
    return([ref_kw_dico,kw_ref_dico])
##
# usage : [ref_kw_dico,kw_ref_dico] = import_kw_dico_req()
def import_kw_dico_req(request,source):
    """Build keyword dictionaries from a SQL *request* returning (id, 'kw1;kw2;...;') rows.

    Returns ``[ref_kw_dico, kw_ref_dico]``: id -> keyword list and
    keyword -> list of ids carrying that keyword.
    """
    # import extracted keywords from database
    data = get_data(request,source)
    ref_kw_dico = dict() # dictionary refid -> keywords as list
    kw_ref_dico = dict() # dictionary keyword -> refs as list
    for row in data :
        ref_id=row[0]
        # the keyword field ends with a trailing ';', so drop the last empty split
        keywords_raw = row[1].split(';')
        keywords = [keywords_raw[i] for i in range(len(keywords_raw)-1)]
        ref_kw_dico[ref_id] = keywords
        for kw in keywords :
            if kw not in kw_ref_dico : kw_ref_dico[kw] = []
            # BUG FIX: the original appended the keyword itself; the reverse
            # index must record the reference id.
            kw_ref_dico[kw].append(ref_id)
    return([ref_kw_dico,kw_ref_dico])
def import_kw_dico_sqlite(source):
    """Shortcut: keyword dictionaries from the sqlite refdesc table (rows with keywords only)."""
    return(import_kw_dico_req('SELECT id,abstract_keywords FROM refdesc WHERE abstract_keywords IS NOT NULL;',source))
##
# corpus as (id,...)
def extract_sub_dicos(corpus,occurence_dicos) :
    """Restrict the global keyword dictionaries to the references in *corpus*.

    Returns ``[ref_kw_dico, kw_ref_dico]`` covering only the given corpus;
    reference ids are ascii-encoded to match the keys of the global dicts.
    """
    all_ref_kw = occurence_dicos[0]
    all_kw_ref = occurence_dicos[1]
    sub_ref_kw = dict()
    sub_kw_ref = dict()
    for entry in corpus :
        encoded_id = entry[0].encode('ascii','ignore')
        kw_list = all_ref_kw[encoded_id] if encoded_id in all_ref_kw else []
        sub_ref_kw[encoded_id] = kw_list
        for keyword in kw_list :
            sub_kw_ref.setdefault(keyword, []).append(encoded_id)
    return([sub_ref_kw,sub_kw_ref])
def mysql2sqlite(sqlitedatabase):
    """Copy every keyword-bearing refdesc row from mysql into the sqlite *sqlitedatabase*.

    Assumes the sqlite refdesc table has exactly 6 columns matching the mysql schema.
    """
    data = get_data('SELECT * FROM refdesc WHERE abstract_keywords IS NOT NULL','mysql')
    conn = configure_sqlite(sqlitedatabase)
    cursor = conn.cursor()
    cursor.executemany('INSERT INTO refdesc VALUES (?,?,?,?,?,?)', data)
    conn.commit()
    conn.close()
def export_dico_csv(dico,fileprefix,withDate):
    """Write *dico* (key -> list) to ``<fileprefix>[<date>].csv`` as ``key;v1;v2;...;`` lines.

    When *withDate* is true the current timestamp is appended to the file name.
    """
    datestr = ''
    if withDate : datestr = str(datetime.datetime.now())
    # context manager fixes the original's leaked handle, which could lose
    # buffered output at interpreter exit
    with open(fileprefix+datestr+'.csv','w') as outfile:
        for k in dico.keys():
            outfile.write(k+";")
            for kw in dico[k]:
                outfile.write(kw+";")
            outfile.write('\n')
def export_dico_num_csv(dico,fileprefix,withDate):
    """Write *dico* (key -> number) to ``<fileprefix>[<date>].csv`` as ``key;value`` lines.

    When *withDate* is true the current timestamp is appended to the file name.
    """
    datestr = ''
    if withDate : datestr = str(datetime.datetime.now())
    # context manager fixes the original's leaked file handle
    with open(fileprefix+datestr+'.csv','w') as outfile:
        for k in dico.keys():
            outfile.write(k+";")
            outfile.write(str(dico[k]))
            outfile.write('\n')
def export_list(l,fileprefix,withDate):
    """Write each element of *l* on its own line of ``<fileprefix>[<date>].csv``.

    When *withDate* is true the current timestamp is appended to the file name.
    """
    datestr = ''
    if withDate : datestr = str(datetime.datetime.now())
    # context manager fixes the original's leaked file handle
    with open(fileprefix+datestr+'.csv','w') as outfile:
        for k in l :
            outfile.write(k)
            outfile.write('\n')
def export_matrix_csv(m,fileprefix,delimiter,withDate):
    """Write matrix *m* (list of rows) to ``<fileprefix>[<date>].csv``.

    NOTE(review): uses the Python 2 ``unicode`` builtin -- under Python 3 the
    isinstance test raises NameError; confirm the targeted interpreter.
    """
    datestr = ''
    if withDate : datestr = str(datetime.datetime.now())
    outfile=open(fileprefix+datestr+'.csv','w')
    for r in m :
        #print(len(r))
        #print(r)
        for c in range(len(r)) :
            #print(str(r[c]))
            t=''
            #print(r[c][0])
            # unicode cells are re-encoded to utf-8; everything else is str()'d
            if isinstance(r[c],unicode) : t=unicode(r[c]).encode('utf8','ignore')
            else : t = str(r[c])
            outfile.write(t)
            if c < len(r)-1 : outfile.write(delimiter)
        outfile.write('\n')
|
20,341 | 03401e92363a6d3fdccc3532c229c516080cf430 | # -*- coding: utf-8 -*-
"""
samsaraapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class DriverForCreate(object):
    """Implementation of the 'DriverForCreate' model.

    APIMATIC-generated request model for creating a driver; maps snake_case
    attributes to the API's camelCase JSON keys via ``_names``.

    Attributes:
        password (string): Driver's password for the driver app.
        tag_ids (list of long|int): A list of tag IDs.
        eld_adverse_weather_exemption_enabled (bool): Flag indicating this
            driver may use Adverse Weather exemptions in ELD logs.
        eld_big_day_exemption_enabled (bool): Flag indicating this driver may
            use Big Day excemptions in ELD logs.
        eld_day_start_hour (int): 0 indicating midnight-to-midnight ELD
            driving hours, 12 to indicate noon-to-noon driving hours.
        eld_exempt (bool): Flag indicating this driver is exempt from the
            Electronic Logging Mandate.
        eld_exempt_reason (string): Reason that this driver is exempt from the
            Electronic Logging Mandate (see eldExempt).
        eld_pc_enabled (bool): Flag indicating this driver may select the
            Personal Conveyance duty status in ELD logs.
        eld_ym_enabled (bool): Flag indicating this driver may select the Yard
            Move duty status in ELD logs.
        external_ids (dict<object, string>): Dictionary of external IDs
            (string key-value pairs)
        group_id (long|int): ID of the group if the organization has multiple
            groups (uncommon).
        license_number (string): Driver's state issued license number.
        license_state (string): Abbreviation of state that issued driver's
            license.
        name (string): Driver's name.
        notes (string): Notes about the driver.
        phone (string): Driver's phone number. Please include only digits, ex.
            4157771234
        username (string): Driver's login username into the driver app.
        vehicle_id (long|int): ID of the vehicle assigned to the driver for
            static vehicle assignments. (uncommon).
    """
    # Create a mapping from Model property names to API property names
    _names = {
        "password":'password',
        "name":'name',
        "tag_ids":'tagIds',
        "eld_adverse_weather_exemption_enabled":'eldAdverseWeatherExemptionEnabled',
        "eld_big_day_exemption_enabled":'eldBigDayExemptionEnabled',
        "eld_day_start_hour":'eldDayStartHour',
        "eld_exempt":'eldExempt',
        "eld_exempt_reason":'eldExemptReason',
        "eld_pc_enabled":'eldPcEnabled',
        "eld_ym_enabled":'eldYmEnabled',
        "external_ids":'externalIds',
        "group_id":'groupId',
        "license_number":'licenseNumber',
        "license_state":'licenseState',
        "notes":'notes',
        "phone":'phone',
        "username":'username',
        "vehicle_id":'vehicleId'
    }
    def __init__(self,
                 password=None,
                 name=None,
                 tag_ids=None,
                 eld_adverse_weather_exemption_enabled=None,
                 eld_big_day_exemption_enabled=None,
                 eld_day_start_hour=None,
                 eld_exempt=None,
                 eld_exempt_reason=None,
                 eld_pc_enabled=False,
                 eld_ym_enabled=False,
                 external_ids=None,
                 group_id=None,
                 license_number=None,
                 license_state=None,
                 notes=None,
                 phone=None,
                 username=None,
                 vehicle_id=None):
        """Constructor for the DriverForCreate class"""
        # Initialize members of the class
        self.password = password
        self.tag_ids = tag_ids
        self.eld_adverse_weather_exemption_enabled = eld_adverse_weather_exemption_enabled
        self.eld_big_day_exemption_enabled = eld_big_day_exemption_enabled
        self.eld_day_start_hour = eld_day_start_hour
        self.eld_exempt = eld_exempt
        self.eld_exempt_reason = eld_exempt_reason
        self.eld_pc_enabled = eld_pc_enabled
        self.eld_ym_enabled = eld_ym_enabled
        self.external_ids = external_ids
        self.group_id = group_id
        self.license_number = license_number
        self.license_state = license_state
        self.name = name
        self.notes = notes
        self.phone = phone
        self.username = username
        self.vehicle_id = vehicle_id
    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None
        # Extract variables from the dictionary
        password = dictionary.get('password')
        name = dictionary.get('name')
        tag_ids = dictionary.get('tagIds')
        eld_adverse_weather_exemption_enabled = dictionary.get('eldAdverseWeatherExemptionEnabled')
        eld_big_day_exemption_enabled = dictionary.get('eldBigDayExemptionEnabled')
        eld_day_start_hour = dictionary.get('eldDayStartHour')
        eld_exempt = dictionary.get('eldExempt')
        eld_exempt_reason = dictionary.get('eldExemptReason')
        # any falsy value (missing, None, False, 0) is normalised to False here
        eld_pc_enabled = dictionary.get("eldPcEnabled") if dictionary.get("eldPcEnabled") else False
        eld_ym_enabled = dictionary.get("eldYmEnabled") if dictionary.get("eldYmEnabled") else False
        external_ids = dictionary.get('externalIds')
        group_id = dictionary.get('groupId')
        license_number = dictionary.get('licenseNumber')
        license_state = dictionary.get('licenseState')
        notes = dictionary.get('notes')
        phone = dictionary.get('phone')
        username = dictionary.get('username')
        vehicle_id = dictionary.get('vehicleId')
        # Return an object of this model (positional args must follow __init__ order)
        return cls(password,
                   name,
                   tag_ids,
                   eld_adverse_weather_exemption_enabled,
                   eld_big_day_exemption_enabled,
                   eld_day_start_hour,
                   eld_exempt,
                   eld_exempt_reason,
                   eld_pc_enabled,
                   eld_ym_enabled,
                   external_ids,
                   group_id,
                   license_number,
                   license_state,
                   notes,
                   phone,
                   username,
                   vehicle_id)
|
20,342 | b8c5d7250458fa07c884f1fe2dda88c62e5b4e88 | from tkinter import *
from tkinter.ttk import *
from socket import *
from select import *
import sys
HOST = "localhost"
PORT = 33110
# Non-blocking chat relay: one listening TCP socket polled with select()
server = socket(AF_INET, SOCK_STREAM)
server.bind((HOST, PORT))
server.listen(5)
clients = []     # each entry: [socket, address, outbound message queue]
id_client = {}   # numeric client id -> socket
def getClient():
    """Return the socket object of every currently tracked client."""
    return [entry[0] for entry in clients]
# Main relay loop: accept new clients, broadcast the id list, read incoming
# messages (first byte = destination id), and flush queued messages.
# NOTE(review): the bare ``except`` clauses hide real errors, ``tujuan`` is
# reused across loop iterations (NameError if a send fires before any recv),
# and clients are removed from lists while iterating them -- confirm before
# relying on this in production.
while 1:
    #try:
    read, write, error = select([server], [], [], 0)
    if(len(read)):
        # a new connection is pending on the listening socket
        client, address = server.accept()
        clients.append([client, address, []])
        #print(client)
        id_client[len(id_client)+1] = clients[-1][0]
        #for i in range(len(id_client)):
        #data = str(id_client.keys())
        #data = []
        data = []
        for a in id_client.keys():
            data.append(a)
        #print(data)
        # broadcast the current id list; drop clients whose send fails
        for i in data:
            try:
                b = id_client[i].send(str.encode(str(data)))
            except:
                for y in clients:
                    if y[0] == id_client[i]:
                        y[0].close()
                        print("a")
                        clients.remove(y)
                        id_client.pop(i)
                #id_client.pop(i)
                #print("Tipe clients:",type(y[0]))
                #print("Tipe id_client:",type(id_client[i]))
        #print(id_client[data[0]])
        #print(clients)
        #print(id_client)
        #print(id_client[1])
        #print(id_client)
        #print(address[1])
    use = getClient()
    try:
        # poll readable clients: first decoded character is the target id
        read, write, error = select(use, [], [], 0)
        if(len(read)):
            for client in read:
                #print(client)
                data = client.recv(1024)
                #print(bytes.decode(data)[2:])
                tujuan = int(bytes.decode(data)[0])
                #print(id_client[tujuan])
                if(data == 0):
                    for c in clients:
                        if c[0] == client:
                            clients.remove(c)
                            break
                else:
                    # queue the raw message on every client's outbox
                    for c in clients:
                        c[2].append(data)
    except:
        pass
    try:
        # flush outboxes: send each queued message to the last target id
        use = getClient()
        read, write, error = select([], use, [], 0)
        if(len(write)):
            for client in write:
                for c in clients:
                    if c[0] == client:
                        for data in c[2]:
                            #print(client)
                            #value = str.encode(str(data[2:]))
                            sent = id_client[tujuan].send(data)
                            c[2].remove(data)
                        break
                break
    except:
        pass
    # except:
    # pass
|
20,343 | b2a0d45fa110def4d3d977825b7166788c8fc4a1 | # -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.shortcuts import render_to_response, redirect # response to template, redirect to another view
from django.http.response import Http404 # ERRORR 404
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import get_template
from django.template import Context, RequestContext # RequestContext <-- get user from request
from django.core.context_processors import csrf # csrf token
from nodarb.models import *
from klienti.models import Klienti
from grafiks.models import Grafiks, Planotajs
from pieraksts.forms import KlientsForm
from pieraksts.models import *
# E-pasta modulis
from main import mail
from slugify import slugify
import datetime
today = datetime.datetime.now() # "now" captured ONCE at import time -- NOTE(review): goes stale in a long-running server process
# !!!!! TRENERU LIST !!!!!
def trener_list( n_id ):
    """Return the trainers linked to the class type with slug *n_id*.

    When more than one trainer is available the literal 'any' is prepended so
    templates can offer an "any trainer" choice.
    """
    class_type = Nodarb_tips.objects.get( slug=n_id )
    relations = class_type.n.all()
    choices = ['any'] if relations.count() > 1 else []
    for relation in relations:
        choices.append(relation.treneris)
    return choices
# !!!!! NODARBIBAS LAIKU OVERLAP CHEKER !!!!!
def nod_check(n_id, k_id):
    """Return True when client *k_id* may book class *n_id* (no time overlap).

    Scans every class scheduled on the same date; if the client already holds
    a booking for any class whose interval overlaps the requested one the
    booking is denied (False).
    """
    result = False # denied Pieraksts  -- NOTE(review): this local is never used
    n = Grafiks.objects.get( id=n_id )
    nod_start = getattr( n, 'sakums')
    nod_end = getattr( n, 'sakums') + datetime.timedelta(minutes=int(getattr( n, 'ilgums')))
    nod_date = nod_start.date()
    date_nod = Grafiks.objects.filter( sakums__startswith = nod_date ).order_by('sakums')
    count = 0
    for d in date_nod:
        end = d.sakums + datetime.timedelta(minutes=int(d.ilgums))
        # standard interval-overlap test: intervals intersect iff
        # max(starts) < min(ends)
        Overlap = max(nod_start, d.sakums) < min(nod_end, end)
        if Overlap == True:
            try:
                pieraksti_nodarb = Pieraksti.objects.get( klients = k_id, nodarbiba = d )
                count += 1
            except Pieraksti.DoesNotExist:
                pass
            except:
                # any other failure (e.g. multiple rows) also counts as taken
                count += 1
    if count != 0:
        return False # Pieraksts --> DENIED
    else:
        return True
# =================================================================================================================
# !!! Visas nodarbibas !!!  (all class types)
def home(request):
    """List every publicly visible class type, ordered by name."""
    args = {}
    args['title'] = 'Nodarbības'
    args['nodarbibas'] = Nodarb_tips.objects.filter( redz = True ).order_by('nos') # visible class types only
    return render_to_response( 'nodarb.html', args )
# !!! Nodarbibas izvele !!!  (class type selection)
def tren(request, n_id):
    """Dispatch class type *n_id* to the 'any trainer' or single-trainer flow."""
    try:
        nod = Nodarb_tips.objects.get( slug=n_id ) # class type
    except ObjectDoesNotExist: # not existing --> 404
        return redirect ('main')
    t = trener_list(n_id)
    if not t:
        return redirect ('main') # Temporary solution ( Nodarbiba redz=True, no coresponding Grafiks entries)
    if len(t) > 1:
        return redirect( 'any', n_id=n_id) # ===> ANY TRAINER
    return redirect( 'specific', n_id=n_id, t_id=t[0].slug ) # ===> SPECIFIC TRAINER
# =================================================================================================================
# !!! ANY trainer !!!
def any(request, n_id):
    """Schedule view for class *n_id* across all trainers, grouped per day.

    NOTE(review): the view name shadows the builtin any(); kept because URL
    configuration refers to it by this name.  It also uses the module-level
    ``today`` captured at import time, which goes stale in a long-running
    process -- confirm before relying on the date maths.
    """
    try:
        n = Nodarb_tips.objects.get( slug=n_id ) # class type
    except ObjectDoesNotExist: # not existing --> 404
        return redirect ('main')
    args = {}
    args['title'] = getattr( n, 'nos') + u' - Visi' # class type name
    args['nodarb_slug'] = n_id
    args['treneri'] = trener_list( n_id )
    grafiks_data = []
    start_time = datetime.datetime(today.year, today.month, today.day)
    end_time = today.replace(hour=23, minute=59, second=59)
    # today: only classes that have not started yet
    gr = Grafiks.objects.filter(nodarbiba = n, sakums__range=( datetime.datetime.now() , end_time)).order_by('sakums')
    if gr.count() != 0:
        grafiks_data.append(gr)
    # then the next 28 full days
    for day in range(1,29):
        gr = Grafiks.objects.filter(nodarbiba = n, sakums__range=( start_time + datetime.timedelta( days=day) , end_time + datetime.timedelta( days=day ))).order_by('sakums')
        if gr.count() != 0:
            grafiks_data.append(gr)
    args['grafiks'] = grafiks_data
    if (len(grafiks_data) % 4) == 0:
        args['carousel_end'] = len(grafiks_data)
    args['back'] = False
    return render_to_response( 'select.html', args )
# !!! SPECIFIC trainer !!!
def specific(request, n_id, t_id):
    """Schedule view for class *n_id* restricted to trainer *t_id* (same day grouping as any())."""
    try:
        n = Nodarb_tips.objects.get( slug=n_id ) # class type
        t = Treneris.objects.get( slug=t_id ) # trainer
    except ObjectDoesNotExist: # not existing --> 404
        return redirect ('main')
    args = {}
    args['title'] = getattr( n, 'nos') + ' - ' + getattr( t, 'vards') # class type and trainer name
    args['nodarb_slug'] = n_id
    args['treneri'] = trener_list( n_id )
    grafiks_data = []
    start_time = datetime.datetime(today.year, today.month, today.day)
    end_time = today.replace(hour=23, minute=59, second=59)
    gr = Grafiks.objects.filter(nodarbiba = n, treneris = t, sakums__range=( datetime.datetime.now() , end_time)).order_by('sakums')
    if gr.count() != 0:
        grafiks_data.append(gr)
    for day in range(1,29):
        gr = Grafiks.objects.filter(nodarbiba = n, treneris = t, sakums__range=( start_time + datetime.timedelta( days=day) , end_time + datetime.timedelta( days=day ))).order_by('sakums')
        if gr.count() != 0:
            grafiks_data.append(gr)
    args['grafiks'] = grafiks_data
    if (len(grafiks_data) % 4) == 0:
        args['carousel_end'] = len(grafiks_data)
    args['back'] = False
    return render_to_response( 'select.html', args )
# =================================================================================================================
# !!! Pieraksts !!!  (booking)
def pieraksts(request, g_id):
    """Booking form/handler for schedule entry *g_id*.

    GET renders the form; POST validates the client, checks free seats and
    time overlaps, creates/updates the Klienti record, decrements the seat
    count, stores the Pieraksti row and best-effort e-mails a cancel code.
    """
    try:
        nod = Grafiks.objects.get( id=g_id )
    except ObjectDoesNotExist: # not existing --> 404
        return redirect ('main')
    form = KlientsForm
    args = {}
    args['g_id'] = str(g_id)
    args['nodarb_slug'] = nod.nodarbiba.slug
    args['title'] = nod.nodarbiba.nos + ' - ' + nod.treneris.vards
    args['laiks'] = nod.sakums
    args['form'] = form
    args['back'] = True
    args['time'] =True
    args.update(csrf(request)) # ADD CSRF TOKEN
    if request.POST:
        form = KlientsForm( request.POST )
        if form.is_valid():
            # SLUGIFY "Vārds Uzvārds" --> "vards_uzvards"
            new_name = slugify(form.cleaned_data['vards']).lower()
            new_email = form.cleaned_data['e_pasts'].lower()
            new_tel = form.cleaned_data['tel']
            # REMOVE the +371 country prefix (with or without a space)
            if new_tel.startswith('+371 '):
                new_tel = new_tel[5:]
            elif new_tel.startswith('+371'):
                new_tel = new_tel[4:]
            else:
                pass
            args['vards'] = form.cleaned_data['vards']
            args['epasts'] = new_email
            args['telefons'] = form.cleaned_data['tel']
            error = False
            clients = Klienti.objects.all()
            new = 0
            # same e-mail registered under a different name --> auth error
            for c in clients:
                if c.e_pasts == new_email and c.vards != new_name:
                    # different client name for this e-mail
                    error = True
                    args['error_msg'] = u' Autorizācijas kļūda, nekorekts lietotāja vārds'
            if error == True:
                args['error'] = True
                args['form'] = form # ERROR MESSAGE
                return render_to_response( 'pieraksts.html', args )
            # existing client path
            for c in clients:
                if c.e_pasts == new_email and c.vards == new_name:
                    # client already exists
                    if getattr(Grafiks.objects.get( id=g_id ), 'vietas') < 1: # IF VIETAS=0 --> ERROR (no seats)
                        error = True
                        args['error_msg'] = u' Atvainojiet visas nodarbības vietas jau ir aizņemtas'
                    if nod_check(g_id, c) == False: # False --> already booked at this time
                        error = True
                        args['error_msg'] = u' Jūs uz šo laiku jau esat pierakstījies'
                    if error == False: # seats left and no overlap --> book
                        c.pieteikuma_reizes += 1
                        c.pedejais_pieteikums = datetime.datetime.now()
                        c.tel = new_tel # UPDATE stored phone number
                        c.save()
                        new += 1
                        nodarbiba = Grafiks.objects.get( id=g_id ) # seats -1
                        nodarbiba.vietas -= 1
                        nodarbiba.save()
                        pieraksts = Pieraksti(klients=c, nodarbiba=nodarbiba) # booking accepted
                        # SEND ACCEPT MAIL WITH CANCEL CODE (best effort)
                        pieraksts.save()
                        try:
                            mail.send_email(new_email, nod.nodarbiba.nos, nod.sakums, pieraksts.atteikuma_kods)
                        except:
                            pass
                        # booking succeeded
                        args['back'] = False
                        return render_to_response( 'success.html', args )
                    if error == True:
                        args['error'] = True
                        args['form'] = form # ERROR MESSAGE
                        return render_to_response( 'pieraksts.html', args )
            if new == 0:
                # brand-new client
                if getattr(Grafiks.objects.get( id=g_id ), 'vietas') < 1: # IF VIETAS=0 --> ERROR (no seats)
                    error = True
                    args['error_msg'] = u' Atvainojiet visas nodarbības vietas ir aizņemtas'
                else: # seats left --> create client and book
                    new_client = Klienti(vards=new_name, e_pasts=new_email, tel=new_tel, pieteikuma_reizes=1)
                    new_client.save()
                    nodarbiba = Grafiks.objects.get( id=g_id ) # seats -1
                    nodarbiba.vietas -= 1
                    nodarbiba.save()
                    pieraksts = Pieraksti(klients=new_client, nodarbiba=nodarbiba) # booking accepted
                    # SEND ACCEPT MAIL WITH CANCEL CODE (best effort)
                    pieraksts.save()
                    try:
                        mail.send_email(new_email, nod.nodarbiba.nos, nod.sakums, pieraksts.atteikuma_kods)
                    except:
                        pass
                    # booking succeeded
                    args['back'] = False
                    return render_to_response( 'success.html', args )
                if error == True:
                    args['error'] = True
                    args['form'] = form # ERROR MESSAGE
                    return render_to_response( 'pieraksts.html', args )
        else:
            args['form'] = form # ERROR MESSAGE
            return render_to_response( 'pieraksts.html', args )
    return render_to_response( 'pieraksts.html', args )
# =================================================================================================================
# !!!!! ATCELT !!!!!  (cancel)
def cancel(request, id):
    """Render the cancellation page for the booking matching cancel code *id*.

    Cancellation is disabled when less than one hour remains before the class.
    """
    try:
        pieraksts = Pieraksti.objects.get( atteikuma_kods = id)
    except ObjectDoesNotExist: # not existing code --> 404
        return redirect ('main')
    args = {}
    args.update(csrf(request)) # ADD CSRF TOKEN
    args['data'] = pieraksts
    # DISABLE CANCEL 1h before
    import pytz
    tz = pytz.timezone('EET')
    # BUG FIX: the original measured against the module-level ``today``
    # captured at import time, so the remaining time drifted in a long-running
    # server.  datetime.now(tz) also avoids the pytz pitfall of
    # replace(tzinfo=...), which attaches a historical LMT offset.
    now = datetime.datetime.now(tz)
    time_remain = ( pieraksts.nodarbiba.sakums - now ).total_seconds() / 3600
    if time_remain < 1:
        args['disable_cancel'] = True
    args['id'] = id
    return render_to_response( 'cancel.html', args )
# !!!!! ATCELT POST !!!!!  (cancel, POST handler)
def cancel_ok(request):
    """Cancel the booking named by the POSTed hidden ``cancel_id`` field.

    Frees the seat, logs an Atteikumi row, bumps the client's cancellation
    counter and deletes the booking.
    """
    args = {}
    if request.POST:
        id = str(request.POST.get('cancel_id', ''))
        try:
            pieraksts = Pieraksti.objects.get( atteikuma_kods = id)
        except ObjectDoesNotExist: # not existing code --> 404
            return redirect ('main')
        args['data'] = pieraksts
        # free the seat again (Grafiks.vietas += 1)
        pieraksts.nodarbiba.vietas += 1
        pieraksts.nodarbiba.save()
        # ADD Atteikumi (cancellation log entry)
        atteikums = Atteikumi( pieraksta_laiks=pieraksts.pieraksta_laiks, klients=pieraksts.klients, nodarbiba=pieraksts.nodarbiba )
        atteikums.save()
        # bump the client's cancellation counter
        pieraksts.klients.atteikuma_reizes +=1
        pieraksts.klients.save()
        # DELETE PIERAKSTS
        pieraksts.delete()
    return render_to_response( 'canceled.html', args )
# !!!!! ATCELT GET !!!!!  (legacy cancel via GET)
def old_cancel_ok(request, id):
    """Legacy GET cancellation endpoint (cancel code embedded in the URL).

    NOTE(review): state-changing GET without CSRF protection -- kept only for
    old e-mail links; prefer cancel_ok (POST).
    """
    try:
        pieraksts = Pieraksti.objects.get( atteikuma_kods = id)
    except ObjectDoesNotExist: # not existing code --> 404
        return redirect ('main')
    args = {}
    args['data'] = pieraksts
    # free the seat again (Grafiks.vietas += 1)
    pieraksts.nodarbiba.vietas += 1
    pieraksts.nodarbiba.save()
    # ADD Atteikumi (cancellation log entry)
    atteikums = Atteikumi( pieraksta_laiks=pieraksts.pieraksta_laiks, klients=pieraksts.klients, nodarbiba=pieraksts.nodarbiba )
    atteikums.save()
    # bump the client's cancellation counter
    pieraksts.klients.atteikuma_reizes +=1
    pieraksts.klients.save()
    # DELETE PIERAKSTS
    pieraksts.delete()
    return render_to_response( 'canceled.html', args )
|
20,344 | 1f3028d92d779ff3bfbf4bc676c499f03b9572b9 | """This project will take you through the process of mashing up data from two different APIs to make movie recommendations. The TasteDive API lets you provide a movie (or bands, TV shows, etc.) as a query input, and returns a set of related items. The OMDB API lets you provide a movie title as a query input and get back data about the movie, including scores from various review sites (Rotten Tomatoes, IMDB, etc.).
You will put those two together. You will use TasteDive to get related movies for a whole list of titles. You’ll combine the resulting lists of related movies, and sort them according to their Rotten Tomatoes scores (which will require making API calls to the OMDB API.)
To avoid problems with rate limits and site accessibility, we have provided a cache file with results for all the queries you need to make to both OMDB and TasteDive. Just use requests_with_caching.get() rather than requests.get(). If you’re having trouble, you may not be formatting your queries properly, or you may not be asking for data that exists in our cache. We will try to provide as much information as we can to help guide you to form queries for which data exists in the cache.
Your first task will be to fetch data from TasteDive. The documentation for the API is at https://tastedive.com/read/api.
Define a function, called get_movies_from_tastedive. It should take one input parameter, a string that is the name of a movie or music artist. The function should return the 5 TasteDive results that are associated with that string; be sure to only get movies, not other kinds of media. It will be a python dictionary with just one key, ‘Similar’.
Try invoking your function with the input “Black Panther”.
HINT: Be sure to include only q, type, and limit as parameters in order to extract data from the cache. If any other parameters are included, then the function will not be able to recognize the data that you’re attempting to pull from the cache. Remember, you will not need an api key in order to complete the project, because all data will be found in the cache."""
import requests_with_caching
import json
def get_movies_from_tastedive(s):
    """Query the TasteDive similar-items endpoint for up to 5 movies related to *s*.

    Returns the decoded JSON dictionary (single key 'Similar').
    """
    params = {'q': s, 'type': 'movies', 'limit': 5}
    response = requests_with_caching.get("https://tastedive.com/api/similar", params=params)
    return response.json()
#get_movies_from_tastedive("Bridesmaids")
#get_movies_from_tastedive("Black Panther")
def extract_movie_titles(d):
    """Pull the list of movie names out of a TasteDive 'Similar' response dict."""
    return [entry['Name'] for entry in d['Similar']['Results']]
#extract_movie_titles(get_movies_from_tastedive("Tony Bennett"))
# extract_movie_titles(get_movies_from_tastedive("Black Panther"))
def get_related_titles(l):
    """Union of TasteDive related titles across every seed title in *l*.

    Duplicates are removed via a set, so the returned order is unspecified.
    """
    related = []
    for seed in l:
        related.extend(extract_movie_titles(get_movies_from_tastedive(seed)))
    return list(set(related))
#get_related_titles(["Black Panther", "Captain Marvel"])
# get_related_titles([])
def get_movie_data(movieName):
    """Fetch OMDB data for *movieName* and return the decoded JSON dict.

    The request URL is printed for debugging, as in the original.
    """
    query = {'t': movieName, 'r': 'json'}
    response = requests_with_caching.get("http://www.omdbapi.com/", params=query)
    print(response.url)
    return response.json()
#get_movie_data("Venom")
#get_movie_data("Baby Mama")
def get_movie_rating(d):
    """Return the Rotten Tomatoes score from an OMDB dict as an int, or 0 if absent."""
    for rating in d['Ratings']:
        if rating['Source'] == 'Rotten Tomatoes':
            # value looks like '83%': strip the percent sign
            return int(rating['Value'][:-1])
    return 0
#get_movie_rating(get_movie_data("Venom"))
#get_movie_rating(get_movie_data("Deadpool 2"))
def get_sorted_recommendations(l):
    """Return related movie titles for seed list *l*, best Rotten Tomatoes score first.

    Ties are broken by title, descending, matching the original sort rule.
    Fixes: the original shadowed the builtin ``dict`` with a local variable
    and printed the whole ratings mapping as a debug side effect.
    """
    related = get_related_titles(l)
    ratings = {title: get_movie_rating(get_movie_data(title)) for title in related}
    return [t for t, _ in sorted(ratings.items(), key=lambda item: (item[1], item[0]), reverse=True)]
20,345 | 96669ff36e141e42ef2ae6631b2f7163ffc37dae | #coding = utf-8
'''
标题 进行users子应用的视图路由
@name:
@function:
@author: Mr.Fan
@date:2021--
'''
#进行users子应用的视图路由
from django.urls import path
from users.views import RegisterView,ImageCodeView,SmsCodeView,LoginView,LogoutView
from users.views import ForgetPasswordView,UserCentetView,WriteBlogView
urlpatterns = [
    # path() first argument: URL route
    # second argument: the view callable
    path('register/',RegisterView.as_view(),name = 'register'),
    # image captcha route
    path('imagecode/',ImageCodeView.as_view(),name = 'imagecode'),
    # SMS verification code sending
    path('smscode/',SmsCodeView.as_view(),name = 'smscode'),
    # login route
    path('login/',LoginView.as_view(),name = 'login'),
    # logout
    path('logout/', LogoutView.as_view(), name='logout'),
    # forgotten password
    path('forgetpassword/', ForgetPasswordView.as_view(), name='forgetpassword'),
    # personal centre
    path('center/', UserCentetView.as_view(), name='center'),
    # blog-writing route
    path('writeblog/', WriteBlogView.as_view(), name='writeblog'),
]
20,346 | 3ea5f8d9659c310713c8567f88b6865d9b52d072 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class DoubanspiderPipeline(object):
    """Scrapy item pipeline that appends each scraped item to douban.txt."""

    def process_item(self, item, spider):
        """Persist *item* as its dict repr (one per line) and pass it on.

        Bug fix: the original opened the file in 'w' mode, so every item
        overwrote the previous one and only the last item ever survived.
        Opening in append mode (plus a newline per item) keeps them all.
        """
        with open('douban.txt', 'a') as f:
            f.write(str(dict(item)) + '\n')
        print('数据保存完成==============logging!!!')
        return item
|
20,347 | 876cbe0ff0ded197e63be845395231fecc07ae85 | import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# 定义卷积层
def conv(input, filter_shape, bias_shape, strides_shape):
    """Convolution layer: SAME-padded conv2d plus bias, sigmoid activation."""
    kernel = tf.get_variable("filter", filter_shape, initializer=tf.truncated_normal_initializer())
    bias = tf.get_variable("bias", bias_shape, initializer=tf.truncated_normal_initializer())
    feature_map = tf.nn.conv2d(input, kernel, strides=strides_shape, padding='SAME')
    return tf.nn.sigmoid(feature_map + bias)
# 定义池化层
def pooling(input, ksize_shape, strides_shape):
    """Max-pooling layer with SAME padding."""
    return tf.nn.max_pool(input, ksize=ksize_shape, strides=strides_shape, padding='SAME')
# 定义全连接层
def connection(input, weight_shape, bias_shape, flat_shape):
    """Fully-connected layer: flatten the input, apply W·x + b, sigmoid."""
    weight = tf.get_variable("weight", weight_shape, initializer=tf.truncated_normal_initializer())
    bias = tf.get_variable("bias", bias_shape, initializer=tf.truncated_normal_initializer())
    flattened = tf.reshape(input, flat_shape)
    return tf.nn.sigmoid(tf.matmul(flattened, weight) + bias)
# Load the MNIST data set (labels one-hot encoded).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# BUG FIX: batch_size and n_batch were referenced in the training loop below
# but never defined anywhere, which raised a NameError at runtime.
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
with tf.name_scope('Input'):
    x_data = tf.placeholder(tf.float32, [None, 784])
    y_data = tf.placeholder(tf.float32, [None, 10])
    # Reshape flat 784-vectors back into 28x28 single-channel images.
    x_image = tf.reshape(x_data, [-1, 28, 28, 1])
with tf.variable_scope('Conv1'):
    conv1_output = conv(x_image, [5, 5, 1, 6], [6], [1, 1, 1, 1])
with tf.variable_scope('Pooling1'):
    pooling1_output = pooling(conv1_output, [1, 2, 2, 1], [1, 2, 2, 1])
with tf.variable_scope('Conv2'):
    conv2_output = conv(pooling1_output, [5, 5, 6, 16], [16], [1, 1, 1, 1])
with tf.variable_scope('Pooling2'):
    pooling2_output = pooling(conv2_output, [1, 2, 2, 1], [1, 2, 2, 1])
with tf.variable_scope('Conv3'):
    conv3_output = conv(pooling2_output, [5, 5, 16, 120], [120], [1, 1, 1, 1])
with tf.variable_scope('Connection'):
    connection_output = connection(conv3_output, [7*7*120, 80], [80], [-1, 7*7*120])
with tf.variable_scope('dropout'):
    dropout_output = tf.nn.dropout(connection_output, 0.7)
with tf.name_scope('Output'):
    weight = tf.Variable(tf.truncated_normal([80, 10]), dtype=tf.float32)
    bias = tf.Variable(tf.truncated_normal([10]), dtype=tf.float32)
    y_model = tf.nn.softmax(tf.add(tf.matmul(dropout_output, weight), bias))
with tf.name_scope('Loss'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_data, logits=y_model))
    tf.summary.scalar('The variation of the loss', loss)
with tf.name_scope('Accuracy'):
    prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(y_data, 1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    tf.summary.scalar('The variation of the accuracy', accuracy)
with tf.name_scope('Train'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("21_log/", sess.graph)
    merged = tf.summary.merge_all()
    a = []
    for epoch in range(21):
        for batch in range(n_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            sess.run(train_op, feed_dict={x_data: batch_x, y_data: batch_y})
        # Evaluate on the full test set once per epoch.
        print('epoch:', epoch, ',accuracy:', sess.run(accuracy, feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels}))
        summary, acc = sess.run([merged, accuracy], feed_dict={x_data: mnist.test.images, y_data: mnist.test.labels})
        a.append(acc)
        writer.add_summary(summary, epoch)
    writer.close()
# Plot how the test accuracy evolved over the epochs.
plt.plot(a)
plt.title('The variation of the acuracy')
plt.xlabel('The sampling point')
plt.ylabel('Accuracy')
plt.tight_layout()
plt.show()
# epoch: 0 ,accuracy: 0.2409
# epoch: 1 ,accuracy: 0.3678
# epoch: 2 ,accuracy: 0.4367
# epoch: 3 ,accuracy: 0.5451
# epoch: 4 ,accuracy: 0.6318
# epoch: 5 ,accuracy: 0.6978
# epoch: 6 ,accuracy: 0.7291
# epoch: 7 ,accuracy: 0.7388
# epoch: 8 ,accuracy: 0.7763
# epoch: 9 ,accuracy: 0.7666
# epoch: 10 ,accuracy: 0.7734
# epoch: 11 ,accuracy: 0.7958
# epoch: 12 ,accuracy: 0.8025
# epoch: 13 ,accuracy: 0.8005
# epoch: 14 ,accuracy: 0.8065
# epoch: 15 ,accuracy: 0.8007
# epoch: 16 ,accuracy: 0.8091
# epoch: 17 ,accuracy: 0.8176
# epoch: 18 ,accuracy: 0.811
# epoch: 19 ,accuracy: 0.8212
# epoch: 20 ,accuracy: 0.8183
|
20,348 | 501e4dbdc03a96706e8bbd6907bd33e1cb75ca1e | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/1/30 15:25
# @Author : Leslee
import re
import os
import json
import jieba
import random
import pandas as pd
import numpy as np
from tqdm import tqdm
__all__ = ["PreprocessText", "PreprocessTextMulti", "PreprocessSim"]
__tools__ = ["txt_read", "txt_write", "extract_chinese", "read_and_process",
"preprocess_label_ques", "save_json", "load_json", "delete_file",
"transform_multilabel_to_multihot"]
def txt_read(file_path, encode_type='utf-8'):
    """
    Read all non-empty lines of a txt file (default utf-8).
    :param file_path: str, file path
    :param encode_type: str, encoding
    :return: list of stripped, non-empty lines

    Bug fix: the original broke out of the read loop at the FIRST blank
    line, silently dropping everything after it; now the whole file is
    read and only empty lines are skipped. Errors are still printed and
    the lines collected so far are returned (original contract).
    """
    list_line = []
    try:
        # 'with' guarantees the handle is closed even on error.
        with open(file_path, 'r', encoding=encode_type) as file:
            for line in file:
                line = line.strip()
                if line:
                    list_line.append(line)
    except Exception as e:
        print(str(e))
    return list_line
def get_ngram(text, ns=(1,)):
    """
    Return the n-grams of *text* for every n in *ns*, concatenated.
    If the text is shorter than n, the whole text is returned for that n.
    :param text: str (or any sliceable sequence)
    :param ns: list/tuple of positive ints (generalized: tuples now accepted;
               the default is a tuple to avoid a mutable default argument)
    :return: list of n-gram substrings
    :raises RuntimeError: if ns is not a list/tuple or contains n < 1
    """
    # isinstance instead of `type(ns) != list`: accepts subclasses and tuples.
    if not isinstance(ns, (list, tuple)):
        raise RuntimeError("ns of function get_ngram() must be list!")
    for n in ns:
        if n < 1:
            raise RuntimeError("enum of ns must '>1'!")
    len_text = len(text)
    ngrams = []
    for n in ns:
        ngram_n = [text[i:i + n] for i in range(len_text - n + 1)]
        # Fall back to the whole text when it is shorter than n (original behavior).
        if not ngram_n:
            ngram_n.append(text)
        ngrams += ngram_n
    return ngrams
def txt_write(list_line, file_path, type='w', encode_type='utf-8'):
    """
    Write a list of lines to a txt file (lines should already end with a newline).
    :param list_line: list, lines to write
    :param file_path: str, output path
    :param type: str, open mode, e.g. 'w' or 'a' (name kept for API compatibility)
    :param encode_type: str, encoding
    :return: None

    Fix: use a context manager so the handle is closed even when
    writelines raises; the original leaked the handle on error.
    """
    try:
        with open(file_path, type, encoding=encode_type) as file:
            file.writelines(list_line)
    except Exception as e:
        print(str(e))
def extract_chinese(text):
    """
    Keep only CJK characters, ASCII letters/digits and '@', '.', '_'.
    :param text: str, input sentence
    :return: str with all other characters removed
    """
    kept_chars = re.findall(u"([\u4e00-\u9fa5A-Za-z0-9@._])", text)
    return ''.join(kept_chars)
def read_and_process(path):
    """
    Read a csv with 'ques' and 'label' columns and normalize both.
    Returns (line_x, line_y) where line_x are upper-cased labels and
    line_y are upper-cased questions filtered through extract_chinese.
    (Note: only the questions are filtered — the labels are not.)
    """
    frame = pd.read_csv(path)
    questions = frame["ques"].values.tolist()
    labels = frame["label"].values.tolist()
    line_x = [str(lab).upper() for lab in labels]
    line_y = [extract_chinese(str(q).upper()) for q in questions]
    return line_x, line_y
def preprocess_label_ques(path):
    """
    Convert a jsonl file of records with 'title' and 'category' fields
    into csv-style rows 'label,ques\n' (header row included).
    Reading stops at the first line that fails to parse as JSON,
    matching the original behavior.
    """
    rows = ['label,ques\n']
    with open(path, 'r', encoding='utf-8') as f:
        for raw_line in f:
            try:
                record = json.loads(raw_line)
            except:
                break
            title = record['title']
            category = record['category'][0:2]
            tokens = [extract_chinese(word) for word in list(jieba.cut(title, cut_all=False, HMM=True))]
            cleaned_ques = " ".join(tokens).strip().replace(' ',' ')
            rows.append(extract_chinese(category) + ',' + cleaned_ques + '\n')
    return rows
def load_json(path):
    """
    Load JSON from the FIRST line of *path* only (original contract).
    :param path: str
    :return: parsed json object
    """
    with open(path, 'r', encoding='utf-8') as fj:
        first_line = fj.readlines()[0]
    return json.loads(first_line)
def save_json(jsons, json_path):
    """
    Save a json-serializable object to *json_path* (utf-8, non-ascii kept).
    :param jsons: json-serializable object
    :param json_path: str
    :return: None

    Fix: removed the explicit fj.close() inside the `with` block —
    the context manager already closes the handle, so the call was
    redundant (and misleading about the resource's lifetime).
    """
    with open(json_path, 'w', encoding='utf-8') as fj:
        fj.write(json.dumps(jsons, ensure_ascii=False))
def delete_file(path):
    """
    Recursively delete every .h5 and .json file under *path*.
    Other files and the directories themselves are left untouched.
    :param path: str, directory path
    :return: None
    """
    if not os.path.exists(path):
        return
    for entry in os.listdir(path):
        # absolute path of the file or sub-directory
        child = os.path.join(path, entry)
        if os.path.isfile(child):
            # str.endswith accepts a tuple of suffixes
            if child.endswith((".h5", ".json")):
                os.remove(child)
        else:
            # recurse into sub-directories
            delete_file(child)
def transform_multilabel_to_multihot(sample, label=1070):
    """Return a multi-hot list of length *label* with 1.0 at the given indices."""
    multihot = np.zeros(label)
    multihot[sample] = 1
    return multihot.tolist()
class PreprocessText:
    """
    Data preprocessing for classification; input is csv-formatted [label,ques].
    Maintains a persistent label<->index mapping (l2i_i2l) on disk so that
    repeated runs keep a stable label ordering.
    """
    def __init__(self, path_model_dir):
        # label<->index mapping; lazily loaded from disk when it was saved before
        self.l2i_i2l = None
        self.path_fast_text_model_vocab2index = path_model_dir + 'vocab2index.json'
        self.path_fast_text_model_l2i_i2l = path_model_dir + 'l2i_i2l.json'
        if os.path.exists(self.path_fast_text_model_l2i_i2l):
            self.l2i_i2l = load_json(self.path_fast_text_model_l2i_i2l)
    def prereocess_idx(self, pred, digits=5):
        """Map a prediction vector to [(label, rounded score), ...] sorted by
        score descending; result is wrapped in a single-element list.
        Raises RuntimeError when the l2i_i2l mapping file is missing."""
        if os.path.exists(self.path_fast_text_model_l2i_i2l):
            pred_i2l = {}
            i2l = self.l2i_i2l['i2l']
            for i in range(len(pred)):
                pred_i2l[i2l[str(i)]] = round(float(pred[i]), digits)
            pred_i2l_rank = [sorted(pred_i2l.items(), key=lambda k: k[1], reverse=True)]
            return pred_i2l_rank
        else:
            raise RuntimeError("path_fast_text_model_label2index is None")
    def prereocess_pred_xid(self, pred):
        """Map a list of label names to [(label, index), ...] sorted by index
        descending; result is wrapped in a single-element list."""
        if os.path.exists(self.path_fast_text_model_l2i_i2l):
            pred_l2i = {}
            l2i = self.l2i_i2l['l2i']
            for i in range(len(pred)):
                pred_l2i[pred[i]] = l2i[pred[i]]
            pred_l2i_rank = [sorted(pred_l2i.items(), key=lambda k: k[1], reverse=True)]
            return pred_l2i_rank
        else:
            raise RuntimeError("path_fast_text_model_label2index is None")
    def preprocess_label_ques_to_idx(self, embedding_type, path, embed, rate=1, shuffle=True, graph=None):
        """Read the [label,ques] csv at *path*, embed the questions with *embed*
        and one-hot encode the labels.

        :param embedding_type: str, one of 'bert'/'albert'/'xlnet'/other — controls
            how the per-sample embedding tuples are split into model inputs
        :param path: str, csv path with 'ques' and 'label' columns
        :param embed: embedding object exposing sentence2idx() (and .trainable for xlnet)
        :param rate: float, fraction of the corpus to use (small samples are kept whole)
        :param shuffle: bool, shuffle questions/labels in unison before slicing
        :param graph: unused here; kept for interface compatibility
        :return: (x, y) where x is an array or list of arrays depending on
            embedding_type, and y is the one-hot label matrix
        """
        data = pd.read_csv(path, quoting=3)
        ques = data['ques'].tolist()
        label = data['label'].tolist()
        ques = [str(q).upper() for q in ques]
        label = [str(l).upper() for l in label]
        if shuffle:
            ques = np.array(ques)
            label = np.array(label)
            indexs = [ids for ids in range(len(label))]
            random.shuffle(indexs)
            ques, label = ques[indexs].tolist(), label[indexs].tolist()
        # If label2index already exists on disk, reuse it instead of rebuilding
        # (keeps label indices stable across runs).
        if not os.path.exists(self.path_fast_text_model_l2i_i2l):
            label_set = set(label)
            count = 0
            label2index = {}
            index2label = {}
            for label_one in label_set:
                label2index[label_one] = count
                index2label[count] = label_one
                count = count + 1
            l2i_i2l = {}
            l2i_i2l['l2i'] = label2index
            l2i_i2l['i2l'] = index2label
            save_json(l2i_i2l, self.path_fast_text_model_l2i_i2l)
        else:
            l2i_i2l = load_json(self.path_fast_text_model_l2i_i2l)
        len_ql = int(rate * len(ques))
        if len_ql <= 500:  # rate has no effect on small samples, so training data stays sufficient
            len_ql = len(ques)
        x = []
        print("ques to index start!")
        ques_len_ql = ques[0:len_ql]
        for i in tqdm(range(len_ql)):
            que = ques_len_ql[i]
            que_embed = embed.sentence2idx(que)
            x.append(que_embed) # [[], ]
        label_zo = []
        print("label to onehot start!")
        label_len_ql = label[0:len_ql]
        for j in tqdm(range(len_ql)):
            label_one = label_len_ql[j]
            label_zeros = [0] * len(l2i_i2l['l2i'])
            label_zeros[l2i_i2l['l2i'][label_one]] = 1
            label_zo.append(label_zeros)
        count = 0
        # bert/albert embeddings come as (token_ids, segment_ids) pairs
        if embedding_type in ['bert', 'albert']:
            x_, y_ = np.array(x), np.array(label_zo)
            x_1 = np.array([x[0] for x in x_])
            x_2 = np.array([x[1] for x in x_])
            x_all = [x_1, x_2]
            return x_all, y_
        # xlnet embeddings come as 3 (or 4 when trainable) stacked inputs
        elif embedding_type == 'xlnet':
            count += 1
            if count == 1:
                x_0 = x[0]
                print(x[0][0][0])
            x_, y_ = x, np.array(label_zo)
            x_1 = np.array([x[0][0] for x in x_])
            x_2 = np.array([x[1][0] for x in x_])
            x_3 = np.array([x[2][0] for x in x_])
            if embed.trainable:
                x_4 = np.array([x[3][0] for x in x_])
                x_all = [x_1, x_2, x_3, x_4]
            else:
                x_all = [x_1, x_2, x_3]
            return x_all, y_
        else:
            # plain embeddings: a single input matrix
            x_, y_ = np.array(x), np.array(label_zo)
            return x_, y_
|
20,349 | d1cb9e93df488cda5d3052921de6a8443d092418 | from __future__ import annotations
from design_pattern.memento.framework import Originator, Memento, Caretaker
from typing import List
from abc import ABC, abstractmethod
class Implementor(ABC):
    """Abstract client facade over a GroceryList with undo support
    (Memento pattern: GroceryList is the Originator, Caretaker keeps history)."""
    def __init__(self):
        self.__grocery_list = GroceryList(
            title="Untitled"
        )
        # Stack of GroceryListState snapshots used by undo().
        self.__grocery_list_history = Caretaker()
    # NOTE: used as a plain decorator at class-definition time; no
    # @staticmethod is needed because it is applied before the class
    # object exists, so `func` is just the undecorated method.
    def _save_history(func):
        def wrapper(self: Implementor, *args, **kwargs):
            # Snapshot BEFORE the mutation so undo() restores the prior state.
            _state = self.__grocery_list.get_state()
            self.__grocery_list_history.push(
                state=_state
            )
            return func(self, *args, **kwargs)
        return wrapper
    def undo(self):
        """Revert the grocery list to the most recent snapshot, if any."""
        _is_successful, _grocery_list_state = self.__grocery_list_history.try_pop()
        if _is_successful:
            self.__grocery_list.set_state(
                state=_grocery_list_state
            )
    @_save_history
    def set_title(self, *, title: str):
        """Change the list title (undoable)."""
        self.__grocery_list.set_title(
            title=title
        )
    @_save_history
    def add_item(self, *, item: str):
        """Append an item to the list (undoable)."""
        self.__grocery_list.add_item(
            item=item
        )
    def get_title(self) -> str:
        """Return the current list title."""
        return self.__grocery_list.get_title()
    def get_items(self) -> List[str]:
        """Return a copy of the current items."""
        return self.__grocery_list.get_items().copy()
    @abstractmethod
    def show(self):
        """
        Provides an interface for the grocery list
        """
        raise NotImplementedError()
class GroceryList(Originator):
    """Originator holding a titled list of grocery items."""

    def __init__(self, *, title: str):
        self.__title = title
        self.__items = []  # type: List[str]

    def get_title(self) -> str:
        """Return the current title."""
        return self.__title

    def set_title(self, *, title: str):
        """Replace the list's title."""
        self.__title = title

    def get_items(self) -> List[str]:
        """Return a defensive copy of the items."""
        return self.__items.copy()

    def add_item(self, *, item: str):
        """Append *item* to the list."""
        self.__items.append(item)

    def get_state(self) -> GroceryListState:
        """Snapshot the current title and items into a memento."""
        snapshot_items = self.__items.copy()
        return GroceryListState(title=self.__title, items=snapshot_items)

    def set_state(self, *, state: GroceryListState):
        """Restore title and items from a memento."""
        self.__title = state.get_title()
        self.__items = state.get_items()
class GroceryListState(Memento):
    """Memento snapshot of a GroceryList: title plus item list."""

    def __init__(self, *, title: str, items: List[str]):
        self.__title = title
        self.__items = items

    def get_title(self) -> str:
        """Title at snapshot time."""
        return self.__title

    def get_items(self) -> List[str]:
        """Items at snapshot time (returned as-is, no copy — original contract)."""
        return self.__items
|
20,350 | a73889a42cf77cc13ece504354a264d6616f887c | '''
Created on Aug 31, 2017
@author: riccga
'''
# Python 2 script: read N "name number" pairs, then answer N lookups by name.
num = int(raw_input())
phonebook = {}
x = 0
# Build the phonebook: each input line is "<name> <number>".
for i in range( 1, num+1):
    namenum = raw_input()
    sep = namenum.index(' ')
    name = namenum[0:sep]
    numsplit = namenum[sep+1:]
    x += 1
    phonebook[name] = numsplit
people = []
# Read the names to look up (one per line).
for i in range(1, num+1):
    contact = raw_input()
    people.append(contact)
# Answer each query as "name=number", or "Not found".
# NOTE(review): the keys()/index() dance is O(n) per lookup and only works on
# Python 2 where keys() returns a list; `item` alone would suffice — confirm.
for item in people:
    contacts = phonebook.keys()
    if item in contacts:
        lookingfor = contacts.index(item)
        print contacts[lookingfor] + '=' + phonebook[item]
    else:
        print 'Not found'
|
20,351 | 7e0ae1a63bdf840ccb0f7e07c54652af50c0ede7 | import numpy as np
import pyMilne
class MilneEddington:
    """
    MilneEddington class
    Purpose: Implementation of a parallel Milne-Eddington solver with analytical response functions
    Coded in C++/python by J. de la Cruz Rodriguez (ISP-SU, 2020)

    References:
           Landi Degl'Innocenti & Landolfi (2004)
           Orozco Suarez & del Toro Iniesta (2007)
    """
    # *************************************************************************************************
    def _initLine(self, label, anomalous, dw, precision):
        """Build one pyLines/pyLinesf object for a known line label (6301/6302/6173)."""
        if(precision == 'float64'):
            if(label == 6301):
                return pyMilne.pyLines(j1 = 2.0, j2 = 2.0, g1 = 1.84, g2 = 1.50, cw = 6301.4995, gf = 10.**-0.718, anomalous = anomalous, dw = dw)
            elif(label == 6302):
                return pyMilne.pyLines(j1 = 1.0, j2 = 0.0, g1 = 2.49, g2 = 0.00, cw = 6302.4931, gf = 10.**-0.968, anomalous = anomalous, dw = dw)
            elif(label == 6173):
                return pyMilne.pyLines(j1 = 1.0, j2 = 0.0, g1 = 2.50, g2 = 0.00, cw = 6173.3340, gf = 10.**-2.880, anomalous = anomalous, dw = dw)
            else:
                print("pyLines::setLine: Error line with label {0 } is not implented".format(label))
                return pyMilne.pyLines()
        else:
            if(label == 6301):
                return pyMilne.pyLinesf(j1 = 2.0, j2 = 2.0, g1 = 1.84, g2 = 1.50, cw = 6301.4995, gf = 10.**-0.718, anomalous = anomalous, dw = dw)
            elif(label == 6302):
                return pyMilne.pyLinesf(j1 = 1.0, j2 = 0.0, g1 = 2.49, g2 = 0.00, cw = 6302.4931, gf = 10.**-0.968, anomalous = anomalous, dw = dw)
            elif(label == 6173):
                return pyMilne.pyLinesf(j1 = 1.0, j2 = 0.0, g1 = 2.50, g2 = 0.00, cw = 6173.3340, gf = 10.**-2.880, anomalous = anomalous, dw = dw)
            else:
                print("pyLines::setLine: Error line with label {0 } is not implented".format(label))
                return pyMilne.pyLinesf()

    # *************************************************************************************************
    def _get_dtype(self):
        """Return the numpy dtype string matching the C++ backend precision."""
        num = self.Me.get_dtype()
        if(num == 4): return 'float32'
        else: return 'float64'

    # *************************************************************************************************
    def _getLines(self, labels, anomalous, dw, precision):
        """Instantiate a line object per label."""
        nLines = len(labels)
        lines = [None]*nLines
        for ii in range(nLines):
            lines[ii] = self._initLine(labels[ii], anomalous, dw, precision)
        return lines

    # *************************************************************************************************
    def __init__(self, regions, lines, anomalous=True, dw_lines = 20, nthreads=1, precision = 'float32'):
        """
        __init__ method

        Arguments:
             regions: it is a list that contains lists with region information [[wav1, psf1], [wav2, psf2]]
                      where wav1, wav2, psf1, psf2 are float64 numpy arrays. If no PSF is desired, use None.
             lines:   list with the labels of lines to be used (defined in _initLine).
             anomalous: If True, all Zeeman components are calculated for each spectral lines.
             dw_lines: spectral window +/- dw from line center to compute the line profile. Outside that window the profile won't be calculated.
                       Given in km/s (default 20 km/s)
             nthreads: number of threads to be used when synthesizing or inverting. Only relevant if there is
                      more than 1 pixel.
        """
        error = False

        # check regions
        for ii in range(len(regions)):
            if(len(regions[ii]) != 2):
                print("MilneEddington::__init__: ERROR, region {0} has {1} elements, should have 2!".format(ii, len(regions[ii])))
                error = True

        if(error):
            return None

        # Init C++ object
        pyLines = self._getLines(lines, anomalous, dw_lines, precision)

        if(precision == 'float32'):
            self.Me = pyMilne.pyMilne_float(regions, pyLines, nthreads=nthreads, anomalous=anomalous)
        else:
            self.Me = pyMilne.pyMilne(regions, pyLines, nthreads=nthreads, anomalous=anomalous)

    # *************************************************************************************************
    def synthesize(self, model, mu = 1.0):
        """
        synthesize spectra for a given model at a mu angle

        Arguments:
              model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model
              mu:    heliocentric angle for the synthesis

        The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\AA], eta_l, damp, S0, S1]

        Returns:
              4D array [ny,nx,4,nwaw] with the emerging intensity
        """
        ndim = len(model.shape)
        dtype = self._get_dtype()

        if(ndim == 1):
            model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)
        elif(ndim == 3):
            model1 = model
        else:
            print("MilneEddington::synthesize: ERROR, the input model must have 1 or 3 dimensions")
            return None

        if(model1.shape[2] != 9):
            print("MilneEddington::synthesize: ERROR, input model has npar={0}, should be 9".format(model1.shape[2]))
            return None

        isContiguous = model1.flags['C_CONTIGUOUS']
        if(not isContiguous or model1.dtype != dtype):
            model1 = np.ascontiguousarray(model1, dtype=dtype)

        # BUG FIX: the original passed the unprepared `model` (wrong shape /
        # dtype for 1D input) instead of the prepared `model1`.
        return self.Me.synthesize(model1, mu=mu)

    # *************************************************************************************************
    def get_wavelength_array(self):
        """
        get_wavelength_array returns the total wavelength array 1D (regions are concatenated)
        """
        return self.Me.get_wavelength_array()

    # *************************************************************************************************
    def synthesize_rf(self, model, mu=1.0):
        """
        synthesize the spectra and analytical response functions for a given model at a mu angle

        Arguments:
              model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model
              mu:    heliocentric angle for the synthesis

        The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\AA], eta_l, damp, S0, S1]

        Returns:
              a tuple (spectra, response_function)
              spectra: 4D array [ny,nx,4,nwaw] with the emerging intensity
              response_function: 5D array [ny, ny, 9, 4, nwav]
        """
        ndim = len(model.shape)
        dtype = self._get_dtype()

        if(ndim == 1):
            model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)
        elif(ndim == 3):
            model1 = model
        else:
            print("MilneEddington::synthesize_rf: ERROR, the input model must have 1 or 3 dimensions")
            return None

        if(model1.shape[2] != 9):
            print("MilneEddington::synthesize_rf: ERROR, input model has npar={0}, should be 9".format(model1.shape[2]))
            return None

        isContiguous = model1.flags['C_CONTIGUOUS']
        if(not isContiguous or model1.dtype != dtype):
            model1 = np.ascontiguousarray(model1, dtype=dtype)

        # BUG FIX: pass the prepared `model1`, not the raw `model`.
        return self.Me.synthesize_RF(model1, mu=mu)

    # *************************************************************************************************
    def invert(self, model, obs, sig = 1.e-3, mu = 1.0, nRandom = 3, nIter = 20, chi2_thres = 1.0, verbose = False):
        """
        invert observations acquired at a given mu angle

        Arguments:
              model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model
              obs:   2D [4,nwav] or 4D array [ny,nx,4,nwav] with the observed profiles. Should be normalized to the mean continuum.
              sig:   scalar or 2D array [4,nwav] with the noise estimate
              mu:    heliocentric angle for the synthesis
              nRandom: if larger than 1, the input model parameters will be randomized and more inversion will be performed
                       to avoid converging to a local minimum. The best fit will be returned
              nIter: maximum number of Levenberg Marquardt iterations per inversion
              chi2_thres: stop inversion if Chi2 <= chi2_thres
              verbose: only used if nthreads=1, printsout info of each LM iteration

        The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\AA], eta_l, damp, S0, S1]

        Returns:
              a tuple (spectra, response_function)
              spectra: 4D array [ny,nx,4,nwaw] with the emerging intensity
              response_function: 5D array [ny, ny, 9, 4, nwav]
        """
        #
        # Check guessed model properties
        #
        ndim = len(model.shape)
        dtype = self._get_dtype()

        if(ndim == 1):
            model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)
        elif(ndim == 3):
            model1 = model
        else:
            print("MilneEddington::synthesize: ERROR, the input model must have 1 or 3 dimensions")
            return None, None, None

        if(model1.shape[2] != 9):
            print("MilneEddington::synthesize: ERROR, input model has npar={0}, should be 9".format(model1.shape[2]))
            return None, None, None

        isContiguous = model1.flags['C_CONTIGUOUS']
        if(not isContiguous or model1.dtype != dtype):
            model1 = np.ascontiguousarray(model1, dtype=dtype)

        #
        # Check observations
        #
        ndim = len(obs.shape)

        if(ndim == 2):
            # BUG FIX: the original reshaped `model` here instead of `obs`,
            # which raised a ValueError (9 params cannot fill a [4,nwav] grid).
            obs1 = np.ascontiguousarray(obs.reshape((1,1,obs.shape[0], obs.shape[1])), dtype=dtype)
        elif(ndim == 4):
            obs1 = obs
        else:
            print("MilneEddington::invert: ERROR, the input observations must have 2 or 4 dimensions")
            return None, None, None

        wav = self.Me.get_wavelength_array()
        nwav = wav.size

        if(obs1.shape[3] != nwav):
            print("MilneEddington::invert: ERROR, input observations has nwav={0}, should be nwav={1}".format(obs1.shape[3], nwav))
            return None, None, None

        isContiguous = obs1.flags['C_CONTIGUOUS']
        if(not isContiguous or obs1.dtype != dtype):
            obs1 = np.ascontiguousarray(obs1, dtype=dtype)

        #
        # Check sigma
        #
        if isinstance(sig, np.ndarray):
            # BUG FIX: the error message referenced the undefined name `sigma`
            # (NameError whenever the check triggered); use `sig`.
            if(sig.shape[1] != nwav):
                print("MilneEddington::invert: sigma array has nwav={0}, but it should be {1}".format(sig.shape[1], nwav))
                return None, None, None
            sig1 = np.zeros((4,nwav), dtype=dtype, order='c')
            sig1[:] = sig
        else:
            # scalar noise: broadcast into a [4,nwav] array
            sig1 = np.zeros((4,nwav), dtype=dtype, order='c')
            sig1[:] = sig

        #
        # Call C++ module
        #
        return self.Me.invert(model1, obs1, sig1, mu=mu, nRandom=nRandom, nIter = nIter, chi2_thres = chi2_thres, verbose=verbose)

    # *************************************************************************************************
    def get_a_guessed_model(self, ny=1, nx=1):
        """Return a reasonable initial-guess model replicated over [ny,nx] pixels."""
        iPar = np.float64([750, 1.0, 0.39, 0.25, 0.02, 30., 0.1, 0.8, 0.2])
        dtype = self._get_dtype()

        res = np.zeros((ny, nx, 9), dtype = dtype, order='c')
        for ii in range(9):
            res[:,:,ii] = iPar[ii]
        return res

    # *************************************************************************************************
    def repeat_model(self, m_in, ny, nx):
        """
        This routine repeats a 1D model over an entire FOV with dimensions ny, nx pixels
        m_in must have 9 elements
        """
        dtype = self._get_dtype()
        res = np.zeros((ny, nx, 9), dtype = dtype, order='c')

        m = m_in.squeeze()
        nPar = m.shape[0]

        if(nPar != 9):
            print("MilneEddington::repeat_model: Error, input model must have 9 elements!")
            return None

        for ii in range(9):
            res[:,:,ii] = m[ii]

        return res

    # *************************************************************************************************
    def estimate_uncertainties(self, model, obs, sig, mu=1.0):
        """
        estimates uncertainties based on the quality of the fit
        and the parameters sensitivity.

        Model: output model from the inversion [ny, nx, 9]
        Obs  : Observed profiles [ny, nx, 4, nwav]
        sig  : Noise estimate 1D or 2D [4,nwav]

        returns the uncertainty estimate per parameter per pixel [ny, nx, 9]

        Reference: del Toro Iniesta (2003), Eq. 11.30
        """
        syn, J = self.synthesize_rf(model, mu=mu)

        error = model*0
        ny, nx = error.shape[0:2]

        for yy in range(ny):
            for xx in range(nx):
                for kk in range(9):
                    J[yy,xx,kk] /= sig

                Hdiag = (J[yy,xx,:]**2).sum(axis=(1,2))
                error[yy,xx,:] = (((obs[yy,xx]-syn[yy,xx]) / sig )**2).sum()
                for kk in range(9):
                    error[yy,xx,kk] /= Hdiag[kk]

        error *= 2.0 / 9.0

        return np.sqrt(error)

    # *************************************************************************************************
    def invert_spatially_regularized(self, model, obs, sig = 1.e-3, mu = 1.0, nIter = 20, chi2_thres = 1.0, alpha=1.0, alphas=np.ones(9,dtype='float32'), method = 0, delay_bracket = 3):
        """
        invert_spatially_regularized observations acquired at a given mu angle

        Arguments:
              model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model
              obs:   2D [4,nwav] or 4D array [ny,nx,4,nwav] with the observed profiles. Should be normalized to the mean continuum.
              sig:   scalar or 2D array [4,nwav] with the noise estimate
              mu:    heliocentric angle for the synthesis
              nIter: maximum number of Levenberg Marquardt iterations per inversion
              chi2_thres: stop inversion if Chi2 <= chi2_thres
              alpha:  global regularization weight that multiplies the value of "alphas" (default = 1).
              alphas: the relative scaling of regularization weights for each parameter (default = 1).
              method: Numerical method to solve the sparse system: 0) Conjugate Gradient, 1) BiCGStab, 2) SparseLU (default 0)
              delay_bracket: Delay optimal lambda bracketing for this number of iterations. Avoids taking too large steps in the initial iterations.

        The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\AA], eta_l, damp, S0, S1]

        Returns:
              a tuple (spectra, response_function)
              spectra: 4D array [ny,nx,4,nwaw] with the emerging intensity
              response_function: 5D array [ny, ny, 9, 4, nwav]
        """
        #
        # Check guessed model properties
        #
        ndim = len(model.shape)
        dtype = self._get_dtype()

        if(ndim == 1):
            model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)
        elif(ndim == 3):
            model1 = model
        else:
            print("MilneEddington::invert_spatially_regularized_float: ERROR, the input model must have 1 or 3 dimensions")
            return None, None, None

        if(model1.shape[2] != 9):
            print("MilneEddington::invert_spatially_regularized_float: ERROR, input model has npar={0}, should be 9".format(model1.shape[2]))
            return None, None, None

        isContiguous = model1.flags['C_CONTIGUOUS']
        if(not isContiguous or model1.dtype != dtype):
            model1 = np.ascontiguousarray(model1, dtype=dtype)

        #
        # Check observations
        #
        ndim = len(obs.shape)

        if(ndim == 2):
            # BUG FIX: same as in invert() — reshape `obs`, not `model`.
            obs1 = np.ascontiguousarray(obs.reshape((1,1,obs.shape[0], obs.shape[1])), dtype=dtype)
        elif(ndim == 4):
            obs1 = obs
        else:
            print("MilneEddington::invert_spatially_regularized_float: ERROR, the input observations must have 2 or 4 dimensions")
            return None, None, None

        wav = self.Me.get_wavelength_array()
        nwav = wav.size

        if(obs1.shape[3] != nwav):
            print("MilneEddington::invert_spatially_regularized_float: ERROR, input observations has nwav={0}, should be nwav={1}".format(obs1.shape[3], nwav))
            return None, None, None

        isContiguous = obs1.flags['C_CONTIGUOUS']
        if(not isContiguous or obs1.dtype != dtype):
            obs1 = np.ascontiguousarray(obs1, dtype=dtype)

        #
        # Check sigma
        #
        if isinstance(sig, np.ndarray):
            # BUG FIX: error message referenced undefined `sigma`; use `sig`.
            if(sig.shape[1] != nwav):
                print("MilneEddington::invert_spatially_regularized_float: sigma array has nwav={0}, but it should be {1}".format(sig.shape[1], nwav))
                return None, None, None
            sig1 = np.zeros((4,nwav), dtype=dtype, order='c')
            sig1[:] = sig
        else:
            sig1 = np.zeros((4,nwav), dtype=dtype, order='c')
            sig1[:] = sig

        #
        # make alphas: per-parameter weights scaled by the global alpha
        #
        alphas_in = np.zeros(9,dtype=dtype)
        for ii in range(9):
            alphas_in[ii] = alpha * alphas[ii]

        #
        # Call C++ module
        #
        return self.Me.invert_spatially_regularized(model1, obs1, sig1, alphas_in, mu=mu, nIter = nIter, chi2_thres = chi2_thres, method=method, delay_bracket = delay_bracket)
|
20,352 | b9ff3e6a7a953ecfa6ada8e4bdfa97973876ed60 | class Solution(object):
def __init__(self, name):
self.mynName = name
print(self.mynName)
self.myAge = self.getAge(18)
print(self.myAge)
return
def getAge(self, age):
self.myAgehaha = age
return self.myAgehaha
# Instantiated at import time; __init__ prints the name and the default age.
temp = Solution('zcc')
|
20,353 | 9510dadc69aedfaaac68a57e569a7f6a4c4df96e | from keras.datasets import mnist
import matplotlib.pyplot as plt
import random
import utils
import config
# Split MNIST by digit class and persist the per-class arrays as pickles.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
train_data = []
test_data = []
# Index i of each list holds all images whose label equals i.
for i in range(10):
    train_data.append(x_train[y_train==i])
    test_data.append(x_test[y_test==i])
# NOTE(review): save_to_pickle/load_pickle and the *_DATA_PATH constants come
# from the project's utils/config modules — presumably plain pickle round-trips.
utils.save_to_pickle(train_data,config.TRAIN_DATA_PATH)
utils.save_to_pickle(test_data, config.TEST_DATA_PATH)
# check data: reload and show 5 random samples per digit class
train_data = utils.load_pickle(config.TRAIN_DATA_PATH)
plt.figure(figsize=(5, 10))
for i in range(10):
    data_single = train_data[i]
    show_sample_index = random.sample(range(len(data_single)), 5)
    for j in range(5):
        plt.subplot(10, 5, 5 * i + j + 1)
        plt.imshow(data_single[show_sample_index[j]])
        title = '%s' % (i)
        plt.title(title)
plt.show()
20,354 | 7c389b79d472886683c6c99393d7a75b063ed7a5 | import datetime
# Ask for a birth year and report the age in the current calendar year.
birth_year = input("Masukan Tahun Lahir Anda : ")
current_year = datetime.datetime.now().year
age = current_year - int(birth_year)
print("Umur anda adalah {}".format(age))
20,355 | 42e68098bad96202ee9b9bab367ae7067120dd7b | classmates = {"tony": ' man1111',"watwat1": ' man', "ashish": 'my man'}
# Print each classmate's name concatenated with their note.
for nickname, note in classmates.items():
    print (nickname + note)
|
20,356 | b4eb0936f5cf721b34e4d902549ef9d370d2dfcd | import sklearn.preprocessing
from sklearn.model_selection import train_test_split
from wrangle import split_telco
import pandas as pd
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def minmax_scale(train, validate, test):
    """Min-max scale the three telco splits (fit on train only), rebuild them
    as DataFrames, plot tenure/MonthlyCharges/TotalCharges histograms of the
    scaled train split, and return (train_scaled, validate_scaled, test_scaled).

    NOTE(review): `plt` is used below but matplotlib is never imported in this
    module — this raises a NameError at runtime; confirm and add the import.
    """
    # Make the thing
    scaler = sklearn.preprocessing.MinMaxScaler()
    # We fit on the training data
    # in a way, we treat our scalers like our ML models
    # we only .fit on the training data
    scaler.fit(train)
    train_scaled = scaler.transform(train)
    validate_scaled = scaler.transform(validate)
    test_scaled = scaler.transform(test)
    # turn the numpy arrays into dataframes (train's columns reused for all splits)
    train_scaled = pd.DataFrame(train_scaled, columns=train.columns)
    validate_scaled = pd.DataFrame(validate_scaled, columns=train.columns)
    test_scaled = pd.DataFrame(test_scaled, columns=train.columns)
    # histogram grid: tenure spans the top row, charges share the bottom row
    fig = plt.figure(figsize=(12, 6))
    gs = plt.GridSpec(2,2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])
    ax1.title.set_text('Tenure')
    ax2.title.set_text('MonthlyCharges')
    ax3.title.set_text('TotalCharges')
    ax1.hist(train_scaled.tenure)
    ax2.hist(train_scaled.MonthlyCharges)
    ax3.hist(train_scaled.TotalCharges)
    return train_scaled, validate_scaled, test_scaled
def standard_scale(train, validate, test):
    """Standard-scale (zero mean, unit variance) the three splits.

    Fit only on `train`; transform all three splits. Also draws histograms
    of the scaled tenure/MonthlyCharges/TotalCharges columns.

    Returns:
        (train_scaled, validate_scaled, test_scaled) DataFrames.
    """
    # Bug fix: `plt` was never imported at module level (NameError at runtime).
    import matplotlib.pyplot as plt

    scaler = sklearn.preprocessing.StandardScaler()
    # Only .fit on the training data; .transform all splits.
    scaler.fit(train)
    train_scaled = pd.DataFrame(scaler.transform(train), columns=train.columns)
    validate_scaled = pd.DataFrame(scaler.transform(validate), columns=train.columns)
    test_scaled = pd.DataFrame(scaler.transform(test), columns=train.columns)
    # Visualize the scaled training distributions.
    fig = plt.figure(figsize=(12, 6))
    gs = plt.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])
    ax1.title.set_text('Tenure')
    ax2.title.set_text('MonthlyCharges')
    ax3.title.set_text('TotalCharges')
    ax1.hist(train_scaled.tenure)
    ax2.hist(train_scaled.MonthlyCharges)
    ax3.hist(train_scaled.TotalCharges)
    return train_scaled, validate_scaled, test_scaled
def robust_scale(train, validate, test):
    """Robust-scale (median/IQR based) the three splits, fitting on train only.

    Also draws histograms of the scaled tenure/MonthlyCharges/TotalCharges
    columns.

    Returns:
        (train_scaled, validate_scaled, test_scaled) DataFrames.
    """
    # Bug fix: `plt` was never imported at module level (NameError at runtime).
    import matplotlib.pyplot as plt

    scaler = sklearn.preprocessing.RobustScaler()
    # Only .fit on the training data; .transform all splits.
    scaler.fit(train)
    train_scaled = pd.DataFrame(scaler.transform(train), columns=train.columns)
    validate_scaled = pd.DataFrame(scaler.transform(validate), columns=train.columns)
    test_scaled = pd.DataFrame(scaler.transform(test), columns=train.columns)
    # Visualize the scaled training distributions.
    fig = plt.figure(figsize=(12, 6))
    gs = plt.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])
    ax1.title.set_text('Tenure')
    ax2.title.set_text('MonthlyCharges')
    ax3.title.set_text('TotalCharges')
    ax1.hist(train_scaled.tenure)
    ax2.hist(train_scaled.MonthlyCharges)
    ax3.hist(train_scaled.TotalCharges)
    return train_scaled, validate_scaled, test_scaled
def quantile_transformer(train, validate, test):
    """Quantile-transform the three splits to a normal output distribution.

    Fit only on `train`; transform all three splits. Also draws histograms
    of the transformed tenure/MonthlyCharges/TotalCharges columns.

    Returns:
        (train_scaled, validate_scaled, test_scaled) DataFrames.
    """
    # Bug fix: `plt` was never imported at module level (NameError at runtime).
    import matplotlib.pyplot as plt

    scaler = sklearn.preprocessing.QuantileTransformer(output_distribution='normal')
    # Only .fit on the training data; .transform all splits.
    scaler.fit(train)
    train_scaled = pd.DataFrame(scaler.transform(train), columns=train.columns)
    validate_scaled = pd.DataFrame(scaler.transform(validate), columns=train.columns)
    test_scaled = pd.DataFrame(scaler.transform(test), columns=train.columns)
    # Visualize the transformed training distributions.
    fig = plt.figure(figsize=(12, 6))
    gs = plt.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])
    ax1.title.set_text('Tenure')
    ax2.title.set_text('MonthlyCharges')
    ax3.title.set_text('TotalCharges')
    ax1.hist(train_scaled.tenure)
    ax2.hist(train_scaled.MonthlyCharges)
    ax3.hist(train_scaled.TotalCharges)
    return train_scaled, validate_scaled, test_scaled
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Fucntions that show set scaler, original, and scaled
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def visualize_scaled_date(scaler, scaler_name, feature):
    """Plot original vs scaled values of one feature of the module's train set.

    Top panel: scatter of original vs scaled values; bottom panels: histograms
    of the original and scaled distributions.

    NOTE(review): relies on a module/notebook-level `train` DataFrame being in
    scope -- confirm against callers; consider passing it in explicitly.
    """
    # Bug fix: `plt` was never imported at module level (NameError at runtime).
    import matplotlib.pyplot as plt

    train_scaled = scaler.fit_transform(train[[feature]])
    fig = plt.figure(figsize=(12, 6))
    gs = plt.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])
    ax1.scatter(train[[feature]], train_scaled)
    ax1.set(xlabel=feature, ylabel='Scaled_' + feature, title=scaler_name)
    ax2.hist(train[[feature]])
    ax2.set(title='Original')
    # Bug fix: the original referenced an undefined name `scaled` here.
    ax3.hist(train_scaled)
    ax3.set(title='Scaled')
    plt.tight_layout();
def scale_fit_transform(X_train, X_validate, X_test):
    """Min-max scale four zillow columns per split, adding '<col>_scaled' columns.

    Each column of each split is independently fit_transform-ed (this mirrors
    the original behavior: the scaler is re-fit per column per split).

    NOTE(review): fitting separately on validate/test leaks their own
    statistics into their scaling; conventional practice is to fit on train
    only -- kept as-is to preserve behavior.

    Side effects:
        Adds a '<col>_scaled' column to each of X_train, X_validate, X_test.

    Returns:
        The four scaled TRAIN arrays, in column order:
        (lotsizesquarefeet, calculatedfinishedsquarefeet, bedroomcnt, bathroomcnt).
    """
    # Define the thing once; fit_transform below re-fits it for every column,
    # so the original's 12 standalone scaler.fit(...) calls were dead work and
    # have been removed.
    scaler = sklearn.preprocessing.MinMaxScaler()
    cols = ['lotsizesquarefeet', 'calculatedfinishedsquarefeet',
            'bedroomcnt', 'bathroomcnt']
    train_scaled = []
    for df in (X_train, X_validate, X_test):
        for col in cols:
            # Single step to fit and transform this column of this split.
            scaled = scaler.fit_transform(df[[col]])
            # Make a new 'scaled' column in the original dataframe.
            df[col + '_scaled'] = scaled
            if df is X_train:
                train_scaled.append(scaled)
    return tuple(train_scaled)
20,357 | df06da491ed96964663fe0643f53e7200229ec67 | from django.db import models
from account.models import User
class Message(models.Model):
    """A single chat/message entry: free text plus its authoring user."""
    # Message body, capped at 1024 characters.
    text = models.CharField(max_length=1024)
    # Deleting the user cascades to their messages. Reverse accessor is
    # user.messeges -- NOTE(review): "messeges" is a typo, but renaming it
    # would break existing queries, so it is documented rather than fixed here.
    user = models.ForeignKey(User, related_name="messeges",
                             on_delete=models.CASCADE)
|
20,358 | 15901dc6fde2f6336624e5047ab66ad8fe38b12a | from sys import stdout as ostream
from sys import stdin as istream
import heapq
import math
f = istream = open('input.txt')
o = ostream = open('output.txt', 'w')
"""
QR - 2019
"""
def solve_latin_square():
    """Code Jam 'Vestigium': for each matrix, output its trace and the number
    of rows/columns containing a repeated value (values are in 1..N)."""
    cases = int(f.readline())
    for case_no in range(1, cases + 1):
        size = int(f.readline())
        grid = [list(map(int, f.readline().split())) for _ in range(size)]
        # Sum of the main diagonal.
        trace = sum(grid[k][k] for k in range(size))
        # A row/column is "bad" when it holds a duplicate, i.e. its set of
        # values is smaller than the row/column length.
        bad_rows = sum(1 for row in grid if len(set(row)) != size)
        bad_cols = sum(1 for col in zip(*grid) if len(set(col)) != size)
        o.write("Case #%d: %d %d %d\n" % (case_no, trace, bad_rows, bad_cols))
def solve_min_paranthesis():
    """Code Jam 'Nesting Depth': insert the minimum number of parentheses so
    that each digit d sits inside exactly d nested pairs."""
    cases = int(f.readline())
    for case_no in range(1, cases + 1):
        digits = f.readline().strip()
        out = []
        depth = 0
        for ch in digits:
            target = int(ch)
            # Open or close just enough parens to reach this digit's depth.
            if target > depth:
                out.append('(' * (target - depth))
            elif target < depth:
                out.append(')' * (depth - target))
            out.append(ch)
            depth = target
        # Close whatever is still open after the final digit.
        out.append(')' * depth)
        o.write("Case #%d: %s\n" % (case_no, ''.join(out)))
def solve_overlapping_schedules():
    """Code Jam 'Parenting Partnering Returns': greedily assign each activity
    to C or J (whoever is free), or print IMPOSSIBLE.

    Activities are sorted by start time; each is given to C if C is free,
    else to J if J is free, else the case is impossible.
    """
    t = int(f.readline())
    for test_no in range(1, t + 1):
        n = int(f.readline())
        a = []
        for i in range(n):
            l = list(map(int, f.readline().strip().split()))
            l.append(i)  # remember original position for the answer string
            a.append(l)
        a.sort()
        # Bug fix: the original initialised j_next to a[1][0], which raises
        # IndexError when n == 1. Starting both at 0 is equivalent for n >= 2
        # (the first interval not taken by C starts no earlier than a[1][0]).
        c_next, j_next = 0, 0
        okay = True
        res = [''] * n
        for s, e, i in a:
            if s >= c_next:
                res[i] = 'C'
                c_next = max(c_next, e)
            elif s >= j_next:
                res[i] = 'J'
                j_next = max(j_next, e)
            else:
                okay = False
                break
        o.write("Case #%d: %s\n" % (test_no, ''.join(res) if okay else 'IMPOSSIBLE'))
if __name__ == '__main__':
    # Only problem C's solver runs; swap in another solve_* for other problems.
    solve_overlapping_schedules()
    # Flush the output
    o.flush()
20,359 | 70e04ccb281025dec953cfeef4a43ae8541910c9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = ('Kaan Akşit')
try:
import sys,time
from array import array
from jnius import autoclass
from jnius import cast
except ImportError, err:
print "couldn't load module. %s" % (err)
sys.exit()
# Class for serial communication using USB-OTG cable in an Android OS.
class Serial:
    """USB-OTG serial link on Android (Python 2 / Kivy + pyjnius).

    Wraps Android's UsbManager/UsbDeviceConnection APIs, claims the first
    interface of the device named `port`, configures it with a sequence of
    vendor control transfers, and exposes bulk send/read helpers.
    """
    def __init__(self,port,speed):
        # Resolve the Android USB host classes through pyjnius autoclass.
        self.Context = autoclass('android.content.Context')
        self.UsbConstants = autoclass('android.hardware.usb.UsbConstants')
        self.UsbDevice = autoclass('android.hardware.usb.UsbDevice')
        self.UsbDeviceConnection = autoclass('android.hardware.usb.UsbDeviceConnection')
        self.UsbEndpoint = autoclass('android.hardware.usb.UsbEndpoint')
        self.UsbInterface = autoclass('android.hardware.usb.UsbInterface')
        self.UsbManager = autoclass('android.hardware.usb.UsbManager')
        self.UsbRequest = autoclass('android.hardware.usb.UsbRequest')
        self.PythonActivity = autoclass('org.renpy.android.PythonActivity')
        self.activity = self.PythonActivity.mActivity
        self.speed = speed
        self.port = port
        # Last buffer read from the device (filled by read()).
        self.ReadCache = []
        self.usb_mgr = cast(self.UsbManager, self.activity.getSystemService(self.Context.USB_SERVICE))
        # Debug: list the names of all attached USB devices.
        print [d.getKey() for d in self.usb_mgr.getDeviceList().entrySet().toArray()]
        self.device = self.usb_mgr.getDeviceList().get(port)
        # Command polled by asyncRead(); device-specific.
        self.cmd = 'k00'
        if self.device:
            # Ask the user for permission to access the device.
            Intent = autoclass('android.content.Intent')
            PendingIntent = autoclass('android.app.PendingIntent')
            ACTION_USB_PERMISSION = "com.access.device.USB_PERMISSION"
            intent = Intent(ACTION_USB_PERMISSION)
            pintent = PendingIntent.getBroadcast(self.activity,0,intent,0)
            self.usb_mgr.requestPermission(self.device,pintent)
            if self.usb_mgr.hasPermission(self.device):
                print 'Device permission granted!'
                print 'InterfaceCount: ', self.device.getInterfaceCount()
                self.intf = cast(self.UsbInterface, self.device.getInterface(0))
                self.UsbConnection = cast(self.UsbDeviceConnection,self.usb_mgr.openDevice(self.device))
                print self.UsbConnection
                self.UsbConnection.claimInterface(self.intf, True)
                print 'SerialNumber: ', self.UsbConnection.getSerial()
                # Vendor (0x40) control transfers configuring the adapter.
                # NOTE(review): magic request/value numbers -- presumably the
                # reset/baud-rate/line-format setup of an FTDI-style chip;
                # confirm against the adapter's datasheet before changing.
                self.UsbConnection.controlTransfer(0x40, 0, 0, 0, None, 0, 0)
                self.UsbConnection.controlTransfer(0x40, 0, 1, 0, None, 0, 0)
                self.UsbConnection.controlTransfer(0x40, 0, 2, 0, None, 0, 0)
                self.UsbConnection.controlTransfer(0x40, 2, 0, 0, None, 0, 0)
                self.UsbConnection.controlTransfer(0x40, 3, 0x0034, 0, None, 0, 0)
                self.UsbConnection.controlTransfer(0x40, 4, 8, 0, None, 0, 0)
                # Find the bulk IN/OUT endpoints of the claimed interface.
                for i in xrange(0, self.intf.getEndpointCount()):
                    if self.intf.getEndpoint(i).getType() == self.UsbConstants.USB_ENDPOINT_XFER_BULK:
                        if self.intf.getEndpoint(i).getDirection() == self.UsbConstants.USB_DIR_IN:
                            self.epIN = self.intf.getEndpoint(i)
                        elif self.intf.getEndpoint(i).getDirection() == self.UsbConstants.USB_DIR_OUT:
                            self.epOUT = self.intf.getEndpoint(i)
            else:
                print 'Device permission not granted!'
        else:
            print 'Device not found.'
            sys.exit()
        return
    def send(self,msg):
        # Bulk-write the message as a list of byte values; returns True
        # unconditionally (transfer result is not checked).
        MsgOut = msg
        MsgOutHex = map(ord,MsgOut)
        self.UsbConnection.bulkTransfer(self.epOUT, MsgOutHex, len(MsgOutHex), 0)
        return True
    def read(self,BufSize=35):
        # Short settle delay, then bulk-read up to BufSize bytes into
        # ReadCache. Returns True on success; implicitly None on timeout/error.
        time.sleep(0.03)
        response = [0]*BufSize
        length = self.UsbConnection.bulkTransfer(self.epIN, response, len(response), 50)
        if length >= 0:
            self.ReadCache = response
        return True
    def asyncRead(self):
        # Poll the device: send the query command, then read the reply.
        self.send(self.cmd)
        self.read()
        return
    def disconnet(self):
        # Close the USB connection. NOTE(review): method name is a typo for
        # "disconnect", kept because callers may already use it.
        self.UsbConnection.close()
        return True
def main():
    """Placeholder entry point; nothing to do when run standalone."""
    return True
# Call for main definition upon initialization.
if __name__ == '__main__':
    # NOTE(review): main() returns True, so sys.exit(True) exits with code 1.
    sys.exit(main())
|
20,360 | 3e2b9aaff815881d04e0459a2a911af762fa8c78 | from textblob import TextBlob
from flask import Flask, render_template, request
def get_sentiment(text):
    """Label *text* by TextBlob polarity: 'positive.', 'neutral.' or 'negative.'."""
    polarity = TextBlob(text).sentiment.polarity
    if polarity > 0:
        return "positive."
    if polarity < 0:
        return "negative."
    return "neutral."
app = Flask(__name__)
@app.route("/", methods = ["GET", "POST"])
def index():
    # POST: analyze the submitted "text" form field and return the bare
    # sentiment label; GET: serve the input form.
    if request.method == "POST":
        content = request.form.get("text")
        sentiment = get_sentiment(content)
        return sentiment
    return render_template("index.html")
if __name__ == "__main__":
    # Development server only; debug mode must be disabled in production.
    app.run(debug = True)
|
20,361 | 3bf51d7cd94db279314b49cab58bcf682f6461a4 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import webapp2
import jinja2
from google.appengine.ext import db
import logging
import json
from google.appengine.api import memcache
from datetime import datetime
from datetime import timedelta
import time
template_dir=os.path.join(os.path.dirname(__file__),'templates')
jinja_env=jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),autoescape=True)
## creating database
class Art(db.Model):
    """A blog post: subject + body with created/last-modified timestamps."""
    subject=db.StringProperty(required=True)
    content=db.TextProperty(required=True)
    created=db.DateTimeProperty(auto_now_add=True)
    last_modified=db.DateTimeProperty(auto_now = True)
    def render(self):
        # Preserve the author's line breaks when rendering as HTML.
        self._render_text = self.content.replace('\n', '<br>')
        return render_str("post.html", p = self)
class User(db.Model):
    """A registered user.

    NOTE(review): the password is stored in plain text -- hash + salt it
    before any real deployment.
    """
    username = db.StringProperty(required = True)
    password = db.TextProperty(required = True)
    created = db.DateTimeProperty(auto_now_add = True)
def render_str(template, **params):
    """Render the named Jinja template with *params* and return the string."""
    return jinja_env.get_template(template).render(params)
class Handler(webapp2.RequestHandler):
    """Base request handler with Jinja template-rendering helpers."""
    a="Happy"  # NOTE(review): unused class attribute, looks like leftover debug state
    def write(self, *a,**kw):
        # Write raw output to the HTTP response.
        self.response.out.write(*a,**kw)
    def render_str(self,template,**params):
        t=jinja_env.get_template(template)
        return t.render(params)
    def render(self,template,**kw):
        a="Happy"  # NOTE(review): unused local, same leftover as above
        self.write(self.render_str(template,**kw))
### BLOG STUFF
def blog_key(name = 'default'):
    # Parent key grouping all blog posts into one datastore entity group.
    return db.Key.from_path('arts', name)
# Module-level timestamps used to report the age of the cached front page.
first_date = time.time()
delta_date = time.time()
def cache_front(update=False):
    # Return the (possibly memcached) front-page post query; re-query the
    # datastore when the cache is cold or update=True is passed.
    key='front'
    contents = memcache.get(key)
    global delta_date
    global first_date
    if contents is None or update:
        logging.error("DB Query")
        contents=db.GqlQuery("SELECT * FROM Art ORDER BY created DESC")
        memcache.set(key, contents)
        t = time.time()
        # Seconds elapsed since the reference timestamp; shown as cache age.
        delta_date = (t - first_date)
        logging.error(delta_date)
    return contents
def cache_post(post_id, update=False):
    # Return one post by id, caching both the entity and the time it was
    # cached (under '<id>_t') so the permalink page can show cache age.
    post = memcache.get(post_id)
    key_str = str(post_id)+'_t'
    if post is None or update:
        logging.error("DB Query post %s" % post_id)
        key = db.Key.from_path('Art', int(post_id), parent=blog_key())
        post = db.get(key)
        memcache.set(post_id, post)
        memcache.set(key_str, time.time())
    return (post)
class Home(Handler):
    """Front page: lists all posts, newest first, greeting the cookie user."""
    def render_front(self,username="",subject="",content="",error=""):
        contents=cache_front()
        self.render("home.html",username=username,contents=contents, time=int(delta_date))
    def get(self):
        #self.write("asciichan!")
        user=self.request.cookies.get('name')
        self.render_front(user)
class NewPost(Handler):
    """Create a new blog post; both fields are required."""
    def get(self):
        self.render("newpost.html")
    def post(self):
        subject = self.request.get('subject')
        content = self.request.get('content')
        if subject and content:
            p = Art(parent = blog_key(), subject = subject, content = content)
            p.put()
            # Redirect to the new post's permalink page.
            self.redirect('/blog/%s' % str(p.key().id()))
        else:
            error = "subject and content, please!"
            self.render("newpost.html", subject=subject, content=content, error=error)
class SignUp(Handler):
    """User registration; sets a plain 'name' cookie on success."""
    logging.info(Handler.a)  # NOTE(review): runs once at class-definition time
    def render_front(self,user="",email="", error=""):
        self.render("signup.html",username=user,verify="",email=email, error=error)
    def get(self):
        self.render_front()
    def post(self):
        user=self.request.get("username")
        email=self.request.get("email")
        password=self.request.get("password")
        verify=self.request.get("verify")
        # Case-insensitive lookup to reject duplicate usernames.
        user_in_db=db.GqlQuery("SELECT * FROM User WHERE username=:1 limit 1", user)
        u = user_in_db.get()
        if u:
            u = u.username
        if u == user:
            error="user jah existe!"
            self.render_front(user,email,error)
        else:
            if password != verify:
                error="password != verify"
                self.render_front(user,email,error)
            else:
                u=User(username=user,password=password)
                u.put() #Stores data in database
                self.response.headers['Content-Type'] = 'text/plain'
                # NOTE(review): unsigned plain-text cookie; trivially forgeable.
                self.response.headers.add_header('Set-Cookie','name=%s;Path=%s' % (str(user), '/'))
                self.redirect("/blog")
class Welcome(Handler):
    """Post-signup landing page; greets the user named in the 'name' cookie."""
    def render_front(self,user="",email="", error=""):
        self.render("welcome.html",username=user, error=error)
    def get(self):
        user=self.request.cookies.get('name')
        self.render_front(user)
class Login(Handler):
    """Login form; sets the plain 'name' cookie on success."""
    def render_front(self,user="",email="", error=""):
        self.render("login.html",username=user, email=email, error="")
    def get(self):
        user=self.request.get('username')
        email=self.request.get('email')
        self.render_front(user,email)
    def post(self):
        user = self.request.get('username')
        password = self.request.get('password')
        user_in_db=db.GqlQuery("SELECT * FROM User WHERE username=:1 limit 1", user)
        u = user_in_db.get()
        # NOTE(review): when the user does not exist, no branch runs and no
        # response is written at all -- probably should render an error.
        if u:
            if u.password == password:
                self.response.headers['Content-Type'] = 'text/plain'
                self.response.headers.add_header('Set-Cookie','name=%s;Path=%s' % (str(user), '/'))
                self.redirect('/blog')
            else:
                self.render_front(user,password,'erro! user or pass wrong')
class Logout(Handler):
    """Clears the 'name' cookie and returns to the signup page."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.headers.add_header('Set-Cookie','name=%s; Path=%s' % ('', '/'))
        self.redirect('/blog/signup')
class PostPage(Handler):
    """Permalink page for one post, with a "queried N seconds ago" display."""
    def get(self, post_id):
        ############
        post = cache_post(post_id)
        logging.error(post.content)
        if not post:
            self.error(404)
            return
        # Age of the cached copy in seconds.
        # NOTE(review): if the '<id>_t' key is missing, long(None) raises
        # TypeError before the `or time.time()` fallback can apply.
        t_0 = long(memcache.get(post_id+'_t')) or time.time()
        t = time.time()
        dt = int(t - t_0)
        self.render("permalink.html", post=post, time=dt)
class JSON(Handler):
    """JSON feed: all posts ('/blog.json') or one post ('/blog/<id>.json')."""
    def render_json(self, post_id):
        jstruct = []
        c = 'content'
        s = 'subject'
        self.response.headers['Content-Type'] = 'application/json'
        if str(post_id) == '.json':
            # No id captured: dump every post, newest first.
            contents=db.GqlQuery("SELECT * FROM Art ORDER BY created DESC")
            for e in contents:
                jstruct.append({c:e.content, s:e.subject})
        else:
            key = db.Key.from_path('Art', int(post_id), parent=blog_key())
            post = db.get(key)
            if not post:
                self.error(404)
                return
            jstruct.append({c:post.content, s:post.subject})
        self.render("json.js",json_struct=json.dumps(jstruct))
    def get(self, post_id):
        self.render_json(post_id)
class Flush(Handler):
    """Clears all memcache entries and resets the cache-age timers."""
    def get(self):
        global delta_date
        global first_date
        delta_date=time.time()
        first_date=delta_date
        memcache.delete('front')
        memcache.flush_all()
        self.redirect('/blog')
app = webapp2.WSGIApplication([('/blog/?', Home),
('/blog/([0-9]+)', PostPage),
('/blog/(.json)', JSON),
('/blog(.json)', JSON),
('/blog/([0-9]+).json', JSON),
('/blog/newpost', NewPost),
('/blog/signup', SignUp),
('/blog/login', Login),
('/blog/logout', Logout),
('/blog/flush', Flush)],
debug=True)
|
20,362 | bb91e22d9ae18101ea536e911a4ea687fc27741f |
# Usa el fichero ten-more-contries.txt para añadir a la base de datos,
# database.db 10 paises mas.
import sqlite3
import pandas as pd

# Load the ten extra countries from the CSV-formatted text file.
data = pd.read_csv("exercices\\ficheros\\ten-more-countries.txt")
print(type(data))
conn = sqlite3.connect('exercices\\ficheros\\database.db')
try:
    # Append the rows to the existing `countries` table, without writing the
    # DataFrame index as a column.
    data.to_sql("countries", conn, if_exists="append", index=False)
finally:
    # Fix: the original created an unused cursor and never closed the
    # connection; always release it, even if to_sql fails.
    conn.close()
20,363 | d67f90cf3bc10668b844a5433728b151dd77d398 | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import nltk
'''
★ Define a function find_language() that takes a string
as its argument, and returns a list of languages that have that
string as a word. Use the udhr corpus and limit your searches
to files in the Latin-1 encoding.
'''
from nltk.corpus import udhr
def find_language(text):
    """Return the languages whose UDHR (Latin-1) corpus contains *text* as a word.

    Bug fix: the exercise asks for a list of languages, but the original only
    printed them and implicitly returned None. Each match is still printed to
    preserve the original side effect.
    """
    matches = []
    for fileid in udhr.fileids():
        # Limit the search to files in the Latin-1 encoding.
        if fileid[-6:] == "Latin1":
            if text in udhr.words(fileid):
                language = fileid[0:-7]  # strip the trailing "-Latin1"
                print (language)
                matches.append(language)
    return matches
find_language("human")
|
20,364 | 49148a01274877a59d492433469884d509ff7417 | l1=[[4,5,2,1],[8,4,2,3]]
l2=[[2,3,4,1],[5,6,2,7]]
# Result matrix, pre-sized to match the 2x4 operands.
c=[[0,0,0,0],[0,0,0,0]]
# Element-wise sum of l1 and l2 (Python 2: print is a statement).
for i in range(len (l1)):
    print l1[i]
    for j in range(len(l1[i])):
        print l1[i][j]
        c[i][j]=l1[i][j]+l2[i][j]
print c
#
# c[i],[j]=l1[i][j]+l2[i][j]
# print(c[i][j]) |
20,365 | ab0d14350b233b959d9d394609a38f3f80e51760 | #!/usr/bin/env python3
'''
This is a script to normalize Kodak PhotoCD image files that have an unusual
but documented history:
First the files were created on a proprietary Kodak PhotoCD.
Second the files were read on a CD-ROM drive on a Macintosh running Mac OS9.x
The Mac operating system did some funny stuff to the files:
- the .PCD file was wrapped as a QuickTime Image File (QTIF)
- the QTIF resource was then wrapped as a PICT resource, which was readable
by Mac OS9
This script uses `deark` to read the PICT, then the QTIF,
then it uses `imagemagick` to convert the original PCD file
to PNG, then again from PNG to TIFF. This is necessary because of some
unusual colorspace issues with PCD, which can't be converted as-is to TIFF.
Both these programs need to be available on your Python path (e.g. `/usr/local/bin`)
Put the script into the folder w the image files, then run `python3 pcdExtractor.py`
'''
import os
import subprocess
for base in os.listdir('.'):
    # assumes everything in the directory other than this file
    # is a PCD you want to extract.
    if not base.startswith('.'):
        if not base.endswith('py'):
            here = os.path.abspath('.')
            path = os.path.abspath(base)
            ## INTERMEDIARY IMAGE PATHS
            qtifPath = path+'.000.qtif' # this is extracted by `deark`
            pcdPath = path+'.000.pcd' # then this is extracted by `deark`
            pngPath = path+'.png' # this is output by `magick`
            tiffPath = path+'.tif' # then so is this, it's the final output
            ###
            ### DEARK
            ###
            # The first pass extracts the QTIF
            deark1 = [
                'deark',
                '-o',base,
                path
                ]
            subprocess.run(deark1)
            # The second pass extracts the underlying PCD
            deark2 = [
                'deark',
                '-o',base,
                qtifPath
                ]
            subprocess.run(deark2)
            ###
            ### IMAGEMAGICK
            ###
            # The first pass converts the PCD to a PNG
            # ([6] selects the highest-resolution frame of the PCD).
            magick1 = [
                'magick',
                pcdPath+'[6]',
                pngPath
                ]
            subprocess.run(magick1)
            # The second pass coverts the PNG to TIFF
            magick2 = [
                'magick',
                pngPath,
                tiffPath
                ]
            subprocess.run(magick2)
            ###
            ### NOW CLEAN UP THE INTERMEDIATE FILES
            ### (only the final TIFF survives; the original PCD is deleted too)
            for residual in (path,qtifPath,pcdPath,pngPath):
                os.remove(residual)
|
20,366 | 9da54b3879affc9b498e7467161909b295cbeab7 | # IMM4LFO 2011-09-16
#
from django.shortcuts import render_to_response, get_object_or_404, redirect
#from django.http import HttpResponseRedirect
#from django.core.urlresolvers import reverse
from django.core.context_processors import csrf
from django.db.models import F, Q
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.forms import *
import datetime, string
from gr.models import *
from gr.forms import *
def index(request):
    # Landing page for the gift-registry app.
    return render_to_response('gr/main.html', {}, RequestContext(request))
@login_required
def event_list(request):
    # Upcoming events only; recipients see events they receive, attendees see
    # events they attend, and GODMODE user #1 sees everything.
    events=Event.objects.filter(date__gte=datetime.datetime.now()) \
        .select_related(depth=1)
    if not (settings.GODMODE and request.user.id==1):
        if request.user.get_profile().is_recipient():
            events = events.filter(recipient = request.user)
        elif request.user.get_profile().is_attendee():
            events = events.filter(attendees = request.user)
    return render_to_response('gr/event_list.html', \
        {'events':events}, \
        context_instance=RequestContext(request))
@login_required
def event_view(request, event_id):
    # Event detail page; `utype` tells the template which actions to offer
    # ('ALL' for the GODMODE superuser).
    try:
        event = Event.objects.select_related(depth=1).get(pk=event_id)
    except Event.DoesNotExist:
        return redirect(event_list)
    if not (settings.GODMODE and request.user.id==1):
        utype = request.user.get_profile().gr_user_type
    else:
        utype = 'ALL'
    return render_to_response('gr/event_view.html', \
        {'event':event, 'user':request.user, 'utype':utype}, \
        context_instance=RequestContext(request))
@login_required
def event_edit(request, event_id=None, remove_flag=None):
    # Create, edit or (remove_flag == 'del') delete an event. Only recipients
    # may edit, and only their own events; attendee accounts named in the
    # "other_attendees" textarea are auto-created if missing.
    if not (settings.GODMODE and request.user.id==1):
        if not request.user.get_profile().is_recipient():
            return redirect(event_list)
        if event_id is not None:
            e = Event.objects.get(pk=event_id)
            if request.user != e.recipient:
                return redirect(event_list)
    if remove_flag == 'del' and event_id is not None:
        Event.objects.get(pk=event_id).delete()
        return redirect(event_list)
    if request.method == 'POST':
        # Non-empty event_id in the form means we are editing, not creating.
        if len(request.POST['event_id']) > 0:
            event_id = request.POST['event_id']
            event = Event.objects.get(pk=event_id)
            form = EventForm(request.POST, instance=event)
        else:
            form = EventForm(request.POST)
        if form.is_valid():
            event = form.save(commit=False)
            if not (settings.GODMODE and request.user.id==1):
                if event.recipient != request.user:
                    raise Exception("You can't set a recipient other than yourself")
            event.save()
            form.save_m2m()
            # One attendee username per line in the free-text field.
            others=map(string.strip,form.cleaned_data['other_attendees'].split("\n"))
            if len(others) > 0:
                for uname in others:
                    # create user account if doesn't exist
                    if len(uname) == 0: continue
                    try:
                        u = User.objects.get(username__iexact=uname)
                        p = u.get_profile()
                        if p.gr_user_type != 'ATT':
                            raise Exception('Existing user "%s" not attendee account' % uname)
                    except User.DoesNotExist:
                        # XXX in real life we wouldn't set everyone's password to "password"
                        u = User.objects.create_user(uname,uname,'password')
                        p = u.get_profile()
                        # dict([(v,k) for (k,v) in UserProfile.GR_USER_TYPES])['Attendee']
                        p.gr_user_type = 'ATT'
                        p.save()
                    # add to attendees list (this won't make duplicates)
                    event.attendees.add(u)
            return redirect(event_view, event.id)
    else:
        if event_id is not None:
            try:
                event = Event.objects.get(pk=event_id)
            except Event.DoesNotExist:
                return redirect(event_list)
            form = EventForm(instance=event)
        else:
            form = EventForm()
            if request.user.get_profile().is_recipient():
                form.fields['recipient'].initial = request.user
            #form.fields['recipient'].widget = widgets.MultipleHiddenInput
            # XXX this doesn't hide the select....
    form_fields = {'form':form,'event_id':event_id}
    form_fields.update(csrf(request))
    return render_to_response('gr/event_edit.html', \
        form_fields, \
        context_instance=RequestContext(request))
@login_required
def wishlist_index(request):
    # One entry per distinct recipient who has a wishlist.
    wishlists=RecipientWishList.objects \
        .values('recipient') \
        .distinct()
    return render_to_response('gr/wishlist_index.html', \
        {'wishlists':wishlists}, \
        context_instance=RequestContext(request))
@login_required
def wishlist_list(request, user_id=None):
    # A recipient's wishlist; non-GODMODE recipients may only view their own.
    if not (settings.GODMODE and request.user.id==1):
        if not request.user.get_profile().is_recipient():
            return redirect(event_list)
        elif str(request.user.id) != user_id:
            return redirect(wishlist_list, request.user.id)
    try:
        u = User.objects.get(pk=user_id)
    except User.DoesNotExist:
        return redirect(wishlist_index)
    # TODO it would be nice to left-join the attendeegifts
    # table somehow so that we could show if an item
    # has been selected by an attendee.
    wlist=RecipientWishList.objects.filter(recipient=u).select_related(depth=1)
    return render_to_response('gr/wishlist_list.html', \
        {'wlist':wlist, 'user':u}, \
        context_instance=RequestContext(request))
@login_required
def wishlist_edit(request, user_id, wishlist_id=None):
    # Create or edit one wishlist entry (a Gift plus its RecipientWishList
    # row). Only the owning recipient (or GODMODE user #1) may edit.
    if not (settings.GODMODE and request.user.id==1):
        if not request.user.get_profile().is_recipient():
            return redirect(event_list)
        elif str(request.user.id) != user_id:
            return redirect(wishlist_list, request.user.id)
    if request.method == 'POST':
        form = RecipientWishListWithGiftForm(request.POST)
        if form.is_valid():
            # Fetch-or-create the Gift and the wishlist row.
            try: gi = Gift.objects.get(pk=form.cleaned_data['gift_id'])
            except: gi = Gift()
            try: wl = RecipientWishList.objects \
                .get(pk=form.cleaned_data['wishlist_id'])
            except: wl = RecipientWishList()
            #XXX this belongs as a transaction...
            gi.name = form.cleaned_data['gift_name']
            gi.value = form.cleaned_data['gift_value']
            gi.save()
            wl.recipient= User.objects.get(pk=form.cleaned_data['wishlist_recipient'])
            wl.gift = gi
            wl.active = form.cleaned_data['wishlist_active']
            wl.save()
            return redirect(wishlist_list,user_id=user_id)
    else:
        if wishlist_id is not None:
            w = RecipientWishList.objects.select_related(depth=1).get(pk=wishlist_id)
            if str(w.recipient.id) != str(user_id):
                return redirect(wishlist_list,user_id=user_id)
            # Pre-populate the combined gift+wishlist form.
            ws = {
                'wishlist_recipient': w.recipient.id
                , 'gift_name': w.gift.name
                , 'gift_value': w.gift.value
                , 'gift_id': w.gift.id
                , 'wishlist_active': w.active
                , 'wishlist_id': w.id
            }
            form = RecipientWishListWithGiftForm(initial=ws)
        else:
            form=RecipientWishListWithGiftForm(initial={'wishlist_recipient':user_id})
    form_fields = {'form':form,'user_id':user_id}
    form_fields.update(csrf(request))
    return render_to_response('gr/wishlist_edit.html', \
        form_fields, \
        context_instance=RequestContext(request))
@login_required
def attendee_budget_edit(request, attendee_id, event_id):
    # Set (or, with maxpurchases <= 0, delete) an attendee's purchase budget
    # for one event. Only that attendee (or GODMODE user #1) may edit it.
    if not (settings.GODMODE and request.user.id==1):
        if not request.user.get_profile().is_attendee() \
                or str(request.user.id) != attendee_id:
            return redirect(event_view, event_id)
    def _get_budget(event_id, attendee_id):
        # Single budget row for this (event, attendee) pair.
        return AttendeeBudget.objects.get( \
            event__exact=event_id, attendee__exact=attendee_id)
    if request.method == 'POST':
        form = AttendeeBudgetForm(request.POST)
        if form.is_valid():
            e = form.cleaned_data['event']
            a = form.cleaned_data['attendee']
            try:
                b = _get_budget(e,a)
            except AttendeeBudget.DoesNotExist:
                b = AttendeeBudget()
                b.event = Event.objects.get(pk=e)
                b.attendee = User.objects.get(pk=a)
            b.maxpurchases = form.cleaned_data['maxpurchases']
            if b.maxpurchases > 0:
                b.save()
            else:
                # A non-positive budget removes the row entirely.
                try:
                    b.delete()
                except AssertionError:
                    pass
            return redirect(attendee_gifts_list, attendee_id=attendee_id, event_id=e)
        else:
            form_fields = {'form':form}
            form_fields.update(csrf(request))
            return render_to_response('gr/attendee_budget_edit.html', \
                form_fields, \
                context_instance=RequestContext(request))
    else:
        # GET: pre-populate with the current budget, if one exists.
        d = {'attendee':attendee_id,'event':event_id}
        try:
            budget = _get_budget(event_id, attendee_id)
            d['maxpurchases'] = budget.maxpurchases
        except AttendeeBudget.DoesNotExist:
            pass
        form = AttendeeBudgetForm(initial=d)
        form_fields = {'form':form}
        form_fields.update(csrf(request))
        return render_to_response('gr/attendee_budget_edit.html', \
            form_fields, \
            context_instance=RequestContext(request))
@login_required
def attendee_gifts_list(request, attendee_id, event_id):
    # Let an attendee pick which of the recipient's wishlist gifts they will
    # buy for this event. POST replaces their previous selection wholesale.
    if not (settings.GODMODE and request.user.id==1):
        if not request.user.get_profile().is_attendee() \
                or str(request.user.id) != attendee_id:
            return redirect(event_view, event_id)
    # I hope there's a better way to do this....
    event = Event.objects.get(pk=event_id)
    wishlist = RecipientWishList.objects.attendee_options( \
        event.recipient.id, attendee_id, event_id)
    gifts = Gift.objects.filter(pk__in=[w.gift.id for w in wishlist])
    prev_selected = AttendeeGifts.objects.filter(event__id=event_id, \
        attendee__id=attendee_id)
    gift_choices = [ (x.id, x) for x in gifts ]
    gift_checked = [ x.gift.id for x in prev_selected ]
    try:
        budget = AttendeeBudget.objects.get(attendee__id=attendee_id, event=event).maxpurchases
    except AttendeeBudget.DoesNotExist:
        budget = None
    if request.method == 'POST':
        form = AttendeeGiftsForm(request.POST)
        form.fields['gifts'].choices = gift_choices
        if form.is_valid():
            # Replace the old selection with the newly-posted one.
            prev_selected.delete()
            selected_gifts = request.POST.getlist('gifts')
            for g in selected_gifts:
                ag = AttendeeGifts()
                ag.attendee = User.objects.get(pk=attendee_id)
                ag.event = Event.objects.get(pk=event_id)
                ag.gift = Gift.objects.get(pk=g)
                ag.save()
            return redirect(event_view, event_id)
        else:
            form.fields['gifts'].choices = gift_choices
        # Invalid POST: re-render the form with errors.
        form_fields = {
            'form':form
            , 'budget':budget
            , 'attendee_id':attendee_id
            , 'event_id':event_id
        }
        form_fields.update(csrf(request))
        return render_to_response('gr/attendee_gifts_list.html', \
            form_fields, \
            context_instance=RequestContext(request))
    # GET: show the choices with the attendee's current picks pre-checked.
    form = AttendeeGiftsForm(initial={'event':event_id,'attendee':attendee_id})
    form.fields['gifts'].choices = gift_choices
    form.fields['gifts'].initial = gift_checked
    form_fields = {
        'form':form
        , 'budget':budget
        , 'attendee_id':attendee_id
        , 'event_id':event_id
    }
    form_fields.update(csrf(request))
    return render_to_response('gr/attendee_gifts_list.html', \
        form_fields, \
        context_instance=RequestContext(request))
@login_required
def attendee_notify(request, event_id):
    """Placeholder notification page; currently renders a template only.

    TODO: actually notify attendees, e.g.:
        from django.core.mail import send_mail
        send_mail(subject, message, sender, recipients)
    """
    return render_to_response('gr/attendee_notify.html', \
        context_instance=RequestContext(request))
def auth_register(request):
    """Register a new account (email doubles as username) and log it in.

    On success the user is authenticated immediately and redirected to the
    event list; duplicate usernames and invalid forms re-render the page
    with a message / bound form.
    """
    messages = []
    if request.method == 'POST':
        form = AuthRegisterForm(request.POST)
        if form.is_valid():
            uname = form.cleaned_data['email']
            passw = form.cleaned_data['password']
            utype = form.cleaned_data['user_type']
            u = None
            # Case-insensitive uniqueness check on the username.
            try:
                u = User.objects.get(username__iexact=uname)
            except User.DoesNotExist:
                pass
            if u:
                messages.append('A user with this name already exists')
            else:
                u = User.objects.create_user(uname,uname,passw)
                # Record the chosen role on the auto-created profile.
                p = u.get_profile()
                p.gr_user_type = utype
                p.save()
                # Authenticate so the new user lands already logged in.
                a = authenticate(username=uname,password=passw)
                login(request, a)
                return redirect(event_list)
    else:
        form = AuthRegisterForm()
    page_vars = {'form':form, 'messages':messages}
    page_vars.update(csrf(request))
    return render_to_response('gr/auth_register.html', \
        page_vars, \
        context_instance=RequestContext(request))
def auth_login(request, gmflag=None):
    """Log a user in; with GODMODE and gmflag='imm', log in as user #1.

    NOTE(security): the GODMODE branch is a development backdoor that logs
    in the pk=1 account with no password check whenever settings.GODMODE
    is set -- make sure it can never be enabled in production.
    """
    if settings.GODMODE and gmflag=='imm':
        u = User.objects.get(pk=1)
        # authenticate() was skipped, so the backend attribute must be set
        # by hand before login() will accept the user object.
        u.backend = 'django.contrib.auth.backends.ModelBackend'
        login(request, u)
        return redirect(event_list)
    messages = []
    if request.method == 'POST':
        form = AuthLoginForm(request.POST)
        if form.is_valid():
            u = authenticate(username=form.cleaned_data['username'], \
                password=form.cleaned_data['password'] )
            if u is not None and u.is_active:
                login(request, u)
                return redirect(event_list)
            else:
                messages.append('Could not log in with supplied credentials.')
    else:
        form = AuthLoginForm()
    page_vars = {'form':form, 'messages':messages}
    page_vars.update(csrf(request))
    return render_to_response('gr/auth_login.html', \
        page_vars, \
        context_instance=RequestContext(request))
def auth_logout(request):
    """End the session and return to the landing page."""
    logout(request)
    return redirect(index)
|
20,367 | c4c425adb558a85f480502f362fff34a9db343ac | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An abstract class for matrices input to the linear systems solvers in Qiskit."""
from abc import ABC, abstractmethod
from typing import Tuple
from qiskit import QuantumCircuit
from qiskit.circuit.library import BlueprintCircuit
class LinearSystemMatrix(BlueprintCircuit, ABC):
    """Base class for linear system matrices.

    Concrete subclasses provide the circuit implementing the matrix's
    Hamiltonian evolution plus eigenvalue/condition-number bounds used by
    the linear-system solver.
    """

    def __init__(
        self,
        num_state_qubits: int,
        tolerance: float,
        evolution_time: float,
        name: str = "ls_matrix",
    ) -> None:
        """
        Args:
            num_state_qubits: the number of qubits where the unitary acts.
            tolerance: the accuracy desired for the approximation
            evolution_time: the time of the Hamiltonian simulation
            name: The name of the object.
        """
        super().__init__(name=name)
        # define internal parameters; initialized to None so the property
        # setters below (which compare against the current value) always
        # see a change on first assignment.
        self._num_state_qubits = None
        self._tolerance = None
        self._evolution_time = None  # makes sure the eigenvalues are contained in [0,1)
        # store parameters (via the property setters, which also reset
        # registers / invalidate the BlueprintCircuit as needed)
        self.num_state_qubits = num_state_qubits
        self.tolerance = tolerance
        self.evolution_time = evolution_time

    @property
    def num_state_qubits(self) -> int:
        r"""The number of state qubits representing the state :math:`|x\rangle`.

        Returns:
            The number of state qubits.
        """
        return self._num_state_qubits

    @num_state_qubits.setter
    def num_state_qubits(self, num_state_qubits: int) -> None:
        """Set the number of state qubits.

        Note that this may change the underlying quantum register, if the number of state qubits
        changes.

        Args:
            num_state_qubits: The new number of qubits.
        """
        if num_state_qubits != self._num_state_qubits:
            # _invalidate marks the BlueprintCircuit as not-yet-built so it
            # is reconstructed with the new register layout on next use.
            self._invalidate()
            self._num_state_qubits = num_state_qubits
            self._reset_registers(num_state_qubits)

    @property
    def tolerance(self) -> float:
        """Return the error tolerance"""
        return self._tolerance

    @tolerance.setter
    def tolerance(self, tolerance: float) -> None:
        """Set the error tolerance

        Args:
            tolerance: The new error tolerance.
        """
        self._tolerance = tolerance

    @property
    def evolution_time(self) -> float:
        """Return the time of the evolution."""
        return self._evolution_time

    @evolution_time.setter
    def evolution_time(self, evolution_time: float) -> None:
        """Set the time of the evolution.

        Args:
            evolution_time: The new time of the evolution.
        """
        self._evolution_time = evolution_time

    @abstractmethod
    def eigs_bounds(self) -> Tuple[float, float]:
        """Return lower and upper bounds on the eigenvalues of the matrix."""
        raise NotImplementedError

    @abstractmethod
    def condition_bounds(self) -> Tuple[float, float]:
        """Return lower and upper bounds on the condition number of the matrix."""
        raise NotImplementedError

    @abstractmethod
    def _reset_registers(self, num_state_qubits: int) -> None:
        """Reset the registers according to the new number of state qubits.

        Args:
            num_state_qubits: The new number of qubits.
        """
        raise NotImplementedError

    @abstractmethod
    def power(self, power: int, matrix_power: bool = False) -> QuantumCircuit:
        """Build powers of the circuit.

        Args:
            power: The power to raise this circuit to.
            matrix_power: If True, the circuit is converted to a matrix and then the
                matrix power is computed. If False, and ``power`` is a positive integer,
                the implementation defaults to ``repeat``.

        Returns:
            The quantum circuit implementing powers of the unitary.
        """
        raise NotImplementedError
|
20,368 | d64389bf3413cc5f105f8acfcc3af675079d9258 | #!/usr/bin/python3
'''Create the 'cities' table and the mapping to it'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
Base = declarative_base()
class State(Base):
    '''Maps the 'states' table: one row per state.'''
    __tablename__ = 'states'
    id = Column(Integer, primary_key=True)
    name = Column(String(128), nullable=False)
class City(Base):
    '''link to cities table in MySQL'''
    __tablename__ = 'cities'
    id = Column(Integer, primary_key=True)
    name = Column(String(128), nullable=False)
    # Every city belongs to exactly one state.
    state_id = Column(Integer, ForeignKey('states.id'), nullable=False)
    # reference to the state the specified city belongs to
    # "cities" refers to State.cities (defined below)
    # back_populates establishes the type of relationships (one-to-many, etc.)
    state = relationship("State", back_populates="cities")

# reference to the cities in a specified state, ordered by city id
# "state" refers to the City.state attribute above
State.cities = relationship("City", order_by=City.id, back_populates="state")
|
20,369 | 186086aa37cf73b2f2371d7533dd2436c0246e84 | from model import mini_conv
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from scipy import ndimage, misc
from preprocessor import train_data_generator
import sys
def predict(img_file):
    """Classify a single image file and return its predicted class label.

    Loads the trained mini_conv weights on every call, so this is slow if
    invoked repeatedly; hoist the model for batch use.
    """
    model = mini_conv()
    model.load_weights('weights/weights.h5')
    img = load_img(img_file)
    x = img_to_array(img)
    # The network was trained on 150x150 RGB inputs.
    x = misc.imresize(x, (150, 150, 3))
    # Add a leading batch dimension: (1, 150, 150, 3).
    x = x.reshape((1,) + x.shape)
    p = model.predict(x)
    # get label indices
    train_generator = train_data_generator()
    class_dictionary = train_generator.class_indices
    indexes = list(class_dictionary.values())
    # NOTE(review): int(p[0][0]) assumes the single output unit already
    # yields an integer class index -- for a sigmoid output this truncates
    # toward 0 instead of rounding; confirm against the training setup.
    return list(class_dictionary.keys())[indexes.index(int(p[0][0]))]
if __name__ == '__main__':
print(predict(sys.argv[1])) |
20,370 | 28859fc2f4d083fbc428cd68ce7d123985d7dcec | from blockchain import blockexplorer as be
def getOldestBlocks():
    """Fetch the first ten blocks (heights 0-9) from the block explorer.

    Returns:
        list: blockexplorer block objects, oldest first.
    """
    # Removed the leftover per-iteration debug print('1') and the dead
    # commented-out "latest block" experiment.
    blocks = list()
    for height in range(0, 10):
        blocks.append(be.get_block(str(height)))
    return blocks
def getBlock(ID):
    """Look up a single block by its height or hash."""
    block_key = str(ID)
    return be.get_block(block_key)
def getLatestBlock():
    """Return the most recent block on the chain."""
    return be.get_latest_block()
def getTx(ID):
    """Look up a single transaction by its hash."""
    tx_hash = str(ID)
    return be.get_tx(tx_hash)
print(vars(vars(getBlock(4))['transactions'][0]))
print(vars(getTx("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a")))
|
20,371 | a2a3a0d7d5d3b845ebe188d4570ca6c885eb0b4b | version https://git-lfs.github.com/spec/v1
oid sha256:42fdbb9245a99191b4d8e577e7975fcc04046e5b4519b1b479bb3ce3c3e5a3ef
size 6328
|
20,372 | a265d851f4f7817f828c34e1eb70c121109a05dd | import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
app_train = pd.read_csv('data/application_train.csv')
# NOTE(review): this loads the *train* file into app_test as well -- almost
# certainly meant to read 'data/application_test.csv'; confirm before
# relying on any train/test comparison below.
app_test = pd.read_csv('data/application_train.csv')
app_train[['AMT_INCOME_TOTAL','AMT_CREDIT','AMT_ANNUITY','AMT_GOODS_PRICE','NAME_INCOME_TYPE']]
columns_drop = ['SK_ID_CURR',]
columns_categorical_compose = [ ('FLAG_OWN_CAR','FLAG_OWN_REALTY') ]
app_train['TARGET'].corr( app_train['AMT_GOODS_PRICE'] / app_train['AMT_INCOME_TOTAL'] )
app_train['TARGET'].corr( app_train['AMT_CREDIT'] / app_train['AMT_ANNUITY'] )
app_train['loan_term'] = ( app_train['AMT_CREDIT'] / app_train['AMT_ANNUITY'] )
app_train['loan_term_int'] = app_train['loan_term'].apply(round)
app_train[['loan_term_int','TARGET']].groupby('loan_term_int').mean().plot.bar()
plt.hist(app_train['loan_term_int'], edgecolor = 'k', bins = 20)
poor = app_train.loc[ (app_train['FLAG_OWN_CAR']=='Y') & ( app_train['FLAG_OWN_REALTY']=='Y'),['TARGET','AMT_GOODS_PRICE','AMT_INCOME_TOTAL']]
poor['TARGET'].corr( poor['AMT_GOODS_PRICE'] - poor['AMT_INCOME_TOTAL'] )
( poor['AMT_GOODS_PRICE'] - poor['AMT_INCOME_TOTAL'] ).corr(poor['TARGET'])
poor['dif'] = poor['AMT_GOODS_PRICE'] - poor['AMT_INCOME_TOTAL']
sns.kdeplot(poor.loc[poor['TARGET'] == 0, 'dif'] , label = 'target == 0')
sns.kdeplot(poor.loc[poor['TARGET'] == 1, 'dif'] , label = 'target == 1')
featrue_goups = [ 'family', 'economy', 'career', 'event', 'geo']
app_train['DAYS_EMPLOYED'].plot.hist(title = 'Days Employment Histogram')
plt.hist(app_train['DAYS_BIRTH'] / -365, edgecolor = 'k', bins = 25, range = (0,40))
plt.hist(app_train['SK_ID_CURR'], edgecolor = 'k', bins = 20)
sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, 'DAYS_BIRTH'] / 365, label = 'target == 0')
sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, 'DAYS_BIRTH'] / 365, label = 'target == 1')
def missing_values_table(df):
    """Summarize missing data in *df*.

    Args:
        df: pandas DataFrame to inspect.

    Returns:
        DataFrame indexed by column name with 'Missing Values' (count) and
        '% of Total Values' columns, restricted to columns that have at
        least one missing value, sorted by percentage descending and
        rounded to one decimal.  Also prints a short summary.
    """
    # Total missing values -- counted once; the percentage is derived from
    # it (the original recomputed df.isnull().sum() twice).
    mis_val = df.isnull().sum()
    # Percentage of missing values
    mis_val_percent = 100 * mis_val / len(df)
    # Make a table with the results
    mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
    # Rename the columns
    mis_val_table_ren_columns = mis_val_table.rename(
        columns = {0 : 'Missing Values', 1 : '% of Total Values'})
    # Sort the table by percentage of missing descending
    mis_val_table_ren_columns = mis_val_table_ren_columns[
        mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(
        '% of Total Values', ascending=False).round(1)
    # Print some summary information
    print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
        "There are " + str(mis_val_table_ren_columns.shape[0]) +
          " columns that have missing values.")
    # Return the dataframe with missing information
    return mis_val_table_ren_columns
missing_values_table(app_train)
missing_values_table(app_test)
app_train.dtypes.value_counts()
app_train.dtypes.value_counts()
app_train.select_dtypes('object').apply(pd.Series.nunique, axis = 0)
# Create a label encoder object
le = LabelEncoder()
le_count = 0
# Iterate through the columns
for col in app_train:
if app_train[col].dtype == 'object':
# If 2 or fewer unique categories
if len(list(app_train[col].unique())) <= 2:
# Train on the training data
le.fit(app_train[col])
# Transform both training and testing data
app_train[col] = le.transform(app_train[col])
app_test[col] = le.transform(app_test[col])
# Keep track of how many columns were label encoded
le_count += 1
print('%d columns were label encoded.' % le_count)
# one-hot encoding of categorical variables
app_train = pd.get_dummies(app_train)
app_test = pd.get_dummies(app_test)
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
train_labels = app_train['TARGET']
# Align the training and testing data, keep only columns present in both dataframes
app_train, app_test = app_train.align(app_test, join = 'inner', axis = 1)
# Add the target back in
app_train['TARGET'] = train_labels
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
# https://www.kaggle.com/willkoehrsen/start-here-a-gentle-introduction
train_labels = app_train['TARGET']
# Align the training and testing data, keep only columns present in both dataframes
app_train, app_test = app_train.align(app_test, join = 'inner', axis = 1)
# Add the target back in
app_train['TARGET'] = train_labels
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)
|
20,373 | e4ad50f8fb969ef465f2cf07ea3f64c31ce748c3 | from queue import Queue
import re
class Token:
    """One lexical unit of a matcher expression.

    A token is either a bracket, a boolean operator ('&', '|', '!'), or a
    '#'-quoted regex fragment (EXPRESSION).
    """

    # Token type tags.
    LEFT_BLACKETS = 'LEFT_BLACKETS'
    RIGHT_BLACKETS = 'RIGHT_BLACKETS'
    SYMBOL = 'SYMBOL'
    EXPRESSION = 'EXPRESSION'
    # Characters recognized as boolean operators.
    SYMBOLS = '&|!'

    def __init__(self, value, types):
        self.value = value
        self.types = types

    def __str__(self):
        return '{0}<{1}>'.format(self.value, self.types)

    def __repr__(self):
        # repr intentionally matches str for readable token lists.
        return str(self)
# produce an expression with tokens
def tokenize(origin):
    """Split *origin* into a list of Tokens.

    Regex fragments are quoted between '#' characters; outside quotes,
    '&', '|', '!' become SYMBOL tokens and parentheses become bracket
    tokens.  Any other character outside quotes is silently dropped.
    """
    tokens = []
    buf = None  # accumulates characters while inside a '#...#' quote
    for ch in origin:
        if ch == '#':
            if buf is None:
                # Opening quote: start collecting the regex fragment.
                buf = []
            else:
                # Closing quote: emit the collected fragment.
                tokens.append(Token(''.join(buf), Token.EXPRESSION))
                buf = None
        elif buf is not None:
            # Inside a quote every character is literal, operators included.
            buf.append(ch)
        elif ch in Token.SYMBOLS:
            tokens.append(Token(ch, Token.SYMBOL))
        elif ch == '(':
            tokens.append(Token(ch, Token.LEFT_BLACKETS))
        elif ch == ')':
            tokens.append(Token(ch, Token.RIGHT_BLACKETS))
    return tokens
class ASTree:
    """Binary AST node: `root` holds the payload, children are subtrees."""

    def __init__(self, token):
        self.root = token
        self.left = None
        self.right = None

    def visit(self):
        """Breadth-first traversal; returns the node payloads in order."""
        order = []
        pending = Queue()
        pending.put(self)
        while not pending.empty():
            node = pending.get()
            order.append(node.root)
            if node.left is not None:
                pending.put(node.left)
            if node.right is not None:
                pending.put(node.right)
        return order
def make_sub_ast(stack, tree):
    """Fold pending operator nodes on *stack* around *tree*, push the result.

    Pops SYMBOL nodes off the stack, attaching *tree* (and, for binary
    operators, a second popped operand) as children, until the stack top is
    no longer an operator; the combined subtree is pushed back.

    Always returns None (the return value of list.append).
    """
    current = tree
    while stack and stack[-1].root.types == 'SYMBOL':
        node = stack.pop()
        if node.root.value == '!' and not node.right:
            # Unary NOT: only consumes the right operand.
            node.right = current
            # NOTE(review): this insists the entry *below* '!' is '(' --
            # i.e. '!' appears supported only directly inside parentheses;
            # confirm this restriction is intended.
            if stack[-1].root.types != 'LEFT_BLACKETS':
                raise Exception('{0} is not the expected type{1}'.format(stack[-1].root.value, Token.LEFT_BLACKETS))
        else:
            if not stack[-1]:
                raise Exception('')
            # Binary operator: right operand is the current subtree, left
            # operand is the node below the operator on the stack.
            node.right = current
            node.left = stack.pop()
        current = node
    return stack.append(current)
# transform the expression with tokens into astree
def make_ast(token):
    """Convert a token list into an ASTree via an operator stack.

    Brackets and operators are pushed; each EXPRESSION immediately folds
    pending operators (make_sub_ast); a closing bracket folds the
    bracketed subtree into the surrounding expression.
    """
    stack = []
    for t in token:
        tree = ASTree(t)
        if tree.root.types == 'LEFT_BLACKETS' or tree.root.types == 'SYMBOL':
            stack.append(tree)
        elif tree.root.types == 'EXPRESSION':
            make_sub_ast(stack, tree)
        elif tree.root.types == 'RIGHT_BLACKETS':
            # Pop the completed subtree, discard its matching '(' ...
            r = stack.pop()
            if stack[-1] and stack[-1].root.types == 'LEFT_BLACKETS':
                stack.pop()
                # ... and fold it into whatever operators precede it.
                make_sub_ast(stack, r)
            else:
                raise Exception('a')
        else:
            raise Exception('')
    # After a well-formed expression exactly one tree remains.
    return stack.pop()
def cacl(ast, line):
    """Recursively evaluate the boolean AST against *line*.

    Leaf (EXPRESSION) nodes regex-search the line; '!', '|' and '&'
    combine child results.  An unrecognized operator falls through and
    yields None, exactly as the original did.
    """
    if ast.root.types == Token.EXPRESSION:
        return re.search(ast.root.value, line) is not None
    op = ast.root.value
    if op == '!':
        return not cacl(ast.right, line)
    if op == '|':
        return cacl(ast.left, line) or cacl(ast.right, line)
    if op == '&':
        return cacl(ast.left, line) and cacl(ast.right, line)
class Matcher:
    """Compiles a '#regex#'-quoted boolean expression and matches lines."""
    def __init__(self, name, origin):
        # name: label for this matcher; origin: the raw expression string.
        self.name = name
        self.origin = origin
        # Parse once at construction time; match() only evaluates.
        self.ast = make_ast(tokenize(origin))
    def match(self, line):
        """Return True if *line* satisfies the boolean expression."""
        return cacl(self.ast, line)
if __name__ == '__main__':
    # Demo run.  Matcher requires (name, origin); the original passed only
    # the expression, which raised TypeError before any matching happened.
    e = '#test# & #abc# | (!#123# | #456#)'
    line = 'test cdf 123 568'
    m = Matcher('demo', e)
    print(m.match(line))
20,374 | 10cd0f2114bfe14de8e91a574b87dde9177878c8 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Single source of truth for the package's dependency pins.
REQUIREMENTS = [
    'prometheus-client==0.0.13',
]

setup(
    name='salt_exporter',
    description='Best salt exporter',
    version='0.1.0',
    author='Shalom Yerushalmy',
    author_email='yershalom@gmail.com',
    packages=find_packages(exclude=['docs']),
    # REQUIREMENTS was previously defined but unused, with the same pin
    # duplicated inline here; keep the list in one place.
    install_requires=REQUIREMENTS,
)
|
20,375 | 3747b14e50ca2672093c8a841ca917d1a0e2c304 | def open_file():
"""Opens a file.
Inputs: Prompts a filename from the user.
Outputs: A text file for parsing.
"""
filename = input("Input filename. > ")
open_text = open(filename, 'r')
print("Opened " + filename)
return open_text
def parse_file(input_lst):
    """Count word occurrences across an iterable of text lines.

    Inputs: an iterable of strings (e.g. an open text file).
    Outputs: a dictionary mapping normalized word -> occurrence count.

    Normalization lowercases each whitespace-separated token, turns
    hyphens into spaces, and strips ? ! , ' " . characters -- matching the
    original chain of str.replace calls exactly (so a hyphenated token
    still becomes a single key containing a space).
    """
    # One C-level translate pass per token instead of eight chained
    # .replace() calls.
    _clean = str.maketrans({'-': ' ', '?': None, '!': None, ',': None,
                            "'": None, '"': None, '.': None})
    counts = Counter()
    for line in input_lst:
        for token in line.split():
            counts[token.lower().translate(_clean)] += 1
    return dict(counts)
def main():
    """Prompt the user for a text file and print the top 50 most common
    word occurrences (fewer if the file has fewer distinct words).
    """
    import operator
    init_file = open_file()
    opened_file = parse_file(init_file)
    ranked = sorted(opened_file.items(), key = operator.itemgetter(1), reverse = True)
    # The original iterated range(51), printing 51 entries (not 50) and
    # raising IndexError on files with fewer than 51 distinct words; a
    # slice handles both problems.
    for entry in ranked[:50]:
        print(entry)
"""Unit Tests"""
def unit_tests():
file = open('test.txt', 'r') # loads separate test file
assert parse_file(file) == {'string' : 1}
file.close()
#print("Passed all tests.")
main()
unit_tests()
|
20,376 | 8ec3260f27c0c6d0525edf48bce4f35d86fe6969 | '''
Напишите реализацию функции closest_mod_5, принимающую в качестве единственного аргумента целое число x и возвращающую самое маленькое целое число y, такое что:
y больше или равно x
y делится нацело на 5
На вход функция принимает целое число. В теле функции - проверяем делится ли это число без остатка на 5.
Если нет увеличиваем его и проверяем снова. Как только нашлось такое число то выводим его из функции.
'''
def closest_mod_5(x):
    """Return the smallest integer y with y >= x and y % 5 == 0.

    Computed directly: (-x) % 5 is the gap up to the next multiple of 5
    (0 when x is already a multiple), replacing the original recursion
    that made up to four extra calls per query.
    """
    return x + (-x % 5)
inp = int(input('введите число: '))
print(closest_mod_5(inp) ) |
20,377 | cb154c43b5e8ea809ec4cf69f5e1b4e2aa322ff5 | import unittest
from ros2sim.parsers import Parser
class DummyParser(Parser):
    """Minimal concrete Parser used only to exercise the base constructor."""

    def parse(self, input_str):
        # The original signature omitted `self`, so an instance call would
        # have bound the instance to input_str; declare a proper method.
        pass
class TestBasicParser(unittest.TestCase):
    """Smoke test for the Parser base-class constructor."""
    def test_construction(self):
        # Parser is expected to store the environment it was built with.
        env = 'test'
        parser = DummyParser(env)
        self.assertEqual(env, parser.env)
if __name__ == '__main__':
unittest.main() |
20,378 | 4910e50cc4e3fb1739e0713a9399d7faf4a9104b | import FWCore.ParameterSet.Config as cms
# generate pi0/pi0* events
source = cms.Source("FlatRandomPtGunSource",
PGunParameters = cms.untracked.PSet(
# you can request more than 1 particle
PartID = cms.untracked.vint32(211),
MinEta = cms.untracked.double(-4.0),
MaxEta = cms.untracked.double( 4.0),
MinPhi = cms.untracked.double(-3.14159265359),
MaxPhi = cms.untracked.double( 3.14159265359),
MinPt = cms.untracked.double( 9.99),
MaxPt = cms.untracked.double(10.01)
),
AddAntiParticle = cms.untracked.bool(True),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
|
20,379 | a956afdd806e0ac055c310a30642f14d8746ad21 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
import unittest
from uuid import uuid4
import yaml
from tests.integration_tests.test_app import app
from superset import db
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.utils.database import get_example_database
from superset.utils.dict_import_export import export_to_dict
from .base_tests import SupersetTestCase
DBREF = "dict_import__export_test"
NAME_PREFIX = "dict_"
ID_PREFIX = 20000
class TestDictImportExport(SupersetTestCase):
"""Testing export import functionality for dashboards"""
@classmethod
def delete_imports(cls):
with app.app_context():
# Imported data clean up
session = db.session
for table in session.query(SqlaTable):
if DBREF in table.params_dict:
session.delete(table)
session.commit()
@classmethod
def setUpClass(cls):
cls.delete_imports()
@classmethod
def tearDownClass(cls):
cls.delete_imports()
def create_table(
self, name, schema=None, id=0, cols_names=[], cols_uuids=None, metric_names=[]
):
database_name = "main"
name = "{0}{1}".format(NAME_PREFIX, name)
params = {DBREF: id, "database_name": database_name}
if cols_uuids is None:
cols_uuids = [None] * len(cols_names)
dict_rep = {
"database_id": get_example_database().id,
"table_name": name,
"schema": schema,
"id": id,
"params": json.dumps(params),
"columns": [
{"column_name": c, "uuid": u} for c, u in zip(cols_names, cols_uuids)
],
"metrics": [{"metric_name": c, "expression": ""} for c in metric_names],
}
table = SqlaTable(
id=id, schema=schema, table_name=name, params=json.dumps(params)
)
for col_name, uuid in zip(cols_names, cols_uuids):
table.columns.append(TableColumn(column_name=col_name, uuid=uuid))
for metric_name in metric_names:
table.metrics.append(SqlMetric(metric_name=metric_name, expression=""))
return table, dict_rep
def yaml_compare(self, obj_1, obj_2):
obj_1_str = yaml.safe_dump(obj_1, default_flow_style=False)
obj_2_str = yaml.safe_dump(obj_2, default_flow_style=False)
self.assertEqual(obj_1_str, obj_2_str)
def assert_table_equals(self, expected_ds, actual_ds):
self.assertEqual(expected_ds.table_name, actual_ds.table_name)
self.assertEqual(expected_ds.main_dttm_col, actual_ds.main_dttm_col)
self.assertEqual(expected_ds.schema, actual_ds.schema)
self.assertEqual(len(expected_ds.metrics), len(actual_ds.metrics))
self.assertEqual(len(expected_ds.columns), len(actual_ds.columns))
self.assertEqual(
set([c.column_name for c in expected_ds.columns]),
set([c.column_name for c in actual_ds.columns]),
)
self.assertEqual(
set([m.metric_name for m in expected_ds.metrics]),
set([m.metric_name for m in actual_ds.metrics]),
)
def assert_datasource_equals(self, expected_ds, actual_ds):
self.assertEqual(expected_ds.datasource_name, actual_ds.datasource_name)
self.assertEqual(expected_ds.main_dttm_col, actual_ds.main_dttm_col)
self.assertEqual(len(expected_ds.metrics), len(actual_ds.metrics))
self.assertEqual(len(expected_ds.columns), len(actual_ds.columns))
self.assertEqual(
set([c.column_name for c in expected_ds.columns]),
set([c.column_name for c in actual_ds.columns]),
)
self.assertEqual(
set([m.metric_name for m in expected_ds.metrics]),
set([m.metric_name for m in actual_ds.metrics]),
)
def test_import_table_no_metadata(self):
table, dict_table = self.create_table("pure_table", id=ID_PREFIX + 1)
new_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
imported_id = new_table.id
imported = self.get_table_by_id(imported_id)
self.assert_table_equals(table, imported)
self.yaml_compare(table.export_to_dict(), imported.export_to_dict())
def test_import_table_1_col_1_met(self):
table, dict_table = self.create_table(
"table_1_col_1_met",
id=ID_PREFIX + 2,
cols_names=["col1"],
cols_uuids=[uuid4()],
metric_names=["metric1"],
)
imported_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
imported = self.get_table_by_id(imported_table.id)
self.assert_table_equals(table, imported)
self.assertEqual(
{DBREF: ID_PREFIX + 2, "database_name": "main"}, json.loads(imported.params)
)
self.yaml_compare(table.export_to_dict(), imported.export_to_dict())
def test_import_table_2_col_2_met(self):
table, dict_table = self.create_table(
"table_2_col_2_met",
id=ID_PREFIX + 3,
cols_names=["c1", "c2"],
cols_uuids=[uuid4(), uuid4()],
metric_names=["m1", "m2"],
)
imported_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
imported = self.get_table_by_id(imported_table.id)
self.assert_table_equals(table, imported)
self.yaml_compare(table.export_to_dict(), imported.export_to_dict())
def test_import_table_override_append(self):
table, dict_table = self.create_table(
"table_override", id=ID_PREFIX + 3, cols_names=["col1"], metric_names=["m1"]
)
imported_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
table_over, dict_table_over = self.create_table(
"table_override",
id=ID_PREFIX + 3,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_over_table = SqlaTable.import_from_dict(db.session, dict_table_over)
db.session.commit()
imported_over = self.get_table_by_id(imported_over_table.id)
self.assertEqual(imported_table.id, imported_over.id)
expected_table, _ = self.create_table(
"table_override",
id=ID_PREFIX + 3,
metric_names=["new_metric1", "m1"],
cols_names=["col1", "new_col1", "col2", "col3"],
cols_uuids=[col.uuid for col in imported_over.columns],
)
self.assert_table_equals(expected_table, imported_over)
self.yaml_compare(
expected_table.export_to_dict(), imported_over.export_to_dict()
)
def test_import_table_override_sync(self):
table, dict_table = self.create_table(
"table_override", id=ID_PREFIX + 3, cols_names=["col1"], metric_names=["m1"]
)
imported_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
table_over, dict_table_over = self.create_table(
"table_override",
id=ID_PREFIX + 3,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_over_table = SqlaTable.import_from_dict(
session=db.session, dict_rep=dict_table_over, sync=["metrics", "columns"]
)
db.session.commit()
imported_over = self.get_table_by_id(imported_over_table.id)
self.assertEqual(imported_table.id, imported_over.id)
expected_table, _ = self.create_table(
"table_override",
id=ID_PREFIX + 3,
metric_names=["new_metric1"],
cols_names=["new_col1", "col2", "col3"],
cols_uuids=[col.uuid for col in imported_over.columns],
)
self.assert_table_equals(expected_table, imported_over)
self.yaml_compare(
expected_table.export_to_dict(), imported_over.export_to_dict()
)
def test_import_table_override_identical(self):
table, dict_table = self.create_table(
"copy_cat",
id=ID_PREFIX + 4,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_table = SqlaTable.import_from_dict(db.session, dict_table)
db.session.commit()
copy_table, dict_copy_table = self.create_table(
"copy_cat",
id=ID_PREFIX + 4,
cols_names=["new_col1", "col2", "col3"],
metric_names=["new_metric1"],
)
imported_copy_table = SqlaTable.import_from_dict(db.session, dict_copy_table)
db.session.commit()
self.assertEqual(imported_table.id, imported_copy_table.id)
self.assert_table_equals(copy_table, self.get_table_by_id(imported_table.id))
self.yaml_compare(
imported_copy_table.export_to_dict(), imported_table.export_to_dict()
)
def test_export_datasource_ui_cli(self):
# TODO(bkyryliuk): find fake db is leaking from
self.delete_fake_db()
cli_export = export_to_dict(
session=db.session,
recursive=True,
back_references=False,
include_defaults=False,
)
self.get_resp("/login/", data=dict(username="admin", password="general"))
resp = self.get_resp(
"/databaseview/action_post", {"action": "yaml_export", "rowid": 1}
)
ui_export = yaml.safe_load(resp)
self.assertEqual(
ui_export["databases"][0]["database_name"],
cli_export["databases"][0]["database_name"],
)
self.assertEqual(
ui_export["databases"][0]["tables"], cli_export["databases"][0]["tables"]
)
if __name__ == "__main__":
unittest.main()
|
20,380 | 99d01af4935d0c3716e01da9390795c64362783a | import psutil as util
i = 0
cpuCount = util.cpu_count()
print("System Monitor Example\n"
"--------------------------------------------------------------------------------------------------------")
while i < 5:
cpuUsage = util.cpu_percent(2, True)
for cpu in range(cpuCount):
print("CPU" + str(cpu) + " Usage: " + str(cpuUsage[cpu]) + "%", end="\t")
# getting ram percentage used.
ram = util.virtual_memory() # gets ram
ram = str(ram).split(',') # splits into indexable array
ram = ram[2] # gets the percent used
print("Ram used: " + ram + "%", end="\t")
# cpu times
cpuMetrics = util.cpu_times(True)
print(cpuMetrics)
# getting number of processes
processes = util.pids() # gets a list of pids
numberOfProcesses = len(processes)
print("Processes: " + str(numberOfProcesses), end="\t")
# getting the network usage
networkUsage = util.net_io_counters()
# we now need to manipulate the string output
networkUsage = str(networkUsage).split(',')
networkUsage[0] = networkUsage[0].strip("snetio(")
networkUsage[len(networkUsage) - 1] = networkUsage[len(networkUsage) - 1].rstrip(")")
print("Network Info: ", end="")
for info in networkUsage:
print(info, end="\t")
# new line
print()
i += 1
print("\n"
"Process info\n"
"--------------------------------------------------------------------------------------------------------")
i = 0
for proc in util.process_iter(): # iterates of all system processes in order of pid.
try:
# gets the info with the attached attribute name, not in the order of the attr array.
pinfo = proc.as_dict(attrs=['pid', 'name', 'cpu_percent', 'cpu_times'])
except util.NoSuchProcess:
pass
else:
print(pinfo)
i += 1
if i == 5: # this is just a test and we are not looking for a particular process, so just print the first 5
break
print("\n"
"Getting particular process with its info, in this case python3.6, this name may vary by OS. Works on Linux.\n"
"--------------------------------------------------------------------------------------------------------")
procName = "python3.6"
for proc in util.process_iter():
if proc.name() == procName:
try:
# gets the info with the attached attribute name, not in the order of the attr array.
pinfo = proc.as_dict(attrs=['pid', 'name', 'cpu_percent', 'cpu_times'])
except util.NoSuchProcess:
pass
else:
print(pinfo)
|
20,381 | 22dea8cb2aa50f197350d68c0c174feff322b273 | # -*- coding:utf-8 -*-
from collections import namedtuple
import os
import tensorflow as tf
import config
from model.inits import glorot, zeros
import model.layers as layers
from model.aggregators import CrossAggregator
# TensorFlow command-line flags shared across the project.
flags = tf.app.flags
FLAGS = flags.FLAGS
# Pin the visible GPU to the one selected via the --gpu flag.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
# Session config: grow GPU memory on demand and allow soft op placement.
sess_config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess_config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
sess_config.allow_soft_placement = True
# Per-layer sampling configuration for GraphSAGE.
SAGEInfo = namedtuple("SAGEInfo",
    ['layer_name', # name of the layer (to get feature embedding etc.)
     # 'neigh_sampler', # callable neigh_sampler constructor
     'num_samples',  # neighbourhood fan-out at this layer
     'output_dim' # the output (i.e., hidden) dimension
    ])
layer_infos = [SAGEInfo("node", FLAGS.samples_1, FLAGS.dim_1),
               SAGEInfo("node", FLAGS.samples_2, FLAGS.dim_2)]
# === parameters fixed at prediction time ===
num_classes = 2 # label_map.shape[1]
feats_dim = 106 # features.shape[1]
aggregator_type = 'cross'
concat = True
model_size = FLAGS.model_size
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
    """Implementation of supervised GraphSAGE.

    Inference-only variant: builds the TF graph from pre-sampled
    neighbourhood data fed through placeholders, restores trained weights
    from a checkpoint, and exposes :meth:`predict` for scoring batches.
    """
    def __init__(self, **kwargs):
        # === from model.py ===
        allowed_kwargs = {'name', 'logging', 'model_size'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            name = self.__class__.__name__.lower()
        self.name = name
        # logging = kwargs.get('logging', False)
        # self.logging = logging
        self.vars = {}
        # self.placeholders = {}
        self.layers = []
        self.activations = []
        self.inputs = None
        self.outputs = None
        self.loss = 0
        self.accuracy = 0
        self.optimizer = None
        self.opt_op = None
        # === set aggregator ===
        # added two cross variants: cross, cross geniepath
        if aggregator_type == 'cross':
            self.aggregator_cls = CrossAggregator
        else:
            raise Exception("Unknown aggregator: ", aggregator_type)
        self.input_dim = feats_dim
        self.output_dim = num_classes # 2
        # self.sampler = sampler
        # self.adj_info = adj
        self.layer_infos = layer_infos
        self.concat = concat
        self.model_size = model_size
        self.sigmoid_loss = sigmoid_loss
        self.dims = [(self.input_dim) + identity_dim]
        self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
        self.aggregator_type = aggregator_type
        # === get info from placeholders ===
        # get info from placeholders...
        # NOTE(review): construct_placeholders declares (num_classes, feats_dim)
        # but is called here with (input_dim, output_dim); harmless because the
        # parameters are unused inside it -- confirm before relying on them.
        self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
        # self.labels = self.placeholders['labels']
        # self.batch_nodes = placeholders["batch_nodes"]
        self.batch_size = self.placeholders["batch_size"]
        # self.support_size = placeholders['support_size']
        # self.features = placeholders['features']
        # One placeholder per hop (0 = batch nodes, 1/2 = sampled neighbours).
        sampled_weight = [self.placeholders['sampled_weight_0'],
                          self.placeholders['sampled_weight_1'],
                          self.placeholders['sampled_weight_2']]
        sampled_column = [self.placeholders['sampled_column_0'],
                          self.placeholders['sampled_column_1'],
                          self.placeholders['sampled_column_2']]
        sampled_feats = [self.placeholders['sampled_feats_0'],
                         self.placeholders['sampled_feats_1'],
                         self.placeholders['sampled_feats_2']]
        self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        # Build the graph, then create a session and restore the checkpoint.
        self.build()
        self.var_list = tf.trainable_variables()
        self.saver = tf.train.Saver(var_list=self.var_list)
        self.sess = tf.Session(config=sess_config)
        self.sess.run(tf.global_variables_initializer())
        self.load(self.sess)
    def construct_placeholders(self, num_classes, feats_dim):
        """Create the feed placeholders for batch size, dropout, and the
        per-hop sampled features/weights/columns."""
        # Define placeholders
        # these keys are consumed by __init__ above
        # feed_dict = {placeholders: data}
        placeholders = {
            # 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
            # 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
            # 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
            'batch_size': tf.placeholder(tf.int32, name='batch_size'),
            'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
            'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
            'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
            'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
            'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
            'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
            'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
            'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
            'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
            'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
        }
        return placeholders
    # === build computation graph ===
    def build(self):
        """Assemble the inference graph: aggregate sampled neighbourhoods,
        L2-normalize the embeddings, and apply the final dense layer."""
        # data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
        support_size = 1 # [1, 8, 8*16]
        support_sizes = [support_size]
        # Support sizes grow multiplicatively with each hop's fan-out.
        for k in range(len(self.layer_infos)):
            t = len(self.layer_infos) - k -1
            support_size *= self.layer_infos[t].num_samples
            support_sizes.append(support_size)
        sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
        self.outputs, self.aggregators = self.aggregate(
            self.data_sampled, self.dims, sample_size,
            support_sizes, concat=self.concat, model_size=self.model_size)
        # data_sampled, [self.features], self.dims, num_samples,
        # support_sizes, concat=self.concat, model_size=self.model_size)
        self.outputs = tf.nn.l2_normalize(self.outputs, 1)
        dim_mult = 2 if self.concat else 1
        self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
                dropout=self.placeholders['dropout'],
                act=lambda x : x) # no non-linear activation
        # TF graph management
        self.node_preds = self.node_pred(self.outputs)
        # self._loss()
        # gradient clipping disabled
        # grads_and_vars = self.optimizer.compute_gradients(self.loss)
        # clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
        #                           for grad, var in grads_and_vars]
        # self.grad, _ = clipped_grads_and_vars[0]
        # self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
        # self.opt_op = self.optimizer.minimize(self.loss)
        self._predict()
    def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
            aggregators=None, name='aggregate', concat=False, model_size="small"):
        """Run the layer-wise neighbourhood aggregation.

        Returns the final hidden representation of the batch nodes and the
        list of aggregator layers (created here unless passed in).
        """
        if batch_size is None:
            batch_size = self.batch_size
        # length: number of layers + 1
        # hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
        feats_hidden = data_sampled[0] # features gathered by index
        weight_hidden = data_sampled[1]
        column_hidden = data_sampled[2]
        # feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # gather feats by index
        # feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # gather feats by index
        # weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
        # column_hidden = [column_samples for column_samples in data_sampled[2]]
        new_agg = aggregators is None
        if new_agg:
            aggregators = []
        # c_list = [] # (added)
        for layer in range(len(num_samples)):
            if new_agg:
                dim_mult = 2 if concat and (layer != 0) else 1
                # aggregator at current layer
                if layer == len(num_samples) - 1: # 2*64, 32
                    aggregator = self.aggregator_cls(
                        dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
                        dropout=self.placeholders['dropout'],
                        name=name, concat=concat, model_size=model_size)
                else: # aggregator.__init__() here # 106 -> 64
                    aggregator = self.aggregator_cls(
                        dim_mult*dims[layer], dims[layer+1],
                        dropout=self.placeholders['dropout'],
                        name=name, concat=concat, model_size=model_size)
                aggregators.append(aggregator)
            else:
                aggregator = aggregators[layer]
            # hidden representation at current layer for all support nodes that are various hops away
            next_hidden = []
            # as layer increases, the number of support nodes needed decreases
            for hop in range(len(num_samples) - layer):
                dim_mult = 2 if concat and (layer != 0) else 1
                neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
                              num_samples[len(num_samples) - hop - 1], # 8, 16; 8
                              dim_mult*dims[layer]] # 106, 106; 2 * 64
                weight_neigh_dims = [batch_size * support_sizes[hop],
                                     num_samples[len(num_samples)- hop -1],
                                     1]
                # h = aggregator((hidden[hop],
                #                 tf.reshape(hidden[hop + 1], neigh_dims)))
                # call aggregator
                # self_vecs, neigh_vecs, neigh_weight, neigh_column
                h = aggregator((
                    feats_hidden[hop],
                    tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
                    tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
                    tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
                next_hidden.append(h)
            feats_hidden = next_hidden
        #self.hiddenOutput.append(hidden[0])
        return feats_hidden[0], aggregators
    # def _loss(self):
    #     # Weight decay loss
    #     for aggregator in self.aggregators:
    #         for var in aggregator.vars.values():
    #             self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
    #     for var in self.node_pred.vars.values():
    #         self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
    #     # classification loss
    #     if self.sigmoid_loss:
    #         self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    #             logits=self.node_preds,
    #             labels=self.labels))
    #     else:
    #         # switched to the _v2 op
    #         self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    #             logits=self.node_preds,
    #             labels=self.labels))
    #     # tf.summary.scalar('loss', self.loss)
    def _predict(self):
        """Turn raw logits into probabilities (sigmoid or softmax head)."""
        if self.sigmoid_loss:
            self.preds = tf.nn.sigmoid(self.node_preds)
        else:
            self.preds = tf.nn.softmax(self.node_preds)
    # === everything above builds the computation graph ===
    def predict(self, feed_dict):
        """Run the prediction op for one batch described by *feed_dict*."""
        preds = self.sess.run([self.preds],
                              feed_dict=feed_dict)
        return preds
    def close_sess(self):
        """Release the TensorFlow session."""
        self.sess.close()
    # def save(self, sess=None):
    #     if not sess:
    #         raise AttributeError("TensorFlow session not provided.")
    #     saver = tf.train.Saver(var_list=self.var_list)
    #     save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
    #     saver.restore(sess, save_path)
    #     print("Model saved in file: %s" % save_path)
    def load(self, sess=None):
        """Restore trained weights from the checkpoint for this aggregator type."""
        if not sess:
            raise AttributeError("TensorFlow session not provided.")
        # saver = tf.train.Saver(reshape=True)
        # saver = tf.train.Saver(var_list=self.var_list)
        # saver = tf.train.Saver()/
        # TODO: this checkpoint path should not be hard-coded
        save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
        self.saver.restore(sess, save_path)
        print("Model restored from file: %s" % save_path)
    # ckpt_path = './data/model/%s.ckpt'%(self.aggregator_type)
    # meta_path = ckpt_path + '.meta'
|
20,382 | 0b76a574c394a8a4c4bfc4f7f7f0c8ccae13ad55 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django middleware helper to capture and trace a request."""
import logging
from opencensus.trace.ext import utils
from opencensus.trace.ext.django.config import (settings, convert_to_import)
from opencensus.trace import attributes_helper
from opencensus.trace import execution_context
from opencensus.trace import span as span_module
from opencensus.trace import tracer as tracer_module
from opencensus.trace.samplers import probability
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # pragma: NO COVER
MiddlewareMixin = object
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES['HTTP_METHOD']
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES['HTTP_URL']
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES['HTTP_STATUS_CODE']
REQUEST_THREAD_LOCAL_KEY = 'django_request'
BLACKLIST_PATHS = 'BLACKLIST_PATHS'
GCP_EXPORTER_PROJECT = 'GCP_EXPORTER_PROJECT'
SAMPLING_RATE = 'SAMPLING_RATE'
TRANSPORT = 'TRANSPORT'
SERVICE_NAME = 'SERVICE_NAME'
ZIPKIN_EXPORTER_SERVICE_NAME = 'ZIPKIN_EXPORTER_SERVICE_NAME'
ZIPKIN_EXPORTER_HOST_NAME = 'ZIPKIN_EXPORTER_HOST_NAME'
ZIPKIN_EXPORTER_PORT = 'ZIPKIN_EXPORTER_PORT'
ZIPKIN_EXPORTER_PROTOCOL = 'ZIPKIN_EXPORTER_PROTOCOL'
OCAGENT_TRACE_EXPORTER_ENDPOINT = 'OCAGENT_TRACE_EXPORTER_ENDPOINT'
log = logging.getLogger(__name__)
class _DjangoMetaWrapper(object):
    """
    Adapter that exposes Django's ``request.META`` dict as a simple
    HTTP-header lookup keyed by the original header name.
    """

    def __init__(self, meta=None):
        # Default to the META dict of the thread-local Django request.
        self.meta = meta or _get_django_request().META

    def get(self, key):
        # Django stores HTTP headers under META['HTTP_<UPPER_SNAKE_NAME>'].
        meta_key = 'HTTP_' + key.upper().replace('-', '_')
        return self.meta.get(meta_key)
def _get_django_request():
    """Return the Django request stored in the thread-local context.

    :rtype: django.http.HttpRequest or None
    :returns: The request saved by ``process_request``, if any.
    """
    return execution_context.get_opencensus_attr(REQUEST_THREAD_LOCAL_KEY)
def _get_current_tracer():
    """Return the tracer attached to the current execution context."""
    return execution_context.get_opencensus_tracer()
def _set_django_attributes(tracer, request):
    """Record the Django user's id and name as attributes on the current span."""
    user = getattr(request, 'user', None)
    if user is None:
        # Anonymous middleware stacks may not attach a user at all.
        return
    # The primary key of Django's User model doubles as the user id.
    user_id = user.pk
    user_name = user.get_username()
    if user_id is not None:
        tracer.add_attribute_to_current_span('django.user.id', str(user_id))
    if user_name is not None:
        tracer.add_attribute_to_current_span('django.user.name', user_name)
class OpencensusMiddleware(MiddlewareMixin):
    """Django middleware that traces each request with OpenCensus.

    Saves the request in thread local storage, starts a server span in
    ``process_request``, names it after the view in ``process_view``, and
    closes/exports it in ``process_response``.
    """
    def __init__(self, get_response=None):
        # One-time configuration and initialization.
        self.get_response = get_response
        # Classes configured via Django settings; instantiated below.
        self._sampler = settings.SAMPLER
        self._exporter = settings.EXPORTER
        self._propagator = settings.PROPAGATOR
        self._blacklist_paths = settings.params.get(BLACKLIST_PATHS)
        # Initialize the sampler
        if self._sampler.__name__ == 'ProbabilitySampler':
            _rate = settings.params.get(
                SAMPLING_RATE, probability.DEFAULT_SAMPLING_RATE)
            self.sampler = self._sampler(_rate)
        else:
            self.sampler = self._sampler()
        # Initialize the exporter (constructor kwargs differ per backend).
        transport = convert_to_import(settings.params.get(TRANSPORT))
        if self._exporter.__name__ == 'GoogleCloudExporter':
            _project_id = settings.params.get(GCP_EXPORTER_PROJECT, None)
            self.exporter = self._exporter(
                project_id=_project_id,
                transport=transport)
        elif self._exporter.__name__ == 'ZipkinExporter':
            _service_name = self._get_service_name(settings.params)
            _zipkin_host_name = settings.params.get(
                ZIPKIN_EXPORTER_HOST_NAME, 'localhost')
            _zipkin_port = settings.params.get(
                ZIPKIN_EXPORTER_PORT, 9411)
            _zipkin_protocol = settings.params.get(
                ZIPKIN_EXPORTER_PROTOCOL, 'http')
            self.exporter = self._exporter(
                service_name=_service_name,
                host_name=_zipkin_host_name,
                port=_zipkin_port,
                protocol=_zipkin_protocol,
                transport=transport)
        elif self._exporter.__name__ == 'TraceExporter':
            _service_name = self._get_service_name(settings.params)
            _endpoint = settings.params.get(
                OCAGENT_TRACE_EXPORTER_ENDPOINT, None)
            self.exporter = self._exporter(
                service_name=_service_name,
                endpoint=_endpoint,
                transport=transport)
        else:
            self.exporter = self._exporter(transport=transport)
        # Initialize the propagator
        self.propagator = self._propagator()
    def process_request(self, request):
        """Called on each request, before Django decides which view to execute.

        :type request: :class:`~django.http.request.HttpRequest`
        :param request: Django http request.
        """
        # Do not trace if the url is blacklisted
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        # Add the request to thread local
        execution_context.set_opencensus_attr(
            REQUEST_THREAD_LOCAL_KEY,
            request)
        try:
            # Start tracing this request
            span_context = self.propagator.from_headers(
                _DjangoMetaWrapper(_get_django_request().META))
            # Reload the tracer with the new span context
            tracer = tracer_module.Tracer(
                span_context=span_context,
                sampler=self.sampler,
                exporter=self.exporter,
                propagator=self.propagator)
            # Span name is being set at process_view
            span = tracer.start_span()
            span.span_kind = span_module.SpanKind.SERVER
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_METHOD,
                attribute_value=request.method)
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_URL,
                attribute_value=request.path)
        except Exception:  # pragma: NO COVER
            # Tracing must never break the request itself.
            log.error('Failed to trace request', exc_info=True)
    def process_view(self, request, view_func, *args, **kwargs):
        """Process view is executed before the view function, here we get the
        function name add set it as the span name.
        """
        # Do not trace if the url is blacklisted
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return
        try:
            # Get the current span and set the span name to the current
            # function name of the request.
            tracer = _get_current_tracer()
            span = tracer.current_span()
            span.name = utils.get_func_name(view_func)
        except Exception:  # pragma: NO COVER
            log.error('Failed to trace request', exc_info=True)
    def process_response(self, request, response):
        """Close the span with the response status and export it."""
        # Do not trace if the url is blacklisted
        if utils.disable_tracing_url(request.path, self._blacklist_paths):
            return response
        try:
            tracer = _get_current_tracer()
            tracer.add_attribute_to_current_span(
                attribute_key=HTTP_STATUS_CODE,
                attribute_value=str(response.status_code))
            _set_django_attributes(tracer, request)
            tracer.end_span()
            tracer.finish()
        except Exception:  # pragma: NO COVER
            log.error('Failed to trace request', exc_info=True)
        finally:
            # NOTE: `return` in a finally clause swallows any in-flight
            # exception; intentional here so the response is always delivered.
            return response
    def _get_service_name(self, params):
        """Resolve the exported service name, preferring SERVICE_NAME and
        falling back to the legacy Zipkin setting, then 'my_service'."""
        _service_name = params.get(
            SERVICE_NAME, None)
        if _service_name is None:
            _service_name = params.get(
                ZIPKIN_EXPORTER_SERVICE_NAME, 'my_service')
        return _service_name
|
20,383 | 4b105df30ca9bd93d6116f74ebadf773c325f1b2 | #-*- coding=utf-8 -*-
import cv2
import numpy as np
import primary_c
def plot(pic_dir):
primary_c.temperature(pic_dir)
img = np.zeros((100,500,3),np.uint8)
#tem=[(0,0,225),(0,225,0),(225,0,0)]
with open('app/color/temputre.txt','r') as f:
i=0
for t in f.readlines():
tlist=t.split(',')
rgb=(int(tlist[0]),int(tlist[1]),int(tlist[2]))
print rgb
xx=(i*50,0)
yy=(i*50+50,100)
cv2.rectangle(img, xx, yy, rgb, -1)
i=i+1
cv2.imwrite("app/static/uploads/tempture.jpg",img)
#cv2.imshow("img",img)
#cv2.waitKey(0)
if __name__ == "__main__":
plot('feathers.jpg')
|
20,384 | 06e3311987020d19cca63dff4d9a1747120d2af8 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import unittest
import mock
from py_utils import retry_util
class RetryOnExceptionTest(unittest.TestCase):
  """Unit tests for py_utils.retry_util.RetryOnException."""

  def setUp(self):
    # Counts how many times the decorated function body actually ran.
    self.num_calls = 0
    # Patch time.sleep to make tests run faster (skip waits) and also check
    # that exponential backoff is implemented correctly.
    patcher = mock.patch('time.sleep')
    self.time_sleep = patcher.start()
    self.addCleanup(patcher.stop)

  def testNoExceptionsReturnImmediately(self):
    @retry_util.RetryOnException(Exception, retries=3)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      return 'OK!'

    # The function is called once and returns the expected value.
    self.assertEqual(Test(), 'OK!')
    self.assertEqual(self.num_calls, 1)

  def testRaisesExceptionIfAlwaysFailing(self):
    @retry_util.RetryOnException(KeyError, retries=5)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      raise KeyError('oops!')

    # The exception is eventually raised.
    with self.assertRaises(KeyError):
      Test()
    # The function is called the expected number of times.
    self.assertEqual(self.num_calls, 6)
    # Waits between retries do follow exponential backoff.
    self.assertEqual(
        self.time_sleep.call_args_list,
        [mock.call(i) for i in (1, 2, 4, 8, 16)])

  def testOtherExceptionsAreNotCaught(self):
    @retry_util.RetryOnException(KeyError, retries=3)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      raise ValueError('oops!')

    # The exception is raised immediately on the first try.
    with self.assertRaises(ValueError):
      Test()
    self.assertEqual(self.num_calls, 1)

  def testCallerMayOverrideRetries(self):
    @retry_util.RetryOnException(KeyError, retries=3)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      raise KeyError('oops!')

    with self.assertRaises(KeyError):
      Test(retries=10)
    # The value on the caller overrides the default on the decorator.
    self.assertEqual(self.num_calls, 11)

  def testCanEventuallySucceed(self):
    @retry_util.RetryOnException(KeyError, retries=5)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      if self.num_calls < 3:
        raise KeyError('oops!')
      else:
        return 'OK!'

    # The value is returned after the expected number of calls.
    self.assertEqual(Test(), 'OK!')
    self.assertEqual(self.num_calls, 3)

  def testRetriesCanBeSwitchedOff(self):
    @retry_util.RetryOnException(KeyError, retries=5)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      if self.num_calls < 3:
        raise KeyError('oops!')
      else:
        return 'OK!'

    # We fail immediately on the first try.
    with self.assertRaises(KeyError):
      Test(retries=0)
    self.assertEqual(self.num_calls, 1)

  def testCanRetryOnMultipleExceptions(self):
    @retry_util.RetryOnException((KeyError, ValueError), retries=3)
    def Test(retries=None):
      del retries
      self.num_calls += 1
      if self.num_calls == 1:
        raise KeyError('oops!')
      elif self.num_calls == 2:
        raise ValueError('uh oh!')
      else:
        return 'OK!'

    # Call eventually succeeds after enough tries.
    self.assertEqual(Test(retries=5), 'OK!')
    self.assertEqual(self.num_calls, 3)


if __name__ == '__main__':
  unittest.main()
|
20,385 | 8772533d154e8f6d3dd8a39bf3813b16b201970e | #
# @lc app=leetcode id=103 lang=python3
#
# [103] Binary Tree Zigzag Level Order Traversal
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def zigzagLevelOrder(self, root: "TreeNode") -> "List[List[int]]":
        """Return the zigzag (spiral) level-order traversal of a binary tree.

        Levels alternate direction: left-to-right, then right-to-left, etc.

        Fixes vs. original: returns [] (not None) for an empty tree, matching
        the declared List[List[int]] return type, and uses a deque so each
        dequeue is O(1) instead of list.pop(0)'s O(n).
        """
        if not root:
            return []
        from collections import deque  # O(1) popleft
        queue = deque([root])
        result = []
        left_to_right = True
        while queue:
            level = []
            # Exactly the nodes currently queued form this level.
            for _ in range(len(queue)):
                node = queue.popleft()
                level.append(node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            result.append(level if left_to_right else level[::-1])
            left_to_right = not left_to_right
        return result
# @lc code=end
|
20,386 | 19922a535929825270a029f765038fe85fe390ff | from flask import Flask, render_template, request, json
import os
import odapi_server
import numpy as np
import sys
# import json
UPLOAD_FOLDER = '/mnt/lost+found/upload/odapi'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def index():
return render_template('index.html')
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The check is case-insensitive ("photo.JPG" is accepted), fixing the
    original case-sensitive comparison that rejected upper-case extensions.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars/arrays to native Python types."""

    def default(self, obj):
        # Map NumPy types onto their built-in equivalents before encoding.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class (raises TypeError).
        return super(JsonEncoder, self).default(obj)
@app.route("/upload", methods=['POST'])
def hello():
file = request.files['file']
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(file_path)
result = odapi_server.detect(file)
return json.dumps(result, cls=JsonEncoder)
if __name__ == '__main__':
    # An optional first CLI argument overrides the default port (5000).
    port = 5000
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    app.run(host='0.0.0.0',port = port)
|
20,387 | 31e17f9d414cb35ebd8ae89ca1a1f55f0171aff7 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
try:
    options = Options()
    # options.headless = True
    driver = webdriver.Chrome(options=options)
    driver.get("https://gentle-bay-0e4e13803.azurestaticapps.net/todo.html")
    # NOTE(review): this accesses the method object but never calls it -- a
    # no-op; presumably an unfinished element lookup. Verify intent.
    driver.find_element_by_xpath
    # Placeholder helper (name contains a typo: "imput"); not yet implemented.
    def get_imput_1():
        pass
finally:
    # Always close the browser window, even if setup/navigation fails.
    # NOTE(review): if webdriver.Chrome() itself raises, `driver` is unbound
    # here and this raises NameError -- confirm the intended failure mode.
    driver.close()
20,388 | fed0f08a2ae9741f301503e6b7081e20fd47ff80 | '''
Created on Jul 13, 2013
@author: Alain Adler
'''
import json
class CallBackResponse(object):
    """Builds a JSON callback response made of telephony actions and settings."""

    def __init__(self):
        self.actions = []
        self.settings = []

    def answer(self):
        """Append an 'answer' action."""
        self.actions.append({"answer": {}})

    def dial(self, numbers, callerId, url, record=False):
        """Append a 'dial' action targeting *numbers* with the given caller id."""
        payload = {
            "numbers": numbers,
            "caller_id": callerId,
            "url": url,
            "record": record,
        }
        self.actions.append({"dial": payload})

    def bridgeTo(self, otherCallId, url):
        """Append a 'bridge_to' action joining this call to *otherCallId*."""
        self.actions.append({"bridge_to": {"other_call_id": otherCallId, "url": url}})

    def setLimitCallDuration(self, limitCallDurationSeconds):
        """Set (or overwrite) the call-duration limit setting, in seconds."""
        payload = {"seconds": limitCallDurationSeconds}
        # Update the existing entry in place so the setting appears only once.
        for entry in self.settings:
            if "set_limit_call_duration" in entry:
                entry["set_limit_call_duration"] = payload
                return
        self.settings.append({"set_limit_call_duration": payload})

    def json(self):
        """Serialize the response; 'settings' is emitted only when non-empty."""
        body = {"actions": self.actions}
        if self.settings:
            body["settings"] = self.settings
        return json.dumps(body)
|
20,389 | 3676ba1f5691f2ae5c37aaf77e937fb8e448400e | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 10:26:01 2019
@author: mushtu
"""
# Spyder teaching script: run cell-by-cell; bare expressions rely on the
# console echoing their value.
##Create empty dictionary
empty = {}
type(empty)
##Create dictionary food
food = {"ham" : "yes", "egg" : "yes", "spam" : "no" }
food
##Other ways of creating dictionaries
new= dict(Bob='508-2232', John='159-0055')
new
new1=dict([('Bob', '508-2232'), ('John', '159-0055')])
new1
## Lookuptag: Creating dictionaries by assignment
bag = dict()
bag['money'] = 12
bag['candy'] = 3
bag['tissues'] = 75
bag
##Constructing and accessing dictionaries
# Make a dictionary with {} and : to signify a key and a value
my_dict = {'key1':'value1','key2':'value2'}
# Call values by their key
my_dict['key2']
print (bag['candy'])
#Different object types for key value pair
my_dict = {'key1':123,'key2':[12,23,33],'key3':['item0','item1','item2']}
# Let's call items from the dictionary
my_dict['key3']
# Can call an index on that value
my_dict['key3'][1]
##Using mutable objects
# NOTE: the next line raises TypeError (lists are unhashable) -- an
# intentional demonstration that dict keys must be immutable.
dict1 = { [1,2,3]:"abc"}
##Can use tuples for keys
dict1 = { (1,2,3):"abc", 3.1415:"abc"}
dict1
##Operators
my_dict = {'Ahmad': 10, 'Jane': 42, 'Carol': 39, 'Jon': 25}
len(my_dict)
del my_dict['Ahmad']
my_dict
##Affecting values
# Assigning to a new key inserts it into the dictionary.
my_dict['key1']= 123
my_dict
# Subtract 123 from the value
my_dict['key1'] = my_dict['key1'] - 123
#Check
my_dict['key1']
my_dict
##Using -= or +=
# Set the object equal to itself minus 123
my_dict['key1'] -= 123
my_dict['key1']
my_dict
##Problem 1
newdict= {0: 10, 1: 20}
newdict[2] = 30
newdict
##Problem 2
d = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
def is_key_present(x):
    # Membership test on a dict checks its keys.
    if x in d:
        print('Key is present in the dictionary')
    else:
        print('Key is not present in the dictionary')
is_key_present(5)
is_key_present(9)
##Problem 3
myDict = {'a':1,'b':2,'c':3,'d':4}
print(myDict)
# Guarded delete: only remove the key if it exists.
if 'a' in myDict:
    del myDict['a']
print(myDict)
20,390 | 857f04b52720d19159b0b139917044be55b8847d |
'''
The results obtained using and modifying this code and hydration data set may be used in any publications provided that its use is explicitly acknowledged.
A suitable reference is: Junji Hyodo, Kota Tsujikawa, Motoki Shiga, Yuji Okuyama, and Yoshihiro Yamazaki*, “Accelerated discovery of proton-conducting perovskite oxide by capturing physicochemical fundamentals of hydration”, ACS Energy Letters, 6(2021) 2985-2992. (https://doi.org/10.1021/acsenergylett.1c01239)
'''
import os
import shutil
import itertools
import re
import pandas as pd
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
def generate_experimental_condition(dir_output, file_name_prefix,list_temperature, partial_pressure_H2O=0.02,SinteringTemperature=1600,SinteringTime=24):
    """
    Generate experimental condition data to a csv file.

    Interactively prompts (via input()) for the A-site host, B-site host, and
    B-site dopant elements with their valences/fractions, builds one row per
    temperature in *list_temperature*, and writes '<prefix>_all.csv' to
    *dir_output*.

    Parameters
    ----------
    dir_output : str
        output directory
    file_name_prefix : str
        name prefix of output file
    list_temperature : list of int
        list of experimental temperature (Celsius)
    partial_pressure_H2O : float
        experimental water vapor partial pressure
    SinteringTemperature : int
        sintering temperature
    SinteringTime : int
        sintering time

    Returns
    -------
    None
    """
    print("Enter the host element occupying the A-site")
    set_A1 = input ("Ex: Ba\n")
    print("Enter the valence of the A-site host element")
    set_A1_valence = input("Ex: 2\n")
    frac_A1 = '1'
    print("Enter the host element occupying the B-site")
    set_B1 = input ("Ex: Zr\n")
    print("Enter the valence of the B-site host element")
    set_B1_valence = input("Ex:4\n")
    print("Enter the fraction that describes the composition of the B-site host element")
    frac_B1 = str(format(float( input ("Ex:0.8\n")), '.2f'))
    print("Enter the dopant element occupying the B-site")
    set_B2 = input ("Ex: Sc\n")
    print("Enter the valence of the B-dopant")
    set_B2_valence = input("Ex: 3\n")
    # The dopant fraction is the complement of the host fraction.
    frac_B2 = str(format((1 - float(frac_B1)), '.2f'))
    # generate dataframe for base
    CA = set_A1 + set_B1 + frac_B1 + set_B2 + frac_B2 + "O3"
    dic = {'Composition':CA,
           'A1':set_A1, 'Valence A1':set_A1_valence, 'fraction A1':frac_A1,
           'B1':set_B1, 'Valence B1':set_B1_valence, 'fraction B1':frac_B1,
           'B2':set_B2, 'Valence B2':set_B2_valence, 'fraction B2':frac_B2}
    df = pd.DataFrame(dic,index=['i',])
    # add columns name
    columns_all = ['Composition','Temperature / C','pH2O / atm','CH',
                   'A1','Valence A1','fraction A1','A2','Valence A2','fraction A2',
                   'B1','Valence B1','fraction B1','B2','Valence B2','fraction B2',
                   'B3','Valence B3','fraction B3','X1','Valence X1','fraction X1','fraction total']
    # Ensure every expected column exists (missing ones become NaN).
    for c in columns_all:
        if not(c in df.columns):
            df[c] = float(np.NaN)
    df = df[columns_all]
    # add another experimental conditions
    df['pH2O / atm'] = partial_pressure_H2O
    df['Sintering temperature/C'] = SinteringTemperature
    df['Sintering time / h'] = SinteringTime
    df['fraction A2']='0'
    df['fraction B3']='0'
    df['X1']='O'
    df['Valence X1']='-2'
    df['fraction X1']='0.2'
    df['fraction total']='1'
    # Duplicate the base row once per requested temperature.
    for cnt, tmp in enumerate(list_temperature):
        df['Temperature / C'] = tmp
        if cnt==0:
            df_all = df.copy()
        else:
            df_all = pd.concat([df_all,df], ignore_index=True)
    # NOTE(review): the format string has one placeholder, so the second
    # argument (tmp) is ignored -- the file name uses only the prefix.
    file_name = os.path.join(dir_output,'{:}_all.csv'.format(file_name_prefix, tmp))
    df_all.to_csv(file_name, index=False)
#------------------------------------------------------------
def predict_CH(X_train, y_train, dopant_fraction_train, X_test, dopant_fraction_test, model, flag_StandardScaler, flag_proc):
    """
    Train and predict proton concentration by a machine learning model.

    Parameters
    ----------
    X_train : numpy.array
        dataset of training data
    y_train : numpy.array
        target variable (proton concentration) of training data
    dopant_fraction_train : numpy.array
        dopant fractions of training data
    X_test : numpy.array
        dataset of test data
    dopant_fraction_test : numpy.array
        dopant fractions of test data
    model : scikit-learn model
        machine learning model (fit/predict interface)
    flag_StandardScaler : bool
        with or without standardization of the descriptors
    flag_proc : str
        representation of the target variable used during training; one of
        'direct', 'log', 'CHdopant', 'log_CHdopant'. Predictions are always
        converted back to proton concentration.

    Returns
    -------
    y_pred : numpy.array
        predicted proton concentration for the test data, clipped to [0, 1]
        (per-dopant representations are clipped to 1 before rescaling,
        matching the original branch-by-branch behavior).

    Raises
    ------
    ValueError
        if flag_proc is not one of the four supported modes (the original
        code fell through to an unbound-variable error here).
    """
    # Fix the random seed for reproducibility when the model supports it.
    if 'random_state' in model.get_params().keys():
        model = model.set_params(random_state=0)
    # Standardize each descriptor using training-set statistics only.
    if flag_StandardScaler:
        sc = StandardScaler()
        sc.fit(X_train)
        X_train = sc.transform(X_train)
        X_test = sc.transform(X_test)
    if flag_proc not in ('direct', 'log', 'CHdopant', 'log_CHdopant'):
        raise ValueError('Unknown flag_proc: {}'.format(flag_proc))
    take_log = flag_proc in ('log', 'log_CHdopant')
    per_dopant = flag_proc in ('CHdopant', 'log_CHdopant')
    # Build the training target in the requested representation.
    target = y_train / dopant_fraction_train if per_dopant else y_train
    if take_log:
        target = np.log(target)
    model.fit(X_train, target)
    # Predict and convert back to proton concentration; the clamp order
    # (upper bound before dopant rescaling) matches the original branches.
    y_pred = model.predict(X_test)
    if take_log:
        y_pred = np.exp(y_pred)
    y_pred[y_pred > 1] = 1
    if per_dopant:
        y_pred = y_pred * dopant_fraction_test
    y_pred[y_pred < 0] = 0
    return y_pred
#------------------------------------------------------------
def feature_ranking(X_train, y_train, dopant_fraction_train, list_feature, model, flag_proc):
    """
    Rank descriptors by the importance scores of a fitted model.

    Parameters
    ----------
    X_train : numpy.array
        training descriptor matrix
    y_train : numpy.array
        training target (proton concentration)
    dopant_fraction_train : numpy.array
        dopant fractions of the training data
    list_feature : numpy.array
        descriptor names, in the same column order as X_train
    model : scikit-learn model
        estimator exposing feature_importances_ after fitting
    flag_proc : str
        target-variable transform used for training; one of
        'direct', 'log', 'CHdopant', 'log_CHdopant'

    Returns
    -------
    df_ranking : pandas.DataFrame
        descriptor names with importance scores, highest first
    """
    # fix the seed for estimators that accept one, for reproducible rankings
    if 'random_state' in model.get_params().keys():
        model = model.set_params(random_state=0)
    # fit on the target transformed the same way as during training
    if flag_proc == 'log_CHdopant':
        model.fit(X_train, np.log(y_train / dopant_fraction_train))
    elif flag_proc == 'direct':
        model.fit(X_train, y_train)
    elif flag_proc == 'CHdopant':
        model.fit(X_train, y_train / dopant_fraction_train)
    elif flag_proc == 'log':
        model.fit(X_train, np.log(y_train))
    scores = model.feature_importances_
    # descending order of importance
    order = np.argsort(-scores)
    ranked = pd.DataFrame({'Name': list_feature[order], 'Importance': scores[order]})
    return ranked[['Name', 'Importance']]
#------------------------------------------------------------
def virtual_prediction(file_train, file_test, file_feature, dir_output, model, flag_StandardScaler, flag_proc):
    """
    Predict proton concentrations of virtual (unmeasured) compositions.

    Parameters
    ----------
    file_train : str
        file name of training data
    file_test : str
        file name of test data
    file_feature : str
        file name listing the descriptor columns to use
    dir_output : str
        output directory
    model : model of scikit-learn
        machine learning model to use
    flag_StandardScaler : boolean
        with or without standardization (True or False)
    flag_proc : str
        target-variable transform used for training; one of
        'direct', 'log', 'CHdopant', 'log_CHdopant'
        (predictions are always converted back to proton concentration)

    Returns
    -------
    None
    """
    # load train/test tables and the descriptor list
    df_train = pd.read_csv(file_train, index_col=False)
    df_test = pd.read_csv(file_test, index_col=False)
    descriptor_names = np.array(pd.read_csv(file_feature, index_col=False).columns)

    # numpy arrays consumed by the model
    X_train = np.array(df_train[descriptor_names])
    y_train = np.array(df_train['CH'])
    X_test = np.array(df_test[descriptor_names])
    dopant_train = np.array(df_train['dopant fraction'])
    dopant_test = np.array(df_test['dopant fraction'])

    # predicted proton concentration for each virtual composition
    y_pred = predict_CH(X_train, y_train, dopant_train, X_test,
                        dopant_test, model, flag_StandardScaler, flag_proc)

    # write predictions alongside their experimental conditions
    df_out = pd.DataFrame({'Composition': df_test['Composition'],
                           'Temperature / C': df_test['Temperature / C'],
                           'CH_predicted': y_pred,
                           'Dopant fraction': dopant_test,
                           'p_H2O / atm': df_test['pH2O / atm']})
    df_out = df_out.sort_values(['Composition', 'Temperature / C'])
    df_out.to_csv(os.path.join(dir_output, 'prediction_all.csv'), index=False)

    # models that expose importance scores also get a descriptor ranking
    if hasattr(model, 'feature_importances_'):
        df_ranking = feature_ranking(X_train, y_train, dopant_train, descriptor_names, model, flag_proc)
        df_ranking.to_csv(os.path.join(dir_output, 'fea_importance_all.csv'), index=False)
#------------------------------------------------------------
def _lookup_site_properties(df_fea, df_exp, site, fraction, ir_column):
    """
    Look up elemental properties for one perovskite site (e.g. 'A1').

    For each experimental row whose site fraction is positive, find the
    matching (Atom, Valence) row in the element table and copy its
    properties; rows with zero site fraction keep 0.0 placeholders.

    Parameters
    ----------
    df_fea : pandas.DataFrame
        elemental information table (columns 'Atom', 'Valence', ...)
    df_exp : pandas.DataFrame
        experimental conditions table (columns like 'A1', 'Valence A1')
    site : str
        site label, one of 'A1', 'A2', 'B1', 'B2', 'B3'
    fraction : numpy.array
        site fraction per experimental row
    ir_column : str
        ionic-radius column to use ('Ionic radius XII' for A sites,
        'Ionic radius VI' for B sites)

    Returns
    -------
    tuple of numpy.array
        (atomic weight, atomic density, melting point,
         first ionization energy, electronegativity, ionic radius)
    """
    N = len(fraction)
    aw, ad, mp = np.zeros(N), np.zeros(N), np.zeros(N)
    fie, en, ir = np.zeros(N), np.zeros(N), np.zeros(N)
    for n in range(N):
        if fraction[n] > 0:
            a = df_exp[site][n]
            v = df_exp['Valence ' + site][n]
            i = np.where((df_fea['Atom'] == a) & (df_fea['Valence'] == v))[0]
            if len(i) == 0:
                # (Atom, Valence) pair missing from the element table
                print('None!')
            idx = i[0]  # scalar index: avoids assigning a length-1 Series into a float slot
            aw[n] = df_fea['Atomic weight'][idx]
            ad[n] = df_fea['Atomic density'][idx]
            mp[n] = df_fea['Melting point'][idx]
            fie[n] = df_fea['First ionization energy'][idx]
            en[n] = df_fea['Electronegativity'][idx]
            ir[n] = df_fea[ir_column][idx]
    return aw, ad, mp, fie, en, ir
#------------------------------------------------------------
def combine_dataset(file_element, file_experiment, file_save, threshold=None):
    """
    Generate a dataset for machine learning by combining elemental
    information and experimental conditions.

    Parameters
    ----------
    file_element : str
        file name of elemental information (csv)
    file_experiment : str
        file name of experimental conditions (.csv or .xlsx)
    file_save : str
        file name of output data (csv)
    threshold : float, optional
        threshold on the ratio of proton concentration to dopant fraction;
        experimental rows below the threshold are excluded.
        If threshold is not specified, no data is excluded.

    Returns
    -------
    None
    """
    # load elemental data file
    df_fea = pd.read_csv(file_element)
    # load experimental conditions; reader chosen by file extension
    tmp, ext = os.path.splitext(file_experiment)
    if ext == '.xlsx':
        # bug fix: Excel workbooks must be read with read_excel, not read_csv
        df_exp = pd.read_excel(file_experiment)
    elif ext == '.csv':
        df_exp = pd.read_csv(file_experiment)
    N = df_exp.shape[0]
    # calculate A site fractions (normalized so A1 + A2 = 1)
    Const_A = np.array(df_exp['fraction A1'] + df_exp['fraction A2'])
    fraction_A1 = np.array(df_exp['fraction A1'] / Const_A)
    fraction_A2 = np.array(df_exp['fraction A2'] / Const_A)
    # calculate B site fractions (normalized so B1 + B2 + B3 = 1)
    Const_B = np.array(df_exp['fraction B1'] + df_exp['fraction B2'] + df_exp['fraction B3'])
    fraction_B1 = np.array(df_exp['fraction B1'] / Const_B)
    fraction_B2 = np.array(df_exp['fraction B2'] / Const_B)
    fraction_B3 = np.array(df_exp['fraction B3'] / Const_B)
    # fixed descriptors of the oxygen (X) site: fraction, atomic weight, ionic radius
    fraction_X1 = np.ones(N)*3
    aw_X1 = np.ones(N)*16
    ir_X1 = np.ones(N)*1.4
    # A2/B2/B3 count as dopants when their valence is below the host cation's
    flag_dp_A2 = np.array(df_exp['Valence A2'] < df_exp['Valence A1'])*1
    flag_dp_B2 = np.array(df_exp['Valence B2'] < df_exp['Valence B1'])*1
    flag_dp_B3 = np.array(df_exp['Valence B3'] < df_exp['Valence B1'])*1
    # complementary flags: 1 when the cation acts as a host
    flag_ht_A2 = 1-flag_dp_A2
    flag_ht_B2 = 1-flag_dp_B2
    flag_ht_B3 = 1-flag_dp_B3
    # host and dopant fractions (A site + B site sum to 2)
    dopant_fraction = flag_dp_A2*fraction_A2 + flag_dp_B2*fraction_B2 + flag_dp_B3*fraction_B3
    host_fraction = 2 - dopant_fraction
    dopant_A_fraction = flag_dp_A2*fraction_A2
    host_A_fraction = 1 - dopant_A_fraction
    dopant_B_fraction = flag_dp_B2*fraction_B2 + flag_dp_B3*fraction_B3
    host_B_fraction = 1 - dopant_B_fraction
    # target variable (proton concentration)
    CH = np.array(df_exp['CH'])
    # elemental properties per site (A sites use XII-coordinated radii, B sites VI)
    aw_A1, ad_A1, mp_A1, fie_A1, en_A1, ir_A1 = _lookup_site_properties(df_fea, df_exp, 'A1', fraction_A1, 'Ionic radius XII')
    aw_A2, ad_A2, mp_A2, fie_A2, en_A2, ir_A2 = _lookup_site_properties(df_fea, df_exp, 'A2', fraction_A2, 'Ionic radius XII')
    aw_B1, ad_B1, mp_B1, fie_B1, en_B1, ir_B1 = _lookup_site_properties(df_fea, df_exp, 'B1', fraction_B1, 'Ionic radius VI')
    aw_B2, ad_B2, mp_B2, fie_B2, en_B2, ir_B2 = _lookup_site_properties(df_fea, df_exp, 'B2', fraction_B2, 'Ionic radius VI')
    aw_B3, ad_B3, mp_B3, fie_B3, en_B3, ir_B3 = _lookup_site_properties(df_fea, df_exp, 'B3', fraction_B3, 'Ionic radius VI')
    #----------------------------------------------------------------------------
    # calculate descriptors (fraction-weighted site averages and their ratios)
    # NOTE(review): the dopant averages below divide by dopant_fraction and will
    # produce inf/nan for undoped rows (dopant_fraction == 0) — confirm such
    # rows never occur, as in the original implementation.
    ave_aw_A = fraction_A1*aw_A1 + fraction_A2*aw_A2
    ave_ad_A = fraction_A1*ad_A1 + fraction_A2*ad_A2
    ave_mp_A = fraction_A1*mp_A1 + fraction_A2*mp_A2
    ave_fie_A = fraction_A1*fie_A1 + fraction_A2*fie_A2
    ave_en_A = fraction_A1*en_A1 + fraction_A2*en_A2
    ave_ir_A = fraction_A1*ir_A1 + fraction_A2*ir_A2
    ave_aw_B = fraction_B1*aw_B1 + fraction_B2*aw_B2 + fraction_B3*aw_B3
    ave_ad_B = fraction_B1*ad_B1 + fraction_B2*ad_B2 + fraction_B3*ad_B3
    ave_mp_B = fraction_B1*mp_B1 + fraction_B2*mp_B2 + fraction_B3*mp_B3
    ave_fie_B = fraction_B1*fie_B1 + fraction_B2*fie_B2 + fraction_B3*fie_B3
    ave_en_B = fraction_B1*en_B1 + fraction_B2*en_B2 + fraction_B3*en_B3
    ave_ir_B = fraction_B1*ir_B1 + fraction_B2*ir_B2 + fraction_B3*ir_B3
    Molar_weight = ave_aw_A + ave_aw_B + fraction_X1*aw_X1
    T_sinter_time_K_h = np.array(df_exp["Sintering temperature/C"] * df_exp["Sintering time / h"])
    ratio_aw_AB = ave_aw_A / ave_aw_B
    ratio_ad_AB = ave_ad_A / ave_ad_B
    ratio_mp_AB = ave_mp_A / ave_mp_B
    ratio_fie_AB = ave_fie_A / ave_fie_B
    ratio_en_AB = ave_en_A / ave_en_B
    ratio_ir_AB = ave_ir_A / ave_ir_B
    ave_aw_host = ( fraction_A1*aw_A1 + fraction_B1*aw_B1
                   + flag_ht_A2*fraction_A2*aw_A2 + flag_ht_B2*fraction_B2*aw_B2 + flag_ht_B3*fraction_B3*aw_B3 ) / host_fraction
    ave_ad_host = ( fraction_A1*ad_A1 + fraction_B1*ad_B1
                   + flag_ht_A2*fraction_A2*ad_A2 + flag_ht_B2*fraction_B2*ad_B2 + flag_ht_B3*fraction_B3*ad_B3) / host_fraction
    ave_mp_host = ( fraction_A1*mp_A1 + fraction_B1*mp_B1
                   + flag_ht_A2*fraction_A2*mp_A2 + flag_ht_B2*fraction_B2*mp_B2 + flag_ht_B3*fraction_B3*mp_B3) / host_fraction
    ave_fie_host = ( fraction_A1*fie_A1 + fraction_B1*fie_B1
                   + flag_ht_A2*fraction_A2*fie_A2 + flag_ht_B2*fraction_B2*fie_B2 + flag_ht_B3*fraction_B3*fie_B3) / host_fraction
    ave_en_host = ( fraction_A1*en_A1 + fraction_B1*en_B1
                   + flag_ht_A2*fraction_A2*en_A2 + flag_ht_B2*fraction_B2*en_B2 + flag_ht_B3*fraction_B3*en_B3) / host_fraction
    ave_ir_host = ( fraction_A1*ir_A1 + fraction_B1*ir_B1
                   + flag_ht_A2*fraction_A2*ir_A2 + flag_ht_B2*fraction_B2*ir_B2 + flag_ht_B3*fraction_B3*ir_B3) / host_fraction
    ave_aw_dopant = ( flag_dp_A2*fraction_A2*aw_A2 + flag_dp_B2*fraction_B2*aw_B2 + flag_dp_B3*fraction_B3*aw_B3 ) / dopant_fraction
    ave_ad_dopant = ( flag_dp_A2*fraction_A2*ad_A2 + flag_dp_B2*fraction_B2*ad_B2 + flag_dp_B3*fraction_B3*ad_B3) / dopant_fraction
    ave_mp_dopant = ( flag_dp_A2*fraction_A2*mp_A2 + flag_dp_B2*fraction_B2*mp_B2 + flag_dp_B3*fraction_B3*mp_B3) / dopant_fraction
    ave_fie_dopant = ( flag_dp_A2*fraction_A2*fie_A2 + flag_dp_B2*fraction_B2*fie_B2 + flag_dp_B3*fraction_B3*fie_B3) / dopant_fraction
    ave_en_dopant = ( flag_dp_A2*fraction_A2*en_A2 + flag_dp_B2*fraction_B2*en_B2 + flag_dp_B3*fraction_B3*en_B3) / dopant_fraction
    ave_ir_dopant = ( flag_dp_A2*fraction_A2*ir_A2 + flag_dp_B2*fraction_B2*ir_B2 + flag_dp_B3*fraction_B3*ir_B3) / dopant_fraction
    ave_aw_host_A = ( fraction_A1*aw_A1 + flag_ht_A2*fraction_A2*aw_A2 ) / host_A_fraction
    ave_ad_host_A = ( fraction_A1*ad_A1+ flag_ht_A2*fraction_A2*ad_A2 ) / host_A_fraction
    ave_mp_host_A = ( fraction_A1*mp_A1+ flag_ht_A2*fraction_A2*mp_A2) / host_A_fraction
    ave_fie_host_A = ( fraction_A1*fie_A1 + flag_ht_A2*fraction_A2*fie_A2) / host_A_fraction
    ave_en_host_A = ( fraction_A1*en_A1+ flag_ht_A2*fraction_A2*en_A2) / host_A_fraction
    ave_ir_host_A = ( fraction_A1*ir_A1+ flag_ht_A2*fraction_A2*ir_A2) / host_A_fraction
    ave_aw_host_B = ( fraction_B1*aw_B1 + flag_ht_B2*fraction_B2*aw_B2 + flag_ht_B3*fraction_B3*aw_B3 ) / host_B_fraction
    ave_ad_host_B = ( fraction_B1*ad_B1+ flag_ht_B2*fraction_B2*ad_B2 + flag_ht_B3*fraction_B3*ad_B3) / host_B_fraction
    ave_mp_host_B = ( fraction_B1*mp_B1 + flag_ht_B2*fraction_B2*mp_B2 + flag_ht_B3*fraction_B3*mp_B3) / host_B_fraction
    ave_fie_host_B = ( fraction_B1*fie_B1 + flag_ht_B2*fraction_B2*fie_B2 + flag_ht_B3*fraction_B3*fie_B3) / host_B_fraction
    ave_en_host_B = ( fraction_B1*en_B1 + flag_ht_B2*fraction_B2*en_B2 + flag_ht_B3*fraction_B3*en_B3) / host_B_fraction
    ave_ir_host_B = ( fraction_B1*ir_B1 + flag_ht_B2*fraction_B2*ir_B2 + flag_ht_B3*fraction_B3*ir_B3) / host_B_fraction
    ratio_aw_host_B_host_A = ave_aw_host_B / ave_aw_host_A
    ratio_ad_host_B_host_A = ave_ad_host_B / ave_ad_host_A
    ratio_mp_host_B_host_A = ave_mp_host_B / ave_mp_host_A
    ratio_fie_host_B_host_A = ave_fie_host_B / ave_fie_host_A
    ratio_en_host_B_host_A = ave_en_host_B / ave_en_host_A
    ratio_ir_host_B_host_A = ave_ir_host_B / ave_ir_host_A
    ratio_aw_dopant_host_A = ave_aw_dopant / ave_aw_host_A
    ratio_ad_dopant_host_A = ave_ad_dopant / ave_ad_host_A
    ratio_mp_dopant_host_A = ave_mp_dopant / ave_mp_host_A
    ratio_fie_dopant_host_A = ave_fie_dopant / ave_fie_host_A
    ratio_en_dopant_host_A = ave_en_dopant / ave_en_host_A
    ratio_ir_dopant_host_A = ave_ir_dopant / ave_ir_host_A
    ratio_aw_dopant_host_B = ave_aw_dopant / ave_aw_host_B
    ratio_ad_dopant_host_B = ave_ad_dopant / ave_ad_host_B
    ratio_mp_dopant_host_B = ave_mp_dopant / ave_mp_host_B
    ratio_fie_dopant_host_B = ave_fie_dopant / ave_fie_host_B
    ratio_en_dopant_host_B = ave_en_dopant / ave_en_host_B
    ratio_ir_dopant_host_B = ave_ir_dopant / ave_ir_host_B
    ratio_aw_dopant_host = ave_aw_dopant / ave_aw_host
    ratio_ad_dopant_host = ave_ad_dopant / ave_ad_host
    ratio_mp_dopant_host = ave_mp_dopant / ave_mp_host
    ratio_fie_dopant_host = ave_fie_dopant / ave_fie_host
    ratio_en_dopant_host = ave_en_dopant / ave_en_host
    ratio_ir_dopant_host = ave_ir_dopant / ave_ir_host
    MW_ir_AB = Molar_weight / np.sqrt(ave_ir_A*ave_ir_B)
    QToleranceFactor = (ave_ir_A + ir_X1)/np.sqrt(2)/(ave_ir_B + ir_X1)
    #----------------------------------------------------------------------------
    # combine information ( descriptors, experimental condition ) for output
    df_new_fea = {
        "CH":CH,
        "dopant fraction":dopant_fraction,
        "host fraction":host_fraction,
        "host_A fraction":host_A_fraction,
        "host_B fraction":host_B_fraction,
        "dopant_A fraction":dopant_A_fraction,
        "dopant_B fraction":dopant_B_fraction,
        "Molar_weight/gmol-1":Molar_weight,
        "ToleranceFactor":QToleranceFactor,
        "T_sinter*time / K h":T_sinter_time_K_h,
        "average atomic_weight_A":ave_aw_A,
        "average atomic_density_A":ave_ad_A,
        "average melting_point_A":ave_mp_A,
        "average first_ionization_energy_A":ave_fie_A,
        "average electronegativity_A":ave_en_A,
        "average ionic_radius_A":ave_ir_A,
        "average atomic_weight_B":ave_aw_B,
        "average atomic_density_B":ave_ad_B,
        "average melting_point_B":ave_mp_B,
        "average first_ionization_energy_B":ave_fie_B,
        "average electronegativity_B":ave_en_B,
        "average ionic_radius_B":ave_ir_B,
        "ratio atomic_weight_A/B":ratio_aw_AB,
        "ratio atomic_density_A/B": ratio_ad_AB,
        "ratio melting_point_A/B":ratio_mp_AB,
        "ratio first_ionization_energy_A/B":ratio_fie_AB,
        "ratio electronegativity_A/B":ratio_en_AB,
        "ratio ionic_radius_A/B":ratio_ir_AB,
        "average atomic_weight_host":ave_aw_host,
        "average atomic_density_host":ave_ad_host,
        "average melting_point_host":ave_mp_host,
        "average first_ionization_energy_host":ave_fie_host,
        "average electronegativity_host":ave_en_host,
        "average ionic_radius_host":ave_ir_host,
        "average atomic_weight_dopant":ave_aw_dopant,
        "average atomic_density_dopant":ave_ad_dopant,
        "average melting_point_dopant":ave_mp_dopant,
        "average first_ionization_energy_dopant":ave_fie_dopant,
        "average electronegativity_dopant":ave_en_dopant,
        "average ionic_radius_dopant":ave_ir_dopant,
        "average atomic_weight_host_A":ave_aw_host_A,
        "average atomic_density_host_A":ave_ad_host_A,
        "average melting_point_host_A":ave_mp_host_A,
        "average first_ionization_energy_host_A":ave_fie_host_A,
        "average electronegativity_host_A":ave_en_host_A,
        "average ionic_radius_host_A":ave_ir_host_A,
        "average atomic_weight_host_B":ave_aw_host_B,
        "average atomic_density_host_B":ave_ad_host_B,
        "average melting_point_host_B":ave_mp_host_B,
        "average first_ionization_energy_host_B":ave_fie_host_B,
        "average electronegativity_host_B":ave_en_host_B,
        "average ionic_radius_host_B":ave_ir_host_B,
        "ratio atomic_weight_host_A/B":ratio_aw_host_B_host_A,
        "ratio atomic_density_host_A/B":ratio_ad_host_B_host_A,
        "ratio melting_point_host_A/B":ratio_mp_host_B_host_A,
        "ratio first_ionization_energy_host_A/B":ratio_fie_host_B_host_A,
        "ratio electronegativity_host_A/B":ratio_en_host_B_host_A,
        "ratio ionic_radius_host_A/B":ratio_ir_host_B_host_A,
        "ratio atomic_weight_dopant/host_A":ratio_aw_dopant_host_A,
        "ratio atomic_density_dopant/host_A":ratio_ad_dopant_host_A,
        "ratio melting_point_dopant/host_A":ratio_mp_dopant_host_A,
        "ratio first_ionization_energy_dopant/host_A":ratio_fie_dopant_host_A,
        "ratio electronegativity_dopant/host_A":ratio_en_dopant_host_A,
        "ratio ionic_radius_dopant/host_A":ratio_ir_dopant_host_A,
        "ratio atomic_weight_dopant/host_B":ratio_aw_dopant_host_B,
        "ratio atomic_density_dopant/host_B":ratio_ad_dopant_host_B,
        "ratio melting_point_dopant/host_B":ratio_mp_dopant_host_B,
        "ratio first_ionization_energy_dopant/host_B":ratio_fie_dopant_host_B,
        "ratio electronegativity_dopant/host_B":ratio_en_dopant_host_B,
        "ratio ionic_radius_dopant/host_B":ratio_ir_dopant_host_B,
        "ratio atomic_weight_dopant/host":ratio_aw_dopant_host,
        "ratio atomic_density_dopant/host":ratio_ad_dopant_host,
        "ratio melting_point_dopant/host":ratio_mp_dopant_host,
        "ratio first_ionization_energy_dopant/host":ratio_fie_dopant_host,
        "ratio electronegativity_dopant/host":ratio_en_dopant_host,
        "ratio ionic_radius_dopant/host":ratio_ir_dopant_host,
        "Mw/sqrt(radiiA*radiiB)":MW_ir_AB
        }
    df_new_fea = pd.DataFrame(df_new_fea)
    # combine DataFrame of experimental conditions and descriptors
    fea_names = ['Composition', 'Temperature / C',
            'pH2O / atm', 'Sintering temperature/C', 'Sintering time / h',
            'fraction A1', 'fraction A2', 'fraction B1', 'fraction B2', 'fraction B3']
    df_new = pd.concat([df_exp[fea_names], df_new_fea], axis=1)
    # rearrange so that the target variable (CH) comes right after Composition
    n = len(fea_names)
    col = [df_new.columns[0]] + [df_new.columns[n]] + list(df_new.columns[1:n]) + list(df_new.columns[(n+1):])
    df_new = df_new[col]
    # exclude data whose CH / dopant-fraction ratio falls below the threshold
    if threshold:
        i = np.where(df_new['CH']/df_new['dopant fraction']>=threshold)[0]
        df_new = df_new.iloc[i,:]
    # save the dataset to a csv file
    df_new = df_new.sort_values(['Composition','Temperature / C'])
    df_new.to_csv(file_save, index=False, float_format='%.3f')
|
20,391 | 40c9c928f3c611b42253532e147d93b81a69efb7 | """This module contains utilities for accessing and modifying the game leaderboard"""
import os
import pymongo
import pymongo.errors
import dotenv
from utils import difficulties, MAX_LEADERS
dotenv.load_dotenv()
class Leaderboard:
    """Thin wrapper around the MongoDB collections backing the leaderboard."""

    def __init__(self, db_name='leaderboard'):
        """Connect to the cluster using the ATLAS_KEY env var (a URI template)."""
        key = os.getenv('ATLAS_KEY')
        self.valid = key is not None
        self.client = None
        self.database = None
        if not self.valid:
            return
        try:
            self.client = pymongo.MongoClient(key % db_name)
            self.database = self.client[db_name]
        except pymongo.errors.ConfigurationError:
            # bad/unreachable connection string: mark the handler unusable
            self.valid = False

    def get_top(self, difficulty):
        """Return the MAX_LEADERS fastest times for *difficulty*, ascending."""
        cursor = self.database[difficulty].find().sort('time').limit(MAX_LEADERS)
        return list(cursor)

    def is_high_score(self, difficulty, time):
        """Return True when *time* would earn a spot on the leaderboard."""
        leaders = self.get_top(difficulty)
        # board not full yet, or faster than the current slowest leader
        return len(leaders) < MAX_LEADERS or time < leaders[MAX_LEADERS - 1]['time']

    def add_score(self, difficulty, time, name):
        """Record a finished game for *name* under *difficulty*."""
        self.database[difficulty].insert_one({'time': time, 'name': name})

    def clear_database(self):
        """Delete every score from every difficulty collection."""
        for difficulty in difficulties:
            self.database[difficulty].delete_many({})
|
20,392 | fb35f352397e36dfef2ecac58ad2a787ee000770 | class Solution:
def convertToTitle(self, n):
"""
:type n: int
:rtype: str
代码错误,跑不出来
"""
number = dict(zip(range(1, 27), [chr(i) for i in range(65, 91)]))
number[0] = ''
ans = ''
# multi = 10
# while n // 26 != 0:
# multi *= 10
while True:
div, mod = divmod(n, 26)
if mod == 0:
d, m = 26, 26
else:
d, m = div, mod
print(d, m, div)
if div == 0:
ans += number[mod]
break
ans += number[d]
n = mod
return ans
# quick manual check of the conversion above
print(Solution().convertToTitle(52))
|
20,393 | 8c0593475847a03934332cf0a918332c5a34a91c | """
Monk and Rotation
Monk loves to preform different operations on arrays, and so being the principal of Hackerearth School, he assigned a task to his new student Mishki. Mishki will be provided with an integer array A of size N and an integer K , where she needs to rotate the array in the right direction by K steps and then print the resultant array. As she is new to the school, please help her to complete the task.
Input:
The first line will consists of one integer T denoting the number of test cases.
For each test case:
1) The first line consists of two integers N and K, N being the number of elements in the array and K denotes the number of steps of rotation.
2) The next line consists of N space separated integers , denoting the elements of the array A.
Output:
Print the required array.
Constraints:
Sample Input
1
5 2
1 2 3 4 5
Sample Output
4 5 1 2 3
Explanation
Here T is 1, which means one test case.
N = 5, denoting the number of elements in the array, and K = 2, denoting the number of steps of rotation.
The initial array is: 1 2 3 4 5
In the first rotation, 5 will come to the first position and all other elements will move one position ahead from their current position. Now, the resultant array will be 5 1 2 3 4.
In the second rotation, 4 will come to the first position and all other elements will move one position ahead from their current position. Now, the resultant array will be 4 5 1 2 3.
Time Limit: 1.0 sec(s) for each input file
Memory Limit: 256 MB
Source Limit: 1024 KB
"""
# Solution
'''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
for i in range(int(input())):
n, k = map(int, input().split())
k = k % n
a = list(map(int, input().split()))
print(" ".join(str(i) for i in a[-k:] + a[:-k]))
|
20,394 | 87d4b28d9c7ec11420c6e8f249cfc60032f06605 | nst=input() # Given a string, find the length of the longest substring without repeating characters.
sb,lt= [nst[i:j] for i in range(len(nst)) for j in range(i+1, len(nst) + 1)],[]
for i in sb:
if(len(i)==len(set(i))):
lt.append(len(set(i)))
print(max(lt))
|
20,395 | d7adfc0d95cc1a49671472492f1cd7a9d4591e1e |
import pymel.core as pm
from rutils.rig_sort import rig_sortListByVectorAxis
from other.webcolors import name_to_rgb
from rutils.rig_transform import rig_transform
def rig_curveFromJoints( _list, name = 'crvJoints', axis='x', degree=3 ):
    """Build a NURBS curve through the world positions of the given joints.

    Positions are sorted along *axis* before curve creation, and a knot
    vector of the form [0, ..., 0, 1, 2, ..., 2] (single middle knot) is
    generated for the pm.curve call.
    """
    # world-space positions of the joints, ordered along the requested axis
    positions = [jnt.getTranslation(worldSpace=True) for jnt in _list]
    positions = rig_sortListByVectorAxis(positions, axis)

    # middle knot index; '/' kept as-is to preserve the original
    # int/float division semantics on either Python version
    knotCount = (len(positions) + degree - 1)
    middle = knotCount / 2
    if knotCount % 2 != 0:
        middle = (knotCount - 1) / 2

    # zeros before the middle knot, a single 1 at it, twos after
    knots = []
    for k in range(0, knotCount):
        if k < middle:
            knots.append(0)
        elif k == middle:
            knots.append(1)
        else:
            knots.append(2)

    return pm.curve(n=name + '_CRV', d=degree, p=positions, k=knots)
'''
_locList = pm.ls(sl=True)
_curve = crv
for loc in _locList:
pos = loc.getTranslation(worldSpace=True)
closestPoint = _curve.closestPoint(pos)
u = _curve.getParamAtPoint( closestPoint )
print u
nme = loc.replace('_JNT', '_POI')
poi = pm.createNode( 'pointOnCurveInfo', n=nme )
pm.connectAttr( _curve+'.worldSpace', poi + '.inputCurve', )
pm.setAttr( poi +'.parameter', u )
pm.connectAttr( poi+'.position', loc+'.t')
crv = pm.PyNode('crvJoints_CRV1')
cvs = crv.getCVs()
count = 0
for c in cvs:
pm.select(cl=True)
jnt = pm.joint(name='spine'+str(count)+'_JNT')
jnt.setTranslation(c)
count += 1
'''
'''
rig_curveBetweenTwoPoints(poleVector.con, elbow, name=name+'PV')
'''
def rig_curveBetweenTwoPoints(start,end, name='curveBetween' , degree=1, colour='white', parent='allModel_GRP'):
    """Create a templated curve whose two CVs follow *start* and *end*.

    Two helper joints are point-constrained to the input transforms and
    the curve is skinned to them (one CV fully weighted to each joint),
    so the curve always stretches between the two objects.  The curve is
    coloured, templated and parented under a shared group.
    """
    posStart = pm.xform(start, translation=True, query=True, ws=True)
    posEnd = pm.xform(end, translation=True, query=True, ws=True)

    # helper joints that track the two ends
    pm.select(cl=True)
    jntStart = pm.joint(name=name + 'Start_JNT')
    pm.pointConstraint(start, jntStart)
    pm.select(cl=True)
    jntEnd = pm.joint(name=name + 'End_JNT')
    pm.pointConstraint(end, jntEnd)

    crv = pm.curve(n=name + '_CRV', d=degree, p=(posStart, posEnd), k=(0, 1))

    # bind each CV completely to its own joint
    cluster = pm.skinCluster((jntStart, jntEnd), crv, tsb=True, dr=1)
    pm.skinPercent(cluster, crv.cv[0], tv=(jntStart, 1))
    pm.skinPercent(cluster, crv.cv[1], tv=(jntEnd, 1))

    rgb = name_to_rgb(colour)
    crv.overrideColorRGB.set(rgb[0], rgb[1], rgb[2])

    # all linear helper curves live under one shared group
    groupName = 'puppetLinearCurves_GRP'
    if not pm.objExists(groupName):
        groupName = rig_transform(0, name='puppetLinearCurves', type='group',
                                  parent=parent).object
    pm.parent(crv, jntStart, jntEnd, groupName)
    pm.setAttr(crv + ".inheritsTransform", 0)
    pm.setAttr(crv + ".template", 1)
    pm.hide(jntStart, jntEnd)
    return crv
|
20,396 | 1abdae4997f3baa6d5325b5f7114cdcf85132846 | #!env/bin/python
from app import app, db
from app.database import models
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
# Build an engine for the target database.
# NOTE(review): credentials are hard-coded in the URL — move them into an
# environment variable/config before sharing this repo.  Also, SQLAlchemy
# >= 1.4 requires the "postgresql://" scheme; "postgres://" only works on
# older releases — confirm the pinned SQLAlchemy version.
engine = create_engine("postgres://postgres:1numan1@localhost/web_chat_task")
# Create the database itself if it does not exist yet, then report its state.
if not database_exists(engine.url):
    create_database(engine.url)
print(database_exists(engine.url))
# Rebuild the schema from scratch: drop everything, then recreate every
# table declared on the app's SQLAlchemy metadata (destroys existing data).
db.drop_all()
db.create_all()
|
20,397 | 9c7fe7e42fc46ef8e16eb443920dd9c979822804 | ############################################################
#
# zip files
#
############################################################
import os

# work inside the archive directory
os.chdir("zipfiles")

import zipfile

# Use a context manager so the archive's file handle is closed even if
# extraction raises (the original ZipFile object was never closed).
with zipfile.ZipFile("src.zip", "r") as archive:
    archive.extractall()

import shutil

# delete the extracted "src" tree again
shutil.rmtree("src")
|
20,398 | abcf951c5586a6fc4a32f130835e822dd57e45e3 | import requests
import traceback
import os,random,re
from os import path
import json
import datetime,time
import shutil
from concurrent.futures import ThreadPoolExecutor,ALL_COMPLETED,wait,FIRST_COMPLETED
import numpy as np
import cv2
def cv_imread(f_path):
    """Read an image from *f_path* via numpy + cv2.imdecode.

    NOTE(review): presumably used instead of cv2.imread to support
    non-ASCII file paths — confirm with callers.
    """
    raw = np.fromfile(f_path, dtype=np.uint8)
    # flag -1 keeps the stored channels (color/alpha); 0 would force grayscale
    return cv2.imdecode(raw, -1)
def cv_imwrite(f_path, im):
    """Write image *im* to *f_path* via cv2.imencode + numpy tofile.

    Bug fix: the encoder is now chosen from the path's extension instead
    of always being '.jpg', so e.g. '.png' targets are written as real
    PNG data.  Paths without an extension fall back to JPEG.
    """
    ext = os.path.splitext(f_path)[1] or '.jpg'
    cv2.imencode(ext, im)[1].tofile(f_path)  # save image (tofile handles non-ASCII paths)
20,399 | c119bc6c950c9da7d0afd727011f5f78680f0517 | # DATAFRAME#1
import numpy as np
import pandas as pd
from numpy.random import randn
# seed the RNG so the DataFrame below is reproducible
np.random.seed(101)
df = pd.DataFrame(randn(5,4),['A','B','C','D','E'],['W','X','Y','Z'])
df
# returns 5 rows, 4 columns.
# A-E are row index labels, and W-Z are column names
df['W']
# returns the W column as a Series (indexed by rows A-E)
type(df['W'])
# returns pandas.core.series.Series
type(df)
# returns pandas.core.frame.DataFrame
df.W
# same as df['W'] (attribute access); bracket notation is recommended
# because the dot form collides with DataFrame method/attribute names
df[['W','Z']]
# returns the W and Z columns with rows A-E
# MULTIPLE COLUMNS -> returns a DataFrame
# SINGLE COLUMN -> returns a Series
# df['new'] would raise a KeyError here (column does not exist yet)
df['new'] = df['W'] + df['Y']
# created a new last column holding the element-wise sum of W and Y
df.drop('new', axis=1, inplace=True)
# axis=0 refers to the row index (labels A-E); axis=1 refers to columns
# without inplace=True, drop returns a new DataFrame and leaves df
# unchanged (inplace defaults to False so data is not lost by mistake)
df.drop('E',axis=0)
# axis=0 is the default, so it does not have to be spelled out
# returns a copy with row E removed (df itself still keeps row E)
df.shape
# returns (5, 4): 2-dimensional, 5 rows x 4 columns (rows = axis 0, columns = axis 1)
df['Y']
# returns a single column (Series)
df[['Z','X']]
# returns multiple columns (DataFrame)
df.loc['A']
# label-based selection: returns row A as a Series whose index is the
# column names W-Z and whose values are row A's entries
# (shift-tab in Jupyter shows the signature/help)
df.iloc[2]
# position-based selection (integer only): the row at position 2, i.e. C
df.loc['B','Y']
# returns a single scalar value: row B, column Y
df.loc[['A','B'], ['W','Y']]
# returns a sub-DataFrame: rows A and B, columns W and Y
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.