index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,900 | 9a2bf36412c0e931378854ef09def803f73723bc |
class InMemorySurfacer(object):
    """Metric surfacer that simply accumulates recorded metrics in a list.

    ``clear`` and ``flush`` are intentional no-ops: recorded metrics remain
    available on ``self.metrics`` for later inspection (useful in tests).
    """

    def __init__(self):
        self.metrics = []

    def clear(self):
        # No-op: kept so this class satisfies the surfacer interface.
        pass

    def flush(self):
        # No-op: nothing to push anywhere; metrics live in memory only.
        pass

    def record(self, metric):
        self.metrics.append(metric)
|
17,901 | b7c67dca79870220d04aad3cc0ecbe420d135236 | import torch
from torch import nn
from torch.utils.data import TensorDataset
import numpy as np
from pyvacy import optim, analysis, sampling
import argparse
import os
from train_brown_model import Model, get_next_token_accuracy
from utils import get_data, load_model, totorch, save_checkpoint
class DPFineTuner:
    """Fine-tune a pretrained language model with differentially private SGD.

    Uses pyvacy's DPSGD optimizer with Poisson minibatch/microbatch sampling,
    logs train/dev/test accuracy to text files under
    ``dp_experiments_big/experiment_<exp_num>/`` and saves periodic
    checkpoints.

    Fixes over the original: ``fine_tune`` referenced the module-global
    ``args`` when naming the checkpoint directory (only worked when run as a
    script); it now uses ``self.args``. The dead, never-read ``best_model``
    dict and the commented-out best-model bookkeeping were removed.
    """

    def __init__(
        self,
        model,
        train_dataset,
        args,
        device,
        X_dev,
        Y_dev,
        X_test,
        Y_test
    ):
        self.train_dataset = train_dataset
        self.model = model
        self.loss_f = nn.CrossEntropyLoss()
        self.device = device
        self.X_dev = X_dev
        self.Y_dev = Y_dev
        self.X_test = X_test
        self.Y_test = Y_test
        # Batch size used only for dev/test accuracy evaluation.
        self.batch_size = 64
        self.args = args
        # init record files
        self.record_dir = "dp_experiments_big"
        if not os.path.exists(self.record_dir):
            print("Making directory:", self.record_dir)
            os.mkdir(self.record_dir)
        self.exp_dir = os.path.join(self.record_dir, "experiment_{}".format(args.exp_num))
        if not os.path.exists(self.exp_dir):
            print("Making directory:", self.exp_dir)
            os.mkdir(self.exp_dir)
        self.exp_desc = os.path.join(self.exp_dir, "exp_desc.txt")
        self.training_acc_file = os.path.join(self.exp_dir, "training_acc.txt")
        self.dev_acc_file = os.path.join(self.exp_dir, "dev_acc.txt")
        self.test_acc_file = os.path.join(self.exp_dir, "test_acc.txt")
        files = [
            self.exp_desc,
            self.training_acc_file,
            self.dev_acc_file,
            self.test_acc_file
        ]
        # Refresh files so a re-run of the same experiment number starts clean.
        for f in files:
            if os.path.exists(f):
                os.remove(f)
        # TODO: write to experiment description file with hyperparameters
        self.dp_optim_params = {
            # An upper bound on the L2 norm of each gradient update.
            # A good rule of thumb is to use the median of the L2 norms observed
            # throughout a non-private training loop.
            'l2_norm_clip': args.l2_norm_clip,
            # A coefficient used to scale the standard deviation of the noise applied to gradients.
            'noise_multiplier': args.noise_multiplier,
            # Each example is given probability of being selected with minibatch_size / N.
            # Hence this value is only the expected size of each minibatch, not the actual.
            'minibatch_size': args.minibatch_size,
            # Each minibatch is partitioned into distinct groups of this size.
            # The smaller this value, the less noise that needs to be applied to achieve
            # the same privacy, and likely faster convergence. Although this will increase the runtime.
            'microbatch_size': args.microbatch_size
        }
        self.epsilon_params = {
            'N': len(train_dataset),
            # A coefficient used to scale the standard deviation of the noise applied to gradients.
            'noise_multiplier': args.noise_multiplier,
            # Each example is given probability of being selected with minibatch_size / N.
            # Hence this value is only the expected size of each minibatch, not the actual.
            'batch_size': args.minibatch_size,
            # The usual privacy parameter for (ε,δ)-Differential Privacy.
            # A generic selection for this value is 1/(N^1.1), but it's very application dependent.
            'delta': args.delta,
            # The number of minibatches to process in the training loop.
            'iterations': args.iterations
        }
        self.sampler_params = {
            # Each example is given probability of being selected with minibatch_size / N.
            # Hence this value is only the expected size of each minibatch, not the actual.
            'minibatch_size': args.minibatch_size,
            # Each minibatch is partitioned into distinct groups of this size.
            # The smaller this value, the less noise that needs to be applied to achieve
            # the same privacy, and likely faster convergence. Although this will increase the runtime.
            'microbatch_size': args.microbatch_size,
            # The number of minibatches to process in the training loop.
            'iterations': args.iterations
        }

    def fine_tune(self):
        """Run DP-SGD fine-tuning epochs, resuming from a fixed checkpoint.

        Logs average loss / next-token accuracy every 500 iterations and
        saves a checkpoint at the same cadence.
        """
        optimizer = optim.DPSGD(params=self.model.parameters(), **self.dp_optim_params, lr=1e-3)
        # Report the epsilon implied by the chosen DP hyper-parameters.
        epsilon = analysis.epsilon(**self.epsilon_params)
        print("epsilon", epsilon)
        # NOTE(review): resume path is hard-coded to the noise-0.1 run at
        # epoch 2 / iteration 3500 -- confirm this is the intended checkpoint.
        model_state = load_model(self.model, optimizer, "finetuned_brown_big_on_reddit_10k_0.1/epoch_2_it_3500.tar", False)
        self.model = model_state['model']
        optimizer = model_state['optimizer']
        for E in range(1, 5):
            # Iteration counter continues from the resumed checkpoint.
            it = 3500
            losses = []
            acc = []
            minibatch_loader, microbatch_loader = sampling.get_data_loaders(**self.sampler_params)
            for X_minibatch, y_minibatch in minibatch_loader(self.train_dataset):
                optimizer.zero_grad()
                for X_microbatch, y_microbatch in microbatch_loader(TensorDataset(X_minibatch, y_minibatch)):
                    X_microbatch = X_microbatch.float().to(self.device)
                    y_microbatch = y_microbatch.long().to(self.device)
                    optimizer.zero_microbatch_grad()
                    loss = self.loss_f(self.model(X_microbatch), y_microbatch)
                    loss.backward()
                    # Clip this microbatch's gradient and accumulate it.
                    optimizer.microbatch_step()
                    l = loss.to('cpu').data.numpy()
                    losses.append(l)
                it += 1
                if it % 500 == 0:
                    avg_loss = round(np.mean(losses),4)
                    # Write to file
                    with open(self.training_acc_file, "a") as f:
                        f.write("Epoch {} | Iter {} | Avg Loss On Epoch {} | Avg Next token Acc On Epoch {}\n".format(E + 1, it, avg_loss, round(np.mean(acc),4)))
                    with open(self.dev_acc_file, "a") as f:
                        f.write("Epoch {} | Iter {} | Avg Loss On Epoch {} | Avg Next token Acc On Epoch {}\n".format(E + 1, it, avg_loss, get_next_token_accuracy(self.model, self.X_dev, self.Y_dev, self.batch_size)))
                    with open(self.test_acc_file, "a") as f:
                        f.write("Epoch {} | Iter {} | Avg Loss On Epoch {} | Avg Next token Acc On Epoch {}\n".format(E + 1, it, avg_loss, get_next_token_accuracy(self.model, self.X_test, self.Y_test, self.batch_size)))
                    # save checkpoint
                    # Fixed: was module-global `args`; use the args held by this instance.
                    save_checkpoint(E + 1, it, self.model.state_dict(), optimizer.state_dict(), l, "finetuned_brown_big_on_reddit_10k_" + str(self.args.noise_multiplier), "epoch_{}_it_{}.tar".format(E + 1, it))
                batch_preds = self.model.predict(X_minibatch.float().to(self.device))
                acc.append(np.sum(batch_preds == y_minibatch.cpu().data.numpy()) / y_minibatch.shape[0])
                # Average the accumulated, clipped microbatch gradients, add
                # noise, and apply the update.
                optimizer.step()
def main(device, args):
    """Load the reddit data and pretrained model, then run DP fine-tuning."""
    # Reddit train/dev/test splits, tokenized with the Brown tokenizer.
    data = get_data("reddit-10000", tknizer="tokenizers/brown-tokenizer.pkl")
    X_train, Y_train = data.X_train_enumerated, data.Y_train_enumerated
    X_dev, Y_dev = data.X_dev_enumerated, data.Y_dev_enumerated
    X_test, Y_test = data.X_test_enumerated, data.Y_test_enumerated
    lm = Model(X_train.shape[1], 36743)
    lm.to(device)
    # The load function requires an optimizer argument even though it is
    # unused for inference-mode loading.
    adam = torch.optim.Adam(lm.parameters(), lr=1e-3)
    state = load_model(lm, adam, "brown_pretrained_big/best_model.tar", train=False)
    lm = state['model']
    # Pair each training example with its label.
    train_dataset = [(x, y) for x, y in zip(X_train, Y_train)]
    tuner = DPFineTuner(
        lm,
        train_dataset,
        args,
        device,
        X_dev,
        Y_dev,
        X_test,
        Y_test
    )
    tuner.fine_tune()
def parse_args(argv=None):
    """Parse the DP hyper-parameters from the command line.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` — backward compatible with
            the original zero-argument call; passing a list makes the
            function testable without touching ``sys.argv``.

    Returns:
        argparse.Namespace with exp_num, l2_norm_clip, noise_multiplier,
        minibatch_size, microbatch_size, delta and iterations.
    """
    parser = argparse.ArgumentParser(description='Take DP parameters.')
    parser.add_argument('--exp_num', type=int, help='Set value for experiment number')
    parser.add_argument('--l2_norm_clip', type=float, help='Set value for clipping')
    parser.add_argument('--noise_multiplier', type=float, help='Set value for noise multiplier')
    parser.add_argument('--minibatch_size', type=int, help='Set value for mini batch')
    parser.add_argument('--microbatch_size', type=int, help='Set value for micro batch')
    parser.add_argument('--delta', type=float, help='Set value for delta')
    parser.add_argument('--iterations', type=int, help='Set value for iterations')
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Prefer the first CUDA device when available, otherwise run on CPU.
    use_cuda = torch.cuda.is_available()
    print ("Using Cuda: {}".format(use_cuda))
    device = torch.device("cuda:0" if use_cuda else "cpu")
    args = parse_args()
    main(device, args)
17,902 | 118ab3cd8eb966d0053ebdd7db4e5a41529db475 | #!/usr/bin/python
# -*- coding: utf8 -*-
import random
from pandac.PandaModules import *
from wallBuilder import WallBuilder, RockWallBuilder, ModelWallBuilder
#-----------------------------------------------------------------------
# Decor building functions and classes
#-----------------------------------------------------------------------
'''
def makeFloor(nbCases, scalex, scaley, texpath):
cm = CardMaker('card')
cm.setUvRange(Point2(scalex/nbCases,scaley/nbCases), Point2(0,0))
cm.setHasNormals(True)
card = NodePath(cm.generate())
img = loader.loadTexture(texpath)
img.setWrapU(Texture.WMRepeat)
img.setWrapV(Texture.WMRepeat)
card.setTexture(img)
card.setScale(scalex,1,scaley)
card.setPos(0,0,0.0)
card.setHpr(0,-90,0)
#card.setTwoSided(True)
#card.setTransparency(TransparencyAttrib.MAlpha)
return card
'''
def makeWall(scalex, scaley, texPath, scaleTex):
    """Build a textured, alpha-blended card of size scalex x scaley.

    The UV range is set so the texture repeats roughly every ``scaleTex``
    world units along each axis.
    """
    texture = loader.loadTexture(texPath)
    maker = CardMaker('card')
    maker.setUvRange((scalex / scaleTex, 0), (0, scaley / scaleTex))
    maker.setHasNormals(True)
    wall = NodePath(maker.generate())
    wall.setTexture(texture)
    wall.setScale(scalex, 1, scaley)
    wall.setTransparency(TransparencyAttrib.MAlpha)
    return wall
class InnerWall:
    """Four textured wall cards enclosing the rectangular map perimeter.

    Replaces the original four copy-pasted construction stanzas with one
    data-driven loop; the walls are created in the same order, at the same
    positions and headings, so behavior is unchanged.
    """

    def __init__(self, map, height=6.0, texPath = "img/textures/ice01.jpg", texScale=5.0, z=0.0):
        self.map = map
        self.walls = []
        x = self.map.x
        y = self.map.y
        self.height = height
        self.texScale = texScale
        self.texPath = texPath
        # (length, position, heading) for each perimeter wall, in the same
        # order the original code created them. None = leave default heading.
        specs = [
            (x, (0, y, z), None),
            (x, (x, 0, z), 180),
            (y, (0, 0, z), 90),
            (y, (x, y, z), -90),
        ]
        for length, pos, heading in specs:
            wall = makeWall(length, height, texPath, texScale)
            wall.reparentTo(self.map.mapObjectRoot)
            wall.setPos(*pos)
            if heading is not None:
                wall.setHpr(heading, 0, 0)
            self.walls.append(wall)

    def getSaveData(self):
        """Return the constructor parameters needed to rebuild this wall set."""
        data = [self.height, self.texPath, self.texScale]
        return data

    def destroy(self):
        """Remove every wall node from the scene graph."""
        for wall in self.walls:
            wall.remove()
#-----------------------------------------------------------------------
# CollisionGrid : WIP
#-----------------------------------------------------------------------
class CollisionGrid:
    """Tile-based walkability grid rendered as one quad per tile (WIP).

    ``data[y][x] == 1`` marks a blocked (wall) tile, 0 an open one;
    ``openTiles`` caches the coordinates of open tiles. Each tile owns four
    consecutive rows in the vertex data, so a single tile can be rewritten
    in place without rebuilding the whole mesh.
    """

    def __init__(self, map, name=None, texPath="img/textures/ice01.jpg", mipImg = None, texScale=50.0):
        self.map = map
        self.name = name
        # Ground plane at z=0, used to convert mouse rays into tile coords.
        self.plane = Plane(Vec3(0, 0, 1), Point3(0, 0, 0))
        #print "CollisionGrid : initiating %s, scale = %s" % (name, texScale)
        self.x = self.map.x
        self.y = self.map.y
        self.groundTex = texPath
        if self.groundTex is not None:
            self.tex = loader.loadTexture(self.groundTex)
            self.colTex = loader.loadTexture("img/textures/collision.png")
        self.mipImg = mipImg
        self.groundTexScale = texScale
        self.clearData()
        '''
        self.data = [] # [[1,1,1,1,0,1,0,0,...], [1,0,0,...]... ]
        for y in range(self.y):
            tmp = []
            for x in range(self.x):
                tmp.append(0)
            self.data.append(tmp)
        '''
        self.node = GeomNode("tiledMesh")
        self.gvd = GeomVertexData('name', GeomVertexFormat.getV3n3c4t2(), Geom.UHStatic)
        self.geom = Geom(self.gvd)
        self.prim = GeomTriangles(Geom.UHStatic)
        self.np = None
        self.ground = None
        self.openTiles = []
        # rebuild() creates the mesh, the NodePath and the ground surface.
        self.rebuild()
        '''
        self.update()
        i = 0
        for x in range(self.x * self.y):
            self.prim.addVertices(i, i + 3, i + 2)
            self.prim.addVertices(i, i + 2, i + 1)
            i += 4
        self.prim.closePrimitive()
        self.geom.addPrimitive(self.prim)
        self.node.addGeom(self.geom)
        self.np = NodePath(self.node)
        self.np.reparentTo(render)
        #self.np.setTwoSided(True)
        #self.np.setTransparency(True)
        if self.groundTex is not None:
            self.tex = loader.loadTexture(self.groundTex)
            self.colTex = loader.loadTexture("img/textures/collision.png")
        self.np.setTexture(self.colTex)
        #self.np.setColor(0,0,1.0,0.1)
        self.np.setTransparency(True)
        if self.mipImg is not None:
            self.hasGeoMip = True
            self.ground = TerrainGround(self.map,
                self.x,
                self.y,
                self.groundTex, # terrain texture
                self.mipImg, # mipImg
                imgSize=65.0, # mipImg size
                scale=5.0) # terrain height scale
        else:
            self.hasGeoMip = False
            self.ground = FlatGround(self.map, self.groundTex, self.groundTexScale)
        '''

    def collisionHide(self):
        """Hide the collision overlay mesh."""
        self.np.hide()

    def collisionShow(self):
        """Show the collision overlay mesh."""
        self.np.show()

    def getTileHeight(self, x, y):
        """Terrain height at tile (x, y); always 0 on flat ground."""
        if not self.hasGeoMip:
            return 0
        else:
            return self.ground.getTileHeight(x, y)

    def getMouseTilePos(self, mpos=None):
        """Return the integer tile (x, y) under the mouse, or None.

        Intersects the camera ray through ``mpos`` (defaults to the current
        mouse position) with the z=0 ground plane.
        """
        if mpos is None:
            if base.mouseWatcherNode.hasMouse():
                mpos = base.mouseWatcherNode.getMouse()
            else:
                return None
        pos3d = Point3()
        nearPoint = Point3()
        farPoint = Point3()
        base.camLens.extrude(mpos, nearPoint, farPoint)
        if self.plane.intersectsLine(pos3d,
                render.getRelativePoint(camera, nearPoint),
                render.getRelativePoint(camera, farPoint)):
            x = pos3d.getX()
            y = pos3d.getY()
            return int(x), int(y)
        return None

    def update(self):
        """Rewrite every tile quad's vertex data from ``self.data``.

        Also repopulates ``openTiles`` with the coordinates of open tiles.
        """
        self.vertex = GeomVertexWriter(self.gvd, 'vertex')
        self.texcoord = GeomVertexWriter(self.gvd, 'texcoord')
        self.color = GeomVertexWriter(self.gvd, 'color')
        self.normal = GeomVertexWriter(self.gvd, 'normal')
        i = 0
        self.openTiles = []
        for y in range(self.y):
            for x in range(self.x):
                if self.data[y][x] == 1:
                    self.addWallTile(x, y)
                else:
                    self.addEmptyTile(x, y)
                    self.openTiles.append((x,y))
                # i tracks the 4-vertex stride per tile (currently unused).
                i += 4

    def rebuild(self):
        """Recreate the mesh, NodePath and ground from ``self.data``."""
        # Needed to update the map after it has been resized
        #print "Collision grid : rebuilding in progress..."
        if self.np:
            self.np.remove()
        if self.ground:
            self.ground.destroy()
        self.y = len(self.data)
        self.x = len(self.data[0])
        self.node = GeomNode("tiledMesh")
        self.gvd = GeomVertexData('name', GeomVertexFormat.getV3n3c4t2(), Geom.UHStatic)
        self.geom = Geom(self.gvd)
        self.prim = GeomTriangles(Geom.UHStatic)
        self.update()
        i = 0
        # Two triangles per tile quad (four vertices each).
        for x in range(self.x * self.y):
            #self.prim.addVertices(i, i + 3, i + 2)
            #self.prim.addVertices(i, i + 2, i + 1)
            self.prim.addVertices(i, i + 2, i + 1)
            self.prim.addVertices(i, i + 3, i + 2)
            i += 4
        self.prim.closePrimitive()
        self.geom.addPrimitive(self.prim)
        self.node.addGeom(self.geom)
        self.np = NodePath(self.node)
        self.np.reparentTo(render)
        #self.np.setTwoSided(True)
        self.np.setTexture(self.colTex)
        #self.np.setColor(0,0,1.0,0.1)
        self.np.setTransparency(True)
        self.np.setShaderOff()
        self.np.setLightOff()
        if self.mipImg is not None:
            self.hasGeoMip = True
            # NOTE(review): texture/mip paths are hard-coded here instead of
            # using self.groundTex / self.mipImg -- confirm this is intended.
            self.ground = TerrainGround(self.map,
                self.x,
                self.y,
                "img/textures/ice01.jpg", # terrain texture
                "img/mipmaps/ground02.jpg", # mipImg
                imgSize=65.0, # mipImg size
                scale=5.0) # terrain height scale
        else:
            self.hasGeoMip = False
            self.ground = FlatGround(self.map, self.groundTex, self.groundTexScale)

    def addWallTile(self, x, y):
        """Append the four vertices of a blocked tile's quad.

        The quad sits 0.01 above the terrain and gets a slightly randomized
        normal, presumably for visual variation -- confirm intent.
        """
        norm, norm2 = random.random()/2.0, random.random()/2.0
        #z = 0
        z1 = self.getTileHeight(x, y) + 0.01
        z2 = self.getTileHeight(x, y+1) + 0.01
        z3 = self.getTileHeight(x+1, y+1) + 0.01
        z4 = self.getTileHeight(x+1, y) + 0.01
        self.vertex.addData3f(x, y, z1)
        self.texcoord.addData2f(0, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(norm,norm2,1)
        self.vertex.addData3f(x, y+1, z2)
        self.texcoord.addData2f(0, 1)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(norm,norm2,1)
        self.vertex.addData3f(x+1, y+1, z3)
        self.texcoord.addData2f(1, 1)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(norm,norm2,1)
        self.vertex.addData3f(x+1, y, z4)
        self.texcoord.addData2f(1, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(norm,norm2,1)

    def addEmptyTile(self, x, y):
        """Append the four vertices of an open tile's quad.

        Near-zero UVs effectively sample a single texel, making the tile
        visually uniform.
        """
        self.vertex.addData3f(x, y, 0.01)
        self.texcoord.addData2f(0, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x, y+1, 0.01)
        self.texcoord.addData2f(0, 0.01)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x+1, y+1, 0.01)
        self.texcoord.addData2f(0.01, 0.01)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x+1, y, 0.01)
        self.texcoord.addData2f(0.01, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        #z = random()/100.0
        '''
        z = 0
        self.vertex.addData3f(x, y, z)
        self.texcoord.addData2f(0, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x, y, z)
        self.texcoord.addData2f(0, 1)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x, y, z)
        self.texcoord.addData2f(1, 1)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        self.vertex.addData3f(x, y, z)
        self.texcoord.addData2f(1, 0)
        self.color.addData4f(1, 1, 1, 1)
        self.normal.addData3f(0,0,1)
        '''

    def hideTile(self, x, y):
        """Open tile (x, y): clear its wall flag and rewrite only its quad."""
        if (0<=x<self.x) and (0<=y<self.y):
            if self.data[y][x]!=0:
                self.data[y][x] = 0
                # Each tile owns 4 consecutive vertex rows.
                row = (self.x*y + x)*4
                self.vertex = GeomVertexWriter(self.gvd, 'vertex')
                self.texcoord = GeomVertexWriter(self.gvd, 'texcoord')
                self.color = GeomVertexWriter(self.gvd, 'color')
                self.normal = GeomVertexWriter(self.gvd, 'normal')
                self.vertex.setRow(row)
                self.texcoord.setRow(row)
                self.color.setRow(row)
                self.normal.setRow(row)
                self.addEmptyTile(x, y)
                #self.update()
            if (x,y) not in self.openTiles:
                self.openTiles.append((x,y))

    def showTile(self, x, y):
        """Block tile (x, y): set its wall flag and rewrite only its quad."""
        if (0<=x<self.x) and (0<=y<self.y):
            if self.data[y][x]!=1:
                self.data[y][x] = 1
                row = (self.x*y + x)*4
                self.vertex = GeomVertexWriter(self.gvd, 'vertex')
                self.texcoord = GeomVertexWriter(self.gvd, 'texcoord')
                self.color = GeomVertexWriter(self.gvd, 'color')
                self.normal = GeomVertexWriter(self.gvd, 'normal')
                self.vertex.setRow(row)
                self.texcoord.setRow(row)
                self.color.setRow(row)
                self.normal.setRow(row)
                self.addWallTile(x, y)
            if (x,y) in self.openTiles:
                self.openTiles.remove((x,y))
            #self.update()

    def fill(self):
        """Mark every tile as blocked and refresh the mesh."""
        for y in range(self.y):
            for x in range(self.x):
                self.data[y][x] = 1
        self.update()

    def clear(self):
        """Mark every tile as open and refresh the mesh."""
        self.clearData()
        self.update()

    def clearData(self):
        """Reset ``self.data`` to an all-open y-by-x grid of zeros."""
        self.data = [] # [[1,1,1,1,0,1,0,0,...], [1,0,0,...]... ]
        for y in range(self.y):
            tmp = []
            for x in range(self.x):
                tmp.append(0)
            self.data.append(tmp)

    def fillBorder(self):
        """Block every tile on the outer border, then refresh the mesh."""
        for y in range(self.y):
            for x in range(self.x):
                if self.data[y][x] == 0:
                    if (x==0) or (y==0):
                        self.data[y][x] = 1
                    if (x==self.x-1) or (y==self.y-1):
                        self.data[y][x] = 1
        self.update()

    def isOpen(self, x, y):
        """True when tile (x, y) is open. No bounds check -- caller's duty."""
        if self.data[y][x]==0:return True
        return False

    def getRandomTile(self):
        """Return a random open tile's (x, y), or None.

        NOTE(review): the ``> 1`` test means a grid with exactly one open
        tile returns None -- looks like an off-by-one; confirm intent.
        """
        if len(self.openTiles)>1:
            a = random.randint(0,len(self.openTiles)-1)
            return self.openTiles[a][0], self.openTiles[a][1]
        return None

    def getClosestOpenTile(self, x, y):
        """First open neighbor of (x, y) among the 8 surrounding tiles.

        NOTE(review): neighbors are not bounds-checked; negative indices
        wrap around and out-of-range ones raise IndexError -- confirm
        callers only pass interior tiles.
        """
        for loc in [(x-1,y-1),(x,y-1),(x+1,y-1),(x-1,y),(x+1,y),(x-1,y+1),(x,y+1),(x+1,y+1)]:
            #if self.data[loc[1]][loc[0]] == 0:
            if self.isOpen(loc[0], loc[1]):
                return loc
        return None

    def destroy(self):
        """Detach the mesh and ground and drop the grid's data buffers."""
        if self.np:
            self.np.remove()
        if self.ground:
            self.ground.destroy()
        #if self.terrainNP:
        #	self.terrainNP.remove()
        del self.data
        del self.gvd

    def setSize(self, x, y):
        """Resize the grid to x-by-y, preserving overlapping tile data."""
        oldData = self.data
        oldX = self.x
        oldY = self.y
        self.x = int(x)
        self.y = int(y)
        self.data = []
        for y in range(self.y):
            tmp = []
            for x in range(self.x):
                if x<oldX and y<oldY:
                    tmp.append(oldData[y][x])
                else:
                    tmp.append(0)
            self.data.append(tmp)
        self.rebuild()
class FlatGround:
    """A single textured card serving as a flat, repeating-texture floor."""

    def __init__(self, map, tex="img/textures/ice01.jpg", scale=50.0):
        self.map = map
        self.x = map.x
        self.y = map.y
        # World units per texture repeat.
        self.scale = scale
        self.texPath = tex
        self.tex = loader.loadTexture(self.texPath)
        self.tex.setWrapU(Texture.WMRepeat)
        self.tex.setWrapV(Texture.WMRepeat)
        self.makeGround()

    def makeGround(self):
        """Create the floor card, scaled to the map and rotated flat (-90 pitch)."""
        self.cm = CardMaker('card')
        self.cm.setUvRange(Point2(self.x/self.scale,self.y/self.scale), Point2(0,0))
        #print "making flat ground with scale = %s" % (self.scale)
        self.cm.setHasNormals(True)
        self.card = NodePath(self.cm.generate())
        self.card.setTexture(self.tex)
        self.card.setScale(self.x,1,self.y)
        self.card.setPos(0,0,0.0)
        self.card.setHpr(0,-90,0)
        #card.setTwoSided(True)
        #card.setTransparency(TransparencyAttrib.MAlpha)
        self.card.reparentTo(self.map.mapObjectRoot)

    def destroy(self):
        """Remove the floor card from the scene graph."""
        self.card.detachNode()
        self.card.remove()

    def setSize(self, x, y):
        """Resize the floor by destroying and recreating the card."""
        self.destroy()
        self.x = x
        self.y = y
        self.makeGround()
        #self.card.setScale(self.x,1,self.y)

    def setTexture(self, tex):
        """Swap the floor texture for the one at path ``tex``."""
        self.tex = loader.loadTexture(tex)
        self.texPath = tex
        self.card.setTexture(self.tex)

    def getTileHeight(self, x, y):
        """Flat ground: every tile is at height 0."""
        return 0
class TerrainGround:
    """Heightfield-based ground built from a GeoMipTerrain.

    Heights come from the grayscale ``mipImg`` heightmap, scaled vertically
    by ``scale`` and stretched horizontally to cover an x-by-y map.
    """

    def __init__(self,
            map,
            x=20,
            y=20,
            tex="img/textures/ice01.jpg",
            mipImg="img/mipmaps/ground02.jpg",
            imgSize=65.0,
            scale=5.0):
        self.map = map
        self.x = x
        self.y = y
        self.texPath = tex
        self.terrain = None
        self.terrainNP = None
        self.terrainScale = 0
        # Heightmap image dimension in pixels (assumed square).
        self.terrainImgSize = imgSize
        self.mipImg = mipImg
        self.terrainScale = scale
        self.initGeoMip()

    def initGeoMip(self):
        """Generate the GeoMipTerrain mesh, texture it and attach it to render."""
        self.terrain = GeoMipTerrain("ground")
        self.terrain.setHeightfield(self.mipImg)
        #self.terrain.setMinLevel(2)
        #self.terrain.setBruteforce(True)
        #self.terrain.setBlockSize(5)
        self.terrainNP = self.terrain.getRoot()
        #self.terrainNP.reparentTo(self.map.mapObjectRoot)
        # NOTE(review): importing DirectStart here starts the engine as a
        # side effect; presumably needed for `render` -- confirm.
        import direct.directbase.DirectStart
        self.terrainNP.reparentTo(render)
        # Stretch the heightmap to map size; z is the height multiplier.
        self.terrainNP.setScale(self.x/self.terrainImgSize,self.y/self.terrainImgSize,self.terrainScale)
        #self.terrainNP.setPos(0,0,-self.terrainScale)
        self.terrain.generate()
        self.terrainNP.setTexture(loader.loadTexture(self.texPath))
        self.terrainNP.setTexScale(TextureStage.getDefault(),self.terrainImgSize/10,self.terrainImgSize/10)
        self.terrainNP.flattenStrong()
        #self.terrainNP.setCollideMask(BitMask32(1))

    def destroy(self):
        """Remove the terrain node from the scene graph."""
        if self.terrainNP:
            self.terrainNP.remove()

    def getTileHeight(self, x, y):
        """World-space height at map tile (x, y); 0 when out of bounds."""
        if not (0<=x<self.x): return 0 #- self.terrainScale
        if not (0<=y<self.y): return 0 #- self.terrainScale
        # Convert tile coordinates to heightmap pixel coordinates.
        xPx = int(float(x)/self.x*self.terrainImgSize)
        yPx = int(float(y)/self.y*self.terrainImgSize)
        height = self.terrain.getElevation(xPx, yPx) * self.terrainScale# - self.terrainScale
        #print "Terrain height in %s / %s : %s" % (x, y, height)
        return height
if __name__ == "__main__":
    # Stand-alone smoke test: display a terrain ground until Escape is hit.
    t = TerrainGround("map", 250,120,"img/textures/ice01.jpg", "img/mipmaps/ground02.jpg", 65.0,15.0)
    import sys
    import direct.directbase.DirectStart
    base.accept("escape", sys.exit)
    #t.destroy()
    run()
|
17,903 | f4ec232ec8f9e4ca69ee987b0d98729896eab52c | from aws_cdk import (core, aws_s3 as s3, aws_s3_deployment as s3deploy)
class RecommendationFrontendStack(core.Stack):
    """CDK stack hosting the recommendation frontend as an S3 static website."""

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # The code that defines your stack goes here
        # Publicly readable website bucket; DESTROY so `cdk destroy` deletes it.
        bucket = s3.Bucket(
            self,
            "web",
            removal_policy=core.RemovalPolicy.DESTROY,
            public_read_access=True,
            access_control=s3.BucketAccessControl.PUBLIC_READ,
            website_index_document="index.html",
        )
        # Upload the exported site (../app/out) into the bucket.
        # NOTE(review): destination_key_prefix="/" may place objects under a
        # literal "/" key prefix -- confirm the intended object layout.
        s3deploy.BucketDeployment(self, "frontend-deploy",
            sources=[s3deploy.Source.asset("../app/out")],
            destination_bucket=bucket,
            destination_key_prefix="/")
|
17,904 | d8bb53062dc0d9de977a35565e1cde87d0775d55 | import sys
sys.path.append('/Users/jso/dev/work-python/jump-to-python/05/game')
|
17,905 | 8c8acea64631ad76f5b3b3b7eadfa96b6c6d719c | from django.test import TestCase
from .utils import send_sms_request, send_sms_via_at, validate_recipients
from mock import patch, Mock
from message.models import Gateway
from django.conf import settings
import requests
import json
class SMSTaskTests(TestCase):
    """Unit tests for the SMS helper utilities, with requests.post mocked."""

    def setUp(self):
        # Sandbox Africastalking gateway record used by the helpers under test.
        self.gateway = Gateway.objects.create(
            name="Africastalking",
            active=True,
            api_url="https://api.sandbox.africastalking.com/version1/messaging",
            password=settings.AT_PASSWORD,
            configured_sender=settings.AT_USERNAME,
            account_number=settings.AT_USERNAME,
        )

    def test_send_sms_request(self):
        # send_sms_request should return the raw response plus its status code.
        with patch.object(requests, "post") as mock_obj:
            mock_obj.return_value = mock_response = Mock()
            mock_response.status_code = 201
            resp, status_code = send_sms_request("", "", "")
            self.assertEqual(resp, mock_response)
            self.assertEqual(status_code, 201)

    def test_sms_via_at(self):
        # send_sms_via_at should JSON-decode the gateway's response body.
        recipient = ["+254728282828", "+25472383883"]
        with patch.object(requests, "post") as mock_obj:
            # Representative Africastalking success payload.
            fake_response = json.dumps(
                {
                    "SMSMessageData": {
                        "Message": "Sent to 2/2 Total Cost: KES 1.6000",
                        "Recipients": [
                            {
                                "statusCode": 101,
                                "number": "+25472929298",
                                "cost": "KES 0.8000",
                                "status": "Success",
                                "messageId": "ATXid_89c12c24448c87f8bfe4548f935e0ec4",
                            },
                            {
                                "statusCode": 101,
                                "number": "+25472929292",
                                "cost": "KES 0.8000",
                                "status": "Success",
                                "messageId": "ATXid_d4814306596b9989f4b0d3b9002ad3e4",
                            },
                        ],
                    }
                }
            )
            mock_obj.return_value = mock_response = Mock()
            mock_response.content = fake_response
            mock_response.status_code = 201
            actual_response = send_sms_via_at(recipient, "hello world")
            self.assertEqual(actual_response, json.loads(fake_response))

    def test_validate_recipients_with_valid_phone_numbers(self):
        # Well-formed numbers pass through unchanged.
        recipients = ["+254771621351", "+254771621352"]
        self.assertEqual(validate_recipients(recipients), recipients)

    def test_validate_recipients_with_validandinvalidphonenumbers(self):
        # Invalid numbers are dropped; only the valid ones are kept.
        recipients = ["+828299292", "+254771621352"]
        self.assertEqual(validate_recipients(recipients), ["+254771621352"])
|
17,906 | e8c31dd6f5bed15021373504d526b60f8264101c | # divide two numbers
# https://leetcode.com/problems/divide-two-integers
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """Integer division truncated toward zero, without *, / or %.

        Repeatedly subtracts doubled copies of the divisor. All arithmetic
        happens in the negative domain so that -2**31 never needs to be
        negated (abs(-2**31) would overflow a 32-bit int).
        """
        INT_MAX = 2147483647   # 2**31 - 1
        INT_MIN = -2147483648  # -2**31
        # The single case whose true quotient (2**31) exceeds 32-bit range.
        if dividend == INT_MIN and divisor == -1:
            return INT_MAX
        # Result is negative exactly when the operand signs differ.
        negative = (dividend < 0) != (divisor < 0)
        remaining = -abs(dividend)
        base = -abs(divisor)
        quotient = 0
        while remaining <= base:
            # Double the divisor chunk while it still fits into `remaining`.
            chunk, multiple = base, -1
            while chunk + chunk >= remaining:
                chunk += chunk
                multiple += multiple
            quotient += multiple
            remaining -= chunk
        return -abs(quotient) if negative else abs(quotient)
|
17,907 | 0a94630e2adae7e8c37c652cc6916c2330f26b7b | import numpy as np
import pandas as pd
# activation func
def sigmoid(u, b=0.5):
    """Logistic activation 1 / (1 + exp(-b*u)) with steepness coefficient b."""
    return 1. / (1 + np.exp(-b * u))


def sigmoidDerivative(u, b=0.5):
    """Derivative of sigmoid(u, b) with respect to u: b * s * (1 - s).

    Bug fix: the original called sigmoid(u) with the *default* steepness,
    so the derivative was wrong for any b != 0.5. The passed b is now
    propagated, and the sigmoid is evaluated once instead of twice.
    """
    s = sigmoid(u, b)
    return b * s * (1 - s)
def getNormalizedOutput(y):
    """Threshold a sigmoid output into a hard 0/1 class label (>= 0.5 -> 1)."""
    return 1 if y >= 0.5 else 0


def getFinalError(d, y):
    """Half the squared error between targets d and thresholded outputs y[j][1]."""
    total = sum((d[j] - getNormalizedOutput(y[j][1])) ** 2 for j in range(len(y)))
    return 1. / 2 * total
# Scratch/experiment script: builds random weights for a 4-6-4-3 network,
# runs one forward pass through the output layer and prints the output
# layer's error term. Much of the surrounding exploration is left
# commented out.
eta = 0.1       # learning rate (currently unused below)
inputs = 4      # input layer width
outputs = 3     # output layer width
layers = (6, 4) # hidden layer widths
# Random weight matrices, stored transposed (rows = destination units).
# NOTE(review): when l == 0 both `if` branches run, appending two matrices
# for the first hidden layer -- looks unintended; confirm.
weights = []
for l in range(len(layers)):
    if (l == 0):
        weights.append(np.transpose( np.random.uniform(0, 1, ( inputs, layers[l]) ) ) )
    if (l < (len(layers) - 1)):
        weights.append(np.transpose( np.random.uniform(0, 1, (layers[l], layers[l+1]) ) ) )
    else:
        weights.append(np.transpose( np.random.uniform(0, 1, (layers[l], outputs) ) ) )
# Per hidden layer: [pre-activations, post-activations], randomly initialized.
activations = []
for l in range(len(layers)):
    # activations.append([np.zeros(layers[l]), np.zeros(layers[l])])
    activations.append([[np.random.uniform(0,0.9) for _ in range(layers[l])], [np.random.uniform(0,0.9) for _ in range(layers[l])]])
# Forward pass for the output layer only: [pre, post] per output unit.
outLayerActivations = [0]*outputs
for ol in range(outputs):
    # print(activations[-1][1])
    # print(iris-weights[-1][ol])
    pre = np.dot(activations[-1][1], weights[-1][ol])
    # print(pre)
    post = sigmoid(pre)
    # print(post)
    outLayerActivations[ol] = [pre, post]
# print(outLayerActivations)
#2.2: Compute the output layer's error
Y = [0, 1, 0]  # one-hot target
errors = []
for n in range(outputs):
    y = outLayerActivations[n][1]
    # deltaOut += ( (y - Y[n]) * sigmoidDerivative(outLayerActivations[n][1]) ) ** 2
    errors.append( ( (y - Y[n])**2 ) * sigmoidDerivative(outLayerActivations[n][1]))
deltaOut = (1/2)*sum(errors)
print (deltaOut)
# Hard-coded sample data used for a (commented-out) dot-product check.
a = [[0.01895276, 0.03015534, 0.0613778, 0.0260315, 0.01000285, 0.04092527,
0.0003963, 0.02707075],
[0.01812982, 0.03549912, 0.00670976, 0.02066392, 0.05383293, 0.03177738,
0.00398775, 0.02953082],
[0.02222084, 0.01786938, 0.01738915, 0.0578475, 0.00918505, 0.039417,
0.0506866, 0.006425 ],
[0.05809377, 0.03476727, 0.0067234, 0.00445595, 0.05332672, 0.04056107,
0.06173761, 0.03117363]]
b = [0.1199423, 0.12009572, 0.11982998, 0.11902922]
# print (np.dot(np.transpose(a),b))
# for i in reversed(range(len(layers))):
#     print(i)
# import collections
# x = collections.deque(2*[None], 2)
# print(x)
# x.appendleft(1)
# print(x)
# x.appendleft(2)
# print(x)
# x.appendleft(3)
# print (x)
#
# from numpy.random import seed
# from numpy.random import rand
# # seed random number generator
# seed(1)
# # generate some random numbers
# print(rand())
# # reset the seed
#
# # generate some random numbers
# print(rand())
#
# # seed(1)
# print(rand())
# weightsArr = [ [0]*3 for i in range(10)]
# print(weightsArr)
# w = np.asarray([[1,2],[1,2,3]])
# Rebinds `layers` -- any code added below here sees the last tuple, not
# the network shape defined above.
t = [(3,3), (4,), (2,)]
for layers in t:
    print(layers)
17,908 | 0598ce088a9047fedc3e4f9255b2775ca6088de8 | # -*- coding: utf-8 -*-
import datetime
from django.db import models
from users.models import UserProfile
from browser.models import ClickTask
# Create your models here.
'''
CPM业务
'''
class CPMWork(models.Model):
    """CPM (cost-per-impression) campaign task.

    Tracks the target impression volume plus the IP/PV counts delivered so
    far for a click task published by a user.
    """
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)  # task publisher
    task_id = models.ForeignKey(ClickTask, verbose_name=u'任务ID')                    # associated click task
    url = models.CharField(max_length=500, verbose_name=u"投放链接")                  # target URL
    click_nums = models.PositiveIntegerField(default=0, verbose_name=u"投定量")       # ordered volume
    ip_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放IP")        # delivered unique IPs
    pv_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放PV")        # delivered page views
    is_control = models.PositiveSmallIntegerField(choices=((0,"否"),(1,"是")), default=0, verbose_name=u"是否控量")  # volume-capped?
    status = models.PositiveSmallIntegerField(choices=((0,'未开始'),(1,'正在执行'),(2,'完成'),(3,'任务暂停')), default=0, verbose_name=u"状态")  # 0 pending / 1 running / 2 done / 3 paused
    remark = models.CharField(max_length=255, verbose_name=u"备注", null=True, blank=True)  # free-form note
    task_time = models.DateField(default=datetime.date.today, verbose_name=u"投放时间")     # delivery date
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)

    class Meta:
        verbose_name = u"CPM业务"
        verbose_name_plural = verbose_name

    def __str__(self):
        return "{}:{}".format(self.user_name.username, self.task_id.id)
'''
CPC业务
'''
class CPCWork(models.Model):
    """CPC (cost-per-click) campaign task: ordered vs delivered click volume."""
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)  # task publisher
    task_id = models.ForeignKey(ClickTask, verbose_name=u'任务ID')                    # associated click task
    url = models.CharField(max_length=500, verbose_name=u"投放链接")                  # target URL
    click_nums = models.PositiveIntegerField(default=0, verbose_name=u"投定量")       # ordered volume
    hasclicked = models.PositiveIntegerField(default=0, verbose_name=u"已投放点击量") # delivered clicks
    ip_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放IP")        # delivered unique IPs
    is_control = models.PositiveSmallIntegerField(choices=((0,"否"),(1,"是")), default=0, verbose_name=u"是否控量")  # volume-capped?
    status = models.PositiveSmallIntegerField(default=0, verbose_name=u"状态")        # status code (no choices declared here, unlike CPMWork)
    remark = models.CharField(max_length=255, verbose_name=u"备注", null=True, blank=True)  # free-form note
    task_time = models.DateField(default=datetime.date.today, verbose_name=u"投放时间")     # delivery date
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    # click_detail = models.ForeignKey(verbose_name=u"投放明细")

    class Meta:
        verbose_name = u"CPC业务"
        verbose_name_plural = verbose_name

    def __str__(self):
        return "{}:{}".format(self.user_name.username,self.task_id)
'''
排名业务
'''
class RankWork(models.Model):
    """Ranking campaign task: ordered vs delivered click volume for SEO ranking."""
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)  # task publisher
    task_id = models.ForeignKey(ClickTask, verbose_name=u'任务ID')                    # associated click task
    url = models.CharField(max_length=500, verbose_name=u"投放链接")                  # target URL
    click_nums = models.PositiveIntegerField(default=0, verbose_name=u"投定量")       # ordered volume
    hasclicked = models.PositiveIntegerField(default=0, verbose_name=u"已投放点击量") # delivered clicks
    ip_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放IP")        # delivered unique IPs
    is_control = models.PositiveSmallIntegerField(choices=((0,"否"),(1,"是")), default=0, verbose_name=u"是否控量")  # volume-capped?
    status = models.PositiveSmallIntegerField(default=0, verbose_name=u"状态")        # status code
    remark = models.CharField(max_length=255, verbose_name=u"备注", null=True, blank=True)  # free-form note
    task_time = models.DateField(default=datetime.date.today, verbose_name=u"投放时间")     # delivery date
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    # click_detail = models.ForeignKey(verbose_name=u"投放明细")

    class Meta:
        verbose_name = u"排名业务"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Fixed: the original concatenated a str with the integer
        # self.task_id.id (TypeError). Format instead, matching CPMWork.
        return "{}:{}".format(self.user_name.username, self.task_id.id)
'''
CPM历史数据
'''
class CPMHistory(models.Model):
    """Historical snapshot of a CPM placement (click/IP/PV counters)."""
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)
    task_id = models.ForeignKey(ClickTask, verbose_name=u'任务ID')
    url = models.CharField(max_length=500, verbose_name=u"投放链接")
    click_nums = models.PositiveIntegerField(default=0, verbose_name=u"投定量")
    ip_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放IP")
    pv_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放PV")
    history = models.ForeignKey(CPMWork, verbose_name=u"历史")
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = u"CPM历史数据"
        verbose_name_plural = verbose_name
    def __str__(self):
        # BUG FIX: concatenating a str with the ForeignKey object raised
        # TypeError; format via the related row's id instead.
        return "{}:{}".format(self.user_name.username, self.task_id.id)
'''
CPC历史数据
'''
class CPCHistory(models.Model):
    """Historical snapshot of a CPC placement (click/IP/PV counters)."""
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)
    task_id = models.ForeignKey(ClickTask, verbose_name=u'任务ID')
    url = models.CharField(max_length=500, verbose_name=u"投放链接")
    click_nums = models.PositiveIntegerField(default=0, verbose_name=u"投定量")
    ip_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放IP")
    pv_nums = models.PositiveIntegerField(default=0, verbose_name=u"已投放PV")
    history = models.ForeignKey(CPCWork, verbose_name=u"历史")
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = u"CPC历史数据"
        verbose_name_plural = verbose_name
    def __str__(self):
        # BUG FIX: concatenating a str with the ForeignKey object raised
        # TypeError; format via the related row's id instead.
        return "{}:{}".format(self.user_name.username, self.task_id.id)
'''
多URL列表
url以;分隔
({权重},{url})
10,http://www.baidu.com;
20,http://www.google.com;
'''
class Urls(models.Model):
    """Named list of weighted URLs; ``urls`` holds ';'-separated
    ``{weight},{url}`` entries (see the module comment above)."""
    user_name = models.ForeignKey(UserProfile, verbose_name=u"任务发布人", default=1)
    name = models.CharField(max_length=20, verbose_name=u"名称")
    urls = models.TextField(verbose_name=u"URL明细")
    create_at = models.DateTimeField(auto_now_add=True, verbose_name=u"投放时间")
    update_at = models.DateTimeField(auto_now=True)
    class Meta:
        verbose_name = u"多URL列表"
        verbose_name_plural = verbose_name
    def __str__(self):
        # BUG FIX: this model has no ``task_id`` field, so the old code
        # raised AttributeError; identify the row by its list name.
        return "{}:{}".format(self.user_name.username, self.name)
17,909 | 98c3a1e7b0d534386bc0349a7e5f8225ae3c72d1 | #!/usr/bin/env python
# Li Xue
# 27-Jun-2020 20:40
import sys
import torch
import imageio
from matplotlib import pyplot as plt
from vae_example_conv import *
import pdb
img_ori = '../scripts_and_files_for_homework/dataset.png'
#--------------------------------------------------
#Assignment 1.1 data preprocessing
#-- visually check the first emoji
# data_ori = imageio.imread(img_ori)
# emoji_1 = data_ori[0:32,0:32,:]
# plt.imshow(emoji_1)
# plt.show()
#--
def image2dataset(image, outputshape):
    """Slice a sprite sheet of 32x32 RGB emojis into a dataset tensor.

    Args:
        image: path to the sprite-sheet PNG.
        outputshape: 'convolutional' -> (N, 3, 32, 32) tensor;
                     'linear'        -> (N, 32*32*3) tensor.

    Returns:
        (count, data): number of emojis and the dataset tensor.

    Raises:
        ValueError: for an unknown *outputshape* (the old code silently
        fell through and crashed with UnboundLocalError).
    """
    # BUG FIX: the old code ignored the *image* argument and always read
    # the module-level ``img_ori``.
    data_ori = imageio.imread(image)
    num_emojis_row = int(data_ori.shape[0] / 32)
    num_emojis_col = int(data_ori.shape[1] / 32)
    num_emojis = num_emojis_row * num_emojis_col
    if outputshape == 'convolutional':
        data = torch.Tensor(num_emojis, 32, 32, 3)
        count = 0
        for i in range(num_emojis_row):
            for j in range(num_emojis_col):
                data[count] = torch.Tensor(data_ori[32*i:32*(i+1), 32*j:32*(j+1), :])
                count = count + 1
        # Reorder to channels-first for the conv net.
        data = data.permute([0, 3, 1, 2])
    elif outputshape == 'linear':
        data = torch.Tensor(num_emojis, 32 * 32 * 3)
        count = 0
        for i in range(num_emojis_row):
            for j in range(num_emojis_col):
                data[count] = torch.Tensor(data_ori[32*i:32*(i+1), 32*j:32*(j+1), :]).reshape(-1)
                count = count + 1
    else:
        raise ValueError("outputshape must be 'convolutional' or 'linear', got %r" % (outputshape,))
    return count, data
# Build both dataset layouts from the sprite sheet; the convolutional
# (N, 3, 32, 32) tensor is what the rest of the script works on.
num_img, data = image2dataset(img_ori, 'linear')
print(data.shape)
num_img, data = image2dataset(img_ori, 'convolutional')
# plt.imshow(data[1].permute([1,2,0]))  # visual spot-check of one emoji
# plt.show()
#-------------------------------------------------
# Assignment 1.2, inner scientist: run `python gui.py`
#-------------------------------------------------
# Assignment 1.3, image interplation
def img_interplation():
    """Interpolate between two random training emojis, both in pixel space
    and in the VAE latent space, saving every image as a PNG.

    Relies on the module-level ``data`` (N, 3, 32, 32) tensor and ``num_img``.
    """
    net = VAE()
    net.eval()
    checkpoint = torch.load('../scripts_and_files_for_homework/checkpoint.pth.tar')
    net.load_state_dict(checkpoint['state_dict'])
    img_reconstr, mu, logvar = net(data/255)
    img_reconstr = img_reconstr*255
    # (BUG FIX: removed a leftover pdb.set_trace() that froze the script here.)
    idx = torch.randint(num_img, (1,2))
    idx1 = idx[0,0]
    idx2 = idx[0,1]
    print(f"Randomly choose two images: {idx1} and {idx2}")
    oriImg_idx1 = data[idx1].squeeze(0).permute([1,2,0]).to(torch.uint8).numpy()
    oriImg_idx2 = data[idx2].squeeze(0).permute([1,2,0]).to(torch.uint8).numpy()
    imageio.imsave(f"{idx1}.png", oriImg_idx1)
    imageio.imsave(f"{idx2}.png", oriImg_idx2)
    # Interpolation in pixel space. BUG FIX: adding two uint8 arrays wraps
    # around at 256, so widen the dtype first, then cast back for saving.
    pixel_avg = ((oriImg_idx1.astype('uint16') + oriImg_idx2.astype('uint16')) // 2).astype('uint8')
    imageio.imsave(f"{idx1}_{idx2}_pxl_rcnstr.png", pixel_avg)
    # Interpolation in latent space: average the encoder outputs, sample, decode.
    mu_new = (mu[idx1] + mu[idx2])/2
    logvar_new = (logvar[idx1] + logvar[idx2])/2
    z_new = net.reparameterize(mu_new, logvar_new)
    img_new = net.decode(z_new.unsqueeze(0)) * 255
    img_new = img_new.squeeze()
    img_new = img_new.permute([1,2,0]).to(torch.uint8).numpy()
    imageio.imsave(f"{idx1}_{idx2}_Latent_rcnstr.png", img_new)
    print(f"images interpolated and save as png files")
img_interplation()
#-------------------------------------------------
# Assignment 1.4, math with latent vectors: compute a - b + {c,d,e} in the
# VAE latent space and display the decoded results.
a = imageio.imread('../scripts_and_files_for_homework/a.png')
b = imageio.imread('../scripts_and_files_for_homework/b.png')
c = imageio.imread('../scripts_and_files_for_homework/c.png')
d = imageio.imread('../scripts_and_files_for_homework/d.png')
e = imageio.imread('../scripts_and_files_for_homework/e.png')
# Stack the five emojis into a (5, 3, 32, 32) channels-first tensor.
data = torch.Tensor(5,32,32,3)
data[0] = torch.Tensor(a)
data[1] = torch.Tensor(b)
data[2] = torch.Tensor(c)
data[3] = torch.Tensor(d)
data[4] = torch.Tensor(e)
data = data.permute([0,3,1,2])
# Load the pretrained VAE and encode all five images at once.
net = VAE()
checkpoint = torch.load('../scripts_and_files_for_homework/checkpoint.pth.tar')
net.load_state_dict(checkpoint['state_dict'])
net.eval()
img_reconstr, mu, logvar = net(data/255)
img_reconstr = img_reconstr*255
# Sanity check: decode the sampled latents and display the first image.
z = net.reparameterize(mu, logvar)
img = net.decode(z) * 255
img = img.permute([0,2,3,1]).to(torch.uint8).numpy()
plt.imshow(img[0])
plt.show()
# a - b + c: arithmetic on the per-image encoder outputs, then decode.
mu_new = mu[0] - mu[1] + mu[2]
logvar_new = logvar[0] - logvar[1] + logvar[2]
z_new = net.reparameterize(mu_new, logvar_new)
torch.save(z_new, 'a_b+c.pt')
img_new = net.decode(z_new.unsqueeze(0) ) * 255
img_new = img_new[0].permute([1,2,0]).to(torch.uint8).numpy()
plt.imshow(img_new)
plt.show()
# a - b + d
mu_new = mu[0] - mu[1] + mu[3]
logvar_new = logvar[0] - logvar[1] + logvar[3]
z_new = net.reparameterize(mu_new, logvar_new)
torch.save(z_new, 'a_b+d.pt')
img_new = net.decode(z_new.unsqueeze(0) ) * 255
img_new = img_new[0].permute([1,2,0]).to(torch.uint8).numpy()
plt.imshow(img_new)
plt.show()
# a - b + e
mu_new = mu[0] - mu[1] + mu[4]
logvar_new = logvar[0] - logvar[1] + logvar[4]
z_new = net.reparameterize(mu_new, logvar_new)
torch.save(z_new, 'a_b+e.pt')
img_new = net.decode(z_new.unsqueeze(0) ) * 255
img_new = img_new[0].permute([1,2,0]).to(torch.uint8).numpy()
plt.imshow(img_new)
plt.show()
#-------------------------------------------------
# Assignment 1.5, anomaly detection: images with the largest VAE loss are
# the ones the model reconstructs worst, i.e. the anomalies.
num_img, data = image2dataset(img_ori, 'convolutional')
net = VAE()
net.eval()
checkpoint = torch.load('../scripts_and_files_for_homework/checkpoint.pth.tar')
net.load_state_dict(checkpoint['state_dict'])
# Per-image loss (reconstruction + KL), computed one image at a time.
losses= torch.Tensor(num_img)
for i in range(num_img):
    img = data[i]/255
    img = img.unsqueeze(0)
    img_reconstr, mu, logvar = net(img)
    loss = specialLoss(img_reconstr, img, mu, logvar)
    losses[i] = loss
#-- top 5 worst reconstructions (highest loss)
values, indices = torch.topk(losses,5)
badImgs = data[indices].permute([0,2,3,1]).to(torch.uint8).numpy()
badImgs_reconstr, mu, logvar = net(data[indices]/255)
badImgs_reconstr = (badImgs_reconstr *255).permute([0,2,3,1]).to(torch.uint8).numpy()
plt.imshow(badImgs[0])
plt.show()
plt.imshow(badImgs_reconstr[0])
plt.show()
|
17,910 | bff18423c1956bb1f6840733cc52f7a7e4911433 | from cs1graphics import *
_canvas = None
_current_color = "black"
_current_line_thickness = 1
_cue = None
def open_canvas(width, height):
    """Create the painting window with the given pixel dimensions."""
    global _canvas
    if _canvas is not None:
        raise RuntimeError("Canvas is already open.")
    _canvas = Canvas(width, height)

def clear_canvas():
    """Remove every shape and text item from the canvas."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    _canvas.clear()

def set_line_thickness(thickness):
    """Set the integer line width used for subsequent outlines."""
    global _current_line_thickness
    _current_line_thickness = thickness

def set_color(color):
    """Set the current painting color by name."""
    global _current_color
    _current_color = color

def set_color_rgb(r, g, b):
    """Set the current painting color from RGB components."""
    global _current_color
    _current_color = (r, g, b)

def _set_filled(shape):
    """Style *shape* as a borderless solid fill in the current color."""
    shape.setBorderWidth(0)
    shape.setFillColor(_current_color)

def _set_not_filled(shape):
    """Style *shape* as an outline in the current color and thickness."""
    shape.setBorderWidth(_current_line_thickness)
    shape.setBorderColor(_current_color)
def draw_circle(centerx, centery, radius):
    """Draw a circle outline on the canvas."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Circle()
    shape.move(centerx, centery)
    shape.setRadius(radius)
    _set_not_filled(shape)
    _canvas.add(shape)

def draw_filled_circle(centerx, centery, radius):
    """Draw a solid circle on the canvas."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Circle()
    shape.move(centerx, centery)
    shape.setRadius(radius)
    _set_filled(shape)
    _canvas.add(shape)

def draw_oval(centerx, centery, radiusx, radiusy):
    """Draw an oval outline on the canvas."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Ellipse(radiusx * 2, radiusy * 2, Point(centerx, centery))
    _set_not_filled(shape)
    _canvas.add(shape)

def draw_filled_oval(centerx, centery, radiusx, radiusy):
    """Draw a solid oval on the canvas."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Ellipse(radiusx * 2, radiusy * 2, Point(centerx, centery))
    _set_filled(shape)
    _canvas.add(shape)
def draw_line(x1, y1, x2, y2):
    """Draw a straight line segment from (x1, y1) to (x2, y2)."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    segment = Path(Point(x1, y1), Point(x2, y2))
    segment.setBorderWidth(_current_line_thickness)
    segment.setBorderColor(_current_color)
    _canvas.add(segment)

def draw_rect(x, y, width, height):
    """Draw a rectangle outline; (x, y) is the upper-left corner."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Rectangle(width, height, Point(x + width / 2, y + height / 2))
    _set_not_filled(shape)
    _canvas.add(shape)

def draw_filled_rect(x, y, width, height):
    """Draw a solid rectangle; (x, y) is the upper-left corner."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    shape = Rectangle(width, height, Point(x + width / 2, y + height / 2))
    _set_filled(shape)
    _canvas.add(shape)
def _points_from_flat(coords):
    """Turn a flat (x1, y1, x2, y2, ...) sequence into a list of Points.

    Shared by the polygon/polyline helpers below, which previously each
    duplicated this pairing loop inline.
    """
    return [Point(coords[i], coords[i + 1]) for i in range(0, len(coords), 2)]

def draw_polygon(*points):
    """Draw a closed polygon outline. Arguments are flat (x, y) pairs, e.g.
    draw_polygon(10, 10, 20, 20, 30, 40) draws the polygon bounded by
    (10, 10) -> (20, 20) -> (30, 40) -> (10, 10)."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    polygon = Polygon(*_points_from_flat(points))
    _set_not_filled(polygon)
    _canvas.add(polygon)

def draw_filled_polygon(*points):
    """Draw a solid polygon. Arguments are flat (x, y) pairs, as in
    draw_polygon()."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    polygon = Polygon(*_points_from_flat(points))
    _set_filled(polygon)
    _canvas.add(polygon)

def draw_polyline(*points):
    """Draw an open polyline through flat (x, y) pairs, e.g.
    draw_polyline(10, 10, 20, 20, 30, 40) draws a line from (10, 10) to
    (20, 20) to (30, 40)."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    path = Path(*_points_from_flat(points))
    path.setBorderWidth(_current_line_thickness)
    path.setBorderColor(_current_color)
    _canvas.add(path)
def draw_string(message, x, y, textSize):
    """Draw *message* centered at (x, y) with the given point size."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    label = Text(message, textSize)
    label.move(x, y)
    label.setFontColor(_current_color)
    _canvas.add(label)

def close_canvas():
    """Close the canvas window immediately."""
    global _canvas
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    _canvas.close()
    _canvas = None

def close_canvas_after_click():
    """Block until the canvas is clicked, then close it."""
    global _canvas
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    wait_for_click()
    _canvas.close()
    _canvas = None

def set_background_color(color):
    """Change the canvas background color; takes effect instantly."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    _canvas.setBackgroundColor(color)

def set_background_color_rgb(r, g, b):
    """Change the canvas background color from RGB components."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    _canvas.setBackgroundColor((r, g, b))

def save_canvas_as_image(filename):
    """Save the canvas to *filename*, which must end in .ps or .eps."""
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    _canvas.saveToFile(filename)

def wait_for_click():
    """Pause the program until the mouse button is released on the canvas."""
    global _cue
    if _canvas is None:
        raise RuntimeError("Canvas is not open yet.")
    while True:
        _cue = _canvas.wait()
        if _cue.getDescription() == 'mouse release':
            break

def get_last_click_x():
    """Return the x coordinate of the click last seen by wait_for_click()."""
    return _cue.getMouseLocation().getX()

def get_last_click_y():
    """Return the y coordinate of the click last seen by wait_for_click()."""
    return _cue.getMouseLocation().getY()
|
17,911 | ce072ec490920f88b5cd80e7676baab453894224 | """
Assignment 9
@author: Hans Ludvig Kleivdal
"""
import os
from flask import Flask, request, render_template, g, flash, redirect, url_for, session, send_from_directory
import mysql.connector
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
import json
UPLOAD_FOLDER = 'static'
ALLOWED_EXTENSIONS = ["txt", "pdf", "png", "jpg", "jpeg", "gif"]
app = Flask(__name__)
app.debug = True
# Application config
app.config["DATABASE_USER"] = "root"
app.config["DATABASE_PASSWORD"] = "admin"
app.config["DATABASE_DB"] = "test_storage"
app.config["DATABASE_HOST"] = "localhost"
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
app.secret_key = "any random string"
class Database_product_info:
    """In-memory cache of the product_info table, loaded once at startup."""

    def __init__(self, db):
        self.products = []
        self._load_products(db)

    def _load_products(self, db):
        """Populate self.products from the database, then close the connection."""
        db.ping(True)
        cur = db.cursor()
        try:
            sql = "select * from product_info;"
            cur.execute(sql)
            for row in cur:
                id, name, description, normal_price, bonus_price, photo = row
                self.products.append({
                    "id": id,
                    "name": name,
                    "description": description,
                    # BUG FIX: the old code mapped every non-integer-valued
                    # price to None (a copy-paste of the bonus_price guard);
                    # normal_price is always present and must be kept as-is.
                    "normal_price": float(normal_price),
                    "bonus_price": float(bonus_price) if bonus_price else None,
                    "photo": photo
                })
        except mysql.connector.Error as err:
            print(err)
        finally:
            cur.close()
            db.close()

    def get_products(self):
        """Return every cached product dict."""
        return self.products

    def get_product(self, id):
        """Return the product whose id matches *id* (int or str), or None."""
        for product in self.products:
            if int(id) == product['id']:
                return product
        return None
class Database_order:
    """In-memory cache of the order_head table."""

    def __init__(self, db):
        self.orders = []
        self._load_orders(db)

    def _load_orders(self, db):
        """Fill self.orders from order_head, then close the connection."""
        db.ping(True)
        cur = db.cursor()
        try:
            cur.execute("select order_id, fname, lname, email, phone, street, postcode, city from order_head;")
            fields = ("id", "fname", "lname", "email", "phone", "street", "postcode", "city")
            for row in cur:
                self.orders.append(dict(zip(fields, row)))
        except mysql.connector.Error as err:
            print(err)
        finally:
            cur.close()
            db.close()

    def get_orders(self):
        """Return every cached order dict."""
        return self.orders

    def get_order(self, id):
        """Return the order whose id matches *id* (int or str), or None."""
        for order in self.orders:
            if int(id) == order['id']:
                return order
        return None
class Database_order_list:
    """In-memory cache of order line items joined with their product info."""

    def __init__(self, db):
        self.order_list = []
        self._load_order_list(db)

    def _load_order_list(self, db):
        """Fill self.order_list from the join, computing each line total."""
        db.ping(True)
        cur = db.cursor()
        try:
            sql = "select order_items.order_id, order_items.product_id, product_info.name, product_info.normal_price, product_info.bonus_price, order_items.qt from order_items inner join product_info on order_items.product_id=product_info.id;"
            cur.execute(sql)
            for order_id, product_id, name, normal_price, bonus_price, qt in cur:
                # The bonus price wins when it is set; otherwise normal price.
                unit_price = bonus_price if bonus_price else normal_price
                self.order_list.append({
                    "order_id": order_id,
                    "product_id": product_id,
                    "name": name,
                    "normal_price": normal_price,
                    "bonus_price": bonus_price,
                    "qt": qt,
                    "sum": unit_price * qt
                })
        except mysql.connector.Error as err:
            print(err)
            flash(err, "remove")
        finally:
            cur.close()
            db.close()

    def get_orders(self):
        """Return every cached line item."""
        return self.order_list

    def get_order(self, id):
        """Return all line items belonging to order *id* (int or str)."""
        return [item for item in self.order_list if int(id) == item['order_id']]
def get_db():
    """Return the per-app-context MySQL connection, creating it on first use."""
    if not hasattr(g, "_database"):
        g._database = mysql.connector.connect(
            host=app.config["DATABASE_HOST"],
            user=app.config["DATABASE_USER"],
            password=app.config["DATABASE_PASSWORD"],
            database=app.config["DATABASE_DB"],
        )
    return g._database
def allowed_file(filename, allowed_extensions=None):
    """Return True when *filename* has an extension in *allowed_extensions*.

    The extension check is case-insensitive. *allowed_extensions* defaults
    to the module-level ALLOWED_EXTENSIONS list (backward compatible with
    the old single-argument form); passing it explicitly makes the helper
    reusable and testable.
    """
    if allowed_extensions is None:
        allowed_extensions = ALLOWED_EXTENSIONS
    return "." in filename and filename.rsplit(".", 1)[1].lower() in allowed_extensions
with app.app_context():
app.config["PRODUCTS"] = Database_product_info(get_db())
app.config["ORDERS"] = Database_order(get_db())
app.config["ORDER_LIST"] = Database_order_list(get_db())
@app.route("/")
def index():
    """Entry point: send every visitor to the login page."""
    return redirect(url_for('login'))

@app.route("/product/<id>")
def product(id):
    """Show the detail page for a single product."""
    catalogue = app.config["PRODUCTS"]
    return render_template("product.html",
                           username=session.get("username", None),
                           product=catalogue.get_product(id))
@app.route("/statistics")
def statistics():
    """Show per-product sales totals (quantity sold across all orders)."""
    db = get_db()
    db.ping(True)  # reconnect if the cached connection has gone away
    cur = db.cursor()
    try:
        sta = []
        # Aggregate ordered quantity per product, joined with its pricing.
        sql = "select order_items.product_id, product_info.name, product_info.normal_price, product_info.bonus_price, sum(order_items.qt) as Quantity from order_items inner join product_info on order_items.product_id=product_info.id group by order_items.product_id;"
        cur.execute(sql)
        for i in cur:
            id, name, price, bonus_price, qt = i
            sta.append({
                "id": id,
                "name": name,
                "price": price,
                "bonus_price": bonus_price,
                "qt": qt
            })
        return render_template("statistics.html", sta=sta, username=session.get("username", None))
    except mysql.connector.Error as err:
        # NOTE(review): on a DB error this only flashes and falls through,
        # returning None (HTTP 500) — consider redirecting instead.
        flash(err, "remove")
    finally:
        cur.close()
        db.close()
@app.route("/products")
def products():
    """List every product in the catalogue."""
    catalogue = app.config["PRODUCTS"]
    print(catalogue.get_products())
    return render_template("products.html",
                           products=catalogue.get_products(),
                           username=session.get("username", None))

@app.route("/orders")
def orders():
    """List every order header."""
    return render_template("orders.html",
                           orders=app.config["ORDERS"].get_orders(),
                           username=session.get("username", None))

@app.route("/order/<id>")
def order(id):
    """Show one order with its line items and grand total."""
    heads = app.config["ORDERS"]
    items = app.config["ORDER_LIST"].get_order(id)
    total = sum(item["sum"] for item in items)
    return render_template("order.html",
                           order_info=heads.get_order(id),
                           order_list=items,
                           total=total,
                           username=session.get("username", None))
@app.route("/edit/<id>")
def edit(id):
    """Render the edit form for product *id*.

    BUG FIX: the old code checked ``if id`` (always true — the URL segment
    is a non-empty string), and its else-branch flashed a garbled message
    and then fell off the end, returning None (HTTP 500). Check whether the
    product actually exists instead, and always return a response.
    """
    catalogue = app.config["PRODUCTS"]
    product = catalogue.get_product(id)
    if product is None:
        flash("Product does not exist", "remove")
        return redirect(url_for('products'))
    return render_template("edit.html", product=product, username=session.get("username", None))
@app.route("/submit", methods=['POST'])
def submit():
    """Handle the edit-product form: diff the submitted fields against the
    cached product and update the DB row, optionally saving a new photo.
    Always redirects back to the product list."""
    db = app.config["PRODUCTS"]
    id = request.form.get("id")
    name = request.form.get("name")
    desc = request.form.get("description")
    price = request.form.get("normal_price")
    bonus_price = request.form.get("bonus_price")
    curent_product = db.get_product(id)
    file = request.files["file"]
    new_photo = False
    # No upload means "keep the existing photo" -- reuse the stored path.
    if file.filename == "":
        file = curent_product["photo"]
        new_photo = False
    else:
        new_photo = True
    print(new_photo)
    print(file)
    # Empty bonus price means "no bonus" (None); otherwise compare as float.
    if bonus_price == "":
        bonus_price = None
    else:
        bonus_price = float(bonus_price)
    product = db.get_product(id)
    # NOTE(review): the author flagged this comparison as broken ("feil her"
    # is Norwegian for "error here"); when a new file was uploaded, ``file``
    # is a FileStorage object and never equals the stored photo path string.
    if int(id) == product['id'] and name == product['name'] and desc == product['description'] and float(price) == product['normal_price'] and bonus_price == product['bonus_price'] and file == product["photo"]:
        flash("Noting to update!", "remove")
        print("NO UPDATE")
        return redirect(url_for('products'))
    else:
        # The DB layer expects the literal string "NULL" for a missing bonus.
        if bonus_price == None:
            bonus_price = "NULL"
        if new_photo:
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
                photo = "../"+path
                sub = db_update_product(id, name, desc, price, bonus_price, photo)
                # Save the upload only after the DB row was updated.
                if sub is True:
                    file.save(path)
                    flash("Product #" + id + " updated!", "set")
                    return redirect(url_for('products'))
                else:
                    return redirect(url_for('products'))
        else:
            sub = db_update_product(id, name, desc, price, bonus_price, file)
            if sub is True:
                flash("Product #" + id + " updated!", "set")
                return redirect(url_for('products'))
            else:
                return redirect(url_for('products'))
@app.route("/add")
def add():
    """Render the 'new product' form."""
    return render_template("add.html", username=session.get("username", None))

@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from the upload folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route("/add_product", methods=['POST'])
def add_product():
    """Handle the new-product form: validate the photo upload, insert the
    DB row, then save the file into the upload folder."""
    name = request.form.get("name")
    desc = request.form.get("description")
    price = request.form.get("normal_price")
    bonus_price = request.form.get("bonus_price")
    file = request.files["file"]
    print(file)
    print(file.filename)
    db = app.config["PRODUCTS"]
    if file and allowed_file(file.filename):
        # "secure" the filename (form input may be forged and filenames can be dangerous)
        filename = secure_filename(file.filename)
        # save the file to the upload folder
        path = os.path.join(app.config["UPLOAD_FOLDER"], filename)
        print(path)
        # The DB layer expects the literal string "NULL" for a missing bonus.
        if bonus_price == "":
            bonus_price = "NULL"
        print("bonus: " + bonus_price)
        db_add = db_add_product(name, desc, price, bonus_price, path)
        # Save the upload only after the row was inserted successfully.
        if db_add:
            file.save(path)
            flash("File uploaded", "set")
            return redirect(url_for("products"))
        else:
            return redirect(url_for("products"))
    else:
        flash("Not allowed file type", "set")
        return redirect(url_for("products"))
@app.route("/delete/<id>")
def delete(id):
    """Delete product *id* and return to the product list."""
    if db_delete_product(id):
        flash("Product #" + id + " has been removed", "remove")
    return redirect(url_for('products'))
@app.route("/login", methods=["GET", "POST"])
def login():
    """Show the login form; on POST, validate credentials and start a session."""
    if request.method == "POST":  # if the form was submitted (otherwise we just display the form)
        if valid_login(request.form["username"], request.form["password"]):
            session["username"] = request.form["username"]
            return redirect(url_for("products"))
        else:
            flash("Invalid username or password!", "remove")
    return render_template("login.html")

@app.route("/logout")
def logout():
    """End the session and return to the login page."""
    # BUG FIX: pop with a default so visiting /logout without an active
    # session no longer raises KeyError (HTTP 500).
    session.pop("username", None)
    flash("You are now logged out!", "set")
    return redirect(url_for("login"))
def valid_login(username, password):
    """Check a username/password pair against the users table.

    Returns True only when the stored hash matches; False for unknown users
    or database errors. SECURITY FIX: the query is now parameterized — the
    old code interpolated *username* directly into the SQL string, which
    allowed SQL injection from the login form.
    """
    db = get_db()
    db.ping(True)
    cur = db.cursor()
    try:
        cur.execute("SELECT password FROM users WHERE user_name = %s;", (username,))
        for row in cur:
            return check_password_hash(row[0], password)
        return False
    except mysql.connector.Error as err:
        flash(err, "set")
        return False
    finally:
        cur.close()
        db.close()
def db_delete_product(id):
    """Delete one product row; returns True on success, False on DB error.

    SECURITY FIX: parameterized query — *id* comes straight from the URL,
    so the old string-formatted SQL was injectable.
    """
    db = get_db()
    db.ping(True)
    cur = db.cursor()
    try:
        cur.execute("DELETE FROM product_info WHERE id=%s;", (id,))
        db.commit()
        return True
    except mysql.connector.Error as err:
        flash(err, "remove")
        print(err)
        return False
    finally:
        cur.close()
        db.close()
        # Refresh the in-memory catalogue after the change.
        app.config["PRODUCTS"] = Database_product_info(get_db())
def db_update_product(id, name, desc, price, bonus_price, photo):
    """Update one product row; returns True on success, False on DB error.

    SECURITY FIX: all values are bound as query parameters — the old
    %-formatted SQL was injectable via the edit form. Callers historically
    pass the literal string "NULL" to mean "no bonus price"; that sentinel
    is translated to a real SQL NULL here to keep their contract intact.
    """
    db = get_db()
    db.ping(True)
    cur = db.cursor()
    try:
        if bonus_price in (None, "NULL"):
            bonus_price = None
        sql = ("UPDATE product_info SET name=%s, description=%s, normal_price=%s, "
               "bonus_price=%s, photo=%s WHERE id=%s;")
        cur.execute(sql, (name, desc, price, bonus_price, photo, id))
        db.commit()
        return True
    except mysql.connector.Error as err:
        flash(err, "remove")
        print(err)
        return False
    finally:
        cur.close()
        db.close()
        # Refresh the in-memory catalogue after the change.
        app.config["PRODUCTS"] = Database_product_info(get_db())
def db_add_product(name, desc, price, bonus_price, img):
    """Insert a new product row; returns True on success, False on DB error.

    SECURITY FIX: parameterized query — the old %-formatted SQL was
    injectable through the add-product form. Callers pass the literal
    string "NULL" for a missing bonus price (translated to SQL NULL), and
    the photo path is stored with the same '../' prefix as before.
    """
    db = get_db()
    db.ping(True)
    cur = db.cursor()
    try:
        if bonus_price in (None, "", "NULL"):
            bonus_price = None
        sql = ("INSERT INTO product_info (name, description, normal_price, bonus_price, photo) "
               "VALUE (%s, %s, %s, %s, %s);")
        cur.execute(sql, (name, desc, price, bonus_price, "../" + img))
        db.commit()
        return True
    except mysql.connector.Error as err:
        flash(err, "remove")
        print(err)
        return False
    finally:
        cur.close()
        db.close()
        # Refresh the in-memory catalogue after the change.
        app.config["PRODUCTS"] = Database_product_info(get_db())
if __name__ == '__main__':
app.run() |
17,912 | efaa4da5538dd6d7b064a964ddf10efd48e2fb2b | import numpy as np
import torch
import os
import matplotlib.pyplot as plt
class Results(object):
    """Accumulates named series of values with stats, CSV-save and plotting
    helpers. Unknown keys read back as 0 from :meth:`get`."""

    def __init__(self, keys):
        # One list of recorded values per key.
        self.map = {key: [] for key in keys}

    def append(self, key, value):
        """Record *value* under *key*."""
        self.map[key].append(value)

    def get(self, key, index=-1):
        """Return the value at *index* for *key* (0 for unknown keys)."""
        return self.map.get(key, [0])[index]

    def empty(self, key):
        """Return True when nothing has been recorded for *key*."""
        return not self.map[key]

    def mean(self, key):
        """Return the mean of the series recorded for *key*."""
        return np.array(self.map[key]).mean()

    def stdev(self, key):
        """Return the standard deviation of the series for *key*."""
        return np.array(self.map[key]).std()

    def save(self, key, path, index=None):
        """Write the series for *key* to <path>/<key>[-index].csv."""
        filename = "{}.csv".format(key) if index is None else "{}-{}.csv".format(key, index)
        np.savetxt(os.path.join(path, filename), np.array(self.map[key]), encoding="utf-8")

    def saveall(self, path, index=None):
        """Save every non-empty series under *path*."""
        for key, values in self.map.items():
            if values:
                self.save(key, path, index)

    def plot(self, keys, path, xlabel='Epoch', ylabel='Loss', marker=None):
        """Plot the given series (x starts at 1) and save the figure to *path*."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.grid()
        for key in keys:
            series = np.array(self.map[key])
            xs = np.arange(1, 1 + len(self.map[key]), 1)
            ax.plot(xs, series, marker=marker, label=key)
        ax.legend(loc="best")
        fig.savefig(path)
        plt.clf()
        plt.close()

    def plotall(self, path, xlabel="Epoch", ylabel="Loss", marker=None):
        """Plot every recorded series into one figure saved at *path*."""
        self.plot(self.map.keys(), path, xlabel, ylabel, marker)
def save_model(model, path):
    """Serialize *model*'s parameters to <path>/checkpoint.pth.tar."""
    target = os.path.join(path, "checkpoint.pth.tar")
    torch.save(model.state_dict(), target)

def save_train_history(path, model, history):
    """Persist training curves (CSVs + plot) and the model checkpoint in *path*."""
    history.saveall(path)
    history.plotall(path)
    save_model(model, path)

def save_test_history(path, histories):
    """Save each test run's history under *path*, suffixed by its run index."""
    for run_index, history in enumerate(histories):
        history.saveall(path, run_index)
def output_train_results(path, history, prior):
    """Append the final train/validation losses and training prior to *path*."""
    with open(path, mode='a', encoding="utf-8") as f:
        f.write("--Training result--\n\n")
        f.write("Train loss : {:.9f}\n".format(history.get("train_loss")))
        f.write("Validation loss : {:.9f}\n".format(history.get("validation_loss")))
        f.write("Training Prior : {:.6f}\n".format(prior))
        f.write("\n")
def output_test_results(path, test_idx, true_prior, acc, auc, prior=None, thresh=None, boundary=None):
    """Append one test run's metrics to *path*; optional fields are skipped
    when None. The section header is written only for the first run."""
    with open(path, mode='a', encoding="utf-8") as f:
        if test_idx == 0:
            f.write("--Test result---\n\n")
        f.write("Test {} : Dataset prior = {}\n".format(test_idx, true_prior))
        f.write("Accuracy : {:.6f}\n".format(acc))
        f.write("AUC : {:.6f}\n".format(auc))
        if prior is not None:
            f.write("Prior : {:.6f}\n".format(prior))
        if thresh is not None:
            f.write("Thresh : {:.6f}\n".format(thresh))
        if boundary is not None:
            f.write("Boundary : {:.6f}\n".format(boundary))
        f.write("\n")
def append_test_results(path, acc, auc, prior=None, thresh=None, boundary=None):
    """Append each metric to its own file under *path*; optional metrics
    are written only when they are not None."""
    def _append(filename, value):
        with open(os.path.join(path, filename), mode='a', encoding="utf-8") as f:
            print(value, file=f)
    _append("accuracy.txt", acc)
    _append("auc.txt", auc)
    if prior is not None:
        _append("prior.txt", prior)
    if thresh is not None:
        _append("thresh.txt", thresh)
    if boundary is not None:
        _append("boundary.txt", boundary)
def output_config(path, train_size, val_size, max_epochs, batch_size, lr, alpha, seed):
    """Append the experiment hyperparameters to *path*, one per line."""
    settings = (
        ("train_size", train_size),
        ("validation_size", val_size),
        ("max_epochs", max_epochs),
        ("batch_size", batch_size),
        ("lr", lr),
        ("alpha", alpha),
        ("random seed", seed),
    )
    with open(path, mode='a', encoding="utf-8") as f:
        f.write("--Parameters--\n")
        for label, value in settings:
            f.write("{} = {}\n".format(label, value))
def getdirs(dir_path):
    """Create *dir_path* (and any missing parents), then return it for chaining."""
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
|
17,913 | abfab172c15f14c5bbaacf104603d54ca4b5ab63 | # pip install Django
# python -m django --version
# need create a new directory for project , cd new_project
# django-admin startproject name_project
# for run server , go to project directory
# python manage.py runserver
# in browser , insert 127.0.0.1:8000
# stop server : CTRL + C
# create a new app
# python manage.py startapp name_app
|
17,914 | 67158cc75b11bee6809d9e21db5dfdd21010d7c8 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import os
import os.path
import json
def utf82gbk(dir, fileName, outDir):
    """Convert one mongo-dump style ranking file from UTF-8 to a GBK CSV.

    Reads <dir>/<fileName>, strips the ObjectId/NumberInt wrappers so each
    record becomes plain JSON, and writes one CSV row per record to
    <outDir>/<dir>/<fileName> (GBK encoded, with a Chinese header line).
    """
    print(dir, fileName, outDir)
    # BUG FIX: build paths with os.path.join instead of the hard-coded
    # Windows "\\" separators, so the layout mirrors correctly on any OS.
    realOutDir = os.path.join(outDir, dir)
    if not os.path.exists(realOutDir):
        os.makedirs(realOutDir)
    fullName = os.path.join(dir, fileName)
    outputName = os.path.join(realOutDir, fileName)
    head = "排名,玩家Id,区服Id,玩家名字,情侣Id,情侣区服Id,情侣名字,分数\n"
    # Context managers guarantee both handles are closed even when parsing
    # raises (the original only closed them via try/finally around the loop).
    with open(fullName, encoding='UTF-8') as file_object, \
         open(outputName, mode='w', encoding='GBK') as outputFile:
        outputFile.write(head)
        closing_braces = 0
        temp = ""
        for line in file_object:
            line = line.strip().strip('\n').strip('\r')
            if not line:
                # BUG FIX: skip blank lines; the old code indexed line[0]
                # and crashed with IndexError on them.
                continue
            # Drop the BSON wrappers so the record parses as plain JSON.
            line = line.replace("ObjectId", "")
            line = line.replace("NumberInt", "")
            line = line.replace("(", "")
            line = line.replace(")", "")
            temp += line
            if line[0] == '}':
                closing_braces += 1
                # Each record closes two braces: the nested "extra" object
                # and the record itself; parse once both have been seen.
                if closing_braces % 2 == 0:
                    obj = json.loads(temp)
                    temp = ""
                    output_line = "%d,%d,0,%s,%d,0,%s,%d\n" % (
                        obj["ranking"], obj["role_id"],
                        obj["extra"]["playerName"], obj["extra"]["fereId"],
                        obj["extra"]["fereName"], obj["score"])
                    outputFile.write(output_line)
def transInDir(dirName, outDir):
    """Run utf82gbk on every file found under *dirName* (recursively)."""
    for parent, _dirnames, filenames in os.walk(dirName):
        for name in filenames:
            utf82gbk(parent, name, outDir)
transInDir(sys.argv[1], sys.argv[2]) |
17,915 | b54002313bf6d5e7af023be8d0a9a55266df9c75 | #!/usr/bin/python
import roslib
roslib.load_manifest('autopnp_scenario')
import rospy
import smach
import smach_ros
import random
from time import sleep
from nav_msgs.srv import *
from ApproachPose import *
class SelectNavigationGoal(smach.State):
    """Hands out the next base pose from a fixed list of exploration goals.

    The goal list is refilled whenever it runs empty, so execute() always
    returns 'selected'; the 'not_selected' and 'failed' outcomes are declared
    but never produced by this state.
    """

    def __init__(self):
        smach.State.__init__(
            self,
            outcomes=['selected', 'not_selected', 'failed'],
            output_keys=['base_pose'],
        )
        self.goals = []

    def execute(self, userdata):
        # Refill the [x, y, theta] pose list once it has been exhausted.
        if not self.goals:
            pi = 3.1414926  # keep the original (approximate) pi constant
            self.goals = [
                [0.9, 0.9, 1.0 * pi],
                [0.9, 0.9, 1.25 * pi],
                [0.9, 0.9, 1.5 * pi],
                [-0.3, -0.8, 0.25 * pi],
                [-0.3, -0.8, 0.5 * pi],
                [-0.3, -0.8, 0.25 * pi],
                [-0.3, -0.8, 0],
                [-0.3, 0.85, 1.5 * pi],
                [-0.3, 0.85, 1.75 * pi],
                [-0.3, 0.85, 0],
                [-0.3, 0.85, 1.0 * pi],
            ]
        # Take the last element out of the list (LIFO order).
        userdata.base_pose = self.goals.pop()
        sleep(2)
        return 'selected'
class Explore(smach.StateMachine):
    """Loop: pick a goal pose, drive to it, repeat.

    MOVE_BASE returns to SELECT_GOAL whether or not the pose was reached, so
    the machine only finishes via SELECT_GOAL's 'not_selected' outcome.
    """
    def __init__(self):
        smach.StateMachine.__init__(self,
            outcomes=['finished','failed'])
        with self:
            smach.StateMachine.add('SELECT_GOAL',SelectNavigationGoal(),
                transitions={'selected':'MOVE_BASE',
                    'not_selected':'finished',
                    'failed':'failed'})
            smach.StateMachine.add('MOVE_BASE',ApproachPose(),
                transitions={'reached':'SELECT_GOAL',
                    'not_reached':'SELECT_GOAL',
                    'failed':'failed'})
class SM(smach.StateMachine):
    """Top-level wrapper: runs Explore once, then terminates ('ended')."""
    def __init__(self):
        smach.StateMachine.__init__(self,outcomes=['ended'])
        with self:
            smach.StateMachine.add('STATE',Explore(),
                transitions={'finished':'ended',
                    'failed':'ended'})
if __name__=='__main__':
    # Start the ROS node, expose the machine for introspection (smach_viewer),
    # run it to completion, then spin until shutdown.
    rospy.init_node('Explore')
    sm = SM()
    sis = smach_ros.IntrospectionServer('SM', sm, 'SM')
    sis.start()
    outcome = sm.execute()
    rospy.spin()
    sis.stop()
|
17,916 | da59e7398a25faefb1e0d127da72567d7b53c7cb | import re, os, sys
from useful import *
# Match a literal LaTeX "\begin{verbatim} ... \end{verbatim}" block,
# non-greedily, across newlines.
# BUG FIX: the original non-raw pattern "\\begin{verbatim}..." compiled the
# leading "\b" as a regex word boundary (and "\e" is an invalid escape), so
# it could never match the literal text.
P = re.compile(r"\\begin\{verbatim\}.*?\\end\{verbatim\}", re.DOTALL)

def smalltt(ifile="notes-14-08-2017.tex", out=sys.stdout):
    """Wrap every verbatim environment of *ifile* in {\\small ...} and write the result to *out*."""
    with safeout(out) as write:
        # BUG FIX: the replacement needs an escaped backslash; r"{\small...}"
        # is a bad escape (\s) in a re.sub template.
        write(P.sub(r"{\\small\g<0>}", open(ifile).read()))
|
17,917 | 9f09ea913550e3858db35a78aaa61622dc0bb252 | from django.urls import path
from .views import IndexView, ProductView, ProductCreateView, BasketChangeView, BasketView, ProductUpdateView, \
ProductDeleteView, OrderListView, OrderDetailView, OrderCreateView, OrderUpdateView, OrderProductCreateView,\
OrderProductDeleteView, OrderProductUpdateView, OrderDeliverView, OrderCancelView
# URL routes for the webapp; the names are used by {% url %} / reverse().
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    # Product catalogue CRUD
    path('products/<int:pk>/', ProductView.as_view(), name='product_detail'),
    path('products/create/', ProductCreateView.as_view(), name='product_create'),
    # Session basket
    path('basket/change/', BasketChangeView.as_view(), name='basket_change'),
    path('basket/', BasketView.as_view(), name='basket'),
    path('products/<int:pk>/update/', ProductUpdateView.as_view(), name='product_update'),
    path('products/<int:pk>/delete/', ProductDeleteView.as_view(), name='product_delete'),
    # Orders and their line items
    path('order/', OrderListView.as_view(), name='orders'),
    path('order/<int:pk>/', OrderDetailView.as_view(), name='order_detail'),
    path('order/create/', OrderCreateView.as_view(), name='order_create'),
    path('order/<int:pk>/update/', OrderUpdateView.as_view(), name='order_update'),
    path('order/<int:pk>/add-products/', OrderProductCreateView.as_view(), name='order_order_create'),
    path('product_delete/<int:pk>/', OrderProductDeleteView.as_view(), name='product_delete_from_order'),
    path('product_update/<int:pk>/', OrderProductUpdateView.as_view(), name='product_update_in_order'),
    path('order-deliver/<int:pk>/', OrderDeliverView.as_view(), name='order_deliver'),
    path('order-cancel/<int:pk>/', OrderCancelView.as_view(), name='order_cancel'),
]
# Namespace for reversing, e.g. reverse('webapp:index').
app_name = 'webapp'
17,918 | 373f7708ab9a58141f0382b13a3b28ce939535e4 | # Set up your imports here!
# import ...
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/')
def index():
    """Generic landing page."""
    # Welcome Page
    return '<h1> This is the generic page for puppies </h1> '# Create a generic welcome page.
@app.route('/puppy/<name>')
def puppylatin(name):
    """Return *name* in puppy-latin.

    Names ending in 'y' have the 'y' replaced with 'iful'; all other names
    get a trailing 'y'.
    """
    if name[-1] == 'y':
        pupname = name[:-1] + 'iful'
    else:
        pupname = name + 'y'
    # BUG FIX: the original <h1> tag was never closed.
    return '<h1> Your puppylatin name is :{} </h1>'.format(pupname)
if __name__ == '__main__':
    # Run the Flask development server (default host/port, debug off).
    app.run()
|
17,919 | bc910b48ce3fbb63683cc234ff0fafdb4de10869 | from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import statistics
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score, recall_score
class LemmaTokenizer:
    """Callable tokenizer for sklearn vectorizers: word-tokenize, then
    WordNet-lemmatize every token."""
    def __init__(self):
        # One shared lemmatizer instance per tokenizer.
        self.wnl = WordNetLemmatizer()
    def __call__(self, doc):
        # NOTE(review): assumes *doc* is a str and that the nltk data
        # ('punkt', 'wordnet') has been downloaded beforehand.
        return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
def softmax(y):
    """Threshold *y* in place: entries > 0 become 1, all others become -1.

    NOTE: despite the name this is a sign/threshold step, not a softmax.
    Returns the same (mutated) sequence.
    """
    for idx in range(len(y)):
        y[idx] = 1 if y[idx] > 0 else -1
    return y
def confusion_matrix(y_pred, y):
    """Return (tp, tn, fp, fn) for predictions/labels encoded as +1/-1."""
    tp = tn = fp = fn = 0
    for pred, truth in zip(y_pred, y):
        if pred == 1:
            if truth == 1:
                tp += 1
            elif truth == -1:
                fp += 1
        elif pred == -1:
            if truth == -1:
                tn += 1
            elif truth == 1:
                fn += 1
    return tp, tn, fp, fn
def accuracy_calc(conf_tuple):
    """Accuracy from a (tp, tn, fp, fn) confusion tuple."""
    tp, tn, fp, fn = conf_tuple[0], conf_tuple[1], conf_tuple[2], conf_tuple[3]
    return (tp + tn) / (tp + tn + fp + fn)
def calc_shuffle_order(length):
    """Return a random permutation of 0..length-1 as a numpy array."""
    indices = list(range(length))
    random.shuffle(indices)
    return np.array(indices)
def shuffle2(X, y, shuffle_order):
    """Reorder X and y by *shuffle_order*; returns a pair of numpy arrays."""
    xs = [X[i] for i in shuffle_order]
    ys = [y[i] for i in shuffle_order]
    return np.array(xs), np.array(ys)
def unshuffle1(X_shuff, shuffle_order):
    """Invert the permutation applied by shuffle2.

    X_shuff[j] holds the element originally at index shuffle_order[j], so
    indexing by argsort(shuffle_order) puts every element back in its
    original slot.  The previous implementation ran an O(n) np.where scan
    per element — O(n^2) overall; this is O(n log n).
    """
    inverse = np.argsort(shuffle_order)
    return np.asarray(X_shuff)[inverse]
# Build TF-IDF features: top 1500 lemmatized terms, English stop words removed.
vect = TfidfVectorizer(tokenizer=LemmaTokenizer(), max_features=1500, stop_words='english')
df_col_names = ['text', 'upvote', 'early_access']
df = pd.read_csv('en_ascii_reviews.csv', comment='#', names=df_col_names)
# df = pd.read_csv('translated_reviews_all_ascii.csv', comment='#', names=df_col_names)
transform = vect.fit_transform(df.text)
X_orig = np.array(transform.toarray())  # dense document-term matrix
y = []
for index, row in df.iterrows():
    # Target: +1 for an upvoted review, -1 otherwise (swap in the commented
    # condition below to predict early_access instead).
    if(row['upvote'] == True):
    # if(row['early_access'] == True):
        y.append(1)
    else:
        y.append(-1)
y_orig = np.array(y)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2
# (use get_feature_names_out()) — confirm the pinned sklearn version.
print(f"All Features - {vect.get_feature_names()}")
print(f"Num input features - {len(X_orig[0])}\nNum rows - {len(X_orig)}")
print(f"Num output rows - {len(y_orig)}")
# <BEST-FIT MODEL COMPARISONS>
# Single 80/20 split: fit LR, 4-NN and a random forest, then compare their
# ROC curves / AUCs against a no-skill baseline.
lr_model = LogisticRegression(penalty='l2',C=1, max_iter=1000)
knn_model = KNeighborsClassifier(n_neighbors=4)
rf_model = RandomForestClassifier()
X_train, X_test, y_train, y_test = train_test_split(X_orig, y_orig, test_size=0.2, shuffle=True)
lr_model.fit(X_train, y_train)
knn_model.fit(X_train, y_train)
rf_model.fit(X_train, y_train)
# Positive-class probabilities (column 1) for ROC; the no-skill model
# predicts a constant 0.
ns_probs = [0 for _ in range(len(y_test))]
lr_probs = lr_model.predict_proba(X_test)
lr_probs = lr_probs[:, 1]
knn_probs = knn_model.predict_proba(X_test)
knn_probs = knn_probs[:, 1]
rf_probs = rf_model.predict_proba(X_test)
rf_probs = rf_probs[:, 1]
ns_auc = roc_auc_score(y_test, ns_probs)
lr_auc = roc_auc_score(y_test, lr_probs)
knn_auc = roc_auc_score(y_test, knn_probs)
rf_auc = roc_auc_score(y_test, rf_probs)
print('No Skill: ROC AUC=%.3f' % (ns_auc))
print('4-NN: ROC AUC=%.3f' % (knn_auc))
print('Logistic Reg ROC AUC=%.3f' % (lr_auc))
print('Random Forest ROC AUC=%.3f' % (rf_auc))
ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
knn_fpr, knn_tpr, _ = roc_curve(y_test, knn_probs)
lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
rf_fpr, rf_tpr, _ = roc_curve(y_test, rf_probs)
# Train vs. held-out accuracy for each model (accuracy_score is symmetric
# in its arguments, so the argument order here is harmless).
train_acc_lr = accuracy_score(lr_model.predict(X_train), y_train)
test_acc_lr = accuracy_score(lr_model.predict(X_test), y_test)
train_acc_knn = accuracy_score(knn_model.predict(X_train), y_train)
test_acc_knn = accuracy_score(knn_model.predict(X_test), y_test)
train_acc_rf = accuracy_score(rf_model.predict(X_train), y_train)
test_acc_rf = accuracy_score(rf_model.predict(X_test), y_test)
print(f"Log Reg: training accuracy - {train_acc_lr}")
print(f"Log Reg: validation accuracy - {test_acc_lr}")
print(f"4NN: training accuracy - {train_acc_knn}")
print(f"4NN: validation accuracy - {test_acc_knn}")
print(f"Random forest: training accuracy - {train_acc_rf}")
print(f"Random forest: validation accuracy - {test_acc_rf}")
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
plt.plot(lr_fpr, lr_tpr, marker='.', label='Log. Reg. C=1')
plt.plot(knn_fpr, knn_tpr, marker='x', label='4NN')
plt.plot(rf_fpr, rf_tpr, marker='o', label='Random Forest')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.title('ROC curve comparison')
plt.show()
# <\BEST-FIT MODEL COMPARISONS>
# <LOGISTIC REGRESSION CROSS-VAL>
# 5-fold CV of l2 logistic regression over a log-spaced range of C values,
# recording mean/variance of accuracy and recall for train and validation.
# costs = [0.1, 0.25, 0.5, 0.75, 1, 2, 5, 7.5, 10]
costs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
logit_shuff_train_metrics = []
logit_shuff_test_metrics = []
logit_shuff_train_variances = []
logit_shuff_test_variances = []
logit_unshuff_train_metrics = []
logit_unshuff_test_metrics = []
logit_unshuff_train_variances = []
logit_unshuff_test_variances = []
for cost in costs:
    for shuffle in [True]:  # set to [True, False] to also run unshuffled
        if(not shuffle):
            print("UNSHUFFLED")
            X = X_orig
            y = y_orig
        else:
            X, y = shuffle2(X_orig, y_orig, calc_shuffle_order(len(X_orig)))
            print("SHUFFLED")
        kf = KFold(n_splits=5)
        temp_train_metrics = []
        temp_test_metrics = []
        for train, test in kf.split(X):
            logit_model = LogisticRegression(penalty='l2', C=cost, max_iter=1000)
            logit_model.fit(X[train], y[train])
            y_hat_train = logit_model.predict(X[train])
            y_hat_test = logit_model.predict(X[test])
            y_hat_train_logit_acc = accuracy_score(y_hat_train, y[train])
            y_hat_test_logit_acc = accuracy_score(y_hat_test, y[test])
            # BUG FIX: recall_score(y_true, y_pred) — the arguments were
            # swapped, which silently computed precision instead of recall
            # (the baseline section below already used the correct order).
            y_hat_train_logit_rec = recall_score(y[train], y_hat_train)
            y_hat_test_logit_rec = recall_score(y[test], y_hat_test)
            temp_train_metrics.append([y_hat_train_logit_acc, y_hat_train_logit_rec])
            temp_test_metrics.append([y_hat_test_logit_acc, y_hat_test_logit_rec])
        # Mean and variance of [accuracy, recall] across the 5 folds.
        logit_train_metrics = [np.mean([row[0] for row in temp_train_metrics]), np.mean([row[1] for row in temp_train_metrics])]
        logit_test_metrics = [np.mean([row[0] for row in temp_test_metrics]), np.mean([row[1] for row in temp_test_metrics])]
        logit_train_variance = [np.var([row[0] for row in temp_train_metrics]), np.var([row[1] for row in temp_train_metrics])]
        logit_test_variance = [np.var([row[0] for row in temp_test_metrics]), np.var([row[1] for row in temp_test_metrics])]
        # print(f"Some model coefs - {model.coef_[0]}")
        print("LOGISTIC REG")
        print(f"COST - {cost}")
        print(f"Training accuracy - {logit_train_metrics[0]} +/- {logit_train_variance[0]}")
        print(f"Validation accuracy - {logit_test_metrics[0]} +/- {logit_test_variance[0]}")
        print(f"Training recall - {logit_train_metrics[1]} +/- {logit_train_variance[1]}")
        print(f"Validation recall - {logit_test_metrics[1]} +/- {logit_test_variance[1]}\n")
        if(not shuffle):
            logit_unshuff_train_metrics.append(logit_train_metrics)
            logit_unshuff_test_metrics.append(logit_test_metrics)
            logit_unshuff_train_variances.append(logit_train_variance)
            logit_unshuff_test_variances.append(logit_test_variance)
        else:
            logit_shuff_train_metrics.append(logit_train_metrics)
            logit_shuff_test_metrics.append(logit_test_metrics)
            logit_shuff_train_variances.append(logit_train_variance)
            logit_shuff_test_variances.append(logit_test_variance)
# Plot mean +/- variance of accuracy (figure 1) and recall (figure 2)
# against C on a log x-axis; the unshuffled series are left commented out.
plt.figure(1)
# plt.errorbar(costs, [row[0] for row in logit_unshuff_train_metrics], yerr=[row[0] for row in logit_unshuff_train_variances], color='r', ecolor='k', label="Unshuffled - train")
plt.errorbar(costs, [row[0] for row in logit_shuff_train_metrics], yerr=[row[0] for row in logit_shuff_train_variances], color='b', ecolor='k', label="Training")
# plt.errorbar(costs, [row[0] for row in logit_unshuff_test_metrics], yerr=[row[0] for row in logit_unshuff_test_variances], color='orange', ecolor='k', label="Unshuffled - val")
plt.errorbar(costs, [row[0] for row in logit_shuff_test_metrics], yerr=[row[0] for row in logit_shuff_test_variances], color='c', ecolor='k', label="Validation")
plt.legend()
plt.xlabel("Cost")
plt.ylabel("Accuracy")
plt.title("Logistic regression - Sentiment analysis accuracy")
plt.xscale("log")
plt.savefig('logistic_acc_cv_early_acc_all.png')
plt.figure(2)
# plt.errorbar(costs, [row[1] for row in logit_unshuff_train_metrics], yerr=[row[1] for row in logit_unshuff_train_variances], color='r', ecolor='k', label="Unshuffled - train")
plt.errorbar(costs, [row[1] for row in logit_shuff_train_metrics], yerr=[row[1] for row in logit_shuff_train_variances], color='b', ecolor='k', label="Training")
# plt.errorbar(costs, [row[1] for row in logit_unshuff_test_metrics], yerr=[row[1] for row in logit_unshuff_test_variances], color='orange', ecolor='k', label="Unshuffled - val")
plt.errorbar(costs, [row[1] for row in logit_shuff_test_metrics], yerr=[row[1] for row in logit_shuff_test_variances], color='c', ecolor='k', label="Validation")
plt.legend()
plt.xlabel("Cost")
plt.ylabel("Recall")
plt.title("Logistic regression - Sentiment analysis recall")
plt.xscale("log")
plt.savefig('logistic_rec_cv_early_acc_all.png')
plt.show()
# <\LOGISTIC REGRESSION CROSS-VAL>
# <KNN CROSS-VAL>
# 5-fold CV of k-nearest-neighbours over k = 1..9, mirroring the logistic
# regression sweep above.
knn_unshuff_train_metrics = []
knn_unshuff_test_metrics = []
knn_unshuff_train_variances = []
knn_unshuff_test_variances = []
knn_shuff_train_metrics = []
knn_shuff_test_metrics = []
knn_shuff_train_variances = []
knn_shuff_test_variances = []
neighbours = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for neighbour in neighbours:
    for shuffle in [True]:  # set to [True, False] to also run unshuffled
        if(not shuffle):
            print("UNSHUFFLED")
            X = X_orig
            y = y_orig
        else:
            X, y = shuffle2(X_orig, y_orig, calc_shuffle_order(len(X_orig)))
            print("SHUFFLED")
        kf = KFold(n_splits=5)
        temp_train_metrics = []
        temp_test_metrics = []
        for train, test in kf.split(X):
            knn_model = KNeighborsClassifier(n_neighbors=neighbour)
            knn_model.fit(X[train], y[train])
            y_hat_train = knn_model.predict(X[train])
            y_hat_test = knn_model.predict(X[test])
            y_hat_train_knn_acc = accuracy_score(y_hat_train, y[train])
            y_hat_test_knn_acc = accuracy_score(y_hat_test, y[test])
            # BUG FIX: recall_score(y_true, y_pred) — the arguments were
            # swapped, which silently computed precision instead of recall.
            y_hat_train_knn_rec = recall_score(y[train], y_hat_train)
            y_hat_test_knn_rec = recall_score(y[test], y_hat_test)
            temp_train_metrics.append([y_hat_train_knn_acc, y_hat_train_knn_rec])
            temp_test_metrics.append([y_hat_test_knn_acc, y_hat_test_knn_rec])
        # Mean and variance of [accuracy, recall] across the 5 folds.
        knn_train_metrics = [np.mean([row[0] for row in temp_train_metrics]), np.mean([row[1] for row in temp_train_metrics])]
        knn_test_metrics = [np.mean([row[0] for row in temp_test_metrics]), np.mean([row[1] for row in temp_test_metrics])]
        knn_train_variance = [np.var([row[0] for row in temp_train_metrics]), np.var([row[1] for row in temp_train_metrics])]
        knn_test_variance = [np.var([row[0] for row in temp_test_metrics]), np.var([row[1] for row in temp_test_metrics])]
        print("kNN")
        print(f"Neighbours - {neighbour}")
        print(f"Training accuracy - {knn_train_metrics[0]} +/- {knn_train_variance[0]}")
        print(f"Validation accuracy - {knn_test_metrics[0]} +/- {knn_test_variance[0]}")
        print(f"Training recall - {knn_train_metrics[1]} +/- {knn_train_variance[1]}")
        print(f"Validation recall - {knn_test_metrics[1]} +/- {knn_test_variance[1]}\n")
        if(not shuffle):
            knn_unshuff_train_metrics.append(knn_train_metrics)
            knn_unshuff_test_metrics.append(knn_test_metrics)
            knn_unshuff_train_variances.append(knn_train_variance)
            knn_unshuff_test_variances.append(knn_test_variance)
        else:
            knn_shuff_train_metrics.append(knn_train_metrics)
            knn_shuff_test_metrics.append(knn_test_metrics)
            knn_shuff_train_variances.append(knn_train_variance)
            knn_shuff_test_variances.append(knn_test_variance)
# Plot mean +/- variance of accuracy (figure 1) and recall (figure 2)
# against the neighbour count; unshuffled series left commented out.
plt.figure(1)
# plt.errorbar(neighbours, [row[0] for row in knn_unshuff_train_metrics], yerr=[row[0] for row in knn_unshuff_train_variances], color='r', ecolor='k', label="Unshuffled - train")
plt.errorbar(neighbours, [row[0] for row in knn_shuff_train_metrics], yerr=[row[0] for row in knn_shuff_train_variances], color='b', ecolor='k', label="Training")
# plt.errorbar(neighbours, [row[0] for row in knn_unshuff_test_metrics], yerr=[row[0] for row in knn_unshuff_test_variances], color='orange', ecolor='k', label="Unshuffled - val")
plt.errorbar(neighbours, [row[0] for row in knn_shuff_test_metrics], yerr=[row[0] for row in knn_shuff_test_variances], color='c', ecolor='k', label="Validation")
plt.legend()
plt.xlabel("Neighbours")
plt.ylabel("Accuracy")
plt.title("kNN - Sentiment analysis accuracy")
plt.savefig('knn_acc_cv_early_acc_all.png')
plt.figure(2)
# plt.errorbar(neighbours, [row[1] for row in knn_unshuff_train_metrics], yerr=[row[1] for row in knn_unshuff_train_variances], color='r', ecolor='k', label="Unshuffled - train")
plt.errorbar(neighbours, [row[1] for row in knn_shuff_train_metrics], yerr=[row[1] for row in knn_shuff_train_variances], color='b', ecolor='k', label="Training")
# plt.errorbar(neighbours, [row[1] for row in knn_unshuff_test_metrics], yerr=[row[1] for row in knn_unshuff_test_variances], color='orange', ecolor='k', label="Unshuffled - val")
plt.errorbar(neighbours, [row[1] for row in knn_shuff_test_metrics], yerr=[row[1] for row in knn_shuff_test_variances], color='c', ecolor='k', label="Validation")
plt.legend()
plt.xlabel("Neighbours")
plt.ylabel("Recall")
plt.title("kNN - Sentiment analysis recall")
plt.savefig('knn_rec_cv_early_acc_all.png')
plt.show()
# <\KNN CROSS-VAL>
# <RANDOM FOREST K-FOLD ANALYSIS>
# 5-fold CV of a default-depth random forest on the shuffled data.
X, y = shuffle2(X_orig, y_orig, calc_shuffle_order(len(X_orig)))
print("SHUFFLED")
kf = KFold(n_splits=5)
temp_train_metrics = []
temp_test_metrics = []
for train, test in kf.split(X):
    rf_model = RandomForestClassifier(max_depth=None, random_state=0)
    rf_model.fit(X[train], y[train])
    y_hat_train = rf_model.predict(X[train])
    y_hat_test = rf_model.predict(X[test])
    y_hat_train_rf_acc = accuracy_score(y_hat_train, y[train])
    y_hat_test_rf_acc = accuracy_score(y_hat_test, y[test])
    # BUG FIX: recall_score(y_true, y_pred) — the arguments were swapped,
    # which silently computed precision instead of recall.
    y_hat_train_rf_rec = recall_score(y[train], y_hat_train)
    y_hat_test_rf_rec = recall_score(y[test], y_hat_test)
    temp_train_metrics.append([y_hat_train_rf_acc, y_hat_train_rf_rec])
    temp_test_metrics.append([y_hat_test_rf_acc, y_hat_test_rf_rec])
# Mean and variance of [accuracy, recall] across the 5 folds.
rf_train_metrics = [np.mean([row[0] for row in temp_train_metrics]), np.mean([row[1] for row in temp_train_metrics])]
rf_test_metrics = [np.mean([row[0] for row in temp_test_metrics]), np.mean([row[1] for row in temp_test_metrics])]
rf_train_variance = [np.var([row[0] for row in temp_train_metrics]), np.var([row[1] for row in temp_train_metrics])]
rf_test_variance = [np.var([row[0] for row in temp_test_metrics]), np.var([row[1] for row in temp_test_metrics])]
print("RANDOM FOREST")
print(f"Training accuracy - {rf_train_metrics[0]} +/- {rf_train_variance[0]}")
print(f"Validation accuracy - {rf_test_metrics[0]} +/- {rf_test_variance[0]}")
print(f"Training recall - {rf_train_metrics[1]} +/- {rf_train_variance[1]}")
print(f"Validation recall - {rf_test_metrics[1]} +/- {rf_test_variance[1]}\n")
# <\RANDOM FOREST K-FOLD ANALYSIS>
# <BASELINE K-FOLD ANALYSIS>
# Majority-class baseline under the same 5-fold CV protocol.
X, y = shuffle2(X_orig, y_orig, calc_shuffle_order(len(X_orig)))
# BUG FIX: the data *is* shuffled on the line above; the old message said
# "UNSHUFFLED".
print("SHUFFLED")
kf = KFold(n_splits=5)
temp_train_metrics = []
temp_test_metrics = []
for train, test in kf.split(y):
    # Always predict the most common training label.
    train_common_class = statistics.mode(y[train])
    y_hat_train = np.ones(len(y[train]), dtype=np.int32) * train_common_class
    y_hat_test = np.ones(len(y[test]), dtype=np.int32) * train_common_class
    y_hat_train_baseline_acc = accuracy_score(y[train], y_hat_train)
    y_hat_test_baseline_acc = accuracy_score(y[test], y_hat_test)
    y_hat_train_baseline_rec = recall_score(y[train], y_hat_train)
    y_hat_test_baseline_rec = recall_score(y[test], y_hat_test)
    temp_train_metrics.append([y_hat_train_baseline_acc, y_hat_train_baseline_rec])
    temp_test_metrics.append([y_hat_test_baseline_acc, y_hat_test_baseline_rec])
# Mean and variance of [accuracy, recall] across the 5 folds.
baseline_train_metrics = [np.mean([row[0] for row in temp_train_metrics]), np.mean([row[1] for row in temp_train_metrics])]
baseline_test_metrics = [np.mean([row[0] for row in temp_test_metrics]), np.mean([row[1] for row in temp_test_metrics])]
baseline_train_variance = [np.var([row[0] for row in temp_train_metrics]), np.var([row[1] for row in temp_train_metrics])]
baseline_test_variance = [np.var([row[0] for row in temp_test_metrics]), np.var([row[1] for row in temp_test_metrics])]
print("BASELINE")
print(f"Training accuracy - {baseline_train_metrics[0]} +/- {baseline_train_variance[0]}")
print(f"Validation accuracy - {baseline_test_metrics[0]} +/- {baseline_test_variance[0]}")
print(f"Training recall - {baseline_train_metrics[1]} +/- {baseline_train_variance[1]}")
print(f"Validation recall - {baseline_test_metrics[1]} +/- {baseline_test_variance[1]}\n")
# <\BASELINE K-FOLD ANALYSIS>
|
17,920 | 20b89321b4a997c824dc23d6f779f617a68aeca6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Visualize Relay IR by Graphviz DOT language."""
from typing import (
Any,
Callable,
Dict,
)
from .interface import (
DefaultVizParser,
Plotter,
VizEdge,
VizGraph,
VizNode,
)
try:
import graphviz
except ImportError:
# add "from None" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"The graphviz package is required for DOT renderer. "
"Please install it first. For example, pip3 install graphviz"
) from None
DotVizParser = DefaultVizParser
class DotGraph(VizGraph):
    """Graphviz (DOT) representation of a Relay IR graph.

    See also :py:class:`tvm.contrib.relay_viz.dot.DotPlotter`

    Parameters
    ----------
    name: str
        name of this graph.
    graph_attr: Optional[Dict[str, str]]
        key-value pairs for the graph.
    node_attr: Optional[Dict[str, str]]
        key-value pairs for all nodes.
    edge_attr: Optional[Dict[str, str]]
        key-value pairs for all edges.
    get_node_attr: Optional[Callable[[VizNode], Dict[str, str]]]
        A callable returning attributes for the node.
    """

    def __init__(
        self,
        name: str,
        graph_attr: Dict[str, str] = None,
        node_attr: Dict[str, str] = None,
        edge_attr: Dict[str, str] = None,
        get_node_attr: Callable[[VizNode], Dict[str, str]] = None,
    ):
        self._name = name
        self._get_node_attr = (
            get_node_attr if get_node_attr is not None else self._default_get_node_attr
        )
        # graphviz treats a subgraph as a *cluster* subgraph when its name
        # starts with "cluster" (all lowercase).
        self._digraph = graphviz.Digraph(
            name=f"cluster_{self._name}",
            graph_attr=graph_attr,
            node_attr=node_attr,
            edge_attr=edge_attr,
        )
        self._digraph.attr(label=self._name)

    def node(self, viz_node: VizNode) -> None:
        """Add *viz_node* to the underlying graph.

        Nodes in a Relay IR Module are expected to be added in post-order.
        """
        label = f"{viz_node.type_name}\n{viz_node.detail}"
        self._digraph.node(viz_node.identity, label, **self._get_node_attr(viz_node))

    def edge(self, viz_edge: VizEdge) -> None:
        """Add the edge described by *viz_edge* to the underlying graph."""
        self._digraph.edge(viz_edge.start, viz_edge.end)

    @property
    def digraph(self):
        # The underlying graphviz.Digraph, consumed by DotPlotter.render().
        return self._digraph

    @staticmethod
    def _default_get_node_attr(node: VizNode):
        # Vars render as ellipses, everything else as boxes.
        return {"shape": "ellipse"} if "Var" in node.type_name else {"shape": "box"}
class DotPlotter(Plotter):
    """DOT language graph plotter.

    Accepts graphviz attributes for graphs, nodes, and edges — see
    https://graphviz.org/doc/info/attrs.html for the available keys.

    Parameters
    ----------
    graph_attr: Optional[Dict[str, str]]
        key-value pairs for all graphs.
    node_attr: Optional[Dict[str, str]]
        key-value pairs for all nodes.
    edge_attr: Optional[Dict[str, str]]
        key-value pairs for all edges.
    get_node_attr: Optional[Callable[[VizNode], Dict[str, str]]]
        A callable returning attributes for a specific node.
    render_kwargs: Optional[Dict[str, Any]]
        keyword arguments passed straight to `graphviz.Digraph.render()`.

    Examples
    --------
    .. code-block:: python

        from tvm.contrib import relay_viz
        dot_plotter = relay_viz.DotPlotter(node_attr={"color": "blue"})
        viz = relay_viz.RelayVisualizer(
            mod, relay_param=param, plotter=dot_plotter,
            parser=relay_viz.DotVizParser())
        viz.render("hello")
    """

    def __init__(
        self,
        graph_attr: Dict[str, str] = None,
        node_attr: Dict[str, str] = None,
        edge_attr: Dict[str, str] = None,
        get_node_attr: Callable[[VizNode], Dict[str, str]] = None,
        render_kwargs: Dict[str, Any] = None,
    ):
        self._name_to_graph = {}
        self._graph_attr = graph_attr
        self._node_attr = node_attr
        self._edge_attr = edge_attr
        self._get_node_attr = get_node_attr
        self._render_kwargs = {} if render_kwargs is None else render_kwargs

    def create_graph(self, name):
        """Create (and remember) a DotGraph for the function *name*."""
        graph = DotGraph(
            name, self._graph_attr, self._node_attr, self._edge_attr, self._get_node_attr
        )
        self._name_to_graph[name] = graph
        return graph

    def render(self, filename: str = None):
        """Render all collected graphs — a thin wrapper of `graphviz.Digraph.render()`."""
        kwargs = self._render_kwargs
        if filename is not None:
            kwargs["filename"] = filename
        # Remove the intermediate DOT source file by default.
        kwargs.setdefault("cleanup", True)

        # Each per-function graph becomes a cluster subgraph of one root graph.
        root_graph = graphviz.Digraph()
        for subgraph in self._name_to_graph.values():
            root_graph.subgraph(subgraph.digraph)
        root_graph.render(**kwargs)
|
17,921 | d4cbee7055a9f1350aeab35512dc4e70916654aa | # -*- coding: utf-8 -*-
from __future__ import division
P1=input('digite p1:')
c1=input('digite c1:')
p2=input('digite p2:')
c2=input('digite c2:')
if (p1*c1)=(p2*c2):
print('O')
elif (p1*c1)<(p2*c2):
print('-1')
else:
print('1')
|
17,922 | 594bcde343d0b14ec13f72ae25da36506e2810d7 | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
p_2490_2536_filename = os.path.join('/home/coh1/Experiments/StochasticDepthP', 'stochastic_depth_death_rate_cifar100+_20180125-11:42:54:345544_2490&2536.pkl')
p_2494_2489_filename = os.path.join('/home/coh1/Experiments/StochasticDepthP', 'stochastic_depth_death_rate_cifar100+_20180125-11:51:59:879674_2494&2489.pkl')
p_2474_2497_filename = os.path.join('/home/coh1/Experiments/StochasticDepthP', 'stochastic_depth_death_rate_cifar100+_20180130-13:15:35:724102_2474&2497.pkl')
def death_rate_plot(p_filename):
    """Bar-plot the per-block death rates stored in a pickled list.

    The validation/test error percentages are parsed from fixed positions in
    the file name (e.g. "..._2490&2536.pkl" -> 24.90 / 25.36).
    """
    base = os.path.split(p_filename)[1]
    valid_err = float(base[-13:-9]) / 100
    test_err = float(base[-8:-4]) / 100
    # BUG FIX: pickle files must be opened in binary mode ('rb') — text mode
    # fails on Python 3.  The context manager also guarantees the handle
    # closes on error.
    with open(p_filename, 'rb') as f:
        p_data = pickle.load(f)
    plt.figure(figsize=(10, 8))
    plt.bar(list(range(1, len(p_data) + 1)), p_data)
    plt.xticks(size=24)
    plt.xlabel('n-th residual block', fontsize=24)
    plt.yticks(size=24)
    plt.ylabel('Death rate', fontsize=24)
    plt.title('Validation Error : %5.2f%% / Test Error : %5.2f%%' % (valid_err, test_err), fontsize=24)
    plt.subplots_adjust(left=0.12, right=0.98, top=0.95, bottom=0.1)
    # Sanity check: total death rate mass across all blocks.
    print(np.sum(p_data))
    plt.show()
if __name__ == '__main__':
death_rate_plot(p_2490_2536_filename) |
17,923 | c9bb03af08a3a48218be9531704feb8a3251d581 | N = int(input())
# Print the exponent of the highest power of two <= N, i.e. floor(log2(N))
# for N >= 1 and 0 otherwise — O(1) via int.bit_length() instead of the
# original trial loop over up to 10**18 candidate exponents.
print(N.bit_length() - 1 if N >= 1 else 0)
|
17,924 | 82b40fe0a4dbd764e42123fdf8218cfc978999a6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Classification(models.Model):
    """How an exercise is categorised: utility, mechanics and force type."""
    utility = models.CharField(max_length=50)
    mechanics = models.CharField(max_length=50)
    force = models.CharField(max_length=50)
class Instruction(models.Model):
    """Free-text preparation and execution steps for an exercise."""
    preparation = models.TextField()
    execution = models.TextField()
# TODO: add models for Synergist, Stabilizer, etc.; decide how to represent a list of them (many-to-many?)
class Exercise(models.Model):
    """A single exercise with its classification and instructions."""
    name = models.CharField(max_length=50)
    # Deleting the referenced Classification/Instruction cascades to exercises.
    classification = models.ForeignKey(Classification, on_delete=models.CASCADE)
    target_muscle = models.CharField(max_length=50)
    apparatus = models.CharField(max_length=50)
    instructions = models.ForeignKey(Instruction, on_delete=models.CASCADE)
|
17,925 | d6e5806a24db7473b4ae2420b97b3ca137bf2779 | """ Module responsible for providing control of the camera which the system will use. """
import time
import numpy as np
import cv2
from PyQt5 import QtCore
__author__ = 'Curtis McAllister'
__maintainer__ = 'Curtis McAllister'
__email__ = 'mcallister_c20@ulster.ac.uk'
__status__ = 'Development'
class Camera(QtCore.QObject):
    """ Creates a Camera object able to be accessed by other objects, and provides controls for the camera. """

    # Emitted with each frame grabbed from the camera (ndarray from OpenCV).
    image_data = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, parent=None):
        """ Initialises the Camera object. """
        super().__init__(parent)
        self.camera = None  # cv2.VideoCapture, created lazily by start_recording()
        self.timer = QtCore.QBasicTimer()

    def start_recording(self, capture_duration):
        """ Starts the camera feed and records for *capture_duration* seconds. """
        self.camera = cv2.VideoCapture(0)  # device index 0 = default camera
        # Create timer to enforce how long the camera records for
        self.start_time = time.time()
        self.capture_duration = capture_duration
        self.timer.start(0, self)  # interval 0: fire as fast as the event loop allows

    def stop_recording(self):
        """ Stops the timer and releases the camera device. """
        self.timer.stop()
        self.camera.release()

    def timerEvent(self, event):
        """ Grab one frame per tick, emitting it via image_data; stop once
        the capture duration has been exceeded. """
        if int(time.time()) - self.start_time > self.capture_duration:
            self.stop_recording()
            # BUG FIX: previously fell through and called read() on the
            # camera that was just released.
            return
        recording, data = self.camera.read()
        if recording:
            self.image_data.emit(data)
|
17,926 | 0b2b840c769331bb022e6d3b1de1a1275ccc7bf2 | # Generated by Django 3.0.5 on 2020-04-20 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move Room.image uploads to room_images/ and make room_id unique."""

    dependencies = [
        ('rooms', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='room',
            name='image',
            field=models.ImageField(upload_to='room_images/'),
        ),
        migrations.AlterField(
            model_name='room',
            name='room_id',
            field=models.CharField(max_length=31, unique=True),
        ),
    ]
|
17,927 | 3f3573cc6beb46f48681b50e355eb22956a465e6 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Hunspell(AutotoolsPackage):
    """The most popular spellchecking library (sez the author...)."""

    homepage = "https://hunspell.github.io/"
    url = "https://github.com/hunspell/hunspell/archive/v1.6.0.tar.gz"

    version("1.7.0", sha256="bb27b86eb910a8285407cf3ca33b62643a02798cf2eef468c0a74f6c3ee6bc8a")
    version("1.6.0", sha256="512e7d2ee69dad0b35ca011076405e56e0f10963a02d4859dbcc4faf53ca68e2")

    # Full autotools toolchain needed to regenerate the configure script
    # (see the autoreconf override below).
    depends_on("autoconf", type="build")
    depends_on("automake", type="build")
    depends_on("libtool", type="build")
    depends_on("m4", type="build")
    depends_on("gettext")

    # TODO: If https://github.com/spack/spack/pull/12344 is merged, this
    # method is unnecessary.
    def autoreconf(self, spec, prefix):
        # Force-regenerate configure and install missing auxiliary files.
        autoreconf = which("autoreconf")
        autoreconf("-fiv")
|
def average(array):
    """Return the mean of the distinct values in *array*.

    Duplicates are ignored: the result is
    sum(unique values) / count(unique values).
    """
    distinct_values = set(array)
    return sum(distinct_values) / len(distinct_values)
17,929 | 7eac88959e2944c8ae19c6b8718814332f251002 | __author__ = 'Akash'
import os
import sys
import pickle
sys.path.append("../Model/")
from bridge_corr_net import *
def create_folder(folder):
    """Ensure that *folder* exists on disk, creating it (and parents) when missing."""
    already_there = os.path.exists(folder)
    if not already_there:
        os.makedirs(folder)
# Command-line layout: <data_folder> <model_folder> <projected_views_folder>
data_folder = sys.argv[1]
model_folder = sys.argv[2]
projected_views_folder = sys.argv[3]
# Key under which class labels are stored in each views dict.
LABELS_KEY = "labels"
model = BridgeCorrNet()
model.load(model_folder)
create_folder(projected_views_folder)
# NOTE(review): Python 2-style pickle usage (text-mode handle, never closed);
# under Python 3 this needs open(..., "rb") and a context manager.
all_train = pickle.load(open(data_folder + "all_train.pkl"))
def generate_projections(model, all_views_dict):
    """Project every view of *all_views_dict* through *model*.

    Relies on the LEFT/RIGHT/PIVOT keys imported via
    ``from bridge_corr_net import *``.  Returns a list of the three
    single-view projections, the three pairwise projections, and the labels
    (in the fixed order consumed by write_projections).
    """
    left_view = all_views_dict[LEFT]
    right_view = all_views_dict[RIGHT]
    pivot_view = all_views_dict[PIVOT]
    labels = all_views_dict[LABELS_KEY]
    # Single-view projections.
    left_projected_view = model.proj_from_left(left_view)
    right_projected_view = model.proj_from_right(right_view)
    pivot_projected_view = model.proj_from_pivot(pivot_view)
    # Projecting from pairs of views.
    left_pivot_projection = model.proj_from_left_pivot(left_view, pivot_view)
    right_pivot_projection = model.proj_from_right_pivot(right_view, pivot_view)
    right_left_projection = model.proj_from_left_right(right_view, left_view)
    return [left_projected_view, right_projected_view, pivot_projected_view, left_pivot_projection, right_pivot_projection, right_left_projection, labels]
# Project the training split, then load and project the test split.
[train_left_projected_view, train_right_projected_view, train_pivot_projected_view,
 train_left_pivot_projection, train_right_pivot_projection, train_right_left_projection, train_labels] = generate_projections(model, all_train)
# NOTE(review): same Python 2-style pickle.load on a text-mode handle as above.
all_test = pickle.load(open(data_folder + "test_views.pkl"))
[test_left_projected_view, test_right_projected_view, test_pivot_projected_view,
 test_left_pivot_projection, test_right_pivot_projection, test_right_left_projection, test_labels] = generate_projections(model, all_test)
def write_projections(postfix, projections_list):
    """Pickle each projected view (plus labels) into *projected_views_folder*.

    File naming is ``<view stem><postfix>.pkl``, with *projections_list* in
    the fixed order produced by generate_projections.  Files are opened in
    binary mode and closed deterministically: the original opened them in
    text mode ("w"), which breaks pickle under Python 3, and never closed
    the handles.
    """
    # Stems mirror the order of generate_projections' return list.
    file_stems = ["left_proj_view_", "right_proj_view_", "pivot_proj_view_",
                  "left_pivot_proj_view_", "right_pivot_proj_view_",
                  "right_left_proj_view_", "labels_"]
    for stem, payload in zip(file_stems, projections_list):
        with open(projected_views_folder + stem + postfix + ".pkl", "wb") as handle:
            pickle.dump(payload, handle)
# Persist the projections for both splits.
write_projections(postfix="train",
                  projections_list=[train_left_projected_view, train_right_projected_view, train_pivot_projected_view,
                                    train_left_pivot_projection, train_right_pivot_projection,
                                    train_right_left_projection, train_labels])
write_projections(postfix="test", projections_list=[test_left_projected_view, test_right_projected_view, test_pivot_projected_view,
                                                    test_left_pivot_projection, test_right_pivot_projection, test_right_left_projection, test_labels])
17,930 | ceb8b3aaefc98421d535bd6eb450cd50b3fa9656 | # -*- coding: utf-8 -*-
"""
@author: tz_zs
MNIST 升级----mnist_train.py
"""
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# 加载函数
from nnfc import mnist_inference
# Network / training hyper-parameters.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8      # initial learning rate for exponential decay
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001  # L2 regularisation weight
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# Checkpoint location and file name.
MODEL_SAVE_PATH = "/path/to/model/"
MODEL_NAME = "model.ckpt"
def train(mnist):
    """Train the fully connected classifier defined in ``mnist_inference``.

    Builds the TF1 graph (forward pass, L2-regularised cross-entropy loss,
    exponentially decayed SGD, moving averages of all trainable variables),
    then runs TRAINING_STEPS mini-batch steps, logging the loss and saving a
    checkpoint every 1000 steps.

    Args:
        mnist: dataset object exposing ``train.next_batch`` and
            ``train.num_examples`` (as returned by input_data.read_data_sets).
    """
    # Placeholders for input images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    # L2 regulariser applied to the weights inside the inference graph.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Forward pass.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Exponential moving average over all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # Cross-entropy plus the regularisation terms collected under 'losses'.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Learning rate decays once per epoch (num_examples / BATCH_SIZE steps).
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)
    # Optimiser step.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Bundle the gradient update and the moving-average update into one op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")
    # Checkpointing.
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Typo fix: local was previously spelled `loss_valuue`.
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Log and save a checkpoint every 1000 steps.
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def main(argv=None):
    """Entry point used by tf.app.run: load MNIST into /tmp/data and train."""
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    tf.app.run()
'''
After 1 training step(s), loss on training batch is 3.22362.
After 1001 training step(s), loss on training batch is 0.202338.
After 2001 training step(s), loss on training batch is 0.141154.
After 3001 training step(s), loss on training batch is 0.13816.
After 4001 training step(s), loss on training batch is 0.123687.
After 5001 training step(s), loss on training batch is 0.116358.
After 6001 training step(s), loss on training batch is 0.0994073.
After 7001 training step(s), loss on training batch is 0.0853637.
After 8001 training step(s), loss on training batch is 0.0775001.
After 9001 training step(s), loss on training batch is 0.072494.
After 10001 training step(s), loss on training batch is 0.0755896.
After 11001 training step(s), loss on training batch is 0.0617309.
After 12001 training step(s), loss on training batch is 0.0621173.
After 13001 training step(s), loss on training batch is 0.0540873.
After 14001 training step(s), loss on training batch is 0.0491002.
After 15001 training step(s), loss on training batch is 0.0505174.
After 16001 training step(s), loss on training batch is 0.0451144.
After 17001 training step(s), loss on training batch is 0.0472387.
After 18001 training step(s), loss on training batch is 0.041461.
After 19001 training step(s), loss on training batch is 0.0393669.
After 20001 training step(s), loss on training batch is 0.0477065.
After 21001 training step(s), loss on training batch is 0.0442965.
After 22001 training step(s), loss on training batch is 0.0363835.
After 23001 training step(s), loss on training batch is 0.0386328.
After 24001 training step(s), loss on training batch is 0.0365634.
After 25001 training step(s), loss on training batch is 0.0398796.
After 26001 training step(s), loss on training batch is 0.0374554.
After 27001 training step(s), loss on training batch is 0.034578.
After 28001 training step(s), loss on training batch is 0.0341904.
After 29001 training step(s), loss on training batch is 0.0366765.
''' |
17,931 | 7b7087fc5bb9f9b241a231aa896fc7d770ff336e | import math
def find_next_square(sq):
    """Return the next perfect square after *sq*, or -1 if *sq* is not itself
    a perfect square.

    Uses math.isqrt so the perfect-square test is exact for arbitrarily large
    integers; the original float-based ``math.sqrt(sq) % 1`` check loses
    precision above 2**53.  (The result is now an int; previously it was a
    numerically equal float.)
    """
    root = math.isqrt(sq)
    return (root + 1) ** 2 if root * root == sq else -1
# NOTE(review): `unittest` is never imported in this file, so this class
# definition raises NameError at import time — add `import unittest` (and a
# `unittest.main()` guard) for these tests to run.
class TestFindNextSquare(unittest.TestCase):
    """Unit tests for find_next_square."""

    def test_not_perfect_square(self):
        # Non-square input is rejected with -1.
        self.assertEqual(find_next_square(111), -1)

    def test_perfect_sqaure(self):
        # 121 = 11**2, so the next square is 12**2 = 144.
        self.assertEqual(find_next_square(121), 144)
# Interactive Netflix title picker: walks a hand-written decision tree of
# prompts and prints exactly one recommendation.  Prompt wording, answer
# tokens and title spellings are preserved exactly as originally written
# (including typos), since the program's behaviour depends on them.
print("welcome to Netflix movie suggetion program ")
in1 = input("are you watching with chldern? yes or no ")
in1 = str(in1)
if in1 == "yes":
    # --- watching with children ---
    in2 = str(input("do you have to watch with them? yes/y or no/n "))
    if in2 == "yes":
        in3 = str(input("feeling nostalgic? yes/y or no/n"))
        if in3 == "yes":
            in4 = str(input("old made new or justold ? old or new"))
            if in4 == "old":
                print(" Alice in Wonderland")
            else:
                print("The Lorax")
        else:
            in5 = str(input("are the kiids frightend easily? yes/y or no/n"))
            if in5 == "yes":
                print("Rango")
            else:
                in6 = str(input("musical?yes/y or no/n"))
                if in6 == "yes":
                    print("The Nightmare before christmas")
                else:
                    print("ParaNorman")
    else:
        in7 = str(input("Girls only? yes/y or no/n"))
        if in7 == "yes":
            print("The secret garden")
        else:
            in8 = str(input("muppets or music? type muppets or music"))
            if in8 == "muppets":
                print("seasame street")
            else:
                print("yo gabba gabba!")
else:
    # --- adult viewing: TV series vs movie ---
    in9 = str(input("tv series or movie? type tv series or movie"))
    if in9 == "tv series":
        in10 = str(input("30 minutes or 60 minutes? type 30 or 60"))
        if in10 == "60":
            # hour-long dramas
            in11 = str(input("coming age of storyline? yes or no "))
            if in11 == "yes":
                print("freaks and Geeks")
            else:
                in12 = str(input("criminal drama? yes or no"))
                if in12 == "yes":
                    in13 = str(input("classical mystery or trhillar cliffhanger? type mystery or thriller"))
                    if in13 == "mystery":
                        print("Sherlock(BBC)")
                    else:
                        in14 = str(input("do you want the mostr intemse series ever made? type yes or no "))
                        if in14 == "yes":
                            print("Breaking Bad")
                        else:
                            in15 = str(input("focus on gud guys or bad guys? type gud or bad"))
                            if in15 == "gud":
                                print("Law and Order")
                            else:
                                print("Son of Anarchy")
                else:
                    in16 = str(input("Escape with some fantacy sci-fi ? type yes or no "))
                    if in16 == "yes":
                        in17 = str(input("head to outer space? type yes or no"))
                        if in17 == "yes":
                            in18 = str(input("Explore or war? type explore or war"))
                            if in18 == "explore":
                                print("Star trek(TNG)")
                            else:
                                print("battlestar galactica")
                        else:
                            in19 = str(input("less science more fiction ? type yes or no"))
                            if in19 == "yes":
                                in20 = str(input("braiiiiiiiiins? yes or no"))
                                if in20 == "yes":
                                    print("The walking dead")
                                else:
                                    print("Lost")
                            else:
                                print("Soliders")
                    else:
                        in21 = str(input("Politics and fast talking ? yes or no"))
                        if in21 == "yes":
                            in22 = str(input("kevin,verbal kent, spacy? yes or no"))
                            if in22 == "yes":
                                print("house of cards")
                            else:
                                print(" The West Wing")
                        else:
                            in23 = str(input("Steeped in history/yes or no"))
                            if in23 == "yes":
                                print("Th Tudors")
                            else:
                                in24 = str(input("Do you want things to be ittle strange? yes or no"))
                                if in24 == "yes":
                                    print("Twn peaks")
                                else:
                                    print("Mad Men")
        else:
            # half-hour shows
            in25 = str(input("You want to see things in black & white ? yes or no"))
            if in25 == "yes":
                print("The Twilight Zone")
            else:
                in26 = str(input("Animated ? yes or no"))
                if in26 == "yes":
                    in27 = str(input("Dick and fart jokes with random musical intruders ? yes or no"))
                    if in27 == "yes":
                        print("Family guy")
                    else:
                        in28 = str(input(" gratitouls shots and upskirt shots ? yes or no "))
                        if in28 == "yes":
                            print("Highschool of the Dead")
                        else:
                            in29 = str(input("a little more Childish or Mature ? type childish or mature "))
                            if in29 == "childish":
                                in30 = str(input("world class social commentery ? yes or no"))
                                if in30 == "yes":
                                    print(" south park")
                                else:
                                    print("Futurama")
                            else:
                                in31 = str(input("do you want to watch most f***kd up show on netflix ? yes or no"))
                                if in31 == "yes":
                                    print("Ren & Stimpy Show")
                                else:
                                    print("Archer")
                else:
                    in32 = str(input("Do you want to remember laughter ? yes or no"))
                    if in32 == "yes":
                        in33 = str(input("do you want to do as little as thinking possible / yes or no"))
                        if in33 == "yes":
                            in34 = str(input("Skit show? yes or no"))
                            if in34 == "yes":
                                in35 = str(input("Do you want things to get a little odd, a little quirky"))
                                if in35 == "yes":
                                    print("The Kids in the Hall")
                                else:
                                    print("Chappelle's show")
                            else:
                                in36 = str(input("Are you really into weed and alcohol ? yes or no"))
                                if in36 == "yes":
                                    in37 = str(input("Can you hear the shit hawks circling in the shit winds ? type yes or no "))
                                    if in37 == "yes":
                                        print("Trailor park boys")
                                    else:
                                        print("Workaholics")
                                else:
                                    in38 = str(input("Are you really into football? type yes or no"))
                                    if in38 == "yes":
                                        print("The league")
                                    else:
                                        in39 = str(input("Satire or insanity ? type satair or insanity"))
                                        if in39 == "insanity":
                                            print("Its always sunny in philadelphia")
                                        else:
                                            print("Parks and recreation")
                        else:
                            in40 = str(input("Do you dig british humor ? yes or no"))
                            if in40 == "yes":
                                print("Fawlty towers")
                            else:
                                in41 = str(input(" Do you reacon to bit sophisticated ? yes or no"))
                                if in41 == "yes":
                                    print("Frasier")
                                else:
                                    in42 = str(input("Do you want a show with astounding rewatchability ? yes or no"))
                                    if in42 == "yes":
                                        print("Arrested Devlopment")
                                    else:
                                        in43 = str(input("Do you find awakward situation funny ? yes or no"))
                                        if in43 == "yes":
                                            print("Louise")
                                        else:
                                            print("Cheers")
                    else:
                        in44 = str(input("Do you like the paranormal ? type yes or no"))
                        if in44 == "yes":
                            print("The X-files")
                        else:
                            in45 = str(input("Mystery or Space ? type mystery or space"))
                            if in45 == "mystery":
                                in46 = str(input("are you about the solution or the process ? type solution or process"))
                                if in46 == "solution":
                                    print("the Rockford files")
                                else:
                                    print("columbo")
                            else:
                                in47 = str(input("Are you extremly nostalgic ? type yes or no "))
                                if in47 == "yes":
                                    print("Star trek")
                                else:
                                    print("Firefly")
    else:
        # --- movies ---
        in48 = str(input("want your night to action packed ? type yes or no"))
        if in48 == 'yes':
            in49 = str(input(" have you seen terminator ? yes or no"))
            if in49 == "yes":
                print("Terminator-2 Judgement Day")
            else:
                in50 = str(input(" Hw about a war movie ? type yes or no "))
                if in50 == "yes":
                    print("Black hawk down")
                else:
                    in51 = str(input("horses , boots, and 10 gallon of hats ? type yes or no"))
                    if in51 == "yes":
                        in52 = str(input("you want to go oldschol or fresh ? type old or fresh"))
                        if in52 == "fresh":
                            print("true girt")
                        else:
                            print("One upon a time in the west")
                    else:
                        in53 = str(input("Do you want it to be serious ? type yes or no"))
                        if in53 == "yes":
                            in54 = str(input("Set in future or present day ? type future or present"))
                            # NOTE(review): the prompt offers "future or present"
                            # but the check compares against "now", so "present"
                            # unexpectedly falls through to Aeon Flux.
                            if in54 == "now":
                                print("Shooter")
                            else:
                                print("Aeon Flux")
                        else:
                            print("Top Gun")
        else:
            in55 = str(input("What about a documentry ? type yes or no"))
            if in55 == "yes":
                in56 = str(input("Do youconsider snowboard films to be documentries ? type ys or no"))
                if in56 == "yes":
                    print("are if flight")
                else:
                    in57 = str(input("Do you want yoyr face to be melted against maximum rock ? type yes or no"))
                    if in57 == "yes":
                        print(" Year of horse")
                    else:
                        in58 = str(input("Take it to the streets ? type yes or no"))
                        if in58 == "yes":
                            in59 = str(input("Pain or paint ? type pain or paint"))
                            if in59 == "paint":
                                print(" Exit through the gift shop")
                            else:
                                print("Bones Brigade: A Autobiography ")
                        else:
                            in60 = str(input("Do you want to see how ridiculus religion can be ? type yes or no "))
                            if in60 == "yes":
                                print("Religulous")
                            else:
                                in61 = str(input("Are you a woody allen fa or would you like to become one ? type yes or no"))
                                if in61 == "yes":
                                    print(" woody allen a documentry")
                                else:
                                    in62 = str(input("Do you subtitle bother you ? type yes or no"))
                                    if in62 == "yes":
                                        print("Grizzly Man")
                                    else:
                                        print("Man on Wire")
            else:
                in63 = str(input("monsters,murdrers and madness ? type ys or no"))
                if in63 == "yes":
                    in64 = str(input("Should there be a blood soaked chainsaw at some point ? type yes or no"))
                    if in64 == "yes":
                        print(" Evil dead")
                    else:
                        in65 = str(input("three part classis horror, one part something new ? type yes or no"))
                        if in65 == "yes":
                            print(" Cabin in the Woods")
                        else:
                            in66 = str(input("Do abandon asylums scare the sence out of you ? type yes or no"))
                            if in66 == "yes":
                                print(" session-9")
                            else:
                                print(" insidous")
                else:
                    in67 = str(input("Ae you in mood to laugh ? type yes or no"))
                    if in67 == "yes":
                        in68 = str(input("Do you want some drama with laughter ? type yes or no"))
                        if in68 == "yes":
                            in69 = str(input("are you advance to black and white ? type yes or no "))
                            if in69 == "yes":
                                in70 = str(input("do you love paul newman ? do you want to ? type yes or no"))
                                if in70 == "yes":
                                    in71 = str(input("do you want to watch something great or legendery ? type great or legendery"))
                                    if in71 == "great":
                                        print("slap shot")
                                    else:
                                        print("butch cassady and sundnace kid")
                                else:
                                    in72 = str(input("gritty or pretty ? type pretty or gritty"))
                                    if in72 == "pretty":
                                        in73 = str(input("slow paces quieky romance indie? type yes or no"))
                                        if in73 == "yes":
                                            print(" lost in transition")
                                        else:
                                            print("big fish")
                                    else:
                                        print("Transpotting")
                            else:
                                in74 = str(input("Do you want your humor to be more or less lewd? type more or less"))
                                if in74 == "more":
                                    print("clerks")
                                else:
                                    print("ManHattan")
                        else:
                            in75 = str(input("Do you mind when people singing ? type yes or no"))
                            if in75 == "yes":
                                print("Happy Gilmore")
                            else:
                                in76 = str(input("do you finf cleaver wordplay entertaining? type yes or no"))
                                if in76 == "yes":
                                    print("Duck Soup")
                                else:
                                    print("FErries buller's day off")
                    else:
                        in77 = str(input("do you have hunger for adventure ? type yes or no"))
                        if in77 == "yes":
                            in78 = str(input("Were you a fan of firlfy? type yes or no"))
                            if in78 == "yes":
                                print(" Serinity")
                            else:
                                in79 = str(input("Do you like main role played by kids? type yes or no"))
                                if in79 == "yes":
                                    in80 = str(input("Want some thrills with your adventure? type yes or no"))
                                    if in80 == "yes":
                                        print("super-8")
                                    else:
                                        print("the hunger games")
                                else:
                                    print("star trek 2: The wrath of khan")
                        else:
                            in81 = str(input("Care for sme criminal intent? type yes or no"))
                            if in81 == "yes":
                                in82 = str(input("Do you enjoy Diatribes entirey unrelateds to the plot ? type yes or no"))
                                if in82 == "yes":
                                    print("Reservior Dogs")
                                else:
                                    in83 = str(input("Are you upset by seeing drug use? type yes or no"))
                                    if in83 == "yes":
                                        in84 = str(input("mob story? yes or no"))
                                        if in84 == "yes":
                                            print("miller's crossing")
                                        else:
                                            print("clay pigeons")
                                    else:
                                        in85 = str(input("Do you love hiphop? type yes or no"))
                                        if in85 == "yes":
                                            print("Hustle7 Flow")
                                        else:
                                            print("Traffic")
                            else:
                                in86 = str(input("Are you ready for Romance? type yes or no"))
                                if in86 == "yes":
                                    in87 = str(input("Tom cruise or Matt Damon ? type tom or matt"))
                                    if in87 == "tom":
                                        print("Vannila Sky")
                                    else:
                                        print("Good WIll Hunting")
                                else:
                                    in88 = str(input("Hypothetically: Would a long brutal rape scene ruin your night? type yes or no "))
                                    if in88 == "yes":
                                        in89 = str(input("Do you want to be completely confused ? type yes or no"))
                                        if in89 == "yes":
                                            print("Pi")
                                        else:
                                            in90 = str(input("Are subtitles going to be probelm ? type yes or no"))
                                            if in90 == "yes":
                                                in91 = str(input("Keep it Mellow? type yes or no"))
                                                if in91 == "yes":
                                                    print("Midnight Cowboy")
                                                else:
                                                    print("The Mechinist")
                                            else:
                                                print("Tomboy")
                                    else:
                                        in92 = str(input("Have you seen a original girl with a dragon tattoo ? type yes or no"))
                                        if in92 == "yes":
                                            in93 = str(input("Hace you seen a girl who played with fire? type yes or no"))
                                            if in93 == "yes":
                                                print("The girl who kiked hornet's Nest")
                                            else:
                                                print("the girl who played with fire")
                                        else:
                                            print("The girl with the dragon tattoo")
17,933 | 951211f7d33dc996bfc6688123eb90113cb80387 |
# train-clean-100: 251 speaker, 28539 utterance
# train-clean-360: 921 speaker, 104104 utterance
# test-clean: 40 speaker, 2620 utterance
# batchisize 32*3 : train on triplet: 3.3s/steps , softmax pre train: 3.1 s/steps ,select_best_batch
# local: load pkl time 0.00169s - > open file time 4.2e-05s pickle loading time 0.00227s
# server: load pkl time 0.0389s -> open file time 6.1e-05s pickle load time 0.0253s
import pandas as pd
import random
import numpy as np
import constants_vctk as c#=======================================================2020/04/17 20:52
from utils import get_last_checkpoint_if_any
from models import convolutional_model
from triplet_loss import deep_speaker_loss
from pre_process import data_catalog
import heapq
import threading
from time import time, sleep
alpha = c.ALPHA
def batch_cosine_similarity(x1, x2):
    """Row-wise dot product of two batches of vectors.

    For L2-normalised rows this equals cosine similarity:
    1 = equal direction, -1 = opposite direction.
    https://en.wikipedia.org/wiki/Cosine_similarity
    """
    return (x1 * x2).sum(axis=1)
def matrix_cosine_similarity(x1, x2):
    """Pairwise similarity matrix between two embedding sets.

    Entry [i, j] is the dot product of x1[i] and x2[j]; for L2-normalised
    rows this is cosine similarity (1 = equal direction, -1 = opposite).
    https://en.wikipedia.org/wiki/Cosine_similarity
    """
    return x1 @ x2.T
def clipped_audio(x, num_frames=c.NUM_FRAMES):
    """Clip an utterance to a fixed number of frames (num_frames).

    When there is more than 20 frames of slack, the random start offset is at
    least 20; with less slack any start offset is allowed; inputs that are
    already short enough are returned unchanged.
    """
    total = x.shape[0]
    if total > num_frames + 20:
        start = np.random.randint(20, total - num_frames)
        return x[start: num_frames + start]
    if total > num_frames:
        start = np.random.randint(0, total - num_frames)
        return x[start: num_frames + start]
    return x
# Per-speaker cursor into spk_utt_dict, so successive preprocess() calls walk
# through each speaker's utterance list instead of resampling the same files.
spk_utt_index = {}
def preprocess(unique_speakers, spk_utt_dict, candidates=c.CANDIDATES_PER_BATCH):
    """Assemble one candidate pool: two consecutive utterances from each of
    candidates/2 randomly chosen speakers.

    Uses the module-level ``spk_utt_index`` cursor so repeated calls advance
    through every speaker's utterance list.  Returns (features, labels) as
    numpy arrays; each feature is a clipped spectrogram loaded from .npy,
    each label the speaker id parsed from the file name.
    """
    files = []
    # Sample speakers with replacement only when there are too few of them.
    flag = False if len(unique_speakers) > candidates/2 else True
    speakers = np.random.choice(unique_speakers, size=int(candidates/2), replace=flag)
    for speaker in speakers:
        index = 0
        ll = len(spk_utt_dict[speaker])  # number of utterances for this speaker
        if speaker in spk_utt_index:
            index = spk_utt_index[speaker] % ll
        # Take two consecutive utterances, then advance the cursor by two.
        files.append(spk_utt_dict[speaker][index])
        files.append(spk_utt_dict[speaker][(index+1) % ll])
        spk_utt_index[speaker] = (index + 2) % ll
    '''
    for ii in range(int(candidates/2)):
        utts = libri[libri['speaker_id'] == speakers[ii]].sample(n=2, replace=False)
        files = files.append(utts)
    #print("sampling utterance time {0:.5}s".format(time() - orig_time))
    #orig_time = time()
    '''
    x = []
    labels = []
    for file in files:
        x_ = np.load(file)
        x_ = clipped_audio(x_)
        # Expected network input shape: (c.NUM_FRAMES, 64, 1)
        if x_.shape != (c.NUM_FRAMES, 64, 1):
            print("Error !!!", file['filename'].values[0])
        x.append(x_)
        # labels.append(file.split("/")[-1].split("-")[0])
        # Speaker id is the prefix before '_' in the file name.
        labels.append(file.split("/")[-1].split('_')[0])
    # features = np.array(x)  # (batchsize, num_frames, 64, 1)
    return np.array(x), np.array(labels)
# Shared producer/consumer buffer: the addstack daemon thread appends
# (features, labels) batches; getbatch() pops them FIFO.
stack = []
def create_data_producer(unique_speakers, spk_utt_dict, candidates=c.CANDIDATES_PER_BATCH):
    """Spawn the background thread that keeps the shared ``stack`` filled
    with preprocessed batches (see addstack).

    The thread is a daemon so it will not keep the process alive on exit.
    """
    producer = threading.Thread(target=addstack, args=(unique_speakers, spk_utt_dict, candidates))
    # `Thread.setDaemon` is deprecated (and removed in newer Pythons);
    # set the attribute directly instead.
    producer.daemon = True
    producer.start()
def addstack(unique_speakers, spk_utt_dict, candidates=c.CANDIDATES_PER_BATCH):
    """Producer loop: keep the shared ``stack`` topped up with preprocessed
    batches (runs forever; intended for a daemon thread).

    Sleeps briefly while the stack is full, and reshuffles every speaker's
    utterance list after each 100 produced batches.
    """
    data_produce_step = 0
    while True:
        # Back off while the consumer has enough batches queued.
        if len(stack) >= c.DATA_STACK_SIZE:
            sleep(0.01)
            continue
        orig_time = time()
        feature, labels = preprocess(unique_speakers, spk_utt_dict, candidates)
        #print("pre-process one batch data costs {0:.4f} s".format(time() - orig_time))
        stack.append((feature, labels))
        data_produce_step += 1
        if data_produce_step % 100 == 0:
            for spk in unique_speakers:
                np.random.shuffle(spk_utt_dict[spk])
def getbatch():
    """Consumer side of the producer/consumer pair: busy-wait until the
    shared ``stack`` holds a batch, then pop and return the oldest (FIFO)."""
    while not stack:
        continue
    return stack.pop(0)
# Rolling history table of recently seen batches, used by best_batch for
# hard-triplet mining.  Grows until hist_table_size * candidates entries,
# then slots are overwritten round-robin via hist_index.
hist_embeds = None
hist_labels = None
hist_features = None
hist_index = 0  # next slot to overwrite once the table is full
hist_table_size = c.HIST_TABLE_SIZE
def best_batch(model, batch_size=c.BATCH_SIZE, candidates=c.CANDIDATES_PER_BATCH):
    """Hard-triplet mining: build one (anchor, positive, negative) batch.

    Pulls a fresh candidate pool from the producer, embeds it with *model*,
    folds it into the module-level rolling history table, then — per anchor
    speaker — selects the least-similar positives and most-similar negatives
    by similarity of the current embeddings.  Returns (batch, labels), where
    batch is anchors + positives + negatives concatenated along axis 0.
    """
    orig_time = time()
    global hist_embeds, hist_features, hist_labels, hist_index, hist_table_size
    features, labels = getbatch()
    print("get batch time {0:.3}s".format(time() - orig_time))
    orig_time = time()
    embeds = model.predict_on_batch(features)
    print("forward process time {0:.3}s".format(time()-orig_time))
    # Insert the fresh batch into the history table: grow until full, then
    # overwrite slots round-robin.
    if hist_embeds is None:
        hist_features = np.copy(features)
        hist_labels = np.copy(labels)
        hist_embeds = np.copy(embeds)
    else:
        if len(hist_labels) < hist_table_size*candidates:
            hist_features = np.concatenate((hist_features, features), axis=0)
            hist_labels = np.concatenate((hist_labels, labels), axis=0)
            hist_embeds = np.concatenate((hist_embeds, embeds), axis=0)
        else:
            hist_features[hist_index*candidates: (hist_index+1)*candidates] = features
            hist_labels[hist_index*candidates: (hist_index+1)*candidates] = labels
            hist_embeds[hist_index*candidates: (hist_index+1)*candidates] = embeds
            hist_index = (hist_index+1) % hist_table_size
    anchor_batch = []
    positive_batch = []
    negative_batch = []
    anchor_labs, positive_labs, negative_labs = [], [], []
    orig_time = time()
    # Choose anchor speakers and gather the history indices of their entries.
    anh_speakers = np.random.choice(hist_labels, int(batch_size/2), replace=False)
    anchs_index_dict = {}
    inds_set = []
    for spk in anh_speakers:
        anhinds = np.argwhere(hist_labels==spk).flatten()
        anchs_index_dict[spk] = anhinds
        inds_set.extend(anhinds)
    inds_set = list(set(inds_set))
    speakers_embeds = hist_embeds[inds_set]
    sims = matrix_cosine_similarity(speakers_embeds, hist_embeds)
    print('beginning to select..........')
    for ii in range(int(batch_size/2)):  # two triplet pairs per anchor speaker
        while True:
            speaker = anh_speakers[ii]
            inds = anchs_index_dict[speaker]
            np.random.shuffle(inds)
            anchor_index = inds[0]
            pinds = []
            # Candidate positives: same speaker, different feature content.
            for jj in range(1, len(inds)):
                if (hist_features[anchor_index] == hist_features[inds[jj]]).all():
                    continue
                pinds.append(inds[jj])
            if len(pinds) >= 1:
                break
        # Hardest positives: lowest similarity to the anchor.
        sap = sims[ii][pinds]
        min_saps = heapq.nsmallest(2, sap)
        pos0_index = pinds[np.argwhere(sap == min_saps[0]).flatten()[0]]
        if len(pinds) > 1:
            pos1_index = pinds[np.argwhere(sap == min_saps[1]).flatten()[0]]
        else:
            pos1_index = pos0_index
        # Hardest negatives: highest similarity among other speakers.
        ninds = np.argwhere(hist_labels != speaker).flatten()
        san = sims[ii][ninds]
        max_sans = heapq.nlargest(2, san)
        neg0_index = ninds[np.argwhere(san == max_sans[0]).flatten()[0]]
        neg1_index = ninds[np.argwhere(san == max_sans[1]).flatten()[0]]
        anchor_batch.append(hist_features[anchor_index]); anchor_batch.append(hist_features[anchor_index])
        positive_batch.append(hist_features[pos0_index]); positive_batch.append(hist_features[pos1_index])
        negative_batch.append(hist_features[neg0_index]); negative_batch.append(hist_features[neg1_index])
        anchor_labs.append(hist_labels[anchor_index]); anchor_labs.append(hist_labels[anchor_index])
        positive_labs.append(hist_labels[pos0_index]); positive_labs.append(hist_labels[pos1_index])
        negative_labs.append(hist_labels[neg0_index]); negative_labs.append(hist_labels[neg1_index])
    batch = np.concatenate([np.array(anchor_batch), np.array(positive_batch), np.array(negative_batch)], axis=0)
    labs = anchor_labs + positive_labs + negative_labs
    print("select best batch time {0:.3}s".format(time() - orig_time))
    return batch, np.array(labs)
if __name__ == '__main__':
    # Build the model, resume from the latest checkpoint if one exists, then
    # exercise best_batch for 100 iterations as a smoke test.
    model = convolutional_model()
    model.compile(optimizer='adam', loss=deep_speaker_loss)
    last_checkpoint = get_last_checkpoint_if_any(c.CHECKPOINT_FOLDER)
    if last_checkpoint is not None:
        print('Found checkpoint [{}]. Resume from here...'.format(last_checkpoint))
        model.load_weights(last_checkpoint)
        # NOTE(review): grad_steps is parsed from the checkpoint name but
        # never used below.
        grad_steps = int(last_checkpoint.split('_')[-2])
        print('[DONE]')
    # Index the dataset: map each speaker id to its list of utterance files.
    libri = data_catalog(c.DATASET_DIR)
    unique_speakers = libri['speaker_id'].unique()
    labels = libri['speaker_id'].values
    files = libri['filename'].values
    spk_utt_dict = {}
    for i in range(len(unique_speakers)):
        spk_utt_dict[unique_speakers[i]] = []
    for i in range(len(labels)):
        spk_utt_dict[labels[i]].append(files[i])
    # Start the producer thread, then repeatedly mine triplet batches.
    create_data_producer(unique_speakers, spk_utt_dict)
    for i in range(100):
        x, y = best_batch(model)
        print(x.shape)
        #print(y)
|
17,934 | 2b1098b9a48e262d39e8fc4f1765f1f36f992270 | import numpy as np
class AdvancedFilter:
    """3x3 neighbourhood filters for H x W x C image arrays.

    Border pixels (first/last row and column) are left untouched: the output
    starts as a copy of the input and only interior pixels are recomputed.
    Note: if the input dtype is integral (e.g. uint8), assigning the averaged
    values truncates to that dtype, matching the original behaviour.
    """

    def mean_blur(self, arr):
        """Return a copy of *arr* with each interior pixel replaced by the
        per-channel mean of its 3x3 neighbourhood.

        BUG FIX: the original sliced ``arr[i-1:i+1, j-1:j+1]`` — a 2x2
        window — while still dividing by 9, so interior pixels were badly
        darkened; the slice now covers the full 3x3 window.  Averaging over
        ``axis=(0, 1)`` also generalises the original's hard-coded three
        channels to any channel count.
        """
        out = arr.copy()
        last_row = arr.shape[0] - 1
        last_col = arr.shape[1] - 1
        for i in range(1, last_row):
            for j in range(1, last_col):
                window = arr[i - 1:i + 2, j - 1:j + 2]
                # Per-channel mean over the 9 neighbourhood pixels.
                out[i, j] = window.mean(axis=(0, 1))
        return out

    def gaussian_blur(self, arr):
        """Return a copy of *arr* blurred with the standard 3x3 Gaussian
        kernel (corner weight 1, edge 2, centre 4; normalised by 16)."""
        out = arr.copy()
        last_row = arr.shape[0] - 1
        last_col = arr.shape[1] - 1
        for i in range(1, last_row):
            for j in range(1, last_col):
                out[i][j] = (4 * arr[i][j]
                             + 2 * (arr[i][j + 1] + arr[i][j - 1] + arr[i + 1][j] + arr[i - 1][j])
                             + arr[i + 1][j + 1] + arr[i + 1][j - 1]
                             + arr[i - 1][j + 1] + arr[i - 1][j - 1]) / 16
        return out
17,935 | 62fb68932d53777ba4a8e4c44ad3642f615e46d2 | import pytest
from marshmallow import ValidationError
from aliceplex.schema import Person, PersonSchema, PersonStrictSchema
def test_person_schema_load(person_schema: PersonSchema):
    """Lenient loading: explicit values round-trip; None and missing fields
    both deserialise to a default Person."""
    schema = person_schema
    load = schema.load({"name": "name", "photo": "photo"})
    assert load == Person(name="name", photo="photo")
    # Explicit None values fall back to defaults.
    load = schema.load({"name": None, "photo": None})
    assert load == Person()
    # Entirely missing fields do too.
    load = schema.load({})
    assert load == Person()
def test_person_schema_dump(person_schema: PersonSchema):
    """Lenient dumping: set fields serialise to their values, unset fields
    serialise to None (keys are always present)."""
    schema = person_schema
    dump = schema.dump(Person(name="name", photo="photo"))
    assert dump == {"name": "name", "photo": "photo"}
    dump = schema.dump(Person())
    assert dump == {"name": None, "photo": None}
def test_person_strict_schema_load(person_strict_schema: PersonStrictSchema):
    """Strict loading: a missing photo is tolerated, but explicit None values
    and a missing name raise ValidationError."""
    schema = person_strict_schema
    load = schema.load({"name": "name", "photo": "photo"})
    assert load == Person(name="name", photo="photo")
    # photo may be omitted entirely.
    load = schema.load({"name": "name"})
    assert load == Person(name="name")
    # Explicit Nones are rejected.
    with pytest.raises(ValidationError):
        schema.load({"name": None, "photo": None})
    # So is a completely empty payload.
    with pytest.raises(ValidationError):
        schema.load({})
def test_person_strict_schema_dump(person_strict_schema: PersonStrictSchema):
    """Strict dumping matches the lenient schema: unset fields dump as None."""
    schema = person_strict_schema
    dump = schema.dump(Person(name="name", photo="photo"))
    assert dump == {"name": "name", "photo": "photo"}
    dump = schema.dump(Person())
    assert dump == {"name": None, "photo": None}
|
'''
Faça um Programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o usuário.
'''
# Read the square's dimensions (kept as base/height, matching the original prompts).
basequadrado = float(input("\nQual a base do quadrado: "))
alturaquadrado = float(input("Qual a altura do quadrado: "))
area = basequadrado * alturaquadrado
# BUG FIX: "dobro" (double) of the area is area * 2; the original used
# pow(area, 2), which squared the area instead of doubling it.
dobroarea = area * 2
print("\nÁrea do quadrado = {}".format(area))
print("Dobro da área do quadrado = {}".format(dobroarea))
17,937 | d65e4f6e3be2df760f1035363f18964f38a30963 | #!/usr/bin/env python
'''
@Description: pyA20控制GPIO
@Version: 1.0
@Autor: lhgcs
@Date: 2019-09-06 11:36:50
@LastEditors: lhgcs
@LastEditTime: 2019-09-16 18:11:05
'''
'''
安装:pip install pyA20
'''
import os
import sys
import time
from pyA20.gpio import gpio
from pyA20.gpio import port
#print dir(port)
# Initialise the pyA20 GPIO subsystem (must run before any setcfg/output call).
gpio.init()
'''
@description: Drive a GPIO pin to a given logic level (output).
@param {type}
@return:
'''
def gpio_output(pin, level):
    """Configure *pin* as an output and drive it to *level* (0 or 1).

    BUG FIX: this helper was originally (mis)named ``gpio_input`` and was
    immediately shadowed by the read helper of the same name defined below,
    making it unreachable; renaming it resolves the duplicate definition.
    """
    gpio.setcfg(pin, gpio.OUTPUT)
    gpio.output(pin, level)
'''
@description: 读电平
@param {type}
@return:
'''
def gpio_input(pin):
    """Configure *pin* as an input and return its current logic level."""
    gpio.setcfg(pin, gpio.INPUT)
    return gpio.input(pin)
'''
@description: 控制风扇降温
@param {type}
@return:
'''
def fan_control():
    """Drive a cooling fan from the CPU temperature, polling once per second."""
    # Command-line arguments: GPIO pin number and initial output level.
    args = sys.argv
    # Pin to drive
    Pin = int(args[1])  # Pin = 2
    # Initial level
    Act = int(args[2])
    # Configure the pin as an output.
    gpio.setcfg(Pin, gpio.OUTPUT)
    # Apply the initial level.
    gpio.output(Pin, 1 if Act == 1 else 0)
    while True:
        # output = os.popen('cat /sys/devices/virtual/hwmon/hwmon1/temp1_input')
        output = os.popen('cat /sys/devices/virtual/thermal/thermal_zone0/temp')
        wd = output.read()
        temp = int(wd)
        print("cpu温度: ", temp)
        # Threshold 50000 — presumably milli-degrees Celsius from the thermal
        # sysfs node, i.e. fan on above ~50 °C; TODO confirm on this board.
        if temp > 50000:
            gpio.output(Pin, 1)
        else:
            gpio.output(Pin, 0)
        time.sleep(1)


if __name__ == "__main__":
    fan_control()
|
17,938 | 56cae801d226c5facc28363b5e82bda0f324ab93 | from tkinter import *
from Wall import Wall
from Obstacle import Obstacle
from Player import Player
from Wallpad import Wallpad
import random
DISPLAY_WIDTH = 800
DISPLAY_HEIGHT = 600
class Step_move:
    """Road-crossing game board drawn on a Tk canvas.

    Draws the border walls, background stripes, obstacle boxes and the player
    sprite; handles keyboard movement, obstacle animation and win/lose
    detection (reach the top wall to win, touch a border or an obstacle to
    lose).

    NOTE(review): several methods read module-level globals (``aWall``,
    ``aPlay``, ``mplayer``, ``Mobstacle``, ``aobstacle0``..``aobstacle9``)
    that are created by the script below the class, so the class is not
    usable standalone — confirm before reusing it elsewhere.
    """
    def __init__(self,parent):
        # Full-window white canvas everything is drawn onto.
        self.parent = parent
        self.canvas = Canvas(self.parent,width=DISPLAY_WIDTH,height=DISPLAY_HEIGHT,bg = 'white')
        self.canvas.pack()

    def hitPlayer(self,Mobstacle,mplayer):
        """Return True when the player's bounding box overlaps the obstacle's.

        The vertical range is shrunk (ya2-12 / ya1+3) so only a visually
        convincing overlap of the sprite counts as a hit.
        """
        xP1,yP1,xP2,yP2 = self.getCurPos(mplayer)
        xa1,ya1,xa2,ya2, = self.getCurPos(Mobstacle)
        if xP1 <= xa2 and xP2 >= xa1 and yP1 <= ya2-12 and yP2 >= ya1+3 :
            return True
        else: return False

    def createtable(self,aWall):
        """Draw the four border lines of the playing field from a Wall object."""
        leftWall=self.canvas.create_line(aWall.xLeft,aWall.xTop,aWall.xLeft,aWall.xBottom)
        RightWall=self.canvas.create_line(aWall.xRight,aWall.xTop,aWall.xRight,aWall.xBottom)
        TopWall=self.canvas.create_line(aWall.xLeft,aWall.xTop,aWall.xRight,aWall.xTop)
        BottomWall=self.canvas.create_line(aWall.xLeft,aWall.xBottom,aWall.xRight,aWall.xBottom)

    def createWallpad(self,aWallpad,color):
        """Draw a filled rectangle (start zone / lane stripe) in *color*."""
        x1 = aWallpad.x1
        y1 = aWallpad.y1
        x2 = aWallpad.x2
        y2 = aWallpad.y2
        self.canvas.create_rectangle(x1,y1,x2,y2,fill=color)

    def createPlayer(self,aPlay,image):
        """Place the player sprite at its start position; return its canvas id."""
        x1 = aPlay.xpos
        y1 = aPlay.ypos
        img = image
        Player = self.canvas.create_image(aPlay.xpos,aPlay.ypos,image = image)
        return Player

    def movePlayer(self,xSpeed,ySpeed,mplayer):
        """Move the player by (xSpeed, ySpeed) and evaluate win/lose.

        Reaching the top wall pops a "You Win!" window; touching a border or
        an obstacle zeroes the player speed and pops "Game Over!".
        NOTE(review): reads globals aWall, aPlay, Mobstacle and aobstacle;
        each popup opens a second Tk root and blocks in its own mainloop.
        """
        self.canvas.move(mplayer,xSpeed,0)
        self.canvas.move(mplayer,0,ySpeed)
        if self.Win(aWall,mplayer)== True:
            root=Tk()
            gamewin = Label(root,font=('arial',25,'bold'),text='You Win!', fg='white',anchor='center',bd=300,bg='green',width=10,height = 2)
            gamewin.pack()
            root.mainloop()
        if self.hitWallPlayer(aWall,mplayer) == True:
            aPlay.vel = 0
            root=Tk()
            gameover = Label(root,font=('arial',25,'bold'),text='Game Over!', fg='white',anchor='center',bd=300,bg='red',width=10,height = 2)
            gameover.pack()
            root.mainloop()
        if self.hitPlayer(Mobstacle,mplayer) == True:
            aPlay.vel = 0
            aobstacle.xvel = 0
            root=Tk()
            gameover = Label(root,font=('arial',25,'bold'),text='Game Over!', fg='white',anchor='center',bd=300,bg='red',width=10,height = 2)
            gameover.pack()
            root.mainloop()

    def controlPlayer(self,aPlay,mplayer):
        """Bind the arrow keys to player movement.

        Left/Right/Up move by the player's speed; Down is deliberately left
        disabled (commented out) so the player cannot retreat.
        """
        self.parent.bind("<KeyPress-Left>",lambda Left:self.movePlayer(-aPlay.vel,0,mplayer))
        self.parent.bind("<KeyPress-Right>",lambda Right:self.movePlayer(aPlay.vel,0,mplayer))
        self.parent.bind("<KeyPress-Up>",lambda Up:self.movePlayer(0,-aPlay.vel,mplayer))
        #self.parent.bind("<KeyPress-Down>",lambda Down:self.movePlayer(0,aPlay.vel,mplayer))

    def createobstacle(self,aobstacle,color):
        """Draw an obstacle rectangle in *color*; return its canvas id."""
        x1 = aobstacle.xpos
        y1 = aobstacle.ypos
        x2 = aobstacle.xpos + aobstacle.width
        y2 = aobstacle.ypos + aobstacle.height
        obstacle = self.canvas.create_rectangle(x1,y1,x2,y2,fill=color)
        return obstacle

    def moveobstacle(self,Mobstacle,aobstacle):
        """Advance an obstacle one step, bouncing off the side walls.

        On collision with the player, zero every obstacle's speed plus the
        player's and pop "Game Over!". Reschedules itself every 10 ms via
        ``canvas.after``.
        NOTE(review): reads globals aWall, mplayer, aPlay and aobstacle0..9.
        """
        self.canvas.move(Mobstacle,aobstacle.xvel,aobstacle.yvel)
        if self.hitWallObstacle(aWall,Mobstacle,aobstacle) == True:
            aobstacle.xvel = - aobstacle.xvel
        if self.hitPlayer(Mobstacle,mplayer) == True:
            aPlay.vel = 0
            aobstacle0.xvel = 0
            aobstacle1.xvel = 0
            aobstacle2.xvel = 0
            aobstacle3.xvel = 0
            aobstacle4.xvel = 0
            aobstacle5.xvel = 0
            aobstacle6.xvel = 0
            aobstacle7.xvel = 0
            aobstacle8.xvel = 0
            aobstacle9.xvel = 0
            root=Tk()
            gameover = Label(root,font=('arial',25,'bold'),text='Game Over!', fg='white',anchor='center',bd=300,bg='red',width=10,height = 2)
            gameover.pack()
            root.mainloop()
        self.canvas.after(10,self.moveobstacle,Mobstacle,aobstacle)

    def getCurPos(self,obj):
        # Bounding box (x1, y1, x2, y2) of a canvas item.
        return self.canvas.bbox(obj)

    def Win(self,aWall,mplayer):
        """Return True when the player's top edge has crossed above the top wall."""
        xa1,ya1,xa2,ya2 = self.getCurPos(mplayer)
        if ya1 <= aWall.xTop-10:
            return True
        else:
            return False

    def hitWallPlayer(self,aWall,mplayer):
        """Return True when the player touches the left, right or bottom wall."""
        x1,y1,x2,y2 = self.getCurPos(mplayer)
        if aWall.xLeft >= x1:
            return True
        if aWall.xRight <= x2:
            return True
        if aWall.xBottom+10 <= y2:
            return True
        else:
            return False

    def hitWallObstacle(self,aWall,Mobstacle,aobstacle):
        """Return True when a moving obstacle has reached the left or right wall.

        The direction check (xvel sign) ensures an obstacle only "hits" the
        wall it is moving towards; the width+20 margin lets it bounce before
        leaving the field.
        """
        x1,y1,x2,y2 = self.getCurPos(Mobstacle)
        if aobstacle.xvel < 0 and aWall.xLeft >= x1 + aobstacle.width + 20:
            return True
        if aobstacle.xvel > 0 and aWall.xRight <= x2 - aobstacle.width - 20:
            return True
        else:
            return False
# ---------------------------------------------------------------
# Game setup script (runs on import): random obstacle start columns.
x = range(0,800)
xpose = random.choice(x)
xpose1 = random.choice(x)
xpose2 = random.choice(x)
xpose3 = random.choice(x)
xpose4 = random.choice(x)
xpose5 = random.choice(x)
xpose6 = random.choice(x)
xpose7 = random.choice(x)
xpose8 = random.choice(x)
xpose9 = random.choice(x)
root = Tk()
root.title("Step move")
#add widgets
myApp = Step_move(root)
#===================================================================
#Wall
# Border, green start zone, black goal "teeth" along the top, and
# alternating grey/ivory lane stripes.
aWall = Wall(3,800,3,600)
myApp.createtable(aWall)
bWallpad = Wallpad(3,550,800,600)
myApp.createWallpad(bWallpad,"light green")
cWallpad = Wallpad(3,3,100,25)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(100,25,200,50)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(200,3,300,25)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(300,25,400,50)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(400,3,500,25)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(500,25,600,50)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(600,3,700,25)
myApp.createWallpad(cWallpad,"black")
cWallpad = Wallpad(700,25,800,50)
myApp.createWallpad(cWallpad,"black")
dWallpad = Wallpad(3,50,800,100)
myApp.createWallpad(dWallpad,"#DCDCDC")
dWallpad = Wallpad(3,100,800,150)
myApp.createWallpad(dWallpad,"#FFFFF0")
dWallpad = Wallpad(3,150,800,200)
myApp.createWallpad(dWallpad,"#DCDCDC")
dWallpad = Wallpad(3,200,800,250)
myApp.createWallpad(dWallpad,"#FFFFF0")
dWallpad = Wallpad(3,250,800,300)
myApp.createWallpad(dWallpad,"#DCDCDC")
dWallpad = Wallpad(3,300,800,350)
myApp.createWallpad(dWallpad,"#FFFFF0")
dWallpad = Wallpad(3,350,800,400)
myApp.createWallpad(dWallpad,"#DCDCDC")
dWallpad = Wallpad(3,400,800,450)
myApp.createWallpad(dWallpad,"#FFFFF0")
dWallpad = Wallpad(3,450,800,500)
myApp.createWallpad(dWallpad,"#DCDCDC")
dWallpad = Wallpad(3,500,800,550)
myApp.createWallpad(dWallpad,"#FFFFF0")
# Player sprite loaded from an image file next to the script.
imgPl = PhotoImage(file='Playerimg.png')
aPlay = Player(400,570,50)
mplayer = myApp.createPlayer(aPlay,imgPl)
#=========================================================================
#Obstacle
# Ten obstacle rows, one per lane; speed decreases towards the bottom.
aobstacle0 = Obstacle(xpose,50,7.5,0)
Mobstacle0 = myApp.createobstacle(aobstacle0,"orange")
myApp.moveobstacle(Mobstacle0,aobstacle0)
aobstacle1 = Obstacle(xpose1,100,7,0)
Mobstacle1 = myApp.createobstacle(aobstacle1,"green")
myApp.moveobstacle(Mobstacle1,aobstacle1)
aobstacle2 = Obstacle(xpose2,150,6.5,0)
Mobstacle2 = myApp.createobstacle(aobstacle2,"pink")
myApp.moveobstacle(Mobstacle2,aobstacle2)
aobstacle3 = Obstacle(xpose3,200,6,0)
Mobstacle3 = myApp.createobstacle(aobstacle3,"red")
myApp.moveobstacle(Mobstacle3,aobstacle3)
aobstacle4 = Obstacle(xpose4,250,5.5,0)
Mobstacle4 = myApp.createobstacle(aobstacle4,"blue")
myApp.moveobstacle(Mobstacle4,aobstacle4)
aobstacle5 = Obstacle(xpose5,300,5,0)
Mobstacle5 = myApp.createobstacle(aobstacle5,"brown")
myApp.moveobstacle(Mobstacle5,aobstacle5)
aobstacle6 = Obstacle(xpose6,350,4.5,0)
Mobstacle6 = myApp.createobstacle(aobstacle6,"yellow")
myApp.moveobstacle(Mobstacle6,aobstacle6)
aobstacle7 = Obstacle(xpose7,400,4,0)
Mobstacle7 = myApp.createobstacle(aobstacle7,"grey")
myApp.moveobstacle(Mobstacle7,aobstacle7)
aobstacle8 = Obstacle(xpose8,450,3.5,0)
Mobstacle8 = myApp.createobstacle(aobstacle8,"gold")
myApp.moveobstacle(Mobstacle8,aobstacle8)
aobstacle9 = Obstacle(xpose9,500,3,0)
Mobstacle9 = myApp.createobstacle(aobstacle9,"light blue")
myApp.moveobstacle(Mobstacle9,aobstacle9)
#===============================================================
#Player
# Hook up the keyboard and start the Tk event loop.
myApp.controlPlayer(aPlay,mplayer)
#===============================================================
root.mainloop()
|
17,939 | 202330b84260e2962cf03fbc1bad1328da222758 | """Input utility functions for reading small norb dataset.
Handles reading from small norb dataset saved in binary original format. Scales and
normalizes the images as the preprocessing step. It can distort the images by
random cropping and contrast adjusting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _read_input(filename_queue):
    """Reads a single record and converts it to a tensor.

    Each record consists the 3x32x32 image with one byte for the label.

    Args:
        filename_queue: A queue of strings with the filenames to read from.
    Returns:
        image: a [32, 32, 3] float32 Tensor with the image data.
        label: an int32 Tensor with the label in the range 0..9.
    """
    label_bytes = 1
    height = 32
    depth = 3
    image_bytes = height * height * depth
    # One fixed-length record = 1 label byte followed by the raw image bytes.
    record_bytes = label_bytes + image_bytes
    reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)
    _, byte_data = reader.read(filename_queue)
    uint_data = tf.io.decode_raw(byte_data, tf.uint8)
    # First byte is the label; the rest is the image stored depth-major.
    label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
    label.set_shape([1])
    depth_major = tf.reshape(
        tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
        [depth, height, height])
    # Transpose [depth, height, width] -> HWC, as expected by tf.image ops.
    image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
    return image, label
def _distort_resize(image, image_size):
    """Distorts input images for CIFAR training.

    Adds standard distortions such as flipping, cropping and changing brightness
    and contrast.

    Args:
        image: A float32 tensor with last dimmension equal to 3.
        image_size: The output image size after cropping.
    Returns:
        distorted_image: A float32 tensor with shape [image_size, image_size, 3].
    """
    distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(
        distorted_image, lower=0.2, upper=1.8)
    # random_crop loses the static shape; restore it for downstream layers.
    distorted_image.set_shape([image_size, image_size, 3])
    return distorted_image
def _batch_features(image, label, batch_size, split, image_size):
    """Constructs the batched feature dictionary.

    Batches the images and labels accourding to the split. Shuffles the data only
    if split is train. Formats the feature dictionary to be in the format required
    by experiment.py.

    Args:
        image: A float32 tensor with shape [image_size, image_size, 3].
        label: An int32 tensor with the label of the image.
        batch_size: The number of data points in the output batch.
        split: 'train' or 'test'.
        image_size: The size of the input image.
    Returns:
        batched_features: A dictionary of the input data features.
    """
    # Convert HWC -> CHW; downstream consumers expect channel-major images.
    image = tf.transpose(a=image, perm=[2, 0, 1])
    features = {
        'images': image,
        'labels': tf.one_hot(label, 5),
        'recons_image': image,
        'recons_label': label,
    }
    if split == 'train':
        # Shuffled batching only for training; min_after_dequeue guarantees
        # a minimum shuffle buffer.
        batched_features = tf.compat.v1.train.shuffle_batch(
            features,
            batch_size=batch_size,
            num_threads=16,
            capacity=10000 + 3 * batch_size,
            min_after_dequeue=10000)
    else:
        batched_features = tf.compat.v1.train.batch(
            features,
            batch_size=batch_size,
            num_threads=1,
            capacity=10000 + 3 * batch_size)
    batched_features['labels'] = tf.reshape(batched_features['labels'],
                                            [batch_size, 5])
    batched_features['recons_label'] = tf.reshape(
        batched_features['recons_label'], [batch_size])
    # Static metadata consumed by the model-building code.
    batched_features['height'] = image_size
    batched_features['width'] = image_size
    batched_features['depth'] = 3
    batched_features['num_targets'] = 1
    batched_features['num_classes'] = 5
    return batched_features
import os
def _parser(serialized_example):
    """Parse smallNORB example from tfrecord.

    Args:
        serialized_example: serialized example from tfrecord
    Returns:
        img: [96, 96, 1] float32 image (left unnormalized).
        lab: int32 class label.
        cat: category — the instance in the category (0 to 9).
        elv: elevation — 0 to 8, meaning cameras are 30, 35, 40, 45, 50,
            55, 60, 65, 70 degrees from the horizontal respectively.
        azi: azimuth — 0, 2, 4, ..., 34; multiply by 10 to get degrees.
        lit: lighting condition (0 to 5).
    """
    features = tf.compat.v1.parse_single_example(
        serialized_example,
        features={
            'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),
            'label': tf.compat.v1.FixedLenFeature([], tf.int64),
            'category': tf.compat.v1.FixedLenFeature([], tf.int64),
            'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),
            'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),
            'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),
        })
    # Raw pixels were serialized as float64; decode then downcast to float32.
    img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)
    img = tf.reshape(img, [96, 96, 1])
    img = tf.cast(img, tf.float32)  # * (1. / 255) # left unnormalized
    lab = tf.cast(features['label'], tf.int32)
    cat = tf.cast(features['category'], tf.int32)
    elv = tf.cast(features['elevation'], tf.int32)
    azi = tf.cast(features['azimuth'], tf.int32)
    lit = tf.cast(features['lighting'], tf.int32)
    return img, lab, cat, elv, azi, lit
def _train_preprocess(img, lab, cat, elv, azi, lit):
    """Preprocessing for training.

    Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
    Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
    image to have zero mean and unit variance. During training, we randomly crop
    32 × 32 patches and add random brightness and contrast to the cropped images.
    During test, we crop a 32 × 32 patch from the center of the image and
    achieve..."

    Args:
        img: this fn only works on the image
        lab, cat, elv, azi, lit: allow these to pass through
    Returns:
        img: image processed
        lab, cat, elv, azi, lit: allow these to pass through
    """
    # Scale pixels to [0, 1] (per-image standardization is disabled below).
    img = img / 255.
    img = tf.compat.v1.image.resize_images(img, [48, 48])
    #img = tf.image.per_image_standardization(img)
    img = tf.compat.v1.random_crop(img, [32, 32, 1])
    img = tf.image.random_brightness(img, max_delta=32. / 255.)
    # original 0.5, 1.5
    img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
    # Original
    # image = tf.image.random_brightness(image, max_delta=32. / 255.)
    # image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    # image = tf.image.resize_images(image, [48, 48])
    # image = tf.random_crop(image, [32, 32, 1])
    return img, lab, cat, elv, azi, lit
def _val_preprocess(img, lab, cat, elv, azi, lit):
    """Preprocessing for validation/testing.

    Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
    Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
    image to have zero mean and unit variance. During training, we randomly crop
    32 × 32 patches and add random brightness and contrast to the cropped
    images. During test, we crop a 32 × 32 patch from the center of the image
    and achieve..."

    Args:
        img: this fn only works on the image
        lab, cat, elv, azi, lit: allow these to pass through
    Returns:
        img: image processed
        lab, cat, elv, azi, lit: allow these to pass through
    """
    img = img / 255.
    img = tf.compat.v1.image.resize_images(img, [48, 48])
    #img = tf.image.per_image_standardization(img)
    # Deterministic 32x32 center crop (offset 8 on each side of 48x48).
    img = tf.slice(img, [8, 8, 0], [32, 32, 1])
    # Original
    # image = tf.image.resize_images(image, [48, 48])
    # image = tf.slice(image, [8, 8, 0], [32, 32, 1])
    return img, lab, cat, elv, azi, lit
def input_fn(path, is_train: bool, batch_size = 64, epochs=100):
    """Input pipeline for smallNORB using tf.data.

    Author:
        Ashley Gritzman 15/11/2018
    Args:
        path: directory containing train*/test* .tfrecords files.
        is_train: select train shards (with augmentation) or test shards.
        batch_size: examples per batch (incomplete final batch is dropped).
        epochs: number of passes over the data.
    Returns:
        dataset: image tf.data.Dataset
    """
    import re
    # Pick the shard files for the requested split by filename pattern.
    if is_train:
        CHUNK_RE = re.compile(r"train.*\.tfrecords")
    else:
        CHUNK_RE = re.compile(r"test.*\.tfrecords")
    chunk_files = [os.path.join(path, fname)
                   for fname in os.listdir(path)
                   if CHUNK_RE.match(fname)]
    # 1. create the dataset
    dataset = tf.data.TFRecordDataset(chunk_files)
    # 2. map with the actual work (preprocessing, augmentation…) using multiple
    # parallel calls
    dataset = dataset.map(_parser, num_parallel_calls=4)
    if is_train:
        dataset = dataset.map(_train_preprocess,
                              num_parallel_calls=4)
    else:
        dataset = dataset.map(_val_preprocess,
                              num_parallel_calls=4)
    # 3. shuffle (with a big enough buffer size)
    # In response to a question on OpenReview, Hinton et al. wrote the
    # following:
    # https://openreview.net/forum?id=HJWLfGWRb&noteId=rJgxonoNnm
    # "We did not have any special ordering of training batches and we random
    # shuffle. In terms of TF batch:
    # capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of
    # examples. min_after_dequeue=2000."
    capacity = 2000 + 3 * batch_size
    dataset = dataset.shuffle(buffer_size=capacity)
    # 4. batch
    dataset = dataset.batch(batch_size, drop_remainder=True)
    # 5. repeat
    dataset = dataset.repeat(count=epochs)
    # 6. prefetch
    dataset = dataset.prefetch(1)
    return dataset
def create_inputs_norb(path, is_train: bool,batch_size,epochs):
    """Get a batch from the input pipeline.

    Author:
        Ashley Gritzman 15/11/2018
    Args:
        path: directory containing the tfrecord shards.
        is_train: select the train or test split.
        batch_size: examples per batch.
        epochs: number of passes over the data.
    Returns:
        Dict of batched tensors: image, label, category, elevation,
        azimuth, lighting.
    """
    # Create batched dataset
    dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)
    # Create one-shot iterator
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    img, lab, cat, elv, azi, lit = iterator.get_next()
    output_dict = {'image': img,
                   'label': lab,
                   'category': cat,
                   'elevation': elv,
                   'azimuth': azi,
                   'lighting': lit}
    return output_dict
def inputs(data_dir,
           batch_size,
           split,
           epochs=50):
    """Build the smallNORB batched-feature dictionary for experiment.py.

    Args:
        data_dir: directory containing the tfrecord shards.
        batch_size: examples per batch.
        split: 'train' (augmented, shuffled) or anything else for test.
        epochs: number of passes over the data.
    Returns:
        Dict with image/label tensors plus static metadata (height, width,
        depth, num_targets, num_classes), matching _batch_features' format.
    """
    # Idiom fix: the original bound the pipeline output to a local named
    # ``dict``, shadowing the builtin.
    data = create_inputs_norb(data_dir, split == "train", batch_size=batch_size, epochs=epochs)
    batched_features = {
        'height': 32,
        'width': 32,
        'depth': 1,
        'num_targets': 1,
        'num_classes': 5,
        'recons_image': data['image'],
        'recons_label': data['label'],
        'images': data['image'],
        'labels': tf.one_hot(data['label'], 5),
    }
    return batched_features
17,940 | 68fe326a61d8eeb372ee46e9672e066a200def49 | #! /usr/bin/env python3
import rospy
from geometry_msgs.msg import Pose
from sensor_msgs.msg import JointState
from std_msgs.msg import String, Bool, UInt8MultiArray
from MecademicRobot import RobotController, RobotFeedback
class MecademicRobot_Driver():
    """ROS Mecademic Robot Node Class to make a Node for the Mecademic Robot.

    Subscribes to joint / pose / raw-command / gripper topics, forwards each
    message to the robot over its socket, and publishes command replies,
    live joint/pose feedback and robot+gripper status.

    Attributes:
        robot: RobotController driver used to command the robot
        feedback: RobotFeedback driver used for position feedback
        socket_available: flag serialising socket access between callbacks
            and the feedback loop.
    NOTE(review): socket_available is polled in busy-wait loops and is not a
    real lock; kept as-is to preserve behaviour, but a threading.Lock would
    be safer.
    """

    def __init__(self, robot, feedback):
        """Set up the node, topics, and start the (blocking) feedback loop.

        :param robot: connected RobotController instance
        :param feedback: connected RobotFeedback instance
        """
        rospy.init_node("MecademicRobot_driver", anonymous=True)
        # Command inputs from ROS.
        self.joint_subscriber = rospy.Subscriber("MecademicRobot_joint", JointState, self.joint_callback)
        self.pose_subscriber = rospy.Subscriber("MecademicRobot_pose", Pose, self.pose_callback)
        self.command_subscriber = rospy.Subscriber("MecademicRobot_command", String, self.command_callback)
        self.gripper_subscriber = rospy.Subscriber("MecademicRobot_gripper", Bool, self.gripper_callback)
        # Replies and feedback back out to ROS.
        self.reply_publisher = rospy.Publisher("MecademicRobot_reply", String, queue_size=1)
        self.joint_publisher = rospy.Publisher("MecademicRobot_joint_fb", JointState, queue_size=1)
        self.pose_publisher = rospy.Publisher("MecademicRobot_pose_fb", Pose, queue_size=1)
        self.status_publisher = rospy.Publisher("MecademicRobot_status", UInt8MultiArray, queue_size=1)
        self.robot = robot
        self.feedback = feedback
        self.socket_available = True
        # Blocks here until rospy shuts down.
        self.feedbackLoop()

    def __del__(self):
        """Deconstructor for the Mecademic Robot ROS driver.

        Deactivates the robot and closes socket connections with the robot.
        """
        self.robot.DeactivateRobot()
        self.robot.disconnect()
        self.feedback.disconnect()

    def command_callback(self, command):
        """Forwards a ascii command to the Mecademic Robot.

        :param command: ascii command to forward to the Robot
        """
        while not self.socket_available:  # wait for socket to be available
            pass
        self.socket_available = False  # block socket from being used in other processes
        if self.robot.is_in_error():
            self.robot.ResetError()
            self.robot.ResumeMotion()
        reply = self.robot.exchange_msg(command.data, decode=False)
        self.socket_available = True  # Release socket so other processes can use it
        if reply is not None:
            self.reply_publisher.publish(reply)

    def joint_callback(self, joints):
        """Forwards a JointState message to the robot as a MoveJoints command.

        Supports 6-axis and 4-axis robots; an optional velocity[0] sets the
        joint velocity first.

        :param joints: message containing position and velocity information
        """
        while not self.socket_available:  # wait for the socket to be available
            pass
        reply = None
        self.socket_available = False  # Block other processes from using the socket
        if self.robot.is_in_error():
            self.robot.ResetError()
            self.robot.ResumeMotion()
        if len(joints.velocity) > 0:
            self.robot.SetJointVel(joints.velocity[0])
        if len(joints.position) == 6:
            reply = self.robot.MoveJoints(joints.position[0], joints.position[1], joints.position[2],
                                          joints.position[3], joints.position[4], joints.position[5])
        elif len(joints.position) == 4:
            reply = self.robot.MoveJoints(joints.position[0], joints.position[1], joints.position[2],
                                          joints.position[3])
        self.socket_available = True  # Release the socket so other processes can use it
        if reply is not None:
            self.reply_publisher.publish(reply)

    def pose_callback(self, pose):
        """Forwards a Pose message to the robot as a MovePose command.

        :param pose: message containing position and orientation information
        NOTE(review): geometry_msgs Pose fields default to 0.0 and are never
        None, so the 4-DOF else-branch below is effectively unreachable;
        kept unchanged to preserve behaviour.
        """
        while (not self.socket_available):  # wait for socket to become available
            pass
        reply = None
        self.socket_available = False  # Block other processes from using the socket while in use
        if self.robot.is_in_error():
            self.robot.ResetError()
            self.robot.ResumeMotion()
        if pose.position.z is not None:
            reply = self.robot.MovePose(pose.position.x, pose.position.y, pose.position.z, pose.orientation.x,
                                        pose.orientation.y, pose.orientation.z)
        else:
            reply = self.robot.MovePose(pose.position.x, pose.position.y, pose.orientation.x, pose.orientation.y)
        self.socket_available = True  # Release socket so other processes can continue
        if reply is not None:
            self.reply_publisher.publish(reply)

    def gripper_callback(self, state):
        """Controls whether to open or close the gripper.

        True for open, False for close.

        :param state: ROS Bool message
        """
        while not self.socket_available:  # wait for socket to be available
            pass
        self.socket_available = False  # Block other processes from using the socket
        if self.robot.is_in_error():
            self.robot.ResetError()
            self.robot.ResumeMotion()
        if state.data:
            reply = self.robot.GripperOpen()
        else:
            reply = self.robot.GripperClose()
        self.socket_available = True  # Release socket so other processes can use it
        if reply is not None:
            self.reply_publisher.publish(reply)

    def feedbackLoop(self):
        """Retrieves live status/position feedback and publishes it (infinite loop).

        Bug fix: the original read the module-level global ``feedback``
        (``feedback.joints`` / ``feedback.cartesian``) instead of
        ``self.feedback``, so the class only worked when a matching global
        happened to exist in __main__.
        """
        while not rospy.is_shutdown():
            try:
                # Robot Status Feedback
                if self.socket_available:
                    self.socket_available = False  # Block other operations from using the socket while in use
                    robot_status = self.robot.GetStatusRobot()
                    gripper_status = self.robot.GetStatusGripper()
                    self.socket_available = True  # Release the socket so other processes can happen
                    status = UInt8MultiArray()
                    status.data = [
                        robot_status["Activated"],
                        robot_status["Homing"],
                        robot_status["Simulation"],
                        robot_status["Error"],
                        robot_status["Paused"],
                        robot_status["EOB"],
                        robot_status["EOM"],
                        gripper_status["Gripper enabled"],
                        gripper_status["Homing state"],
                        gripper_status["Limit reached"],
                        gripper_status["Error state"],
                        gripper_status["force overload"]
                    ]
                    self.status_publisher.publish(status)
                # Position Feedback (4-element cartesian = 4-DOF robot,
                # 6-element = full pose with z and all orientations).
                self.feedback.get_data()
                joints_fb = JointState()
                joints_fb.position = self.feedback.joints
                pose_fb = Pose()
                pose_fb.position.x = self.feedback.cartesian[0]
                pose_fb.position.y = self.feedback.cartesian[1]
                if len(self.feedback.cartesian) == 4:
                    pose_fb.orientation.x = self.feedback.cartesian[2]
                    pose_fb.orientation.y = self.feedback.cartesian[3]
                else:
                    pose_fb.position.z = self.feedback.cartesian[2]
                    pose_fb.orientation.x = self.feedback.cartesian[3]
                    pose_fb.orientation.y = self.feedback.cartesian[4]
                    pose_fb.orientation.z = self.feedback.cartesian[5]
                self.joint_publisher.publish(joints_fb)
                self.pose_publisher.publish(pose_fb)
            except Exception as error:
                # Log and keep looping — feedback errors must not kill the node.
                rospy.logerr(str(error))
if __name__ == "__main__":
robot = RobotController('192.168.0.100')
feedback = RobotFeedback('192.168.0.100', "v8.1.6.141")
robot.connect()
feedback.connect()
robot.ActivateRobot()
robot.home()
driver = MecademicRobot_Driver(robot, feedback)
rospy.spin()
|
17,941 | b4180a2df2c8f983446b406068d411272f9543df | from numpy import random
from collections import Counter
import matplotlib.pyplot as plt
import math
import pandas as pd
import scipy.stats as sts
from statsmodels.distributions.empirical_distribution import ECDF
import numpy as np
# Model Y = X**(3/2) for X ~ Uniform(a, b): draw a sample, compare the
# empirical CDF with the theoretical CDF F(y) = y^(2/3) / 8 on [0, b],
# and compute the Kolmogorov statistic.
a = 0
b = 7
y0 = 2
n = 30
X = []
Y = []
r = sts.uniform()
xi = r.rvs(size=n)
for i in range(n):
    # Stretch the unit-uniform draws onto [a, b].
    x = xi[i] * (b - a) + a
    X.append(x)
    y = math.sqrt(pow(x, 3))  # y = x**1.5
    Y.append(y)
Y.sort()
var_series = pd.DataFrame(data={'$Y_i$': Y})
print(var_series.T)
emp_dist_func = ECDF(Y)
print(emp_dist_func.y)
f_y = []
x_theor = np.linspace(0, 7, n)
for xi in x_theor:
    f_y.append(pow(xi, 2/3) / 8)  # theoretical distribution function
plt.plot(x_theor, f_y, label='Theoretical distribution function')
plt.step(emp_dist_func.x, emp_dist_func.y, label='Empirical distribution function')
plt.ylabel('F(y)')
plt.xlabel('x')
plt.legend()
plt.show()
d_plus = []
d_minus = []
# Bug fix: the original looped over range(n - 1), which never included the
# largest order statistic Y[n-1] in the Kolmogorov statistic.
# D+ uses (i+1)/n and D- uses i/n with 0-based i, i = 0..n-1.
for i in range(n):
    d_plus.append(abs((i + 1) / n - (pow(Y[i], 2/3) / 8)))
    d_minus.append(abs(i / n - (pow(Y[i], 2/3) / 8)))
d = max(max(d_plus), max(d_minus))
print("d = ", d)
lambd = d * np.sqrt(n)
print("lambda = ", lambd)
|
17,942 | 4b785f2c17488979ab0b6bb5910cef2c398115f5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2019/3/29 20:00
@File : mul_find_regional.py
@Desc :
"""
import re
import threading
import time
import os
class Find(threading.Thread):
    """Worker thread: count regex occurrences of one slice of names in a text.

    Each worker collects its counts locally and merges them into the
    module-global FINDREGIONALDICT once at the end of run().
    NOTE(review): the merge is done without a lock (the lock code is
    commented out) -- confirm concurrent dict.update is acceptable here.
    """
    def __init__(self, namelist, startIndex, endIndex, text):
        threading.Thread.__init__(self)
        self.namelist = namelist  # shared list of names to search for
        self.startIndex = startIndex  # first index (inclusive) of this thread's slice
        self.endIndex = endIndex  # last index (exclusive) of this thread's slice
        self.seachstr = text
    def run(self):
        # Accumulate matches locally, then merge once.
        self._finddict = dict()
        for i in range(self.startIndex, self.endIndex):
            # Name followed by a non-word character, so partial words do not match.
            findone = re.findall(self.namelist[i] + '[\W]', self.seachstr)
            if len(findone):
                self._finddict[self.namelist[i].strip()] = len(findone)
        FINDREGIONALDICT.update(self._finddict)
        # global finddict  # share the global dict across threads (with a global lock)
        # if lock.acquire():
        #     # acquire the lock (released automatically)
        #     try:
        #         finddict.update(self._finddict)
        #     finally:
        #         lock.release()
def multithread_find_regional(text, names_map, thread_num=30):
    """Count occurrences of every key of names_map inside text.

    The keys are split into thread_num slices, each searched by a Find
    worker; returns the merged {name: count} dict.
    """
    global lock, FINDREGIONALDICT
    FINDREGIONALDICT = dict()
    namelist = list(names_map.keys())
    namenum = len(namelist)
    print(namenum)
    # lock = threading.Lock()  # create a lock
    threadlist = []  # list of worker threads
    # e.g. 97 names / 9 threads -> slices 0-1000000, 1000000-2000000, ...
    for i in range(0, thread_num - 1):
        mythd = Find(namelist, i * (namenum // (thread_num - 1)), (i + 1) * (namenum // (thread_num - 1)), text)
        mythd.start()
        threadlist.append(mythd)  # append to the thread list
    # e.g. 97 = 97//10*10 = 90, the remainder goes to the last thread
    mylastthd = Find(namelist, namenum // (thread_num - 1) * (thread_num - 1), namenum, text)  # last thread searches the remaining tail
    mylastthd.start()
    threadlist.append(mylastthd)  # append to the thread list
    for thd in threadlist:  # wait for every worker to finish
        thd.join()
    return FINDREGIONALDICT
17,943 | 2245a83edf01133985cd1c417fb04f78a22be505 | # Write a function that takes a string as input and reverse only the vowels of a string.
# Example 1:
# Given s = "hello", return "holle".
# Example 2:
# Given s = "leetcode", return "leotcede".
class Solution(object):
    """Reverse only the vowels of a string, leaving every other character fixed."""

    def reverseVowels(self, s):
        """Return *s* with its vowels (both cases) appearing in reversed order."""
        vowels = 'aeiouAEIOU'
        chars = list(s)
        lo, hi = 0, len(chars) - 1
        # Two-pointer sweep: advance each cursor to the next vowel, swap, repeat.
        while lo < hi:
            if chars[lo] not in vowels:
                lo += 1
            elif chars[hi] not in vowels:
                hi -= 1
            else:
                chars[lo], chars[hi] = chars[hi], chars[lo]
                lo += 1
                hi -= 1
        return ''.join(chars)
17,944 | cf9e1ecf0287abd34eafc229431e1ee2ddbfbfbf | import os
import sys
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import HiveContext
from pyspark.sql.types import StringType
from pyspark.sql.types import FloatType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
local_path = os.path.dirname(__file__)
sys.path.append(local_path + "/./")
sys.path.append(local_path + "/../")
def cal_adj_per(x):
    """Build a dict for one EOD row with adjusted open/high/low added.

    *x* is a row object exposing symbol/date/open/high/low/close/volume/
    adjclose attributes.  The adjustment factor adjclose/close is applied
    to open, high and low; adjclose itself is copied through unchanged.
    """
    row = {
        "symbol": x.symbol,
        "date": x.date,
        "open": x.open,
        "high": x.high,
        "low": x.low,
        "close": x.close,
        "volume": x.volume,
        "adjclose": x.adjclose,
    }
    factor = row["adjclose"] / row["close"]
    row["adjopen"] = factor * row["open"]
    row["adjhigh"] = factor * row["high"]
    row["adjlow"] = factor * row["low"]
    return row
def cal_adj(sc, sql_context, is_hive):
    """Load EOD rows since 2000-01-01 from table eod2 and return an RDD of
    dicts with adjusted open/high/low added (see cal_adj_per)."""
    df_eod = sql_context.sql("""
        SELECT
            symbol,
            date,
            open,
            high,
            low,
            close,
            volume,
            adjclose
        FROM
            eod2
        WHERE
            date >= "2000-01-01"
    """)
    # Per-row adjustment is pure Python, so it runs map-side.
    return df_eod.rdd.map(lambda x: cal_adj_per(x))
def cal_close_norm_per(x):
    """Compute the normalized close series for one symbol.

    *x* is a list of row dicts (as built by cal_adj_per).  Rows are sorted
    by date ascending in place; the earliest row gets normclose = 1.0 and
    each later row scales the previous normclose by the ratio of adjusted
    closes: normclose[i] = normclose[i-1] / adjclose[i-1] * adjclose[i].

    Returns a new list of row copies with "normclose" added; an empty
    input yields an empty list.
    """
    if len(x) == 0:
        return []
    l = []
    # FIX: the original used a Python-2-only cmp-style comparator
    # (x.sort(lambda xx, yy: cmp(...))); a key function gives the same
    # ascending-by-date order and works on Python 2 and 3.
    x.sort(key=lambda row: row["date"])
    per = x[0].copy()
    per["normclose"] = 1.0
    l.append(per.copy())
    for i in range(1, len(x)):
        per = x[i].copy()
        per["normclose"] = l[i-1]["normclose"] / l[i-1]["adjclose"] * per["adjclose"]
        l.append(per.copy())
    return l
def cal_close_norm(rdd_adj):
    """Group adjusted rows by symbol and attach the normalized close series.

    Returns a pair RDD of (symbol, row_dict) where each row carries the
    "normclose" computed by cal_close_norm_per over that symbol's history.
    """
    return rdd_adj.groupBy(lambda x: x["symbol"]).map(lambda x: (x[0], list(x[1]))) \
        .flatMapValues(lambda x: cal_close_norm_per(x))
def cal_norm_per(x):
    """Return a copy of row dict *x* with normalized open/high/low added.

    The scale normclose/adjclose is applied to adjopen/adjhigh/adjlow,
    yielding normopen/normhigh/normlow (normclose is already present).
    """
    row = x.copy()
    scale = row["normclose"] / row["adjclose"]
    for field in ("open", "high", "low"):
        row["norm" + field] = scale * row["adj" + field]
    return row
def cal_norm(rdd_close_norm):
    """Apply cal_norm_per to every (symbol, row) pair and drop the key,
    returning an RDD of fully-normalized row dicts."""
    return rdd_close_norm.mapValues(lambda x: cal_norm_per(x)).map(lambda x: x[1])
def save(rdd_norm, sc, sql_context, is_hive):
    """Persist the normalized rows.

    Converts the dict RDD into a typed DataFrame and either registers it
    as a temp table (non-Hive runs) or rebuilds the Hive table eod_norm.
    """
    # Project each dict into a tuple in the exact column order of the schema.
    rddl_norm = rdd_norm.map(lambda p: (p["date"], p["symbol"], p["open"], p["high"], p["low"], p["close"], p["volume"],
                                        p["adjopen"], p["adjhigh"], p["adjlow"], p["adjclose"],
                                        p["normopen"], p["normhigh"], p["normlow"], p["normclose"]
                                        )
                             )
    schema = StructType([
        StructField("date", StringType(), True),
        StructField("symbol", StringType(), True),
        StructField("open", FloatType(), True),
        StructField("high", FloatType(), True),
        StructField("low", FloatType(), True),
        StructField("close", FloatType(), True),
        StructField("volume", FloatType(), True),
        StructField("adjopen", FloatType(), True),
        StructField("adjhigh", FloatType(), True),
        StructField("adjlow", FloatType(), True),
        StructField("adjclose", FloatType(), True),
        StructField("normopen", FloatType(), True),
        StructField("normhigh", FloatType(), True),
        StructField("normlow", FloatType(), True),
        StructField("normclose", FloatType(), True),
    ])
    df_norm = sql_context.createDataFrame(rddl_norm, schema)
    if not is_hive:
        # Local/test mode: expose as a temp table only.
        df_norm.registerAsTable("eod_delta")
        return
    # Hive mode: drop and recreate the target table, then overwrite it.
    sql_context.sql("""
        DROP TABLE IF EXISTS %s
    """ % "eod_norm")
    sql_context.sql("""
        CREATE TABLE IF NOT EXISTS eod_norm(
            date string,
            symbol string,
            open float,
            high float,
            low float,
            close float,
            volume float,
            adjopen float,
            adjhigh float,
            adjlow float,
            adjclose float,
            normopen float,
            normhigh float,
            normlow float,
            normclose float
        )
        ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
    """)
    df_norm.insertInto("eod_norm", overwrite = True)
def main(sc, sql_context, is_hive = True):
    """Pipeline driver: adjust -> normalize-close -> normalize-OHLC -> save."""
    rdd_adj = cal_adj(sc, sql_context, is_hive)
    rdd_close_norm = cal_close_norm(rdd_adj)
    rdd_norm = cal_norm(rdd_close_norm)
    save(rdd_norm, sc, sql_context, is_hive)
if __name__ == "__main__":
    # Stand-alone submission: configure a small cluster footprint and run
    # the pipeline against Hive.
    conf = SparkConf()
    conf.set("spark.executor.instances", "4")
    conf.set("spark.executor.cores", "4")
    conf.set("spark.executor.memory", "8g")
    sc = SparkContext(appName="bintrade.trade.mat_trade_see", conf=conf)
    sql_context = HiveContext(sc)
    main(sc, sql_context, is_hive = True)
    sc.stop()
|
17,945 | 4bf41699cafa81bb6a003421bfb4b5a80415399b |
#from .nucleus import INHERITANCE
#S_INHERITANCE={"dominance":{1.0:["A.A","A.a"],0.0:["a.a"]},"intermedier":{2.0:["A.A;A.A"],1.5:["A.A;A.a"],1.0:["A.a;A.a","A.A;a.a"],0.5:["A.a;a.a"],0.0:["a.a;a.a"]}}
def DoesMemberHave(list, member):
    """Return True if *member* occurs in *list*, else False.

    The parameter deliberately keeps its original (builtin-shadowing)
    name for call compatibility.
    """
    # `in` already yields a bool; the original if/else branch was redundant.
    return member in list
class Child():
    """Genotype outcome tables for a prospective child.

    ``boy`` and ``girl`` each map genotype codes (e.g. 0, 0.5, 1) to
    probabilities, one table per sex.
    """

    def __init__(self, boy, girl):
        self.boy = boy
        self.girl = girl

    def get_max(self, which):
        """Return {max_probability: genotype} for the requested sex.

        *which* is truthy for boy, falsy for girl.  When several genotypes
        tie at the maximum probability, the last one in dict order wins
        (unchanged from the original behaviour).
        """
        if which:
            dist = self.boy
        else:
            # BUG FIX: the original read self.boy in this branch as well,
            # so the girl table was never consulted.
            dist = self.girl
        max_p = max(list(dist.values()))
        for genotype in dist.keys():
            if dist[genotype] == max_p:
                gt = genotype
        return {max_p: gt}

    def get_all(self):
        pass
class Children():
    """Placeholder for a collection of child outcomes (returned by
    Parent.add_child; not yet implemented)."""
    pass
class Gene():
    """One inherited trait: inheritance mode plus combination rules.

    Genotype encoding appears to be 1 = homozygous dominant,
    0.5 = heterozygous, 0 = homozygous recessive; probability tables map
    genotype -> probability.
    NOTE(review): the fractions 2/3 and 1/3 below assume Python 3 true
    division -- under Python 2 they evaluate to 0; confirm target version.
    """
    def __init__(self,inheritance,name,is_X_linked):
        self.inheritance=inheritance  # "dominance" or "recessive"
        self.name=name
        self.genotype=0       # current genotype code (set by Parent.set_genotype)
        self.possibility=0    # probability of that genotype
        self.is_X_linked=is_X_linked
    def recombination(self,other,self_sex):
        """Combine with *other* into child outcome tables.

        *other* may be another Gene (full genotype known) or a bare
        truthy/falsy phenotype flag.  Returns a Child (boy/girl tables)
        for X-linked traits, or a plain {genotype: probability} dict for
        autosomal ones.
        """
        if isinstance(other,Gene):
            if self.is_X_linked:
                if self.inheritance=="dominance":
                    return self.dom_x_linked_recombination(other,self_sex)
                elif self.inheritance=="recessive":
                    return self.rec_x_linked_recombination(other,self_sex)
            else:
                if self.inheritance=="dominance":
                    return self.dom_recombination(other)
                elif self.inheritance=="recessive":
                    return self.rec_recombination(other)
        elif other:
            # Phenotype flag only: the person shows the trait.
            if self.is_X_linked:
                if self.inheritance=="recessive":
                    return Child({1:0,0:1},{0:1,1:0,0.5:0})
                elif self.inheritance=="dominance":
                    return Child({1:1,0:0},{0.5:(2/3),1:(1/3),0:0})
            else:
                if self.inheritance=="recessive":
                    return {1:0,0:1,0.5:0}
                elif self.inheritance=="dominance":
                    return {1:(1/3),0.5:(2/3),0:0}
        else:
            # Phenotype flag only: the person does not show the trait.
            if self.is_X_linked:
                if self.inheritance=="recessive":
                    # NOTE(review): duplicate key 1 in the girl table; the
                    # later 1:0 overwrites 1:(1/3) -- looks like one entry
                    # should be 0; confirm intended probabilities.
                    return Child({1:1,0:0},{0.5:(2/3),1:(1/3),1:0})
                elif self.inheritance=="dominance":
                    return Child({0:1,1:0},{0:1,0.5:0,1:0})
            else:
                if self.inheritance=="recessive":
                    return {0.5:(2/3),0:0,1:(1/3)}
                elif self.inheritance=="dominance":
                    return {0:1,1:0,0.5:0}
    def dom_recombination(self,other):
        """Autosomal dominant cross keyed on the genotype sum."""
        dom=self.genotype+other.genotype
        if dom==2:
            return {1:1,0.5:0,0:0}
        elif dom==1.5:
            return {0.5:0.75,1:0.25,0:0}
        elif dom==1:
            return {0.5:1,0:0,1:0}
        elif dom==0.5:
            return {0.5:1,0:0,1:0}
        elif dom==0:
            return {0.5:0,0:0,1:0}
    def rec_recombination(self,other):
        """Autosomal recessive cross keyed on the genotype sum."""
        rec=self.genotype+other.genotype
        if rec==1:
            return {0.5:0.75,1:0,0:0.25}
        elif rec==0.5:
            return {0.5:0.5,1:0,0:0.5}
        elif rec==0:
            return {0:1,1:0}
        else:
            return {0.5:0,0:0,1:0}
    def rec_x_linked_recombination(self,other,self_sex):
        """X-linked recessive cross; returns Child with boy/girl tables.

        NOTE(review): the rec==1 branch only returns when the genotypes
        differ -- equal genotypes (0.5 + 0.5) fall through and return None.
        """
        rec=self.genotype+other.genotype
        if rec==2:
            return Child({1:1,0:0},{1:1,0.5:0,0:0})
        elif rec==1.5:
            return Child({1:0.5,0:0.5},{1:0.5,0.5:0.5,0:0})
        elif rec==1:
            if self.genotype>other.genotype:
                if self_sex:
                    return Child({0:1,1:0},{0.5:1,1:0,0:0})
                else:
                    return Child({0:0,1:1},{0.5:1,1:0,0:0})
        elif rec==0.5:
            return Child({0:0.5,1:0.5},{0.5:0.5,0:0.5,1:0})
        elif rec==0.0:
            return Child({0:1,1:0},{0:1,1:0,0.5:0})
    def dom_x_linked_recombination(self,other,self_sex):
        """X-linked dominant cross; returns Child with boy/girl tables.

        NOTE(review): both arms of the inner self_sex branch return the
        same tables -- confirm whether one should differ (compare the
        recessive variant above).
        """
        dom=self.genotype+other.genotype
        if dom==2:
            return Child({1:1,0:0},{1:1,0.5:0,0:0})
        elif dom==1.5:
            return Child({1:0.5,0:0.5},{1:0.5,0.5:0.5,0:0})
        elif dom==1:
            if self.genotype>other.genotype:
                if self_sex:
                    return Child({0:0,1:1},{0.5:1,1:0,0:0})
                else:
                    return Child({0:0,1:1},{0.5:1,1:0,0:0})
        elif dom==0.5:
            return Child({0:0.5,1:0.5},{0.5:0.5,0:0.5,1:0})
        elif dom==0.0:
            return Child({0:1,1:0},{0:1,1:0,0.5:0})
class Parent():
    """A person in a pedigree carrying one tracked Gene (``desease``)."""

    def __init__(self, doesHave, desease, sex, ID):
        self.sex = sex            # truthy = male, falsy = female (per get_max usage)
        self.ID = ID
        self.doesHave = doesHave  # whether the person expresses the trait
        self.desease = desease    # Gene instance being tracked
        self.dad = None
        self.mom = None
        self.child = None

    # def add_dad(self,doesHave,ID):
    def add_dad(self, DAD):
        """Attach *DAD* as this person's father and back-link the child."""
        # self.dad=Parent(doesHave,Gene(self.desease.inheritance,self.desease.name,self.desease.is_X_linked),True,ID)
        self.dad = DAD
        self.dad.child = self

    # def add_mom(self,doesHave,ID):
    # self.mom=Parent(doesHave,Gene(self.desease.inheritance,self.desease.name,self.desease.is_X_linked),False,ID)
    def add_mom(self, MOM):
        """Attach *MOM* as this person's mother and back-link the child."""
        self.mom = MOM
        self.mom.child = self

    def _apply_recombination(self, r):
        """Store the most likely genotype/probability from result *r*.

        *r* is either a Child (sex-split tables; pick this person's sex)
        or a plain {genotype: probability} dict (first key wins, matching
        the original behaviour).  Returns the chosen genotype.
        """
        if isinstance(r, Child):
            get_max = r.get_max(self.sex)
            self.desease.genotype = list(get_max.keys())[0]
            self.desease.possibility = list(get_max.values())[0]
        else:
            self.desease.genotype = list(r.keys())[0]
            self.desease.possibility = list(r.values())[0]
        return self.desease.genotype

    def set_genotype(self):
        """Infer this person's genotype, recursing through parents when the
        trait is absent and both parents are known."""
        if not self.doesHave and self.dad is not None and self.mom is not None:
            self.dad.set_genotype()
            self.mom.set_genotype()
            r = self.dad.desease.recombination(self.mom.desease, self.sex)
        else:
            # Fall back to the phenotype-only rules.
            r = self.desease.recombination(self.doesHave, self.sex)
        return self._apply_recombination(r)

    def add_child(self, other):
        """Combine this parent's gene with *other*'s and record the result.

        BUG FIX: the original bound the recombination result to ``cum``,
        omitted the required self_sex argument, and then referenced an
        undefined name ``r`` (guaranteed NameError).
        """
        r = self.desease.recombination(other.desease, self.sex)
        self._apply_recombination(r)
        return Children()
|
17,946 | bbf7046d1a43a7f8fddeee09efc3c0013b8fa82f | #codechef factor tree problem #code
import math
mod=(10**9)+7
'''Function for calculating prime factors and adding the count of prime numbers in dictionary '''
def calc(path, a):
    """Print the number of divisors, modulo 1e9+7, of the product of the
    node values along *path* (1-based indices into value list *a*).

    Uses the standard d(n) = prod(e_i + 1) formula over the combined
    prime factorization of all values on the path.
    """
    factor_counts = {}
    for node in path:
        factor_counts = primeFactors(a[node - 1], factor_counts)
    ans = 1
    for exponent in factor_counts.values():
        ans = (ans * (exponent + 1)) % mod
    print(ans)
def primeFactors(n, dic):
    """Accumulate the prime factorization of *n* into counter dict *dic*.

    Increments dic[p] by the multiplicity of every prime p dividing n and
    returns dic (mutated in place).
    """
    # BUG FIX: the original used true division (n = n / 2), turning n into
    # a float; large values lose precision and leftover prime cofactors
    # were stored under float keys (e.g. dic[3.0]).  Integer division
    # keeps everything exact.
    while n % 2 == 0:
        if 2 not in dic:
            dic[2] = 1
        else:
            dic[2] += 1
        n //= 2
    # Only odd trial divisors up to sqrt(n) are needed after stripping 2s.
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            if i not in dic:
                dic[i] = 1
            else:
                dic[i] += 1
            n //= i
    # Whatever remains above 2 is itself prime.
    if n > 2:
        if n not in dic:
            dic[n] = 1
        else:
            dic[n] += 1
    return dic
def addEdge(x, y, v):
    """Record an undirected edge between nodes *x* and *y* in adjacency list *v*."""
    for src, dst in ((x, y), (y, x)):
        v[src].append(dst)
def DFS(vis, x, y, stack,v,ansflag,a):
    """Depth-first search from x toward y, printing the answer via calc()
    when the target is reached.

    NOTE(review): ansflag is an int parameter, so the `ansflag=1`
    rebinding below never propagates to the caller's local -- the
    `if(ansflag==1)` early-break after the recursive call can only fire
    if this frame itself set it; confirm whether a mutable flag was
    intended.
    """
    stack.append(x)
    if (x == y):
        ansflag=1
        # Target reached: stack now holds the x..y path.
        calc(stack,a)
        return
    vis[x] = True
    flag = 0
    if (len(v[x]) > 0):
        for j in v[x]:
            if (vis[j] == False):
                DFS(vis, j, y, stack,v,ansflag,a)
                if(ansflag==1):
                    flag = 1
                    break
    # Backtrack: pop this node unless the found-path flag was set.
    if (flag == 0):
        del stack[-1]
    return
def DFSCall(x, y, n, stack,v,a):
    """Wrapper: allocate the visited array and handle the x == y case
    (single-node path) directly."""
    # visited array
    ansflag=0
    vis = [0 for i in range(n + 1)]
    if(x!=y):
        x1=DFS(vis, x, y, stack,v,ansflag,a)
    else:
        x1=[x]
        calc(x1,a)
    return
#main
# Input format per test case: n, then n-1 tree edges, then the n node
# values, then q queries of node pairs (x, y).
for _ in range(int(input())):
    n = int(input())
    v = [[] for i in range(n+1)]
    for i in range(n-1):
        a1,b=map(int,input().split())
        addEdge(a1,b,v)
    #print(v)
    a=list(map(int,input().split()))
    q=int(input())
    for i in range(q):
        x,y=map(int,input().split())
        stack = []
        DFSCall(x,y,n,stack,v,a)
#calling DFS for the path in tree
17,947 | 93402a864d392416a8aa864e46ee8a7ca2cdea8b | # coding=utf-8
import pprint
#途中結果を表示するか
class consolePrint():
    """Conditionally pretty-print intermediate results to the console."""

    def __init__(self, pJudg):
        # pJudg: printing is enabled only when this is exactly True
        # (the comparison below is deliberately `== True`, preserved).
        self.pJudg = pJudg

    def cPrint(self, printData):
        """Remember *printData* on the instance and pretty-print it when enabled."""
        self.printData = printData
        if self.pJudg == True:
            pprint.pprint(self.printData)
17,948 | 50080c62bbb5f2f894e75a769c9a714b261cb8c2 | #!/usr/bin/python
"""
Clean raw docs up a bit and map labels to {1, -1}.
Docs are encoded in utf-8, so don't assume ascii
when using any other libraries.
"""
import unicodecsv as csv
import json
from collections import Counter
SOURCE = "./raw/training-Table 1.csv"
TARGET = "./training.csv"
WORD_DIST_OUT = "./word-dist.csv"
c = Counter()
def extract_doc(doc):
    """Return the raw document text unchanged (hook for future cleaning)."""
    return doc
def extract_label(l):
    """Map a raw label cell to an int: "" -> -1 (unlabeled), "M" (any case,
    surrounding whitespace ignored) -> 1, anything else -> 0."""
    if len(l) == 0:
        return -1
    return 1 if l.strip().upper() == "M" else 0
with open(SOURCE, "r") as fin, open(TARGET, "w") as fout:
reader = csv.reader(fin, encoding="utf-8")
clean = []
for x in reader:
doc = extract_doc(x[0])
label = extract_label(x[1])
c.update(doc)
clean.append((doc, label))
writer = csv.writer(fout, encoding="utf-8", delimiter="|")
for out_row in clean:
writer.writerow(out_row)
ffreq = open(WORD_DIST_OUT, "w")
ffreq.write(json.dumps(c))
ffreq.close() |
17,949 | f9f94106e376c28aff48ba261f237d16ccec95e5 | # -*- coding: UTF-8 -*-
__author__ = 'MD'
from xadmin.views import BaseAdminPlugin, CommAdminView
from xadmin.sites import site
class CommonInitPlugin(BaseAdminPlugin):
    """xadmin plugin that injects custom CSS into every CommAdminView page."""
    # add media files
    def get_media(self, media):
        media.add_css({"screen": ["css/cust_index.css", "css/font-awesome.min.css"]})
        return media
# Register so the plugin runs for all views derived from CommAdminView.
site.register_plugin(CommonInitPlugin, CommAdminView)
|
17,950 | be6df13da5a117a7bfc6364d79e06de752fb041d | import textblob
from textblob.classifiers import NaiveBayesClassifier
from textblob.classifiers import NLTKClassifier
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer, TfidfVectorizer
from sklearn.svm import SVC
from sklearn import metrics
import numpy as np
import os
class textCat(object):
    """Text categorization experiment (Python 2): load train/test corpora
    and labels from fixed directories, extract tf-idf features, and run a
    linear SVM.

    Directory layout: 0001TrainText/ and 0002TestText/ hold one document
    per file (file name is a numeric id); 0003Labels/ holds
    train.doc.label / test.doc.label with tab-separated "id\\tlabel" rows.
    """
    def __init__(self):
        """Read every document and label file into memory."""
        self.no = 0
        self.train_path = './0001TrainText/'
        self.test_path = './0002TestText/'
        self.label_path = './0003Labels/'
        self.files_0001 = os.listdir(self.train_path)
        self.files_0002 = os.listdir(self.test_path)
        self.files_0003 = os.listdir(self.label_path)
        self.train_dic = {}
        self.test_dic = {}
        self.train_corpus = []
        self.test_corpus = []
        self.train_target = []
        self.test_target = []
        self.tv_train = None
        self.tv_test = None
        # Map numeric file id -> document text for both splits.
        for local_i in range(len(self.files_0001)):
            with open(self.train_path + self.files_0001[local_i]) as local_of:
                local_content = local_of.read()
                # self.train_corpus.append(local_content)
                # print files_0001[i], type(files_0001[i])
                self.train_dic[int(self.files_0001[local_i])] = local_content
        for local_i in range(len(self.files_0002)):
            with open(self.test_path + self.files_0002[local_i]) as local_of:
                local_content = local_of.read()
                # self.test_corpus.append(local_content)
                # print files_0001[i], type(files_0001[i])
                self.test_dic[int(self.files_0002[local_i])] = local_content
        # print len(train_dic)
        self.train_label = []
        self.test_label = []
        with open(self.label_path + "test.doc.label") as local_of:
            print self.files_0003[0]
            local_content = local_of.readlines()
            for local_i in range(len(local_content)):
                local_ctt_splt = local_content[local_i].split('\t')
                # print test_dic[int(ctt_splt[0])]
                # self.test_target.append(int(local_ctt_splt[1]))
                self.test_label.append((int(local_ctt_splt[0]), int(local_ctt_splt[1])))
        with open(self.label_path + "train.doc.label") as local_of:
            print self.files_0003[1]
            local_content = local_of.readlines()
            for local_i in range(len(local_content)):
                local_ctt_splt = local_content[local_i].split('\t')
                # print test_dic[int(ctt_splt[0])]
                # self.train_target.append(int(local_ctt_splt[1]))
                # train_label is (filename, target label)
                self.train_label.append((int(local_ctt_splt[0]), int(local_ctt_splt[1])))
        pass
    def get_ready(self):
        """Align corpora and targets in label-file order."""
        for i in range(len(self.train_label)):
            self.train_corpus.append(self.train_dic[self.train_label[i][0]])
            self.train_target.append(self.train_label[i][1])
        for i in range(len(self.test_label)):
            self.test_corpus.append(self.test_dic[self.test_label[i][0]])
            self.test_target.append(self.test_label[i][1])
        pass
    def fea_extract(self):
        """Fit tf-idf on the train corpus; reuse its vocabulary on test."""
        tv_model_1 = TfidfVectorizer(sublinear_tf=True, max_df=0.7, min_df=9, stop_words='english')
        self.tv_train = tv_model_1.fit_transform(self.train_corpus)
        tv_model_2 = TfidfVectorizer(vocabulary=tv_model_1.vocabulary_)
        self.tv_test = tv_model_2.fit_transform(self.test_corpus)
        pass
    def svm_cls(self, reverse=False):
        """Train a linear SVM and print accuracy; reverse=True swaps the
        roles of train and test.

        NOTE(review): fit() reads targets from the module-global ins_tcat
        rather than self -- works only because __main__ assigns the sole
        instance to ins_tcat; confirm and prefer self.
        """
        if not reverse:
            svc_cf = SVC(kernel='linear')
            svc_cf.fit(self.tv_train, ins_tcat.train_target)
            pred = svc_cf.predict(self.tv_test)
            calculate_result(self.test_target, pred)
        else:
            svc_cf = SVC(kernel='linear')
            svc_cf.fit(self.tv_test, ins_tcat.test_target)
            pred = svc_cf.predict(self.tv_train)
            calculate_result(self.train_target, pred)
        pass
    def calc_prec(self):
        """Not implemented."""
        pass
    def test_1(self):
        """Sanity check for CountVectorizer on a toy corpus."""
        l_vectorizer = CountVectorizer(min_df=1)
        corpus = [
            'This is the first\n document.',
            'This is the second\n second document.',
            'And the third one.',
            'Is this the first document?',
        ]
        l_X = l_vectorizer.fit_transform(corpus)
        feature_name = l_vectorizer.get_feature_names()
        print feature_name
        print l_X.toarray()
    def main(self):
        """Not implemented."""
        pass
def calculate_result(actual, f_pred):
    """Print the simple accuracy of predictions f_pred against actual."""
    if len(actual) != len(f_pred):
        print 'there is some wrong!'
    err = 0
    for i in range(len(actual)):
        if actual[i] != f_pred[i]:
            err += 1
    m_pre = 1. * (len(actual) - err) / len(actual)
    print 'predict info:'
    print 'precision: ', round(m_pre, 3)
    pass
if __name__ == '__main__':
    # Run the full pipeline with train/test roles swapped.
    ins_tcat = textCat()
    ins_tcat.get_ready()
    ins_tcat.fea_extract()
    ins_tcat.svm_cls(reverse=True)
    # ins_tcat.test_1()
    """
    Here could be more fix, for |feature| == 2
    """
    # tv = TfidfVectorizer(sublinear_tf=True, max_df=0.7, min_df=9, stop_words='english')
    # tv_train = tv.fit_transform(ins_tcat.train_corpus)
    # tv2 = TfidfVectorizer(vocabulary=tv.vocabulary_)
    # tv_test = tv2.fit_transform(ins_tcat.test_corpus)
    # print repr(tv_train.shape), repr(tv_test.shape)
    # svc_cf = SVC(kernel='linear')
    # svc_cf.fit(tv_train, ins_tcat.train_target)
    # pred = svc_cf.predict(tv_test)
    # calculate_result(ins_tcat.test_target, pred)
|
17,951 | bff8f325c56febfa91ed90b5956c87a8f50fc7e3 | from bs4 import BeautifulSoup
from datetime import datetime
from utilities import sms
import requests
import json
import sys
import os
today = datetime.today()
filename = os.path.join(sys.path[0], 'covid-data.txt')
def get_saved_data(state_name):
    """Return the saved old/new record for *state_name* from the data file,
    creating a zeroed entry (and persisting it) on first sight."""
    with open(filename, 'r+') as file:
        states = json.load(file)
        try:
            return states[1][state_name]
        except KeyError:
            # First time we see this state: seed both snapshots with zeros.
            states[1][state_name] = {"old": {"pos": 0, "neg": 0, "tot": 0, "ded": 0, "upd": "0"},
                                     "new": {"pos": 0, "neg": 0, "tot": 0, "ded": 0, "upd": "0"}}
            file.seek(0)
            json.dump(states, file)
            return states[1][state_name]
def save_file(state_name, pos, neg, total, death, date):
    """Rotate the state's "new" snapshot into "old" and store today's
    numbers as the new snapshot."""
    with open(filename, 'r+') as file:
        states = json.load(file)
        states[1][state_name]['old'] = states[1][state_name]['new']
        states[1][state_name]['new'] = {"pos": pos, "neg": neg, "tot": total, "ded": death, "upd": date}
        file.seek(0)
        json.dump(states, file)
def get_data(*args):
    """Fetch COVID numbers for each state code in *args* and build a
    WhatsApp-formatted summary string with deltas since the last run.

    For Texas, county-level counts are additionally scraped from the DSHS
    page on a best-effort basis.
    """
    results = ""
    url = 'https://covidtracking.com/api/states'
    for state in args:
        r = requests.Session()
        params = {'state': state}
        raw_data = r.get(url, params=params)
        raw_data.raise_for_status()
        new_data = json.loads(raw_data.text)
        if new_data:
            state_name = new_data['state']
            saved_data = get_saved_data(state_name)
            positive = new_data['positive']
            negative = new_data['negative']
            total = new_data['total']
            death = new_data['death']
            updated = new_data['lastUpdateEt']
            # Using * to bold and _ to italicize in WhatsApp
            results += f'''
*{new_data['state']}* - _Updated: {updated}_
Positive: {positive:,} *(+{positive - saved_data['old']['pos']:,})*
Negative: {negative:,} *(+{negative - saved_data['old']['neg']:,})*
Total: {total:,} *(+{total - saved_data['old']['tot']:,})*
Death: {death:,} *(+{death - saved_data['old']['ded']:,})*
'''
            if state == "TX":
                # Scrape the Texas DSHS county table (HTML layout-dependent).
                url_county = 'https://dshs.texas.gov/news/updates.shtm'
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
                try:
                    r2 = requests.Session()
                    s2 = r2.get(url_county, headers=headers, verify=False)
                    soup2 = BeautifulSoup(s2.text, 'lxml')
                    table = soup2.find('table', summary="COVID-19 Cases in Texas Counties")
                    counties = ['Harris', 'Fort Bend']
                    for county in counties:
                        label = table.find('td', text=county)
                        cases = label.find_next_sibling('td').text
                        results += f'*{county} County:* {cases} cases\n'
                except:
                    # Deliberate best-effort: any scrape failure degrades to a note.
                    results += '_Cannot retrieve county data_'
            # If the last updated date is the same as today then dont update numbers
            last_updated = updated.split()[0]
            if saved_data['new']['upd'] == last_updated:
                pass
            else:
                save_file(state_name, positive, negative, total, death, last_updated)
        else:
            results += f'''
Unable to retrieve data for {state}
'''
    return results
if __name__ == '__main__':
    # Build the combined report and push it via WhatsApp.
    final = ""
    final += get_data('TX', 'NJ')
    sms.send_whatsapp(final)
|
17,952 | 93332c23bbb891b49cb7b795defd8d4815193b20 | """
File: fitjet_3d.py
Fits a geometric model to mock jet data. Uses image subtraction;
otherwise same as fitjet.py
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.optimize as op
import emcee
import triangle
import sys
# These mock data are produced by jet3d.py.
a2 = np.fromfile('mockdata_3d_nc100.dat',dtype=np.float32)
def I(theta):
    """Render the model jet image for parameter vector theta and return it
    flattened (nc x nc = 100 x 100 histogram of projected points).

    theta = (a, b, i, l, alpha, beta, gamma): spiral scale/pitch (a, b),
    inclination and position-angle parameters (i, l) and shape exponents
    (alpha, beta, gamma).
    NOTE: uses Python-2-only xrange; the surrounding script is Python 2.
    """
    a, b, i, l, alpha, beta, gamma = theta
    # Parameterize the spiral by u over 10 turns, sampled at 1000 points.
    u = np.linspace(0.0, 20.0*np.pi, 1000)
    def z(u):
        return (a/(2.0*np.pi)) * u * (u/(2.0*np.pi))**beta
    zv = z(u)
    def x(u):
        return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.cos(u)
    def y(u):
        return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.sin(u)
    xv = x(u)
    yv = y(u)
    # Rotation matrices: ri about the y-axis (inclination), rl about z.
    def ri(i):
        return np.matrix([[np.cos(i), 0.0, np.sin(i)],[0.0, 1.0, 0.0],[-np.sin(i), 0.0, np.cos(i)]])
    def rl(l):
        return np.matrix([[np.cos(l), -np.sin(l), 0.0],[np.sin(l), np.cos(l), 0.0],[0.0, 0.0, 1.0]])
    # Per-point inclination grows with z, scaled to [0, pi/2].
    zvarr = zv*gamma
    iarr = zvarr/zvarr.max()
    iarr *= np.pi/2.0
    c = np.dstack((xv, yv, zv))
    c = np.squeeze(c)
    d = np.zeros((1000,3))
    lm = rl(l)
    for n in range(1000):
        d[n] = c[n]*ri(iarr[n])*lm
    xv = d[:,0]
    yv = d[:,1]
    xv = xv[~np.isnan(xv)]
    yv = yv[~np.isnan(yv)]
    # Bin the projected points into an nc x nc image with a 5-unit margin.
    nc = 100
    a = np.zeros((nc,nc),dtype=np.float32)
    zl = xv.min() - 5.0
    zu = xv.max() + 5.0
    yl = yv.min() - 5.0
    yu = yv.max() + 5.0
    lz = zu - zl
    ly = yu - yl
    dz = lz/nc
    dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2).
    def zloc(cood):
        return int((cood-zl)/dz) + 1
    def yloc(cood):
        return int((cood-yl)/dy) + 1
    for i in xrange(xv.size):
        zpos = zloc(xv[i])
        ypos = yloc(yv[i])
        a[ypos, zpos] += 1.0
    return a.flatten()
def neglnlike(theta, intensity, intensity_err):
    """Return the negative Gaussian log-likelihood of model image I(theta)
    against the (flattened) data and its per-pixel uncertainties."""
    inv_sigma2 = 1.0 / intensity_err ** 2
    residual = intensity - I(theta)
    return 0.5 * (np.sum(residual ** 2 * inv_sigma2 - np.log(inv_sigma2)))
# Assume a uniform 0.1 uncertainty on every pixel of the mock image.
a2_err = np.zeros_like(a2)
a2_err += 0.1
# Maximum-likelihood starting point via Nelder-Mead (derivative-free).
theta_guess = (0.1, 10.0, 2.0, 3.0, 0.2, 2.0, 0.5)
result = op.minimize(neglnlike, theta_guess, args=(a2, a2_err), method='Nelder-Mead')
print result.x
print result.success
def lnprior(theta):
    """Uniform (box) log-prior: 0.0 inside the allowed ranges, -inf outside."""
    bounds = (
        (0.05, 0.15),  # a
        (8.0, 12.0),   # b
        (1.0, 3.0),    # i
        (2.0, 4),      # l
        (0.1, 0.3),    # alpha
        (1.0, 3.0),    # beta
        (0.3, 0.7),    # gamma
    )
    for value, (lo, hi) in zip(theta, bounds):
        if not lo < value < hi:
            return -np.inf
    return 0.0
def lnprob(theta, intensity, intensity_err):
    """Log-posterior: prior plus log-likelihood; -inf outside the prior support."""
    lp = lnprior(theta)
    if np.isfinite(lp):
        return lp - neglnlike(theta, intensity, intensity_err)
    return -np.inf
# 7 parameters, 100 walkers started in a tight Gaussian ball around the
# maximum-likelihood solution.
ndim, nwalkers = 7, 100
pos = [result.x + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(a2, a2_err))
sampler.run_mcmc(pos, 500)
# Discard the first 100 steps as burn-in; flatten walkers x steps.
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
plot_chain = True
if plot_chain:
    # One panel per parameter: every walker's trace, with the
    # maximum-likelihood value overplotted as a dashed line.
    mpl.rcParams['font.size'] = '10'
    nplots = 7
    plot_number = 0
    fig = plt.figure(figsize=(12, 6), dpi=100)
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,0], c='k', alpha=0.1)
    ax.axhline(result.x[0], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$A$')
    ax.set_xticklabels('')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,1], c='k', alpha=0.1)
    ax.axhline(result.x[1], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel('$B$')
    ax.set_xticklabels('')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,2], c='k', alpha=0.1)
    ax.axhline(result.x[2], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$i_0$')
    ax.set_xticklabels('')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,3], c='k', alpha=0.1)
    ax.axhline(result.x[3], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\lambda_0$')
    # BUG FIX: the last three panels (alpha, beta, gamma) plotted parameter
    # index 3 again -- a copy-paste of the lambda_0 panel -- so parameters
    # 4, 5 and 6 were never shown.  They now use indices 4, 5 and 6.
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,4], c='k', alpha=0.1)
    ax.axhline(result.x[4], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\alpha$')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,5], c='k', alpha=0.1)
    ax.axhline(result.x[5], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\beta$')
    plot_number += 1
    ax = fig.add_subplot(nplots, 1, plot_number)
    for i in range(nwalkers):
        ax.plot(sampler.chain[i,:,6], c='k', alpha=0.1)
    ax.axhline(result.x[6], c='#CC9966', dashes=[7,2], lw=2)
    ax.set_ylabel(r'$\gamma$')
    ax.set_xlabel('step')
    plt.savefig('chains.pdf',bbox_inches='tight')
# Corner (triangle) plot of the posterior samples.
mpl.rcParams['font.size'] = '14'
fig = triangle.corner(samples, labels=['$A$', '$B$', '$i_0$', r'$\lambda_0$', r'$\alpha$', r'$\beta$', r'$\gamma$'],
                      truths=result.x)
fig.savefig("triangle.pdf")
|
17,953 | f2f8d6a4696af48a294dd7a3760a76943e0fa51a | # -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Data: 17/10/18
# Brief: 预测
import os
import sys
import paddle.v2 as paddle
import config
import reader
from network import dssm_lm
from utils import logger, load_dict, load_reverse_dict
def infer(model_path, dic_path, infer_path, prediction_output_path, rnn_type="gru", batch_size=1):
    """Run DSSM-LM inference: score left/right text pairs from *infer_path*
    with the model at *model_path* and write "prob\\tprob\\tleft\\tright"
    lines to *prediction_output_path*.

    NOTE(review): the loop variable `id` is reused by the inner list
    comprehensions, shadowing both the builtin and the enumerate index --
    confirm the intent before refactoring.
    """
    logger.info("begin to predict...")
    # check files
    assert os.path.exists(model_path), "trained model not exits."
    assert os.path.exists(dic_path), " word dictionary file not exist."
    assert os.path.exists(infer_path), "infer file not exist."
    logger.info("load word dictionary.")
    word_dict = load_dict(dic_path)
    word_reverse_dict = load_reverse_dict(dic_path)
    logger.info("dictionary size = %d" % (len(word_dict)))
    try:
        word_dict["<unk>"]
    except KeyError:
        logger.fatal("the word dictionary must contain <unk> token.")
        sys.exit(-1)
    # initialize PaddlePaddle
    paddle.init(use_gpu=config.use_gpu, trainer_count=config.num_workers)
    # load parameter
    logger.info("load model parameters from %s " % model_path)
    parameters = paddle.parameters.Parameters.from_tar(
        open(model_path, "r"))
    # load the trained model
    prediction = dssm_lm(
        vocab_sizes=[len(word_dict), len(word_dict)],
        emb_dim=config.emb_dim,
        hidden_size=config.hidden_size,
        stacked_rnn_num=config.stacked_rnn_num,
        rnn_type=rnn_type,
        share_semantic_generator=config.share_semantic_generator,
        share_embed=config.share_embed,
        is_infer=True)
    inferer = paddle.inference.Inference(
        output_layer=prediction, parameters=parameters)
    # Slot order expected by the network's data layers.
    feeding = {"left_input": 0, "left_target": 1, "right_input": 2, "right_target": 3}
    logger.info("infer data...")
    # define reader
    reader_args = {
        "file_path": infer_path,
        "word_dict": word_dict,
        "is_infer": True,
    }
    infer_reader = paddle.batch(reader.rnn_reader(**reader_args), batch_size=batch_size)
    logger.warning("output prediction to %s" % prediction_output_path)
    with open(prediction_output_path, "w")as f:
        for id, item in enumerate(infer_reader()):
            # Decode token ids back to words for the output line.
            left_text = " ".join([word_reverse_dict[id] for id in item[0][0]])
            right_text = " ".join([word_reverse_dict[id] for id in item[0][2]])
            probs = inferer.infer(input=item, field=["value"], feeding=feeding)
            f.write("%f\t%f\t%s\t%s" % (probs[0], probs[1], left_text, right_text))
            f.write("\n")
if __name__ == "__main__":
    # All paths/hyperparameters come from the shared config module.
    infer(model_path=config.model_path,
          dic_path=config.dic_path,
          infer_path=config.infer_path,
          prediction_output_path=config.prediction_output_path,
          rnn_type=config.rnn_type)
|
17,954 | edad2d164bbb2bc5289365a05846acb3db55e792 | import FIXPMsgUtil
from typing import Dict
import Constant
from datetime import datetime
class ApplicationMsgUtil:
    """Builders for FIXP application-level messages represented as dicts."""

    @staticmethod
    def create_application_msg(msg_type: str):
        """Return a fresh message dict tagged with *msg_type*."""
        msg = dict()
        msg['MsgType'] = msg_type
        return msg

    @staticmethod
    def decorate_application_msg(msg: dict):
        """Stamp the protocol version and sending time onto *msg*.

        NOTE(review): the example payload below shows FIX-style timestamps
        (YYYYMMDD-HH:MM:SS.mmm) but isoformat() emits ISO-8601 -- confirm
        the receiver accepts ISO-8601.
        """
        msg['ApplVerID'] = Constant.APPL_VER_ID
        msg['SendingTime'] = datetime.now().isoformat()
        return msg

    @staticmethod
    def create_new_single_order(account: str,
                                client_order_id: str,
                                security_id: str,
                                side: str,
                                order_qty: str,
                                ord_typ: str,
                                currency: str,
                                time_in_force: str,
                                price: str = None,
                                expire_time: str = None):
        """Build a NewOrderSingle message dict.

        *price* is optional (omit for market orders); *expire_time* is only
        included when provided (GoodTillDate orders).
        """
        # Example payload:
        # {"MsgType": "NewOrderSingle",
        #  "ApplVerID": "FIX50SP2",
        #  "CstmApplVerID": "IGUS/Trade/V1",
        #  "SendingTime": "20190802-21:14:38.717",
        #  "ClOrdID": "12345",
        #  "Account": "PDKKL",
        #  "SecurityID": "CS.D.GBPUSD.CZD.IP",
        #  "SecurityIDSource": "MarketplaceAssignedIdentifier",
        #  "Side": "Buy",
        #  "TransactTime": "20190802-21:14:38.717",
        #  "OrderQty": "6",
        #  "OrdTyp": "2",
        #  "Price": "34.444",
        #  "Currency": "USD",
        #  "TimeInForce": "GoodTillDate",
        #  "ExpireTime": "20190802-17:00:00.000" }
        msg: dict = ApplicationMsgUtil.create_application_msg("NewOrderSingle")
        msg = ApplicationMsgUtil.decorate_application_msg(msg)
        msg['ClOrdID'] = client_order_id
        msg['Account'] = account
        msg['SecurityID'] = security_id
        msg['SecurityIDSource'] = "MarketplaceAssignedIdentifier"
        msg['Side'] = side
        msg['OrderQty'] = order_qty
        msg['OrdType'] = ord_typ
        # BUG FIX: the original gated Price on `expire_time is not None`,
        # silently dropping the price from limit orders that had no expiry.
        if price is not None:
            msg['Price'] = price
        msg['Currency'] = currency
        msg['TimeInForce'] = time_in_force
        if expire_time is not None:
            msg['ExpireTime'] = expire_time
        msg['TransactTime'] = msg['SendingTime']
        return msg
17,955 | 14ccde1243128e06cfb415691855d3f3ec238fe4 | from django.db import models
from django.utils import timezone
from django.urls import reverse
class PublishedManager(models.Manager):
    """Manager that restricts querysets to posts with status='published'."""
    def get_queryset(self):
        return super(PublishedManager,
                     self).get_queryset() \
            .filter(status='published', )
class Categories(models.TextChoices):
    # Stored values double as slugs; renaming them changes stored data
    # and would require a migration.
    # NOTE(review): 'annouce' and 'Magzi' look like typos -- confirm before
    # changing, since the values are persisted.
    Actualite = 'actualite'
    Magzi = 'magazine'
    Anor = 'annouce'
    Mines = 'mines'
    Industries = 'industries'
    Developpement_Technologique = 'développement_Technologique'
    Blog_Post = 'blog_Post'
class Post(models.Model):
    """Blog post with draft/published workflow and date-based URLs."""
    STATUS_CHOICES = (
        ('draft', 'Draft'),
        ('published', 'Published')
    )
    title = models.CharField(max_length=250)
    category = models.CharField(max_length=50, choices=Categories.choices, default=Categories.Actualite)
    excerpt = models.CharField(max_length=150)
    # Slug only needs to be unique per publish date (used in the URL).
    slug = models.SlugField(max_length=250, unique_for_date='publish')
    thumbnail = models.ImageField(upload_to='photos/%Y/%m', blank=True)
    body = models.TextField()
    publish = models.DateTimeField(default=timezone.now)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    status = models.SlugField(max_length=30, choices=STATUS_CHOICES, default='draft')
    # Default manager plus a convenience manager for published posts only.
    object = models.Manager()
    published = PublishedManager()
    class Meta:
        ordering = ('-publish',)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """Date-based canonical URL: /<year>/<month>/<day>/<slug>/."""
        return reverse('blog:post_detail',
                       args=[self.publish.year,
                             self.publish.month,
                             self.publish.day, self.slug])
|
17,956 | a4c7ee6a9e18289ab2146d1629793d23d5a1bd06 | """Helper functions
"""
import json
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter
import statsmodels.api as sm
def smooth_series(y, p=6.25):
    """Return the trend component of *y* from the Hodrick-Prescott filter.

    The cyclical component is discarded; *p* is the HP smoothing parameter.
    """
    _cycle, trend_component = sm.tsa.filters.hpfilter(y, p)
    return trend_component
def clean_series(y, smooth=False, p=6.25, logsmooth=True):
    """Clean outliers in a pandas Series.

    Zeros are treated as missing and linearly interpolated (leading gaps are
    filled with 0.0). Optionally smooths with the Hodrick-Prescott filter,
    either directly (``smooth=True``) or in log space (``logsmooth=True``,
    the default — note both paths require statsmodels via smooth_series).
    Values in (0, 1) are clamped to 1 after smoothing; negatives to 0.
    """
    # BUGFIX: np.NaN was removed in NumPy 2.0 — use np.nan.
    # First null values are not interpolated but later filled by 0.0.
    y = y.replace(0.0, np.nan).interpolate().fillna(0.0)
    # Smooth using Hodrick Prescott filter with parameter p.
    if smooth:
        y = smooth_series(y, p)
        y.loc[(y < 1) & (y > 0)] = 1
    if logsmooth:
        # Smooth log(1+x) then map back with exp(x)-1 to damp spikes.
        y = y.map(lambda x: np.log(1 + x))
        y = smooth_series(y, p)
        y = y.map(lambda x: np.exp(x) - 1)
        y.loc[(y < 1) & (y > 0)] = 1
    # Smoothing can undershoot: never return negative counts.
    y.loc[y < 0] = 0
    return y
def load_json(json_path):
    """Read the file at *json_path* and return the parsed JSON object."""
    with open(json_path) as handle:
        return json.load(handle)
def save_json(data, path):
    """Serialise *data* as JSON into the file at *path* (overwrites)."""
    with open(path, 'w') as handle:
        json.dump(data, handle)
17,957 | e841fdcd531567db68f9ceb289550fdf4e12fc42 | from django import forms
from django.contrib.auth import get_user_model
User = get_user_model()
class AgentModelForm(forms.ModelForm):
    """User-model form for agents that hides Django's username help text."""

    def __init__(self, *args, **kwargs):
        super(AgentModelForm, self).__init__(*args, **kwargs)
        # Blank out the verbose default help text on these fields.
        for name in ['username']:
            self.fields[name].help_text = None

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name')
|
17,958 | 03eee98ee44421986fbdc333f46a26fcfc34b19d | from django.test import TestCase
from selenium import webdriver
from tests_functional.messages_en import msg
class MoviesPageVisitorTest(TestCase):
    """Functional (Selenium) tests for the /movies endpoint."""

    @classmethod
    def setUpClass(cls):
        # BUGFIX: overridden class-level hooks must chain to TestCase,
        # otherwise Django's class-wide fixture/transaction setup never runs.
        super(MoviesPageVisitorTest, cls).setUpClass()
        # USER STORY
        # user opens browser and
        # visits website /movies endpoint on localhost:8000
        cls.browser = webdriver.Firefox()
        cls.browser.implicitly_wait(3)
        cls.browser.get('http://localhost:8000/movies')

    @classmethod
    def tearDownClass(cls):
        # user closes browser
        cls.browser.quit()
        # Chain to TestCase so class-level teardown is not skipped.
        super(MoviesPageVisitorTest, cls).tearDownClass()

    def test_user_can_access_movie_page(self):
        """The page title matches the expected Ghibli movies title."""
        self.assertIn(
            msg['GHIBLI_MOVIES_TITLE'],
            self.browser.title,
            msg=msg['GHIBLI_MOVIES_TITLE_MSG']
        )

    def test_user_can_see_movie_list(self):
        # user checks the list of all movies from Ghibli Studio
        self.assertEqual(
            msg['GHIBLI_MOVIE_LIST_TITLE'],
            self.browser.find_element_by_id(msg['GHIBLI_MOVIE_LIST_TITLE_TAG']).text,  # noqa
            msg=msg['GHIBLI_MOVIE_LIST_TITLE_MSG']
        )
        self.assertIn(
            msg['GHIBLI_MOVIE_NAME'],
            [title.text.replace('\n', '')
             for title in self.browser.find_elements_by_class_name(msg['GHIBLI_MOVIE_NAME_TAG'])],  # noqa
            msg=msg['GHIBLI_MOVIE_NAME_MSG']
        )

    def test_user_can_see_list_of_people_from_each_movie(self):
        # user also checks list of people (cast) for each movie
        self.assertIn(
            msg['GHIBLI_PERSON_NAME'],
            [people.text.replace('* ', '')
             for people in self.browser.find_elements_by_class_name(msg['GHIBLI_PERSON_NAME_TAG'])],  # noqa
            msg=msg['GHIBLI_PERSON_NAME_MSG']
        )
|
17,959 | 147ac4cd596c141a548252a0d442e3c07f29ae34 | r=int(input('Enter radius'))
V=4/3*3.142*r*r*r
print('Volume of sphere',V)
|
17,960 | 776b397fdaed2e720361a9e4f67e135ba606487a | """Query an mcpe server easily
query.py
Copyright (c) 2017 w-gao
"""
import socket
import struct
from random import randint
from ._server_data import ServerData
"""
______ ___.__. _____ ____ ______ ____ ________ __ ___________ ___.__.
\____ < | | ______ / \_/ ___| |____ \_/ __ \ ______ / ____/ | \_/ __ \_ __ < | |
| |_> >___ | /_____/ | Y Y \ \___| |_> > ___/ /_____/ < <_| | | /\ ___/| | \/\___ |
| __// ____| |__|_| /\___ > __/ \___ > \__ |____/ \___ >__| / ____|
|__| \/ \/ \/|__| \/ |__| \/ \/
"""
class Query:
    """Client for the GameSpy4-style UDP query protocol used by MCPE servers.

    Usage: ``Query(host, port).query()`` returns a ServerData with SUCCESS
    set to True only if a full statistics response was parsed.
    """

    # Every query packet starts with MAGIC, then one packet-type byte.
    MAGIC = b'\xFE\xFD'
    HANDSHAKE = b'\x09'
    STATISTICS = b'\x00'

    def __init__(self, host, port, timeout=5):
        # Target server address and socket timeout in seconds.
        self.host = host
        self.port = port
        self.timeout = timeout
        self.socket = None

    def query(self):
        """Query the server once.

        Returns None when the socket cannot be created/connected; otherwise
        a ServerData whose SUCCESS flag stays False if the server is offline
        or has query disabled.
        """
        # init socket
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            self.socket.settimeout(self.timeout)
            self.socket.connect((self.host, self.port))
        except socket.error as msg:
            print("Cannot connect to the server. Error: ", msg)
            return None
        # Returned stats
        stats = ServerData()
        # get data from the server
        try:
            # Handshake
            # Magic + packetType + sessionId + payload
            # NOTE(review): struct.pack("L", ...) uses the platform's native
            # unsigned-long size and byte order (8 bytes on 64-bit Linux);
            # the protocol session id is conventionally 4 bytes big-endian —
            # confirm this works on the targeted platforms.
            hand_shake = Query.MAGIC + Query.HANDSHAKE + struct.pack("L", randint(1, 9999999))
            self.socket.send(hand_shake)
            # Challenge token: strip the 5-byte header and trailing NUL byte.
            token = self.socket.recv(65535)[5:-1].decode()
            if token is not None:
                payload = b"\x00\x00\x00\x00"
                request_stat = Query.MAGIC + Query.STATISTICS + struct.pack("L", randint(1, 9999999)) + struct.pack(
                    '>l', int(token)) + payload
                self.socket.send(request_stat)
                # str() of the raw bytes keeps escape sequences as literal
                # text; the r'\x..' splits below depend on that repr form.
                buff = str(self.socket.recv(65535)[5:])
                if buff is not None:
                    server_data = buff.split(r'\x01')
                    # Key/value section of the response.
                    server_data_1 = server_data[0].split(r'\x00')[2:-2]
                    # Player list
                    server_data_2 = server_data[1].split(r'\x00')[2:-2]
                    # Trimmed Server Data
                    data = {}
                    # Alternating key, value tokens -> dict.
                    for i in range(0, len(server_data_1), 2):
                        data[server_data_1[i]] = server_data_1[i + 1]
                    stats.HOSTNAME = data['hostname']
                    stats.GAME_TYPE = data['gametype']
                    stats.GAME_ID = data['game_id']
                    stats.VERSION = data['version']
                    stats.SERVER_ENGINE = data['server_engine']
                    # Plugins
                    plugins = []
                    for p in data['plugins'].split(';'):
                        plugins.append(p)
                    stats.PLUGINS = plugins
                    stats.MAP = data['map']
                    stats.NUM_PLAYERS = int(data['numplayers'])
                    stats.MAX_PLAYERS = int(data['maxplayers'])
                    stats.WHITE_LIST = data['whitelist']
                    stats.HOST_IP = data['hostip']
                    stats.HOST_PORT = int(data['hostport'])
                    # Players
                    players = []
                    for p in server_data_2:
                        players.append(p)
                    stats.PLAYERS = players
                    stats.SUCCESS = True
        # The server is offline or it did not enable query
        except socket.error as msg:
            print('Failed to query. Error message: ', msg)
        # print('closing the socket')
        self.socket.close()
        return stats
|
17,961 | 74380efe4278375ec92eca3f9a5fea68e582e679 | from datetime import date
from time import sleep
def eh_bissexto(ano):
    """Return True when *ano* is a leap year in the Gregorian calendar.

    Rule: divisible by 4 and not by 100, unless also divisible by 400.
    """
    return ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)


if __name__ == "__main__":
    ano = int(input('Digite um ano para verificar se é bissexto, digite 0 para analisar o ano atual: '))
    if ano == 0:
        ano = date.today().year
    print('\nAnalisando o ano {} ...\n'.format(ano))
    sleep(2)
    # The original if/else duplicated the "bissexto" branch; the predicate
    # above is equivalent to (div4 and not div100) or div400.
    if eh_bissexto(ano):
        print('Este ano é bissexto')
    else:
        print('Este ano NÃO é bissexto')
    print('\n---FIM---')
|
17,962 | cd9be86378207d409cfb697197b400864633c4e0 | #!/usr/bin/env python
Import("env")

env_png = env.Clone()

# Thirdparty source files

thirdparty_obj = []

if env["builtin_libpng"]:
    thirdparty_dir = "#thirdparty/libpng/"
    thirdparty_sources = [
        "png.c",
        "pngerror.c",
        "pngget.c",
        "pngmem.c",
        "pngpread.c",
        "pngread.c",
        "pngrio.c",
        "pngrtran.c",
        "pngrutil.c",
        "pngset.c",
        "pngtrans.c",
        "pngwio.c",
        "pngwrite.c",
        "pngwtran.c",
        "pngwutil.c",
    ]
    thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources]

    env_png.Prepend(CPPPATH=[thirdparty_dir])
    # Needed for drivers includes and in platform/web.
    env.Prepend(CPPPATH=[thirdparty_dir])

    env_thirdparty = env_png.Clone()
    env_thirdparty.disable_warnings()
    env_thirdparty.add_source_files(thirdparty_obj, thirdparty_sources)

    if env["arch"].startswith("arm"):
        if env.msvc:  # Can't compile assembly files with MSVC.
            # BUGFIX: the define must be a single (name, value) tuple so it
            # expands to -DPNG_ARM_NEON_OPT=0; the previous
            # [("PNG_ARM_NEON_OPT"), 0] added two unrelated defines
            # ("PNG_ARM_NEON_OPT" and "0"), leaving NEON detection enabled.
            env_thirdparty.Append(CPPDEFINES=[("PNG_ARM_NEON_OPT", 0)])
        else:
            env_neon = env_thirdparty.Clone()
            if "S_compiler" in env:
                env_neon["CC"] = env["S_compiler"]
            neon_sources = []
            neon_sources.append(env_neon.Object(thirdparty_dir + "/arm/arm_init.c"))
            neon_sources.append(env_neon.Object(thirdparty_dir + "/arm/filter_neon_intrinsics.c"))
            neon_sources.append(env_neon.Object(thirdparty_dir + "/arm/filter_neon.S"))
            neon_sources.append(env_neon.Object(thirdparty_dir + "/arm/palette_neon_intrinsics.c"))
            thirdparty_obj += neon_sources
    elif env["arch"].startswith("x86"):
        env_thirdparty.Append(CPPDEFINES=["PNG_INTEL_SSE"])
        env_thirdparty.add_source_files(thirdparty_obj, thirdparty_dir + "/intel/intel_init.c")
        env_thirdparty.add_source_files(thirdparty_obj, thirdparty_dir + "/intel/filter_sse2_intrinsics.c")
    elif env["arch"] == "ppc64":
        env_thirdparty.add_source_files(thirdparty_obj, thirdparty_dir + "/powerpc/powerpc_init.c")
        env_thirdparty.add_source_files(thirdparty_obj, thirdparty_dir + "/powerpc/filter_vsx_intrinsics.c")

    env.drivers_sources += thirdparty_obj


# Godot source files

driver_obj = []

env_png.add_source_files(driver_obj, "*.cpp")
env.drivers_sources += driver_obj

# Needed to force rebuilding the driver files when the thirdparty library is updated.
env.Depends(driver_obj, thirdparty_obj)
17,963 | 4fd555b41ab4c465b5c99ae2468154f3b6cccaba | import pygame
# Initialize pygame
pygame.init()

# Create our display surface
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 300
display_surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Discrete Movement!")

# Set game values
VELOCITY = 30

# Load in images
dragon_image = pygame.image.load("dragon_right.png")
dragon_rect = dragon_image.get_rect()
dragon_rect.centerx = WINDOW_WIDTH // 2
dragon_rect.bottom = WINDOW_HEIGHT

# Map each arrow key to the (dx, dy) offset of one discrete step.
KEY_OFFSETS = {
    pygame.K_LEFT: (-VELOCITY, 0),
    pygame.K_RIGHT: (VELOCITY, 0),
    pygame.K_UP: (0, -VELOCITY),
    pygame.K_DOWN: (0, VELOCITY),
}

# The main game loop
running = True
while running:
    for event in pygame.event.get():
        print(event)
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN:
            # Discrete movement: jump one full step per key press.
            dx, dy = KEY_OFFSETS.get(event.key, (0, 0))
            dragon_rect.x += dx
            dragon_rect.y += dy

    # Fill the display surface to cover old images
    display_surface.fill((0, 0, 0))

    # Blit (copy) assets to the screen
    display_surface.blit(dragon_image, dragon_rect)

    # Update the display
    pygame.display.update()

# End the game
pygame.quit()
17,964 | 8f07b255344032fe18637cf5f7d1c9fd3ed31843 | '''
UserInput.py generates the performances of a coating given
thickness parameters by the user
edit values like 'AlN_seed = x' to change thickness parameters,
for double stack / inclusion of seeding layers, uncomment
lines like 'Ag_top_thickness = y' and include the corresponding
'GivenAgTop = nklib.Ag(Ag_top_thickness)' and
'MyStack = ML([GivenSeed, GivenAgBot, GivenAlNBot, GivenAgTop, GivenAlNTop])'
'''
import ga_2layers_aln_bright as bright
import opt_sim as opt
import numpy as np
import opt_sim.nklib as nklib
import opt_sim.structure
from opt_sim.structure import MultiLayer as ML
import matplotlib.pyplot as plt
import GenerateColor
# Reference spectra: photopic luminosity function and ASTM G173 solar spectrum.
P_data = np.loadtxt("plot support files\\Photopic_luminosity_function.txt", skiprows=1)
S_data = np.loadtxt("plot support files\\ASTMG173.txt", skiprows=2)
#S_data = np.loadtxt("plot support files\\Standard_Illuminant_D65.txt")

# Layer thicknesses in nm; uncomment the *_top_* lines for a double stack.
AlN_seed = 3
Ag_bot_thickness = 10  # 12.2 (-3.5) originally for scc 10
AlN_bot_thickness = 43  # 54.2 originally for scc 10
#Ag_top_thickness = 14
#AlN_top_thickness = 45.1

GivenSeed = nklib.AlN(AlN_seed)
GivenAgBot = nklib.Ag(Ag_bot_thickness)
GivenAlNBot = nklib.AlN(AlN_bot_thickness)
#GivenAgTop = nklib.Ag(Ag_top_thickness)
#GivenAlNTop = nklib.AlN(AlN_top_thickness)
#MyStack = ML([GivenAgBot, GivenAlNHBot])
MyStack = ML([GivenSeed, GivenAgBot, GivenAlNBot])
#MyStack = ML([GivenSeed, GivenAgBot, GivenAlNBot, GivenAgTop, GivenAlNTop])
MyStack.calculate_TR()
MyStack = bright.correct_TR(MyStack)
# Absorptance A = 1 - T - R (assumes T and R are NumPy arrays — TODO confirm).
MyStack.A = [1] * len(MyStack.T) - MyStack.T - MyStack.R
L, a, b = GenerateColor.color_calc(MyStack.wl, MyStack.T, MyStack.R)

# Wavelength-grid indices bounding the UV / visible / IR bands.
index_lowerUV = 0
index_lowervis = 32
index_uppervis = 94
index_upper = 454
photopic_array = np.interp(MyStack.wl[index_lowervis:index_uppervis], P_data[:, 0], P_data[:, 1])
sol_array = np.interp(MyStack.wl[index_lowerUV:index_upper], S_data[:, 0], S_data[:, 3])
# Visible light transmittance: T weighted by the photopic response.
Tvis = sum(MyStack.T[index_lowervis:index_uppervis]*photopic_array)/(sum(photopic_array))
#yes -0.04
#TSER = 1 - sum(MyStack.T[index_lowerUV:index_upper]*sol_array)/(sum(sol_array)) - 0.04
# Total solar energy rejected: solar-weighted reflectance plus 85% of absorptance.
TSER = sum(MyStack.R[index_lowerUV:index_upper]*sol_array)/(sum(sol_array))+0.85*sum(MyStack.A[index_lowerUV:index_upper]*sol_array)/(sum(sol_array))

print ("==============================================================")
#print (GivenAgBot, GivenAlNBot)
print (GivenSeed, GivenAgBot, GivenAlNBot)
#print (GivenSeed, GivenAgBot, GivenAlNBot, GivenAgTop, GivenAlNTop)
print ('VLT = %s' % float("{0:.4f}".format(Tvis)))
print ('TSER = %s' % float("{0:.4f}".format(TSER)))
#for i in range(len(MyStack.wl)):
#    print MyStack.wl[i],",", MyStack.T[i],",", MyStack.R[i]
#print ('Expected VLT = %s' % float(75.6))
#print ('Expected TSER = %s' % float(46))
# BUGFIX: this line used the Python-2 print *statement*, which is a
# SyntaxError under Python 3 (the rest of the file uses print()).
print('Color in L* a* b* space is:', L, a, b)
opt.plot.TR([MyStack])
plt.show()
17,965 | 0007d518232e0a4de89c1f579d28a4cb548bb673 | import os
import numpy as np
import py_compile
import sys
import matplotlib.pyplot as plt
# Driver script: compile and exercise the class_cam2 analysis module.
#os.path.dirname(os.path.realpath(__file__))
current_path = os.getcwd()
path_data_column_roof = current_path + '/data/column_on_roof/'

# Make the project's python/ directory importable.
# (The original appended it twice; once is enough — sys.path duplicates
# only slow lookups.)
sys.path.append(current_path + '/python/')
py_compile.compile(current_path + '/class_cam2.py')
import class_cam2
# BUGFIX: 'reload' is not a builtin in Python 3 — import it from importlib.
from importlib import reload
reload(class_cam2)

#a=class_cam2.cam2('free.cam2.h0.exp0.2003.nc')
#a=class_cam2.cam2('nudged.isogsm_sfc.h0.exp1.2009.nc')
a = class_cam2.cam2('free.LMDZ4.h0.20070101_0000.nc')
a.show_variable_list()
a.plot_vapor_contour()
a.get_coastlines(npts_min=80)  # the minimum points in the system
#for i in len(a.coastal_segments):
a.plot_continent(continent_id=1)
a.find_city_locations()
a.append_nc_files('/home/chenming/gis_swing_nasa/')
a.extracting_city_variable_over_time()
a.plot_wind_rose_at_cities(datatype=['UINT', 'VINT'])
# Alias kept for interactive debugging of instance methods in a REPL.
self = a

# NOTE(review): a large block of commented-out exploratory code (scale-data
# loading, flat-index sanity checks for lats_mtx/lons_mtx at Los Angeles)
# was removed here; recover it from version control if still needed.
17,966 | 9a9ff6eb416331a2e6efeeb3ce2edd6afe8b0c5a | #############################################################
# FILE: slicer.py
# UPDATED: 17.5.17
#############################################################
import math
import sys
import os
# Mutable module-level state shared by the slicing routines below.
g_updated_commands = []  # rewritten gcode lines; joined with newlines on output
g_command_args = []  # tokens of the gcode command currently being processed
g_current_X_position = 3  # current arm position, Cartesian coordinates
g_current_Y_position = 0
g_current_R_position = 3  # current arm position, polar: radius ...
g_current_alpha_position = 0  # ... and angle (radians, per change_coordinates)
printer_X_origin = 0  # Cartesian origin of the printer's polar frame
printer_Y_origin = 0
g_code_pointer = -1  # index of the last code letter located by code_seen()
#####################################################
class switch(object):
    """Poor-man's switch statement: ``switch(v)`` stores *v* on the class and
    returns True; ``case(a, b, ...)`` tests the stored value against the args.

    NOTE(review): ``case`` is defined as a class attribute here but appears to
    be referenced *unqualified* in parse() — confirm how it reaches module
    scope (as written, a bare ``case(...)`` call would raise NameError).
    """
    value = None

    def __new__(cls, value):
        # Stash the value on the class itself (shared, not per-instance).
        cls.value = value
        return True

    def case(*args):
        return any((arg == switch.value for arg in args))
def relpath(filename):
    """Resolve *filename* relative to the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, filename)
def join(it):
    """Join *it*: newline-separated for the global command list, else spaces."""
    separator = "\n" if it is g_updated_commands else " "
    return separator.join(it)
def change_coordinates(X, Y):
    """Convert the Cartesian point (X, Y) to polar (R, alpha).

    The result is also recorded in the module globals
    g_current_R_position / g_current_alpha_position so later commands can
    tell which direction the radius and angle moved.

    Axes: R is the distance from the printer origin; alpha is the angle in
    radians measured from the positive X axis (CCW positive). If the new
    angle is smaller than the current one (relative to the origin) the
    motion is clockwise, otherwise counter-clockwise.
    """
    global g_current_R_position, g_current_alpha_position
    # Radius: Euclidean distance from the origin.
    radius = math.hypot(X, Y)
    # Angle in radians; switch to math.degrees(math.atan2(Y, X)) if the
    # firmware expects degrees instead.
    angle = math.atan2(Y, X)
    g_current_R_position = radius
    g_current_alpha_position = angle
    return radius, angle
def R_will_change_direction(point0, point1, point2):
    """Detect whether the radius reverses direction during a straight move.

    point0 is the printer origin, point1 the current arm position and
    point2 the target. The perpendicular foot from point0 onto the line
    through point1-point2 is the spot where R stops shrinking and starts
    growing (or vice versa).

    Returns (flag, (x3, y3)) where (x3, y3) is that foot point and flag is
    True only when it lies within the bounding box of the segment.
    """
    x0, y0 = point0[0], point0[1]
    x1, y1 = point1[0], point1[1]
    x2, y2 = point2[0], point2[1]
    try:
        # Slope of the perpendicular from the origin, and of the segment.
        perp_slope = (x1 - x2) / (y2 - y1)
        seg_slope = (y2 - y1) / (x2 - x1)
        # Intersection of the two lines = foot of the perpendicular.
        x3 = ((seg_slope * x1) - (perp_slope * x0) - y1 + y0) / (seg_slope - perp_slope)
        y3 = perp_slope * (x3 - x0) + y0
    except ZeroDivisionError:
        # Axis-aligned segment: the foot is straight across from the origin.
        (x3, y3) = (x0, y1) if y1 == y2 else (x1, y0)
    on_segment = (min(x1, x2) <= x3 <= max(x1, x2)) and (min(y1, y2) <= y3 <= max(y1, y2))
    return on_segment, (x3, y3)
def code_seen(code, char):
    """Scan *code* for *char*; remember its index in g_code_pointer.

    Returns True when the character is present (the stored index then points
    at it, so the value that follows starts at g_code_pointer + 1).
    """
    global g_code_pointer
    position = code.find(char)
    g_code_pointer = position
    return position != -1
def G0_G1_gcode():
    """Rewrite the current G0/G1 command (in g_command_args) into polar moves.

    Reads the X/Y (and relative I/J continuation) words from the global
    token list, converts the target to (R, alpha) via change_coordinates,
    and appends one or two "G1 X<R> Y<alpha>" commands to
    g_updated_commands. Recurses once when I/J words are present. Updates
    the g_current_*_position globals as a side effect.
    """
    global g_current_X_position, \
        g_current_Y_position, \
        g_updated_commands, \
        g_command_args
    untouched_args = []
    next_X_position = None
    next_Y_position = None
    X_continuation_position = None
    Y_continuation_position = None
    continuation_command = False
    # Classify each word of the command; anything that is not X/Y/I/J is
    # passed through unchanged (e.g. feedrate F, extrusion E).
    for arg in g_command_args[1:]:
        if code_seen(arg, 'X'):
            next_X_position = float(arg[g_code_pointer + 1:])
        elif code_seen(arg, 'Y'):
            next_Y_position = float(arg[g_code_pointer + 1:])
        elif code_seen(arg, 'I'):
            # I/J are offsets relative to the X/Y seen earlier in this line.
            X_continuation_position = next_X_position + float(arg[g_code_pointer + 1:])
            continuation_command = True
        elif code_seen(arg, 'J'):
            Y_continuation_position = next_Y_position + float(arg[g_code_pointer + 1:])
            continuation_command = True
        else:
            untouched_args.append(arg)
    # No movement words at all: emit the command untouched.
    if next_X_position is None and next_Y_position is None:
        g_updated_commands.append(join(g_command_args))
        return
    # check if they even appeared in the command
    next_X_position = next_X_position if next_X_position is not None else g_current_X_position
    next_Y_position = next_Y_position if next_Y_position is not None else g_current_Y_position
    # if it is a straight line, R needs to be changes during its move
    R_will_change, point = R_will_change_direction((printer_X_origin, printer_Y_origin),
                                                   (g_current_X_position, g_current_Y_position),
                                                   (next_X_position, next_Y_position))
    if R_will_change:
        # Split the move at the perpendicular foot so R is monotonic in
        # each emitted segment.
        R, alpha = change_coordinates(point[0], point[1])
        command = "G1 X{R} Y{alpha} ".format(R = R, alpha = alpha) + join(untouched_args)
        g_updated_commands.append(command)
        g_current_X_position, g_current_Y_position = point[0], point[1]
    # after that, continue from the same spot
    R, alpha = change_coordinates(next_X_position, next_Y_position)
    command = "G1 X{R} Y{alpha} ".format(R = R, alpha = alpha) + join(untouched_args)
    g_updated_commands.append(command)
    g_current_X_position, g_current_Y_position = next_X_position, next_Y_position
    # if I or J appeared, we need to add one more command. simply rewind this function
    if continuation_command:
        current_command = join(g_command_args)
        command = "G1 "
        command += "X{} ".format(X_continuation_position if code_seen(current_command, 'I') else g_current_X_position)
        command += "Y{} ".format(Y_continuation_position if code_seen(current_command, 'J') else g_current_Y_position)
        g_command_args = (command + join(untouched_args)).split(" ")
        G0_G1_gcode()
    return
def parse(path):
    """Convert the gcode file at *path* to the polar-coordinate dialect.

    Output is written next to the input as
    "<name>_updated_no_endstops.gcode". Non-G lines are copied verbatim;
    G0/G1 lines are rewritten by G0_G1_gcode(); G28 is currently dropped
    (endstop handling is a TODO); every other G command passes through.

    NOTE(review): `case` below is defined as an attribute of the `switch`
    class, not at module scope — confirm these unqualified calls resolve.
    """
    global g_command_args
    res = open("{}_updated_no_endstops.gcode".format(os.path.splitext(path)[0]), "w")
    for gcode in open(path, "r").readlines():
        # if its not a command, pass
        if not code_seen(gcode, "G"):
            g_updated_commands.append(gcode.replace("\n", ""))
            continue
        # Tokenise the command line into whitespace-stripped words.
        g_command_args = list(map(lambda x: x.strip(), gcode.replace("\n", "").split(" ")))
        while switch(g_command_args[0]):
            if case("G0", "G1"):
                G0_G1_gcode()
                break
            if case("G28"):
                # make a new endstop
                break
            # default, the code is intact
            g_updated_commands.append(join(g_command_args))
            break
    res.write(join(g_updated_commands))
    res.close()
    return
if __name__ == "__main__":
    # CLI entry point: the first argument is the gcode file to convert,
    # resolved relative to this script's directory.
    parse(relpath(sys.argv[1]))
17,967 | 46f6edac8e6bdcdde781e3de9e2d075e6bb1eea3 | ITEM: TIMESTEP
1500
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
4.0275618465912544e-01 4.6797243815334738e+01
4.0275618465912544e-01 4.6797243815334738e+01
4.0275618465912544e-01 4.6797243815334738e+01
ITEM: ATOMS id type xs ys zs
8 1 0.118851 0.0624606 0.0660278
35 1 0.0670193 0.123775 0.0611012
130 1 0.0685417 0.0639709 0.130487
165 1 0.128653 0.129461 0.120331
4 1 0.00130096 0.0666302 0.0617069
637 1 0.877382 0.377653 0.497838
113 1 0.494792 0.377381 0.00243534
393 1 0.248178 -0.000186831 0.37644
145 1 0.499372 0.00157003 0.125719
12 1 0.252187 0.0674462 0.0719671
39 1 0.182648 0.118705 0.0510425
43 1 0.310913 0.127651 0.0622191
134 1 0.188646 0.0668282 0.124874
138 1 0.312211 0.0591892 0.127609
169 1 0.247779 0.125368 0.134203
411 1 0.804169 0.00678576 0.446096
21 1 0.623758 -0.00144011 -0.0104021
1177 1 0.747133 0.491696 0.127729
16 1 0.36995 0.0608549 0.0594088
47 1 0.431502 0.121371 0.0571583
142 1 0.429657 0.0637164 0.126979
173 1 0.364438 0.127806 0.123724
20 1 0.492884 0.0595345 0.0616599
29 1 0.875514 0.00165863 0.0089974
267 1 0.318348 -0.00197717 0.31603
1 1 0.998291 0.00560831 0.00961238
155 1 0.816382 0.00179844 0.186877
177 1 0.490763 0.123361 0.119501
24 1 0.61979 0.0634458 0.0662465
51 1 0.547978 0.130198 0.0565133
146 1 0.56126 0.0636161 0.120437
181 1 0.622972 0.125224 0.12263
283 1 0.81432 -0.000799761 0.309311
114 1 0.554002 0.444955 0.00555425
110 1 0.432821 0.434125 0.00406996
94 1 0.934735 0.30954 -0.000193752
34 1 0.0560126 0.193167 0.0127468
28 1 0.749304 0.0659008 0.0603306
55 1 0.686557 0.126594 0.0628723
59 1 0.814669 0.12333 0.0572736
150 1 0.684859 0.0623375 0.117856
154 1 0.810985 0.061865 0.11904
185 1 0.745232 0.135055 0.128131
1561 1 0.754576 0.49893 0.497427
269 1 0.375955 0.00517248 0.253073
1435 1 0.8169 0.497247 0.433906
161 1 -0.00108638 0.117779 0.132628
32 1 0.877468 0.0685729 0.0579861
63 1 0.929934 0.13559 0.0630063
158 1 0.937445 0.0683095 0.115263
189 1 0.869285 0.127069 0.118456
512 1 0.876806 0.437383 0.429232
511 1 0.934941 0.379684 0.43404
137 1 0.252093 0.00763136 0.129668
40 1 0.123519 0.184682 0.0569683
67 1 0.0710821 0.257029 0.0638817
72 1 0.131095 0.312477 0.0559625
162 1 0.0647778 0.179437 0.119219
194 1 0.0712277 0.315671 0.122904
197 1 0.128812 0.244807 0.124296
36 1 0.99681 0.184407 0.0635109
68 1 0.999336 0.31025 0.0701043
1299 1 0.564805 0.49406 0.31706
26 1 0.807536 0.0652672 0.00236857
1047 1 0.689163 0.499728 0.0576469
44 1 0.251896 0.179965 0.0691536
71 1 0.187754 0.248177 0.0640497
75 1 0.312717 0.251595 0.0578449
76 1 0.250244 0.317939 0.0606777
166 1 0.187079 0.185066 0.128124
170 1 0.308998 0.186722 0.122213
198 1 0.187002 0.309502 0.116156
201 1 0.252763 0.245218 0.117518
202 1 0.315374 0.308769 0.12443
399 1 0.433587 0.00160303 0.441851
149 1 0.622178 0.00972119 0.125059
135 1 0.187274 0.00351238 0.188779
1169 1 0.504226 0.498751 0.126071
273 1 0.495698 0.0106569 0.246826
48 1 0.373628 0.184419 0.0589562
79 1 0.425733 0.248061 0.0495344
80 1 0.375251 0.312699 0.0540678
174 1 0.425357 0.185958 0.121675
205 1 0.374858 0.251973 0.127254
206 1 0.433163 0.308567 0.12612
594 1 0.55623 0.316416 0.504608
157 1 0.871235 0.00482566 0.124544
84 1 0.490206 0.316401 0.0572892
209 1 0.494286 0.259064 0.118721
52 1 0.489108 0.187363 0.0496367
56 1 0.617966 0.188643 0.0595058
83 1 0.557828 0.249117 0.0699118
88 1 0.61921 0.31093 0.0641994
178 1 0.555342 0.186155 0.123748
210 1 0.559595 0.31332 0.125588
213 1 0.620258 0.250002 0.119897
19 1 0.55746 0.00362829 0.0624681
60 1 0.747151 0.187426 0.0589547
87 1 0.681625 0.245182 0.056598
91 1 0.801812 0.242406 0.0480338
92 1 0.743215 0.301142 0.0628745
182 1 0.675198 0.186072 0.12493
186 1 0.809684 0.196354 0.117401
214 1 0.678933 0.308229 0.122982
217 1 0.742191 0.239073 0.125726
218 1 0.80651 0.305507 0.119243
93 1 0.880498 0.24131 -0.00482977
151 1 0.695042 0.00848102 0.185062
193 1 0.00449931 0.248631 0.118867
64 1 0.870158 0.189462 0.0602136
95 1 0.932976 0.252977 0.067636
96 1 0.868414 0.306694 0.0546391
190 1 0.929228 0.191047 0.12282
221 1 0.87225 0.253754 0.122212
222 1 0.929839 0.313347 0.120659
27 1 0.810758 0.00471761 0.0623462
387 1 0.0596874 0.00377499 0.434194
99 1 0.0661938 0.37278 0.0614175
104 1 0.121 0.43554 0.056738
226 1 0.0623343 0.435037 0.124126
229 1 0.127218 0.370399 0.117756
225 1 0.996691 0.371649 0.11822
1045 1 0.624746 0.505043 0.00359429
405 1 0.623778 0.0141616 0.376629
265 1 0.24092 -0.000595444 0.251833
510 1 0.937217 0.435069 0.375202
103 1 0.18578 0.380133 0.0573756
107 1 0.314975 0.368355 0.0590515
108 1 0.248743 0.442757 0.0609154
230 1 0.183577 0.439989 0.118677
233 1 0.252709 0.373548 0.121084
234 1 0.312385 0.442404 0.123752
125 1 0.869114 0.375147 -0.000947838
1165 1 0.376526 0.495527 0.126045
389 1 0.120897 -0.00226249 0.372747
395 1 0.308913 0.00141693 0.434433
111 1 0.433661 0.376087 0.0625817
112 1 0.379625 0.433807 0.0680247
237 1 0.37027 0.370276 0.125091
238 1 0.436502 0.428866 0.124198
116 1 0.501061 0.443249 0.0628898
1157 1 0.121972 0.504416 0.120187
7 1 0.19646 0.00972203 0.0559992
241 1 0.498519 0.373802 0.119876
115 1 0.55589 0.375592 0.061932
120 1 0.617548 0.437336 0.0640774
242 1 0.560032 0.432291 0.130668
245 1 0.618207 0.372246 0.124526
153 1 0.749006 0.00854941 0.122374
271 1 0.439382 0.00475893 0.309291
1159 1 0.184299 0.495243 0.189266
119 1 0.683797 0.374424 0.0593415
123 1 0.809076 0.364094 0.0673363
124 1 0.749335 0.43812 0.0614659
246 1 0.686568 0.427992 0.125739
249 1 0.74503 0.368394 0.115542
250 1 0.816158 0.428221 0.126443
100 1 0.00375483 0.432164 0.0583383
127 1 0.926898 0.367948 0.0578026
128 1 0.871962 0.431947 0.0630215
253 1 0.875854 0.372894 0.129024
254 1 0.935635 0.432016 0.12556
1179 1 0.814183 0.491977 0.184019
509 1 0.870301 0.374259 0.37427
136 1 0.120592 0.0551365 0.194115
163 1 0.0705511 0.127341 0.186227
258 1 0.0622918 0.0589623 0.248939
264 1 0.128109 0.0598891 0.31206
291 1 0.0645347 0.119213 0.306779
293 1 0.125629 0.12012 0.249454
132 1 0.987816 0.0609011 0.18874
403 1 0.560718 0.0020336 0.432143
1553 1 0.498696 0.499648 0.508573
140 1 0.244755 0.0646362 0.189152
167 1 0.184043 0.12875 0.196039
171 1 0.311753 0.122729 0.190474
262 1 0.187687 0.0600498 0.249464
266 1 0.305584 0.0607016 0.254277
268 1 0.246362 0.0577887 0.319813
295 1 0.190357 0.121992 0.305656
297 1 0.253475 0.132077 0.250277
299 1 0.314701 0.131551 0.313081
484 1 0.000566328 0.446378 0.434079
634 1 0.814616 0.436677 0.493601
144 1 0.362625 0.0612285 0.194324
175 1 0.427741 0.129614 0.179174
270 1 0.435901 0.0750522 0.238749
272 1 0.377471 0.0609766 0.315119
301 1 0.367365 0.12014 0.253515
303 1 0.438305 0.121859 0.316309
276 1 0.500894 0.0568406 0.318168
148 1 0.499717 0.0661468 0.180254
305 1 0.504789 0.122232 0.255145
23 1 0.68042 0.00691082 0.0593412
118 1 0.689267 0.437614 0.00406242
1307 1 0.8086 0.495831 0.312314
513 1 0.00215618 -0.00517743 0.503091
152 1 0.62029 0.0634377 0.184961
179 1 0.557616 0.121381 0.188944
274 1 0.566908 0.0682745 0.259407
280 1 0.632511 0.0619186 0.308727
307 1 0.556418 0.126871 0.31779
309 1 0.626535 0.124084 0.246539
1309 1 0.871125 0.499241 0.24911
1033 1 0.249897 0.498209 0.001755
409 1 0.75071 0.0152309 0.374048
156 1 0.754288 0.069813 0.189913
183 1 0.681185 0.128105 0.181959
187 1 0.814029 0.124124 0.181353
278 1 0.684504 0.0634149 0.247001
282 1 0.809364 0.0692012 0.254978
284 1 0.754176 0.0756386 0.317344
311 1 0.68525 0.129635 0.311026
313 1 0.74862 0.130382 0.248345
315 1 0.812765 0.134847 0.320949
1419 1 0.315715 0.494867 0.439646
129 1 -0.00174998 0.00784587 0.125321
507 1 0.80852 0.377723 0.433038
260 1 -0.000733971 0.0655272 0.311125
289 1 0.999474 0.128299 0.25229
160 1 0.879238 0.0657548 0.179244
191 1 0.935102 0.126812 0.188203
286 1 0.929889 0.0633034 0.250347
288 1 0.874247 0.0644894 0.311513
317 1 0.874142 0.130892 0.251594
319 1 0.936719 0.126473 0.313522
168 1 0.128665 0.190498 0.182749
195 1 0.0638386 0.254195 0.179955
200 1 0.128274 0.314733 0.182995
290 1 0.0616848 0.186181 0.23781
296 1 0.115468 0.186358 0.309739
322 1 0.0626778 0.310709 0.23786
323 1 0.0661877 0.251892 0.30564
325 1 0.1212 0.250364 0.245111
328 1 0.133232 0.314035 0.310526
292 1 0.0076235 0.188163 0.312216
321 1 1.00588 0.257703 0.244026
172 1 0.25212 0.191965 0.18379
199 1 0.19083 0.251945 0.183921
203 1 0.311006 0.250725 0.196551
204 1 0.250461 0.312127 0.178784
294 1 0.184604 0.191261 0.251312
298 1 0.307799 0.19291 0.251593
300 1 0.250105 0.185125 0.317521
326 1 0.187497 0.311181 0.247535
327 1 0.183871 0.253197 0.313652
329 1 0.245553 0.249969 0.25818
330 1 0.30932 0.313305 0.247553
331 1 0.310589 0.250423 0.318042
332 1 0.250962 0.312764 0.314428
176 1 0.366852 0.187495 0.187249
207 1 0.447969 0.237772 0.18686
208 1 0.368382 0.31587 0.191256
302 1 0.428851 0.181822 0.245703
304 1 0.378379 0.181158 0.31066
333 1 0.373432 0.243046 0.25096
334 1 0.44307 0.310992 0.252063
335 1 0.437374 0.24858 0.311959
336 1 0.376352 0.311258 0.311453
212 1 0.492215 0.310733 0.18457
308 1 0.494406 0.188648 0.322946
180 1 0.500878 0.183185 0.182689
337 1 0.505484 0.24619 0.250997
340 1 0.496508 0.314003 0.313997
184 1 0.615145 0.188746 0.184653
211 1 0.555461 0.247964 0.183844
216 1 0.617909 0.310663 0.183989
306 1 0.557005 0.184096 0.24531
312 1 0.614816 0.181921 0.306183
338 1 0.559819 0.311377 0.252876
339 1 0.561935 0.245542 0.316932
341 1 0.61536 0.243933 0.247987
344 1 0.621025 0.311702 0.311011
188 1 0.748198 0.192324 0.193647
215 1 0.673846 0.246049 0.18456
219 1 0.809026 0.249324 0.193138
220 1 0.742686 0.307059 0.188215
310 1 0.688062 0.197395 0.254971
314 1 0.808901 0.190507 0.253586
316 1 0.746331 0.193606 0.323544
342 1 0.689111 0.313374 0.252469
343 1 0.675596 0.253665 0.315018
345 1 0.746424 0.255497 0.253002
346 1 0.810361 0.309593 0.247774
347 1 0.817298 0.248351 0.324232
348 1 0.749094 0.304763 0.320748
324 1 1.0006 0.307797 0.317629
196 1 -0.000665678 0.310833 0.17763
164 1 0.994866 0.184153 0.180017
192 1 0.869288 0.18907 0.191916
223 1 0.941251 0.248891 0.181445
224 1 0.86885 0.311671 0.185456
318 1 0.938679 0.189475 0.25251
320 1 0.869805 0.190225 0.307378
349 1 0.868281 0.250515 0.254501
350 1 0.938343 0.309919 0.248617
351 1 0.93903 0.252757 0.30851
352 1 0.870413 0.313729 0.313958
506 1 0.810185 0.433879 0.365955
227 1 0.0623969 0.375015 0.184933
232 1 0.131085 0.43 0.18843
354 1 0.0608662 0.432494 0.247479
355 1 0.0708309 0.368037 0.316004
357 1 0.128244 0.380358 0.250379
360 1 0.122396 0.438821 0.311266
503 1 0.683776 0.376157 0.431719
593 1 0.49247 0.25196 0.500892
231 1 0.191384 0.373231 0.188646
235 1 0.307708 0.374044 0.182725
236 1 0.254147 0.438576 0.190559
358 1 0.187674 0.443982 0.250179
359 1 0.185094 0.381114 0.313706
361 1 0.256582 0.380483 0.249104
362 1 0.316944 0.440235 0.250391
363 1 0.301115 0.379789 0.308427
364 1 0.245711 0.439445 0.30913
1439 1 0.937599 0.499375 0.434579
481 1 0.010942 0.3716 0.377248
239 1 0.436918 0.371125 0.188567
240 1 0.369795 0.432634 0.182463
365 1 0.365568 0.371552 0.25549
366 1 0.431776 0.43602 0.252057
367 1 0.434685 0.374085 0.314096
368 1 0.370138 0.442397 0.3101
65 1 0.995043 0.251387 0.00824673
502 1 0.692069 0.437694 0.37969
372 1 0.492914 0.443511 0.314265
244 1 0.492437 0.436774 0.189545
369 1 0.490694 0.37726 0.256036
243 1 0.556417 0.366768 0.191681
248 1 0.627922 0.428133 0.190301
370 1 0.559836 0.437216 0.254895
371 1 0.556279 0.376866 0.311865
373 1 0.621454 0.376672 0.258014
376 1 0.628272 0.436435 0.318181
505 1 0.744222 0.373001 0.371845
508 1 0.749354 0.436362 0.439855
493 1 0.361998 0.37878 0.375077
247 1 0.6915 0.367814 0.188967
251 1 0.806432 0.364697 0.184604
252 1 0.752425 0.430366 0.180419
374 1 0.688214 0.428616 0.253272
375 1 0.681482 0.368475 0.316476
377 1 0.755554 0.373451 0.256271
378 1 0.812436 0.430178 0.249613
379 1 0.810064 0.369539 0.320331
380 1 0.740867 0.439634 0.312108
287 1 0.935066 0.0049256 0.311194
66 1 0.0612747 0.315084 -0.00115249
495 1 0.428398 0.368401 0.43817
356 1 1.0041 0.435782 0.317671
228 1 0.99921 0.434497 0.191259
353 1 0.0022622 0.367698 0.255976
255 1 0.93649 0.367569 0.189586
256 1 0.879929 0.428547 0.187025
381 1 0.869388 0.373235 0.248474
382 1 0.936982 0.431373 0.251288
383 1 0.937873 0.370221 0.317004
384 1 0.876892 0.43474 0.312958
1025 1 0.00629377 0.501948 0.00159751
386 1 0.0547721 0.0579157 0.37028
392 1 0.130833 0.0638588 0.444108
419 1 0.0603786 0.126141 0.43917
421 1 0.114293 0.129549 0.37139
388 1 1.00103 0.0608671 0.434969
487 1 0.188203 0.371879 0.44275
390 1 0.181615 0.0546533 0.377159
394 1 0.316134 0.0630138 0.373451
396 1 0.253223 0.0614877 0.431596
423 1 0.187589 0.133267 0.437765
425 1 0.243765 0.119559 0.372507
427 1 0.321429 0.116736 0.442178
1433 1 0.753018 0.500524 0.374159
489 1 0.249442 0.374609 0.374926
496 1 0.375752 0.437743 0.43454
398 1 0.435875 0.0598728 0.381304
400 1 0.37031 0.0554435 0.4452
429 1 0.381021 0.123218 0.376427
431 1 0.432447 0.121354 0.439438
433 1 0.499038 0.118515 0.380121
404 1 0.493809 0.0606048 0.438373
500 1 0.49356 0.430901 0.435339
491 1 0.310912 0.378403 0.441752
488 1 0.123208 0.441827 0.434602
402 1 0.561709 0.0600049 0.379623
408 1 0.623078 0.0646965 0.439238
435 1 0.558296 0.123016 0.432209
437 1 0.626479 0.121845 0.377347
485 1 0.124017 0.376822 0.378729
497 1 0.493839 0.370708 0.374888
483 1 0.0676058 0.367062 0.442513
406 1 0.686038 0.0663454 0.377085
410 1 0.817425 0.0657708 0.38097
412 1 0.743475 0.0700585 0.443113
439 1 0.688201 0.125469 0.448709
441 1 0.750548 0.129624 0.383535
443 1 0.809903 0.1258 0.447475
504 1 0.623457 0.434121 0.436766
147 1 0.555839 0.00845689 0.193457
629 1 0.619664 0.372107 0.504662
417 1 1.00697 0.12587 0.37446
414 1 0.938234 0.0658918 0.380801
416 1 0.868833 0.064422 0.447493
445 1 0.872865 0.129794 0.374802
447 1 0.93519 0.119119 0.438546
501 1 0.621296 0.364426 0.378913
482 1 0.0629091 0.441088 0.377638
499 1 0.554469 0.372104 0.431888
452 1 1.00426 0.314782 0.440628
418 1 0.0619421 0.191592 0.377543
424 1 0.126955 0.191256 0.441662
450 1 0.0617098 0.304732 0.373068
451 1 0.0646823 0.253568 0.445691
453 1 0.12333 0.247642 0.371007
456 1 0.124763 0.308002 0.432646
449 1 1.00231 0.253949 0.37834
490 1 0.309383 0.438211 0.371587
486 1 0.192021 0.441893 0.385786
494 1 0.438433 0.4367 0.370449
422 1 0.186374 0.187826 0.366289
426 1 0.314284 0.183815 0.376452
428 1 0.258325 0.184793 0.439543
454 1 0.190762 0.314973 0.375328
455 1 0.188551 0.242367 0.439423
457 1 0.248554 0.252892 0.374414
458 1 0.312259 0.316971 0.369075
459 1 0.316128 0.247264 0.436315
460 1 0.245318 0.307735 0.440389
430 1 0.428697 0.195833 0.381135
432 1 0.374338 0.187129 0.445561
461 1 0.371308 0.251147 0.37626
462 1 0.433943 0.312424 0.371399
463 1 0.431294 0.256266 0.439985
464 1 0.365503 0.315758 0.436492
468 1 0.494634 0.312929 0.43899
436 1 0.488916 0.189389 0.44024
465 1 0.493308 0.251439 0.380877
492 1 0.251329 0.430398 0.439965
448 1 0.875349 0.188588 0.442677
434 1 0.563913 0.188772 0.374256
469 1 0.622919 0.245474 0.378363
472 1 0.616579 0.313442 0.439644
466 1 0.55699 0.312444 0.371386
440 1 0.617156 0.182002 0.438725
467 1 0.561334 0.247841 0.444057
476 1 0.749512 0.312925 0.435477
442 1 0.814465 0.187044 0.383732
471 1 0.683164 0.249771 0.441744
475 1 0.814338 0.247701 0.437609
470 1 0.686739 0.306711 0.378526
438 1 0.683045 0.183761 0.37877
473 1 0.74742 0.247901 0.383043
474 1 0.809437 0.315367 0.381781
444 1 0.74864 0.191255 0.450721
498 1 0.554692 0.432526 0.374316
446 1 0.935993 0.185142 0.370855
479 1 0.94163 0.243588 0.43512
477 1 0.881275 0.248181 0.379238
478 1 0.938353 0.315268 0.372363
480 1 0.871831 0.313125 0.439182
1297 1 0.496291 0.495055 0.248378
420 1 0.99105 0.174184 0.436489
143 1 0.429276 0.00586517 0.183446
1293 1 0.377201 0.503832 0.249048
277 1 0.624067 0.00409885 0.247877
613 1 0.127461 0.376603 0.49926
139 1 0.307012 0.00324514 0.19212
413 1 0.869306 0.00630773 0.383953
407 1 0.692222 0.00188497 0.442249
279 1 0.691823 -0.000517449 0.309643
606 1 0.93545 0.307312 0.506107
1173 1 0.621708 0.496754 0.124168
1167 1 0.43253 0.498388 0.183671
1171 1 0.565351 0.492316 0.19006
1301 1 0.623017 0.489343 0.253869
514 1 0.0634455 0.0608737 0.496045
1027 1 0.0558708 0.500155 0.0648915
1041 1 0.498539 0.503759 -0.000705905
1175 1 0.681204 0.496817 0.186654
126 1 0.93292 0.437311 0.00450644
3 1 0.0592091 -0.00159804 0.0696167
1281 1 1.00056 0.499491 0.240279
1305 1 0.749761 0.50017 0.254165
1289 1 0.248665 0.501341 0.243721
630 1 0.686911 0.442669 0.492596
522 1 0.312263 0.0723695 0.510087
598 1 0.680886 0.313248 0.4986
1155 1 0.0661214 0.495319 0.186794
1051 1 0.810746 0.498904 0.0690436
415 1 0.939549 0.00219174 0.43438
1153 1 0.993836 0.497825 0.124282
1437 1 0.880799 0.501011 0.373133
582 1 0.189574 0.312193 0.502352
638 1 0.943563 0.432536 0.497424
275 1 0.561274 0.00157929 0.319538
281 1 0.750446 -0.00285196 0.243068
549 1 0.11986 0.12657 0.50196
618 1 0.318898 0.4371 0.504626
1053 1 0.875156 0.495943 0.00557447
614 1 0.190077 0.434955 0.500156
106 1 0.310214 0.437424 0.00506779
122 1 0.812816 0.434733 0.00402141
562 1 0.557869 0.179212 0.497798
30 1 0.941942 0.0655305 0.00315861
610 1 0.0595317 0.436739 0.494011
82 1 0.559211 0.300751 0.00718262
121 1 0.752913 0.377026 0.00237381
22 1 0.688651 0.0629397 -0.00406324
10 1 0.308024 0.0626474 0.00331403
561 1 0.498584 0.124196 0.501632
1557 1 0.624314 0.496011 0.497127
626 1 0.551828 0.437762 0.49544
585 1 0.250765 0.243408 0.500982
602 1 0.814283 0.316666 0.497787
90 1 0.803681 0.314252 -0.0014659
86 1 0.683767 0.319735 0.00146714
97 1 0.996505 0.367872 0.00515878
45 1 0.369297 0.120257 -0.00248663
520 1 0.13175 0.0638797 0.563948
547 1 0.0576713 0.120986 0.561606
642 1 0.0680608 0.0621498 0.618409
677 1 0.12573 0.135082 0.628238
622 1 0.427169 0.434367 0.49822
793 1 0.753128 -0.0051023 0.747784
569 1 0.755053 0.127749 0.508037
524 1 0.257873 0.0608068 0.569494
551 1 0.196987 0.121007 0.568316
555 1 0.306546 0.124529 0.568009
646 1 0.185018 0.0641438 0.639479
650 1 0.317783 0.0665877 0.632525
681 1 0.250474 0.129944 0.629273
1939 1 0.560698 0.50663 0.943791
62 1 0.93959 0.18528 0.999371
907 1 0.30855 0.0034666 0.934922
586 1 0.313663 0.307498 0.500812
528 1 0.369549 0.066278 0.565059
559 1 0.431662 0.120554 0.570323
654 1 0.436555 0.0561097 0.633708
685 1 0.382367 0.125791 0.638053
532 1 0.494567 0.0662146 0.565925
915 1 0.562842 0.00424316 0.932662
577 1 0.00066403 0.257951 0.50316
61 1 0.875589 0.137828 1.00011
689 1 0.500263 0.111962 0.625826
536 1 0.624857 0.0581825 0.565834
563 1 0.567517 0.119932 0.554434
658 1 0.56405 0.0569067 0.627249
693 1 0.632336 0.11785 0.619793
1537 1 1.00203 0.495819 0.500873
18 1 0.552437 0.0550369 0.994667
2 1 0.0692924 0.0584829 0.999781
649 1 0.246001 0.00269413 0.624026
558 1 0.436964 0.183844 0.500006
540 1 0.753643 0.0615364 0.562353
567 1 0.694438 0.121067 0.568074
571 1 0.815619 0.12592 0.564047
662 1 0.691703 0.0494298 0.630945
666 1 0.816532 0.0648758 0.625893
697 1 0.750009 0.120841 0.626503
1925 1 0.122531 0.495066 0.881477
542 1 0.94055 0.0572749 0.498411
527 1 0.431903 -0.00516139 0.564079
673 1 0.000940392 0.133999 0.619174
516 1 -0.000917952 0.0555098 0.559262
544 1 0.880443 0.064396 0.559048
575 1 0.936945 0.129905 0.562074
670 1 0.934666 0.0610092 0.615348
701 1 0.873932 0.123083 0.625542
557 1 0.374999 0.128586 0.504603
993 1 -0.00100561 0.372181 0.875613
1024 1 0.876967 0.442389 0.936439
671 1 0.942395 0.00697321 0.691167
552 1 0.12301 0.185431 0.565919
579 1 0.0648525 0.256648 0.565046
584 1 0.131709 0.318887 0.561235
674 1 0.0649303 0.198303 0.627318
706 1 0.0619829 0.316223 0.628171
709 1 0.128156 0.254744 0.619783
705 1 -0.00128148 0.259788 0.623692
580 1 0.990938 0.31164 0.567271
81 1 0.497666 0.2549 0.995831
537 1 0.752983 0.00140795 0.504568
570 1 0.810511 0.193734 0.506025
556 1 0.250426 0.190508 0.566014
583 1 0.187515 0.250779 0.557308
587 1 0.314585 0.24718 0.56319
588 1 0.2512 0.305397 0.563209
678 1 0.189027 0.19737 0.626798
682 1 0.322116 0.181375 0.633134
710 1 0.188297 0.319363 0.622802
713 1 0.260447 0.247599 0.62606
714 1 0.313312 0.315825 0.633166
521 1 0.25343 0.00278238 0.507944
641 1 0.00719583 -0.00109398 0.621435
1797 1 0.11924 0.507964 0.752864
560 1 0.377736 0.185377 0.568479
591 1 0.43915 0.243089 0.568904
592 1 0.376556 0.305412 0.56337
686 1 0.440003 0.186312 0.632527
717 1 0.376515 0.247224 0.637118
718 1 0.442238 0.314813 0.629072
564 1 0.501187 0.174644 0.567482
596 1 0.493984 0.313213 0.564016
721 1 0.49566 0.244644 0.631859
645 1 0.125629 0.00274768 0.62678
41 1 0.246348 0.127506 1.0003
568 1 0.622027 0.182878 0.567253
595 1 0.559811 0.246611 0.552455
600 1 0.623164 0.31034 0.56095
690 1 0.563809 0.175682 0.619224
722 1 0.553627 0.311238 0.623405
725 1 0.625692 0.252614 0.620623
651 1 0.309628 0.000138627 0.693661
554 1 0.313168 0.188378 0.501314
98 1 0.0591768 0.433616 1.00155
1567 1 0.941977 0.497264 0.562401
572 1 0.751706 0.192175 0.574543
599 1 0.692181 0.251527 0.565266
603 1 0.811684 0.263683 0.558798
604 1 0.748972 0.320323 0.56434
694 1 0.682434 0.182656 0.621375
698 1 0.81791 0.190578 0.623131
726 1 0.69196 0.314276 0.627084
729 1 0.756297 0.249914 0.631203
730 1 0.815621 0.319474 0.620636
545 1 0.997953 0.124321 0.501798
589 1 0.377746 0.247157 0.502307
923 1 0.816701 0.00573719 0.946332
548 1 0.000667148 0.195278 0.56051
576 1 0.874892 0.193081 0.565144
607 1 0.940291 0.249239 0.570753
608 1 0.869993 0.317699 0.557173
702 1 0.931471 0.186556 0.622913
733 1 0.870601 0.254907 0.623149
734 1 0.925263 0.320171 0.625621
1547 1 0.312956 0.501361 0.557046
611 1 0.0658364 0.375709 0.564091
616 1 0.127719 0.440312 0.565427
738 1 0.0562173 0.428965 0.632676
741 1 0.131057 0.377199 0.630388
737 1 0.996789 0.371264 0.624123
781 1 0.381182 0.00997321 0.749395
791 1 0.691725 -0.00160871 0.805488
1543 1 0.188688 0.499363 0.56226
1023 1 0.931821 0.370357 0.945161
615 1 0.188365 0.378079 0.557234
619 1 0.309841 0.370973 0.570222
620 1 0.253233 0.443236 0.565555
742 1 0.194254 0.439672 0.62402
745 1 0.25072 0.371757 0.627572
746 1 0.312975 0.437034 0.630732
1539 1 0.0588372 0.496311 0.562375
6 1 0.181984 0.0647442 0.994589
1943 1 0.691615 0.501605 0.947997
539 1 0.817266 -0.000123343 0.570195
1549 1 0.377148 0.499382 0.497067
623 1 0.447483 0.379261 0.565766
624 1 0.37012 0.441756 0.56565
749 1 0.373035 0.37574 0.624512
750 1 0.440564 0.433537 0.628986
779 1 0.307882 -0.00580725 0.813439
57 1 0.746963 0.121338 0.998809
1793 1 0.991205 0.498143 0.752245
1559 1 0.692817 0.502796 0.553522
38 1 0.190795 0.184792 0.992681
753 1 0.501139 0.37768 0.632665
628 1 0.49647 0.444969 0.569415
627 1 0.553295 0.377699 0.565927
632 1 0.618703 0.43133 0.559261
754 1 0.564155 0.436173 0.630421
757 1 0.620295 0.374733 0.62502
1669 1 0.126878 0.492294 0.627723
538 1 0.817326 0.0664277 0.512482
105 1 0.251853 0.370588 0.993654
631 1 0.68448 0.381692 0.562648
635 1 0.815626 0.383365 0.559363
636 1 0.746621 0.439818 0.553694
758 1 0.689815 0.436152 0.616262
761 1 0.754212 0.384808 0.614688
762 1 0.813321 0.447864 0.627367
1945 1 0.749495 0.497839 0.889284
612 1 0.999254 0.431736 0.563813
639 1 0.932801 0.374911 0.564894
640 1 0.880916 0.441759 0.558351
765 1 0.874705 0.386871 0.622347
766 1 0.935803 0.43869 0.622224
1022 1 0.937192 0.434539 0.874082
797 1 0.878224 0.00411266 0.749744
1927 1 0.191453 0.493824 0.944722
1029 1 0.122738 0.497978 1.00219
648 1 0.12172 0.0622518 0.687204
675 1 0.0662074 0.127745 0.682363
770 1 0.0640462 0.0586692 0.74408
776 1 0.126396 0.0576898 0.809042
803 1 0.0674019 0.125356 0.803745
805 1 0.127536 0.139203 0.743945
927 1 0.942245 -0.00141871 0.949465
801 1 0.00568167 0.128401 0.743858
772 1 1.00309 0.0704669 0.809061
919 1 0.682587 0.0019588 0.931605
652 1 0.250989 0.0650734 0.687273
679 1 0.184511 0.13316 0.688212
683 1 0.317823 0.120284 0.695459
774 1 0.18217 0.0600881 0.746403
778 1 0.307961 0.0649536 0.753731
780 1 0.246768 0.0573976 0.81069
807 1 0.179331 0.122554 0.805313
809 1 0.243984 0.126876 0.746461
811 1 0.309986 0.12689 0.808922
1683 1 0.569332 0.501444 0.692368
1021 1 0.87614 0.375869 0.877424
656 1 0.378179 0.064299 0.687648
687 1 0.44264 0.124236 0.696055
782 1 0.444849 0.0577035 0.754507
784 1 0.379442 0.0651097 0.812942
813 1 0.378418 0.121869 0.749705
815 1 0.437178 0.121669 0.812255
660 1 0.504348 0.0628003 0.68979
109 1 0.369229 0.378282 0.995817
788 1 0.498053 0.0603026 0.826033
817 1 0.506063 0.127929 0.748253
664 1 0.627037 0.0545937 0.690164
691 1 0.570081 0.119458 0.683579
786 1 0.56481 0.0590456 0.757323
792 1 0.629523 0.0559066 0.810056
819 1 0.561604 0.123867 0.818721
821 1 0.622458 0.123418 0.7486
773 1 0.124833 -0.00590257 0.74241
668 1 0.753447 0.0539192 0.685201
695 1 0.686334 0.121238 0.691084
699 1 0.817147 0.12597 0.683403
790 1 0.687416 0.0647777 0.749953
794 1 0.817808 0.0640402 0.736957
796 1 0.758663 0.0578978 0.814871
823 1 0.685287 0.130392 0.80676
825 1 0.752967 0.127255 0.742855
827 1 0.808456 0.135244 0.809157
565 1 0.623601 0.116238 0.500296
1821 1 0.879379 0.503487 0.74805
644 1 1.01093 0.0623358 0.678033
672 1 0.877729 0.055021 0.68023
703 1 0.936386 0.12157 0.686845
798 1 0.947289 0.0639325 0.752982
800 1 0.878074 0.0578457 0.811311
829 1 0.877438 0.121161 0.75182
831 1 0.933734 0.119142 0.813423
1695 1 0.929053 0.494397 0.687729
707 1 0.0641987 0.252074 0.688043
680 1 0.126148 0.193421 0.679776
712 1 0.120058 0.320409 0.693413
802 1 0.0596452 0.184143 0.751789
808 1 0.125625 0.190659 0.816219
834 1 0.0556275 0.312307 0.751216
835 1 0.0566105 0.25044 0.814311
837 1 0.122176 0.253705 0.753805
840 1 0.124546 0.315394 0.812678
708 1 0.996548 0.311158 0.688598
676 1 1.0005 0.188172 0.681023
833 1 -0.00541025 0.245447 0.740094
836 1 0.00143783 0.308736 0.812507
804 1 0.988342 0.186119 0.814913
684 1 0.251837 0.188227 0.685974
711 1 0.188693 0.252311 0.68777
715 1 0.317207 0.24878 0.693829
716 1 0.253423 0.307438 0.690819
806 1 0.186852 0.197813 0.748419
810 1 0.316279 0.189911 0.753938
812 1 0.245534 0.18476 0.809464
838 1 0.189161 0.302301 0.748456
839 1 0.188794 0.252259 0.814666
841 1 0.254622 0.245697 0.75645
842 1 0.307838 0.313894 0.756485
843 1 0.321442 0.249042 0.816542
844 1 0.254292 0.303871 0.82155
688 1 0.377087 0.187468 0.696234
719 1 0.440739 0.248415 0.691516
720 1 0.375133 0.314513 0.692143
814 1 0.44029 0.181912 0.752116
816 1 0.379691 0.186069 0.814248
845 1 0.3806 0.253459 0.749431
846 1 0.437281 0.317006 0.750437
847 1 0.445399 0.245529 0.810953
848 1 0.386744 0.305582 0.817391
852 1 0.497245 0.310968 0.810356
724 1 0.502124 0.310689 0.69365
692 1 0.511934 0.177779 0.680611
849 1 0.502211 0.246291 0.750848
820 1 0.496381 0.183281 0.812926
696 1 0.625775 0.190532 0.683802
723 1 0.561455 0.253796 0.684569
728 1 0.625381 0.315557 0.68699
818 1 0.559589 0.188807 0.742905
824 1 0.619607 0.187349 0.813833
850 1 0.566156 0.316116 0.74791
851 1 0.564976 0.251369 0.812681
853 1 0.617986 0.245511 0.750164
856 1 0.629627 0.321826 0.801822
700 1 0.757228 0.1845 0.680527
727 1 0.684498 0.256217 0.690623
731 1 0.815739 0.252477 0.686425
732 1 0.752226 0.317235 0.680642
822 1 0.686717 0.187401 0.74821
826 1 0.809919 0.185674 0.746711
828 1 0.7512 0.19117 0.805983
854 1 0.694727 0.322563 0.748584
855 1 0.686096 0.252377 0.817034
857 1 0.742993 0.252918 0.740782
858 1 0.808763 0.318786 0.749051
859 1 0.811348 0.25074 0.798468
860 1 0.751659 0.314195 0.80856
704 1 0.875741 0.17878 0.688885
735 1 0.933495 0.256997 0.683586
736 1 0.871927 0.317588 0.687976
830 1 0.940784 0.183329 0.740639
832 1 0.869679 0.187951 0.812422
861 1 0.877459 0.249787 0.745531
862 1 0.931573 0.318196 0.746247
863 1 0.940871 0.255635 0.801769
864 1 0.869934 0.316708 0.805528
601 1 0.74259 0.2578 0.499507
739 1 0.0563341 0.374507 0.692863
744 1 0.128908 0.438221 0.68827
866 1 0.0629356 0.440446 0.748594
867 1 0.0611056 0.373298 0.812894
869 1 0.122433 0.380542 0.752779
872 1 0.119652 0.443743 0.807036
740 1 0.997107 0.437158 0.69012
655 1 0.443664 -0.0025599 0.689671
546 1 0.0587938 0.188588 0.504159
1016 1 0.621547 0.444108 0.938877
1941 1 0.627034 0.501051 0.880857
743 1 0.186716 0.379317 0.692182
747 1 0.314459 0.381281 0.687005
748 1 0.250739 0.442319 0.683747
870 1 0.183858 0.436186 0.750206
871 1 0.186818 0.365453 0.814812
873 1 0.242898 0.370788 0.750032
874 1 0.307149 0.433902 0.749398
875 1 0.31178 0.374761 0.814051
876 1 0.24226 0.436827 0.800698
1665 1 -0.00559742 0.492131 0.627391
1679 1 0.440654 0.501749 0.686408
70 1 0.186171 0.312219 1.00009
1004 1 0.255361 0.43639 0.939838
751 1 0.440913 0.376585 0.692101
752 1 0.371416 0.440245 0.68721
877 1 0.37196 0.377175 0.759739
878 1 0.440713 0.436054 0.754343
879 1 0.437909 0.37666 0.819487
880 1 0.379055 0.437858 0.818625
881 1 0.505977 0.378019 0.753568
756 1 0.498816 0.434908 0.692739
884 1 0.499003 0.43569 0.815328
755 1 0.566648 0.377207 0.694001
760 1 0.640073 0.437808 0.681857
882 1 0.559522 0.440009 0.756504
883 1 0.561137 0.380177 0.811597
885 1 0.630184 0.380641 0.747765
888 1 0.627129 0.445482 0.809218
769 1 -0.00142323 -0.00260511 0.752285
759 1 0.693784 0.375981 0.683507
763 1 0.811313 0.380667 0.686408
764 1 0.754065 0.447292 0.692427
886 1 0.696026 0.435119 0.747486
887 1 0.690581 0.38427 0.811847
889 1 0.754614 0.376469 0.748265
890 1 0.812884 0.44698 0.751242
891 1 0.811137 0.379799 0.814901
892 1 0.751904 0.439376 0.821488
78 1 0.440268 0.316643 0.993366
868 1 1.00077 0.434083 0.812979
865 1 0.998381 0.368715 0.759263
767 1 0.932485 0.380069 0.686695
768 1 0.871924 0.440732 0.685254
893 1 0.866642 0.386206 0.750855
894 1 0.938389 0.430936 0.752536
895 1 0.932604 0.369964 0.819313
896 1 0.872956 0.443496 0.817113
1020 1 0.745114 0.438213 0.943246
1018 1 0.817286 0.43382 0.877968
898 1 0.0599782 0.0586898 0.873498
904 1 0.123409 0.0566242 0.934349
931 1 0.0620327 0.125357 0.933316
933 1 0.120467 0.120495 0.870984
1017 1 0.751044 0.375049 0.874873
526 1 0.43217 0.0658981 0.500703
902 1 0.183618 0.0550562 0.867281
906 1 0.311256 0.0574343 0.873505
908 1 0.24975 0.0728404 0.939862
935 1 0.184014 0.121179 0.930781
937 1 0.249541 0.121055 0.871842
939 1 0.312461 0.121532 0.942065
531 1 0.562235 -0.000313663 0.562881
1019 1 0.807598 0.376542 0.938672
911 1 0.431789 -0.00674477 0.93852
910 1 0.434334 0.0524443 0.877306
912 1 0.371561 0.0575715 0.938881
941 1 0.366304 0.121851 0.879174
943 1 0.428441 0.115478 0.930768
1667 1 0.0578288 0.496603 0.684846
945 1 0.500475 0.12679 0.878996
916 1 0.493044 0.0594855 0.929796
914 1 0.569003 0.0657609 0.877817
920 1 0.629474 0.0650191 0.929492
947 1 0.567363 0.131447 0.945343
949 1 0.628383 0.131349 0.872293
1009 1 0.511993 0.375359 0.877714
1002 1 0.309179 0.437271 0.875262
1015 1 0.69282 0.375735 0.936968
573 1 0.879476 0.127844 0.500784
999 1 0.19322 0.38167 0.93161
918 1 0.684345 0.0649304 0.863203
922 1 0.813061 0.0598948 0.877239
924 1 0.749941 0.0641289 0.93275
951 1 0.687284 0.121037 0.93687
953 1 0.741196 0.126015 0.869629
955 1 0.816121 0.119394 0.935434
1013 1 0.630606 0.375161 0.874175
1008 1 0.370738 0.432854 0.932977
1003 1 0.306007 0.371193 0.938153
917 1 0.628816 -0.00385143 0.867241
900 1 -0.00109177 0.0655538 0.935453
929 1 0.992218 0.125273 0.873866
926 1 0.936343 0.0587245 0.885192
928 1 0.882017 0.0717588 0.950134
957 1 0.876621 0.117969 0.879668
959 1 0.940529 0.124522 0.942026
54 1 0.68649 0.184882 0.998449
1006 1 0.442338 0.445788 0.875422
1563 1 0.802922 0.497011 0.563361
930 1 0.0519292 0.184241 0.87003
936 1 0.126055 0.191854 0.922784
962 1 0.064707 0.310478 0.871523
963 1 0.0646766 0.243922 0.943132
965 1 0.123577 0.248933 0.868595
968 1 0.113759 0.309978 0.933906
964 1 -0.00432107 0.313731 0.941476
934 1 0.186825 0.186424 0.873781
938 1 0.314456 0.180369 0.871143
940 1 0.248793 0.186002 0.935016
966 1 0.183366 0.314789 0.882927
967 1 0.189566 0.25393 0.935531
969 1 0.256039 0.248688 0.878877
970 1 0.325006 0.307817 0.876329
971 1 0.314343 0.250542 0.937161
972 1 0.253318 0.308402 0.933551
73 1 0.254906 0.245116 0.993882
973 1 0.382355 0.244636 0.875007
944 1 0.364398 0.18283 0.937198
942 1 0.439167 0.179439 0.872364
976 1 0.379027 0.312505 0.93545
975 1 0.435755 0.238876 0.933621
974 1 0.444767 0.31521 0.87983
37 1 0.12234 0.12821 0.992927
948 1 0.49592 0.185079 0.939836
977 1 0.505022 0.251403 0.876818
980 1 0.501134 0.316035 0.938355
979 1 0.559567 0.251601 0.939301
981 1 0.620809 0.253979 0.870035
978 1 0.564997 0.31453 0.871258
984 1 0.62176 0.314198 0.934434
946 1 0.565118 0.187178 0.879514
952 1 0.62929 0.187839 0.933668
74 1 0.313989 0.308109 0.994363
1012 1 0.497919 0.439821 0.942625
1011 1 0.56372 0.381925 0.939045
1005 1 0.378952 0.374161 0.874435
987 1 0.80732 0.250883 0.939982
956 1 0.745169 0.188023 0.932163
988 1 0.754402 0.312891 0.937019
986 1 0.811924 0.315569 0.871033
982 1 0.693408 0.312968 0.87302
954 1 0.809884 0.184073 0.87133
983 1 0.693744 0.257463 0.934125
985 1 0.751423 0.252605 0.868811
950 1 0.690626 0.191681 0.873873
1007 1 0.439572 0.381232 0.928617
961 1 1.00038 0.263973 0.880127
932 1 0.994088 0.19113 0.93387
989 1 0.8662 0.248824 0.866272
990 1 0.932044 0.307481 0.881436
991 1 0.935294 0.249187 0.936558
992 1 0.863505 0.312082 0.941181
960 1 0.878777 0.184605 0.93273
958 1 0.928958 0.1918 0.875327
625 1 0.499199 0.379586 0.49374
1014 1 0.685259 0.44084 0.880049
996 1 0.999453 0.437599 0.936004
998 1 0.19642 0.446706 0.875129
995 1 0.0602761 0.377466 0.937535
994 1 0.0547529 0.44441 0.874861
1000 1 0.124192 0.437362 0.936202
997 1 0.120281 0.380514 0.873768
1010 1 0.566489 0.444572 0.875755
1001 1 0.249213 0.372023 0.870185
25 1 0.753987 0.00632242 0.998624
50 1 0.567114 0.196864 0.999275
921 1 0.751773 0.00125647 0.877285
525 1 0.369676 0.00259548 0.504235
102 1 0.187492 0.437397 1.00093
519 1 0.180947 -0.00370581 0.563887
42 1 0.310464 0.18884 1.00575
53 1 0.621379 0.113364 0.9987
85 1 0.62654 0.252614 0.995474
1819 1 0.812193 0.496176 0.817147
1555 1 0.564202 0.495491 0.566807
1809 1 0.499242 0.497891 0.755967
1823 1 0.93717 0.497388 0.814288
1671 1 0.191709 0.496829 0.692896
1947 1 0.819273 0.498189 0.936076
566 1 0.683779 0.192063 0.501619
58 1 0.807785 0.178546 0.988436
117 1 0.620689 0.379059 0.996554
590 1 0.434402 0.31504 0.5054
617 1 0.256377 0.375089 0.506078
574 1 0.943521 0.192726 0.503428
609 1 0.995199 0.369009 0.505173
46 1 0.426035 0.176533 0.993748
597 1 0.621345 0.247972 0.499617
14 1 0.433379 0.0598624 0.995022
77 1 0.372896 0.247358 0.99196
69 1 0.13346 0.248452 0.996593
33 1 -0.00178438 0.127283 0.997568
534 1 0.688305 0.0610608 0.506939
581 1 0.124227 0.248313 0.509484
578 1 0.070565 0.310703 0.502623
1565 1 0.874754 0.500568 0.497846
553 1 0.253015 0.12952 0.50777
89 1 0.744447 0.245645 0.990393
605 1 0.87476 0.251831 0.504795
530 1 0.566773 0.0588649 0.503025
101 1 0.132331 0.370812 0.989383
550 1 0.1869 0.187954 0.505901
633 1 0.749113 0.375669 0.497621
518 1 0.193804 0.0574243 0.504038
49 1 0.490358 0.115626 0.98479
621 1 0.372454 0.367936 0.509342
1032 1 0.114924 0.559958 0.0577191
1059 1 0.0651581 0.627753 0.0646464
1154 1 0.064082 0.57123 0.123028
1189 1 0.126343 0.629472 0.136536
1185 1 0.992794 0.621393 0.12107
1028 1 0.00737359 0.567881 0.0612529
1034 1 0.311489 0.558802 0.00135172
533 1 0.626115 0.997145 0.50068
1285 1 0.122336 0.50588 0.254353
1036 1 0.250535 0.562427 0.053636
1063 1 0.180989 0.613915 0.0647544
1067 1 0.318374 0.624636 0.0580347
1158 1 0.188701 0.560435 0.128679
1162 1 0.324269 0.563211 0.128192
1193 1 0.256302 0.620975 0.118553
385 1 0.994065 0.99067 0.373665
31 1 0.933657 0.997117 0.0661854
133 1 0.128408 0.99254 0.129086
1633 1 0.99406 0.868846 0.502706
1040 1 0.380278 0.555613 0.0634413
1071 1 0.433235 0.628488 0.0615363
1166 1 0.432932 0.561359 0.12763
1197 1 0.376683 0.631683 0.124524
1183 1 0.937901 0.49407 0.186333
1077 1 0.626184 0.624026 0.00283692
1630 1 0.927892 0.805784 0.501696
1044 1 0.500327 0.561177 0.0593142
1201 1 0.500325 0.622207 0.127423
1048 1 0.628649 0.565981 0.063782
1075 1 0.562193 0.624148 0.064526
1170 1 0.5646 0.561439 0.131297
1205 1 0.616266 0.624348 0.12566
1058 1 0.0642406 0.685207 -0.00256915
1181 1 0.879083 0.496643 0.121808
1062 1 0.19512 0.681864 0.0120569
1052 1 0.749433 0.563011 0.0606443
1079 1 0.692837 0.62924 0.065415
1083 1 0.819164 0.618793 0.0713234
1174 1 0.681958 0.572857 0.128517
1178 1 0.818161 0.555961 0.130836
1209 1 0.751917 0.624767 0.116956
1425 1 0.498351 0.499633 0.386004
1291 1 0.315394 0.501694 0.310291
1593 1 0.752093 0.62642 0.492916
1421 1 0.375908 0.500175 0.378607
1056 1 0.878283 0.558194 0.0624591
1087 1 0.936284 0.621303 0.0638134
1182 1 0.938024 0.562071 0.116589
1213 1 0.883356 0.623954 0.128549
1417 1 0.253601 0.499064 0.373874
1064 1 0.130929 0.692367 0.0626692
1091 1 0.072436 0.754316 0.0595951
1096 1 0.142364 0.811543 0.0673628
1186 1 0.0639918 0.693339 0.124366
1218 1 0.0667091 0.8099 0.123719
1221 1 0.137679 0.754431 0.134241
1092 1 0.0055662 0.816631 0.0574917
1217 1 -0.00214664 0.747629 0.113843
1060 1 5.68699e-05 0.684244 0.0623226
1069 1 0.378912 0.621402 0.000929941
1068 1 0.258431 0.684535 0.068945
1095 1 0.202713 0.746289 0.0653109
1099 1 0.314837 0.748667 0.0640697
1100 1 0.248785 0.81143 0.0627846
1190 1 0.196377 0.680677 0.118825
1194 1 0.314822 0.689949 0.133862
1222 1 0.19432 0.826553 0.131034
1225 1 0.25404 0.753408 0.124971
1226 1 0.322651 0.823909 0.12766
1035 1 0.309427 0.506118 0.0677806
1072 1 0.374717 0.690148 0.0654375
1103 1 0.436911 0.752091 0.0627227
1104 1 0.375894 0.808795 0.0627334
1198 1 0.437986 0.685415 0.129722
1229 1 0.370975 0.750711 0.135416
1230 1 0.434787 0.81594 0.124523
1076 1 0.493752 0.68269 0.0658254
1427 1 0.559789 0.505715 0.440921
141 1 0.376487 0.995923 0.123767
1039 1 0.437423 0.49798 0.062326
1233 1 0.493994 0.752325 0.124416
1411 1 0.0657595 0.502633 0.444827
1108 1 0.496987 0.81015 0.0611955
1080 1 0.628112 0.687415 0.0633754
1107 1 0.56219 0.749913 0.0682117
1112 1 0.623176 0.817183 0.052469
1202 1 0.56359 0.685602 0.121354
1234 1 0.563209 0.81603 0.121707
1237 1 0.623158 0.740989 0.125684
1084 1 0.749034 0.693022 0.0602556
1111 1 0.681741 0.746234 0.0593661
1115 1 0.816785 0.743941 0.060433
1116 1 0.745913 0.816685 0.0607691
1206 1 0.685739 0.683901 0.121577
1210 1 0.808779 0.692469 0.123821
1238 1 0.691569 0.809814 0.124277
1241 1 0.748496 0.74967 0.122163
1242 1 0.804702 0.816806 0.120554
1088 1 0.878458 0.685213 0.0720034
1119 1 0.935664 0.742693 0.0632042
1120 1 0.878057 0.815365 0.0607125
1214 1 0.945137 0.685623 0.130573
1245 1 0.879329 0.751538 0.129888
1246 1 0.943727 0.809104 0.114373
1622 1 0.692976 0.811654 0.494779
1413 1 0.12047 0.495645 0.371197
1283 1 0.0546082 0.500078 0.304877
1123 1 0.0730764 0.865931 0.0627788
1128 1 0.12755 0.947484 0.061513
1250 1 0.0591109 0.931035 0.126761
1253 1 0.129472 0.875806 0.122052
1124 1 0.998015 0.945313 0.0643932
1303 1 0.68356 0.498141 0.314657
1094 1 0.192857 0.809836 0.00207593
1287 1 0.191898 0.50309 0.308785
261 1 0.121963 0.990816 0.243934
131 1 0.0546367 0.996253 0.187784
1429 1 0.623806 0.496572 0.381536
1127 1 0.189213 0.878947 0.0601141
1131 1 0.307919 0.879808 0.0658321
1132 1 0.249394 0.941041 0.0599951
1254 1 0.18953 0.939619 0.115086
1257 1 0.250138 0.880467 0.128236
1258 1 0.311132 0.943873 0.134153
1109 1 0.621953 0.745517 0.00015862
1110 1 0.687532 0.816357 0.0033453
1135 1 0.435571 0.879114 0.0720878
1136 1 0.374022 0.937474 0.0630931
1261 1 0.378662 0.88894 0.132464
1262 1 0.443937 0.941534 0.128093
263 1 0.188486 1.0029 0.310535
1140 1 0.50203 0.942953 0.0592541
1265 1 0.506422 0.874873 0.1237
1139 1 0.556596 0.879151 0.060525
1144 1 0.621635 0.945433 0.0578839
1266 1 0.564494 0.943652 0.123903
1269 1 0.619232 0.878221 0.117659
1118 1 0.936069 0.813454 0.0059418
1143 1 0.683523 0.889053 0.0639817
1147 1 0.810346 0.878065 0.066988
1148 1 0.749587 0.952574 0.0600235
1270 1 0.682722 0.951353 0.12195
1273 1 0.745871 0.883387 0.118423
1274 1 0.813721 0.945124 0.123108
1105 1 0.500616 0.747003 0.00667807
1093 1 0.128069 0.746579 -0.00894124
1249 1 0.000324537 0.873106 0.122889
1151 1 0.936775 0.878886 0.0630736
1152 1 0.867443 0.944226 0.0682278
1277 1 0.880401 0.877653 0.122001
1278 1 0.934087 0.943616 0.130017
1030 1 0.186798 0.559687 -0.000290833
1106 1 0.559312 0.815543 0.00165277
1160 1 0.115275 0.564224 0.189282
1187 1 0.059177 0.630721 0.183893
1282 1 0.0569236 0.565953 0.248679
1288 1 0.120293 0.563509 0.31746
1315 1 0.0621943 0.623162 0.309316
1317 1 0.125433 0.636065 0.248102
1156 1 0.998142 0.561834 0.18516
401 1 0.493337 0.995718 0.375126
1295 1 0.42886 0.505026 0.314981
1066 1 0.311346 0.685604 -0.000105558
1164 1 0.253675 0.563442 0.191679
1191 1 0.194069 0.621444 0.188482
1195 1 0.315778 0.628551 0.187548
1286 1 0.181868 0.569075 0.247877
1290 1 0.315468 0.561823 0.244117
1292 1 0.253343 0.563803 0.307485
1319 1 0.188092 0.627623 0.307917
1321 1 0.253578 0.630525 0.257303
1323 1 0.319516 0.623448 0.306935
1121 1 0.0203969 0.886688 -0.000283972
1415 1 0.184821 0.502518 0.441355
397 1 0.375435 1.00333 0.378837
1168 1 0.374159 0.557899 0.191903
1199 1 0.436538 0.625421 0.19024
1294 1 0.440178 0.557273 0.247749
1296 1 0.373731 0.56496 0.307728
1325 1 0.371204 0.627559 0.245262
1327 1 0.440746 0.625101 0.305118
1300 1 0.495777 0.55357 0.315652
1038 1 0.441255 0.560023 0.0045048
1329 1 0.505599 0.621234 0.24829
1172 1 0.50212 0.553735 0.191986
1176 1 0.62749 0.560207 0.186773
1203 1 0.563813 0.623708 0.188959
1298 1 0.561195 0.553701 0.250965
1304 1 0.625715 0.562357 0.316495
1331 1 0.559726 0.619864 0.315694
1333 1 0.62542 0.622892 0.252451
1055 1 0.944246 0.495398 0.0609427
1311 1 0.939608 0.490969 0.306371
1026 1 0.0665943 0.569201 -0.00323842
1180 1 0.74873 0.561551 0.182706
1207 1 0.688152 0.62955 0.186597
1211 1 0.814349 0.625921 0.189503
1302 1 0.682533 0.556035 0.252643
1306 1 0.812645 0.560357 0.242095
1308 1 0.751485 0.559964 0.308994
1335 1 0.687368 0.620915 0.318082
1337 1 0.742412 0.626977 0.249177
1339 1 0.817139 0.622158 0.305868
1313 1 -0.00120122 0.623464 0.247626
1284 1 0.00332143 0.567494 0.318572
1184 1 0.882171 0.556734 0.189368
1215 1 0.938917 0.621756 0.188047
1310 1 0.94325 0.558431 0.253149
1312 1 0.875335 0.560113 0.303199
1341 1 0.88034 0.619451 0.24496
1343 1 0.93652 0.626267 0.312087
1533 1 0.869639 0.872747 0.370922
1192 1 0.134306 0.692059 0.187447
1219 1 0.0601523 0.749456 0.179856
1224 1 0.126723 0.816542 0.18548
1314 1 0.0642676 0.702428 0.249223
1320 1 0.130455 0.692501 0.310014
1346 1 0.0557283 0.807032 0.243775
1347 1 0.0707577 0.752749 0.315585
1349 1 0.136416 0.75347 0.241053
1352 1 0.133116 0.817317 0.310894
1220 1 0.00289979 0.81349 0.178776
1316 1 0.00373173 0.687266 0.307419
1196 1 0.256215 0.691591 0.190624
1223 1 0.200602 0.762898 0.184582
1227 1 0.314357 0.759696 0.191187
1228 1 0.257556 0.824789 0.188788
1318 1 0.18898 0.684209 0.242088
1322 1 0.314548 0.689047 0.248963
1324 1 0.256836 0.691543 0.315682
1350 1 0.187908 0.815616 0.24308
1351 1 0.184792 0.752501 0.310561
1353 1 0.261675 0.7541 0.248886
1354 1 0.31351 0.818017 0.253888
1355 1 0.314493 0.747207 0.312766
1356 1 0.242927 0.816619 0.312413
1200 1 0.375617 0.687136 0.192907
1231 1 0.438569 0.753395 0.190747
1232 1 0.376975 0.821168 0.195297
1326 1 0.437991 0.687072 0.248728
1328 1 0.380989 0.688317 0.307019
1357 1 0.37447 0.749773 0.252722
1358 1 0.438071 0.816506 0.250407
1359 1 0.438608 0.74874 0.311187
1360 1 0.374807 0.812191 0.313432
1332 1 0.507188 0.687249 0.302158
1236 1 0.49867 0.813607 0.189115
1204 1 0.498567 0.693755 0.186984
1361 1 0.504425 0.75064 0.249677
1364 1 0.493844 0.817312 0.313821
1208 1 0.625555 0.680043 0.187371
1235 1 0.559393 0.753303 0.181378
1240 1 0.613072 0.821437 0.184739
1330 1 0.569355 0.689282 0.24845
1336 1 0.627157 0.690528 0.309838
1362 1 0.559788 0.809515 0.250948
1363 1 0.565517 0.751835 0.310793
1365 1 0.620394 0.756599 0.238454
1368 1 0.623368 0.823436 0.30529
1212 1 0.749927 0.682108 0.180229
1239 1 0.681962 0.752174 0.180596
1243 1 0.806894 0.752237 0.186991
1244 1 0.748638 0.812178 0.183986
1334 1 0.680081 0.683625 0.248642
1338 1 0.815697 0.683846 0.244951
1340 1 0.74979 0.685085 0.309463
1366 1 0.685726 0.814379 0.246751
1367 1 0.680651 0.758625 0.31022
1369 1 0.749581 0.741806 0.249828
1370 1 0.821914 0.807199 0.246934
1371 1 0.808593 0.746225 0.302969
1372 1 0.746267 0.814168 0.30808
1348 1 -0.00130755 0.823777 0.313453
1188 1 0.00288376 0.686543 0.193477
1345 1 0.997703 0.753889 0.249025
1216 1 0.876516 0.690562 0.186098
1247 1 0.943086 0.752902 0.182648
1248 1 0.868355 0.817412 0.181537
1342 1 0.936944 0.688535 0.249385
1344 1 0.874888 0.682974 0.299574
1373 1 0.876323 0.745297 0.245142
1374 1 0.938053 0.820292 0.240735
1375 1 0.940222 0.743548 0.313349
1376 1 0.874804 0.814196 0.310147
1409 1 1.00312 0.502038 0.371657
1251 1 0.0665021 0.876489 0.189968
1256 1 0.126273 0.932113 0.184957
1378 1 0.0622483 0.935594 0.251726
1379 1 0.0596947 0.878132 0.322697
1381 1 0.123808 0.879882 0.254245
1384 1 0.126831 0.933104 0.31242
1534 1 0.929446 0.933878 0.38301
1163 1 0.315406 0.498718 0.188457
1161 1 0.250268 0.498764 0.124707
1535 1 0.93298 0.864194 0.440103
1255 1 0.187663 0.880203 0.187307
1259 1 0.31464 0.881834 0.194526
1260 1 0.244281 0.942824 0.186453
1382 1 0.187332 0.935058 0.252921
1383 1 0.188324 0.881412 0.31772
1385 1 0.252673 0.879567 0.251534
1386 1 0.306846 0.943226 0.251985
1387 1 0.315659 0.875879 0.311756
1388 1 0.249957 0.936891 0.315479
1263 1 0.443336 0.876809 0.185898
1264 1 0.371621 0.94367 0.189163
1389 1 0.379151 0.884494 0.24855
1390 1 0.441251 0.941178 0.254072
1391 1 0.434021 0.880948 0.318839
1392 1 0.380717 0.94112 0.311798
1396 1 0.500702 0.942876 0.311472
1393 1 0.496916 0.876641 0.251765
1268 1 0.504073 0.945415 0.190541
1267 1 0.558406 0.885943 0.189585
1272 1 0.628924 0.946137 0.18077
1394 1 0.564473 0.943572 0.256999
1395 1 0.559154 0.881557 0.313053
1397 1 0.620697 0.881794 0.248277
1400 1 0.628398 0.94194 0.310602
1073 1 0.494827 0.625033 -0.00708993
1271 1 0.675838 0.874711 0.177096
1275 1 0.807279 0.882894 0.175347
1276 1 0.748024 0.943227 0.17826
1398 1 0.693801 0.930942 0.248376
1399 1 0.68767 0.875982 0.307469
1401 1 0.754737 0.872956 0.244129
1402 1 0.809609 0.940706 0.244901
1403 1 0.816333 0.868443 0.309659
1404 1 0.755566 0.935819 0.304139
285 1 0.874322 1.0027 0.254197
1614 1 0.436106 0.814164 0.498737
1380 1 0.995903 0.927622 0.315403
1377 1 1.00292 0.876137 0.248687
1536 1 0.870191 0.936904 0.443061
1252 1 0.00436925 0.934733 0.182899
1279 1 0.938507 0.864331 0.173323
1280 1 0.871421 0.94037 0.18154
1405 1 0.864765 0.881719 0.24205
1406 1 0.933218 0.932256 0.244813
1407 1 0.932162 0.876836 0.310841
1408 1 0.874555 0.942706 0.322365
1637 1 0.121449 0.867749 0.494114
159 1 0.934395 1.00385 0.189778
1410 1 0.0652777 0.558398 0.37553
1416 1 0.124186 0.55976 0.448944
1443 1 0.0648183 0.62141 0.440459
1445 1 0.119217 0.622963 0.374705
1412 1 -0.000390652 0.553203 0.438888
1597 1 0.878248 0.627419 0.498349
1423 1 0.4397 0.491751 0.445738
1449 1 0.252014 0.624128 0.364692
1451 1 0.326826 0.628079 0.437737
1420 1 0.254391 0.569889 0.439134
1447 1 0.183506 0.622172 0.43413
1414 1 0.190072 0.568246 0.370429
1418 1 0.308644 0.554022 0.378143
1602 1 0.0573078 0.805463 0.486884
11 1 0.312672 0.99838 0.0552779
1422 1 0.43467 0.564906 0.373
1455 1 0.441669 0.626222 0.44194
1424 1 0.377824 0.55809 0.440093
1453 1 0.379322 0.624473 0.374719
1428 1 0.498236 0.557911 0.440189
1031 1 0.186337 0.503086 0.0637855
1505 1 0.995693 0.876423 0.377883
1137 1 0.490538 0.874978 0.00135093
1457 1 0.499391 0.621252 0.375738
1461 1 0.62347 0.631435 0.372725
1426 1 0.568674 0.562752 0.378721
1432 1 0.624901 0.562251 0.44103
1459 1 0.570692 0.624455 0.438778
391 1 0.18712 1.0012 0.440062
1430 1 0.692254 0.56124 0.379704
1463 1 0.688043 0.620955 0.437259
1436 1 0.756651 0.561283 0.430869
1434 1 0.82351 0.564051 0.369581
1465 1 0.752906 0.621763 0.367583
1467 1 0.814551 0.622529 0.431537
259 1 0.0591065 0.993128 0.308122
1590 1 0.691346 0.685729 0.498233
1566 1 0.938443 0.564087 0.503763
1122 1 0.0705841 0.947739 0.0021325
1441 1 0.000901179 0.624326 0.38261
1440 1 0.876447 0.565132 0.433697
1469 1 0.880336 0.619228 0.37016
1471 1 0.937601 0.630417 0.434771
1438 1 0.940291 0.559225 0.375279
1517 1 0.371056 0.874281 0.374875
1531 1 0.809742 0.879119 0.431793
1474 1 0.0636077 0.813486 0.37401
1480 1 0.12475 0.806632 0.434877
1475 1 0.059571 0.74587 0.430381
1442 1 0.0633338 0.688849 0.374508
1477 1 0.134286 0.75196 0.374855
1448 1 0.127735 0.685692 0.438826
1452 1 0.256511 0.672696 0.436554
1450 1 0.32128 0.680958 0.374094
1446 1 0.185783 0.683652 0.373413
1482 1 0.319867 0.80999 0.374666
1484 1 0.254523 0.817148 0.439165
1478 1 0.18117 0.824187 0.378931
1481 1 0.239002 0.756283 0.378372
1483 1 0.306491 0.743783 0.436741
1479 1 0.192003 0.738856 0.442341
1043 1 0.56612 0.5011 0.0660226
1114 1 0.811919 0.811945 0.00902277
1456 1 0.386141 0.684307 0.435949
1487 1 0.443886 0.755065 0.436759
1485 1 0.375185 0.744786 0.380253
1488 1 0.378445 0.817136 0.441038
1454 1 0.443275 0.686094 0.371851
1486 1 0.430309 0.812001 0.373242
1529 1 0.750937 0.874864 0.368339
1492 1 0.502837 0.816118 0.433357
1460 1 0.502589 0.69639 0.437481
1489 1 0.499172 0.754432 0.363281
1491 1 0.562225 0.753882 0.437004
1493 1 0.62298 0.750042 0.37
1464 1 0.624302 0.696034 0.435509
1496 1 0.629138 0.810469 0.433614
1490 1 0.565703 0.81806 0.375805
1458 1 0.561719 0.690128 0.369155
1662 1 0.938419 0.931412 0.495179
1606 1 0.190006 0.805868 0.492025
1530 1 0.81119 0.935709 0.37429
1594 1 0.812104 0.695147 0.494848
1526 1 0.69308 0.94207 0.368323
1523 1 0.561474 0.878878 0.442383
1494 1 0.683283 0.821967 0.368335
1499 1 0.812815 0.75175 0.435218
1466 1 0.810132 0.690152 0.368804
1497 1 0.749291 0.75852 0.371231
1500 1 0.747911 0.817997 0.429707
1468 1 0.753097 0.688011 0.432027
1462 1 0.688966 0.691822 0.364341
1498 1 0.814268 0.806154 0.372455
1495 1 0.689395 0.75261 0.435489
1527 1 0.683903 0.875214 0.435305
1476 1 0.993026 0.80676 0.42574
1473 1 0.00327005 0.75244 0.364591
1503 1 0.932433 0.745849 0.437101
1502 1 0.934881 0.811115 0.370245
1501 1 0.878907 0.743147 0.372383
1472 1 0.869472 0.686566 0.430238
1504 1 0.870984 0.804829 0.433808
1470 1 0.938799 0.684181 0.375095
1444 1 1.00055 0.681472 0.437514
1125 1 0.121194 0.880322 -0.000577362
1146 1 0.808138 0.937845 0.00397322
15 1 0.439298 1.00092 0.0652268
1509 1 0.117085 0.879636 0.380761
1508 1 1.00385 0.935112 0.442265
1506 1 0.0589789 0.938465 0.376436
1507 1 0.0572458 0.871679 0.436624
1512 1 0.125783 0.943418 0.435133
1431 1 0.69051 0.50491 0.43211
257 1 0.994603 0.999318 0.255557
1522 1 0.565171 0.937422 0.371593
1634 1 0.0683302 0.931193 0.497158
1145 1 0.746441 0.883526 0.00134643
1510 1 0.194213 0.945163 0.376557
1133 1 0.37171 0.878389 0.00359816
1513 1 0.256191 0.875118 0.379366
1514 1 0.315946 0.936092 0.378292
1516 1 0.248689 0.939089 0.43598
1511 1 0.185764 0.881114 0.436849
1515 1 0.319275 0.878786 0.438403
1532 1 0.747599 0.934351 0.429595
1525 1 0.62076 0.882373 0.369399
1528 1 0.622856 0.9482 0.427046
1518 1 0.431341 0.934453 0.380185
1519 1 0.435763 0.871954 0.432503
1520 1 0.370001 0.942793 0.44118
1524 1 0.501806 0.936179 0.435751
1521 1 0.500759 0.879284 0.377233
1141 1 0.627638 0.882198 -0.00541831
1601 1 0.992962 0.750034 0.500291
1609 1 0.2496 0.741874 0.502573
1654 1 0.681945 0.931738 0.494213
1545 1 0.247942 0.498714 0.501795
1653 1 0.621755 0.880335 0.4991
1089 1 0.00469716 0.751093 -0.00604641
1645 1 0.376296 0.877806 0.499432
1037 1 0.378553 0.497962 0.00671282
1554 1 0.558494 0.556693 0.49839
1585 1 0.50799 0.633735 0.49804
1649 1 0.498016 0.879096 0.495508
1042 1 0.563503 0.566015 0.00968967
1657 1 0.753893 0.883122 0.493684
1613 1 0.37113 0.745689 0.484738
1085 1 0.876558 0.621071 0.0042097
1070 1 0.434756 0.68257 0.00618083
1586 1 0.566719 0.695638 0.496606
13 1 0.375135 0.999624 0.00594533
1082 1 0.81138 0.690791 0.00201307
1646 1 0.439138 0.933221 0.494161
1149 1 0.877408 0.876846 0.0049135
1546 1 0.318241 0.56654 0.492898
1098 1 0.309373 0.810103 0.00166074
1618 1 0.563696 0.816766 0.497505
1142 1 0.685573 0.940347 0.000911425
529 1 0.502465 1.00127 0.497938
1589 1 0.628939 0.634137 0.500284
1641 1 0.250998 0.875716 0.501807
1101 1 0.374998 0.74717 0.00305666
1050 1 0.816713 0.564009 0.00717048
1081 1 0.753312 0.629249 0.00328094
1074 1 0.560228 0.692915 0.0106136
1544 1 0.13217 0.557643 0.560732
1571 1 0.0536253 0.624838 0.559337
1666 1 0.0620897 0.555402 0.616833
1701 1 0.116066 0.619453 0.620284
1540 1 0.996376 0.560234 0.560363
1697 1 0.00374034 0.619746 0.62858
1935 1 0.436736 0.502925 0.943903
1929 1 0.257081 0.511759 0.868419
1548 1 0.252188 0.556195 0.570537
1575 1 0.183746 0.624218 0.567955
1579 1 0.313152 0.621715 0.56343
1670 1 0.186884 0.555147 0.622232
1674 1 0.312452 0.56313 0.623283
1705 1 0.243952 0.626156 0.616951
1685 1 0.630729 0.49273 0.62002
1689 1 0.746728 0.500212 0.623374
1552 1 0.366905 0.560812 0.55878
1583 1 0.447361 0.627312 0.556802
1678 1 0.441424 0.567319 0.631562
1709 1 0.375678 0.629426 0.622305
1713 1 0.502167 0.628396 0.624585
1923 1 0.0570747 0.498408 0.940185
657 1 0.499141 0.998195 0.625938
1562 1 0.818342 0.565163 0.49142
1556 1 0.502439 0.561892 0.564241
1560 1 0.62793 0.568913 0.553746
1587 1 0.559552 0.624042 0.557764
1682 1 0.566134 0.55905 0.624544
1717 1 0.622332 0.625152 0.62274
1642 1 0.309565 0.940068 0.506353
1813 1 0.619155 0.507219 0.754693
1805 1 0.367209 0.502645 0.753783
899 1 0.0620978 1.00019 0.930625
1799 1 0.175866 0.504018 0.810155
1564 1 0.755557 0.559588 0.555483
1591 1 0.689737 0.621465 0.562395
1595 1 0.809581 0.625211 0.558517
1686 1 0.684871 0.559068 0.621
1690 1 0.815107 0.56156 0.617605
1721 1 0.748269 0.6171 0.621453
1675 1 0.313284 0.506736 0.689782
9 1 0.248833 0.996049 0.994122
1578 1 0.312576 0.682118 0.493834
1568 1 0.877168 0.564704 0.56022
1599 1 0.941409 0.62802 0.56223
1694 1 0.944665 0.563364 0.628291
1725 1 0.876312 0.62065 0.623738
1807 1 0.430603 0.506725 0.816868
1538 1 0.062836 0.557625 0.505122
1542 1 0.185761 0.564416 0.506527
1576 1 0.120868 0.685581 0.562862
1603 1 0.069688 0.75304 0.567231
1608 1 0.130556 0.807333 0.558097
1698 1 0.0663855 0.68946 0.628796
1730 1 0.0705819 0.810013 0.631495
1733 1 0.129749 0.749706 0.627565
1572 1 0.00876633 0.697686 0.562768
925 1 0.875058 1.00359 0.880776
1580 1 0.250966 0.687627 0.560992
1607 1 0.183875 0.740983 0.56366
1611 1 0.321908 0.746438 0.558355
1612 1 0.253625 0.804805 0.560088
1702 1 0.18083 0.681457 0.628242
1706 1 0.315375 0.688133 0.620485
1734 1 0.18477 0.804842 0.61939
1737 1 0.251893 0.745097 0.625993
1738 1 0.313968 0.810022 0.617649
1558 1 0.686765 0.569016 0.498466
1126 1 0.188869 0.939194 0.998573
523 1 0.320253 0.998177 0.572159
1803 1 0.31109 0.495772 0.813318
1584 1 0.379802 0.690862 0.558914
1615 1 0.437484 0.754542 0.557662
1616 1 0.372041 0.816037 0.560017
1710 1 0.438841 0.682007 0.627391
1741 1 0.381127 0.760933 0.616585
1742 1 0.437756 0.815699 0.629338
909 1 0.373539 0.991192 0.86896
1681 1 0.504178 0.503328 0.633342
1588 1 0.505372 0.695997 0.556618
1620 1 0.500715 0.812708 0.567037
1745 1 0.50706 0.753472 0.624796
1592 1 0.626553 0.6918 0.562525
1619 1 0.566113 0.755793 0.561277
1624 1 0.625672 0.8189 0.557226
1714 1 0.563299 0.690189 0.61765
1746 1 0.566174 0.813433 0.624795
1749 1 0.63143 0.750491 0.622673
1673 1 0.255928 0.500673 0.626426
1687 1 0.687013 0.502 0.696189
1596 1 0.751435 0.684609 0.556288
1623 1 0.691168 0.748505 0.561978
1627 1 0.817481 0.745287 0.569852
1628 1 0.749767 0.807889 0.569701
1718 1 0.687184 0.688026 0.620939
1722 1 0.811354 0.676673 0.622093
1750 1 0.683634 0.809494 0.620495
1753 1 0.754593 0.742395 0.623012
1754 1 0.818732 0.807748 0.627693
1581 1 0.382235 0.630371 0.4998
1604 1 0.997976 0.807896 0.563544
1729 1 0.993826 0.752356 0.627682
1600 1 0.872072 0.685216 0.559519
1631 1 0.935822 0.746929 0.566769
1632 1 0.879911 0.812825 0.567032
1726 1 0.938975 0.682467 0.624148
1757 1 0.87315 0.733922 0.625766
1758 1 0.93564 0.807445 0.629276
1626 1 0.81814 0.810611 0.500499
783 1 0.438784 0.995053 0.812767
1635 1 0.0680177 0.866177 0.559396
1640 1 0.129982 0.926479 0.562771
1762 1 0.0604799 0.929983 0.624378
1765 1 0.124092 0.871757 0.625555
1761 1 0.992194 0.873164 0.62506
1550 1 0.443009 0.570263 0.500192
2045 1 0.879447 0.874321 0.876405
1138 1 0.563691 0.9382 0.995052
1639 1 0.1881 0.862296 0.559198
1643 1 0.316597 0.872489 0.556333
1644 1 0.244487 0.934755 0.569995
1766 1 0.189754 0.932346 0.624933
1769 1 0.248271 0.868673 0.625835
1770 1 0.314178 0.930613 0.630531
1921 1 0.00476552 0.512226 0.875898
795 1 0.812319 0.987937 0.817173
1815 1 0.689294 0.497131 0.817702
1647 1 0.44277 0.872732 0.560689
1648 1 0.373769 0.935981 0.558463
1773 1 0.372196 0.866497 0.624974
1774 1 0.433502 0.93775 0.630425
2046 1 0.93481 0.940093 0.880329
1777 1 0.501033 0.880016 0.626283
1652 1 0.497294 0.941071 0.554879
1651 1 0.559863 0.872891 0.561699
1656 1 0.627032 0.931647 0.561096
1778 1 0.569417 0.937708 0.619101
1781 1 0.626046 0.872696 0.618066
1677 1 0.377096 0.50291 0.633697
1655 1 0.690273 0.8752 0.555149
1659 1 0.813888 0.870518 0.565354
1660 1 0.755451 0.93309 0.563256
1782 1 0.690458 0.930494 0.621045
1785 1 0.757296 0.866514 0.627115
1786 1 0.81505 0.936787 0.621914
1573 1 0.118513 0.623819 0.501035
1636 1 0.00209225 0.930484 0.562821
1663 1 0.935582 0.880102 0.562819
1664 1 0.87508 0.937245 0.561265
1789 1 0.878575 0.891328 0.629755
1790 1 0.945041 0.941715 0.622575
1672 1 0.119534 0.557269 0.684597
1699 1 0.0661696 0.617099 0.691776
1794 1 0.0590911 0.555352 0.751155
1800 1 0.125676 0.564023 0.812794
1827 1 0.0686921 0.628825 0.806712
1829 1 0.126554 0.619205 0.750238
1668 1 -0.00151046 0.56241 0.692991
1825 1 1.00478 0.626351 0.749818
1693 1 0.876078 0.504132 0.619668
1676 1 0.255045 0.56916 0.685998
1703 1 0.186481 0.614168 0.684231
1707 1 0.317295 0.625086 0.68611
1798 1 0.187148 0.560476 0.753097
1802 1 0.31483 0.573299 0.757955
1804 1 0.251499 0.570763 0.813235
1831 1 0.186542 0.628508 0.808854
1833 1 0.252294 0.625567 0.746809
1835 1 0.312813 0.633134 0.813997
2047 1 0.93561 0.875252 0.934612
897 1 -0.00110535 0.988483 0.875308
1680 1 0.378507 0.560977 0.696283
1711 1 0.433572 0.624232 0.690425
1806 1 0.434704 0.559927 0.754048
1808 1 0.376986 0.56369 0.817386
1837 1 0.378492 0.624765 0.755018
1839 1 0.434413 0.634897 0.809156
1841 1 0.494344 0.625247 0.746017
1684 1 0.501276 0.55993 0.693884
1812 1 0.496002 0.565723 0.809797
1688 1 0.621437 0.559555 0.688129
1715 1 0.5686 0.62755 0.686497
1810 1 0.559442 0.570244 0.756862
1816 1 0.630443 0.560241 0.815157
1843 1 0.571882 0.632448 0.816993
1845 1 0.625128 0.621688 0.745539
777 1 0.242899 1.0024 0.747734
2048 1 0.873728 0.938811 0.950041
1817 1 0.752537 0.499764 0.760351
1691 1 0.816743 0.511028 0.689025
905 1 0.247181 1.00223 0.87086
1617 1 0.504007 0.760487 0.498506
1692 1 0.75111 0.56249 0.691395
1719 1 0.689615 0.6247 0.684904
1723 1 0.810116 0.618721 0.694645
1814 1 0.690217 0.567774 0.748689
1818 1 0.815592 0.560485 0.758191
1820 1 0.749564 0.562863 0.814755
1847 1 0.690058 0.626468 0.822137
1849 1 0.748516 0.62636 0.754306
1851 1 0.814232 0.619529 0.82276
1796 1 0.00325384 0.57161 0.814235
1696 1 0.877538 0.570712 0.691616
1727 1 0.940962 0.631372 0.688942
1822 1 0.936852 0.566176 0.748028
1824 1 0.876558 0.555912 0.815322
1853 1 0.877502 0.629684 0.74797
1855 1 0.935932 0.627018 0.810062
665 1 0.752139 0.99362 0.615985
1931 1 0.319342 0.504338 0.939386
1704 1 0.118928 0.689606 0.689755
1731 1 0.0637274 0.748977 0.685535
1736 1 0.129522 0.811944 0.695757
1826 1 0.0625681 0.687665 0.744059
1832 1 0.130399 0.687836 0.819257
1858 1 0.0666893 0.808474 0.754672
1859 1 0.0568006 0.741699 0.820301
1861 1 0.127255 0.744542 0.750194
1864 1 0.127858 0.805573 0.813899
1700 1 0.00276819 0.685291 0.682899
1857 1 0.00253415 0.746259 0.748838
1732 1 1.00291 0.816866 0.691745
1828 1 0.00402064 0.680147 0.819914
1708 1 0.252634 0.682418 0.681051
1735 1 0.186094 0.743686 0.687593
1739 1 0.315698 0.7498 0.680192
1740 1 0.251292 0.810007 0.682884
1830 1 0.18899 0.677591 0.741026
1834 1 0.318739 0.683261 0.742124
1836 1 0.251345 0.684247 0.802254
1862 1 0.193374 0.805322 0.749048
1863 1 0.194381 0.746041 0.818143
1865 1 0.250863 0.742906 0.74004
1866 1 0.315901 0.804151 0.747721
1867 1 0.304659 0.744272 0.812783
1868 1 0.251733 0.81721 0.807439
1712 1 0.375256 0.69024 0.678494
1743 1 0.43867 0.757237 0.69356
1744 1 0.372473 0.807697 0.685382
1838 1 0.43639 0.685826 0.747576
1840 1 0.373851 0.68783 0.810777
1869 1 0.377978 0.751385 0.747637
1870 1 0.445307 0.811484 0.752675
1871 1 0.430562 0.748533 0.813932
1872 1 0.366961 0.812978 0.809926
1748 1 0.498453 0.816924 0.685524
1876 1 0.501909 0.817024 0.809222
1844 1 0.504226 0.684674 0.805587
1716 1 0.505294 0.691376 0.683581
1873 1 0.498309 0.747901 0.751113
1720 1 0.626737 0.687665 0.68033
1747 1 0.564392 0.749642 0.692177
1752 1 0.622797 0.818705 0.681733
1842 1 0.564392 0.68191 0.750758
1848 1 0.636884 0.690234 0.801029
1874 1 0.564235 0.81175 0.745452
1875 1 0.56773 0.747385 0.807849
1877 1 0.632791 0.757013 0.747865
1880 1 0.631417 0.811787 0.813624
1724 1 0.749582 0.684183 0.683135
1751 1 0.694105 0.745605 0.682501
1755 1 0.812673 0.748357 0.688434
1756 1 0.754581 0.808306 0.684051
1846 1 0.692638 0.689749 0.743814
1850 1 0.808024 0.680914 0.751302
1852 1 0.750453 0.68183 0.814321
1878 1 0.68671 0.812345 0.744508
1879 1 0.69191 0.750026 0.812413
1881 1 0.755104 0.749123 0.755782
1882 1 0.813998 0.810684 0.758335
1883 1 0.812915 0.742698 0.81662
1884 1 0.740534 0.811726 0.817757
1860 1 1.00264 0.819669 0.81213
1728 1 0.876307 0.685276 0.690824
1759 1 0.936922 0.749053 0.692592
1760 1 0.878411 0.808102 0.688659
1854 1 0.941162 0.69365 0.751694
1856 1 0.879708 0.689059 0.813049
1885 1 0.87088 0.749635 0.752663
1886 1 0.941877 0.811571 0.750443
1887 1 0.934905 0.755901 0.812599
1888 1 0.879034 0.811969 0.82269
1763 1 0.0637825 0.876537 0.688828
1768 1 0.123578 0.936099 0.685019
1890 1 0.0639138 0.931166 0.748511
1891 1 0.0655488 0.86763 0.813319
1893 1 0.119565 0.871738 0.74758
1896 1 0.123436 0.932683 0.810836
1892 1 0.00733788 0.930925 0.809933
1889 1 0.00329264 0.875657 0.750775
1949 1 0.872086 0.500285 0.875574
1937 1 0.499041 0.502601 0.882617
1767 1 0.185135 0.868861 0.684476
1771 1 0.309676 0.878216 0.692424
1772 1 0.249354 0.938989 0.685449
1894 1 0.182822 0.933001 0.748638
1895 1 0.182079 0.870526 0.812129
1897 1 0.243441 0.874249 0.747015
1898 1 0.308219 0.938078 0.753927
1899 1 0.318082 0.873824 0.807273
1900 1 0.251775 0.934561 0.808508
1605 1 0.126858 0.744038 0.492118
799 1 0.93353 1.00449 0.808786
643 1 0.06048 0.992434 0.687184
1775 1 0.435097 0.875561 0.685007
1776 1 0.379658 0.944491 0.691825
1901 1 0.376604 0.867956 0.746372
1902 1 0.444294 0.930377 0.752292
1903 1 0.439242 0.87155 0.811544
1904 1 0.374916 0.93552 0.80153
1908 1 0.500003 0.929196 0.811461
1129 1 0.247022 0.879456 0.995876
1780 1 0.507716 0.935045 0.680667
1905 1 0.50666 0.867022 0.749583
1779 1 0.56648 0.879439 0.683019
1784 1 0.63218 0.931643 0.686825
1906 1 0.567404 0.935036 0.751528
1907 1 0.562154 0.87606 0.812045
1909 1 0.620188 0.867242 0.747653
1912 1 0.626395 0.93227 0.811327
1795 1 0.0581713 0.503286 0.811408
913 1 0.499154 0.997948 0.876215
659 1 0.564435 0.998578 0.683584
2041 1 0.749415 0.869435 0.873788
2044 1 0.741677 0.940046 0.936931
1783 1 0.693264 0.867303 0.679407
1787 1 0.820238 0.878976 0.68663
1788 1 0.747141 0.93317 0.685413
1910 1 0.686769 0.934789 0.755335
1911 1 0.692042 0.873274 0.810146
1913 1 0.746827 0.875838 0.75538
1914 1 0.813749 0.940714 0.749421
1915 1 0.811285 0.876596 0.81073
1916 1 0.746463 0.93762 0.82137
1764 1 -0.00301772 0.93892 0.685859
1791 1 0.940847 0.875039 0.69393
1792 1 0.883276 0.940881 0.694947
1917 1 0.872648 0.868176 0.75539
1918 1 0.940012 0.941179 0.751022
1919 1 0.939813 0.874855 0.80604
1920 1 0.87636 0.936507 0.81325
787 1 0.564346 1.00076 0.816185
2022 1 0.188596 0.939509 0.867001
1922 1 0.07193 0.564217 0.877065
1928 1 0.131803 0.561883 0.94468
1955 1 0.0663822 0.62366 0.933502
1957 1 0.130755 0.624514 0.879101
2028 1 0.247193 0.937498 0.929583
1933 1 0.376791 0.500565 0.877156
663 1 0.691681 0.993969 0.689976
1924 1 0.00644365 0.56778 0.938207
2027 1 0.310512 0.871905 0.93356
1049 1 0.758689 0.505291 1.00396
1930 1 0.3225 0.562548 0.876293
1926 1 0.18687 0.560638 0.882008
1963 1 0.315914 0.614512 0.940575
1961 1 0.248192 0.612016 0.880752
1932 1 0.251622 0.559998 0.945608
1959 1 0.187558 0.627701 0.944503
667 1 0.809261 0.994569 0.685665
543 1 0.93906 0.992938 0.557543
785 1 0.504914 0.994961 0.764666
2034 1 0.561671 0.934653 0.872884
1965 1 0.378925 0.630027 0.87285
1934 1 0.437184 0.567842 0.88162
1936 1 0.382874 0.56229 0.945647
1967 1 0.433066 0.628579 0.937412
1969 1 0.497031 0.627387 0.873703
1811 1 0.559709 0.505118 0.819541
901 1 0.119565 1.00005 0.870346
2035 1 0.552559 0.872187 0.936454
1940 1 0.501999 0.563562 0.939775
1938 1 0.568763 0.56225 0.882307
1944 1 0.627149 0.563046 0.944369
1971 1 0.567775 0.62466 0.941951
1973 1 0.624165 0.62318 0.882348
771 1 0.0535286 0.997892 0.815543
2042 1 0.814863 0.931457 0.884797
1801 1 0.251151 0.50957 0.748569
1942 1 0.689815 0.555922 0.874689
1946 1 0.812025 0.555977 0.874393
1977 1 0.748709 0.621512 0.878117
1979 1 0.816383 0.623012 0.942183
1948 1 0.764003 0.561582 0.943593
1975 1 0.691183 0.621602 0.941076
1569 1 -0.00334127 0.622684 0.494373
515 1 0.0615397 0.990004 0.559281
2039 1 0.692161 0.876868 0.93812
2032 1 0.368203 0.947897 0.940592
1086 1 0.939025 0.685537 1.00422
1953 1 0.993717 0.621832 0.880964
1950 1 0.939337 0.561663 0.874351
1981 1 0.87807 0.622051 0.881522
1983 1 0.931115 0.635223 0.937066
1952 1 0.872375 0.557172 0.947601
775 1 0.186548 0.99756 0.806822
1134 1 0.433933 0.937242 1.00522
2030 1 0.435979 0.93833 0.872926
1986 1 0.0621447 0.812101 0.880441
1987 1 0.0669435 0.750183 0.939095
1954 1 0.0703597 0.682244 0.875698
1992 1 0.131527 0.805928 0.93607
1989 1 0.129836 0.750081 0.876074
1960 1 0.125557 0.688655 0.936664
1988 1 0.00110503 0.817994 0.940443
2033 1 0.500933 0.872841 0.878104
653 1 0.379958 1.0005 0.627368
2036 1 0.491851 0.933104 0.936367
1995 1 0.315842 0.757501 0.937045
1996 1 0.249104 0.812745 0.940115
1991 1 0.194892 0.743349 0.9364
1958 1 0.19883 0.681646 0.876023
1964 1 0.253206 0.678656 0.942908
1993 1 0.255765 0.751245 0.875432
1962 1 0.313585 0.688772 0.883074
1990 1 0.182161 0.811359 0.875287
1994 1 0.31529 0.817628 0.873529
1090 1 0.0627817 0.812819 0.996051
535 1 0.68417 0.993401 0.564023
1999 1 0.437093 0.753528 0.937326
1998 1 0.430724 0.813331 0.876812
1966 1 0.440277 0.696917 0.880455
1968 1 0.372173 0.695779 0.940669
1997 1 0.370234 0.749491 0.876579
2000 1 0.375101 0.813126 0.937722
1972 1 0.505414 0.695275 0.939746
2023 1 0.183573 0.878004 0.936472
2004 1 0.504161 0.799087 0.938911
2001 1 0.506708 0.746151 0.864477
2008 1 0.625641 0.816985 0.944366
2003 1 0.565022 0.75245 0.940182
1976 1 0.619085 0.68784 0.936743
1970 1 0.566608 0.694104 0.872527
2002 1 0.569268 0.807768 0.872406
2005 1 0.6321 0.750756 0.873806
2021 1 0.128109 0.872887 0.87172
1951 1 0.947619 0.504293 0.940392
2031 1 0.427305 0.886811 0.932185
2040 1 0.627148 0.940274 0.93256
2029 1 0.373562 0.885124 0.878413
2037 1 0.63007 0.88065 0.87305
2026 1 0.310284 0.935198 0.871459
2012 1 0.753389 0.820488 0.942731
2011 1 0.812928 0.757142 0.943654
1978 1 0.811926 0.679251 0.875952
1974 1 0.691982 0.678173 0.883519
2006 1 0.697907 0.81087 0.887262
1980 1 0.756817 0.684138 0.939121
2010 1 0.808953 0.815273 0.874765
2007 1 0.679587 0.754003 0.942964
2009 1 0.756489 0.749341 0.883254
1985 1 -2.93372e-05 0.753154 0.886734
2013 1 0.876415 0.748042 0.876182
2016 1 0.875527 0.816216 0.93751
2015 1 0.94089 0.743687 0.943051
1984 1 0.864727 0.689143 0.936554
1956 1 0.00331305 0.693027 0.93394
1982 1 0.940708 0.694017 0.872385
2014 1 0.940098 0.810258 0.874215
2025 1 0.248923 0.870767 0.875943
2043 1 0.809452 0.878433 0.942333
2038 1 0.687072 0.937691 0.875477
2017 1 0.995094 0.877145 0.870388
2019 1 0.066381 0.877181 0.934266
2024 1 0.123636 0.937927 0.939614
2020 1 -0.00169268 0.938924 0.945361
2018 1 0.0660772 0.936446 0.87068
789 1 0.624987 0.997169 0.753198
647 1 0.18583 0.997669 0.683483
1551 1 0.435581 0.510086 0.56693
669 1 0.87992 0.994838 0.632015
903 1 0.184724 0.992815 0.929888
661 1 0.62789 0.998754 0.629481
541 1 0.874194 0.999973 0.497046
1078 1 0.687512 0.688349 0.993867
1661 1 0.875983 0.872226 0.498841
1150 1 0.937085 0.931258 1.00144
5 1 0.126618 1.00201 0.991064
1097 1 0.250506 0.747592 0.996855
1130 1 0.306196 0.932412 0.998124
1102 1 0.439859 0.813325 0.999837
1057 1 -0.00417595 0.624738 0.998351
1061 1 0.127065 0.626262 1.00163
1054 1 0.943536 0.564103 0.998884
1046 1 0.694869 0.562269 0.999846
1117 1 0.880446 0.748193 1.00355
517 1 0.121231 0.998659 0.500541
1113 1 0.749127 0.750699 0.994412
1625 1 0.750471 0.75314 0.505708
1638 1 0.191652 0.940065 0.505371
1582 1 0.441915 0.689999 0.497224
1650 1 0.560438 0.943396 0.505858
1065 1 0.2572 0.624799 1.00667
1598 1 0.937743 0.690478 0.503376
1570 1 0.0541676 0.691054 0.500806
1610 1 0.312042 0.805689 0.499426
1577 1 0.250344 0.61907 0.506522
1541 1 0.122443 0.496293 0.505846
1629 1 0.875511 0.747541 0.499548
1621 1 0.632985 0.760003 0.499006
1574 1 0.189471 0.681531 0.500813
1658 1 0.813629 0.940651 0.504013
17 1 0.492925 0.994198 0.99488
|
17,968 | 879426df4daf8c2951a91edfd8abb736245c6c5b | import os
import numpy as np
from sklearn.model_selection import train_test_split, ParameterGrid
import dataset_loader.datasets as data_loader
class Data:
    """Train/test split of one benchmark dataset.

    MSRank and CoverType arrive pre-split (as lists of parts); every
    other dataset is split randomly with a fixed seed so runs are
    reproducible.
    """

    def __init__(self, X, y, name, task, metric, train_size=0.8):
        assert 0. < train_size < 1.
        test_size = 1. - train_size
        self.name = name
        self.task = task
        self.metric = metric
        if 'MSRank' in name:
            # Parts 0 and 1 are training folds; part 2 is the test fold.
            self.X_train = np.vstack([X[0], X[1]])
            self.y_train = np.hstack([y[0], y[1]])
            self.X_test = X[2]
            self.y_test = y[2]
        elif 'CoverType' in name:
            # Already split into (train, test) pairs.
            self.X_train, self.y_train = X[0], y[0]
            self.X_test, self.y_test = X[1], y[1]
        else:
            # Fixed random_state keeps the split identical across runs.
            split = train_test_split(X, y, test_size=test_size,
                                     random_state=0)
            self.X_train, self.X_test, self.y_train, self.y_test = split
class Experiment:
    """Runs every requested learner over one dataset for each point of a
    hyper-parameter grid, logging progress and timings to stdout."""

    def __init__(self, data_func, name, task, metric):
        self.data_func = data_func
        self.name = name
        self.task = task
        self.metric = metric

    def run(self, use_gpu, learners, params_grid, out_dir):
        X, y = self.data_func()
        data = Data(X, y, self.name, self.task, self.metric)
        device_type = 'GPU' if use_gpu else 'CPU'
        for LearnerType in learners:
            learner = LearnerType(data, use_gpu)
            algorithm_name = learner.name() + '-' + device_type
            print('Started to train ' + algorithm_name)
            # Log directory is the same for every grid point of this learner.
            log_dirname = os.path.join(out_dir, self.name, algorithm_name)
            for params in ParameterGrid(params_grid):
                print(params)
                # A single bad configuration must not abort the whole grid,
                # so failures are reported and the sweep continues.
                try:
                    elapsed = learner.run(params, log_dirname)
                    print('Timing: ' + str(elapsed) + ' sec')
                except Exception as e:
                    print('Exception during training: ' + repr(e))
# Registry of runnable benchmarks, keyed by their CLI name.
# Each entry pairs a loader function with the dataset's display name,
# task type (Regression / Classification / Multiclass) and metric.
DATASETS = {
    "abalone": Experiment(data_loader.get_abalone, "Abalone", "Regression", "RMSE"),
    "letters": Experiment(data_loader.get_letters, "Letters", "Multiclass", "Accuracy"),
    "year-msd": Experiment(data_loader.get_year, "YearPredictionMSD", "Regression", "RMSE"),
    "synthetic": Experiment(data_loader.get_synthetic_regression, "Synthetic", "Regression", "RMSE"),
    "synthetic-5k-features": Experiment(data_loader.get_synthetic_regression_5k_features,
                                        "Synthetic5kFeatures", "Regression", "RMSE"),
    "cover-type": Experiment(data_loader.get_cover_type, "CoverType", "Multiclass", "Accuracy"),
    "epsilon": Experiment(data_loader.get_epsilon, "Epsilon", "Classification", "Accuracy"),
    "higgs": Experiment(data_loader.get_higgs, "Higgs", "Classification", "Accuracy"),
    "bosch": Experiment(data_loader.get_bosch, "Bosch", "Classification", "Accuracy"),
    "airline": Experiment(data_loader.get_airline, "Airline", "Classification", "Accuracy"),
    "higgs-sampled": Experiment(data_loader.get_higgs_sampled, "Higgs", "Classification", "Accuracy"),
    "epsilon-sampled": Experiment(data_loader.get_epsilon_sampled, "Epsilon", "Classification", "Accuracy"),
    "synthetic-classification": Experiment(data_loader.get_synthetic_classification,
                                           "Synthetic2", "Classification", "Accuracy"),
    "msrank": Experiment(data_loader.get_msrank, "MSRank-RMSE", "Regression", "RMSE"),
    # Same loader as "msrank", but treated as a multiclass problem.
    "msrank-classification": Experiment(data_loader.get_msrank, "MSRank-MultiClass", "Multiclass", "Accuracy")
}
|
17,969 | 5b0c25b24e782b418fb77a0103e8c97c5bafce99 | import re
def roman_numerals(text):
    """
    Return every maximal whole-word run of Roman-numeral letters
    (I, V, X, L, C, D, M) found in *text*. Lowercase letters never match.
    >>> roman_numerals("Sir Richard IIV, can you tell Richard VI that Richard IV is on the phone?")
    ['IIV', 'VI', 'IV']
    >>> roman_numerals("My TODOs: I. Groceries II. Learn how to count in Roman IV. Profit")
    ['I', 'II', 'IV']
    >>> roman_numerals("I. Act 1 II. Act 2 III. Act 3 IV. Act 4 V. Act 5")
    ['I', 'II', 'III', 'IV', 'V']
    >>> roman_numerals("Let's play Civ VII")
    ['VII']
    >>> roman_numerals("i love vi so much more than emacs.")
    []
    >>> roman_numerals("she loves ALL editors equally.")
    []
    """
    # Word boundaries keep us from picking letters out of ordinary words.
    numeral_word = re.compile(r"\b([IVXLCDM]+)\b")
    return numeral_word.findall(text)
import re
def calculator_ops(calc_str):
    """
    Finds expressions from the Calculator language that have two
    numeric operands and returns the expression without the parentheses.
    >>> calculator_ops("(* 2 4)")
    ['* 2 4']
    >>> calculator_ops("(+ (* 3 (+ (* 2 4) (+ 3 5))) (+ (- 10 7) 6))")
    ['* 2 4', '+ 3 5', '- 10 7']
    >>> calculator_ops("(* 2)")
    []
    """
    # BUG FIX: the original class [+-*/] is an invalid range (+ .. *),
    # which makes re.compile raise re.error. Putting '-' first (or last)
    # makes it a literal hyphen: [-+*/].
    return re.findall(r"\(([-+*/]\s+\d+\s+\d+)\)", calc_str)
import re
def cs_classes(post):
    """
    Returns strings that look like a Berkeley CS class,
    starting with "CS", followed by a number, optionally ending with A, B, or C.
    Case insensitive.
    >>> cs_classes("Is it unreasonable to take CS61A, CS61B, CS70, and EE16A in the summer?")
    True
    >>> cs_classes("how do I become a TA for cs61a? that job sounds so fun")
    True
    >>> cs_classes("Can I take ECON101 as a CS major?")
    False
    >>> cs_classes("Should I do the lab lites or regular labs in EE16A?")
    False
    """
    # BUG FIX: the original pattern r"\s+\[cs|CS]\d+[a-cA-C]\s+" matched a
    # literal '[' and split the alternation incorrectly, so every doctest
    # failed. Use word boundaries (punctuation also counts as a boundary,
    # so "CS61A," and "cs61a?" match) plus re.IGNORECASE, and make the
    # A/B/C suffix optional as documented.
    return bool(re.search(r"\bcs\d+[a-c]?\b", post, re.IGNORECASE))
|
17,970 | 616e053f2ea75a15d1e464efe30e636f78a77f6a | # 键的判断:
# Key membership test syntax: `key in dict` / `key not in dict`.
dic={'张三':100,'李四':98,'王五':90}
print('张三' in dic)
# Add a new entry (assigning to a missing key inserts it).
dic['陈六']=100
print(dic)
# Modify an existing entry (assigning to an existing key overwrites it).
dic['陈六']=0
print(dic)
# Deleting a single entry:
# del dict_name['key']
del dic['张三']
print(dic)
dic.clear()# remove every entry, leaving an empty dict
print(dic)
|
17,971 | 460276d6f1557d606f340bb7adfa1100b6f2daf3 | # current_number = 1
# while current_number <= 6:
# print(current_number)
# current_number += 1
# a = "Tell me something,and I will repeat it back to you: "
# a += "\nEnter 'quit' to end the program. "
# a = True
# while a:
# message = input(a)
# if message == 'quit':
# a = False
# else:
# print(message)
# i = 1
# sum = 0
# while i <= 100:
# if i % 2 == 0:
# sum += i
# else:
# pass
# i += 1
# print("从1到100的和为:%s" % sum)
# 打印矩形
# x = 1
# y = 1
# while y <= 10: # 为了输出10行
# x = 1
# while x <= 10: # 为了在一行中输出10个*
# print("*", end="") # 函数默认在输出之后就换行,如果不换行,输出help(print)
# x += 1
# print("")
# y += 1
# print("已经完成")
# help(print)
# 打印99乘法表
# x = 1 # 代表行数
# while x <= 9: # 一共要循坏9次还能打印9行,每行打印的列数和行号一样
# y = 1 # 代表列数
# while y <= x:
# print("%d*%d=%d\t" % (y, x, x*y), end="")
# y += 1
# print("")
# x += 1
# print("结束")
# 打印一个倒三角形,要求倒三角形是等边三角形,并且行数由用户输入
# x = int(input("打印等边三角形,请输入行数:"))
# while x <= 6:
x = 1
while x <= 9:
y = 1
while x |
17,972 | 20711f7a426e06b5162188fd74fbad85f405185f | # -*- coding: utf-8 -*-
import shutil
import os

# File listing one source path per line; each listed file is copied into
# dest_base_dir, preserving its relative directory structure.
URL_LIST_FILE = './noisy2hin2.txt'
dest_base_dir = './noisy2hin2/'

# BUG FIX: the original leaked the file handle ("open(...).readlines()"
# with no close); "with" guarantees it is closed.
with open(URL_LIST_FILE) as listing:
    lines = listing.readlines()

for src_file in lines:
    src = src_file.rstrip()  # strip the trailing newline once
    dest_file = dest_base_dir + src
    print('src_file:', src_file, ', dest_file:', dest_file)
    # Create the destination directory tree on demand.
    os.makedirs(os.path.dirname(dest_file), exist_ok=True)
    shutil.copy(src, dest_file)
17,973 | ead80ff7e262f85fb9e82c38864ce01ba505bf46 | from django.contrib import admin
from .models import Banner, Project
# Register your models here.
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    # Fields shown (in this order) on the Project add/change form.
    fields = ('title', 'description', 'type', 'image', 'new')
@admin.register(Banner)
class BannerAdmin(admin.ModelAdmin):
    # Fields shown (in this order) on the Banner add/change form.
    fields = ('title', 'time', 'no', 'image', 'active')
17,974 | e93315ce4ade9ffbae5d0f6bcddb9ffde0cd0ef2 | import json
SAVINGS_FILENAME = 'savings.json'  # on-disk JSON store for all room records
# Template showing the shape of one room record (reference only; FirstTask
# builds its own fresh dict at runtime).
roomTest = {
    'number': 0,
    'width': 0,
    'length': 0,
    'price': 0,
    'canHold': 0,
    'held': 0,
    'items': {'wardrobe': 0, 'tv': 0, 'table': 0, 'chair': 0}
}
def create_data():
    """Return a brand-new, empty list of room records."""
    return []
def save_data(data):
    """Serialize *data* to SAVINGS_FILENAME as pretty-printed UTF-8 JSON."""
    with open(SAVINGS_FILENAME, 'wt', encoding='utf-8') as fh:
        fh.write(json.dumps(data, indent=4, ensure_ascii=False))
def load_data():
    """Read and deserialize the JSON store at SAVINGS_FILENAME."""
    with open(SAVINGS_FILENAME, 'rt', encoding='utf-8') as fh:
        return json.loads(fh.read())
def try_to_load_data():
    """Load saved data; on any failure report it and start with fresh data."""
    try:
        return load_data()
    except Exception as err:
        print('Сталася помилка з файлом збереження:')
        print(err)
        print('Створюємо нові дані')
        input('<Enter>')
        return create_data()
def RoomInput(string):
    """Prompt with "<string>: " and return the typed value as an int."""
    return int(input(string + ": "))
def main():
    """Interactive menu loop for the room-booking program."""
    data = try_to_load_data()
    while True:
        for line in ("---Main Menu---",
                     "1 - create/delete room",
                     "2 - search room",
                     "3 - add guests",
                     "4 - statistics",
                     "5 - exit",
                     "6 - print data"):
            print(line)
        choice = int(input("Input: "))
        if choice == 5:
            break
        if choice == 1:
            print(FirstTask(data))
        if data != []:
            # Options 2/3/4/6 only make sense once rooms exist.
            if choice == 2:
                print(SecondTask(data))
            elif choice == 3:
                ThirdTask(data)
            elif choice == 4:
                ForthTask(data)
            elif choice == 6:
                print(data)
        else:
            print("add some rooms!")
    save_data(data)
def FirstTask(data):
    """Create a new room (returned and appended) or delete rooms by number
    (returns None)."""
    room = {
        'number': 0,
        'width': 0,
        'length': 0,
        'price': 0,
        'canHold': 0,
        'held': 0,
        'items': {'wardrobe': 0, 'tv': 0, 'table': 0, 'chair': 0}
    }
    playerChoice = int(input("create/delete room?(1/2): "))
    if playerChoice == 1:
        keys = list(room.keys())
        itemsKeys = list(room['items'].keys())
        print(keys)
        # Fill every scalar field; 'items' (the last key) is handled below.
        for key in keys[:-1]:
            room[key] = RoomInput(key)
        for item in itemsKeys:
            room['items'][item] = RoomInput(item)
        data.append(room)
        return room
    else:
        index = int(input("delete room by number: "))
        # BUG FIX: the original popped while also advancing the index, which
        # skipped the element that slid into the freed slot (missing rooms
        # when several shared the same number). Rebuild the list in place so
        # every matching room is removed.
        data[:] = [r for r in data if r['number'] != index]
        return None
def SecondTask(data):
    """Return numbers of rooms with at least the requested area and capacity."""
    min_area = int(input("Area: "))
    min_guests = int(input("Guests: "))
    return [
        room['number']
        for room in data
        if room['width'] * room['length'] >= min_area
        and room['canHold'] >= min_guests
    ]
def ThirdTask(data):
    """Check the requested guest count into the room with the given number."""
    number = int(input("Room number: "))
    guests = int(input("Guests number: "))
    checked_in = 0
    for room in data:
        if room['number'] == number and guests <= room['canHold'] - room['held']:
            room['held'] += guests
            print('check in succesful')
            checked_in += 1
    # No matching room, or no room with enough free places.
    if checked_in != 1:
        print('sorry, no such places')
def ForthTask(data):
    """Print aggregate statistics (capacity, occupancy, area, items, price)."""
    total_capacity = 0
    total_held = 0
    total_area = 0
    totals = {'wardrobe': 0, 'tv': 0, 'table': 0, 'chair': 0}
    total_price = 0
    for room in data:
        total_capacity += room['canHold']
        total_held += room['held']
        total_area += room['width'] * room['length']
        total_price += room['price']
        for name in totals:
            totals[name] += room['items'][name]
    print(total_capacity)
    print(total_held)
    print(total_area)
    print(totals)
    print(total_price)
main()
|
17,975 | 823c51e481f4b29d2cca3f0f4a94e5a79a73342b | from bs4 import BeautifulSoup
import datetime
import requests
from random import seed
from random import randint
class RandomFlix:
    """Picks a random title from fr.flixable.com (a Netflix catalogue browser)."""

    def __init__(self):
        now = datetime.datetime.now()
        self.base_url = "https://fr.flixable.com"
        self.url = ""
        self.min_year = 1920
        self.max_year = now.year
        self.min_rating = 1
        # Genre display names; array_id holds the site's numeric genre id at
        # the same index.
        self.array_cat = ['action', 'anime', 'comédie', 'documentaire', 'drame', 'français', 'horreur', 'indépendant', 'international', 'jeunesse', 'comédies musicales', 'policier', 'primés', 'romance', 'science-fiction et fantastique', 'stand up', 'thriller']
        self.array_id = [1365, 3063, 6548, 2243108, 5763, 58807, 8711, 7077, 78367, 783, 52852, 5824, 89844, 8883, 1492, 11559, 8933]
        self.genre = -1  # -1 means "no genre filter"

    def print_url(self):
        print(self.url)

    def get_url(self):
        return self.url

    def url_generator(self):
        """Rebuild self.url from base_url, genre and the rating/year filters."""
        url = self.base_url
        print(self.genre)
        if self.genre != -1:
            url += "/genre/" + str(self.genre) + "/"
        url += "?min-rating=" + str(self.min_rating)
        # BUG FIX: the year parameters were previously appended without "="
        # (and min-year with an underscore), producing a malformed query like
        # "&min_year1920&max-year2021" that the site ignored. Emit key=value
        # in the same hyphenated form as min-rating.
        url += "&min-year=" + str(self.min_year)
        url += "&max-year=" + str(self.max_year)
        url += "&order=title"
        self.url = url

    def set_categories(self, search):
        """Set self.genre from a category name; -1 when *search* is unknown."""
        found = False
        for c in self.array_cat:
            if c == search:
                found = True
                self.genre = self.array_id[self.array_cat.index(c)]
        if not found:
            self.genre = -1

    def get_categories(self):
        return self.array_cat

    def process(self, categorie):
        """Fetch the filtered listing and return one randomly chosen Movie."""
        self.set_categories(categorie)
        self.url_generator()
        self.print_url()
        req = requests.get(self.get_url())
        soup = BeautifulSoup(req.content, 'html.parser')
        movie_containers = soup.find_all('div', class_='card-body')
        movies = []
        seed()
        # randint range skips the first and last cards — presumably non-movie
        # entries on this page; confirm against the live markup.
        value = randint(1, len(movie_containers) - 2)
        m = Movie()
        m.parse_movie(movie_containers[value])
        m.set_description(self.base_url)
        m.set_image(self.base_url)
        movies.append(m)
        print(str(value) + '/' + str(len(movie_containers)))
        for movie in movies:
            return movie
class Movie:
    """Lightweight holder for one scraped movie entry."""

    def __init__(self):
        self.title = ""
        self.link = ""
        self.description = ""
        self.image = ""

    def parse_movie(self, html):
        """Pull title, detail link and thumbnail out of one listing card."""
        self.title = html.h5.text
        self.link = html.a['href']
        self.image = html.img['src']

    def set_description(self, url):
        """Fetch the detail page and store its synopsis paragraph."""
        page = BeautifulSoup(requests.get(url + self.link).content, 'html.parser')
        info = page.find_all('div', class_='card card-plain information')
        self.description = info[0].p.text

    def set_image(self, url):
        """Fetch the detail page and store the full-size poster URL."""
        page = BeautifulSoup(requests.get(url + self.link).content, 'html.parser')
        info = page.find_all('div', class_='card card-plain information')
        self.image = info[0].img['data-src']

    def to_string(self):
        """Return "<title>. <description>"."""
        return "{}. {}".format(self.title, self.description)
|
17,976 | cf16e9056799de7d3ebc234af9a384bdc0aa784b | # -*- coding: UTF-8 -*-
#
# Copyright © 2016 Alex Forster. All rights reserved.
# This software is licensed under the 3-Clause ("New") BSD license.
# See the LICENSE file for details.
#
import sys
import os
import time
import functools
import threading
import multiprocessing
import concurrent.futures
import six
from tblib.pickling_support import pickle_traceback, unpickle_traceback
from . import util
_pid = None
""":type: int"""
_thread = None
""":type: threading.Thread"""
_tasks = None
""":type: dict[int, tuple[concurrent.futures.Future, multiprocessing.Connection, multiprocessing.Connection]]"""
def _worker():
    """Daemon thread: polls every forked child's pipes and resolves its Future.

    Runs forever in the parent process; one instance is started lazily per
    process by _future() (see the _pid check there).
    """
    global _pid, _thread, _tasks
    while True:
        # Iterate over a snapshot so entries can be deleted while polling.
        for child_pid, task in six.iteritems(_tasks.copy()):
            # Stored order is (future, result-pipe, exception-pipe).
            future = task[0]
            parent = task[1]
            parent_ex = task[2]
            if parent.poll():
                # Child sent a normal result.
                try:
                    result = parent.recv()
                    parent.close()
                    parent_ex.close()
                    future.set_result(result)
                    del _tasks[child_pid]
                    continue
                except EOFError:
                    pass
                finally:
                    # Reap the child to avoid a zombie; it may already be gone.
                    try:
                        os.waitpid(child_pid, 0)
                    except OSError:
                        pass
            if parent_ex.poll():
                # Child sent a (type, value, pickled traceback) triple.
                try:
                    _, ex_value, ex_traceback = parent_ex.recv()
                    ex_traceback = unpickle_traceback(*ex_traceback)
                    parent.close()
                    parent_ex.close()
                    if six.PY2:
                        ex = ex_value
                        future.set_exception_info(ex, ex_traceback)
                    elif six.PY3:
                        ex = ex_value.with_traceback(ex_traceback)
                        future.set_exception(ex)
                    del _tasks[child_pid]
                    continue
                except EOFError:
                    pass
                finally:
                    try:
                        os.waitpid(child_pid, 0)
                    except OSError:
                        pass
        time.sleep(0.001)
def _future(child_pid, parent, parent_ex):
    """Register a forked child and return a Future tracking its completion.

    :type child_pid: int
    :type parent: multiprocessing.Connection
    :type parent_ex: multiprocessing.Connection
    :rtype: concurrent.futures.Future
    """
    global _pid, _thread, _tasks
    # First use in this process (or after a fork): reset the task table and
    # start the polling thread.
    if _pid != os.getpid():
        _tasks = {}
        _pid = os.getpid()
        _thread = threading.Thread(target=_worker, name='inparallel-{}'.format(os.getpid()))
        _thread.setDaemon(True)
        _thread.start()
    future = concurrent.futures.Future()
    future.set_running_or_notify_cancel()
    # Tuple order (future, parent, parent_ex) matches what _worker unpacks.
    _tasks[child_pid] = (future, parent, parent_ex)
    return future
@util.decorator
def task(fn):
    """Decorator: run *fn* in a forked child process, returning a Future
    immediately instead of blocking for the result."""
    @six.wraps(fn)
    def wrapper(*args, **kwargs):
        global _pid, _thread, _tasks
        # One pipe pair for the result, one for a pickled exception.
        parent, child = multiprocessing.Pipe()
        parent_ex, child_ex = multiprocessing.Pipe()
        child_pid = os.fork()
        if child_pid == 0:
            # --- child process ---
            try:
                child.send(fn(*args, **kwargs))
            except Exception:
                # Ship the exception (with a picklable traceback) to the parent.
                ex_type, ex_value, ex_traceback = sys.exc_info()
                _, ex_traceback = pickle_traceback(ex_traceback)
                child_ex.send((ex_type, ex_value, ex_traceback))
            finally:
                child.close()
                child_ex.close()
                # The polling thread was inherited from the parent; stop it so
                # the child can exit, then bypass normal interpreter teardown.
                if _thread:
                    util.raiseExceptionInThread(_thread, SystemExit)
                    _thread.join()
                os._exit(0)
        # --- parent process ---
        return _future(child_pid, parent, parent_ex)
    return wrapper
|
17,977 | 355165f7432e6a367dc80e90dd1fdb8be01d4e28 | from django.core.management.base import BaseCommand, CommandError
from ledger.address.models import UserAddress
class Command(BaseCommand):
    help = 'Cleans up the oscar user address table.'

    def add_arguments(self, parser):
        # No extra command-line arguments.
        pass

    def handle(self, *args, **options):
        """Delete every UserAddress not referenced by any profile address."""
        try:
            addresses = UserAddress.objects.all()
            for a in addresses:
                # Keep addresses still linked to at least one profile.
                if not a.profile_addresses.all():
                    a.delete()
        except Exception as e:
            raise CommandError(e)
        self.stdout.write(self.style.SUCCESS('Cleaned up oscar addresses.'))
17,978 | d4f0a6128744ebe764b085535d6ec3afd8ba05d8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" 操作Mysql数据库"""
import pymysql
__pyname__ = 'usedb'
__author__ = 'Hedwig'
__date__ = '2017/2/14'
def connect_db(db_name, username, password):
    """Open a connection to a local MySQL server.

    :param db_name: database name
    :param username: database user
    :param password: database password
    :return: an open pymysql connection (utf8mb4, dict cursors)
    """
    return pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user=username,
        password=password,
        db=db_name,
        charset='utf8mb4',
        cursorclass=pymysql.cursors.DictCursor,
    )
# def insert_table(table_name, **kwargs):
# """
# 将数据插入表中
# :param table_name:表名
# :param kwargs
# """
# conn = connect_db()
# cursor = conn.cursor()
# columns = '`' + '`,`'.join(kwargs['columns']) + '`'
# datas = "\'" + "\',\'".join(kwargs['datas']) + "\'"
# sql = 'INSERT INTO `%s` (%s)VALUES (%s)' % (table_name, columns, datas)
# try:
# cursor.execute(sql)
# conn.commit()
# conn.close()
# except pymysql:
# pass
def insert_table(table_name, **kwargs):
    """Stub: echo the target database and table (insertion not implemented).

    Requires a 'db_name' keyword argument (KeyError otherwise).
    """
    target_db = kwargs['db_name']
    print(target_db)
    print(table_name)
insert_table('book',db_name='haha') |
17,979 | 955f7336f4d0d5a06b123658276b9e2596c3ce70 | import socket
import sys
import cfb
import log
# NOTE: Python 2 code ("print" statement syntax) — run with a Python 2 interpreter.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (('', 8888))  # bind on all interfaces, port 8888
print 'Iniciando servidor %s en puerto %s' % server_address
sock.bind(server_address)
sock.listen(1)  # at most one queued connection
while True:
    print >>sys.stderr, 'Esperando por una conexion'
    connection, client_address = sock.accept()
    print "Se ha recibido una conexion desde la direccion:",client_address[0],":",client_address[1]
    while(True):
        # Protocol: the client first sends the message length as text.
        inicio=int(connection.recv(1024))
        if inicio>0: # the conversation starts with the message length; 0 means quit
            print("Se esta recibiendo un mensaje\n")
            # Pad/trim the IV and key to exactly 16 characters.
            iv=(raw_input("Escriba Vector Inicial:\n")+" "*16)[:16]
            k=(raw_input("Escriba clave:\n")+" "*16)[:16]
            cfb.reccfb(k,iv,connection,inicio)
        else:
            break
    # A zero length ends the server loop entirely.
    if(inicio==0):break
# NOTE(review): nesting reconstructed from a flattened listing — the placement
# of this epilogue relative to the outer loop should be confirmed.
print("El cliente se ha desconectado, saliendo..")
connection.close()
17,980 | 987313a5238d93b405fc4ae7952fb68a77ab5b19 | import random
print("input number(1 or 2 or 3)")
n = int(input())  # problem-size selector: 1/2/3 -> 4/10/20 factors
eva = ""
pro = ""
setup = ""
a = ""  # NOTE(review): a and b are never used again — leftover scratch variables?
b = ""
# Two independent (int, float) evaluation pairs...
a1 = random.randint(0,100)
b1 = random.uniform(0.1,2.0)
a2 = random.randint(0,100)
b2 = random.uniform(0.1,2.0)
# ...and a third pair bounded above by the smaller of each preceding pair.
a3 = random.randint(0,min(a1,a2))
b3 = random.uniform(0.1,min(b1,b2))
eva = "{}\t{}\t{}\t{}\t{}\t{}".format(a1,a2,a3,b1,b2,b3)
if(n == 1):
    k = 4
elif(n == 2):
    k = 10
elif(n == 3):
    k = 20
else:
    k = 0  # unknown selector: emit no production/setup factors
for i in range(k):
    pro += str(random.randint(1,10000))
    setup += str(random.randint(1,10000))
    # NOTE(review): always true (i is never -1), so a trailing "\t" is always
    # appended — was "i != k - 1" intended?
    if(i != -1):
        pro += "\t"
        setup += "\t"
print("EVALUATIONFACTOR\t{}".format(eva))
print("PRODUCTIONFACTOR\t{}".format(pro))
print("SETUPFACTOR\t{}".format(setup))
17,981 | 3506eaa0ef5d917b66547e0d9c206690afd2d2b7 | # coding=utf-8
# Python爬虫——爬取豆瓣top250完整代码
# https://www.cnblogs.com/zq-zq/p/13974807.html
# 目录操作
import os
# 正则表达式
import re
# 访问SSL页面
import ssl
# 模拟阻塞
import time
# 获取URL得到html文件
import urllib.request as req
# Excel表格操作
import xlwt
# 解析网页
from bs4 import BeautifulSoup as bf
ssl._create_default_https_context = ssl._create_unverified_context
# 各种目录和文件名
base_url = 'https://movie.douban.com/top250?start='
base_path = os.environ['HOME'] + '/Downloads/HelloPython/douban250/'
base_date = '20210908-'
save_html_path = base_path + '' # html/
save_text_path = base_path + '' # text/
save_excel_path = base_path + '' # excel/
save_html_file = save_html_path + base_date + 'douban250-'
save_text_file = save_text_path + base_date + 'douban250.txt'
save_excel_file = save_excel_path + base_date + 'douban250.xls'
# Main program
def main():
    """Run the full pipeline: make dirs, (optionally) crawl, parse, save."""
    print('--------0-创建目录--------')
    make_dirs(save_html_path)
    make_dirs(save_text_path)
    make_dirs(save_excel_path)
    print('--------1-爬取网页,从豆瓣上获取html文件并保存到本地目录下,该方法成功执行一次即可,保存html,接下来本地操作--------')
    # save_douban_html()  # network crawl; needs to succeed only once, then everything works offline
    print('--------2-解析数据,逐个解析保存在本地的html文件--------')
    datas = get_data()
    print('--------3-保存数据,保存爬取数据到本地txt文件--------')
    save_data_txt(datas, save_text_file)
    print('--------4-保存数据,保存爬取数据到本地excel文件--------')
    save_data_excel(datas, save_excel_file)
# 0 - create a directory (and parents) if it does not exist yet
def make_dirs(dir):
    """Ensure *dir* exists, printing whether it had to be created."""
    if os.path.isdir(dir):
        print("目录已存在,不需要创建 | dir = " + dir)
    else:
        print("目录不存在,创建目录 | dir = " + dir)
        os.makedirs(dir)
# 1 - crawl: fetch each top-250 page from douban and save its html locally.
# Needs to succeed only once; later runs work offline (frequent crawling may
# be flagged as abuse by douban).
def save_douban_html():
    for i in range(0, 250, 25):
        print('----爬取第' + str((i // 25) + 1) + '页----')
        # Base url plus the paging offset, e.g. ...?start=25 for page 2.
        url = base_url + str(i)
        html = ask_url(url)
        # Save as page1.html, page2.html, ... ("//" is integer division).
        write_html(save_html_file + 'page' + str((i // 25) + 1) + '.html', html)
        # Brief pause between requests.
        time.sleep(3)
# Fetch a URL and return its decoded HTML (empty string on any failure).
def ask_url(url):
    """Request *url* with a browser-like User-Agent; return the HTML or ''."""
    # Browser-like header so the server treats us as a regular visitor.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36 SE 2.X MetaSr 1.0"
    }
    html = ""
    try:
        request = req.Request(url, headers=headers)
        # Timeout keeps one slow page from stalling the whole crawl.
        response = req.urlopen(request, timeout=10)
        html = response.read().decode('utf-8')
    except Exception as error:
        # BUG FIX: the original did '"..." + error', which raises TypeError
        # (str + Exception) inside the handler and masked the real failure.
        print("Ask_url is Error : " + str(error))
    return html
# Persist fetched html locally so repeated runs do not re-hit the site.
def write_html(path, html):
    """Write *html* (stringified) to *path* as UTF-8 text."""
    with open(path, 'w', encoding='utf-8') as out:
        out.write(str(html))
# 2 - parse: walk every locally saved html file and extract movie fields.
def get_data():
    """Return a list of per-movie field lists, in crawl order."""
    data_list = []
    # Same paging scheme as the crawl: page (i // 25) + 1 holds ranks i..i+24.
    for i in range(0, 250, 25):
        print('----读取第' + str((i // 25) + 1) + '页----')
        # Open in binary mode and let BeautifulSoup handle decoding.
        html = open(save_html_file + 'page' + str((i // 25) + 1) + '.html', 'rb')
        bs = bf(html, 'html.parser')
        # Each movie lives in a <div class="item"> block
        # (class_ because "class" is a Python keyword).
        f_list = bs.find_all('div', class_="item")
        for f in f_list:
            data = []
            # Field order matches the excel header:
            # title, rank, rating, votes, link, cover, summary, alt titles.
            data.append(set_film(str(f), re.compile(r'<span class="title">(.*?)</span>')))
            data.append(set_film(str(f), re.compile(r'em class="">(.*?)</em>')))
            data.append(set_film(str(f), re.compile(r'<span class="rating_num".*>(.*?)</span>')))
            data.append(set_film(str(f), re.compile(r'<span>(\d*人)评价</span>')))
            data.append(set_film(str(f), re.compile(r'<a href="(.*?)">')))
            data.append(set_film(str(f), re.compile(r'<img.*src="(.*?)"', re.S)))  # re.S: "." also matches newlines
            data.append(set_film(str(f), re.compile(r'<span class="inq">(.*?)</span>')))
            data.append(set_film(str(f), re.compile(r'<span class="other">(.*?)</span>')))
            data_list.append(data)
        html.close()
    return data_list
# Some movies are missing a field; return "-" when the pattern finds nothing.
def set_film(content, file):
    """Return the first match of pattern *file* in *content*, or "-"."""
    # Run findall once; the original ran it twice (length check + value).
    matches = re.findall(file, content)
    return str(matches[0]) if matches else "-"
# 3 - save scraped data to a local txt file
def save_data_txt(datas, save_file):
    """Write every movie's fields line-by-line, separated by divider lines,
    then echo the resulting file to the console."""
    with open(save_file, 'w', encoding='utf-8') as out:
        for data in datas:
            for field in data:
                out.write(field + '\n')
            out.write(split(10) + '\n')
    read_file(save_file)
# Build a divider line of (num - 1) eight-dash segments.
def split(num):
    return "--------" * (num - 1)
# Print a text file's entire contents to the console.
def read_file(file_name):
    with open(file_name, 'r', encoding='utf-8') as fh:
        print(fh.read())
# 4 - save scraped data to a local excel file
def save_data_excel(datas, save_file):
    """Write the movie table to *save_file* (.xls) via xlwt."""
    excel = xlwt.Workbook(encoding='utf-8')
    sheet = excel.add_sheet('top250')
    # Three cell styles: set_font(size, bold, centered).
    style_font_title = set_font(240, True, True)
    style_font_content_center = set_font(220, False, True)
    style_font_content_left = set_font(220, False, False)
    # Column widths (xlwt units: 1/256 of a character width).
    width_c = [256 * 20, 256 * 6, 256 * 6, 256 * 12, 256 * 42, 256 * 72, 256 * 68, 256 * 50]
    for i in range(0, len(width_c)):
        sheet.col(i).width = width_c[i]
    # Header row.
    titles = ['电影', '排行', '评分', '评价', '链接', '封面', '概括', '别名']
    index = 0
    for title in titles:
        # write(row, column, value, style)
        sheet.write(0, index, title, style_font_title)
        index += 1
    # Data rows start below the header.
    index_r = 1
    # One movie per outer iteration...
    for data in datas:
        index_c = 0
        # ...one field per inner iteration.
        for item in data:
            # First four columns centered; the rest left-aligned (default).
            if index_c <= 3:
                sheet.write(index_r, index_c, item, style_font_content_center)
            else:
                sheet.write(index_r, index_c, item, style_font_content_left)
            index_c += 1
        index_r += 1
    # Write the workbook to disk.
    excel.save(save_file)
# Build an xlwt cell style with the given font size/weight and alignment.
def set_font(size, bold, center):
    """Return an xlwt.XFStyle; when *center* is true, center both axes."""
    style = xlwt.XFStyle()
    style.font.height = size  # font size (xlwt height units)
    style.font.bold = bold
    if center:
        style.alignment.horz = 0x02  # horizontally centered
        style.alignment.vert = 0x01  # vertically centered
        style.alignment.wrap = False  # no automatic cell wrapping
    return style
# 主程序入口
if __name__ == '__main__':
main()
|
17,982 | dd982ad5018cedd3206b9d3f1a8572d74d65fbe3 | import sys
# Refuse to run on unsupported interpreter major versions.
python_version = sys.version_info[0]
if python_version != 2 and python_version != 3:
    print('Error Python version {0}, quit'.format(python_version))
    # BUG FIX: exit(0) signalled success to the caller; use sys.exit(1) so the
    # failure is detectable (and sys.exit works even without the site module).
    sys.exit(1)
# Grammatical units, smallest to largest.
grammatical_units = [
    'word',
    'phrase',
    'clause',
    'sentence',
]
# Phrase types.
phrase = [
    'verb phrase',
    'noun phrase',
    'adjective phrase',
    'adverb phrase',
    'prepositional phrase'
]
# Top-level word classes (parts of speech).
word_class_level_01 = [
    'Verb',
    'Noun',
    'Adjective',
    'Adverb',
    'Preposition',
    'Determiner',
    'Pronoun',
    'Conjunction',
]
word_class_level_02 = {
'Verb' :[
{'Ordinary-verb' :[]},
{'Auxiliary-verb' :[
'be', 'is', 'am', 'are', 'was', 'were',
'have', 'has', 'had',
'can', 'could',
'dare',
'do', 'did', ''
'might', 'may',
'need',
'would', 'will',
'should', 'shall',
'ought',
'must',
]},
],
'Adverb' :[
'Manner-adverb',
'Frequency-adverb',
'Place-adverb',
'Time-adverb',
{'Linking-adverb' :[
{'Addition' :[
'also',
'again',
'another',
'thus',
'furthermore',
'thereafter',
'in addition',
'moreover',
'additionally',
'besides',
'finally',
'meanwhile',
'on top of that',
]},
{'Detail' :[
'namely',
]},
{'Alternative' :[
'otherwise',
'rather',
]},
{'Cause-Effect' :[
'therefore',
'consequently',
'as a consequence',
'as a result',
'hence'
]},
{'Comparison' :[
'likewise',
'in the same way',
'similarly',
'in contrast',
'unlike',
'just like',
'jsut as',
]},
{'Condition' :[
'otherwise',
'in the event',
'anyway'
]},
{'Contrast' :[
'nevertheless',
'nonetheless',
'on the other hand',
'in contrast to',
'however',
'instead',
]},
{'Emphasis' :[
'indeed',
'in fact',
]},
]},
],
'Determiner' :[
{'Article-determiner' :[
'a', 'the', 'an'
]},
{'Possessive-determiner' :[
'my', 'your', 'his', 'her'
]},
{'Demonstrative-determiner' :[
'this', 'that'
]},
{'Quantifier-determiner' :[
'all',
]},
],
'Conjunction' :[
{'Addition' :['and']},
{'Alternative' :['or']},
{'Cause-Effect' :['because', 'Accordingly',]},
{'Comparison' :[]},
{'Condition' :['if']},
{'Contrast' :['']},
{'Emphasis' :[]},
],
}
sentence_element = [
'Subject',
'Verb',
'Object',
'Complement',
'Adverbial',
]
meaning_indication = [
'object',
'person',
'place',
'time',
'action',
'description',
'behavior',
]
verb_abbreviations = {
"'s" :'is', # is was has
"'re" :'are', # are were
"'m" :'am',
"'ve" :'have',
"'d" :'had', # had would
"'ll" :'will',
}
phrase_abbreviations = {
'FYI' : 'for your information',
'ASAP' : 'as soon as possible',
}
pos_full_names = {
# see nltk.help.upenn_tagset()
'$' : 'dollar',
"'" : 'quotation mark',
'"' : 'double quotation mark',
'(' : 'opening parenthesis',
')' : 'closing parenthesis',
'[' : 'opening square',
']' : 'closing square',
'{' : 'opening bracket',
'}' : 'closing bracket',
',' : 'comma',
'--' : 'dash',
'.' : 'dot sentence terminator',
'!' : 'exclamation sentence terminator',
'?' : 'question sentence terminator',
':' : 'colon',
';' : 'semicolon',
'...' : 'ellipsis',
'CC' : 'conjunction', # & 'n and both but either et for less minus neither nor or plus so therefore times v. versus vs. whether yet
'CD' : 'numeral',
'DT' : 'determiner', # all an another any both del each either every half la many much nary neither no some such that the them these this those
'EX' : 'existential there', # there
'FW' : 'foreign word',
'IN' : 'preposition or conjunction, subordinating', # astride among uppon whether out inside pro despite on by throughout below within for towards near behind atop around if like until below next into if beside ...
'JJ' : 'adjective',
'JJR' : 'comparative adjective',
'JJS' : 'superlative adjective',
'LS' : 'list item marker',
'MD' : 'modal auxiliary', # can cannot could couldn't dare may might must need ought shall should shouldn't will would
'NN' : 'noun',
'NNP' : 'proper noun',
'NNPS' : 'proper plural noun',
'NNS' : 'plural noun',
'PDT' : 'pre-determiner', # all both half many quite such sure this
'POS' : 'genitive marker', # 's
'PRP' : 'pronoun', # hers herself him himself hisself it itself me myself one oneself ours ourselves ownself self she thee theirs them themselves they thou thy us
'PRP$' : 'possessive pronoun', # her his mine my our ours their thy your
'RB' : 'adverb',
'RBR' : 'comparative adverb',
'RBS' : 'superlative adverb',
'RP' : 'particle', # aboard about across along apart around aside at away back before behind by crop down ever fast for forth from go high i.e. in into just later low more off on open out over per pie raising start teeth that through under unto up up-pp upon whole with you
'SYM' : 'symbol',
'TO' : "to",
'UH' : 'interjection',
'VB' : 'verb', # do
'VBD' : 'past verb did', # did
'VBG' : 'gerund verb doing', # do-ing
'VBN' : 'past participle verb done', # done
'VBP' : 'present verb do', # do
'VBZ' : 'present verb does', # does
'WDT' : 'wh-determiner', # that what whatever which whichever
'WP' : 'wh-pronoun', # that what whatever whatsoever which who whom whosoever
'WP$' : 'whose', # possessive WH-pronoun, whose
'WRB' : 'wh-adverb', # how however whence whenever where whereby whereever wherein whereof why
}
pos_short_names = {
# see nltk.help.upenn_tagset()
'$' : '$',
"'" : "'",
'"' : '"',
'(' : '(',
')' : ')',
'[' : '[',
']' : ']',
'{' : '{',
'}' : '}',
',' : ',',
'--' : '--',
'.' : '.',
'!' : '!',
'?' : '?',
':' : ':',
';' : ';',
'...' : '...',
'CC' : 'conjunction', # & 'n and both but either et for less minus neither nor or plus so therefore times v. versus vs. whether yet
'CD' : 'number',
'DT' : 'determiner', # all an another any both del each either every half la many much nary neither no some such that the them these this those
'EX' : 'there', # there
'FW' : 'foreign',
'IN' : 'preposition', # astride among uppon whether out inside pro despite on by throughout below within for towards near behind atop around if like until below next into if beside ...
'JJ' : 'adjective',
'JJR' : 'adjective-er',
'JJS' : 'adjective-est',
'LS' : 'list',
'MD' : 'auxiliary', # can cannot could couldn't dare may might must need ought shall should shouldn't will would
'NN' : 'object',
'NNP' : 'Object',
'NNPS' : 'Objects',
'NNS' : 'objects',
'PDT' : 'pre-determiner', # all both half many quite such sure this
'POS' : 'genitive', # 's
'PRP' : 'it', # hers herself him himself hisself it itself me myself one oneself ours ourselves ownself self she thee theirs them themselves they thou thy us
'PRP$' : 'its', # her his mine my our ours their thy your
'RB' : 'adverb',
'RBR' : 'adverb-er',
'RBS' : 'adverb-est',
'RP' : 'particle', # aboard about across along apart around aside at away back before behind by crop down ever fast for forth from go high i.e. in into just later low more off on open out over per pie raising start teeth that through under unto up up-pp upon whole with you
'SYM' : 'symbol',
'TO' : "to",
'UH' : 'interjection',
'VB' : 'do', # do
'VBD' : 'did', # did
'VBG' : 'doing', # do-ing
'VBN' : 'done', # done
'VBP' : 'do', # do
'VBZ' : 'does', # does
'WDT' : 'wh-determiner', # that what whatever which whichever
'WP' : 'wh-pronoun', # that what whatever whatsoever which who whom whosoever
'WP$' : 'whose', # possessive WH-pronoun, whose
'WRB' : 'wh-adverb', # how however whence whenever where whereby whereever wherein whereof why
}
word_replacement = {
'(' :'(',
')' :')',
'[' :'[',
']' :']',
'{' :'{',
'}' :'}',
'am' :'be',
'is' :'be',
'are' :'be',
'was' :'be',
'were' :'be',
'be' :'be',
'being' :'being',
'been' :'been',
'has' :'have',
'have' :'have',
'had' :'have',
#'the' :'the',
#
'and' :'and',
'or' :'or',
'but' :'but',
'nor' :'nor',
'so' :'so',
'yet' :'yet',
'of' :'of',
'for' :'for',
'by' :'by',
#
'on' :'on',
'at' :'at',
'in' :'in',
#
'January' :'time',
'February' :'time',
'March' :'time',
'April' :'time',
'May' :'time',
'June' :'time',
'July' :'time',
'August' :'time',
'September' :'time',
'October' :'time',
'November' :'time',
'December' :'time',
#
'Jan.' :'time',
'Feb.' :'time',
'Aug.' :'time',
'Sept.' :'time',
'Oct.' :'time',
'Nov.' :'time',
'Dec.' :'time',
#
'Monday' :'time',
'Tuesday' :'time',
'Wednesday' :'time',
'Thursday' :'time',
'Friday' :'time',
'Saturday' :'time',
'Sunday' :'time',
}
conj_replacement = {
#
'though' :'though',
'although' :'although',
'even' :'even', #even though
'while' :'while',
#
'only' :'only',
'as' :'as',
'lest' :'lest',
#
'if' :'if',
'whether' :'whether',
'though' :'though',
'although' :'although',
'unless' :'unless',
'until' :'until',
#
'because' :'because',
'therefore' :'therefore',
'thus' :'thus',
'since' :'since',
#'why' :'why',
#
'during' :'during',
'till' :'till',
'until' :'until',
#
#'that' :'that',
}
conj_subordinating_chunk_stack = [
# Concession
("<even><though><SVO>", "Chunk Concession"),
("<though><SVO>", "Chunk Concession"),
("<although><SVO>", "Chunk Concession"),
("<while><SVO>", "Chunk Concession"),
# Condition
("<even><if><SVO>", "Chunk Condition"),
("<only><if><SVO>", "Chunk Condition"),
("<in><case><SVO>", "Chunk Condition"),
("<provided><that><SVO>", "Chunk Condition"),
("<assuming><that><SVO>", "Chunk Condition"),
("<unless><SVO>", "Chunk Condition"),
("<until><SVO>", "Chunk Condition"),
("<least><SVO>", "Chunk Condition"),
("<till><SVO>", "Chunk Condition"),
# Comparison
("<rather><than><SVO>", "Chunk Comparison"),
("<as><much><as><SVO>", "Chunk Comparison"),
("<than><SVO>", "Chunk Comparison"),
("<whether><SVO>", "Chunk Comparison"),
("<whereas><SVO>", "Chunk Comparison"),
# Time
("<as><long><as><SVO>", "Chunk Time"),
("<as><soon><as><SVO>", "Chunk Time"),
("<by><the><time><SVO>", "Chunk Time"),
("<now><that><SVO>", "Chunk Time"),
("<after><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<before><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<once><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<since><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<till><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<until><time|Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Time"),
("<when><SVO>", "Chunk Time"),
("<whenver><SVO>", "Chunk Time"),
("<while><SVO>", "Chunk Time"),
# Reason
("<in><order><that><SVO>", "Chunk Reason"),
("<so><that><SVO>", "Chunk Reason"),
("<because><SVO>", "Chunk Reason"),
("<because><of><Object|object|Ojbects|objects|ObjectG|SVO>", "Chunk Reason"),
("<why><SVO>", "Chunk Reason"),
("<since><SVO>", "Chunk Reason"),
# Adjective
("<that><SVO>", "Chunk Adjective"),
("<what><SVO>", "Chunk Adjective"),
("<which><SVO>", "Chunk Adjective"),
("<whatever><SVO>", "Chunk Adjective"),
("<whichever><SVO>", "Chunk Adjective"),
# Pronoun
("<who><SVO>", "Chunk Pronoun"),
("<whom><SVO>", "Chunk Pronoun"),
("<whose><SVO>", "Chunk Pronoun"),
("<whoever><SVO>", "Chunk Pronoun"),
("<whomever><SVO>", "Chunk Pronoun"),
# Manner
("<as><though><SVO>", "Chunk Manner"),
("<as><if><SVO>", "Chunk Manner"),
("<how><SVO>", "Chunk Manner"),
# Place
("<where><SVO>", "Chunk Place"),
("<wherever><SVO>", "Chunk Place"),
]
conj_adv_chunk_stack = [
# And
("<also><,>*", "Chunk And"),
("<and><,>*", "Chunk And"),
("<besides><,>*", "Chunk And"),
("<furthermore><,>*", "Chunk And"),
("<likewise><,>*", "Chunk And"),
("<moreover><,>*", "Chunk And"),
# But
("<however><,>*", "Chunk But"),
("<nevertheless><,>*", "Chunk But"),
("<nonetheless><,>*", "Chunk But"),
("<still><,>*", "Chunk But"),
("<conversely><,>*", "Chunk But"),
("<instead><,>*", "Chunk But"),
("<otherwise><,>*", "Chunk But"),
("<rather><,>*", "Chunk But"),
# So
("<accordingly><,>*", "Chunk So"),
("<consequently><,>*", "Chunk So"),
("<hence><,>*", "Chunk So"),
("<meanwhile><,>*", "Chunk So"),
("<then><,>*", "Chunk So"),
("<therefore><,>*", "Chunk So"),
("<thus><,>*", "Chunk So"),
]
#Correlative Conjunctions
#as . . . as
#just as . . . so
#both . . . and
#hardly . . . when
#scarcely . . . when
#either . . . or
#neither . . . nor
#
#if . . . then
#not . . . but
#what with . . . and
#whether . . . or
#not only . . . but also
#no sooner . . . than
#rather . . . than
# Chunking patterns, most specific first.
# BUG FIX: the original list was missing commas after two entries, so Python's
# implicit string-literal concatenation silently merged three patterns into a
# single unusable element.
grammar_stack = [
    "<have><to><do>",
    "<time>*<,>*<number>+",
    "<adjective.*>*<o|Object.*>+<number>*",
    "<to><do>",
    "<have><been><doing>",
    "<be><doing>",
    "<have><done>",
    "<be>",
]
grammar = r'''
time : {<time>*<,>*<number>+}
verb : {<have><to><do>}
to-do : {<to><do>}
verb : {<have><been><doing>}
verb : {<be><doing>}
verb : {<have><done>}
verb : {<be>}
object : {<.*determiner>*<adjective.*>*<o|Object.*>+<number>*}
object : {<o|Object.*>}
NP: {<DET|PRON>?<ADJ>*<NOUNGROUP>+}
VP: {<ADV>*<PRT>*<ADV>*<VERBGROUP>+<ADV>*}
VPP: {<ADP>*<VERB><ADP>*}
PRTPHRS: {<PRT><NP>}
SENT: {<NP|DET><VP><NP>*<PRTPHRS>*}
CONJSENT: {<CONJ><SENT>}
''' |
17,983 | da009c0812ffba293aa2a98b4c4e338a9abc17a2 | import asyncio
from dns.asyncresolver import Resolver
from io import StringIO
from middlewared.service import private, Service
from middlewared.schema import accepts, returns, IPAddr, Dict, Int, List, Str, Ref, OROperator
from middlewared.utils import filter_list
class DNSClient(Service):
    """Async DNS lookup service built on dnspython's async resolver."""

    class Config:
        private = True

    @private
    async def get_resolver(self, options):
        """Return a Resolver honouring the caller's nameservers/timeout options."""
        if options['nameservers']:
            # Build an in-memory resolv.conf so caller-supplied nameservers
            # are used instead of the system configuration.
            mem_resolvconf = StringIO()
            for n in options['nameservers']:
                mem_resolvconf.write(f"nameserver {n}\n")
            mem_resolvconf.seek(0)
            r = Resolver(mem_resolvconf)
        else:
            r = Resolver()
        r.timeout = options['timeout']
        return r

    @private
    async def resolve_name(self, name, rdtype, options):
        """Resolve *name* as record type *rdtype*; PTR goes through reverse lookup."""
        r = await self.get_resolver(options)
        if rdtype == 'PTR':
            ans = await r.resolve_address(
                name,
                lifetime=options['lifetime']
            )
        else:
            ans = await r.resolve(
                name, rdtype,
                lifetime=options['lifetime']
            )
        return ans

    @accepts(Dict(
        'lookup_data',
        List('names', items=[Str('name')], required=True),
        Str('record_type', default='A', enum=['A', 'AAAA', 'SRV']),
        Dict(
            'dns_client_options',
            List('nameservers', items=[IPAddr("ip")], default=[]),
            Int('lifetime', default=12),
            Int('timeout', default=4),
            register=True
        ),
        Ref('query-filters'),
        Ref('query-options'),
    ))
    @returns(OROperator(
        List(
            'rdata_list_srv',
            items=[
                Dict(
                    Str('name'),
                    Int('priority'),
                    Int('weight'),
                    Int('port'),
                    Str('class'),
                    Str('type'),
                    Int('ttl'),
                    Str('target'),
                )
            ],
        ),
        List(
            'rdata_list',
            items=[
                Dict(
                    Str('name'),
                    Str('class'),
                    Str('type'),
                    Int('ttl'),
                    IPAddr('address'),
                )
            ],
        ),
        name='record_list',
    ))
    async def forward_lookup(self, data):
        """Resolve every requested name concurrently; return flattened rdata dicts."""
        output = []
        options = data['dns_client_options']
        rtype = data['record_type']
        # Fan out all lookups at once; gather preserves input order.
        results = await asyncio.gather(*[
            self.resolve_name(h, rtype, options) for h in data['names']
        ])
        for ans in results:
            # NOTE(review): only the first answer section is consulted — confirm
            # multi-rrset responses are not expected here.
            ttl = ans.response.answer[0].ttl
            name = ans.response.answer[0].name.to_text()
            if rtype == 'SRV':
                entries = [{
                    "name": name,
                    "priority": i.priority,
                    "weight": i.weight,
                    "port": i.port,
                    "class": i.rdclass.name,
                    "type": i.rdtype.name,
                    "ttl": ttl,
                    "target": i.target.to_text()
                } for i in ans.response.answer[0].items]
            else:
                entries = [{
                    "name": name,
                    "class": i.rdclass.name,
                    "type": i.rdtype.name,
                    "ttl": ttl,
                    "address": i.address,
                } for i in ans.response.answer[0].items]
            output.extend(entries)
        return filter_list(output, data['query-filters'], data['query-options'])

    @accepts(Dict(
        'lookup_data',
        List("addresses", items=[IPAddr("address")], required=True),
        Ref('dns_client_options'),
        Ref('query-filters'),
        Ref('query-options'),
    ))
    @returns(List(
        'rdata_list',
        items=[
            Dict(
                Str('name'),
                Str('class'),
                Str('type'),
                Int('ttl'),
                Str('target'),
            )
        ]
    ))
    async def reverse_lookup(self, data):
        """PTR-resolve each address concurrently; return flattened rdata dicts."""
        output = []
        options = data['dns_client_options']
        results = await asyncio.gather(*[
            self.resolve_name(i, 'PTR', options) for i in data['addresses']
        ])
        for ans in results:
            ttl = ans.response.answer[0].ttl
            name = ans.response.answer[0].name.to_text()
            entries = [{
                "name": name,
                "class": i.rdclass.name,
                "type": i.rdtype.name,
                "ttl": ttl,
                "target": i.target.to_text(),
            } for i in ans.response.answer[0].items]
            output.extend(entries)
        return filter_list(output, data['query-filters'], data['query-options'])
|
17,984 | 15edaac83061024c36b573c084905f21b3e7cb78 | import os, sys, re, warnings, time, random
import chardet
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import gevent
from gevent import monkey
import google
import bing_client
import nltk
import sunburnt
from classification_common import *
from shared.config.Config import Config
from shared.lwbd.Solr import Solr
import simplejson
monkey.patch_all()
import urllib2
# Solr endpoint used by lucid_sunburnt_docs below.
SOLR_HOST = 'ec2-50-17-144-252.compute-1.amazonaws.com'
SOLR_PORT = '8888'
SOLR_COLLECTION = 'sterrell_tmp'
SOLR_COLLECTION_URL = 'http://'+SOLR_HOST+':'+SOLR_PORT+'/solr/'+SOLR_COLLECTION
# Translation table mapping every punctuation character to a space
# (Python 2 str API: string.maketrans).
replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))
def clean_pages(pages):
    """Lower-case, charset-sniff, strip HTML and punctuation from raw pages.

    Returns a list of space-joined alphabetic-token documents; pages whose
    charset cannot be handled are skipped with a console warning.
    """
    documents = []
    for i in range(len(pages)):
        doc = pages[i].lower()
        try:
            # Sniff the charset per document; fall back to utf-8.
            char_detection = chardet.detect(doc)
            charset = char_detection['encoding']
            if charset == None:
                charset = 'utf-8'
            decoded_text = doc.decode(charset, errors='ignore')
            text = nltk.clean_html(decoded_text)
            utf8_text = text.encode('utf-8')
            # Map punctuation to spaces so tokens split cleanly.
            text_page = utf8_text.translate(replace_punctuation)
            words = re.findall('[a-z]+', text_page)
            #check words???
            documents.append( ' '.join(words) )
            # Progress heartbeat every 100 cleaned documents.
            if len(documents) % 100 == 0:
                print '\t', len(documents), 'of', len(pages)
        except LookupError as le:
            print le #Bad/unknown charset
        except UnicodeError as inst:
            print "string is not UTF-8"
            print inst
    return documents
def download_doc(urls):
    """Fetch each URL and return the bodies re-encoded as utf-8 strings.

    Uses the HTTP Content-Type charset when present, otherwise sniffs with
    chardet; failures are logged and the URL is skipped.
    """
    pages = []
    for url in urls:
        try:
            req = urllib2.Request(url)
            response = urllib2.urlopen(req)
            page = response.read()
            #Try and use http header for decoding
            charset = response.headers.getparam('charset')
            if charset == None:
                char_detection = chardet.detect(page)
                charset = char_detection['encoding']
                if charset == None:
                    charset = 'utf-8'
            decoded_page = page.decode(charset, errors='ignore')
            pages.append(decoded_page.encode('utf-8', errors='ignore'))
        except Exception as inst:
            print 'Error pulling document', url
            print inst
    return pages
def web_query(query, num_docs):
    """Run *query* against Bing and return up to *num_docs* result URLs.

    The Google search path is kept commented out (it required user-agent
    rotation and long sleeps to avoid throttling).
    """
    results = []
    #uas = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    #       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2']
    #google.Request.add_header('User-Agent', uas[ random.randint(0, len(uas)-1) ])
    #print 'Searching Google'
    #for r in google.search(query, stop=num_docs): #Google search
    #    results.append(r)
    #    print 'Sleeping 30 secs'
    #    time.sleep(30)
    print 'Searching Bing'
    for r in bing_client.bing_search(query, 'Web', num_docs):
        results.append(r)
    return results
def partition_list(l, n):
    """Split *l* into consecutive chunks of length *n* (last may be shorter)."""
    return [l[start:start + n] for start in range(0, len(l), n)]
def google_training_docs(query, num_docs):
    """Search the web for *query* and download result pages concurrently.

    Downloads are fanned out with gevent, five URLs per greenlet.
    """
    print 'Running web search,', query, num_docs
    urls = web_query(query, num_docs)
    print 'Web search returned', len(urls), 'results'
    pages = []
    try:
        jobs = [gevent.spawn(download_doc, urlset) for urlset in partition_list(urls,5)]
        gevent.joinall(jobs)
        for job in jobs:
            pages += job.value
    except Exception as inst:
        print inst
    finally:
        # NOTE(review): gevent.shutdown() was deprecated/removed in newer
        # gevent releases — confirm the pinned version still provides it.
        gevent.shutdown()
    return pages
def lucid_training_docs(query, num_records=1000):
    """Fetch up to *num_records* page bodies matching *query* from Solr.

    Multi-word queries are AND-joined over the `content` field.
    """
    query_list = query.split()
    if len(query_list) == 1: #single term
        term_query = 'content:'+query
    else:
        term_query = ' AND '.join(['content:'+q for q in query_list])
    #collection = 'crawl_from_long_fashion_blogs_list'
    collection = 'fashion_crawl_try_20131015'
    field_string = ','.join( ('content', 'score') )
    config = Config()
    config.http_debug = True
    solr = Solr(config)
    json_response = solr.query_solr(
        collection=collection,
        query=term_query,
        field_string=field_string,
        start=0,
        rows=num_records)
    response = simplejson.loads(json_response)
    # NOTE(review): the result of this lookup is discarded — it was probably
    # meant to be logged or checked.
    response['response']['numFound']
    docs = response['response']['docs']
    pages = []
    for i in range(len(docs)):
        page = docs[i]['content'][0]
        pages.append(page.encode('utf-8') )
    return pages
def lucid_sunburnt_docs(query, num_records=1000):
si = sunburnt.SolrInterface(SOLR_COLLECTION_URL)
solq = si.query(query)
solq = solq.field_limit(['body'])
chunk_size = 100
start_index = 0
solq = solq.paginate(start=start_index, rows=chunk_size)
response = solq.execute()
num_results = response.result.numFound
print 'Found', num_results
pages = []
while len(pages) < num_results:
results = list(response)
for result in results:
page = result['body'][0]
pages.append(page.encode('utf-8'))
if len(pages) % 1000 == 0:
print 'retrieved', len(search_results)
start_index += chunk_size
solq = solq.paginate(start=start_index, rows=chunk_size)
response = solq.execute()
if response.result.numFound == 0:
break
return pages
def build_corpus(concept, concept_query, training_generator, filetype, num_docs=100):
    """Generate, clean and persist a training corpus for *concept*.

    *training_generator* is a callable (query, num_docs) -> raw pages.
    """
    pages = training_generator(concept_query, num_docs)
    print 'Cleaning HTML'
    documents = clean_pages(pages)
    print 'Writing', concept, 'training file', ' type=', filetype, len(documents), 'documents'
    store_concept_training(concept, documents, filetype)
def usage_exit(name):
    """Print CLI usage (argv[0] passed as *name*) and exit with status 1."""
    print 'usage:', name, '<concept name> <concept query>'
    sys.exit(1)
if __name__ == '__main__':
    if len(sys.argv) != 3:
        usage_exit(sys.argv[0])
    universe = 'fashion'
    # Toggle between Bing web search (250 docs) and local Solr (500 docs).
    use_web_search = True
    concept = sys.argv[1]
    concept_query = sys.argv[2]
    print 'Building corpus: concept =', concept, '; query =', concept_query
    if use_web_search:
        filetype = universe+'-bing'
        training_generator = google_training_docs
        build_corpus(concept, concept_query, training_generator, filetype, 250)
    else:
        filetype = universe+'-lucid'
        training_generator=lucid_sunburnt_docs
        build_corpus(concept, concept_query, training_generator, filetype, 500)
|
17,985 | e7f74aa58b7b7ada82171251bb858983f7c150d2 | # pylint: disable=broad-except,invalid-name
"""
Flask API Utils
"""
import os
import sys
from http import HTTPStatus
import functools
import simplejson as json
import logging
import base64
import socket
from datetime import datetime
sys.path.insert(0, os.path.dirname(
os.path.realpath(__file__)) + '/../../')
logger = logging.getLogger(__name__)
def get_fqdn():
    """Return this host's fully-qualified domain name via the socket module."""
    fqdn = socket.getfqdn()
    return fqdn
def get_ip_address():
    """Return the IPv4 address resolved from this host's FQDN.

    Falls back to resolving the empty hostname when the FQDN does not
    resolve (e.g. misconfigured DNS), keeping the original best-effort
    behaviour instead of raising.
    """
    try:
        return socket.gethostbyname(socket.getfqdn())
    except socket.gaierror as error:
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning(error)
        return socket.gethostbyname("")
def http_status_response(enum_name):
    """Build a {code, status, description} dict for an HTTPStatus member name.

    Returns an empty dict when *enum_name* is not a valid member.  (The
    original used getattr without a default, so an unknown name raised
    AttributeError instead of returning {} as the guard intended.)
    """
    status = getattr(HTTPStatus, enum_name, None)
    if status is None:
        return {}
    return {
        'code': status.value,
        'status': status.phrase,
        'description': status.description,
    }
def rsetattr(obj, attr, val):
    """Set a possibly dotted attribute path (e.g. 'a.b.c') on *obj*."""
    parent_path, _, leaf = attr.rpartition('.')
    target = rgetattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
# Unique marker distinguishing "no default supplied" from an explicit None.
sentinel = object()
def rgetattr(obj, attr, default=sentinel):
    """Get a possibly dotted attribute path from *obj* (e.g. rgetattr(o, 'a.b')).

    Without *default*, missing attributes raise AttributeError; with it,
    the default is substituted at every step of the chain.
    """
    if default is sentinel:
        getter = getattr
    else:
        def getter(current, name):
            return getattr(current, name, default)
    chain = [obj] + attr.split('.')
    return functools.reduce(getter, chain)
class PythonObjectEncoder(json.JSONEncoder):
    """ custom json.JSONEncoder for requests """
    def default(self, obj):
        """ default method: serialize datetimes as ISO-8601 'Z' strings and sets as lists """
        # NOTE(review): JSONEncoder.default() always raises TypeError, so this
        # branch rejects basic types rather than encoding them; in practice
        # basic types never reach default() — confirm intent.
        if isinstance(obj, (list, dict, str, int, float, bool, type(None))):
            return json.JSONEncoder.default(self, obj)
        if isinstance(obj, datetime):
            return obj.isoformat() + 'Z'
        #return str(obj)
        if isinstance(obj, set):
            return list(obj)
        # NOTE(review): any other type falls through and returns None, which is
        # then emitted as JSON null — presumably best-effort; verify.
|
17,986 | 6406bebb432307aac606823091b8a5f6d2f35d41 | import time
import pandas as pd
import numpy as np
from datetime import datetime
#from collections import Counter
def main():
    """Interactively explore US bikeshare data for Chicago, NYC and Washington.

    Prompts the user for a city/month/day filter, loads the matching CSV,
    drops incomplete rows, optionally pages through raw records, then prints
    time, station, trip-duration and user statistics.  Offers a restart at
    the end.
    """
    CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
    #start of program after title, loop here if restarting program from the top
    #Bicycle Picture
    print(' o__ __o ,__o __o\n ,>/_ -\<, _-\_<, _`\<,_ _ \<_\n(*)`(*).....O/ O.....(*)/\'(*).....(*)/ (*).....(_)/(_)')
    #Project Title
    print(' ___ __ / \n| | _ | _ _ __ _ | _ (_ _|_ _ _ __ _ \n|^|(/_ | (_ (_)|||(/_ | (_) __) |_(/_\_/(/_| | _> \n _ __ _ _ o \n|_) o | _ (_ |_ _ __ _ | \ _ _|_ _ |_) __ _ | _ _ _|_\n|_) | |<(/_ __)| |(_| | (/_ |_/(_| |_(_| | | (_)_| (/_(_ |_')
    #Welcome Statement
    print('\nHello! Welcome to Steven Ling\'s udacity python project! \nLet\'s explore some US bikeshare data!\n\n')
    #defining time intervals for display time function
    intervals = (
        ('years',217728000), # 60 * 60 * 24 * 7 * 30 * 12
        ('months',18144000), # 60 * 60 * 24 * 7 * 30
        ('weeks', 604800), # 60 * 60 * 24 * 7
        ('days', 86400), # 60 * 60 * 24
        ('hours', 3600), # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )
    #function to convert seconds to years,months,weeks,days,hours,seconds
    def display_time(seconds, granularity=6):
        result = []
        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip('s')
                result.append("{} {}".format(value, name))
        return ', '.join(result[:granularity])
    def get_filters():
        # Ask for city, month and day; invalid input offers a full restart.
        #City Choice Input
        city_choice = input("Which city are you interested in?\n\nChoose a city by entering the corresponding number:\n1 for Chicago or\n2 for New York city or\n3 for Washington?")
        global city
        if city_choice == '1':
            city ='chicago'
            print('you have chosen Chicago!\n')
        elif city_choice == '2':
            city = 'new york city'
            print('you have chosen New York city!\n')
        elif city_choice == '3':
            city = 'washington'
            print('you have chosen Washington city!\n')
        else:
            print('This does not seem to be a valid choice!')
            restart = input("Do you wish to reselect filters? y/n?\n").lower()
            if restart == 'y':
                get_filters()
            else:
                exit()
        # TO DO: get user input for month (all, january, february, ... , june)
        # Month Choice Input
        global month
        month =()
        month_choice = input("Which month are you interested in?\n\nChoose a month by entering the following choices:\n (all, january, february, march, april, may, june) ")
        valid_months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
        month_choice = month_choice.lower()
        if month_choice in valid_months:
            month = month_choice
            print ('For months, you have selected {}'.format(month))
        else:
            print('This does not seem to be a valid choice!')
            restart_month = input("Do you wish to choose filters again? y/n?\n").lower()
            if restart_month == 'y':
                get_filters()
            else:
                exit()
        # Get user input for day of the week
        global day
        day=()
        day_choice = input("which day of the week are you interested in?\n\nChoose a day by entering the following choices:\n (all, monday, tuesday, wednesday, thursday, friday, saturday, sunday)")
        valid_days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
        day_choice = day_choice.lower()
        if day_choice in valid_days:
            day = day_choice
            print ('For days, you have selected {}'.format(day))
        else:
            print('This does not seem to be a valid choice!')
            restart_days = input("Do you wish to repick filters? y/n?\n").lower()
            if restart_days == 'y':
                get_filters()
            else:
                exit()
        print('-'*40)
        return city, month, day
    def load_data(city, month, day):
        # load data file into a dataframe
        global df
        df = pd.read_csv(CITY_DATA[city],index_col=0, infer_datetime_format=True)
        # convert the Start Time and end Time column to datetime
        df['Start Time'] = pd.to_datetime(df['Start Time'])
        df['End Time'] = pd.to_datetime(df['End Time'])
        # extract month and day of week from Start Time to create new columns
        df['Start_Hour'] = df['Start Time'].dt.hour
        df['month'] = df['Start Time'].dt.month
        # FIX: Series.dt.weekday_name was removed in pandas 0.25;
        # day_name() returns the same weekday strings.
        df['day_of_week'] = df['Start Time'].dt.day_name()
        df['Start Time'] = df['Start Time'].dt.time
        df['End Time'] = df['End Time'].dt.time
        # filter by month if applicable
        if month != 'all':
            # use the index of the months list to get the corresponding int
            months = ['january', 'february', 'march', 'april', 'may', 'june']
            month = months.index(month) + 1
            # filter by month to create the new dataframe
            df = df[df['month'] == month]
        # filter by day of week if applicable
        if day != 'all':
            # filter by day of week to create the new dataframe
            df = df[df['day_of_week'] == day.title()]
        return df
    def time_stats(df):
        #Displays statistics on the most frequent times of travel.
        print('\nCalculating The Most Frequent Times of Travel for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        start_time = time.time()
        time_delay_short()
        #display the most common month
        most_common_month = df['month'].mode()[0]
        print('Most Common month: \n', most_common_month)
        #display the most common day of week
        most_common_day = df['day_of_week'].mode()[0]
        print('Most Common Day: \n', most_common_day)
        #display the most common start hour
        most_common_start_hour = df['Start_Hour'].mode()[0]
        print('Most Common Start Hour:\n', most_common_start_hour)
        print("\nThis took %s seconds." % (time.time() - start_time))
        print('-'*40)
    def station_stats(df):
        #Displays statistics on the most popular stations and trip.
        print('\nCalculating The Most Popular Stations and Trips for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        start_time = time.time()
        time_delay_short()
        #display most commonly used start station
        most_common_start_station = df['Start Station'].mode()[0]
        print('Most Common Start Station:{}\n'.format(most_common_start_station))
        #print('Most Common Start Hour:', most_common_start_hour)
        most_common_start_hour = df['Start_Hour'].mode()[0]
        print('Most Common Start Hour:{}: '.format(most_common_start_hour))
        #display most commonly used end station
        most_common_end_station = df['End Station'].mode()[0]
        print('Most Common End Station:{}: '.format(most_common_end_station))
        #display most frequent combination of start station and end station trip
        time_delay_short()
        most_common_start_end_station = df[['Start Station', 'End Station']].mode(0)
        print('Most Common Start and End Station: \n',most_common_start_end_station)
        print("\nThis took %s seconds." % (time.time() - start_time))
        print('-'*40)
    def trip_duration_stats(df):
        #Displays statistics on the total and average trip duration.
        print('\nCalculating Trip Duration for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        time_delay_short()
        start_time = time.time()
        # TO DO: display total travel time
        Total_travel_time = df['Trip Duration'].sum(axis = 0, skipna = True)
        print('Total travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        print('is... ' , display_time(Total_travel_time))
        time_delay_short()
        # TO DO: display mean travel time
        Mean_travel_time = df['Trip Duration'].mean(axis = 0, skipna = True)
        print('Total average travel time for: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        print('is... ', display_time(Mean_travel_time))
        time_delay_short()
        print("\nThis took %s seconds." % (time.time() - start_time))
        print('-'*40)
    def user_stats(df):
        #Displays statistics on bikeshare users.
        print('\nCalculating User Stats: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
        time_delay_short()
        start_time = time.time()
        # Display counts of user type
        x = 'User Type'
        print('\nCount of User Type:\n',df[x].value_counts())
        time_delay_short()
        # Display counts of gender
        y = 'Gender'
        print('\nCount of Gender:\n',df[y].value_counts())
        # Display earliest, most recent, and most common year of birth
        z = 'Birth Year'
        currentYear = datetime.now().year
        oldest_biker = currentYear - df[z].min()
        print('\nOldest User is {} years old!'.format(oldest_biker))
        print('Wow that\'s old!')
        youngest_biker = currentYear - df[z].max()
        print('\nYoungest User is {} years old!'.format(youngest_biker))
        print('Wow that\'s young!')
        # FIX: take mode()[0] (a scalar) like the other stats; subtracting the
        # whole mode() Series printed a Series repr instead of an age.
        common_year = currentYear - df[z].mode()[0]
        print('\nMost common age of users in data set is {} years old'.format(str(common_year)))
        print("\nThis took %s seconds." % (time.time() - start_time))
        print('-'*40)
    def display_raw_data():
        # get user input whether to displays cycle through 5 rows of data
        raw_data_display = input("Would you like to see 5 records of the data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
        if raw_data_display != 'pass':
            i = 5
            while raw_data_display !='pass':
                print(df.iloc[i-5:i, :])
                raw_data_display = input("Would you like to see the next 5 records of raw data? Press any key to continue displaying or type 'pass' to skip to descriptive statistics \n")
                i = i + 5
        else:
            print("....skipping ahead to descriptive stats\n")
    def drop_na_values():
        # Report blank-field counts, then drop incomplete rows in place so
        # the statistics are not skewed.
        global df
        # get number of rows in dataframe
        numOfRows = df.shape[0]
        print('\nThe raw data set is {} rows long!\n'.format(numOfRows))
        time_delay_short()
        print('\nAnalyzing for number of blank fields in the raw dataset...\n')
        time_delay_short()
        nan_count = df.isnull().sum()
        print ('\nNumber of blank fields of each column in our dataset:\n', nan_count)
        time_delay_short()
        count_of_non_nan = df.count()
        print ('\nCount of number of completed fields in our data set:\n', count_of_non_nan)
        print ('\nWe will now drop the rows with blanks from the dataset so that the calculated statistics will not be skewed...\n')
        df.dropna(axis = 0, inplace = True)
        time_delay_short()
        numOfRows = df.shape[0]
        print('\nThe modified data set is now {} rows long!'.format(numOfRows))
    #def time_delay_long():
    #to add time delay to slow down the bombard of text to the user (and for fun!)
    #    time.sleep(1)
    #    print('...executing task...')
    #    time.sleep(2)
    #    print('.........................Complete!\n')
    #    time.sleep(1)
    def time_delay_short():
        #to add time delay to slow down the bombard of text to the user (and for fun!)
        time.sleep(1)
        print('...executing task...')
        time.sleep(1)
        print('....................Complete!\n')
        time.sleep(1)
    get_filters()
    print('\nThe bike data will now be filtered by the following: \n City: {}\n Month: {}\n Day: {}'.format(city,month,day))
    load_data(city,month,day)
    drop_na_values()
    display_raw_data()
    continue_choice = input("Time stats will now be displayed. Press any key to continue or type 'pass' to skip to station stats\n").lower()
    if continue_choice != 'pass':
        time_stats(df)
    else:
        print("....skipping time stats\n")
    continue_choice = input("Station stats will now be displayed. Press any key to continue or type 'pass' to skip to trip duration stats\n").lower()
    if continue_choice != 'pass':
        station_stats(df)
    else:
        print("....skipping station_stats\n")
    continue_choice = input("Trip duration stats will now be displayed. Press any key to continue or type 'pass' to skip to trip user stats\n").lower()
    if continue_choice != 'pass':
        trip_duration_stats(df)
    else:
        print("....skipping trip duration stats\n")
    if city != "washington":
        continue_choice = input("User stats will now be displayed. Press any key to continue or type 'pass' to skip\n").lower()
        if continue_choice != 'pass':
            user_stats(df)
        else:
            print("....skipping user stats\n")
    else:
        print('Washington data set contains no gender or user type data therefore there are no user stats to display for this city! T_T )')
    #restart code
    restart = input("Do you wish to try again? y/n\n").lower()
    if restart == 'y':
        main()
    else:
        exit()
main()
|
17,987 | af19704dcd5257f935ba4164db4da5d7dfc17e1f | # Generated by Django 3.0.8 on 2020-08-12 05:57
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Task.auto_join and alters Note fields."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('task', '0007_auto_20200811_1839'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='auto_join',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='note',
            name='date_end',
            # NOTE(review): this default was frozen at makemigrations time —
            # likely meant to be a callable (e.g. now + 7 days); as written
            # every new row defaults to the literal 2020-08-19 timestamp.
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 19, 5, 57, 22, 970068)),
        ),
        migrations.AlterField(
            model_name='note',
            name='task',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='notes', to='task.Task', verbose_name='tasks'),
        ),
        migrations.AlterField(
            model_name='note',
            name='user',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='users'),
        ),
    ]
|
17,988 | 177a174a70703b2ff350138c96f0fadef7aa518d | # Project: Proxy Herd with Asyncio
# If you found this file helpful, please consider reaching out to me:
# Website: faithtwardzik.com
# Instagram: @faithtwardzik
import asyncio
import argparse
import re
import time
import aiohttp
import json
import logging
import os
# if putty aborts, must use netstat -tlnp, then kill -9 pid to close port
# Google Places API key (placeholder — set a real key before running).
API_Key = "YOUR_API_KEY"
server_IDs = ["Hill", "Jaquez", "Smith", "Campbell", "Singleton"]
# Herd topology: which peer servers each server floods AT updates to.
server_tree = {
    "Hill": ["Jaquez", "Smith"],
    "Jaquez": ["Hill", "Singleton"],
    "Smith": ["Hill", "Singleton", "Campbell"],
    "Singleton": ["Campbell", "Jaquez", "Smith"],
    "Campbell": ["Singleton", "Smith"]
}
# Localhost TCP port each named server listens on.
port_assignments = {
    "Hill": 11950,
    "Jaquez": 11951,
    "Smith": 11952,
    "Singleton": 11953,
    "Campbell": 11954
}
### Server class ###
class Server:
    """One proxy-herd node: answers IAMAT/WHATSAT queries and floods AT updates."""
    # TODO: the port argument is not used
    def __init__(self, name, ip='127.0.0.1', port=11951, message_max_length=1e6):
        self.name = name
        self.ip = ip
        # Port always comes from the static table, not the `port` parameter.
        self.port = port_assignments[name]
        self.message_max_length = int(message_max_length)
        # a set to remember flood messages already received
        self.flood_msgs = set()
        # client name maps to dictionary of client attributes ("lat": latitude, etc)
        self.clients = {}
    async def handle_echo(self, reader, writer):
        """
        on server side
        Read one message, dispatch on its first word (IAMAT/WHATSAT/AT),
        reply when the command produces a response, then close the socket.
        """
        data = await reader.read(self.message_max_length)
        curr_time = time.time()
        message = data.decode()
        addr = writer.get_extra_info('peername')
        print("{} received {} from {}".format(self.name, message, addr))
        project_log.write(self.name + " received " + message + " from " + str(addr) + '\n')
        #process the message and send back appropriate message
        cmd = message.split()[0] if message else ""
        parsed_msg = message.split()
        if cmd == "IAMAT":
            sendback_message = await self.IAMAT(parsed_msg, curr_time)
        elif cmd == "WHATSAT":
            sendback_message = await self.WHATSAT(parsed_msg)
        elif cmd == "AT":
            # Server-to-server flood update; no reply is sent.
            await self.AT(message)
            sendback_message = None
        else:
            # handle invalid query
            sendback_message = "? " + message
        if sendback_message is not None:
            print("{} send: {}".format(self.name, sendback_message))
            project_log.write(self.name + " send: " + sendback_message + '\n')
            writer.write(sendback_message.encode())
            await writer.drain()
        print("close the client socket")
        writer.close()
    async def run_forever(self):
        """Bind on this server's assigned port and serve until cancelled."""
        server = await asyncio.start_server(self.handle_echo, self.ip, self.port)
        # Serve requests until Ctrl+C is pressed
        print(f'serving on {server.sockets[0].getsockname()}')
        project_log.write(self.name + " is serving on " + str({server.sockets[0].getsockname()}) + '\n')
        async with server:
            await server.serve_forever()
        # Close the server
        project_log.write(self.name + " is closing...\n")
        server.close()
    # server to server communication and updating of client attributes
    async def AT(self, flood_msg): # take in flood_msg
        """Apply a flood update once (dedup via flood_msgs) and re-flood it."""
        if flood_msg not in self.flood_msgs:
            self.flood_msgs.add(flood_msg)
            parsed_msg = flood_msg.split()
            self.update_client(
                origin_server=parsed_msg[1],
                client_name=parsed_msg[3],
                lat_lon_str=parsed_msg[4],
                client_time=parsed_msg[5],
                time_diff_str=parsed_msg[2]
            )
            project_log.write("In " + self.name + ", updating client attributes for " + parsed_msg[3] + ", located at " + parsed_msg[1] + '\n')
            await self.flood_it(flood_msg)
    # for i in 11950 11951 11952 11953 11954 ; do kill $(/usr/sbin/lsof -ti:$i); done
    # flooding algorithm for sending client updates to servers in the server_tree
    async def flood_it(self, flood_msg):
        """Forward *flood_msg* to every neighbour except the one it came from."""
        parsed_msg = flood_msg.split()
        origin_server = parsed_msg[1]
        project_log.write("Flooding all servers connected to " + self.name + " in server_tree\n")
        for server in set(server_tree[self.name]) - {origin_server}:
            port = port_assignments[server]
            try:
                project_log.write("Connecting to " + server + '\n')
                reader, writer = await asyncio.open_connection("127.0.0.1", port)
                project_log.write("Writing " + flood_msg + " from " + self.name + " to " + server + '\n')
                writer.write(flood_msg.encode())
                await writer.drain()
            except:
                # Unreachable peers are skipped; the herd tolerates outages.
                project_log.write(server + " is down. Cannot connect\n")
                continue
    async def WHATSAT(self, parsed_msg):
        """Answer a WHATSAT query with the client's AT record plus nearby places.

        Validates radius (0-50) and result count (0-20); returns "? <query>"
        on any malformed or unknown-client request.
        """
        if len(parsed_msg) != 4:
            return "? " + " ".join(parsed_msg)
        if float(parsed_msg[2]) > 50 or float(parsed_msg[2]) < 0:
            return "? " + " ".join(parsed_msg)
        if int(parsed_msg[3]) > 20 or int(parsed_msg[3]) < 0:
            return "? " + " ".join(parsed_msg)
        client_id = parsed_msg[1]
        if client_id not in self.clients:
            return "? " + " ".join(parsed_msg)
        client_specs = self.clients[client_id]
        sendback_message = f'AT {client_specs["server"]} {client_specs["time_diff"]} {client_id} {client_specs["lat"]}{client_specs["lon"]} {client_specs["time"]}'
        # TODO: we don't need the global keyword since API_Key is already declared at a global scope
        # Querying the Google Places API
        formatted_rec_loc = client_specs["lat"] + "," + client_specs["lon"]
        radius = float(parsed_msg[2])
        url_w_params = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=%s&radius=%d&key=%s' % (formatted_rec_loc, radius, API_Key)
        project_log.write(self.name + " is sending an HTTP request to " + url_w_params + '\n')
        async with aiohttp.ClientSession() as session:
            async with session.get(url_w_params) as resp:
                response = await resp.json()
        # limiting the results and formatting
        response['results'] = response['results'][:(int(parsed_msg[3]))]
        response = json.dumps(response, indent=4)
        project_log.write(self.name + " received " + response + " from Google API request\n")
        sendback_message += "\n" + response + "\n\n"
        return sendback_message
    async def IAMAT(self, parsed_msg, curr_time):
        """Record a client location report and flood the resulting AT message."""
        if len(parsed_msg) != 4:
            return "? " + " ".join(parsed_msg)
        # Skew between server receipt time and the client's own timestamp.
        time_difference = curr_time - float(parsed_msg[3])
        if time_difference > 0:
            time_difference_str = "+" + str(time_difference)
        else:
            time_difference_str = str(time_difference)
        server_name = self.name
        self.update_client(
            origin_server=server_name,
            client_name=parsed_msg[1],
            lat_lon_str=parsed_msg[2],
            client_time=parsed_msg[3],
            time_diff_str=time_difference_str
        )
        project_log.write("Updating client attributes for " + parsed_msg[1] + " at " + server_name + '\n')
        from_sent = " ".join(parsed_msg[1:])
        sendback_message = "AT " + server_name + " " + time_difference_str + " " + from_sent
        project_log.write("Flooding server herd with updated client attributes, beginning at " + server_name + '\n')
        # send the client's updated attributes to the connected servers
        self.flood_msgs.add(sendback_message)
        await self.flood_it(sendback_message)
        return sendback_message
    def update_client(self, origin_server, client_name, lat_lon_str, client_time, time_diff_str):
        """Store/overwrite the latest known attributes for *client_name*."""
        # update client attributes in clients
        latitude, longitude = get_lat_lon(lat_lon_str)
        client_attributes = {
            "name": client_name,
            "lat": latitude,
            "lon": longitude,
            "time": client_time,
            "time_diff": time_diff_str,
            "server": origin_server
        }
        self.clients[client_name] = client_attributes
def get_lat_lon(lat_lon):
    """Split a signed '+lat±lon' string into ['<sign>lat', '<sign>lon'] parts."""
    lead, rest = lat_lon[0], lat_lon[1:]
    # The longitude's sign is whichever separator appears after the first char.
    sign = '+' if '+' in rest else '-'
    parts = rest.split(sign)
    parts[1] = sign + parts[1]
    parts[0] = lead + parts[0]
    return parts
def main():
    """Parse the server-name argument, open its log file, and run the server loop."""
    parser = argparse.ArgumentParser('CS131 project example argument parser')
    parser.add_argument('server_name', type=str,
                        help='required server name input')
    args = parser.parse_args()
    # TODO: let's make this project_log a member of the server class
    global project_log
    project_log = open(args.server_name + ".log", "a")
    # TODO: maybe create a helper function to do the project_log.write so that we don't risk forgetting to append the newline character...
    project_log.write("Beginning of program run\n")
    print("Hello, welcome to server {}".format(args.server_name))
    server = Server(args.server_name)
    project_log.write("Server " + args.server_name + " has been initialized and opened\n")
    try:
        project_log.write("Event loop has been initialized\n")
        asyncio.run(server.run_forever())
    except KeyboardInterrupt:
        # Ctrl+C is the expected shutdown path; exit quietly.
        pass
if __name__ == '__main__':
    main()
|
17,989 | cf8a4a9d7ad05f033a3d73d5e2047ea0288d16ee | # -*- coding: utf-8 -*-
class QError(Exception):
    """Base error: subclasses set `msg` to a str.format template filled from kwargs."""
    # Template string; None on the base class.
    msg = None
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        # Eagerly render so the formatted text is available as .message.
        self.message = str(self)
    def __str__(self):
        # NOTE(review): assumes a subclass has set `msg`; raising the base
        # class itself fails here (None has no .format) — confirm base is
        # never raised directly.
        msg = self.msg.format(**self.kwargs)
        return msg
    __unicode__ = __str__
    __repr__ = __str__
class InvalidRPCClientArguments(QError):
    """Raised when arguments passed to a remote RPC call are not a dict."""
    msg = "RPC远程调用的参数类型必须是dict, 但传入的参数类型是{argtype}"
class TradingError(QError):
    """
    Generic trading warning.
    """
    msg = "交易警告:{err}"
class DataFormatError(QError):
    """
    Raised on malformed data.
    """
    msg = "{type}--错误的数据格式!"
class DataFieldError(QError):
    """
    Raised when data fields do not match the expected schema.
    """
    msg = "错误的数据字段: {error_fields}\n正确的字段为: {right_fields} "
class FileDoesNotExist(QError):
    """
    Raised when a local file does not exist.
    """
    msg = "不存在文件:{file}"
class PeriodTypeError(QError):
    """Raised for an unknown bar period type."""
    msg = "不存在该周期! -- {period}"
class DataAlignError(QError):
    """Raised when data series are not aligned."""
    msg = "数据没有对齐!"
class SeriesIndexError(QError):
    """Raised on an out-of-range series index."""
    msg = "序列变量索引越界!"
class BreakConstError(QError):
    """Raised on an attempt to assign to a constant."""
    msg = "不能对常量赋值!"
class ArgumentError(QError):
    """Raised for invalid arguments."""
    msg = "参数错误!"
class WrongDataForTransform(QError):
    """
    Raised whenever a rolling transform is called on an event that
    does not have the necessary properties.
    """
    msg = "{transform} requires {fields}. Event cannot be processed."
# Errors raised for misuse of the algorithm override/ordering API.
class UnsupportedSlippageModel(QError):
    """
    Raised if a user script calls the override_slippage magic
    with a slipage object that isn't a VolumeShareSlippage or
    FixedSlipapge
    """
    msg = """
You attempted to override slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class OverrideSlippagePostInit(QError):
    # Raised if a users script calls override_slippage magic
    # after the initialize method has returned.
    msg = """
You attempted to override slippage outside of `initialize`. \
You may only call override_slippage in your initialize method.
""".strip()
class RegisterTradingControlPostInit(QError):
    # Raised if a user's script register's a trading control after initialize
    # has been run.
    msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(QError):
    """
    Raised if a user script calls the override_commission magic
    with a commission object that isn't a PerShare, PerTrade or
    PerDollar commission
    """
    msg = """
You attempted to override commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class OverrideCommissionPostInit(QError):
    """
    Raised if a users script calls override_commission magic
    after the initialize method has returned.
    """
    msg = """
You attempted to override commission outside of `initialize`. \
You may only call override_commission in your initialize method.
""".strip()
class TransactionWithNoVolume(QError):
    """
    Raised if a transact call returns a transaction with zero volume.
    """
    msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(QError):
    """
    Raised if a transact call returns a transaction with a direction that
    does not match the order.
    """
    msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(QError):
    """
    Raised if a transact call returns a transaction with zero amount.
    """
    msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(QError):
    """
    Raised if a transact call returns a transaction with a volume greater than
    the corresponding order.
    """
    msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(QError):
    """
    Raised if a set of mutually exclusive parameters are passed to an order
    call.
    """
    msg = "{msg}"
class BadOrderParameters(QError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(QError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class TradingControlViolation(QError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {sid} violates trading constraint {constraint}.
""".strip()
class IncompatibleHistoryFrequency(QError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
|
17,990 | 349d3dadab9760a28ccb0f72df667b01518cf132 | import unittest
import numpy as np
import os
import meshcat
from pddl_planning.problems import load_dope
from pddl_planning.simulation import compute_duration, ForceControl
from plan_runner.manipulation_station_simulator import ManipulationStationSimulator
from plan_runner.open_left_door import GenerateOpenLeftDoorPlansByImpedanceOrPosition
class TestPDDLPlanning(unittest.TestCase):
    """Integration tests replaying precomputed PDDL plans on the Drake
    manipulation-station simulator and checking the cupboard-door outcome.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """

    def setUp(self):
        """Load the DOPE task, locate the soup can, and build the simulator."""
        self.q0 = [0, 0, 0, -1.75, 0, 1.0, 0]
        self.time_step = 2e-3
        # Run from inside pddl_planning/ so relative resource paths resolve;
        # restored in tearDown.
        self.prevdir = os.getcwd()
        os.chdir(os.path.expanduser("pddl_planning"))
        task, diagram, state_machine = load_dope(time_step=self.time_step,
                                                 dope_path="poses.txt",
                                                 goal_name="soup",
                                                 is_visualizing=False)
        plant = task.mbp
        task.publish()
        context = diagram.GetMutableSubsystemContext(plant, task.diagram_context)
        world_frame = plant.world_frame()
        tree = plant.tree()
        # World-frame pose of the soup can, used to respawn it in the simulator.
        X_WSoup = tree.CalcRelativeTransform(
            context, frame_A=world_frame, frame_B=plant.GetFrameByName("base_link_soup"))
        self.manip_station_sim = ManipulationStationSimulator(
            time_step=self.time_step,
            object_file_path="./models/ycb_objects/soup_can.sdf",
            object_base_link_name="base_link_soup",
            X_WObject=X_WSoup)

    def tearDown(self):
        # Restore the working directory changed in setUp.
        os.chdir(self.prevdir)

    def InspectLog(self, state_log, plant):
        """Assert the cupboard door ended open and velocities stayed small."""
        tree = plant.tree()
        data = state_log.data()
        # create a context of final state.
        x_final = data[:, -1]
        context = plant.CreateDefaultContext()
        x_mutable = tree.GetMutablePositionsAndVelocities(context)
        x_mutable[:] = x_final
        # cupboard must be open.
        hinge_joint = plant.GetJointByName("left_door_hinge")
        joint_angle = hinge_joint.get_angle(context)
        self.assertTrue(np.abs(joint_angle) > np.pi/6,
                        "Cupboard door is not fully open.")
        # velocity must be small throughout the simulation.
        for x in data.T:
            v = x[plant.num_positions():]
            self.assertTrue((np.abs(v) < 3.).all(), "velocity is too large.")

    def HasReturnedToQtarget(self, q_iiwa_target, state_log, plant):
        """Return True if every iiwa joint ends within 0.03 rad of target."""
        tree = plant.tree()
        data = state_log.data()
        q_final = data[:, -1][:plant.num_positions()]
        iiwa_model = plant.GetModelInstanceByName("iiwa")
        q_iiwa_final = tree.GetPositionsFromArray(iiwa_model, q_final)
        return (np.abs(q_iiwa_target - q_iiwa_final) < 0.03).all()

    def test_pddl(self):
        """Replay precomputed splines/gripper setpoints and check outcomes."""
        splines = np.load("test_data/splines.npy")
        setpoints = np.load("test_data/gripper_setpoints.npy")
        plan_list = []
        gripper_setpoints = []
        for control, setpoint in zip(splines, setpoints):
            plan_list.append(control.plan())
            gripper_setpoints.append(setpoint)
        sim_duration = compute_duration(plan_list)
        q_iiwa_beginning = plan_list[0].traj.value(0).flatten()
        iiwa_position_command_log, iiwa_position_measured_log, iiwa_external_torque_log, \
            plant_state_log, t_plan = \
            self.manip_station_sim.RunSimulation(plan_list, gripper_setpoints,
                                                 extra_time=2.0, real_time_rate=0.0,
                                                 q0_kuka=self.q0, is_visualizing=False)
        # Run Tests
        self.InspectLog(plant_state_log, self.manip_station_sim.plant)
        self.assertTrue(
            self.HasReturnedToQtarget(q_iiwa_beginning, plant_state_log, self.manip_station_sim.plant))

    def test_pddl_force_control(self):
        """Same as test_pddl, but ForceControl segments are replaced by
        impedance door-opening plans."""
        splines = np.load("test_data/splines_force_control.npy")
        setpoints = np.load("test_data/gripper_setpoints_force_control.npy")
        plan_list = []
        gripper_setpoints = []
        for control, setpoint in zip(splines, setpoints):
            if isinstance(control, ForceControl):
                new_plans, new_setpoints = \
                    GenerateOpenLeftDoorPlansByImpedanceOrPosition("Impedance", is_open_fully=True)
                plan_list.extend(new_plans)
                gripper_setpoints.extend(new_setpoints)
            else:
                plan_list.append(control.plan())
                gripper_setpoints.append(setpoint)
        sim_duration = compute_duration(plan_list)
        q_iiwa_beginning = plan_list[0].traj.value(0).flatten()
        iiwa_position_command_log, iiwa_position_measured_log, iiwa_external_torque_log, \
            plant_state_log, t_plan = \
            self.manip_station_sim.RunSimulation(plan_list, gripper_setpoints,
                                                 extra_time=2.0, real_time_rate=0.0,
                                                 q0_kuka=self.q0, is_visualizing=False)
        # Run Tests
        self.InspectLog(plant_state_log, self.manip_station_sim.plant)
        self.assertTrue(
            self.HasReturnedToQtarget(q_iiwa_beginning, plant_state_log, self.manip_station_sim.plant))
|
17,991 | 66e407bffe0632dc4f3600e76b01b828e4243f68 |
# Path to the local storage JSON file (presumably password storage, given
# the name). NOTE(review): machine-specific hard-coded path — consider an
# environment variable instead.
PATH_TO_STORAGE = '/Users/artempilipchuk/pass_storage.json'
|
17,992 | 672622d285f0206ee6039b2d79895bee3fe93f8b | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import inspect
import os
import subprocess
import sys
import timeit
import argparse
import copy
import re
import libconf
import yaml
from common import *
# Output file names.
out_prefix = "timeloop-mapper."
log_file_name = out_prefix + "log"
stats_file_name = out_prefix + "stats.txt"
xml_file_name = out_prefix + "map+stats.xml"
map_txt_file_name = out_prefix + "map.txt"
map_cfg_file_name = out_prefix + "map.cfg"
map_cpp_file_name = out_prefix + "map.cpp"
# Files timeloop drops in the current directory; run_timeloop moves them
# into the run directory afterwards.
output_file_names = [log_file_name,
                     stats_file_name,
                     xml_file_name,
                     map_txt_file_name,
                     map_cfg_file_name,
                     map_cpp_file_name]
# dimension conversion that maps a WU problem to FW problem
wu2fw = {'P': 'R',
         'Q': 'S',
         'R': 'P',
         'S': 'Q',
         'C': 'K',
         'K': 'N',
         'N': 'C'}
def prod(l):
    """Return the product of the elements of iterable *l* (1 if empty).

    The explicit initializer fixes a crash: ``functools.reduce`` with no
    initial value raises TypeError on an empty sequence.
    """
    return functools.reduce(lambda x, y: x * y, l, 1)
def rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding
    """Load a timeloop config template from *src*, rewrite its problem and
    mapper sections for the given workload/options, and write it to *dst*.

    Returns a dict of extra environment variables for the timeloop run
    (currently only TIMELOOP_EQUIVLENT_WU in the WU->FW equivalence mode).

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; branch nesting chosen to keep behavior self-consistent
    (e.g. the '.yaml' suffix is only appended on the non-depthwise path,
    since adapt_depthwise_config already appends '-depthwise.yaml').
    """
    w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds
    n = batchsize
    # Standard convolution output-size formulas.
    q = int((w - s + 2 * wpad) / wstride) + 1
    p = int((h - r + 2 * hpad) / hstride) + 1
    # k == 'D' marks a depthwise layer; WU->FW equivalence only applies to
    # non-depthwise weight-update problems.
    wu_equiv = k != 'D' and phase == 'wu'
    env_list = {}
    if not wu_equiv:
        print('Workload Dimensions:')
        print(' W =', w)
        print(' H =', h)
        print(' C =', c)
        print(' K =', k)
        print(' S =', s)
        print(' R =', r)
        print(' P =', p)
        print(' Q =', q)
        print(' N =', n)
        print(' W-pad =', wpad)
        print(' H-pad =', hpad)
        print(' W-stride =', wstride)
        print(' H-stride =', hstride)
        print()
    else:
        print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? (at least in the dense case?)')
        print('Workload Dimensions:')
        print(' W =', w)
        print(' H =', h)
        print(f' C <- N {n}')
        print(f' K <- C {c}')
        print(f' S <- Q {q}')
        print(f' R <- P {p}')
        print(f' P <- R {r}')
        print(f' Q <- S {s}')
        print(f' N <- K {k}')
        print(' W-pad =', wpad)
        print(' H-pad =', hpad)
        print(' W-stride =', wstride)
        print(' H-stride =', hstride)
        print()
        # Flag consumed by the timeloop run environment (see run_timeloop).
        env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'
    with open(src, "r") as f:
        # Template format is inferred from the file name.
        if "cfg" in src:
            config = libconf.load(f)
        elif "yaml" in src:
            config = yaml.load(f, Loader=yaml.SafeLoader)
    config['problem']['shape'] = shapes[phase]
    if wu_equiv:
        config['problem']['shape'] = shapes['fw']
    if k == 'D':
        depthwise = True
        adapt_depthwise_config(config)
    else:
        depthwise = False
        config['problem']['shape'] += '.yaml'
    if wu_equiv:
        dataflow = convert_dataflow(dataflow)
    if phase == 'wu':
        remove_block_constraint(config)
    if depthwise:
        # Depthwise problems have no K dimension; remap dataflows using it.
        if dataflow == 'CK':
            dataflow = 'CN'
        dataflow = dataflow.replace('K', 'C')
    rewrite_dataflow(config, dataflow, replication, array_width)
    rewrite_mesh(config, array_width)
    if glb_scaling:
        rewrite_glb_size(config, array_width)
    if not wu_equiv:
        config['problem']['R'] = r
        config['problem']['S'] = s
        config['problem']['P'] = p
        config['problem']['Q'] = q
        config['problem']['C'] = c
        if not depthwise:
            config['problem']['K'] = k
        config['problem']['N'] = n
    else:
        # WU -> FW dimension remapping (cf. the wu2fw table).
        config['problem']['R'] = p
        config['problem']['S'] = q
        config['problem']['P'] = r
        config['problem']['Q'] = s
        config['problem']['C'] = n
        config['problem']['K'] = c
        config['problem']['N'] = k
    config['problem']['Wstride'] = wstride
    config['problem']['Hstride'] = hstride
    config['problem']['Wdilation'] = 1
    config['problem']['Hdilation'] = 1
    config['mapper']['model-name'] = model
    config['mapper']['layer-name'] = layer
    if terminate is not None:
        config['mapper']['victory-condition'] = terminate
    if threads is not None:
        config['mapper']['num-threads'] = threads
    # rewrite synthetic mask configuration
    if not synthetic:
        try:
            config['mapper'].pop('mask-synthetic')
        except KeyError:
            pass
    else:
        config['mapper']['mask-synthetic'] = {}
        if sparsity is not None:
            config['mapper']['mask-synthetic']['target-sparsity'] = sparsity
        if save is not None:
            config['mapper']['mask-synthetic']['synthetic-mask-path'] = save
    if dense:
        # Keep only the last dash-separated component of each metric name.
        opt_metrics = []
        for opt in config['mapper']['optimization-metrics']:
            opt_metrics.append(opt.split('-')[-1])
        config['mapper']['optimization-metrics'] = opt_metrics
    with open(dst, "w") as f:
        if "cfg" in src:
            f.write(libconf.dumps(config))
        elif "yaml" in src:
            f.write(yaml.dump(config))
    return env_list
def convert_dataflow(dataflow):
    """Map the first two dimensions of a WU dataflow string to their
    FW equivalents via the module-level ``wu2fw`` table."""
    converted = wu2fw[dataflow[0]] + wu2fw[dataflow[1]]
    print(f'convert from {dataflow} to {converted}')
    return converted
def remove_block_constraint(config):
    """Drop the 'factors' entry from every RegFile temporal constraint
    in the mapspace (no-op when the entry is absent)."""
    for entry in config['mapspace']['constraints']:
        if entry['type'] == 'temporal' and entry['target'] == 'RegFile':
            # pop with a default replaces the original try/except KeyError
            entry.pop('factors', None)
def rewrite_dataflow(config, dataflow, replication, array_width):
    """Rewrite the spatial constraint's factors and permutation so the two
    *dataflow* dimensions are mapped spatially at *array_width*.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the factor/permutation rewrite is nested under the spatial
    constraint branch, which keeps ``constraint`` well-defined. The
    more-than-one-spatial guard is loose: it only fires on an iteration
    *after* a second spatial constraint was seen.
    """
    # loop through constraints, and make sure there is only 1 spatial type constraint
    # dingqing FIXME: not general for more spatial level architecture config
    num_spatial = 0
    for constraint in config['mapspace']['constraints']:
        if num_spatial > 1:
            raise Exception("More than one spatial level! Check the config and the scripts.")
        if constraint['type'] == 'spatial':
            num_spatial += 1
            # determine if it is possible to replicate
            # (short-circuit: problem sizes are only consulted when
            # replication was requested)
            possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)
            print('possible2replicate?', possible2replicate)
            factors = constraint['factors'].split(' ')
            new_factor = []
            for factor in factors:
                if factor[0] in dataflow:
                    # look at problem size
                    new_factor.append(factor[0] + f'{array_width}')
                elif not possible2replicate:
                    # pin every non-dataflow dimension to 1; when replicating,
                    # such factors are dropped entirely instead
                    new_factor.append(factor[0] + '1')
            constraint['factors'] = ' '.join(new_factor)
            # rewrite permutation
            # emmmm ugly
            non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')
            constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]
def rewrite_mesh(config, array_width):
    """Rename the PE subtree for an array_width x array_width mesh and set
    meshX on each of its local components."""
    pe_subtree = config['architecture']['subtree'][0]['subtree'][0]  # FIXME: not generic enough
    old_name = pe_subtree['name']
    # The last number embedded in the name is the old PE index upper bound.
    old_count = re.findall(r'\d+', old_name)[-1]
    new_count = array_width * array_width - 1
    pe_subtree['name'] = old_name.replace(old_count, str(new_count))
    # Update RF and PE alike.
    for part in pe_subtree['local']:
        part['attributes']['meshX'] = array_width
def rewrite_glb_size(config, array_width):
    """Scale GlobalBuffer depth and bank count linearly with the array
    width, relative to a baseline width of 16."""
    scale = array_width / 16
    system = config['architecture']['subtree'][0]  # FIXME: not generic enough
    for comp in system['local']:
        if comp['name'] != 'GlobalBuffer':
            continue
        attrs = comp['attributes']
        attrs['depth'] = int(attrs['depth'] * scale)
        attrs['n_banks'] = int(attrs['n_banks'] * scale)
def adapt_depthwise_config(config):
    """Turn the problem into its depthwise variant: switch to the depthwise
    shape file, drop the K dimension, and scrub K from every constraint's
    factors and permutation."""
    config['problem']['shape'] += '-depthwise.yaml'
    config['problem'].pop('K', None)
    for constraint in config['mapspace']['constraints']:
        if 'factors' in constraint:
            kept = [f for f in constraint['factors'].split(' ') if f[0] != 'K']
            constraint['factors'] = ' '.join(kept)
        if 'permutation' in constraint:
            constraint['permutation'] = constraint['permutation'].replace('K', '')
def run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):
    """Run the timeloop-mapper binary on *configfile*, logging into
    *dirname*/*logfile*, then move timeloop's output files into *dirname*.

    NOTE(review): ``env_list={}`` is a mutable default argument; it is only
    read here, but callers should not rely on mutating it.
    """
    configfile_path = os.path.join(dirname, os.path.basename(configfile))
    logfile_path = os.path.join(dirname, logfile)
    print('Running timeloop to get mapping')
    def stmt():
        with open(logfile_path, "w") as outfile:
            # Locate the timeloop build relative to this script's location.
            this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
            if not dense:
                timeloop_executable_location = os.path.join(
                    os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')
            else:
                timeloop_executable_location = os.path.join(
                    os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')
            # Extra env vars (e.g. TIMELOOP_EQUIVLENT_WU) are merged on top
            # of the current environment.
            status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))
            # status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)
            if status != 0:
                subprocess.check_call(['cat', logfile_path])
                print('Did you remember to build timeloop and set up your environment properly?')
                sys.exit(1)
    # timeit gives the wall-clock duration of the single run.
    t = timeit.Timer(stmt)
    time = t.timeit(1)
    print('Time to run timeloop = ', time)
    # Move timeloop output files to the right directory
    for f in output_file_names:
        if os.path.exists(f):
            os.rename(f, dirname + '/' + f)
|
class Solution:
    def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
        """Return the key with the longest single press duration.

        Press i lasts releaseTimes[i] - releaseTimes[i - 1] (releaseTimes[0]
        for the first press). Ties break toward the lexicographically
        largest key.

        Improvement: a single O(n) scan with O(1) extra space replaces the
        original build-list-then-sort approach (O(n log n) time, O(n) space).
        """
        best_key = keysPressed[0]
        best_duration = releaseTimes[0]
        for i in range(1, len(releaseTimes)):
            duration = releaseTimes[i] - releaseTimes[i - 1]
            # Strictly longer press wins; equal press wins only with a
            # lexicographically larger key.
            if duration > best_duration or (
                duration == best_duration and keysPressed[i] > best_key
            ):
                best_key = keysPressed[i]
                best_duration = duration
        return best_key
|
class DogCk(object):
    """Singleton: every instantiation returns the same instance."""

    # Cached singleton instance (name-mangled to _DogCk__flg).
    __flg = None

    def __init__(self, name):
        # NOTE: __init__ still runs on every DogCk(...) call, so ``name``
        # is overwritten each time even though the instance is shared.
        self.name = name

    def __new__(cls, name):
        # Fix: identity comparison with ``is`` instead of ``== None``
        # (PEP 8; also safe against objects overriding __eq__).
        if DogCk.__flg is None:
            DogCk.__flg = object.__new__(cls)
        return DogCk.__flg
# Demo: both constructions return the same singleton instance, so the two
# printed ids are identical.
a=DogCk('ck')
b=DogCk('ck')
print(id(a))
print(id(b))
|
17,995 | 457faf3b94b4701bd0c4794fb9dfdc7a142dce42 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel; Physics PhD Student, Ohio University
# Date : Oct-14-2016 Fri
# Last update :
#
#
# Imports
import os,shutil,random,time,subprocess
# Run the generator script 5 times; each run is expected to produce
# temp.txt, which is copied into outputs/ under a randomized, timestamped
# name and then removed.
# NOTE(review): shell=1 with string commands and the `cp` call are
# shell-dependent; shutil.copy would be portable — TODO confirm intent.
for i in list(range(5)):
    cmd = 'python create_rand_num.py'
    subprocess.call(cmd,shell=1)
    outfolder = 'outputs'
    outfile = outfolder + '/' + \
        str(random.randint(0,10000)) + time.strftime('_%b_%d_%H_%M')
    subprocess.call('cp temp.txt ' + outfile, shell=1)
    os.remove('temp.txt')
|
17,996 | 9f6b7a20c71c679e60acbb6f60b5f58a3a3b03f4 | # Generated by Django 2.1.2 on 2019-01-02 03:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``pages`` app: Block, Comment, Document,
    Event, Follow, Message, Post, Profile and Thread models, plus the
    FK/M2M links added after creation.

    Auto-generated by Django 2.1.2; applied migrations should not be
    hand-edited.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Block',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='userBlocked', to=settings.AUTH_USER_MODEL)),
                ('userBlocking', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='userBlocking', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('commentContent', models.TextField()),
                ('datePosted', models.DateTimeField(auto_now_add=True)),
                ('is_deleted', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(blank=True, max_length=255)),
                ('document', models.FileField(upload_to='documents/')),
                ('uploaded_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('date', models.CharField(max_length=100)),
                ('location', models.CharField(default='', max_length=150)),
                ('latitude', models.FloatField(default=0.0, max_length=150)),
                ('longitude', models.FloatField(default=0.0, max_length=150)),
                ('description', models.CharField(max_length=500)),
                ('category', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isRequest', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='userFollowed', to=settings.AUTH_USER_MODEL)),
                ('userFollowing', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='userFollowing', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('messageContent', models.TextField()),
                ('datePosted', models.DateTimeField(auto_now_add=True)),
                ('isRequest', models.BooleanField(default=False)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('postContent', models.TextField()),
                ('datePosted', models.DateTimeField(auto_now_add=True)),
                ('picture', models.ImageField(null=True, upload_to='post_photos/')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField()),
                ('age', models.PositiveIntegerField()),
                ('hobbies', models.TextField(default='')),
                ('photo', models.ImageField(default='profile_photos/default.jpg', null=True, upload_to='profile_photos/')),
                ('location', models.CharField(blank=True, default='', max_length=150)),
                ('latitude', models.FloatField(default=0.0, max_length=150)),
                ('longitude', models.FloatField(default=0.0, max_length=150)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userOne', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userOne', to=settings.AUTH_USER_MODEL)),
                ('userTwo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userTwo', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.Profile'),
        ),
        migrations.AddField(
            model_name='message',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.Thread'),
        ),
        migrations.AddField(
            model_name='event',
            name='poster',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.PROTECT, related_name='poster', to='pages.Profile'),
        ),
        migrations.AddField(
            model_name='event',
            name='rsvp_list',
            field=models.ManyToManyField(blank=True, to='pages.Profile'),
        ),
        migrations.AddField(
            model_name='comment',
            name='post',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.Post'),
        ),
        migrations.AddField(
            model_name='comment',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.Profile'),
        ),
    ]
17,997 | bf478725419318cead425988a68645cd56d198fa | #!/usr/bin/env python
#coding:utf-8
__author__ = 'sws'
from django.utils.html import mark_safe
from random import randint
def get_list_randon_int(num, stop=1):
    """Return a list of ``num`` random integers drawn from [1, stop].

    :param num: how many random integers to generate
    :param stop: inclusive upper bound for each value (default 1)
    :return: list of ints
    """
    return [randint(1, stop) for _ in range(num)]
class PagiInfo:
    """Pagination bookkeeping for a list of items.

    Attributes:
        Page: current page number (1-based).
        Total_count: total number of items.
        Per_item: items shown per page (default 5).
    """

    def __init__(self, page, total_count, per_item=5):
        self.Page = page
        self.Total_count = total_count
        self.Per_item = per_item

    @property
    def start(self):
        """Index of the first item on the current page."""
        return (self.Page - 1) * self.Per_item

    @property
    def end(self):
        """Index one past the last item on the current page."""
        return self.Page * self.Per_item

    @property
    def total_pages(self):
        """Total number of pages, counting a trailing partial page."""
        full_pages, remainder = divmod(self.Total_count, self.Per_item)
        return full_pages + 1 if remainder else full_pages
def Paginor(page, total_pages, url_string):
    '''
    Build the pagination HTML (bootstrap-style button links).

    :param page: current page number
    :param total_pages: total number of pages
    :param url_string: URL prefix the page number is appended to
    :return: mark_safe HTML string
    '''
    # text = lambda x: "{% " + "url 'category/product_list' %s " %(x) + " %}"
    # print text(1)
    # Show a window of up to ~9 numbered links centred on the current page.
    if total_pages < 9:
        start = 0
        end = total_pages
    else:
        if page < 5:
            start = 0
            end = 10
        else:
            start = page - 5
            if page + 4 > total_pages:
                end = total_pages
            else:
                end = page+4
    # "first page" link (link texts are user-facing Chinese labels)
    pa_html = ['<a class = "btn btn-default" href="'+url_string+'1">首页</a>']
    # "previous page" link — inert (href=#) on the first page
    if page <= 1:
        pa_html.append('<a class = "btn btn-default" href=#>前一页</a>')
    else:
        pa_html.append('<a class = "btn btn-default" href='+url_string+'%d>前一页</a>' %(page-1))
    # numbered page links
    for i in range(start+1, end+1):
        temp = '<a class = "btn btn-default" href='+url_string+'%d>%d</a>' %(i, i)
        pa_html.append(temp)
    # "next page" link — inert on the last page
    if page >= total_pages:
        pa_html.append('<a class = "btn btn-default" href=#>后一页</a>')
    else:
        pa_html.append('<a class = "btn btn-default" href='+url_string+'%d>后一页</a>' %(page+1))
    # "last page" link
    pa_html.append('<a class = "btn btn-default" href='+url_string+'%d>尾页</a>' %(total_pages))
    page_string=mark_safe(' '.join(pa_html))
    return page_string
def try_int(num, default=0):
    """Convert ``num`` to int, returning ``default`` on conversion failure.

    :param num: value to convert
    :param default: value returned when the conversion fails
    :return: int, or ``default``
    """
    try:
        return int(num)
    except (TypeError, ValueError):
        # Only genuine conversion failures fall back to the default; the
        # original bare ``except`` also swallowed KeyboardInterrupt/SystemExit.
        return default
def try_float(num, default=0.0):
    """Convert ``num`` to float, returning ``default`` on conversion failure.

    :param num: value to convert
    :param default: value returned when the conversion fails
    :return: float, or ``default``
    """
    try:
        return float(num)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except`` for the same reason as try_int.
        return default
|
17,998 | 4a84ecd957605e2bbdf762c4f3f8f7fb63bed340 | """
@author jacobi petrucciani
@desc global vars for archives
"""
import sys
__version__ = "VERSION"
PY_VER = sys.version_info
# Fix: compare the version tuple directly. The old check
# (PY_VER[0] >= 3 and PY_VER[1] >= 8) would wrongly report False for a
# hypothetical Python 4.0 (minor 0 < 8).
IS_38 = PY_VER >= (3, 8)
if IS_38:
    # we should use the built in ast if python 3.8 or higher
    # see https://github.com/python/typed_ast#python-38
    import ast as ast3  # type: ignore # noqa
else:
    # use the forked typed version
    from typed_ast import ast3  # type: ignore # noqa
# Directory-name regex fragments skipped during file discovery by default.
DEFAULT_EXCLUDES_LIST = [
    r"\.eggs",
    r"\.git",
    r"\.hg",
    r"\.mypy_cache",
    r"\.nox",
    r"\.tox",
    r"\.venv",
    r"env",
    r"_build",
    r"buck-out",
    r"build",
    r"dist",
]
# Combined into one regex matching any excluded path segment.
DEFAULT_EXCLUDES = r"/(" + "|".join(DEFAULT_EXCLUDES_LIST) + ")/"
# Only .py / .pyi files are included by default.
DEFAULT_INCLUDES = r"\.pyi?$"
# Argument names ignored by default — presumably skipped when checking
# function arguments (TODO confirm at the usage site).
DEFAULT_ARG_IGNORE = ["self", "cls"]
# Issue-output templates keyed by the lint style they imitate.
FORMATS = {
    "flake8": "{path}:{line}:{column}: {code} {text}",
    "pylint": "{path}:{line}: [{code}] {text}",
}
|
17,999 | b8348469d0553f288fee0bbbe77d41bc329b9f0a | """
Open and close time calculations
for ACP-sanctioned brevets
following rules described at https://rusa.org/octime_alg.html
and https://rusa.org/pages/rulesForRiders
"""
import arrow
import math
# Note for CIS 322 Fall 2016:
# You MUST provide the following two functions
# with these signatures, so that I can write
# automated tests for grading. You must keep
# these signatures even if you don't use all the
# same arguments. Arguments are explained in the
# javadoc comments.
#
# Speed tables (km/h) keyed by the control's distance bracket: the control
# distance is scaled by 0.9 and rounded up to the next 100 km (see the
# rounding in open_time/close_time), and dividing the distance by the
# bracket's speed gives the offset in hours from the brevet start.
# NOTE(review): brackets only go up to "900"; a control whose scaled
# distance exceeds 900 km would raise KeyError — confirm intended limits.
opening_table = {"100": 34, "200": 34, "300": 32, "400": 32, "500": 30,
                 "600": 30, "700": 28, "800": 28, "900": 28}
closing_table = {"100": 15, "200": 15, "300": 15, "400": 15, "500": 15,
                 "600": 15, "700": 11.428, "800": 11.428, "900": 11.428}


def open_time(control_dist_km, brevet_dist_km, brevet_start_time):
    """
    Args:
       control_dist_km:  number, the control distance in kilometers
       brevet_dist_km: number, the nominal distance of the brevet
           in kilometers, which must be one of 200, 300, 400, 600,
           or 1000 (the only official ACP brevet distances)
       brevet_start_time:  An ISO 8601 format date-time string indicating
           the official start time of the brevet
    Returns:
       An ISO 8601 format date string indicating the control open time.
       This will be in the same time zone as the brevet start time.
    """
    # A control at km 0 opens exactly at the brevet start.
    if float(control_dist_km) == 0:
        arrow_date = arrow.get(brevet_start_time)
        return arrow_date.isoformat()
    # Pick the speed bracket: scale by 0.9, round up to the next 100 km.
    rounded_dist = int(math.ceil((float(control_dist_km ) * 0.9) / 100.0) * 100)
    # Hours from start = distance / bracket speed.
    calc_time = float(control_dist_km) / float(opening_table[str(rounded_dist)])
    arrow_date = arrow.get(brevet_start_time)
    arrow_some = arrow_date.shift(hours=+calc_time)
    return arrow_some.isoformat()


def close_time(control_dist_km, brevet_dist_km, brevet_start_time):
    """
    Args:
       control_dist_km:  number, the control distance in kilometers
          brevet_dist_km: number, the nominal distance of the brevet
          in kilometers, which must be one of 200, 300, 400, 600, or 1000
          (the only official ACP brevet distances)
       brevet_start_time:  An ISO 8601 format date-time string indicating
           the official start time of the brevet
    Returns:
       An ISO 8601 format date string indicating the control close time.
       This will be in the same time zone as the brevet start time.
    """
    # A control at km 0 closes one hour after the brevet start.
    if float(control_dist_km) == 0:
        arrow_date = arrow.get(brevet_start_time).shift(hours=+1)
        return arrow_date.isoformat()
    # Same bracket selection as open_time, but against the closing speeds.
    rounded_dist = int(math.ceil((float(control_dist_km) * 0.9) / 100.0) * 100)
    calc_time = float(control_dist_km) / float(closing_table[str(rounded_dist)])
    arrow_date = arrow.get(brevet_start_time)
    arrow_some = arrow_date.shift(hours=+calc_time)
    return arrow_some.isoformat()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.