text stringlengths 38 1.54M |
|---|
import unittest
import os
from unittest.mock import MagicMock
from . import *
class TestProjectEntry(unittest.TestCase):
    """Unit tests for ProjectEntry: parsing, time accounting and rendering.

    setUp builds one representative entry (head line with a project number,
    indented content lines, and a time entry carrying total time, client
    time and an internal note) that most tests mutate before asserting.
    """

    # Class-level default; setUp replaces it with a populated instance.
    entry = None

    def setUp(self):
        entry = ProjectEntry()
        # Head line format: [[<category>:<sub>:<number> <title>]].
        entry.head_line = '[[Statik:Intern:0589 interner Support]]'
        entry.content.append('\n\n[*] Ubutu auf Altem laptop zurückgesetztz')
        entry.content.append('\t\t[*] Mount-Verzeichnisse repariert')
        entry.content.append('\n\n')
        # Time entry format: [*] @<who> <total> [ <client> [ <internal note>
        entry.time_entry = '[*] @zp 2,5 [ 1,5 [ Hier interne Notiz\n'
        self.entry = entry

    def test_parse_journal_day(self):
        # Parses a fixture journal day into a ProjectsList of entries.
        dirname = os.path.dirname(__file__)
        fixture_path = os.path.join(dirname, 'tests_fixtures/journal_day.txt')
        with open(fixture_path, 'r') as f:
            content_lines = f.readlines()
        project_entries = ProjectsList.parse_journal_day(content_lines) # type: ProjectsList
        self.assertEqual(2, len(project_entries))
        self.assertEqual(3.5, project_entries.time_total())

    def test_number(self):
        # The project number is extracted from the head line ('0589' -> 589).
        self.assertEqual(589, self.entry.number())

    def test_description(self):
        # details() joins the content lines, stripping surrounding blank runs.
        self.assertEqual('[*] Ubutu auf Altem laptop zurückgesetztz\n\t\t[*] Mount-Verzeichnisse repariert', self.entry.details())

    def test_time_total(self):
        # Total time is the first figure; decimal comma is accepted.
        self.entry.time_entry = '[*] @zp 2,5 [ 1,5 [ Hier interne Notiz\n'
        self.assertEqual(2.5, self.entry.time_total())
        self.entry.time_entry = '[*] @zp 2 [ 1,5 [ Hier interne Notiz\n'
        self.assertEqual(2, self.entry.time_total())

    def test_time_client(self):
        # Client time is the second figure; without one it falls back to total.
        self.entry.time_entry = '[*] @zp 2,5 [ 1,5 [ Hier interne Notiz\n'
        self.assertEqual(1.5, self.entry.time_client())
        self.entry.time_entry = '[*] @zp 2,5 [Hier interne Notiz\n'
        self.assertEqual(2.5, self.entry.time_client())
        self.entry.time_entry = '[*] @zp 0 [Hier interne Notiz\n'
        self.assertEqual(0, self.entry.time_client())
        self.entry.time_entry = '[*] @zp 4 [Hier interne Notiz\n'
        self.assertEqual(4, self.entry.time_client())

    def test_internal_comment(self):
        # The internal comment is the trailing note, whitespace-trimmed;
        # missing or empty notes yield ''.
        self.entry.time_entry = '[*] @zp 2,5 [ 1,5 [c Hier interne Notiz \n'
        self.assertEqual('Hier interne Notiz', self.entry.internal_comment())
        self.entry.time_entry = '[*] @zp 2,5 '
        self.assertEqual('', self.entry.internal_comment())
        self.entry.time_entry = '[*] @zp 2,5 [c \n'
        self.assertEqual('', self.entry.internal_comment())

    def test_is_new(self):
        # '[*]' marks a processed entry, '[ ]' marks a new (open) one.
        self.entry.time_entry = '[*] @zp 2,5 [ 1,5 [c Hier interne Notiz \n'
        self.assertFalse(self.entry.is_new())
        self.entry.time_entry = '[ ] @zp 2,5 [ 1,5 [c Hier interne Notiz \n'
        self.assertTrue(self.entry.is_new())

    def test_check_description(self):
        # check_description() renders a human-readable summary block.
        self.entry.time_entry = '[ ] @zp 1 [c Bemerkung'
        self.entry.content = ['test']
        self.entry.head_line = 'test:1234 foobar'
        expected = '\n'.join([
            'test:1234 foobar',
            '',
            # Note: the next two strings are one list element joined by
            # implicit literal concatenation (no comma between them).
            'ID: 1234\n'
            '===========================',
            'Beschreibung:',
            'test',
            '===========================',
            'Interne Bemerkung: Bemerkung',
            '===========================',
            'Zeitaufwand (Gesamt): 1.0',
            'Zeitaufwand (Für Kunden): 1.0'
        ])
        current = self.entry.check_description()
        self.assertEqual(expected, current)
class TestRpc(unittest.TestCase):
    """Tests for the Rpc client; the transport (send_request) is mocked."""

    def test_addVorgang(self):
        rpc = self.rpc(server_response=3.0)
        # addVorgang must forward its arguments unchanged as a positional list.
        rpc.addVorgang(1234, '2019-04-31', 3.2, 'TestVorgang\n Mehrzeilig')
        rpc.send_request.assert_called_once_with('addVorgang', [1234, '2019-04-31', 3.2, 'TestVorgang\n Mehrzeilig'])

    def rpc(self, server_response):
        # Helper: an Rpc instance whose network call is replaced by a mock
        # returning `server_response`, so no server is needed.
        rpc = Rpc('http://localhost:1420')
        rpc.send_request = MagicMock(return_value = server_response)
        return rpc
|
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import sys, time, argparse
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from tensorflow.contrib.layers import l2_regularizer
from tensorflow.contrib.layers import batch_norm
# Fraction of the dataset held out as the validation split in Medgan.loadData().
_VALIDATION_RATIO = 0.1
class Medgan(object):
    """medGAN: a GAN that generates synthetic patient records.

    The generator produces vectors in the latent space of a (pre-trained)
    autoencoder; the decoder maps them back to record space before the
    discriminator scores them. Supports 'binary' and count-valued data.
    Built on TF1 graph mode (tf.placeholder, tf.contrib layers).
    """

    def __init__(self,
                 dataType='binary',
                 inputDim=615,
                 embeddingDim=128,
                 randomDim=128,
                 generatorDims=(128, 128),
                 discriminatorDims=(256, 128, 1),
                 compressDims=(),
                 decompressDims=(),
                 bnDecay=0.99,
                 l2scale=0.001):
        # dataType 'binary': tanh autoencoder activations + sigmoid output with
        # cross-entropy loss; anything else: relu + squared-error loss.
        self.inputDim = inputDim
        self.embeddingDim = embeddingDim
        # A final generator layer of size embeddingDim is always appended.
        self.generatorDims = list(generatorDims) + [embeddingDim]
        self.randomDim = randomDim
        self.dataType = dataType
        if dataType == 'binary':
            self.aeActivation = tf.nn.tanh
        else:
            self.aeActivation = tf.nn.relu
        self.generatorActivation = tf.nn.relu
        self.discriminatorActivation = tf.nn.relu
        self.discriminatorDims = discriminatorDims
        # Encoder always ends at embeddingDim; decoder always ends at inputDim.
        self.compressDims = list(compressDims) + [embeddingDim]
        self.decompressDims = list(decompressDims) + [inputDim]
        self.bnDecay = bnDecay
        self.l2scale = l2scale

    def loadData(self, dataPath=''):
        """Load the patient matrix (.npy) and split off a validation set.

        Binary data is clipped to [0, 1] in case counts were aggregated.
        Returns (trainX, validX); the split is deterministic (random_state=0).
        """
        data = np.load(dataPath)
        if self.dataType == 'binary':
            data = np.clip(data, 0, 1)
        trainX, validX = train_test_split(data, test_size=_VALIDATION_RATIO, random_state=0)
        return trainX, validX

    def buildAutoencoder(self, x_input):
        """Build the encoder/decoder stack and its reconstruction loss.

        Returns (loss, decodeVariables). decodeVariables maps variable names
        ('aed_W_i' / 'aed_b_i') to the decoder's variables so the decoder can
        be re-applied to generator output in buildDiscriminator/generateData.
        """
        decodeVariables = {}
        with tf.variable_scope('autoencoder', regularizer=l2_regularizer(self.l2scale)):
            tempVec = x_input
            tempDim = self.inputDim
            i = 0
            # Encoder: inputDim -> ... -> embeddingDim.
            for compressDim in self.compressDims:
                W = tf.get_variable('aee_W_'+str(i), shape=[tempDim, compressDim])
                b = tf.get_variable('aee_b_'+str(i), shape=[compressDim])
                tempVec = self.aeActivation(tf.add(tf.matmul(tempVec, W), b))
                tempDim = compressDim
                i += 1
            i = 0
            # Decoder hidden layers; the final layer is handled below.
            for decompressDim in self.decompressDims[:-1]:
                W = tf.get_variable('aed_W_'+str(i), shape=[tempDim, decompressDim])
                b = tf.get_variable('aed_b_'+str(i), shape=[decompressDim])
                tempVec = self.aeActivation(tf.add(tf.matmul(tempVec, W), b))
                tempDim = decompressDim
                decodeVariables['aed_W_'+str(i)] = W
                decodeVariables['aed_b_'+str(i)] = b
                i += 1
            # Final decoder layer back to inputDim.
            W = tf.get_variable('aed_W_'+str(i), shape=[tempDim, self.decompressDims[-1]])
            b = tf.get_variable('aed_b_'+str(i), shape=[self.decompressDims[-1]])
            decodeVariables['aed_W_'+str(i)] = W
            decodeVariables['aed_b_'+str(i)] = b
            if self.dataType == 'binary':
                # Sigmoid output + Bernoulli cross-entropy; 1e-12 guards log(0).
                x_reconst = tf.nn.sigmoid(tf.add(tf.matmul(tempVec,W),b))
                loss = tf.reduce_mean(-tf.reduce_sum(x_input * tf.log(x_reconst + 1e-12) + (1. - x_input) * tf.log(1. - x_reconst + 1e-12), 1), 0)
            else:
                # ReLU output + mean squared error for count data.
                x_reconst = tf.nn.relu(tf.add(tf.matmul(tempVec,W),b))
                loss = tf.reduce_mean((x_input - x_reconst)**2)
        return loss, decodeVariables

    def buildGenerator(self, x_input, bn_train):
        """Generator: dense layers with batch norm and shortcut additions.

        NOTE(review): the shortcut `h3 + tempVec` requires each generator
        layer's width to equal the running dimension (true for the defaults,
        where everything is 128) — confirm before using non-default sizes.
        """
        tempVec = x_input
        tempDim = self.randomDim
        with tf.variable_scope('generator', regularizer=l2_regularizer(self.l2scale)):
            for i, genDim in enumerate(self.generatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, genDim])
                h = tf.matmul(tempVec,W)
                h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None)
                h3 = self.generatorActivation(h2)
                tempVec = h3 + tempVec  # shortcut connection
                tempDim = genDim
            # Final layer: named 'W'+i (no underscore), so it cannot clash
            # with the loop's 'W_'+i variables.
            W = tf.get_variable('W'+str(i), shape=[tempDim, self.generatorDims[-1]])
            h = tf.matmul(tempVec,W)
            h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None)
            if self.dataType == 'binary':
                h3 = tf.nn.tanh(h2)  # matches the tanh embedding range
            else:
                h3 = tf.nn.relu(h2)
            output = h3 + tempVec
        return output

    def buildGeneratorTest(self, x_input, bn_train):
        """Inference-time twin of buildGenerator (batch norm non-trainable).

        Built in a fresh graph by generateData, so the identical variable
        names let tf.train.Saver restore the trained weights by name.
        """
        tempVec = x_input
        tempDim = self.randomDim
        with tf.variable_scope('generator', regularizer=l2_regularizer(self.l2scale)):
            for i, genDim in enumerate(self.generatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, genDim])
                h = tf.matmul(tempVec,W)
                h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None, trainable=False)
                h3 = self.generatorActivation(h2)
                tempVec = h3 + tempVec
                tempDim = genDim
            W = tf.get_variable('W'+str(i), shape=[tempDim, self.generatorDims[-1]])
            h = tf.matmul(tempVec,W)
            h2 = batch_norm(h, decay=self.bnDecay, scale=True, is_training=bn_train, updates_collections=None, trainable=False)
            if self.dataType == 'binary':
                h3 = tf.nn.tanh(h2)
            else:
                h3 = tf.nn.relu(h2)
            output = h3 + tempVec
        return output

    def getDiscriminatorResults(self, x_input, keepRate, reuse=False):
        """Discriminator forward pass -> per-row probability of being real.

        Minibatch averaging: every row is concatenated with the batch's
        per-feature mean (hence input width inputDim * 2), which helps the
        discriminator detect low-diversity (collapsed) batches.
        """
        batchSize = tf.shape(x_input)[0]
        inputMean = tf.reshape(tf.tile(tf.reduce_mean(x_input,0), [batchSize]), (batchSize, self.inputDim))
        tempVec = tf.concat([x_input, inputMean], 1)
        tempDim = self.inputDim * 2
        with tf.variable_scope('discriminator', reuse=reuse, regularizer=l2_regularizer(self.l2scale)):
            for i, discDim in enumerate(self.discriminatorDims[:-1]):
                W = tf.get_variable('W_'+str(i), shape=[tempDim, discDim])
                b = tf.get_variable('b_'+str(i), shape=[discDim])
                h = self.discriminatorActivation(tf.add(tf.matmul(tempVec,W),b))
                h = tf.nn.dropout(h, keepRate)
                tempVec = h
                tempDim = discDim
            W = tf.get_variable('W', shape=[tempDim, 1])
            b = tf.get_variable('b', shape=[1])
            y_hat = tf.squeeze(tf.nn.sigmoid(tf.add(tf.matmul(tempVec, W), b)))
        return y_hat

    def buildDiscriminator(self, x_real, x_fake, keepRate, decodeVariables, bn_train):
        """Wire up both discriminator passes and the GAN losses.

        Returns (loss_d, loss_g, y_hat_real, y_hat_fake): the standard
        non-saturating cross-entropy losses with 1e-12 guarding log(0).
        """
        # Discriminate the real samples.
        y_hat_real = self.getDiscriminatorResults(x_real, keepRate, reuse=False)
        # Decode the generator's embeddings, then discriminate the fakes
        # (reuse=True shares the discriminator weights).
        tempVec = x_fake
        i = 0
        for _ in self.decompressDims[:-1]:
            tempVec = self.aeActivation(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
            i += 1
        if self.dataType == 'binary':
            x_decoded = tf.nn.sigmoid(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
        else:
            x_decoded = tf.nn.relu(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
        y_hat_fake = self.getDiscriminatorResults(x_decoded, keepRate, reuse=True)
        loss_d = -tf.reduce_mean(tf.log(y_hat_real + 1e-12)) - tf.reduce_mean(tf.log(1. - y_hat_fake + 1e-12))
        loss_g = -tf.reduce_mean(tf.log(y_hat_fake + 1e-12))
        return loss_d, loss_g, y_hat_real, y_hat_fake

    def print2file(self, buf, outFile):
        """Append `buf` plus a newline to `outFile` (simple run log)."""
        outfd = open(outFile, 'a')
        outfd.write(buf + '\n')
        outfd.close()

    def generateData(self,
                     nSamples=100,
                     modelFile='model',
                     batchSize=100,
                     outFile='out'):
        """Restore a trained model and write generated records to `outFile`.

        buildAutoencoder is called only to recreate the decoder variables so
        the saver can restore them; x_dummy is never fed.
        """
        x_dummy = tf.placeholder('float', [None, self.inputDim])
        _, decodeVariables = self.buildAutoencoder(x_dummy)
        x_random = tf.placeholder('float', [None, self.randomDim])
        bn_train = tf.placeholder('bool')
        x_emb = self.buildGeneratorTest(x_random, bn_train)
        tempVec = x_emb
        i = 0
        # Decode generated embeddings back to record space.
        for _ in self.decompressDims[:-1]:
            tempVec = self.aeActivation(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
            i += 1
        if self.dataType == 'binary':
            x_reconst = tf.nn.sigmoid(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
        else:
            x_reconst = tf.nn.relu(tf.add(tf.matmul(tempVec, decodeVariables['aed_W_'+str(i)]), decodeVariables['aed_b_'+str(i)]))
        np.random.seed(1234)  # deterministic noise for reproducible output
        saver = tf.train.Saver()
        outputVec = []
        burn_in = 1000
        with tf.Session() as sess:
            saver.restore(sess, modelFile)
            # Burn-in with bn_train:True refreshes the batch-norm moving
            # statistics before sampling; these outputs are discarded.
            print('burning in')
            for i in range(burn_in):
                randomX = np.random.normal(size=(batchSize, self.randomDim))
                output = sess.run(x_reconst, feed_dict={x_random:randomX, bn_train:True})
            print('generating')
            # BUG: np.ceil is applied to nSamples alone (a no-op) and int()
            # then truncates the division, so this is floor(nSamples/batchSize)
            # — any final partial batch is dropped. Intended:
            # int(np.ceil(nSamples / float(batchSize))).
            nBatches = int(old_div(np.ceil(float(nSamples)), float(batchSize)))
            for i in range(nBatches):
                randomX = np.random.normal(size=(batchSize, self.randomDim))
                output = sess.run(x_reconst, feed_dict={x_random:randomX, bn_train:False})
                outputVec.extend(output)
        outputMat = np.array(outputVec)
        np.save(outFile, outputMat)

    def calculateDiscAuc(self, preds_real, preds_fake):
        """AUC of the discriminator's scores (real labelled 1, fake 0)."""
        preds = np.concatenate([preds_real, preds_fake], axis=0)
        labels = np.concatenate([np.ones((len(preds_real))), np.zeros((len(preds_fake)))], axis=0)
        auc = roc_auc_score(labels, preds)
        return auc

    def calculateDiscAccuracy(self, preds_real, preds_fake):
        """Accuracy at the 0.5 threshold (real > 0.5, fake < 0.5)."""
        total = len(preds_real) + len(preds_fake)
        hit = 0
        for pred in preds_real:
            if pred > 0.5: hit += 1
        for pred in preds_fake:
            if pred < 0.5: hit += 1
        acc = old_div(float(hit), float(total))
        return acc

    def train(self,
              dataPath='data',
              modelPath='',
              outPath='out',
              nEpochs=500,
              discriminatorTrainPeriod=2,
              generatorTrainPeriod=1,
              pretrainBatchSize=100,
              batchSize=1000,
              pretrainEpochs=100,
              saveMaxKeep=0):
        """Pre-train the autoencoder, then adversarially train the GAN.

        If modelPath is given, training resumes from that checkpoint and the
        autoencoder pre-training phase is skipped.
        """
        x_raw = tf.placeholder('float', [None, self.inputDim])
        x_random= tf.placeholder('float', [None, self.randomDim])
        keep_prob = tf.placeholder('float')
        bn_train = tf.placeholder('bool')
        loss_ae, decodeVariables = self.buildAutoencoder(x_raw)
        x_fake = self.buildGenerator(x_random, bn_train)
        loss_d, loss_g, y_hat_real, y_hat_fake = self.buildDiscriminator(x_raw, x_fake, keep_prob, decodeVariables, bn_train)
        trainX, validX = self.loadData(dataPath)
        # Partition trainable variables by scope; the generator optimizer also
        # fine-tunes the decoder variables (list(decodeVariables.values())).
        t_vars = tf.trainable_variables()
        ae_vars = [var for var in t_vars if 'autoencoder' in var.name]
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        g_vars = [var for var in t_vars if 'generator' in var.name]
        all_regs = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        optimize_ae = tf.train.AdamOptimizer().minimize(loss_ae + sum(all_regs), var_list=ae_vars)
        optimize_d = tf.train.AdamOptimizer().minimize(loss_d + sum(all_regs), var_list=d_vars)
        optimize_g = tf.train.AdamOptimizer().minimize(loss_g + sum(all_regs), var_list=g_vars+list(decodeVariables.values()))
        initOp = tf.global_variables_initializer()
        nBatches = int(np.ceil(old_div(float(trainX.shape[0]), float(batchSize))))
        saver = tf.train.Saver(max_to_keep=saveMaxKeep)
        logFile = outPath + '.log'
        with tf.Session() as sess:
            if modelPath == '': sess.run(initOp)
            else: saver.restore(sess, modelPath)
            # BUG: np.ceil is applied to the row count alone (a no-op), then
            # int() truncates the division — these are floor(), not ceil(),
            # so the final partial pre-training batch is dropped.
            nTrainBatches = int(old_div(np.ceil(float(trainX.shape[0])), float(pretrainBatchSize)))
            nValidBatches = int(old_div(np.ceil(float(validX.shape[0])), float(pretrainBatchSize)))
            if modelPath== '':
                # Phase 1: autoencoder pre-training.
                for epoch in range(pretrainEpochs):
                    idx = np.random.permutation(trainX.shape[0])
                    trainLossVec = []
                    for i in range(nTrainBatches):
                        batchX = trainX[idx[i*pretrainBatchSize:(i+1)*pretrainBatchSize]]
                        _, loss = sess.run([optimize_ae, loss_ae], feed_dict={x_raw:batchX})
                        trainLossVec.append(loss)
                    idx = np.random.permutation(validX.shape[0])
                    validLossVec = []
                    for i in range(nValidBatches):
                        batchX = validX[idx[i*pretrainBatchSize:(i+1)*pretrainBatchSize]]
                        loss = sess.run(loss_ae, feed_dict={x_raw:batchX})
                        validLossVec.append(loss)
                    validReverseLoss = 0.
                    buf = 'Pretrain_Epoch:%d, trainLoss:%f, validLoss:%f, validReverseLoss:%f' % (epoch, np.mean(trainLossVec), np.mean(validLossVec), validReverseLoss)
                    print(buf)
                    self.print2file(buf, logFile)
            # Phase 2: adversarial training.
            idx = np.arange(trainX.shape[0])
            for epoch in range(nEpochs):
                d_loss_vec= []
                g_loss_vec = []
                for i in range(nBatches):
                    for _ in range(discriminatorTrainPeriod):
                        batchIdx = np.random.choice(idx, size=batchSize, replace=False)
                        batchX = trainX[batchIdx]
                        randomX = np.random.normal(size=(batchSize, self.randomDim))
                        _, discLoss = sess.run([optimize_d, loss_d], feed_dict={x_raw:batchX, x_random:randomX, keep_prob:1.0, bn_train:False})
                        d_loss_vec.append(discLoss)
                    for _ in range(generatorTrainPeriod):
                        randomX = np.random.normal(size=(batchSize, self.randomDim))
                        _, generatorLoss = sess.run([optimize_g, loss_g], feed_dict={x_raw:batchX, x_random:randomX, keep_prob:1.0, bn_train:True})
                        g_loss_vec.append(generatorLoss)
                # BUG: this reassignment clobbers the training index array —
                # from epoch 2 onward the training batches above sample only
                # the first len(validX) rows of trainX.
                idx = np.arange(len(validX))
                nValidBatches = int(np.ceil(old_div(float(len(validX)), float(batchSize))))
                validAccVec = []
                validAucVec = []
                # BUG: should loop over nValidBatches, not nBatches. Also
                # NOTE(review): choice(..., replace=False) raises when
                # batchSize > len(validX) — confirm batch sizing.
                for i in range(nBatches):
                    batchIdx = np.random.choice(idx, size=batchSize, replace=False)
                    batchX = validX[batchIdx]
                    randomX = np.random.normal(size=(batchSize, self.randomDim))
                    preds_real, preds_fake, = sess.run([y_hat_real, y_hat_fake], feed_dict={x_raw:batchX, x_random:randomX, keep_prob:1.0, bn_train:False})
                    validAcc = self.calculateDiscAccuracy(preds_real, preds_fake)
                    validAuc = self.calculateDiscAuc(preds_real, preds_fake)
                    validAccVec.append(validAcc)
                    validAucVec.append(validAuc)
                buf = 'Epoch:%d, d_loss:%f, g_loss:%f, accuracy:%f, AUC:%f' % (epoch, np.mean(d_loss_vec), np.mean(g_loss_vec), np.mean(validAccVec), np.mean(validAucVec))
                print(buf)
                self.print2file(buf, logFile)
                savePath = saver.save(sess, outPath, global_step=epoch)
        print(savePath)
def str2bool(v):
    """Interpret a command-line string as a boolean flag.

    Accepts common spellings (case-insensitive); raises
    argparse.ArgumentTypeError for anything unrecognized.
    """
    normalized = v.lower()
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_arguments(parser):
    """Attach all medGAN CLI options to `parser` and parse sys.argv.

    Positional args: <patient_matrix> (input .npy) and <out_file> (model
    prefix). All other options mirror the Medgan/train/generateData defaults.

    NOTE(review): type=tuple on the size options converts a raw command-line
    string into a tuple of *characters*, so only the defaults behave as
    intended — confirm before passing these flags on the command line.
    """
    parser.add_argument('--embed_size', type=int, default=128, help='The dimension size of the embedding, which will be generated by the generator. (default value: 128)')
    parser.add_argument('--noise_size', type=int, default=128, help='The dimension size of the random noise, on which the generator is conditioned. (default value: 128)')
    parser.add_argument('--generator_size', type=tuple, default=(128, 128), help='The dimension size of the generator. Note that another layer of size "--embed_size" is always added. (default value: (128, 128))')
    parser.add_argument('--discriminator_size', type=tuple, default=(256, 128, 1), help='The dimension size of the discriminator. (default value: (256, 128, 1))')
    parser.add_argument('--compressor_size', type=tuple, default=(), help='The dimension size of the encoder of the autoencoder. Note that another layer of size "--embed_size" is always added. Therefore this can be a blank tuple. (default value: ())')
    parser.add_argument('--decompressor_size', type=tuple, default=(), help='The dimension size of the decoder of the autoencoder. Note that another layer, whose size is equal to the dimension of the <patient_matrix>, is always added. Therefore this can be a blank tuple. (default value: ())')
    parser.add_argument('--data_type', type=str, default='binary', choices=['binary', 'count'], help='The input data type. The <patient matrix> could either contain binary values or count values. (default value: "binary")')
    parser.add_argument('--batchnorm_decay', type=float, default=0.99, help='Decay value for the moving average used in Batch Normalization. (default value: 0.99)')
    parser.add_argument('--L2', type=float, default=0.001, help='L2 regularization coefficient for all weights. (default value: 0.001)')
    parser.add_argument('data_file', type=str, metavar='<patient_matrix>', help='The path to the numpy matrix containing aggregated patient records.')
    parser.add_argument('out_file', type=str, metavar='<out_file>', help='The path to the output models.')
    parser.add_argument('--model_file', type=str, metavar='<model_file>', default='', help='The path to the model file, in case you want to continue training. (default value: '')')
    parser.add_argument('--n_pretrain_epoch', type=int, default=100, help='The number of epochs to pre-train the autoencoder. (default value: 100)')
    parser.add_argument('--n_epoch', type=int, default=1000, help='The number of epochs to train medGAN. (default value: 1000)')
    parser.add_argument('--n_discriminator_update', type=int, default=2, help='The number of times to update the discriminator per epoch. (default value: 2)')
    parser.add_argument('--n_generator_update', type=int, default=1, help='The number of times to update the generator per epoch. (default value: 1)')
    parser.add_argument('--pretrain_batch_size', type=int, default=100, help='The size of a single mini-batch for pre-training the autoencoder. (default value: 100)')
    parser.add_argument('--batch_size', type=int, default=1000, help='The size of a single mini-batch for training medGAN. (default value: 1000)')
    parser.add_argument('--save_max_keep', type=int, default=0, help='The number of models to keep. Setting this to 0 will save models for every epoch. (default value: 0)')
    parser.add_argument('--generate_data', type=str2bool, default=False, help='If True the model generates data, if False the model is trained (default value: False)')
    parser.add_argument('--generation-batch', type=int, default=10000, help='Size of the batch generated from the model (default value: 10000)')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    args = parse_arguments(parser)
    # The model's input dimension is read straight from the patient matrix.
    data = np.load(args.data_file)
    inputDim = data.shape[1]
    mg = Medgan(dataType=args.data_type,
                inputDim=inputDim,
                embeddingDim=args.embed_size,
                randomDim=args.noise_size,
                generatorDims=args.generator_size,
                discriminatorDims=args.discriminator_size,
                compressDims=args.compressor_size,
                decompressDims=args.decompressor_size,
                bnDecay=args.batchnorm_decay,
                l2scale=args.L2)
    # --generate_data: True for generation, False for training.
    if not args.generate_data:
        # Training
        mg.train(dataPath=args.data_file,
                 modelPath=args.model_file,
                 outPath=args.out_file,
                 pretrainEpochs=args.n_pretrain_epoch,
                 nEpochs=args.n_epoch,
                 discriminatorTrainPeriod=args.n_discriminator_update,
                 generatorTrainPeriod=args.n_generator_update,
                 pretrainBatchSize=args.pretrain_batch_size,
                 batchSize=args.batch_size,
                 saveMaxKeep=args.save_max_keep)
    else:
        # Generate synthetic data using a trained model.
        # You must specify "--model_file" and "<out_file>" to generate synthetic data.
        mg.generateData(nSamples=args.generation_batch,
                        modelFile=args.model_file,
                        batchSize=args.batch_size,
                        outFile=args.out_file)
|
import ROOT
import lib.plotter
import argparse
def ParseOption():
    """Parse the command-line options for the comparison-plot script.

    All options are plain strings: two tags, two input ROOT files, and the
    output directory for the rendered plots.
    """
    arg_parser = argparse.ArgumentParser(description='submit all')
    option_specs = [
        ('--t1', 'tag1', 'for each plot'),
        ('--t2', 'tag2', 'for each plot'),
        ('--plotfile1', 'plotfile1', 'plotfile1'),
        ('--plotfile2', 'plotfile2', 'plotfile2'),
        ('--plotdir', 'plotdir', 'plotdir'),
    ]
    for flag, dest, help_text in option_specs:
        arg_parser.add_argument(flag, dest=dest, type=str, help=help_text)
    return arg_parser.parse_args()
args=ParseOption()
plotfile1 = args.plotfile1
plotfile2 = args.plotfile2
tag1 = args.tag1
tag2 = args.tag2
plotdir = args.plotdir
# Index-aligned plot configuration: histogram name, axis ranges, x title.
# ("nSegPerChamebr" is presumably the key as spelled in the ROOT files —
# verify before "fixing" the typo.)
histnames = ["nSegPerChamebr","nRHPerSeg","chi2PerDOF"]
xmins = [0,0,0]
xmaxs = [10,7,100]
ymins = [0.1,0.1,0.1]
#ymaxs = [10000,10000,10000,10000]
ymaxs = [5000,5000,6000]
xtitles = ["nSegPerChamber","nRHPerSeg","chi2/DOF"]
for i in range(len(histnames)):
    plotter = lib.plotter.Plotter()
    histname = histnames[i]
#    plotter.GetHistFromRoot("tmpRootPlots/CSCresults_RU.root",histname,'RU')
#    plotter.GetHistFromRoot("tmpRootPlots/CSCresults_UF_ST.root",histname,'UF')
    plotter.GetHistFromRoot(plotfile1,histname,tag1)
    plotter.GetHistFromRoot(plotfile2,histname,tag2)
    drawOps = ["HIST","HIST"]
    colors = [1,2]
    styles = [1,1]
    sizes = [2,2]
    # NOTE(review): the histogram keys and legend comments are hard-coded to
    # the '_RU'/'_UF' suffixes, so this presumably only lines up when
    # --t1 RU --t2 UF are passed — confirm against GetHistFromRoot's naming.
    tmpHists = [histname+'_RU',histname+'_UF']
    plotter.AddPlot(tmpHists,drawOps, colors, styles, sizes)
    comments = ["RU algo","UF algo"]
    canvasCord = [xmins[i],xmaxs[i],ymins[i],ymaxs[i]]
    legendCord = [0.7,0.75,0.9,0.9]
    titles = [xtitles[i],'']
#    savename = "/home/mhl/public_html/2017/20171203_cscSeg/ME11/" + histname
#    savename = "/home/mhl/public_html/2017/20171203_cscSeg/nonME11/" + histname
    savename = plotdir + histname
    if i == 2:
        # chi2/DOF is drawn with a logarithmic y axis.
        plotter.Plot(tmpHists,canvasCord,legendCord,titles,comments,drawOps,savename,True) # log
    else:
        plotter.Plot(tmpHists,canvasCord,legendCord,titles,comments,drawOps,savename)
|
# trainer.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from base.base_dataset import BaseDataset
from base.base_logger import BaseLogger
from base.base_model import BaseModel
from base.base_trainer import BaseTrainer
from tqdm import tqdm
from typing import Optional
from typing import Tuple
import numpy as np
import random as rand
import tensorflow as tf
class Trainer(BaseTrainer):
    """Concrete trainer: per-epoch training and validation loops.

    Batches come from the dataset's ``data`` iterable; a tqdm bar sized by
    ``len(dataset)`` caps the batches consumed per epoch and shows progress.
    Mean losses (and, for validation, one randomly chosen batch's
    prediction/target pair) are written through the logger.
    """

    def __init__(self, model: BaseModel, logger: BaseLogger, train_dataset: BaseDataset,
                 valid_dataset: Optional[BaseDataset]) -> None:
        super(Trainer, self).__init__(model, logger, train_dataset, valid_dataset)

    def train_epoch(self) -> None:
        """Run one pass over the training data, applying gradients per batch."""
        progress = tqdm(range(len(self.train_dataset)))
        progress.set_description("Training Epoch [{}/{}]".format(int(self.model.epoch),
                                                                 self.config.num_epochs))
        losses = []
        for batch, _ in zip(self.train_dataset.data, progress):
            loss, gradients = self.train_step(batch)
            self.model.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
            losses.append(loss)
            # One optimizer update == one global step.
            self.model.global_step.assign_add(delta=1)
        self.logger.summarize(self.model.global_step, summarizer="train", summaries_dict={
            "total_loss": np.mean(losses)
        })

    def train_step(self, data: Tuple[tf.Tensor, ...]) -> Tuple[tf.Tensor, ...]:
        """Return (loss, gradients) for a single (x, y) batch."""
        features, labels = data
        with tf.GradientTape() as tape:
            prediction = self.model(features, training=True)
            loss = self.model.loss(labels, prediction)
        gradients = tape.gradient(loss, self.model.trainable_variables)
        return loss, gradients

    def validate_epoch(self) -> None:
        """Run one pass over the validation data and log a sampled batch."""
        progress = tqdm(range(len(self.valid_dataset)))
        progress.set_description("Validating Epoch {}".format(int(self.model.epoch)))
        losses, predictions, targets = [], [], []
        for batch, _ in zip(self.valid_dataset.data, progress):
            loss, prediction, target = self.validate_step(batch)
            losses.append(loss)
            predictions.append(prediction)
            targets.append(target)
        # Summarize one randomly chosen batch so the logged sample rotates.
        sample = rand.choice(range(len(predictions)))
        self.logger.summarize(self.model.global_step, summarizer="validation", summaries_dict={
            "prediction": predictions[sample],
            "target": targets[sample],
            "total_loss": np.mean(losses)
        })

    def validate_step(self, data: Tuple[tf.Tensor, ...]) -> Tuple[tf.Tensor, ...]:
        """Evaluate one (x, y) batch without gradient tracking."""
        features, labels = data
        prediction = self.model(features, training=False)
        return self.model.loss(labels, prediction), prediction, labels
|
from django.contrib import admin
from holiday.models import Holiday
# Register your models here.
# Expose Holiday in the Django admin with the default ModelAdmin options.
admin.site.register(Holiday)
|
#!/var/www/vhosts/munichmakerlab.de/status/env/bin/python
import paho.mqtt.client as paho
from threading import Timer
import logging
import os
import config
# Directory containing this script; the status file is written next to it.
PATH=os.path.dirname(os.path.realpath(__file__))
STATUS_FILE= "%s/%s" % (PATH, "current_status")
def on_connect(mosq, obj, rc):
    """Connect callback: log the result code and subscribe to the status topic."""
    logging.info("Connect with RC " + str(rc))
    # (Re)subscribing on every connect restores the subscription after a reconnect.
    mosq.subscribe(config.topic, 0)
def on_message(mosq, obj, msg):
    """Message callback: log the message and persist its payload to STATUS_FILE.

    Fixes: the file handle leaked if write() raised (now a `with` block), and
    paho delivers the payload as bytes under Python 3, which a text-mode
    write() rejects — decode defensively before writing.
    """
    logging.info(msg.topic + " [" + str(msg.qos) + "]: " + str(msg.payload))
    payload = msg.payload
    if isinstance(payload, bytes):
        payload = payload.decode("utf-8", "replace")
    with open(STATUS_FILE, "w") as f:
        f.write(payload)
def on_subscribe(mosq, obj, mid, granted_qos):
    """Subscribe callback: log the message id and the granted QoS levels."""
    logging.info("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_disconnect(client, userdata, rc):
    """Disconnect callback: log, and reconnect on an unexpected drop.

    Fix: the original used the `<>` inequality operator, which was removed in
    Python 3 (a SyntaxError there); `!=` is equivalent and works everywhere.
    """
    logging.warning("Disconnected (RC " + str(rc) + ")")
    # rc == 0 means the client itself called disconnect(); anything else is
    # an unexpected drop, so start the reconnect cycle.
    if rc != 0:
        try_reconnect(client)
def on_log(client, userdata, level, buf):
    """Forward paho's internal log lines to the debug log."""
    logging.debug(buf)
def try_reconnect(client, time = 60):
    """Try to reconnect `client`; on failure, schedule another attempt.

    Fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to `except Exception`.

    NOTE(review): the retry Timer passes only [client], so follow-up attempts
    always use the default 60 s delay regardless of `time` — confirm intended
    (behavior preserved from the original).
    """
    try:
        logging.info("Trying reconnect")
        client.reconnect()
    except Exception:
        logging.warning("Reconnect failed. Trying again in " + str(time) + " seconds")
        Timer(time, try_reconnect, [client]).start()
# Timestamped console logging; INFO keeps connect/subscribe events visible.
logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
logging.info("Initializing MQTT")
mqttc = paho.Client()
#mqttc.username_pw_set(config.broker["user"], config.broker["password"])
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
mqttc.on_subscribe = on_subscribe
mqttc.on_log = on_log
# Initial connection attempt; failures fall back to the timed retry cycle.
try:
    mqttc.connect(config.broker["hostname"], config.broker["port"])
except:
    logging.warning("Connection failed. Trying again in 30 seconds")
    Timer(30, try_reconnect, [mqttc]).start()
logging.info("Entering loop")
# Blocks here processing network traffic until Ctrl-C.
try:
    mqttc.loop_forever()
except KeyboardInterrupt:
    pass
logging.info("Exiting")
mqttc.disconnect()
|
# Generated by Django 2.1 on 2018-08-03 13:31
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter UserProfile.phone_number to use the regex-validated CharField.

    Fix: the validator's ``message`` was accidentally a one-element tuple
    (a stray trailing comma inside the parentheses), so validation errors
    rendered the tuple repr instead of the message text. The message is not
    part of the database schema, so correcting it needs no new migration.
    """

    dependencies = [
        ('accounts', '0002_auto_20180727_0304'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='phone_number',
            # NOTE(review): max_length=12 cannot hold 15 digits plus a leading
            # '+' as the message promises — confirm the intended limit.
            field=models.CharField(blank=True, max_length=12, validators=[django.core.validators.RegexValidator(message='Phone number must be entered in the format: +999999999. Up to 15 digits allowed.', regex='^\\+?1?\\d{9,15}$')]),
        ),
    ]
|
'''
The user enters a three-digit number.
The program must add up the digits that make up this number.
'''


def digit_sum(number: int) -> int:
    """Return the sum of the decimal digits of `number`.

    Generalized from the original fixed three-iteration loop: works for an
    integer with any number of digits; negatives use their absolute value.
    """
    number = abs(number)
    total = 0
    while number:
        total += number % 10
        number //= 10
    return total


if __name__ == '__main__':
    # Guarding the I/O means importing this module no longer blocks on input().
    input_number = int(input('Введите трехзначное число'))
    print(digit_sum(input_number))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 20:30:42 2019
@author: Michael
"""
import numpy as np
import lab3_functions as fun
import basic_functions as bf
import cv2
# Section 2.2 Testing
# Question 1 and 2
# Histogram Equalize
# (cv2.imread flag 0 -> load as single-channel grayscale)
img1 = cv2.imread(r"Images\testing\contrast\7.2.01-small.png", 0)
img2 = cv2.imread(r"Images\testing\contrast\207056.jpg", 0)
# Global Equalize
out_img1 = fun.histogram_equalization(img1)
out_img2 = fun.histogram_equalization(img2)
# Show each original next to its enhanced version.
bf.display_multiple_images([img1, out_img1])
bf.display_multiple_images([img2, out_img2])
# Adaptive Histogram Equalize (16x16 tiles)
out_img3 = fun.adaptive_histogram(img1, 16, 16)
out_img4 = fun.adaptive_histogram(img2, 16, 16)
bf.display_multiple_images([img1, out_img3])
bf.display_multiple_images([img2, out_img4])
# Question 3 and 4
img3 = cv2.imread(r"Images\testing\sharpen\7.2.01-small.png", 0)
img4 = cv2.imread(r"Images\testing\sharpen\digital_orca_blurred.png", 0)
# r: blur radius, k: sharpening strength (presumably — confirm in lab3_functions).
r = 2
k = 1
# Unsharpen Mask
out_img5 = fun.unsharp_mask(img3, r, k)
out_img6 = fun.unsharp_mask(img4, r, k)
bf.display_multiple_images([img3, out_img5])
bf.display_multiple_images([img4, out_img6])
# Laplacian Sharpen
k = 1
out_img7 = fun.laplacian_sharpen(img3, k)
out_img8 = fun.laplacian_sharpen(img4, k)
bf.display_multiple_images([img3, out_img7])
bf.display_multiple_images([img4, out_img8])
# Section 2.3: enhancement of differently degraded images.
img5 = cv2.imread(r"Images\enhance\noise_additive.png", 0)
img6 = cv2.imread(r"Images\enhance\noise_multiplicative.png", 0)
img7 = cv2.imread(r"Images\enhance\noise_impulsive.png", 0)
img8 = cv2.imread(r"Images\enhance\snowglobe.png", 0)
# Additive Noise: median blur, then sharpen back the lost detail.
out_img9 = cv2.medianBlur(img5, 3)
k = 1
r = 3
out_img9 = fun.unsharp_mask(out_img9, r, k)
k = 0.4
out_img9 = fun.laplacian_sharpen(out_img9, k)
bf.display_multiple_images([img5, out_img9])
# Multiplicative Nosie: 3x3 box average, then a light laplacian sharpen.
average = np.array([[1, 1, 1],
                    [1, 1, 1],
                    [1, 1, 1]])
out_img10 = np.array(bf.spatial_filter(img6, average / np.sum(average)), np.uint8)
k = 0.1
out_img10 = fun.laplacian_sharpen(out_img10, k)
bf.display_multiple_images([img6, out_img10])
# Impulsive Noise: median filter removes salt-and-pepper outliers.
out_img11 = cv2.medianBlur(img7, 3)
bf.display_multiple_images([img7, out_img11])
# Snowglobe: gaussian smoothing followed by histogram equalization.
r = 2
gauss = fun.gaussian_kernel(1, r)
gauss_sum = np.sum(gauss)
out_img12 = np.array(bf.spatial_filter(img8, gauss / gauss_sum), np.uint8)
out_img12 = fun.histogram_equalization(out_img12)
bf.display_multiple_images([img8, out_img12])
## Python Exercise 048: sum the odd multiples of three in the interval 1..500.
## (The final message confirms "odd multiples of 3" is the intended target,
## matching the original loop's step of 2 over odd numbers.)
def soma_impares_multiplos_de_3(limite=500):
    """Return the sum of the odd multiples of 3 between 1 and `limite`.

    Generalized so the upper bound is a parameter (default 500, matching
    the original exercise).
    """
    total = 0
    for c in range(1, limite + 1, 2):  # odd numbers only
        if c % 3 == 0:
            total += c
    return total


if __name__ == '__main__':
    # Fixes: removed the leftover per-number debug print inside the loop
    # (it cluttered the single-line summary) and the redundant int(0).
    s = soma_impares_multiplos_de_3()
    print('A soma dos números ímpares multiplos de 3 é {}'.format(s))
# -*- coding: utf-8 -*-
"""
Define SampleTemplates using a Builder pattern
"""
from typing import Optional, Sequence, Union, List
from urllib.parse import urlparse
import numbers
import datetime as dt
from rspace_client.inv.quantity_unit import QuantityUnit
class TemplateBuilder:
    """
    Define a SampleTemplate prior to POSTing to RSpace.

    A SampleTemplate only requires a name and a default unit to be defined.
    The default unit is supplied as a String from a permitted list in class
    QuantityUnit, e.g. 'ml', 'g'.

    All field-adding methods return ``self`` so calls can be chained, and
    append a field definition dict to ``self.fields``.
    """

    # Type alias used in numeric field signatures.
    numeric = Union[int, float]

    def __init__(self, name, defaultUnit, description=None):
        if not QuantityUnit.is_supported_unit(defaultUnit):
            raise ValueError(
                f"{defaultUnit} must be a label of a supported unit in QuantityUnit"
            )
        self.name = name
        self.fields = []
        self.qu = QuantityUnit.of(defaultUnit)
        if description is not None:
            self.description = description

    def _set_name(self, name: str, f_type: str):
        """Create the base field dict, validating the field name."""
        # Check None explicitly so the error message is accurate (len(None)
        # would raise TypeError instead).
        if name is None or len(name) == 0:
            raise ValueError("Name cannot be empty or None")
        return {"name": name, "type": f_type}

    def radio(self, name: str, options: List, selected: str = None):
        """
        Add a Radio field.

        Parameters
        ----------
        name : str
            The field name.
        options : List
            A list of radio options.
        selected : str, optional
            An optional string indicating a radio option that should be selected
            by default. If this string is not in the 'options' List, it will be ignored
        """
        f = self._set_name(name, "Radio")
        f["definition"] = {"options": options}
        if selected is not None and len(selected) > 0 and selected in options:
            f["selectedOptions"] = [selected]
        self.fields.append(f)
        return self

    def choice(self, name: str, options: List, selected: List = None):
        """
        Add a Choice (multi-select) field.

        Parameters
        ----------
        name : str
            The field name.
        options : List
            A list of choice options.
        selected : List, optional
            An optional list of options that should be selected. If items in
            this list are not in the 'options' List, they will be ignored
        """
        f = self._set_name(name, "Choice")
        f["definition"] = {"options": options}
        if selected is not None and len(selected) > 0:
            # keep only selections that are actually valid options
            selected = [x for x in selected if x in options]
            if len(selected) > 0:
                f["selectedOptions"] = selected
        self.fields.append(f)
        return self

    def string(self, name: str, default: str = None):
        """Add a single-line String field with an optional default value."""
        f = self._set_name(name, "String")
        if default is not None:
            f["content"] = default
        self.fields.append(f)
        return self

    def text(self, name: str, default: str = None):
        """Add a multi-line Text field with an optional default value."""
        f = self._set_name(name, "Text")
        if default is not None:
            f["content"] = default
        self.fields.append(f)
        return self

    def number(self, name: str, default: numeric = None):
        """
        Add a Number field.

        Parameters
        ----------
        name : str
            The field's name.
        default : numeric, optional
            A default numeric value for the field.

        Raises
        ------
        ValueError
            if default value is not a number (integer or float).

        Returns
        -------
        This object for chaining
        """
        f = self._set_name(name, "Number")
        if default is not None:
            if isinstance(default, numbers.Number):
                f["content"] = default
            else:
                raise ValueError(f"Numeric field requires number but was '{default}'")
        self.fields.append(f)
        return self

    def date(self, name: str, isodate: Union[dt.date, dt.datetime, str] = None):
        """
        Add a Date field.

        Parameters
        ----------
        name : str
            The field name.
        isodate : Union[dt.date, dt.datetime, str]
            Either a datetime.datetime, a datetime.date, or an ISO-8601 string.

        Raises
        ------
        ValueError
            if string value is not an ISO8601 date (e.g. 2022-01-27)

        Returns
        -------
        This object for chaining
        """
        f = self._set_name(name, "Date")
        defaultDate = None
        if isodate is not None:
            ## these conditions must be in order: datetime is a subclass of
            ## date, so it has to be tested first.
            if isinstance(isodate, dt.datetime):
                defaultDate = isodate.date().isoformat()
            elif isinstance(isodate, dt.date):
                defaultDate = isodate.isoformat()
            elif isinstance(isodate, str):
                # strptime raises ValueError for a malformed date string
                defaultDate = (
                    dt.datetime.strptime(isodate, "%Y-%m-%d").date().isoformat()
                )
            if defaultDate is not None:
                f["content"] = defaultDate
        self.fields.append(f)
        return self

    def time(self, name: str, isotime: Union[dt.date, dt.time, str] = None):
        """
        Add a Time field.

        Parameters
        ----------
        name : str
            The field name.
        isotime : Union[dt.time, dt.datetime, str]
            Either a datetime.datetime, a datetime.time, or an ISO-8601 string.

        Raises
        ------
        ValueError
            if string value is not an ISO8601 time (e.g. 12:05:36)

        Returns
        -------
        This object for chaining
        """
        f = self._set_name(name, "Time")
        defaultTime = None
        if isotime is not None:
            ## these conditions must be in order: datetime has to be tested
            ## before time.
            if isinstance(isotime, dt.datetime):
                defaultTime = isotime.time().isoformat()
            elif isinstance(isotime, dt.time):
                defaultTime = isotime.isoformat()
            elif isinstance(isotime, str):
                # fromisoformat raises ValueError for a malformed time string
                defaultTime = dt.time.fromisoformat(isotime).isoformat()
            if defaultTime is not None:
                f["content"] = defaultTime
        self.fields.append(f)
        return self

    def attachment(self, name: str, desc: str = None):
        """
        Add an Attachment field.

        Parameters
        ----------
        name : str
            The field name.
        desc : str, optional
            An optional default description of the file to upload.

        Returns
        -------
        This object for chaining
        """
        f = self._set_name(name, "Attachment")
        if desc is not None and len(desc.strip()) > 0:
            f["content"] = desc
        self.fields.append(f)
        return self

    def uri(self, name: str, uri: str = None):
        """
        Add a Uri field.

        Parameters
        ----------
        name : str
            The field name.
        uri : str, optional
            An optional default URI

        Returns
        -------
        This object for chaining

        Raises
        ------
        ValueError if URI is not parsable into a URI
        """
        f = self._set_name(name, "Uri")
        if uri is not None and len(uri.strip()) > 0:
            # Validate as promised by the docstring (the original parsed the
            # URI but ignored the result). A URI without a scheme is rejected.
            parsed_uri = urlparse(uri)
            if not parsed_uri.scheme:
                raise ValueError(f"'{uri}' could not be parsed into a URI")
            f["content"] = uri
        self.fields.append(f)
        return self

    def field_count(self):
        """Return the number of fields defined so far."""
        return len(self.fields)

    def build(self) -> dict:
        """Assemble the POSTable SampleTemplate dict."""
        d = {"name": self.name, "defaultUnitId": self.qu["id"], "fields": self.fields}
        if hasattr(self, "description"):
            d["description"] = self.description
        return d

    def _fields(self):
        return self.fields
|
import requests

# Download the geobr geopackage metadata index as raw CSV bytes.
url = 'https://www.ipea.gov.br/geobr/metadata/metadata_gpkg.csv'
a = requests.get(url).content
# Earlier variants kept for reference:
# requests.get(url, verify=False)
#
# content = requests.get(url).content
## Python Project - Section A2 Group 1 - MyMovie
## This is KNN_DecisionTree_Regressions. This contains the functions for working through the KKN and Decision tree regressions.
## Made by Shayne Bement, Jeff Curran, Ghazal Erfani, Naphat Korwanich, and Asvin Sripraiwalsupakit
## Imported by myMovie
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn import tree
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsClassifier
import warnings
## using k-nearest neighbors algorithm to classify user-inputted values into 1 of 10 ranges of world-wide gross values
def KNN_Regression(df, inputList):
    """Classify a movie into one of 10 worldwide-gross ranges with KNN.

    Parameters
    ----------
    df : DataFrame-like
        Must provide the columns Genre, MPAA, Production_Budget,
        Book_Based, award_winning and Worldwide_Gross.
    inputList : list
        [Genre, MPAA, Budget, BookBool, AwardBool] describing the movie
        to classify.

    Returns
    -------
    (str, int)
        The predicted gross-range label (e.g. '50,000,000 - 250,000,000')
        and the model RMSE on a held-out 30% test split.
    """
    ### in case grader sklearn is not updated to most recent version, this will
    ### remove a deprecated warning regarding numpy
    warnings.filterwarnings(action='ignore', category=DeprecationWarning)
    ## Putting data required for model into pandas dataframe
    df = pd.DataFrame(df, columns=['Genre', 'MPAA', 'Production_Budget', 'Book_Based', 'award_winning', 'Worldwide_Gross'])
    ### Obtaining Inputs from main
    modelInput = inputList
    # [Genre, MPAA, Budget, BookBool, AwardBool]
    ## Converting categorical variables to dummy columns
    dfgenre = pd.get_dummies(df['Genre'], drop_first=True)
    df = df.join(dfgenre)
    df = df.drop(columns=['Genre'])
    dfmpaa = pd.get_dummies(df['MPAA'])
    df = df.join(dfmpaa)
    df = df.drop(columns=['MPAA'])
    ## Creating categorical variable to be predicted: bucket the gross into
    ## 10 labelled ranges, then label-encode the bucket names.
    df['Production_Budget'] = df['Production_Budget'].astype('int')
    df['Worldwide_Gross'] = df['Worldwide_Gross'].astype('int')
    le5 = preprocessing.LabelEncoder()
    conditions = [
        (df['Worldwide_Gross'] <= 1000000),
        (df['Worldwide_Gross'] > 1000000) & (df['Worldwide_Gross'] <= 5000000),
        (df['Worldwide_Gross'] > 5000000) & (df['Worldwide_Gross'] <= 25000000),
        (df['Worldwide_Gross'] > 25000000) & (df['Worldwide_Gross'] <= 50000000),
        (df['Worldwide_Gross'] > 50000000) & (df['Worldwide_Gross'] <= 250000000),
        (df['Worldwide_Gross'] > 250000000) & (df['Worldwide_Gross'] <= 500000000),
        (df['Worldwide_Gross'] > 500000000) & (df['Worldwide_Gross'] <= 1000000000),
        (df['Worldwide_Gross'] > 1000000000) & (df['Worldwide_Gross'] <= 1500000000),
        (df['Worldwide_Gross'] > 1500000000) & (df['Worldwide_Gross'] <= 2000000000),
        (df['Worldwide_Gross'] > 2000000000)]
    choices = ['0 - 1,000,000', '1,000,000 - 5,000,000','5,000,000 - 25,000,000','25,000,000 - 50,000,000','50,000,000 - 250,000,000','250,000,000 - 500,000,000','500,000,000 - 1,000,000,000','1,000,000,000 - 1,500,000,000','1,500,000,000 - 2,000,000,000','> 2,000,000,000']
    df['Worldwide_Gross'] = np.select(conditions, choices, default='0 - 1,000,000')
    df['Worldwide_Gross'] = le5.fit_transform(df['Worldwide_Gross'])
    ## Generating user input dataframe with the same columns as the training
    ## features. .copy() prevents the zeroing below from writing through a
    ## view into df1 (pandas SettingWithCopy hazard).
    df1 = df.drop(columns=['Worldwide_Gross'])
    userinputdf = df1.iloc[:1].copy()
    for col in userinputdf.columns:
        userinputdf[col].values[:] = 0
    userinputdf.at[0, 'Production_Budget'] = modelInput[2]
    userinputdf.at[0, 'Book_Based'] = modelInput[3]
    userinputdf.at[0, 'award_winning'] = modelInput[4]
    userinputdf.at[0, modelInput[0]] = 1  # Genre dummy column
    userinputdf.at[0, modelInput[1]] = 1  # MPAA dummy column
    ## Creating train and test data set
    train, test = train_test_split(df, test_size=0.3)
    x_train = train.drop('Worldwide_Gross', axis=1)
    y_train = train['Worldwide_Gross']
    x_test = test.drop('Worldwide_Gross', axis=1)
    y_test = test['Worldwide_Gross']
    ## Calculating RMSE on the held-out split
    model = KNeighborsClassifier(n_neighbors=10)
    model.fit(x_train, y_train)
    pred = model.predict(x_test)
    rmse = int(sqrt(mean_squared_error(y_test, pred)))
    ## Making the prediction with the user inputs. predict() returns an
    ## array, so take its single element (int(array) is deprecated), and
    ## inverse_transform expects an array-like — unwrap the single label so
    ## the caller gets the plain range string rather than str() of an array.
    predicted_code = int(model.predict(userinputdf)[0])
    predicted = str(le5.inverse_transform([predicted_code])[0])
    return predicted, rmse
## using decision tree regression model to predict a worldwide gross using sequences of branching operations based on comparisons of the variables
def DecisionTree_Regression(df, inputList):
    """Predict a movie's worldwide gross with a decision-tree regressor.

    Parameters
    ----------
    df : DataFrame-like
        Must provide the columns Genre, MPAA, Production_Budget,
        Book_Based, award_winning and Worldwide_Gross.
    inputList : list
        [Genre, MPAA, Budget, BookBool, AwardBool] describing the movie.

    Returns
    -------
    (int, int)
        The predicted gross (dollars) and the model RMSE on a held-out
        30% test split.
    """
    ## Putting data required for model into pandas dataframe
    df = pd.DataFrame(df, columns=['Genre', 'MPAA', 'Production_Budget', 'Book_Based', 'award_winning', 'Worldwide_Gross'])
    ### Obtaining Inputs from main
    modelInput = inputList
    # [Genre, MPAA, Budget, BookBool, AwardBool]
    ## Converting categorical variables to dummy variables
    dfgenre = pd.get_dummies(df['Genre'], drop_first=True)
    df = df.join(dfgenre)
    df = df.drop(columns=['Genre'])
    dfmpaa = pd.get_dummies(df['MPAA'])
    df = df.join(dfmpaa)
    df = df.drop(columns=['MPAA'])
    ## Generating user input dataframe for prediction. .copy() prevents the
    ## zeroing below from writing through a view into df1.
    df1 = df.drop(columns=['Worldwide_Gross'])
    userinputdf = df1.iloc[:1].copy()
    for col in userinputdf.columns:
        userinputdf[col].values[:] = 0
    userinputdf.at[0, 'Production_Budget'] = modelInput[2]
    userinputdf.at[0, 'Book_Based'] = modelInput[3]
    userinputdf.at[0, 'award_winning'] = modelInput[4]
    userinputdf.at[0, modelInput[0]] = 1  # Genre dummy column
    userinputdf.at[0, modelInput[1]] = 1  # MPAA dummy column
    ## Creating train and test data set
    train, test = train_test_split(df, test_size=0.3)
    x_train = train.drop('Worldwide_Gross', axis=1)
    y_train = train['Worldwide_Gross']
    x_test = test.drop('Worldwide_Gross', axis=1)
    y_test = test['Worldwide_Gross']
    ## Calculating Model RMSE on the held-out split
    model = tree.DecisionTreeRegressor()
    model.fit(x_train, y_train)
    pred = model.predict(x_test)
    rmse = int(sqrt(mean_squared_error(y_test, pred)))
    ## Making the prediction with inputs. predict() returns an array, so
    ## take its single element (int(array) is deprecated in numpy).
    predicted = int(model.predict(userinputdf)[0])
    ## Some discussion on error
    # variables = ['Worldwide_Gross', 'Production_Budget']
    # corr_matrix = df.corr()
    # print(corr_matrix['WWorldwide_Gross'].sort_values(ascending=False))
    # pd.scatter_matrix(df[variables], figsize=(12, 8))
    # plt.show()
    return predicted, rmse
if __name__ == "__main__":
    ####### Start #########
    # Demo run: predict gross for a $50M PG-13 comedy based on a book,
    # not award-winning, using both models, then print the results.
    # (\033[4m ... \033[0m underlines the heading on ANSI terminals.)
    modelInput = ['Comedy', 'PG-13', 50000000, 1, 0]
    movieDF = pd.read_csv(r'FullOutput.csv')
    grossEarningsRange, errorTerm = KNN_Regression(movieDF, modelInput)
    grossEarnings, errorTerm = DecisionTree_Regression(movieDF, modelInput)
    print('\n\033[4mKNN Regression Model\033[0m')
    print('Projected Budget: ', "${:,}".format(modelInput[2]))
    print('Predicted Gross Budget: ', "${}".format(grossEarningsRange))
    print('predicted Root Mean Squared Error: ', "{:,}".format(errorTerm))
    print('\n\033[4mDecision Tree Model\033[0m')
    print('Projected Budget: ', "${:,}".format(modelInput[2]))
    print('Predicted Budget Gross Budget: ', "${:,}".format(grossEarnings))
    print('predicted Budget Root Mean Squared Error: ', "${:,}".format(errorTerm))
|
import numpy
def spin_words(sentence):
    """Return *sentence* with every word of five or more letters reversed.

    Words are the space-separated chunks of the input; the single spaces
    between them are preserved.
    """
    return " ".join(
        chunk[::-1] if len(chunk) >= 5 else chunk
        for chunk in sentence.split(" ")
    )
# print(spin_words("Welcome is"))
def find_outlier(integers):
    """Return the one element of *integers* whose parity differs from the rest.

    Assumes the list has at least three elements and exactly one outlier.
    """
    odds = [n for n in integers if n % 2 != 0]
    evens = [n for n in integers if n % 2 == 0]
    return odds[0] if len(odds) == 1 else evens[0]
print(find_outlier([1, 2, 3]))
|
#!/usr/bin/env python
import argparse
import copy
import time
import sys
import utils
from cognition.newest_decision_making import DecisionMaker
from location.location import Location
from motion.motion_subsystem import MotionSubsystem
from radio.radio_subsystem import RadioSubsystem
from route.new_path import Path
from sensor.target_tracker import TargetTracker
class Controller(object):
    """Top-level AV (autonomous vehicle) controller (Python 2).

    Wires together the location, motion, radio and target-tracking
    subsystems and drives them through a finite state machine (see fsm()).
    """

    def __init__(self):
        # Latest barcode location reported by the location subsystem.
        self.current_location = 0
        brick = utils.connect_to_brick()
        self.cognition = DecisionMaker()
        # Subsystems receive bound methods below as callbacks.
        self.location = Location(self.location_callback)
        self.motion = MotionSubsystem(brick)
        self.radio = RadioSubsystem(self.radio_update_flag, self.radio_update_data,
                                    self.radio_reconfig_flag)
        self.tracker = TargetTracker(brick)
        # NOTE(review): this assignment shadows the radio_update_flag()
        # METHOD with an instance attribute of the same name. It works only
        # because RadioSubsystem already captured the bound method above, and
        # calling that method overwrites this attribute with the flag value.
        # Fragile -- consider renaming the attribute (cf. reconfig_flag,
        # which avoids the collision).
        self.radio_update_flag = False
        self.reconfig_flag = False
        # self.iteration = 1
        self.current_m = {}   # per-path meters measured this lap
        self.previous_m = {}  # per-path meters from the previous lap
        self.re_explore = False
        self.f = open('track_data', 'w')

    def location_callback(self, current_location):
        """
        Callback for location module.

        This function is used by the location module to relay the
        current location--as read by the barcode reader--back to the
        controller.

        Parameters
        ----------
        current_location : int
            Value of barcode representing current location.
        """
        self.current_location = current_location

    def radio_update_flag(self, flag=False):
        """
        Callback for radio subsystem.

        This function simply sets a flag true or false, to indicate if
        there is data from the radio subsystem.

        Parameters
        ----------
        flag : bool
            `True` if radio subsystem has passed data through the
            `radio_callback_data`.
        """
        # NOTE(review): this rebinds the instance attribute that shadows
        # this very method (see __init__); after the first call the method
        # is unreachable via the instance.
        self.radio_update_flag = flag

    def radio_update_data(self, tx_packets=0, rx_packets=0, signal_strength=0):
        """
        Callback for radio subsystem.

        This function is used for the radio susbsytem to pass data
        back to the controller.

        Parameters
        ----------
        tx_packets : int
            Number of streamed packets sent by radio.
        rx_packets : int
            Number of streamed packets received by Node B.
        signal_strength : int
            Received signal strength indicator (stored as self.rssi).
        """
        self.tx_packets = tx_packets
        self.rx_packets = rx_packets
        self.rssi = signal_strength

    def radio_reconfig_flag(self, flag=False):
        """
        Callback for radio subsystem.

        This function is use by the radio subsystem to indicate that a
        reconfiguration request has been acknowledged by Node B.

        Parameters
        ----------
        flag : bool
            `True` if radio subsystem has received an acknowledgment from
            Node B for a reconfiguration request.
        """
        self.reconfig_flag = flag

    def build_route(self):
        """
        Build route graph.

        Creates the three candidate paths and preloads their meter values
        so the FSM can skip the initial exploration phase.
        """
        path_a = Path(name='A', distance=62.0, direction='left')
        path_b = Path(name='B', distance=48.0, direction='straight')
        path_c = Path(name='C', distance=87.5, direction='right')
        # preload values for path, bypass initial exploration
        path_a.current_meters['X'] = 5
        path_a.current_meters['Y'] = 1
        path_a.current_meters['RSSI'] = -86
        path_b.current_meters['X'] = 3
        path_b.current_meters['Y'] = 0
        path_b.current_meters['RSSI'] = -86
        path_c.current_meters['X'] = 5
        path_c.current_meters['Y'] = 3
        path_c.current_meters['RSSI'] = -86
        self.path_names = ['A', 'B', 'C']  # this is a hack
        self.paths = [path_a, path_b, path_c]

    def run(self):
        """
        Run AV controller.

        Parses command-line radio options, builds the route, starts the
        subsystems, runs the FSM to completion, dumps the collected
        histories and shuts down.
        """
        parser = argparse.ArgumentParser()
        # NOTE(review): nargs=1 makes args.frequency a one-element LIST,
        # not a float, whenever -f is supplied on the command line.
        parser.add_argument("-f", type=float, default=434e6, metavar='frequency', dest='frequency',
                            nargs=1, help="Transmit frequency (default: %(default)s)")
        parser.add_argument("-m", type=str, default='gfsk', metavar='modulation', dest='modulation',
                            choices=['gfsk', 'fsk', 'ask'],
                            help="Select modulation from [%(choices)s] (default: %(default)s)")
        # NOTE(review): "-p" "--power" is adjacent-string concatenation --
        # it defines a single option literally named "-p--power" (missing
        # comma). Same for "-r" "--bitrate" below.
        parser.add_argument("-p" "--power", type=int, default=17, metavar='power', dest='power',
                            choices=[8, 11, 14, 17],
                            help="Select transmit power from [%(choices)s] (default: %(default)s)")
        parser.add_argument("-r" "--bitrate", type=float, default=4.8e3, metavar='bitrate',
                            dest='bitrate', help="Set bitrate (default: %(default)s)")
        args = parser.parse_args()
        self.frequency = args.frequency
        self.modulation = args.modulation
        self.eirp = args.power
        self.bitrate = args.bitrate
        self.build_route()
        self.location.start()
        self.motion.start()
        self.radio.start()
        self.fsm()
        # Dump the run's collected data for offline analysis.
        print self.path_history
        print self.choice_history
        print self.score_history
        print self.soln_idx
        print self.current_m
        print self.previous_m
        self.shutdown()

    def shutdown(self):
        """
        Shutdown subsytems before stopping.
        """
        # self.f.close()
        self.tracker.kill_sensor()
        self.motion.join()
        self.location.join()  # shut this down last

    def fsm(self):
        """
        AV finite state machine.

        States (10 iterations total):
          first_time      -- drive to the start barcode, configure radio.
          before_traverse -- pick a path: explore an unexplored one, or
                             exploit the best known solution (comparing the
                             new score against the previous best).
          traverse_path   -- drive the chosen path while the radio listens
                             (exploring) or streams (exploiting).
          after_traverse  -- record meters / mark the path explored.
          go_to_beginning -- report, wait for operator, return to start.

        Note: the `while ...: ... else:` constructs are loop-else blocks --
        the else body runs once the wait-loop condition becomes false.
        """
        fsm_state = 'first_time'
        start = 1
        destination = 2
        convergence_iterator = 0
        self.path_history = []
        self.choice_history = []
        self.score_history = []
        self.prev_score = -10
        self.prev_param = {}
        self.prev_soln = []
        self.soln_idx = []
        iteration = 1
        while iteration < 11:
            if fsm_state == 'first_time':
                ###################################################################
                self.motion.set_direction('straight')
                self.motion.set_speed(25)
                self.motion.set_state('go')
                # Busy-wait until the barcode reader reports the start mark.
                while not self.current_location == start:
                    time.sleep(0.01)
                else:
                    self.motion.set_state('stop')
                    time.sleep(0.1)
                    self.radio.set_current_location(self.current_location)
                    self.radio.set_radio_configuration(self.modulation, self.eirp,
                                                       self.bitrate, self.frequency)
                fsm_state = 'before_traverse'
                continue
                ###################################################################
            if fsm_state == 'before_traverse':
                ###################################################################
                # choose_path() returns the index of an unexplored path, or
                # -1 when every path has been explored.
                choice = self.cognition.choose_path(self.paths)
                if choice != -1:
                    # Exploration: run the chosen path with default knobs.
                    current_path = self.paths[choice]
                    self.soln_idx.append('Explore')
                    current_path.current_knobs['Modulation'] = self.modulation
                    current_path.current_knobs['Rs'] = self.bitrate
                    current_path.current_knobs['EIRP'] = self.eirp
                    current_path.current_knobs['Speed'] = 25
                    self.motion.set_speed(25)
                else:
                    # Exploitation: ask the decision maker for a solution
                    # and compare it to the previous best.
                    score, param, soln, s_i = self.cognition.solution(self.paths,
                                                                      iteration)
                    print "score: ", score
                    print "prev_score: ", self.prev_score
                    if score > self.prev_score:
                        print "current solution is better"
                        self.soln_idx.append(s_i)
                        self.score_history.append(score)
                        self.prev_score = score
                        self.prev_param = param
                        self.prev_soln = soln
                        name_of_chosen_path = param['name']
                        choice = self.path_names.index(name_of_chosen_path)
                        current_path = self.paths[choice]
                        self.choice_history.append(current_path.name)
                        current_path.current_knobs['Modulation'] = 'fsk'
                        current_path.current_knobs['EIRP'] = param['EIRP']
                        current_path.current_knobs['Rs'] = param['Rs']
                        current_path.current_knobs['Speed'] = param['rotor_power']
                        # Notify Node B of the new radio configuration and
                        # wait for its acknowledgment before applying it.
                        self.radio.set_config_packet_data(current_path.current_knobs['Modulation'],
                                                          current_path.current_knobs['EIRP'],
                                                          current_path.current_knobs['Rs'])
                        self.radio.set_state('reconfigure')
                        while not self.reconfig_flag:
                            time.sleep(0.1)
                        else:
                            self.radio.set_current_location(self.current_location)
                            self.radio.set_radio_configuration(current_path.current_knobs['Modulation'],
                                                               current_path.current_knobs['EIRP'],
                                                               current_path.current_knobs['Rs'],
                                                               self.frequency)
                            self.motion.set_speed(current_path.current_knobs['Speed'])
                    else:
                        print "previous solution is better"
                        try:
                            name_of_chosen_path = self.prev_param['name']
                        except KeyError:
                            print "KeyError"
                            print self.prev_param
                            sys.exit(1)
                        # Has the environment changed since the previous
                        # solution was measured?
                        comparison = self.compare(self.prev_param)
                        if comparison == True:
                            print "prev solution is better and old environment is unchanged"
                            # After 3 consecutive reuses, force re-exploration.
                            convergence_iterator += 1
                            if convergence_iterator == 3:
                                convergence_iterator = 0
                                self.re_explore = True
                            self.soln_idx.append('prev result')
                            self.score_history.append(self.score_history[-1])
                            choice = self.path_names.index(name_of_chosen_path)
                            current_path = self.paths[choice]
                            self.choice_history.append(current_path.name)
                            current_path.current_knobs['Modulation'] = 'fsk'
                            current_path.current_knobs['EIRP'] = self.prev_param['EIRP']
                            current_path.current_knobs['Rs'] = self.prev_param['Rs']
                            current_path.current_knobs['Speed'] = self.prev_param['rotor_power']
                            self.radio.set_config_packet_data(current_path.current_knobs['Modulation'],
                                                              current_path.current_knobs['EIRP'],
                                                              current_path.current_knobs['Rs'])
                            self.radio.set_state('reconfigure')
                            while not self.reconfig_flag:
                                time.sleep(0.1)
                            else:
                                self.radio.set_current_location(self.current_location)
                                self.radio.set_radio_configuration(current_path.current_knobs['Modulation'],
                                                                   current_path.current_knobs['EIRP'],
                                                                   current_path.current_knobs['Rs'],
                                                                   self.frequency)
                                self.motion.set_speed(current_path.current_knobs['Speed'])
                        else:
                            print "prev solution is better but old environment has changed"
                            print "use current solution"
                            self.soln_idx.append(s_i)
                            self.score_history.append(score)
                            self.prev_score = score
                            self.prev_param = param
                            self.prev_soln = soln
                            name_of_chosen_path = param['name']
                            choice = self.path_names.index(name_of_chosen_path)
                            current_path = self.paths[choice]
                            self.choice_history.append(current_path.name)
                            current_path.current_knobs['Modulation'] = 'fsk'
                            current_path.current_knobs['EIRP'] = param['EIRP']
                            current_path.current_knobs['Rs'] = param['Rs']
                            current_path.current_knobs['Speed'] = param['rotor_power']
                            self.radio.set_config_packet_data(current_path.current_knobs['Modulation'],
                                                              current_path.current_knobs['EIRP'],
                                                              current_path.current_knobs['Rs'])
                            self.radio.set_state('reconfigure')
                            while not self.reconfig_flag:
                                time.sleep(0.1)
                            else:
                                self.radio.set_current_location(self.current_location)
                                self.radio.set_radio_configuration(current_path.current_knobs['Modulation'],
                                                                   current_path.current_knobs['EIRP'],
                                                                   current_path.current_knobs['Rs'],
                                                                   self.frequency)
                                self.motion.set_speed(current_path.current_knobs['Speed'])
                self.motion.set_direction(current_path.direction)
                self.path_history.append(current_path.name)
                fsm_state = 'traverse_path'
                continue
                ###################################################################
            if fsm_state == 'traverse_path':
                ###################################################################
                name = current_path.name
                # Archive last lap's meters before overwriting them below.
                if not current_path.current_meters == {}:
                    # print "how about here?"
                    self.previous_m[name] = copy.deepcopy(current_path.current_meters)
                    # print "previous_m[name]: ", self.previous_m[name]
                # print "maybe here?"
                if self.re_explore == True:
                    # Forget exploration status so every path is re-explored.
                    for p in self.paths:
                        p.has_been_explored = False
                    self.re_explore = False
                if not current_path.has_been_explored:
                    print "Exploring path"
                    self.radio.set_state('listen')
                else:
                    print "Exploiting path"
                    self.radio.set_state('stream')
                self.motion.set_state('go')
                tic = time.time()
                # Track targets until the destination barcode is reached.
                while not self.current_location == destination:
                    self.tracker.run()
                    time.sleep(0.1)
                else:
                    self.motion.set_state('stop')
                    self.radio.set_state('stop')
                    toc = time.time()
                time.sleep(1)
                x, y = self.tracker.tally_results()
                print "x = %d y = %d" %(x, y)
                self.tracker.reset()
                name = current_path.name
                current_path.current_meters['X'] = x
                current_path.current_meters['Y'] = y
                current_path.solution_as_observed['T'] = toc - tic
                fsm_state = 'after_traverse'
                continue
                ###################################################################
            if fsm_state == 'after_traverse':
                ###################################################################
                print "updating meters"
                # for p in self.paths:
                #     p.update_meters()
                if not current_path.has_been_explored:
                    print "marking current path as explored"
                    current_path.has_been_explored = True
                    # fsm_state = 'go_to_beginning'
                    # continue
                else:
                    # Pull RSSI from the radio once it flags fresh data.
                    self.radio.set_state('update')
                    while not self.radio_update_flag:
                        time.sleep(0.1)
                    else:
                        current_path.current_meters['RSSI'] = self.rssi
                        # current_path.solution_as_observed['G'] = self.rx_packets
                        # current_path.solution_as_observed['Z'] = self.cognition.calculate_z(x, y)
                        # current_path.solution_as_observed['B'] = self.cognition.estimate_ber(self.tx_packets,
                        #                                                                     self.rx_packets)
                name = current_path.name
                self.current_m[name] = current_path.current_meters
                iteration += 1
                fsm_state = 'go_to_beginning'
                continue
                ###################################################################
            if fsm_state == 'go_to_beginning':
                ###################################################################
                name = current_path.name
                print "current_m: ", self.current_m[name]
                print "previous_m: ", self.previous_m[name]
                print "current_m: ", self.current_m
                print "previous_m: ", self.previous_m
                print ""
                print "Iteration %d finished" %(iteration-1,)
                # Operator checkpoint between iterations (any input resumes).
                s = raw_input("AVEP has completed an iteration, press Y/y to continue ")
                self.motion.set_direction('straight')
                self.motion.set_speed(25)
                self.motion.set_state('go')
                while not self.current_location == start:
                    time.sleep(0.01)
                else:
                    self.motion.set_state('stop')
                    time.sleep(0.1)
                fsm_state = 'before_traverse'
                continue
                ###################################################################

    def compare(self, param):
        """
        Determine if the environment has changed from one iteration to
        the next.

        Returns True when the X and Y meters for the path named in *param*
        are unchanged between the previous and current lap.
        """
        name = param['name']
        c_meters = self.current_m[name]
        p_meters = self.previous_m[name]
        if c_meters['X'] != p_meters['X']:
            print "current_meters['X'] != previous_meters['X']"
            return False
        elif c_meters['Y'] != p_meters['Y']:
            print "current_meters['Y'] != previous_meters['Y']"
            return False
        # elif current_meters['RSSI'] == previous_meters['RSSI']:
        #     print "current_meters['Noise'] == previous_meters['Y']"
        #     return False
        else:
            return True
if __name__ == '__main__':
    # Run the controller; on Ctrl-C, shut the subsystems down cleanly.
    main = Controller()
    try:
        main.run()
    except KeyboardInterrupt:
        main.shutdown()
        # pass
    # finally:
    #     main.run()
# def profile_speed(self):
# """
# Determine actual speed of AV.
# """
# speed = 75
# start = 1
# destination = 2
# while True:
# if self.fsm_state == 'beginning':
# self.motion.set_speed(speed)
# self.motion.set_state('go')
# while not self.current_location == start:
# time.sleep(0.01)
# else:
# self.motion.set_state('stop')
# time.sleep(0.1)
# self.fsm_state = 'traverse_path'
# continue
# if self.fsm_state == 'traverse_path':
# print "fsm: motion.set_State('go')"
# self.motion.set_speed(speed)
# self.motion.set_state('go')
# tic = time.time()
# while not self.current_location == destination:
# time.sleep(0.01)
# else:
# self.motion.set_state('stop')
# toc = time.time()
# print "speed = %d time = %f" %(speed, toc-tic)
# break
# if current_path.has_been_explored:
# self.radio.set_config_packet_data(current_path.current_knobs['Modulation'],
# current_path.current_knobs['EIRP'],
# current_path.current_knobs['Rs'])
# self.radio.set_state('reconfigure')
# while not self.reconfig_flag:
# time.sleep(0.1)
# else:
# self.radio.set_current_location(self.current_location)
# self.radio.set_radio_configuration(current_path.current_knobs['Modulation'],
# current_path.current_knobs['EIRP'],
# current_path.current_knobs['Rs'],
# self.frequency)
# self.motion.set_speed(current_path.current_knobs['Speed'])
# else:
# current_path.current_knobs['Modulation'] = self.modulation
# current_path.current_knobs['Rs'] = self.bitrate
# current_path.current_knobs['EIRP'] = self.eirp
# current_path.current_knobs['Speed'] = 25
# self.motion.set_speed(25)
# self.motion.set_direction(current_path.direction)
# else:
# # notify base station of new configuration
# self.radio.set_config_packet_data(current_path.current_knobs['Modulation'],
# current_path.current_knobs['EIRP'],
# current_path.current_knobs['Rs'])
# self.radio.set_state('reconfigure')
# while not self.reconfig_flag:
# # wait for acknowledgment
# time.sleep(0.1)
# else:
# # use new configuration
# self.radio.set_current_location(self.current_location)
# self.radio.set_radio_configuration(current_path.current_knobs['Modulation'],
# current_path.current_knobs['EIRP'],
# current_path.current_knobs['Rs'],
# self.frequency)
# self.motion.set_speed(current_path.current_knobs['Speed'])
# self.motion.set_direction(current_path.direction)
# s = "\n\n"
# s += "Before traverse.\n"
# s += "==================================================\n"
# s += "Iteration %d.\n" %(self.iteration,)
# for p in self.paths:
# s += "\n\nPath %s information:\n" %(p.name,)
# s += "Path explored yet? " + str(p.has_been_explored) + "\n"
# s += "solution_parameters: " + str(p.solution_parameters) + "\n"
# s += "solution_as_implemented: " + str(p.solution_as_implemented) + "\n"
# s += "previous_meters: " + str(p.previous_meters) + "\n"
# s += "current_knobs: " + str(p.current_knobs) + "\n"
# s += "\n\nChosen path is %s.\n" %(current_path.name,)
# s += "=================================================="
# i = self.cognition.choose_path(self.paths)
# current_path = self.paths[i]
# current_path.iteration = self.iteration
# if not current_path.has_been_explored:
# # use default values
# current_path.current_knobs['Modulation'] = self.modulation
# current_path.current_knobs['Rs'] = self.bitrate
# current_path.current_knobs['EIRP'] = self.eirp
# current_path.current_knobs['Speed'] = 25
# self.motion.set_speed(25)
|
import numpy as np
def redraw_scatter(drawn_object, new_x, new_y):
    """Move the points of an existing scatter artist to new coordinates.

    Stacks the x/y sequences into an (n, 2) array and hands it to the
    artist's set_offsets, updating the plot in place.
    """
    xy_pairs = np.vstack((new_x, new_y)).T
    drawn_object.set_offsets(xy_pairs)
def redraw_line(drawn_line, new_x, new_y):
    """Replace the x and y data of an existing line artist in place."""
    for setter, data in ((drawn_line.set_xdata, new_x),
                         (drawn_line.set_ydata, new_y)):
        setter(data)
|
import socket as mysoc
import sys
def fileLineCount(path):
    """Return the number of lines in the text file at *path*.

    Fixes the original enumerate-based version, which raised NameError on
    an empty file (the loop variable was never bound); an empty file now
    counts as 0 lines.
    """
    with open(path) as fileIn:
        return sum(1 for _ in fileIn)
print ('Number of arguments:', len(sys.argv), 'arguments.')
print ('Argument List:', str(sys.argv))

# FIRST Socket | to RS server
try:
    rs = mysoc.socket(mysoc.AF_INET, mysoc.SOCK_STREAM)
    print("[C]: Socket for RS created")
except mysoc.error as err:
    # NOTE(review): the format string has one placeholder but two
    # arguments -- `err` is silently dropped from the output.
    print('{} \n'.format("socket open error ", err))

# # SECOND SOCKET
# try:
#     ts = mysoc.socket(mysoc.AF_INET, mysoc.SOCK_STREAM)
#     print("[C]: Socket for TS created")
# except mysoc.error as err:
#     print('{} \n'.format("TS socket open error ", err))

# Usage: client.py [RS_HOST HNS_FILE]; defaults to the local host and
# the bundled hostname list when no arguments are given.
if (len(sys.argv) == 3):
    RS_HOST = sys.argv[1]
    DNS_HNS_TXT = sys.argv[2]
else:
    RS_HOST = mysoc.gethostname()
    DNS_HNS_TXT = 'PROJ2-HNS.txt'

#Port for RS
RsPort = 50020

# Client Host/IP setup
clientHost = mysoc.gethostname()
print("[C]: Client name is: " , clientHost)
clientIP = mysoc.gethostbyname(mysoc.gethostname())
print("[C]: Client IP is: " , clientIP)

# connect to RS_SERVER
rs_ip = mysoc.gethostbyname(RS_HOST)
print(rs_ip)
server_bindingRS = (rs_ip, RsPort)
rs.connect(server_bindingRS) # RS will be waiting for connection
print ("[C]: Connected to RS Server")
# Connection established

# Import from file: tell RS how many lookups to expect, then wait for
# its acknowledgment before streaming hostnames.
inPath = DNS_HNS_TXT
numLinesInFile = fileLineCount(inPath)
inFile = open(inPath, 'r')
print("Num Of Lines in HNS: " + str(numLinesInFile))
rs.send(str(numLinesInFile).encode('utf-8'))
data_from_server = rs.recv(100)
msg = data_from_server.decode('utf-8')
print("[C < RS]: Response: " + msg)
# send num of lookups

#create a file to output the data
fileOut = open("Resolved.txt", "w")
tsConnected = False
while True:
    # Each iteration = one lookup in TS/RS
    inLine = inFile.readline()
    if not inLine:
        break
    # Send line to RS
    inLine = inLine.strip('\n')
    rs.send(inLine.encode('utf-8'))
    print("[C > RS] Line Sent: " + inLine)
    data_from_server = rs.recv(1024)
    msg = data_from_server.decode('utf-8')
    print("[C < RS]: Response : " + msg)
    #split it in 3 and check 3rd portion.
    # splitList = msg.split()
    # if splitList[2] == 'NS':
    #     if tsConnected == False:
    #         tsConnected= True
    #         print("[C]: MUST CONNECT TO TS NOW.")
    #         TsPort = 60000
    #         tsHostName = splitList[0]
    #         ts_ip = mysoc.gethostbyname(tsHostName)
    #         print("GREP IP IS: ", ts_ip)
    #         #FIXME for testing purposes we run on same machine, uncomment for diff machine
    #         #server_bindingTS = (ts_ip, TsPort)
    #         server_bindingTS = (clientIP, TsPort)
    #         ts.connect(server_bindingTS)
    #         print("[C]: Connected to TS Server")
    #
    #     #send the hostname to ts
    #     print("[C > TS] sending: " + inLine)
    #     ts.send(inLine.encode('utf-8'))
    #     data_from_ts = ts.recv(1024)
    #     print("[C < TS] received: ", data_from_ts.decode('utf-8'))
    #     msgTS= data_from_ts.decode('utf-8')
    #     splitListTS = msgTS.split()
    #
    #     #write to file
    #     strToFileTS = msgTS + "\n"
    #     fileOut.write(strToFileTS)
    # else:
    # output the string to result file
    strToFile = msg + "\n"
    fileOut.write(strToFile)
    print("[C]: Line is VALID: ", msg)
    print("")

#ts.send("Kill TS".encode('utf-8'))
rs.close()
#ts.close()
#print("Stuff ended")

#data_from_server = rs.recv(1024)
#print("[C]: Data received from RS server: [", data_from_server.decode('utf-8'), "]")
#data_from_server_decoded= data_from_server.decode('utf-8')
|
from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from rest_framework.decorators import api_view
from restapp.models import MyService
from restapp.serializers import MyServiceSerializer
from restapp.models import SubService
from restapp.serializers import SubServiceSerializer
#################### Service
@api_view(['GET', 'POST', 'DELETE'])
def service_list(request):
    """List all services, create a new service, or delete every service.

    GET    -- return all services; ?name= filters by title (icontains).
    POST   -- create a service from the JSON request body.
    DELETE -- remove every service and report how many were deleted.
    """
    if request.method == 'GET':
        queryset = MyService.objects.all()
        name = request.GET.get('name', None)
        if name is not None:
            # NOTE: the 'name' query parameter filters the 'title' field.
            queryset = queryset.filter(title__icontains=name)
        serializer = MyServiceSerializer(queryset, many=True)
        # 'safe=False' permits serializing a list rather than a dict.
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = MyServiceSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
    elif request.method == 'DELETE':
        deleted = MyService.objects.all().delete()
        return JsonResponse(
            {'message': '{} Services were deleted successfully!'.format(deleted[0])},
            status=status.HTTP_204_NO_CONTENT,
        )
@api_view(['GET', 'PUT', 'DELETE'])
def service_detail(request, pk):
    """Retrieve, update, or delete one MyService identified by primary key."""
    try:
        service = MyService.objects.get(pk=pk)
    except MyService.DoesNotExist:
        return JsonResponse({'message': 'The service does not exist'}, status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return JsonResponse(MyServiceSerializer(service).data)
    if request.method == 'PUT':
        payload = JSONParser().parse(request)
        serializer = MyServiceSerializer(service, data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        service.delete()
        return JsonResponse({'message': 'Service was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def service_list_published(request):
    """Return every service flagged as available."""
    available = MyService.objects.filter(is_available=True)
    if request.method == 'GET':
        return JsonResponse(MyServiceSerializer(available, many=True).data, safe=False)
@api_view(['GET'])
def service_subservices(request, pk):
    """Return SubService rows whose pk matches the given value.

    NOTE(review): this filters SubService on its own pk rather than on a
    parent-service foreign key — confirm that is the intended lookup.
    """
    matches = SubService.objects.filter(pk=pk)
    if request.method == 'GET':
        return JsonResponse(SubServiceSerializer(matches, many=True).data, safe=False)
#################### SubService
@api_view(['GET', 'POST', 'DELETE'])
def subservice_list(request):
    """List all sub-services (optionally filtered by ?name=), create one, or delete all."""
    if request.method == 'GET':
        queryset = SubService.objects.all()
        name = request.GET.get('name', None)
        if name is not None:
            # Case-insensitive substring match on the title field.
            queryset = queryset.filter(title__icontains=name)
        serializer = SubServiceSerializer(queryset, many=True)
        # safe=False lets JsonResponse serialize a list rather than a dict.
        return JsonResponse(serializer.data, safe=False)
    if request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = SubServiceSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
    if request.method == 'DELETE':
        deleted = SubService.objects.all().delete()
        return JsonResponse(
            {'message': '{} Subservices were deleted successfully!'.format(deleted[0])},
            status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'PUT', 'DELETE'])
def subservice_detail(request, pk):
    """Retrieve, update, or delete one SubService identified by primary key."""
    try:
        subservice = SubService.objects.get(pk=pk)
    except SubService.DoesNotExist:
        return JsonResponse({'message': 'The subservice does not exist'}, status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        return JsonResponse(SubServiceSerializer(subservice).data)
    if request.method == 'PUT':
        payload = JSONParser().parse(request)
        serializer = SubServiceSerializer(subservice, data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        subservice.delete()
        return JsonResponse({'message': 'Subservice was deleted successfully!'}, status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def subservice_list_published(request):
    """Return every sub-service flagged as available.

    Bug fix: the serializer was passed the undefined name ``services``
    (copy-paste from service_list_published), raising NameError on every
    request; it now serializes ``subservices``.
    """
    subservices = SubService.objects.filter(is_available=True)
    if request.method == 'GET':
        subservice_serializer = SubServiceSerializer(subservices, many=True)
        return JsonResponse(subservice_serializer.data, safe=False)
import os
import argparse
import random
import json
import logging
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from collections import defaultdict
from transformers import *
from utils import seed_everything, DialogueDataset, create_mini_batch, read_test_data
"""加载模型"""
def build_model(model_dir):
    """Load a fine-tuned 3-class RoBERTa sequence classifier.

    :param model_dir: path to the saved state_dict (.pth) produced by training
    :return: the model in eval mode, on GPU when one is available
    """
    # Chinese pre-trained weights; the commented name is the vanilla BERT variant.
    # PRETRAINED_MODEL_NAME = "bert-base-chinese"
    PRETRAINED_MODEL_NAME = 'bert_models/chinese-roberta-wwm-ext-large'
    NUM_LABELS = 3
    model = BertForSequenceClassification.from_pretrained(PRETRAINED_MODEL_NAME, num_labels=NUM_LABELS)
    model.load_state_dict(torch.load(model_dir))
    """获得设备类型"""
    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()
    print("***** Model Loaded *****")
    return model
"""
预测阶段:输入句子对(对话,问题),进行3分类预测
0:没有
1:有
2:不确定
"""
def get_predicts(model, dataloader):
    """Run the classifier over *dataloader* and collect predictions.

    Each batch is expected to be
    (example_ids, token_ids, segment_ids, attention_masks, attributes).

    :param model: a callable model accepting input_ids / token_type_ids /
        attention_mask keyword tensors and returning (logits, ...)
    :param dataloader: iterable of 5-element batches as described above
    :return: (eids, attrs, predictions) — ids and attributes as lists,
        predictions as a 1-D tensor of argmax class indices, all aligned
    """
    predictions = None
    total = 0  # kept from the original interface; currently unused
    eids, attrs = [], []
    with torch.no_grad():
        for data in dataloader:
            eid = data[0]
            attr = data[4]
            # Bug fix: the original sliced data[1:4] only on the CUDA path,
            # so on CPU the full 5-element batch failed to unpack into the
            # three model tensors below.
            tensors = [t for t in data[1:4] if t is not None]
            if next(model.parameters()).is_cuda:
                tensors = [t.to("cuda") for t in tensors]
            # Pass tensors by keyword so the argument order cannot drift.
            tokens_tensors, segments_tensors, masks_tensors = tensors
            outputs = model(input_ids=tokens_tensors,
                            token_type_ids=segments_tensors,
                            attention_mask=masks_tensors)
            logits = outputs[0]
            _, pred = torch.max(logits.data, 1)
            # Accumulate this batch's ids, attributes and predictions.
            if predictions is None:
                predictions = pred
                eids = list(eid)
                attrs = list(attr)
            else:
                predictions = torch.cat((predictions, pred))
                eids.extend(eid)
                attrs.extend(attr)
    return eids, attrs, predictions
def predict(args):
    """Load the test set, run the classifier, and write per-example
    {attribute: label} predictions to args.test_output_file as JSON.

    :param args: argparse namespace with model_dir, test_input_file and
        test_output_file attributes
    """
    print("===============Start Prediction==============")
    # model_version = 'bert-base-chinese'
    model_version = 'bert_models/chinese-roberta-wwm-ext-large'
    tokenizer = BertTokenizer.from_pretrained(model_version)
    testset = DialogueDataset(read_test_data(args.test_input_file), "test", tokenizer=tokenizer)
    # testset = DialogueDataset("../data/cls_data/dev_test2.csv", "test", tokenizer=tokenizer)
    testloader = DataLoader(testset, batch_size=16, shuffle=False, collate_fn=create_mini_batch)
    model = build_model(args.model_dir)
    eids, attrs, predictions = get_predicts(model, testloader)
    """save result data to json"""
    # Group (attribute, predicted label) pairs by example id.
    outputs = defaultdict(list)
    for i in range(len(eids)) :
        outputs[str(eids[i])].append([attrs[i], predictions[i].item()])
    cnt = 0
    for eid, pairs in outputs.items():
        tmp_pred_new = {}
        if len(pairs) != 0:
            for pair in pairs:
                if pair[1] != 3: # label 3 means "none" in the 4-way scheme; drop it
                    tmp_pred_new[pair[0]] = str(pair[1])
                else:
                    cnt += 1
        outputs[eid]=tmp_pred_new
    # Also record ids whose predictions are empty so no sample is missing
    # from the output file.
    with open(args.test_input_file, 'r', encoding='utf-8') as fr:
        eids_all = json.load(fr)
    for eid in eids_all.keys():
        if eid not in outputs:
            outputs[eid] = {}
    print("测试样本数量为:", len(outputs))
    print("none数量为:", cnt)
    pred_path = os.path.join(args.test_output_file)
    with open(pred_path, 'w', encoding='utf-8') as json_file:
        json.dump(outputs, json_file, ensure_ascii=False, indent=4)
    print("Prediction Done!")
if __name__ == "__main__":
    # Command-line entry point: parse paths, fix the RNG seed, run prediction.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', '-dd', type=str, default='data/near_data', help='Train/dev data path')
    parser.add_argument('--model_dir', '-sd', type=str, default='./save_model_qe/net_params.pth', help='Path to save, load model')
    parser.add_argument('--test_input_file', '-tif', type=str, default='./test_attr_pred.json', help='Input file for prediction')
    parser.add_argument('--test_output_file', '-tof', type=str, default='./evaluate/preds_roberta-xlarge-q_e.json', help='Output file for prediction')
    args = parser.parse_args()
    # Seed all RNGs for reproducible predictions.
    seed_everything(66)
    predict(args)
    # 10033750
#!/usr/bin/env python2
import roslib
roslib.load_manifest('mobot_control')
import rospy
from mobot_control.msg import Keyboard
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Keyboard.msg !
---------------------------
(1-7) or shift+(1-7) for position control of the manipulator in joint space
(q-y) or shift+(q-y) for position control of the manipulator in cartesian space
(uiojlm,.) for position control of movebase navigation
(890) for position control of swingarm
(-=) for the selection of the manipulator reference frame
(gG) for position control of gripper
CTRL-C to quit
"""
# the default velocity is 3deg/s or 2mm/s, and GMAS_TIME in test is 0.05s
jointBindings = {
'1':( 0.50, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'2':( 0.0, 0.50, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'3':( 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 ),
'4':( 0.0, 0.0, 0.0, 0.50, 0.0, 0.0, 0.0 ),
'5':( 0.0, 0.0, 0.0, 0.0, 0.50, 0.0, 0.0 ),
'6':( 0.0, 0.0, 0.0, 0.0, 0.0, 0.50, 0.0 ),
'7':( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.50 ),
'!':( -0.50, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'@':( 0.0, -0.50, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'#':( 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0 ),
'$':( 0.0, 0.0, 0.0, -0.50, 0.0, 0.0, 0.0 ),
'%':( 0.0, 0.0, 0.0, 0.0, -0.50, 0.0, 0.0 ),
'^':( 0.0, 0.0, 0.0, 0.0, 0.0, -0.50, 0.0 ),
'&':( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.50 )
}
# the default velocity is 3deg/s and 5mm/s, and GMAS_TIME in test is 0.05s
cartesianBindings={
'q':( 0.85, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'w':( 0.0, 0.85, 0.0, 0.0, 0.0, 0.0 ),
'e':( 0.0, 0.0, 0.85, 0.0, 0.0, 0.0 ),
'r':( 0.0, 0.0, 0.0, 0.35, 0.0, 0.0 ),
't':( 0.0, 0.0, 0.0, 0.0, 0.35, 0.0 ),
'y':( 0.0, 0.0, 0.0, 0.0, 0.0, 0.35 ),
'Q':( -0.85, 0.0, 0.0, 0.0, 0.0, 0.0 ),
'W':( 0.0, -0.85, 0.0, 0.0, 0.0, 0.0 ),
'E':( 0.0, 0.0, -0.85, 0.0, 0.0, 0.0 ),
'R':( 0.0, 0.0, 0.0, -0.35, 0.0, 0.0 ),
'T':( 0.0, 0.0, 0.0, 0.0, -0.35, 0.0 ),
'Y':( 0.0, 0.0, 0.0, 0.0, 0.0, -0.35 )
}
# the default velocity is 1deg/s and 3mm/s, and GMAS_TIME in test is 0.05s
homeBindings = {
'z':1.0, 'x':2.0, 'c':3.0, 'v':4.0, 'b':5.0,
'n':6.0, 'h':7.0, ';':8.0, '/':9.0
}
moveBindings = {
'i':( 1.0, 0.0, 0.0, 0.0 ),
'o':( 1.0, 0.0, 0.0, -1.0 ),
'j':( 0.0, 0.0, 0.0, 1.0 ),
'l':( 0.0, 0.0, 0.0, -1.0 ),
'u':( 1.0, 0.0, 0.0, 1.0 ),
',':( -1.0, 0.0, 0.0, 0.0 ),
'.':( -1.0, 0.0, 0.0, 1.0 ),
'm':(-1.0, 0.0, 0.0, -1.0)
}
moveSpeeds = { 'p':3.0, '[':6.0, ']':9.0 }
swingBindings = {
'8':(2.0, 0.0),
'9':(0.0, 2.0),
'0':(2.0, 2.0),
'*':(-2.0, 0.0),
'(':(0.0, -2.0),
')':(-2.0, -2.0),
'a':(6.0, 6.0),
's':(7.0, 7.0),
'd':(8.0, 8.0),
}
gripperBindings = { 'g': 1, 'G': -1 }
frameSelections = { '-': 0, '=': 8 }
def getKey():
    """Read a single keypress from stdin without waiting for Enter.

    Puts the terminal into raw mode, reads one character, then restores the
    terminal attributes saved in the module-level `settings`.
    """
    tty.setraw(sys.stdin.fileno())
    # Poll stdin (timeout 0); the read below still blocks until a key arrives.
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
def vels(jointSpeed, cartesianSpeed, moveSpeed, swingSpeed, gripperSpeed, frameID):
    # Human-readable summary of all current teleop speed settings.
    # frameID is zero-based internally but displayed one-based.
    template = ("currently:\tJointSpeed %s\tCartesianSpeed %s\tMoveSpeed %s"
                "\tSwingSpeed %s\tGripperSpeed %s\tFrameID %s")
    return template % (jointSpeed, cartesianSpeed, moveSpeed, swingSpeed,
                       gripperSpeed, frameID + 1)
if __name__=="__main__":
    # Save the terminal attributes so getKey() can restore them after raw reads.
    settings = termios.tcgetattr(sys.stdin)

    pub = rospy.Publisher('keyboard_input', Keyboard, queue_size = 1)
    rospy.init_node('teleop_keyboard')

    # Speed scales and manipulator reference frame, overridable via ROS params.
    jointSpeed = rospy.get_param("~JointSpeed", 1.00)
    cartesianSpeed = rospy.get_param("~CartesianSpeed", 1.00)
    moveSpeed = rospy.get_param("~MoveSpeed", 3.00)
    swingSpeed=rospy.get_param("~SwingSpeed", 1.00)
    gripperSpeed=rospy.get_param("~GripperSpeed", 1.00)
    frameID=rospy.get_param("~FrameID",8)

    # Per-cycle command vectors; zero means "no motion" on that axis.
    joints = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    cartesians = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
    moves = [ 0.0, 0.0, 0.0, 0.0 ]
    swings = [ 0.0, 0.0 ]
    grippers = 0.0
    commandid = 8 # corresponding branches: 1.STOP,2.MOVE,3.joints,4.cartesians,5.moves,6.swings,7.grippers,8.frames
    status = 0 # counts handled speed/frame keys; help text is reprinted every 15
    try:
        print msg
        print vels(jointSpeed, cartesianSpeed, moveSpeed, swingSpeed, gripperSpeed, frameID)
        rate = rospy.Rate(100)
        while not rospy.is_shutdown():
            # Reset all commands each cycle; only the pressed key's branch fills one in.
            joints = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
            cartesians = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
            moves = [ 0.0, 0.0, 0.0, 0.0 ]
            swings = [ 0.0, 0.0 ]
            grippers = 0.0
            commandid = 8
            key = getKey()
            if (key == '`'):
                commandid = 0
            elif (key == '~'):
                commandid = 1
            elif key in jointBindings.keys():
                # Joint-space jog: copy the 7-element velocity pattern.
                joints[0] = jointBindings[key][0]
                joints[1] = jointBindings[key][1]
                joints[2] = jointBindings[key][2]
                joints[3] = jointBindings[key][3]
                joints[4] = jointBindings[key][4]
                joints[5] = jointBindings[key][5]
                joints[6] = jointBindings[key][6]
                commandid = 2
            elif key in cartesianBindings.keys():
                # Cartesian-space jog of the end effector.
                cartesians[0] = cartesianBindings[key][0]
                cartesians[1] = cartesianBindings[key][1]
                cartesians[2] = cartesianBindings[key][2]
                cartesians[3] = cartesianBindings[key][3]
                cartesians[4] = cartesianBindings[key][4]
                cartesians[5] = cartesianBindings[key][5]
                commandid = 3
            elif key in moveSpeeds.keys():
                # Change the base-motion speed preset and echo the new settings.
                moveSpeed = moveSpeeds[key]
                print vels(jointSpeed, cartesianSpeed, moveSpeed, swingSpeed, gripperSpeed, frameID)
                if (status == 14):
                    print msg
                status = (status + 1) % 15
            elif key in moveBindings.keys():
                # Mobile-base motion; only the first component is speed-scaled here.
                moves[0] = moveBindings[key][0] * moveSpeed
                moves[1] = moveBindings[key][1]
                moves[2] = moveBindings[key][2]
                moves[3] = moveBindings[key][3]
                commandid = 4
            elif key in swingBindings.keys():
                swings[0] = swingBindings[key][0]
                swings[1] = swingBindings[key][1]
                commandid = 5
            elif key in gripperBindings.keys():
                grippers = gripperBindings[key]
                commandid = 6
            elif key in frameSelections.keys():
                # Switch the manipulator reference frame.
                frameID = frameSelections[key]
                if (status == 14):
                    print msg
                status = (status + 1) % 15
            elif key in homeBindings.keys():
                # Homing: broadcast the same preset id on every joint slot.
                joints[0] = homeBindings[key]
                joints[1] = homeBindings[key]
                joints[2] = homeBindings[key]
                joints[3] = homeBindings[key]
                joints[4] = homeBindings[key]
                joints[5] = homeBindings[key]
                joints[6] = homeBindings[key]
                commandid = 9
            elif (key == 'f'):
                commandid = 10
            elif (key == '?'):
                commandid = 11
            else:
                # Unknown key: publish an all-zero "idle" command.
                joints = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
                cartesians = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
                moves = [ 0.0, 0.0, 0.0, 0.0 ]
                swings = [ 0.0, 0.0 ]
                grippers = 0.0
                commandid = 8
                if (key == '\x03'):
                    # CTRL-C exits the teleop loop.
                    break
            keyboard = Keyboard()
            # Apply the speed scales while filling the message.
            for i in range(len(joints)):
                keyboard.joints.append(joints[i] * jointSpeed)
            for i in range(len(cartesians)):
                keyboard.cartesians.append(cartesians[i] * cartesianSpeed)
            for i in range(len(moves)):
                keyboard.moves.append(moves[i])
            for i in range(len(swings)):
                keyboard.swings.append(swings[i] * swingSpeed)
            keyboard.grippers = grippers * gripperSpeed
            keyboard.framesID = frameID
            keyboard.commandID = commandid
            print keyboard.joints, keyboard.commandID
            print " "
            pub.publish(keyboard)
            rate.sleep()
    except Exception , e:
        print e,"shit"
# Blackjack (Baekjoon 2798), brute force over all 3-card combinations.
from itertools import combinations


def blackJack():
    """Read card count n and target m, then n card values, from stdin.

    :return: the maximum sum of any three cards that does not exceed m
        (returns immediately on an exact match)
    """
    n, m = map(int, input().split())
    cards = list(map(int, input().strip().split()))
    best = 0
    # combinations() enumerates i<j<k triples in the same order as the
    # original triple loop, so the early-return behavior is preserved.
    for trio in combinations(cards, 3):
        total = sum(trio)  # `total`, not `sum`: avoid shadowing the builtin
        if total == m:
            return total
        if best < total < m:
            best = total
    return best


if __name__ == "__main__":
    # Guarded so importing this module no longer consumes stdin.
    print(blackJack())
|
#!/usr/bin/env python
# coding=utf-8
BROKER_URL = 'redis://127.0.0.1:6379' # message broker
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0' # result backend
CELERY_TASK_SERIALIZER = 'msgpack'
# Bug fix: this was misspelled CELERY_RESULT_SERIALZER, a setting name
# Celery silently ignores, so results fell back to the default serializer.
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json', 'msgpack']
CELERY_TIMEZONE='Asia/Shanghai' # timezone; the default is UTC
# CELERY_TIMEZONE='UTC'
CELERY_IMPORTS = ( # task modules to import
    'celery_app.task',
    # 'celery_app.task2'
)
|
import urllib2, json
from django.db import models
from h1ds_summary.utils import delete_attr_from_summary_table
from h1ds_summary.utils import update_attribute_in_summary_table
from h1ds_summary.tasks import populate_attribute_task
# Help strings shown in the Django admin for each SummaryAttribute field.
# (The backslashes inside the string values are line continuations within
# the literals, not newlines in the rendered text.)
sa_help_text={
    'slug':"Name of the attribute as it appears in the URL.",
    'name':"Full name of the attribute.",
    'source':"Either URL from H1DS web service (must start with \
http://) or name of class, e.g. h1nf.KappaH (will use h1ds_summary.attri\
butes.h1nf.KappaH).",
    'description':"Full description of the summary attribute.",
    'is_default':"If true, this attribute will be shown in the default \
list, e.g. for shot summary.",
    'display_order':"When visible, attributes will be displayed from \
left to right with increasing display_order.",
    'format_string':'How value is to be displayed on website (optional).\
e.g. %.2f will format a float to 2 decimal places.',
    }
class SummaryAttribute(models.Model):
    """A summary quantity computed per shot, either fetched from an H1DS
    web-service URL or computed by a class in h1ds_summary.attributes."""
    slug = models.SlugField(max_length=100, unique=True,
                            help_text=sa_help_text['slug'])
    name = models.CharField(max_length=500,
                            help_text=sa_help_text['name'])
    source = models.CharField(max_length=4096,
                              help_text=sa_help_text['source'])
    description = models.TextField(help_text=sa_help_text['description'])
    is_default = models.BooleanField(default=False, blank=True,
                                     help_text=sa_help_text['is_default'])
    display_order = models.IntegerField(default=1000, blank=False,
                                        help_text=sa_help_text['display_order'])
    format_string = models.CharField(max_length=64,
                                     blank=True,
                                     help_text=sa_help_text['format_string'])

    class Meta:
        ordering = ["display_order", "slug"]
        permissions = (
            ("recompute_summaryattribute", "Can recompute the summary attribute and update the database."),
            ("raw_sql_query_summaryattribute", "Can query database with raw SQL."),
        )

    def save(self, *args, **kwargs):
        # Persist first, then sync the summary table column and kick off an
        # asynchronous task to (re)populate the attribute's values.
        super(SummaryAttribute, self).save(*args, **kwargs)
        update_attribute_in_summary_table(self.slug)
        populate_attribute_task.delay(self.slug)

    def delete(self, *args, **kwargs):
        # Drop the column from the summary table before deleting the row.
        delete_attr_from_summary_table(self.slug)
        super(SummaryAttribute, self).delete(*args, **kwargs)

    def __unicode__(self):
        return self.name

    def get_value(self, shot_number):
        """Return (value, dtype) for *shot_number*, or ('NULL', 'NULL') on
        any failure.

        TODO: handle errors better than a blanket NULL fallback.
        """
        if self.source.startswith('http://'):
            try:
                fetch_url = self.source.replace('__shot__', str(shot_number))
                request = urllib2.Request(fetch_url)
                response = json.loads(urllib2.urlopen(request).read())
                value = response['data']
                dtype = response['meta']['summary_dtype']
                if value is None:  # fixed: was `value == None`
                    value = 'NULL'
                return (value, dtype)
            except Exception:  # narrowed from a bare except clause
                return ('NULL', 'NULL')
        else:
            # Assume source names a class inside h1ds_summary.attributes,
            # e.g. "h1nf.KappaH" -> h1ds_summary.attributes.h1nf.KappaH.
            try:
                split_name = self.source.split('.')
                submodule_name = '.'.join(split_name[:-1])
                module_name = '.'.join(['h1ds_summary.attributes', submodule_name])
                class_name = split_name[-1]
                source_module = __import__(module_name, globals(),
                                           locals(), [class_name], -1)
                source_class = source_module.__getattribute__(class_name)
                return source_class(shot_number).do_script()
            except Exception:  # narrowed from a bare except clause
                return ('NULL', 'NULL')
|
import tensorflow as tf
import os
# Silence TensorFlow's INFO/WARNING C++ log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
FLAGS = tf.app.flags.FLAGS
# Command-line flags: output tfrecords path, captcha image directory, and
# the alphabet the captcha labels are drawn from.
tf.app.flags.DEFINE_string("tfrecords_dir","./tfrecords/captcha.tfrecords","验证码tfrecords文件")
tf.app.flags.DEFINE_string("captcha_dir","../data/Genpics/","验证码图片路径")
tf.app.flags.DEFINE_string("letter","ABCDEFGHIJKLMNOPQRSTUVWXYZ","验证码字符的种类")
def deal_lab(label_str):
    '''
    Convert byte-string captcha labels into per-character indices.

    :param label_str: rows shaped like [[b"NZPP"], ...]
    :return: a constant tensor of index lists into FLAGS.letter
    '''
    # Map each letter of the alphabet to its index, e.g. {'A': 0, 'B': 1, ...}
    letter_num = {ch: idx for idx, ch in enumerate(FLAGS.letter)}
    # Translate every 4-letter label into its list of indices.
    num_li = []
    for row in label_str:
        num_li.append([letter_num[ch] for ch in row[0].decode('utf-8')])
    print(num_li)
    # Example result:
    # [[13, 25, 15, 15],
    #  [22, 10, 7, 10],
    #  [22, 15, 18, 9], ...]
    return tf.constant(num_li)
def get_captcha_img():
    '''
    Read every captcha image under FLAGS.captcha_dir into one batch.

    :return: batched image tensor of shape [6000, 20, 80, 3]
    '''
    # Build the file names ourselves ("0.jpg", "1.jpg", ...) because
    # os.listdir() returns entries in arbitrary order and the image order
    # must match the label file.
    f_name_li = []
    for i in range(len(os.listdir(FLAGS.captcha_dir))):
        string = str(i) + ".jpg"
        f_name_li.append(string)
    file_list = [os.path.join(FLAGS.captcha_dir, file) for file in f_name_li]
    # File-name queue; shuffle=False keeps images aligned with labels.
    file_queue = tf.train.string_input_producer(file_list, shuffle=False)
    # Whole-file reader.
    reader = tf.WholeFileReader()
    # Read one image's raw bytes.
    key,value = reader.read(file_queue)
    # Decode JPEG bytes into an image tensor.
    image = tf.image.decode_jpeg(value)
    # Fix the static shape so batching knows the per-element size.
    image.set_shape([20,80,3])
    # Batch all images at once: [6000, 20, 80, 3].
    image_batch = tf.train.batch([image], batch_size=6000, num_threads=1, capacity=6000)
    return image_batch
def get_captcha_lab():
    '''
    Read the CSV file holding the captcha labels.

    :return: batched label tensor (6000 string labels, aligned with the
        image batch from get_captcha_img)
    '''
    file_queue = tf.train.string_input_producer(["../data/Genpics/labels.csv"],shuffle=False)
    reader = tf.TextLineReader()
    key,value = reader.read(file_queue)
    # CSV schema: (row number, label string).
    records = [[1],["None"]]
    num,label = tf.decode_csv(
        value,
        record_defaults=records
    )
    lab_batch = tf.train.batch(
        [label],
        batch_size=6000,
        num_threads=1,
        capacity=6000,
    )
    return lab_batch
def build_tf(img_bat,lab_bat):
    '''
    Write image/label pairs into the TFRecords file FLAGS.tfrecords_dir.

    :param img_bat: batched image tensor (features)
    :param lab_bat: batched label tensor
    :return: None
    '''
    # Cast labels so .tostring() yields one byte per character index.
    lab_bat = tf.cast(lab_bat,tf.uint8)
    # Create the TFRecords writer.
    writter = tf.python_io.TFRecordWriter(FLAGS.tfrecords_dir)
    # Build one serialized Example per image and write it out.
    for i in range(len(os.listdir(FLAGS.captcha_dir))):
        # Evaluate the i-th image/label and serialize to raw bytes.
        img_str = img_bat[i].eval().tostring()
        lab_str = lab_bat[i].eval().tostring()
        # Bug fix: Example takes features=tf.train.Features(...) (plural);
        # the original passed tf.train.Feature, which raises at runtime.
        example = tf.train.Example(
            features=tf.train.Features(feature={
                "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_str])),
                "label": tf.train.Feature(bytes_list=tf.train.BytesList(value=[lab_str]))
            })
        )
        # Bug fix: the original built the example but never wrote it,
        # producing an empty tfrecords file.
        writter.write(example.SerializeToString())
    writter.close()
    return None
if __name__ == '__main__':
    # Build the image-reading pipeline.
    img_batch = get_captcha_img()
    # Build the label-reading pipeline.
    label = get_captcha_lab()
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # e.g. [b'NZPP' b'WKHK' b'WPSJ' ..., b'FVQJ' b'BQYA' b'BCHR']
        label_str = sess.run(label)
        # Convert the string labels into a tensor of character indices.
        lab_batch = deal_lab(label_str)
        # Write features and labels into the tfrecords file.
        build_tf(img_batch, lab_batch)
        coord.request_stop()
        coord.join(threads)
from odoo import models, fields, api
from odoo import http
class HelpdeskTicket(models.Model):
    """Extend helpdesk.ticket with an HTML description, an attachment list,
    and a hard-coded default assignee."""
    _inherit = 'helpdesk.ticket'

    # Rich-text (HTML) ticket description.
    description = fields.Html()
    # Attachments pointing back at this ticket via helpdesk_ticket_ids.
    attachment_id = fields.One2many('ir.attachment','helpdesk_ticket_ids', string="Attachments")

    @api.model
    def default_get(self,default_fields):
        """Default the ticket's assignee to the user named
        'Odoo@gems-ksa.com', when such a user exists."""
        res = super(HelpdeskTicket, self).default_get(default_fields)
        # for group_d in self.env['res.users'].browse(self.env.user.id).groups_id:
        #     if group_d.name == "Manager" and group_d.category_id.name == 'Helpdesk':
        #         res['is_helpdesk_manager'] = True
        user_id = self.env['res.users'].search([('name','=','Odoo@gems-ksa.com')])
        if user_id:
            res['user_id'] = user_id.id
        return res

    # Disabled override: on create, e-mail every member of the ticket's team
    # when no assignee was given.
    # @api.model
    # def create(self, vals):
    #     ticket = super(HelpdeskTicket, self).create(vals)
    #
    #     if not vals.get('user_id'):
    #         team_member_ids = ticket.team_id.member_ids
    #         template = self.env.ref('helpdesk_custom.example_email_template')
    #         for user_id in team_member_ids:
    #             body_html = template.body_html.format(
    #                 username=user_id.name,
    #                 ticket_name=ticket.name,
    #                 id=ticket.id,
    #                 creator_name=ticket.create_uid.name,
    #                 base_url=http.request.env['ir.config_parameter'].get_param('web.base.url'))
    #             subject = template.subject.format(name=ticket.name, ticket_no=" (#" + str(ticket.id) + ")")
    #             email_to = user_id.login
    #             mail_id = self.env['mail.mail'].create({
    #                 'body_html' : body_html,
    #                 'email_to' : email_to,
    #                 'subject' : subject,
    #             })
    #             if mail_id:
    #                 mail_id.send()
    #                 self.env['mail.mail'].browse(mail_id.id).unlink()
    #     return ticket
#@author: Bharath HS
from PIL import Image
from io import BytesIO
import os
import datetime
import sys
import time
from PIL import Image, ImageChops
from scipy.misc import imread
from scipy.linalg import norm
from scipy import sum, average
import traceback
import sys
class Image_Processing():
    """Selenium/PIL helpers: capture an element screenshot, compare two
    images numerically, and render a diff image highlighting mismatches.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 — this class
    requires an old SciPy (or porting to imageio); confirm the environment.
    """
    #def __init__(self):
    # Below function would take a screenshot for the element(image viewer) identified
    def Convert_Image_to_String(self,image_path):
        """Return the base64 encoding of the file at *image_path* as a str.

        The "b'...'" wrapper from the bytes repr is stripped manually.
        """
        import base64
        #image_path = 'C:/Git_Repository/Project_ISD/Package_ISD/Element_Screenshots/Test.png'
        with open(image_path, "rb") as imageFile:
            image_str = base64.b64encode(imageFile.read())
        time.sleep(1)
        # str(bytes) yields "b'...'"; slice off the prefix, then drop quotes.
        Image_Slice = str(str(image_str).split("b'")[1])
        Image_encrypt = str(Image_Slice).replace("'","")
        return Image_encrypt
    def Get_Screenshot_Ele(self,driver_arg,element_arg,Package_path):
        """Screenshot the full page via *driver_arg*, crop to *element_arg*'s
        bounding box, and save under Package_path/Element_Screenshots with a
        timestamped name.

        Returns the saved image path, or None (implicitly) on failure.
        """
        try:
            #find part of the page you want image of
            location = element_arg.location
            size = element_arg.size
            png = driver_arg.get_screenshot_as_png() # saves screenshot of entire page
            im = Image.open(BytesIO(png)) # uses PIL library to open image in memory
            left = location['x']
            top = location['y']
            right = location['x'] + size['width']
            bottom = location['y'] + size['height']
            im = im.crop((left, top, right, bottom)) # defines crop points
            #Pckg_Path = os.path.abspath(os.pardir) # Path of the Package folder
            now = datetime.datetime.now()
            # ISO-like timestamp plus a 2-digit fraction for uniqueness.
            time_stamp = str(now.strftime('%Y-%m-%dT%H:%M:%S') + ('-%02d' % (now.microsecond / 10000)))
            print(time_stamp.replace(":","_").replace("-","_").replace(" ","_"))
            time_stamp_new = time_stamp.replace(":","_").replace("-","_").replace(" ","_") #timestamp with delimenters replaced with '_'
            file_name = 'screenshot_'+time_stamp_new
            image_path = Package_path+'/Element_Screenshots/'+file_name+'.png'
            im.save(image_path) # saves new cropped image
            return image_path
        except Exception:
            print("Screenshot was not captured")
            print(str(traceback.format_exc() + "--- "+ str(sys.exc_info()[0])))
    #--------------Below functions would compare the images w.r.t pixels -------------------------------
    def normalize(self,arr):
        """Rescale *arr* linearly into the 0..255 range.

        NOTE(review): a constant array gives rng == 0 and the division
        raises; the except then prints and implicitly returns None —
        confirm whether that fallback is intended.
        """
        try:
            rng = arr.max()-arr.min()
            amin = arr.min()
            return (arr-amin)*255/rng
        except Exception:
            print(ValueError)
    def to_grayscale(self,arr):
        "If arr is a color image (3D array), convert it to grayscale (2D array)."
        if len(arr.shape) == 3:
            return average(arr, -1)  # average over the last axis (color channels)
        else:
            return arr
    def compare_images(self,img1, img2):
        """Return (Manhattan norm, zero norm) of the pixel difference,
        or None if either normalization or the subtraction fails."""
        # normalize to compensate for exposure difference, this may be unnecessary
        # consider disabling it
        try:
            img1 = self.normalize(img1)
            img2 = self.normalize(img2)
            # calculate the difference and its norms
            diff = img1 - img2  # elementwise for scipy arrays
            m_norm = sum(abs(diff))  # Manhattan norm
            z_norm = norm(diff.ravel(), 0)  # zero "norm": count of nonzero pixels
            return (m_norm, z_norm)
        except:
            return None
    def Final_Image_Comparison(self,img1_arg,img2_arg):
        """Load two image files, grayscale them, and print/return the
        Manhattan-norm difference (None on failure)."""
        try:
            #file1, file2 = sys.argv[1:1+2]
            # read images as 2D arrays (convert to grayscale for simplicity)
            img1 = self.to_grayscale(imread(img1_arg).astype(float))
            img2 = self.to_grayscale(imread(img2_arg).astype(float))
            # compare
            n_m, n_0 = self.compare_images(img1, img2)
            if n_m is not None:
                print(n_m)
                print("M_norm:", n_m, "/ per pixel:", n_m/img1.size)
                print("Z_norm:", n_0, "/ per pixel:", n_0*1.0/img1.size)
                print("M_norm:", n_m, "/ per pixel:", n_m/img2.size)
                print("Z_norm:", n_0, "/ per pixel:", n_0*1.0/img2.size)
                return n_m
            else:
                return None
        except:
            # NOTE(review): if compare_images returns None, the tuple
            # unpacking above raises and lands here — the n_m check never
            # actually sees a None.
            print(Exception)
            return None
            #self.Report_Instance.Report_Log("Image Validation","Should be able to validate the images","Encountered with an exception - "+str(traceback.format_exc() + "--- "+ str(sys.exc_info()[0])),"FAILED")
    # -------------Below functions would greyout the background of the images and highlight the differences of 2 images -------------------
    # need to rework on the arguments and how to save the images without using timestamps.
    def black_or_b(self,a, b):
        """Return a new RGB image where only the pixels that differ between
        *a* and *b* are painted from *b* (difference mask as paste mask)."""
        diff = ImageChops.difference(a,b)
        diff = diff.convert('L')
        # diff = diff.point(point_table)
        #h,w=diff.size
        new = diff.convert('RGB')
        new.paste(b, mask=diff)
        return new
    # Below code will highlight the mismatch (Mismatch could be the annotations/lesions)
    def Image_Comparison_Stage_1(self,img1_arg,img2_arg,Package_path):
        """Build and save a diff image of the two inputs under
        Package_path/Element_Screenshots; return the saved path."""
        #Pckg_Path = os.path.abspath(os.pardir) # Path of the Package folder
        now = datetime.datetime.now()
        time_stamp = str(now.strftime('%Y-%m-%dT%H:%M:%S') + ('-%02d' % (now.microsecond / 10000)))
        print(time_stamp.replace(":","_").replace("-","_").replace(" ","_"))
        time_stamp_new = time_stamp.replace(":","_").replace("-","_").replace(" ","_") #timestamp with delimenters replaced with '_'
        file_name = 'diff_'+time_stamp_new
        image_path = Package_path+'/Element_Screenshots/'+file_name+'.png'
        print("Image_Comparison_Stage_1 ",image_path)
        a = Image.open(img1_arg)
        b = Image.open(img2_arg)
        c = self.black_or_b(a, b)
        c.save(image_path)
        return image_path
"""
[Homework]
1. Write a Python program to generate and print a dictionary that contains an entry of the form (x, x*x) for each number x between 1 and n.
e.g. User inputs 5
Expected Output : {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
2. Write a program to sort a dictionary by key in both ascending and descending order.
3. Write a program to sort a dictionary by value in both ascending and descending order.
""" |
#!/usr/bin/python
####
# Smoke test: fire one InfluxDB-style JSON datapoint at a local UDP listener.
print "========================="
print "testing udp using sockets"
print "========================="
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sendto returns the number of bytes sent; printed for quick feedback.
print sock.sendto('[{"name":"udp_test", "columns": ["value"], "points":[[69]]}]', ('127.0.0.1', 5454))
|
#!/usr/bin/python
# -*-coding:utf-8-*-
# Description: get new user_account info from weixin.sogou.com
# Version: 1.0
# History: 2014-07-04 Created by Hou
import os
import time
from threading import Thread
from bs4 import *
from config import *
from connect_with_proxy_ip_and_fake_ua import get_connect_by_proxyip_ua
from help_func_for_user_account_crawl import *
################### Main function for get new account ####################
def write_keywords_to_redis_set(keywords):
    """Push every keyword into the shared redis set 'keywords_set'."""
    for kw in keywords:
        r.sadd('keywords_set', kw)
def get_new_account_info_by_single_nav_page(page_num, keyword):
    """
    Scrape one search-result (nav) page for *keyword*.

    Return a tuple of 2 items:
    the first is a list of account-info dicts found on the page,
    the second is a bool indicating whether this is the last nav page.
    Returns None if the page could not be fetched or parsed.
    """
    single_nav_info_list = []
    # Build the page URL from the keyword and page number.
    tmp_nav_url = get_nav_page_url_by_keywords(keyword)
    nav_url = tmp_nav_url[0] + str(page_num) + tmp_nav_url[1]
    print "nav_url -> ", nav_url
    # connect to the website, and build soup
    # Fetch through a rotating proxy IP / fake user agent.
    c = get_connect_by_proxyip_ua(nav_url)
    if (c is None):
        return None
    # build soup
    soup_obj = BeautifulSoup(c.read())
    if (soup_obj is None):
        return None
    is_last_page = is_last_page_by_soup(soup_obj)
    # print soup_obj.prettify()
    # Each result entry lives in a div with this exact class combination.
    all_divs = soup_obj.find_all("div", class_="wx-rb bg-blue wx-rb_v1 _item")
    if (all_divs is None):
        return None
    for info_div in all_divs:
        # Extract one account's info from its div.
        weibo_info = get_info_by_tag(info_div, keyword)
        if weibo_info is not None:
            single_nav_info_list.append(weibo_info)
    return (single_nav_info_list, is_last_page)
def get_new_account_info_by_nav_pages(keyword, max_page_number=20):
    """ search keyword on all pages on weixin.sogou.com
    Return a list of dict, which is the all_info found for keyword
    So far the max nav page number is 20
    """
    new_account_info_list = []
    for page_num in xrange(1, max_page_number + 1):
        log(str(keyword) + " : crawl page %d ..." % page_num)
        # Fetch and accumulate the account info from one nav page.
        single_nav_info_list = get_new_account_info_by_single_nav_page(
            page_num, keyword)
        if (single_nav_info_list is None):
            log("The search is failed, check the url or the proxy_ips. \n")
            break
        account_info_list = single_nav_info_list[0]
        if account_info_list is not None:
            new_account_info_list.extend(account_info_list)
        # Stop as soon as the site reports the last page.
        is_last_page = single_nav_info_list[-1]
        if (is_last_page is True):
            log("The max nav page for " + '\"' +
                str(keyword) + '\"' + " is %d " % page_num)
            break
        # Throttle between page fetches to avoid being blocked.
        sleep_for_a_while()
    return new_account_info_list
def get_header_list_for_new_account():
    """Return the ordered column names used when exporting new accounts."""
    return [
        "weibo_name", "weibo_id", "home_page_url", "QR_code_url",
        "sogou_openid", "tou_xiang_url", "function_description",
        "is_verified", "verified_info", "keywords",
    ]
def output_new_account_to_local_file(new_account_info_list):
"""
Output new account info to local file
"""
# get header, new account has no account_id and is_existed
header_list = get_header_list_for_new_account()
header_str = '\t'.join(header_list) + '\n'
# create new path and file to store the info
time_str = time.strftime('%Y%m%d')
path_dir = "./account/" + time_str
file_path = path_dir + "/" + "new_weixin_account.tsv"
try:
# determine wheter the path is existed or not
is_dir_existed = os.path.exists(path_dir)
if (not is_dir_existed):
# create the directory, and write header_str to the file
log("the path is not existed, create a new one")
os.makedirs(path_dir)
file_obj = open(file_path, 'w')
file_obj.write(header_str)
else:
log("the path is existed")
# open the file as append mode --> no header_str
file_obj = open(file_path, 'a')
# write all the new account info to file
for single_info in new_account_info_list:
single_info_list = []
# get single account info based on header_list
for field in header_list:
single_info_list.append(single_info.get(field, 'NA'))
single_info_str = '\t'.join(
[str(i) for i in single_info_list]) + '\n'
file_obj.write(single_info_str)
except BaseException as output_error:
print "error: output_new_account_to_local_file " + output_error.message
finally:
file_obj.close()
def get_new_account_info_by_keywords_from_redis():
    # Drain the redis 'keywords_set' work queue: for each popped keyword,
    # crawl all nav pages and write the results to the local TSV file.
    # Keywords whose search fails are parked in 'keyword_fail_set'.
    # NOTE(review): the total count is read from 'account_id_set', not
    # 'keywords_set' -- looks like a copy/paste slip; confirm intent.
    total_num_keywords = r.scard('account_id_set')
    keyword = r.spop('keywords_set')
    while keyword is not None:
        log("The total number of keywords is " +
            str(total_num_keywords) +
            " , and remain number of keywords is " +
            str(r.scard('keywords_set')) +
            " , current keyword is " + str(keyword))
        new_account_info_list = get_new_account_info_by_nav_pages(keyword)
        if new_account_info_list is None or new_account_info_list == []:
            # Empty/failed search: remember the keyword for later retry.
            log('search failed, add the keyword to failed set in redis ' +
                'the keyword is ' + str(keyword))
            r.sadd("keyword_fail_set", keyword)
            keyword = r.spop('keywords_set')
            continue
        else:
            output_new_account_to_local_file(new_account_info_list)
        # next iteration
        keyword = r.spop('keywords_set')
def get_new_account_info_by_keywords(keywords):
    """ Get new weixin account from weixin.sogou.com by keywords
    Return a list of list of dict, which contains all the weixin_info
    """
    # Seed the shared redis queue, then drain it single-threaded.
    write_keywords_to_redis_set(keywords)  # keywords_set
    get_new_account_info_by_keywords_from_redis()
def get_new_account_with_multi_thread(keywords):
    """
    Verified the account with multi_thread
    """
    # Seed the shared redis queue, then spawn THREAD_NUM daemon workers
    # that concurrently pop keywords from redis and crawl them.
    write_keywords_to_redis_set(keywords)
    threads = []
    for i in xrange(THREAD_NUM):
        t = Thread(target=get_new_account_info_by_keywords_from_redis)
        # Daemon threads die with the main process on interrupt.
        t.setDaemon(True)
        threads.append(t)
    # start all the thread
    for t in threads:
        t.start()
    # Wait until all thread terminates
    for t in threads:
        t.join()
def main():
    # Seed keywords for the crawl; the single-threaded variant is kept
    # below (commented out) for debugging.
    keywords = ['it', 'df', 'houxianxu', 'movie']
    # get_new_account_info_by_keywords(['IT', 'df', 'houxianxu'])
    get_new_account_with_multi_thread(keywords)
if __name__ == '__main__':
    main()
|
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
# based largely on https://djangosnippets.org/snippets/261
from django.contrib.gis.db import models
class Topic(models.Model):
    # Feedback category; the name must be unique across topics.
    name = models.CharField(max_length=50, unique=True)
    def __unicode__(self):
        # Python 2 / legacy-Django string representation.
        return self.name
# A simple feedback form with four fields.
class Feedback(models.Model):
    # A simple feedback submission with sender details and a message.
    title = models.CharField(max_length=80, default='Sodo Feedback')
    name = models.CharField(max_length=50)
    email = models.EmailField()
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument
    # for ForeignKey; this model apparently targets an older Django.
    topic = models.ForeignKey(Topic)
    message = models.TextField()
    def __unicode__(self):
        # Python 2 / legacy-Django string representation.
        return self.title
|
# Generated by Django 2.2.7 on 2019-11-25 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering Uploadss.file.
    dependencies = [
        ('gallery', '0008_auto_20191125_2044'),
    ]
    operations = [
        migrations.AlterField(
            model_name='uploadss',
            name='file',
            # NOTE(review): upload_to is an absolute Windows path baked in at
            # generation time; it will break on any other machine. Editing a
            # historical migration is unsafe -- fix the model's upload_to and
            # generate a new migration instead.
            field=models.FileField(upload_to='C:\\Users\\USER\\Desktop\\django text\\media'),
        ),
    ]
|
#!/usr/bin/env python3
#----------------------------------------------------------------------#
# Script Name Ops Challenge 13
# Author Kimberley Cabrera-Boggs
# Date of last revision October 21, 2020
# Description of purpose Network Security Tool w/Scapy Part 1, 2 & 3
#----------------------------------------------------------------------#
# ,-~~-.___.
# / | ' \
# ( ) 0
# \_/-, ,----'
# ==== //
# / \-'~; /~~~(O)
# / __/~| / |
# =( _____| (_________|
#--------------------------Import Libraries--------------------#
import ipaddress
import sys
import socket
import random
from scapy.all import sr1,IP,ICMP,TCP,ARP,Ether,srp
#--------------------------Declare variable--------------------#
def interface():
    """Display the lab menu and run the selected part.

    Any choice other than 1-3 exits the program.
    """
    user_choice = input("""
    Which part do you want to perform?
    1. part1 lab
    2. part2 lab
    3. part3 lab
    4. Exit
    """)
    if user_choice == "1":
        part1_lab(input("Please enter your ip: "))
        return
    if user_choice == "2":
        part2_lab()
        return
    if user_choice == "3":
        part3_lab()
        return
    print("Exiting....")
    exit()
def part1_lab(host):
    """TCP SYN-scan ports 21-23 on *host* and report each port's state.

    A SYN/ACK reply (TCP flags 0x12) means open, RST/ACK (0x14) means
    closed, and no TCP reply at all means the probe was filtered/dropped.
    """
    if host is None:
        return
    port_range = [21, 22, 23]
    #--------------------------Probe each port------------------------#
    # BUG FIX: the original sent a single probe (to port 22) before the
    # loop and then judged every port from that one response; each port
    # must be probed individually inside the loop.
    for dst_port in port_range:
        src_port = random.randint(1025, 65534)
        response = sr1(
            IP(dst=host)/TCP(sport=src_port, dport=dst_port, flags="S"),
            timeout=1,
            verbose=0,
        )
        if response is None:
            print("packet filtered")
        elif response.haslayer(TCP):
            if response.getlayer(TCP).flags == 0x12:
                #----------port responding and open---------------#
                print("Port " + str(dst_port) + " is open")
            elif response.getlayer(TCP).flags == 0x14:
                #----------port responding and closed-------------#
                print("Port " + str(dst_port) + " is closed")
        else:
            #----------Filter port and drop-------------------#
            print("The port is filtered and dropped")
#--------------------------Part 2 of 3----------------------------#
def part2_lab():
    """Ping-sweep an IPv4 network: send one ICMP echo request to every
    usable host and classify it as responding, blocking ICMP, or down."""
    network = input("Enter your network address with the CIDR block code: ")
    addresses = ipaddress.IPv4Network(network)
    host_count = 0  # hosts that answered the echo request
    for host in addresses:
        # Skip the network and broadcast addresses.
        if (host in (addresses.network_address, addresses.broadcast_address)):
            continue
        resp = sr1(
            IP(dst=str(host))/ICMP(),
            timeout=2,
            verbose=0,
        )
        if resp is None:
            print(f"{host} is down or not responding.")
        # ICMP type 3 (destination unreachable) with these codes indicates
        # the traffic was administratively filtered.
        elif (
            int(resp.getlayer(ICMP).type)==3 and
            int(resp.getlayer(ICMP).code) in [1,2,3,9,10,13]
        ):
            print(f"{host} is blocking ICMP.")
        else:
            print(f"{host} is responding.")
            host_count += 1
    # NOTE: num_addresses includes the network/broadcast addresses skipped above.
    print(f"{host_count}/{addresses.num_addresses} hosts are online.")
#--------------------------Part 3 of 3-------------------------------#
def part3_lab():
    """ARP-scan the given network, then SYN-scan each discovered host."""
    request = ARP()
    request.pdst = input("Enter your ip with the CIDR block: ")
    # Broadcast frame so every host on the segment sees the ARP who-has.
    broadcast = Ether()
    broadcast.dst = 'ff:ff:ff:ff:ff:ff'
    request_broadcast = broadcast / request
    # srp returns (answered, unanswered); keep only the answered pairs.
    clients = srp(request_broadcast, timeout = 1)[0]
    for element in clients:
        # element[1] is the reply: psrc = responder IP, hwsrc = its MAC.
        part1_lab(element[1].psrc)
        print(element[1].psrc + " " + element[1].hwsrc)
#----------------------------Main--------------------------------#
while True:
    # Re-show the menu after each lab until the user chooses Exit.
    interface()
#----------------------------End---------------------------------#
|
# -*- coding:utf-8 -*-#
# --------------------------------------------------------------
# NAME: for
# Description: for循环
# Author: xuezy
# Date: 2020/6/19 17:17
# --------------------------------------------------------------
"""
for循环实现1~100求和
range(101) 可以产生一个0~100的整数序列
range(1,100) 可以产生一个1~99的整数序列
range(1,100,2) 可以产生一个1~99的奇数序列,2是步长,即增量
"""
# Sum of 0..100 with a for loop (range(101) yields 0..100; adding 0 is a no-op,
# so this equals the sum of 1..100).
total = 0  # renamed from `sum` to avoid shadowing the builtin
for x in range(101):
    total += x
print(total)
# Sum of the even numbers in 0..100 (step 2).
sum1 = 0
for y in range(0, 101, 2):
    sum1 += y
print(sum1)
# Sum of the odd numbers in 1..99 (start 1, step 2).
sum2 = 0
for z in range(1, 100, 2):
    sum2 += z
print(sum2)
|
from flask import Flask, request, jsonify
import os
import logging
import time
import json
app = Flask(__name__)
# Load the decode lookup table and the challenge payload once at import time.
with open('decoder.json') as _file:
    decoder = json.load(_file)
with open('encoded.txt') as _file:
    encoded_text = _file.read()
@app.route("/")
def index():
    # Log the full request object for debugging, then return a fixed banner.
    app.logger.info(vars(request))
    return jsonify("Example Flask App")
@app.route("/tasking", methods=["GET"])
def tasking():
    # Echo all query-string parameters back as JSON, logging each pair.
    d = dict()
    for key, value in request.args.items():
        app.logger.info("{}, {}".format(key, value))
        d[key] = value
    return jsonify(d)
@app.route('/challenge', methods=['GET'])
def challenge():
    # Serve the raw encoded challenge text loaded at startup.
    return encoded_text
@app.route('/decode', methods=['GET'])
def decode():
    # Decode one value via the decoder map; 400 on missing/unknown input.
    value = request.args.get('value')
    time.sleep(1)  # deliberate throttle on decode attempts
    if value is None:
        return 'Error can\'t decode', 400
    answer = decoder.get(str(value))
    if answer is None:
        return 'Value [{}] doesn\'t exsist in decoder map'.format(value) , 400
    return answer, 200
if __name__ == "__main__":
    # Environment variables are always strings: coerce the port to int and
    # parse DEBUG explicitly -- the original passed the raw values through,
    # so DEBUG="false" was truthy and the port stayed a string when set.
    ip = os.environ.get('LISTEN_HOST', '0.0.0.0')
    port = int(os.environ.get('LISTEN_PORT', 8200))
    debug = str(os.environ.get('DEBUG', True)).lower() in ('1', 'true', 'yes')
    app.run(host=ip, port=port, debug=debug)
import errno
import socket
import select
import sys
HEADER_LENGTH = 10  # fixed-width length prefix for every frame
IP = '127.0.0.1'
PORT = 1234
my_username = input('username: ')
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, PORT))
# Non-blocking so recv() can be polled each loop instead of waiting.
client_socket.setblocking(False)
# Handshake: send the username framed as <10-char length><payload>.
encoded_username = my_username.encode('utf-8')
encoded_username_header = f'{len(encoded_username):<{HEADER_LENGTH}}'.encode('utf-8')
client_socket.send(encoded_username_header + encoded_username)
while True:
    message = input(f'{my_username} > ')
    if message:
        # Frame and send the outgoing message.
        encoded_message = message.encode('utf-8')
        encoded_message_header = f'{len(encoded_message):<{HEADER_LENGTH}}'.encode('utf-8')
        client_socket.send(encoded_message_header + encoded_message)
    try:
        # Drain all queued incoming frames (sender-name frame + message frame).
        while True:
            username_header = client_socket.recv(HEADER_LENGTH)
            if not len(username_header):
                # Empty read means the server closed the connection.
                print('close')
                sys.exit()
            username_length = int(username_header.decode('utf-8').strip())
            username = client_socket.recv(username_length).decode('utf-8').strip()
            message_header = client_socket.recv(HEADER_LENGTH)
            message_length = int(message_header.decode('utf-8').strip())
            message = client_socket.recv(message_length).decode('utf-8')
            print(f'{username} > {message}')
    except IOError as e:
        # EAGAIN/EWOULDBLOCK just means "nothing to read" on the
        # non-blocking socket; anything else is a real error.
        if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
            print(f'reading error - {e}', file=sys.stderr)
            sys.exit()
        continue
    except Exception as e:
        print(f'error - {e}', file=sys.stderr)
        sys.exit()
|
from nltk.tokenize import sent_tokenize, word_tokenize
#nltk.download()
#tokenizers
#word tokenizer - separating by word
#sentence tokenizer - separated by sentences
#lexicon and corporas
#corpora - body of text ex: presidential speech, a paragraph from your book.
#lexicon - words and their meanings (dictionary)
#words with dual meaning depending on the sentence
# Demo: split the sample text into sentences, then into word tokens.
example_text = "The sky was tuned to the color of the dead televison. hello darkness, my old friend. i've come to talk to you gain."
print (sent_tokenize(example_text))
print (word_tokenize(example_text))
|
import pygame as py
import math
import cmath
import numpy as np
import os
import cv2
# Complex number Plane
# Complex number Plane
class Plane:
    """Pygame window that maps complex numbers to pixels and draws a grid,
    a set of drawable objects, and text labels."""
    # Plane initialization
    def __init__(self, grid,
                 center, win_scale):
        # grid: (rows, columns) of the background grid.
        # center: grid coordinates of the complex-plane origin.
        # win_scale: (width, height) of the window in pixels.
        self.rows = grid[0]
        self.columns = grid[1]
        self.center = center
        self.width = win_scale[0]
        self.height = win_scale[1]
        self.dist_y = self.height//self.rows      # px between horizontal grid lines
        self.dist_x = self.width//self.columns    # px between vertical grid lines
    # Plane begin
    def begin(self):
        """Open the pygame window."""
        py.init()
        self.win = py.display.set_mode((self.width, self.height))
        py.display.set_caption('Complex animation')
    # Plane Update
    def update(self, obj, text):
        """Redraw background, grid, origin marker, objects and labels."""
        # background
        self.win.fill((230,230,230))
        # horizontal grid lines: one per interior row boundary
        for i in range(self.rows - 1):
            py.draw.line(self.win, (127, 127, 127),
                         (0, (i + 1) * self.dist_y),
                         (self.width, (i + 1) * self.dist_y))
        # vertical grid lines: one per interior column boundary.
        # BUG FIX: the original iterated range(self.height - 1) (a pixel
        # count), drawing hundreds of off-screen lines; mirror the
        # horizontal case with range(self.columns - 1).
        for i in range(self.columns - 1):
            py.draw.line(self.win, (127, 127, 127),
                         ((i + 1) * self.dist_x, 0),
                         ((i + 1) * self.dist_x, self.height))
        # origin marker
        py.draw.circle(self.win,
                       (127, 127, 127),
                       (self.dist_x*self.center[0],
                        self.dist_y*self.center[1]), 5)
        # Draw objects
        for o in obj:
            o.draw(self)
        # Write Text
        for t in text:
            t.write(self)
        py.display.update()
    # Given a complex number return the Pygame window position
    def real_position(self, z):
        """Map complex *z* to an (x, y) pixel tuple (y axis flipped)."""
        return (round((self.center[0] + z.real)*self.dist_x),
                round((self.center[1] - z.imag)*self.dist_y))
    # Quit plane
    def quit(self):
        py.quit()
# Point class
# Point class
class Point:
    """A colored dot marking one complex number on the plane."""
    def __init__(self, z, color):
        # Radius fixed at 10 px, matching the original rendering.
        self.ratio = 10
        self.color = color
        self.z = z
    def draw(self, plane):
        """Blit the point onto *plane* at its mapped pixel position."""
        where = plane.real_position(self.z)
        py.draw.circle(plane.win, self.color, where, self.ratio)
# Graph class
# Graph class
class Graph:
    """Parametric curve (x(t), y(t)) sampled over [start, stop) by step."""
    def __init__(self, x_function, y_function, start, stop, step):
        self.x_function = x_function
        self.y_function = y_function
        self.start = start
        self.stop = stop
        self.step = step
    def draw(self, plane):
        """Plot the curve as small black dots on *plane*."""
        for t in np.arange(self.start, self.stop, self.step):
            sample = complex(self.x_function(t), self.y_function(t))
            py.draw.circle(plane.win, (0, 0, 0), plane.real_position(sample), 2)
# Text class
# Text class
class Text(object):
    """A pre-rendered text label anchored at a complex-plane position."""
    def __init__(self, text, pos, color, typeface, size, bold = False, italic = False):
        font = py.font.SysFont(typeface, size, bold, italic)
        self.font = font
        self.text_to_write = font.render(text, True, color)
        self.pos = pos
    def write(self, plane):
        """Blit the rendered label onto *plane*."""
        anchor = plane.real_position(self.pos)
        plane.win.blit(self.text_to_write, anchor)
# Video Maker class
# Save animation frames on "images" folder
# Video Maker class
# Save animation frames on "images" folder
class Video_Maker():
    # Assembles an XVID video from pygame frames: each frame is saved as a
    # JPEG named <name><zero-padded counter> and appended to the video.
    # NOTE(review): frame paths are hard-coded to C:\Repositories\Complexn,
    # so this only works on that machine.
    def __init__(self, video_name, name, max_images, fps, save_flag = True):
        self.video_name = video_name
        self.save_flag = save_flag # If true the frames are saved
        self.name = name
        self.max_images = max_images
        self.fps = fps
        # Create counter for images names: one list entry per decimal digit,
        # least-significant digit first.
        # NOTE(review): with true division, max_images=100 yields only two
        # digit slots (100 -> 10.0 -> 1.0), which looks one digit short --
        # confirm the intended capacity.
        self.counter = []
        while max_images > 1:
            max_images /= 10
            self.counter.append(0)
    # Create openCV videoWritter
    def begin(self, plane):
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.video_writter = cv2.VideoWriter(self.video_name,fourcc, self.fps, (plane.width,plane.height), True)
    def save_image(self, plane):
        # Save the current pygame surface to disk and append it to the video.
        if self.save_flag: # save images only if save_flag == True
            # Verify self.counter digit overflow (carry propagation)
            for i in range(len(self.counter)):
                if self.counter[i] > 9:
                    if i+1 < len(self.counter):
                        self.counter[i+1] += 1
                        self.counter[i] = 0
                    else:
                        # Ran out of digits: stop saving frames.
                        self.save_flag = False
                else:
                    break
            # convert to string the counter number (most-significant first)
            number = ""
            for i in range(len(self.counter)-1, -1, -1):
                number += str(self.counter[i])
            # Save image and increment the counter
            if (int(number) <= self.max_images) and self.save_flag:
                # save image
                py.image.save(plane.win,
                              os.path.join("C:\\Repositories\\Complexn\\images\\",
                                           self.name + number + ".jpeg"))
                # read the frame back for OpenCV
                img = cv2.imread(os.path.join("C:\\Repositories\\Complexn\\images\\",
                                              self.name + number + ".jpeg"))
                # add to video
                self.video_writter.write(img)
                self.counter[0] += 1 # increment counter
            else:
                self.save_flag = False
    # save the final video
    def release(self):
        self.video_writter.release()
def fasta_reader(bestand):
    """Extract sequences from a FASTA file.

    Lines starting with ">" are headers; all other lines are appended to
    the current record. Returns a list with one joined sequence string per
    record. An empty file yields [] (the original returned [""]).
    """
    seqs = []
    seq = []
    with open(bestand) as inFile:
        for line in inFile:
            if not line.startswith(">"):
                seq.append(line.strip())
            else:
                # A new header closes the previous record, if any.
                if seq:
                    seqs.append("".join(seq))
                    seq = []
    # Flush the final record; the guard keeps an empty file from
    # producing a single empty-string "sequence".
    if seq:
        seqs.append("".join(seq))
    return seqs
def transitions_transversions(seqs):
    """Print and return the transition/transversion ratio of two aligned
    sequences (seqs[0] vs seqs[1]).

    Transitions are A<->G and C<->T substitutions; any other substitution
    is a transversion. Matching positions are ignored.
    Raises ZeroDivisionError when no transversions occur.
    """
    transition_pairs = {("A", "G"), ("G", "A"), ("C", "T"), ("T", "C")}
    transitions = 0
    transversions = 0
    # Walk both sequences position by position.
    for base1, base2 in zip(seqs[0], seqs[1]):
        # Ignore the positions where the nucleotides are equal.
        if base1 == base2:
            continue
        if (base1, base2) in transition_pairs:
            transitions += 1
        else:
            transversions += 1
    ratio = transitions / transversions
    print(ratio)
    # Returning the ratio (the original only printed it) so callers can
    # use the value; backward compatible since nothing consumed None.
    return ratio
if __name__ == '__main__':
    # Dead store removed: the original assigned the practice filename
    # ("transitions_and_transversions.txt") and immediately overwrote it
    # with the Rosalind dataset filename.
    bestand = "rosalind_tran.txt"
    seqs = fasta_reader(bestand)
    transitions_transversions(seqs)
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.forms import *
class RegistrationForm(UserCreationForm):
    # Sign-up form: extends Django's UserCreationForm with email and
    # real-name fields. Labels are user-facing Russian strings.
    username = CharField(min_length=5, label='Логин')
    password1 = CharField(min_length=8, widget=PasswordInput, label='Пароль')
    password2 = CharField(min_length=8, widget=PasswordInput, label='Повторите ввод')
    email = EmailField(label='Email')
    first_name = CharField(max_length=30, label='Введите имя')
    last_name = CharField(max_length=30, label='Введите фамилию')
    class Meta:
        # Field order as rendered on the form.
        fields = [
            'username',
            'password1',
            'password2',
            'email',
            'first_name',
            'last_name'
        ]
        model = User
class AuthorizationForm(AuthenticationForm):
    # Login form: overrides the default fields to add length minimums and
    # Russian labels.
    username = CharField(min_length=5, label='Логин')
    password = CharField(min_length=8, widget=PasswordInput, label='Пароль')
|
# -*- coding: UTF-8 -*-
import requests
import re, json, os
def word2string(url, filename):
    """POST the Word document *filename* to *url* (a word-to-markdown
    service) and return the response body as text.

    Fixes from the original: the upload file handle was never closed
    (resource leak), and the filename/response text went through no-op
    encode('utf-8').decode() round-trips.
    """
    print("filename: ", filename)
    # Context manager guarantees the handle is closed even if the
    # request raises.
    with open(filename, 'rb') as doc:
        files = [
            ('file', doc)
        ]
        response = requests.request("POST", url, headers={}, data={}, files=files)
    return response.text
# test
if __name__ == "__main__":
    # Ad-hoc smoke test against an internal conversion service.
    url = "http://172.27.128.117:5022/api/word2md"
    filename = 'upload/d_line1/20201209_000286_银华信用季季红债券型证券投资基金更新招募说明书(2020年第3号)的副本.docx'
    string = word2string(url, filename)
    print(string)
|
# -*- coding: utf-8 -*-
def social_pant(entity, argument):
    """Stub handler: accepts any entity/argument pair and reports success."""
    result = True
    return result
#- End of function -
|
from constructors.htmlObj import HTMLObject
from constructors import htmlSnippets
from functions.printFunctions import inputClear
from functions.misc import locationParse
from datetime import datetime
from bs4 import BeautifulSoup
import os
import time
def fileHandling():
    """Create a dated project directory, chdir into it, and (optionally)
    optimize any JPEGs the user drops in, moving them to an img/ subfolder.

    Returns the generated project directory name.
    """
    PROJECT_NAME = inputClear('What should I name the project directory? ')
    PROJECT_NAME = "{}-{}-{}_{}".format(str(datetime.now().year), str(datetime.now().month),
                                       str(datetime.now().day), PROJECT_NAME)
    # os.makedirs instead of `os.system('mkdir ' + name)`: handles spaces
    # and shell metacharacters in the user-supplied name safely.
    os.makedirs(PROJECT_NAME, exist_ok=True)
    os.chdir(PROJECT_NAME)
    blank = inputClear(
        'Made directory \"{}\". Move your image files into the folder created on your and then press enter. Just press enter if there are no images to optimize.'.format(PROJECT_NAME))
    if len(os.listdir()) > 0:
        os.makedirs("img", exist_ok=True)
        # optimize files (jpegoptim must be on PATH; the globs need a shell)
        os.system("jpegoptim -m90 *.jpg")
        os.system("jpegoptim -m90 *.jpeg")
        os.system("mv *.jpg img")
        os.system("mv *.jpeg img")
    else:
        print("I didn't find any images - continuing")
        time.sleep(1)
    return(PROJECT_NAME)
def mappingSequence():
    """Interactively build a 2D grid of HTMLObjects (rows x columns),
    attach optional images and one optional video, then assemble and
    return the final HTML string (with the matching CSS snippet)."""
    # Creates HTMLObjects in a 2D array - each row has nested columns
    vdom = []
    hasVideo = False
    rows = int(inputClear('How many rows? '))
    for row in range(rows):
        colList = []
        cols = int(inputClear('How many columns in row {}? '.format(str(row+1))))
        for col in range(cols):
            colList.append(HTMLObject(row + 1, col + 1))
        vdom.append(colList)
    # IMAGES
    numImages = int(inputClear('How many cells contain images (0 for none)? '))
    if numImages > 0:
        for i in range(numImages):
            insertLocation = inputClear(
                'What is the location for image {} in row/col format, e.g. 1,3? '.format(str(i+1)))
            insertLocation = locationParse(insertLocation)
            altText = inputClear('Enter alt text for this image: ')
            # NOTE(review): assumes locationParse returns 0-based [row, col]
            # indices -- confirm against functions.misc.locationParse.
            vdom[insertLocation[0]][insertLocation[1]].setImage(altText)
    # VIDEO
    isVideo = int(inputClear('Is there a video (1 yes, 0 no)? '))
    if isVideo:
        hasVideo = True
        insertLocation = inputClear(
            'What is the location of the video in row/col format, e.g. 1,3? ')
        insertLocation = locationParse(insertLocation)
        altText = inputClear('Enter alt text for the thumbnail image: ')
        link = inputClear('Enter embed link for the YT video: ')
        vdom[insertLocation[0]][insertLocation[1]].setVideo(altText, link)
    # Assemble HTML: concatenate each cell's markup into its row container.
    finalHTML = ""
    for row in range(len(vdom)):
        columnCode = ""
        for col in range(len(vdom[row])):
            flexBasis = inputClear(
                'Set flex basis percentage for row {} column {}. (Enter for 50% default): '.format(str(row+1), str(col + 1)))
            vdom[row][col].setFlexBasis(flexBasis)
            columnCode = columnCode + \
                vdom[row][col].renderComponent()
        rowCode = "<div class='container {}'>{}</div>".format(
            "row{}".format(str(row+1)), columnCode)
        finalHTML = finalHTML + rowCode
    # Wrap everything with the appropriate CSS/modal snippet.
    if hasVideo:
        finalHTML = finalHTML + htmlSnippets.videoModal
        finalHTML = htmlSnippets.cssWithVideoModal + \
            "<div class='x'>" + finalHTML + "</div>"
    else:
        finalHTML = htmlSnippets.css + "<div class='x'>" + finalHTML + "</div>"
    return(finalHTML)
# Build the project skeleton, assemble the HTML, prettify it and save it.
PROJECT_NAME = fileHandling()
vdom = mappingSequence()
# prettify the HTML
vdom = BeautifulSoup(vdom, "html.parser")
vdom = vdom.prettify()
filename = PROJECT_NAME + '.html'
try:
    with open(filename, 'w') as file:
        file.write(vdom)
    print("Saved file as {}.html".format(PROJECT_NAME))
except Exception as e:
    print(e)
|
from sklearn.neural_network import MLPClassifier
'''
class MLPClassifier(hidden_layer_sizes=(100,), activation="relu", solver='adam', alpha=0.0001, batch_size='auto', learning_rate="constant", learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=1e-4, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic gradient descent.
Parameters
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default 'adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
by Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively large datasets (with thousands of training samples or more) in terms of both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by 'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate `learning_rate_` at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to 'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at least tol, or fail to increase validation score by at least tol if 'early_stopping' is on,
the current learning rate is divided by 5.
Only used when `solver='sgd'`.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when solver='sgd' or 'adam'.
random_state : int, RandomState instance or None, optional, default None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving by at least tol for two consecutive iterations,
unless `learning_rate` is set to 'adaptive', convergence is considered to be reached and training stops.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only used when solver='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when solver='sgd' and momentum 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation score is not improving.
If set to true,
it will automatically set aside 10% of training data as validation and terminate training when validation score is not improving by at least tol for two consecutive epochs.
Only effective when solver='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1). Only used when solver='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1). Only used when solver='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when solver='adam'
Attributes
classes_ : array or list of array of shape (n_classes,)
Class labels for each output.
loss_ : float
The current loss computed with the loss function.
coefs_ : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding to layer i.
intercepts_ : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to layer i + 1.
n_iter_ : int,
The number of iterations the solver has ran.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : string
Name of the output activation function.
Notes
MLPClassifier trains iteratively since at each time step the partial derivatives of the loss function with respect to the model parameters are computed to update the parameters.
It can also have a regularization term added to the loss function that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or sparse scipy arrays of floating point values.
'''
# Toy dataset: 12 samples of 3 features, four classes with 3 samples each.
tt = [[5, 2, 12], [7.8, 1.9, 15], [5.5, 2.5, 11],
      [0.1, 0.005, 0.1], [0.5, -0.05, 0.15], [-0.2, 0, 0.4],
      [5.1, 0.01, -0.3], [7, -0.3, 0.02], [4.8, 0.5, 0],
      [5.7, 2.2, -0.1], [4.95, 1.98, -0.05], [6.2, 2.01, 0.2]]
labels = [0, 0, 0,
          1, 1, 1,
          2, 2, 2,
          3, 3, 3]
# lbfgs suits this tiny dataset; one hidden layer of 4 logistic units.
clf = MLPClassifier(activation = 'logistic', tol = 0.000001, hidden_layer_sizes = (4, ), solver = 'lbfgs')
clf.fit(tt, labels)
# Pull the fitted attributes out for inspection.
classes = clf.classes_
loss = clf.loss_
coefs = clf.coefs_
intercepts = clf.intercepts_
n_iter = clf.n_iter_
n_layers = clf.n_layers_
n_outputs = clf.n_outputs_
out_activation = clf.out_activation_
print(clf)
# Four unseen samples, roughly one per class.
t = [[6.72, 2.3, 13.4], [0.1, -0.08, 0.13], [5.1, 0.25, 0], [5.2, 1.8, 0.3]]
#t = np.array(t)
#t = np.transpose(t)
# t = np.mat(t)
#sims = [clf.predict(t[:, idx]) for idx in range(4)]
sim = clf.predict(t)
# Read numbers until the user enters 0, then report how many numbers were
# entered (excluding the terminating 0) and their sum.
som = 0
count = -1  # start at -1 so the terminating 0 is not counted
while True:
    # SECURITY FIX: the original used eval() on raw keyboard input, which
    # executes arbitrary expressions; parse a plain number instead
    # (int first so whole numbers print without a trailing .0).
    raw = input("noem een getal: ")
    try:
        getal = int(raw)
    except ValueError:
        getal = float(raw)
    som += getal
    count += 1
    if getal == 0:
        break
print("Er zijn "+str(count)+" getallen ingevoerd, de som is: "+ str(som))
# -*- coding:utf-8 -*-
'''
pytorch 0.4.0
data: https://download.pytorch.org/tutorial/faces.zip
tutorials: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
'''
from __future__ import print_function, division
import os
import torch
import pandas as pd # 用于解析csv文件
from skimage import io, transform # 用于读取图片和图像变换
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
plt.ion() # 交互模式
# ########################## 加载csv文件 ##################################################
def load_csv(path, n=65):
    '''
    Load one record from a face-landmarks CSV file.

    :param path: CSV path (first column: image name, remaining columns:
        alternating x/y landmark coordinates)
    :param n: row index of the record to load (default 65, as in the
        original tutorial; added as a parameter for reuse)
    :return: (image name, landmarks as an (N, 2) float ndarray)
    '''
    landmarks_frame = pd.read_csv(path)
    img_name = landmarks_frame.iloc[n, 0]
    # .as_matrix() was removed in pandas 1.0; .to_numpy() is the replacement.
    landmarks = landmarks_frame.iloc[n, 1:].to_numpy()
    landmarks = landmarks.astype('float').reshape(-1, 2)
    print('Image name: {}'.format(img_name))
    print('Landmarks shape: {}'.format(landmarks.shape))
    print('First 4 Landmarks: {}'.format(landmarks[:4]))
    return img_name, landmarks
# #########################################################################################
# ############################### 显示图片 #################################################
def show_landmarks(image, landmarks):
    '''
    Display an image with its landmark points overlaid.
    :param image: image array as accepted by plt.imshow
    :param landmarks: (N, 2) array of x/y landmark coordinates
    :return: None
    '''
    plt.imshow(image)
    plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
    plt.pause(0.001)  # brief pause so the plot window refreshes
# plt.figure()
# img_name, landmarks = load_csv('./faces/face_landmarks.csv')
# show_landmarks(io.imread(os.path.join('faces/', img_name)), landmarks)
# plt.show()
# #########################################################################################
# ############################### 显示图片 #################################################
'''
torch.utils.data.Dataset 是一个表示数据集的抽象类. 你自己的数据集一般应该继承``Dataset``, 并且重写下面的方法:
__len__ 使用``len(dataset)`` 可以返回数据集的大小
__getitem__ 支持索引, 以便于使用 dataset[i] 可以 获取第:math:i个样本(0索引)
'''
class FaceLandmarksDataset(Dataset):
    '''
    Face-landmarks dataset (torch.utils.data.Dataset subclass).

    Each sample is a dict {'image': ndarray, 'landmarks': (N, 2) float
    ndarray}, optionally passed through a transform callable.
    '''
    def __init__(self, csv_file, root_dir, transform=None):
        '''
        :param csv_file: CSV file with image names and landmark coordinates
        :param root_dir: directory containing the images
        :param transform: optional callable applied to each sample
        '''
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        # One CSV row per sample.
        return len(self.landmarks_frame)
    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx, 0])
        image = io.imread(img_name)
        # .as_matrix() was removed in pandas 1.0; .to_numpy() is the
        # replacement.
        landmarks = self.landmarks_frame.iloc[idx, 1:].to_numpy()
        landmarks = landmarks.astype('float').reshape(-1, 2)
        sample = {'image': image, 'landmarks': landmarks}
        if self.transform:
            sample = self.transform(sample)
        return sample
# ############################################################################################
# ############################### 改变图像大小 #################################################
'''
大多数神经网络需要输入 一个固定大小的图像, 因此我们需要写代码来处理. 让我们创建三个transform操作:
Rescale: 修改图片尺寸
RandomCrop: 随机裁切图片, 这是数据增强的方法
ToTensor: 将numpy格式的图片转为torch格式的图片(我们需要交换坐标轴)
我们不将它们写成简单的函数, 而是写成可以调用的类, 这样transform的参数不需要每次都传递 如果需要的话, 我们只
需实现 __call__ 方法和``__init__`` 方法.之后我们可以像下面这 样使用transform:
tsfm = Transform(params)
transformed_sample = tsfm(sample)
'''
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): desired size. A tuple is used directly
            as (height, width); an int rescales the shorter edge to that
            value while preserving the aspect ratio.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        h, w = image.shape[:2]  # image layout is (height, width, channels)
        size = self.output_size
        if isinstance(size, int):
            # int: the shorter edge becomes `size`, aspect ratio preserved
            if h > w:
                new_h, new_w = size * h / w, size
            else:
                new_h, new_w = size, size * w / h
        else:
            new_h, new_w = size
        new_h, new_w = int(new_h), int(new_w)
        img = transform.resize(image, (new_h, new_w))
        # Landmarks scale with the image; x pairs with width, y with height.
        landmarks = landmarks * [new_w / w, new_h / h]
        return {'image': img, 'landmarks': landmarks}
# ############################################################################################
# ############################### 随机裁剪图片 #################################################
class RandomCrop(object):
    """Randomly crop the image in a sample.

    Args:
        output_size (tuple or int): Desired output size; an int yields a
            square crop.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size
    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        height, width = image.shape[:2]
        crop_h, crop_w = self.output_size
        # Pick the top-left corner of the crop uniformly at random.
        top = np.random.randint(0, height - crop_h)
        left = np.random.randint(0, width - crop_w)
        cropped = image[top:top + crop_h, left:left + crop_w]
        # Shift the landmarks into the cropped coordinate frame.
        landmarks = landmarks - [left, top]
        return {'image': cropped, 'landmarks': landmarks}
# ############################################################################################
# ############################### 数据类型转变,ndarrays->Tensor ###############################
class ToTensor(object):
    """Convert the ndarrays of a sample into torch tensors."""
    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']
        # numpy images are (H, W, C); torch images are (C, H, W),
        # so move the channel axis to the front before converting.
        chw_image = image.transpose((2, 0, 1))
        return {'image': torch.from_numpy(chw_image),
                'landmarks': torch.from_numpy(landmarks)}
# ############################################################################################
# ############################### Compose transforms and DataLoader ##########################
'''
如果我们想将图片的短边变为256像素, 并且随后随机裁切成224像素的正方形. i.e,
``Rescale``和``RandomCrop``变换. torchvision.transforms.Compose
就是一个可以做这样一个组合的可调用的类.
scale = Rescale(256)
crop = RandomCrop(128)
composed = transforms.Compose([Rescale(256),
RandomCrop(224)])
'''
def get_data_loader(path, root_path):
    """Build a DataLoader over the face-landmarks dataset.

    :param path: CSV file with the landmark annotations
    :param root_path: directory containing the images
    :return: DataLoader yielding shuffled batches of 4 transformed samples
    """
    pipeline = transforms.Compose([
        Rescale(256),
        RandomCrop(224),
        ToTensor(),
    ])
    dataset = FaceLandmarksDataset(csv_file=path, root_dir=root_path,
                                   transform=pipeline)
    return DataLoader(dataset, batch_size=4, shuffle=True)
# ############################################################################################
# ############################### 显示指定 batch 的数据样本的图片和标记点 #########################
def show_landmarks_batch(sample_batched):
    """Plot every image of a batch in one grid with its landmark points."""
    images_batch = sample_batched['image']
    landmarks_batch = sample_batched['landmarks']
    batch_size = len(images_batch)
    im_size = images_batch.size(2)
    grid = utils.make_grid(images_batch)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))
    # Images sit side by side in the grid, so offset each sample's x
    # coordinates by its position.
    for idx in range(batch_size):
        plt.scatter(landmarks_batch[idx, :, 0].numpy() + idx * im_size,
                    landmarks_batch[idx, :, 1].numpy(),
                    s=10, marker='.', c='r')
    plt.title('Batch from dataloader')
# ############################################################################################
# Iterate over the batches, printing their tensor shapes, and visualize the
# fourth batch before stopping.
data_loader = get_data_loader('./faces/face_landmarks.csv','./faces')
for i_batch, sample_batched in enumerate(data_loader):
    print(i_batch, sample_batched['image'].size(),
          sample_batched['landmarks'].size())
    # Stop once the fourth batch has been observed.
    if i_batch == 3:
        plt.figure()
        show_landmarks_batch(sample_batched)
        plt.axis('off')
        plt.ioff()
        plt.show()
        break
|
from tkinter import *
from V_windows.V_search.V_SearchMain import V_Search
from V_windows.V_readerEntrance.V_ReaderEntrance import V_ReaderEntrance
from V_windows.V_adminEntrance.V_AdminEntrance import V_AdminEntrance
class V_Home():
    """Main window of the library system: book search, reader entrance,
    administrator entrance, and an exit button."""
    def __init__(self):
        self.size = '450x250'
        self.locate = '+10+10'
        self.root = Tk()
        self.root.title('主页')
        self.root.geometry(self.size)
        self.root.geometry(self.locate)
        self.root.resizable(0,0)
        # Build the button column; each button is preceded by a blank spacer
        # label, matching the original layout order.
        entries = [
            ('查询书籍', self.openSearchEntrance),
            ('读者入口', self.openReaderWindow),
            ('管理员入口', self.openAdminWindow),
            ('关闭本系统', self.root.destroy),
        ]
        for text, command in entries:
            spacer = Label(self.root)
            spacer.pack(side = TOP)
            button = Button(self.root, text = text, command = command, font = 'Consoles')
            button.pack(side = TOP)
        mainloop()
    def openSearchEntrance(self):
        # Open the book-search window.
        searchEntrance = V_Search()
        return
    def openReaderWindow(self):
        # Open the reader entrance window.
        readerEntrance = V_ReaderEntrance()
        return
    def openAdminWindow(self):
        # Open the administrator entrance window.
        adminEntrance = V_AdminEntrance()
        return
a = V_Home()
import numpy as np
# Build a global 1-degree latitude/longitude grid (181 latitudes x 361
# longitudes) and write it out as "lat,long,type" rows with type fixed at 1.
lats = np.arange(-90, 91, 1)
longs = np.arange(-180, 181, 1)
lats, longs = np.meshgrid(lats, longs)
lats = lats.flatten()
longs = longs.flatten()
types = np.ones(len(lats), dtype=np.int64)
# One row per grid point: (lat, long, type).
coors = np.column_stack((lats, longs, types))
print(coors)
np.savetxt("MapCoors.txt", coors, fmt=('%.4f', '%.4f', '%d'), delimiter=',')
import schedule
import time
import datetime
import threading
import os
from googlemaps import Client
from time import gmtime, strftime
def partial(func, *args, **kwargs):
    """Bind positional and keyword arguments to *func* (cf. functools.partial).

    Keyword arguments given at call time override the bound ones.
    """
    def wrapper(*more_args, **more_kwargs):
        merged_kwargs = dict(kwargs)
        merged_kwargs.update(more_kwargs)
        return func(*args, *more_args, **merged_kwargs)
    return wrapper
def format_time(hours, minutes):
    """Format a schedule time string; *minutes* counts tens of minutes.

    e.g. format_time(5, 3) -> "5:30".
    """
    return "{}:{}0".format(hours, minutes)
def find_traffic(hours, minutes):
    """Query Google Maps for commute durations and append them to times.csv.

    Before 8:00 the route is queried address -> workplace; afterwards it is
    workplace -> queried address.

    :param hours: hour of the query (also decides the route direction)
    :param minutes: tens-of-minutes value, formatted via format_time()
    """
    # SECURITY: the API key is hard-coded; it should be loaded from an
    # environment variable or config file, and this key revoked/rotated.
    gmaps = Client('AIzaSyCaQlauoQ1njrABzhVCliY49DaByZNYkTY')
    cassie_work = '3237 S 16th St, Milwaukee, WI 53215'
    joey_work = '1550 Innovation Way, Hartford, WI 53027'
    # "with" guarantees the files are closed even if a directions call raises.
    with open('address.txt') as f:
        addresses = f.readlines()
    day = datetime.datetime.today().weekday()
    with open('times.csv', 'a') as out_file:
        for addr_newline in addresses:
            addr = addr_newline.rstrip()
            if hours < 8:
                # Morning: drive from the queried address to work.
                directions_cassie = gmaps.directions(addr, cassie_work)
                directions_joey = gmaps.directions(addr, joey_work)
            else:
                # Evening: drive from work back to the queried address.
                directions_cassie = gmaps.directions(cassie_work, addr)
                directions_joey = gmaps.directions(joey_work, addr)
            out_file.write(str(addr)+','+format_time(hours,minutes)+',Cassie,'+str(directions_cassie[0]['legs'][0]['duration']['value'])+',Joey,'+str(directions_joey[0]['legs'][0]['duration']['value'])+','+str(day)+'\n')
def run_threaded(job_func):
    """Run *job_func* on a background thread.

    :param job_func: zero-argument callable to execute
    :return: the started threading.Thread, so callers may join() it
    """
    job_thread = threading.Thread(target=job_func)
    job_thread.start()
    return job_thread
def test_job():
    """Print a marker line; used to verify that the scheduler fires."""
    print("TEST TEST TEST")
# def upload_file():
# times = []
# with open('times.csv') as f:
# times = f.readlines()
# for time in times:
# time = time.rstrip()
# time = time.split(',')
# data = {'address':time[0]+time[1]+time[2], 'time':time[3], 'Cassie':time[5], 'Joey':time[7]}
# r = requests.post('localhost:8000', data=data)
def schedule_tasks():
    """Register a find_traffic job every 10 minutes during the hours
    5-7 (morning) and 14-16 (afternoon)."""
    hours = 5
    minutes = 0
    for hour_block in range(6):
        for _ in range(6):
            job = partial(find_traffic, hours, minutes)
            schedule.every().day.at(format_time(hours, minutes)).do(job)
            minutes += 1
        minutes = 0
        # After the third hour (ends at 7) jump to the afternoon block.
        if hour_block == 2:
            hours += 7
        else:
            hours += 1
def schedule_tasks_test():
    """Small variant of schedule_tasks: one job each at 18:00 and 19:00."""
    hours = 18
    minutes = 0
    for _ in range(2):
        for _ in range(1):
            job = partial(find_traffic, hours, minutes)
            schedule.every().day.at(format_time(hours, minutes)).do(job)
            minutes += 1
        minutes = 0
        hours += 1
#schedule.every().day.at('22:03').do(run_threaded,test_job)
#schedule_tasks()
# Print the PID so the long-running process can be found and killed later.
print(os.getpid())
schedule_tasks_test()
#find_traffic(5, 69)
#upload_file()
# Main loop: poll the scheduler every 30 seconds, forever.
while True:
    schedule.run_pending()
    time.sleep(30)
import numpy as np
def store_number(coords, board): # coords = (z, x, y): number z in row x, column y
    """Place a number on the 9x9x9 constraint board.

    board[z-1] is the layer for digit z; -1 marks cells where that digit can
    no longer go, 0 marks still-possible cells.

    :param coords: (number, row, column) triple
    :param board: 9x9x9 ndarray of constraint layers, modified in place
    """
    print(coords)
    z = coords[0]
    x = coords[1]
    y = coords[2]
    #xy[x][y] = z #stores number (z) in x,y
    for i in range(9): #places -1 in locations numbers can't be.
        board[i][x][y] = -1   # no other digit may occupy (x, y)
        board[z-1][i][y] = -1  # digit z may not repeat in column y
        board[z-1][x][i] = -1  # digit z may not repeat in row x
    subgrid_rule_out(coords, board)
    board[z-1][x][y] = z #for debugging to see what slice I'm in more easily
    #board[z-1][x][y] = 1 #when not debugging.
    #print(x,y,z, board[x][y][z-1])
    print("I stored a %d in row %d, column %d" % (z,x,y))
#function used in store_number()
def subgrid_rule_out(coords, board):
    """Mark digit z as impossible everywhere in its 3x3 subgrid.

    :param coords: (number, row, column) triple, as in store_number()
    :param board: 9x9x9 constraint board, modified in place
    """
    zz = coords[0]
    xx = coords[1]
    yy = coords[2]
    # Top-left corner of the 3x3 box containing (xx, yy).
    xbox = xx // 3
    ybox = yy // 3
    for i in range(3):
        for j in range(3):
            # The original wrote this cell twice per iteration; once suffices.
            board[zz-1][i + xbox*3][j + ybox*3] = -1
#searching rows for all -1 except a 0.
def search_rows(board):
    """Scan each digit-layer's rows for eight -1s plus a single 0.

    Such a row forces its digit into the one remaining cell.

    :return: (number, row, column) of the forced placement, or None.
    """
    row_sums = board.sum(axis=2)
    for hit in np.argwhere(row_sums == -8):
        layer, row = hit[0], hit[1]
        print(layer, row, "I found a ", layer+1, "to be placed somewhere in row", row+1, ". Let's go search where it goes")
        # Locate the single 0 left in this row of the layer.
        for col in range(9):
            if board[layer][row][col] == 0:
                print("i =", col, "a[0] =", layer, "a[1] =", row)
                return (layer+1, row, col)
def search_cols(board):
    """Scan each digit-layer's columns for eight -1s plus a single 0.

    :return: (number, row, column) of the forced placement, or None.
    """
    col_sums = board.sum(axis=1)
    for hit in np.argwhere(col_sums == -8):
        layer, col = hit[0], hit[1]
        print(layer, col, "I found a ", layer+1, "to be placed somewhere in column", col+1, ". Let's go search where it goes")
        # Locate the single 0 left in this column of the layer.
        for row in range(9):
            if board[layer][row][col] == 0:
                return (layer+1, row, col)
def search_depths(board):
    """Scan every (row, column) cell across all nine digit layers.

    Eight -1s and one 0 down the depth axis mean exactly one digit remains
    possible at that cell.

    :return: (number, row, column) of the forced placement, or None.
    """
    depth_sums = board.sum(axis=0)
    for hit in np.argwhere(depth_sums == -8):
        print("I found a number must be in coordinates:", hit[0], hit[1], ". Let's find out what number it is.")
        # Find which layer still has a 0 at this cell.
        for layer in range(9):
            if board[layer][hit[0]][hit[1]] == 0:
                return (layer+1, hit[0], hit[1])
def get_threebythree(board):
    """Scan every 3x3 box of every digit layer for eight non-zero entries.

    Eight -1s plus a lone 0 in a box force that digit into the zero cell.

    :return: (number, row, column) of the forced placement, or None.
    """
    for layer in range(9):  # one slice per digit
        for ybox in range(3):  # box-row index
            for xbox in range(3):  # box-column index
                box = board[layer, ybox*3:ybox*3+3, xbox*3:xbox*3+3]
                if np.count_nonzero(box) == 8:
                    print("FOUND!!!", layer, ybox, xbox)  # FOUND!!!
                    # Locate the single remaining 0 inside the box.
                    for m in range(3):
                        for n in range(3):
                            if board[layer, ybox*3 + m, xbox*3 + n] == 0:
                                return (layer+1, ybox*3 + m, xbox*3 + n)
def threetotwodimensions(board, boardina):
    """Project the 9x9x9 constraint board onto a 9x9 solution grid.

    A positive entry in layer a means digit a+1 is placed at that cell.

    :param board: 9x9x9 constraint board
    :param boardina: 9x9 output grid, filled in place
    """
    for layer in range(9):
        for row in range(9):
            for col in range(9):
                if board[layer, row, col] > 0:
                    boardina[row, col] = layer + 1
|
from django.urls import path
from chinook.views import (
AlbumListAPIView,
PlaylistListAPIView,
ReportDataAPIView,
GenreListAPIView,
TrackListAPIView,
CustomerSimplifiedAPIView,
TotalPerCustomerAPIView,
)
# URL routes for the chinook API; each route maps to a class-based view.
urlpatterns = [
    path("albums/", AlbumListAPIView.as_view()),
    path("genres/", GenreListAPIView.as_view()),
    path("tracks/", TrackListAPIView.as_view()),
    path("playlists/", PlaylistListAPIView.as_view()),
    path("report_data/", ReportDataAPIView.as_view()),
    path("customer_simplified/", CustomerSimplifiedAPIView.as_view()),
    path("total_per_customer/", TotalPerCustomerAPIView.as_view()),
]
|
from datetime import datetime
def current_date():
    """Return today's date formatted as YYYY/MM/DD."""
    now = datetime.now()
    return now.strftime("%Y/%m/%d")
def current_time():
    """Return the current wall-clock time formatted as HH:MM:SS."""
    now = datetime.now()
    return now.strftime("%H:%M:%S")
|
from pyengine.common.components.component import Component
class ScriptComponent(Component):
    """Component that attaches script behaviour to a game object."""
    def __init__(self, game_object):
        super().__init__(game_object)
        # NOTE(review): the trailing space in the name string is preserved
        # from the original; confirm whether it is intentional.
        self.name = "ScriptComponent "
    def to_dict(self):
        """Serialize this component to a plain dict."""
        return dict(name=self.name)
    @classmethod
    def from_dict(cls, game_object, values):
        """Recreate a component from a dict produced by to_dict()."""
        instance = cls(game_object)
        instance.name = values.get("name", "")
        return instance
|
def divisors(integer):
    """Return the non-trivial divisors of *integer* (excluding 1 and itself),
    or the string "<n> is prime" when there are none."""
    found = [d for d in range(2, integer) if integer % d == 0]
    return found if found else "{} is prime".format(integer)
|
from selenium import webdriver
from bs4 import BeautifulSoup
import xlwt
dr=webdriver.Chrome()
# Collect song-page URLs from the site's "updates" listing.
# NOTE(review): the loop breaks after its first iteration, so only page 0 is
# actually scraped despite range(0, 10) — confirm whether that is intended.
for i in range(0,10):
    dr.get('https://www.pagalworld.mobi/home/updates?page='+str(i))
    g=dr.page_source
    soup=BeautifulSoup(g,'html.parser')
    #f=soup.find_all('ul')
    urls = []
    # Gather every hyperlink on the page.
    for a in soup.find_all('a', href=True):
        urls.append(a['href'])
    songs=[]
    # Every third link between indices 1 and 60 is taken as a song page.
    # NOTE(review): this assumes the site's current link layout.
    for u in range(1,61,3):
        songs.append(urls[u])
    break
import time
wb = xlwt.Workbook()
ws = wb.add_sheet('A Test Sheet')
i=1
# For each song page: scrape title/description/cover image, trigger the MP3
# download, upload the file to Cloudinary, and record one spreadsheet row.
for s in songs:
    #dr = webdriver.Chrome()
    dr.get(s)
    time.sleep(4)  # wait for the page to render
    soupp = BeautifulSoup(dr.page_source,'html.parser')
    n=soupp.find('div',class_='col-xs-12 col-sm-12 col-md-12 col-lg-12 padding-10px')
    n=n.text
    name=n.strip()
    ws.write(i, 0, name)  # column 0: song name
    print(n.strip())
    pp=soupp.find_all('div',class_='col-xs-8 col-sm-12 col-md-9 col-lg-9 f-desc')
    print(pp[1].text)
    ws.write(i, 1, pp[1].text)  # column 1: description text
    g=(soupp.find('div',class_='alb-img-det'))
    print('https://www.pagalworld.mobi'+str(g.img['src']))
    ws.write(i,2,'https://www.pagalworld.mobi'+str(g.img['src']))  # column 2: cover URL
    dr.find_element_by_class_name('dbutton').click()
    # Cloudinary settings using python code. Run before pycloudinary is used.
    # NOTE(review): the fixed 60 s sleep presumably waits for the MP3
    # download to finish — confirm; a download-completion check would be safer.
    time.sleep(60)
    import cloudinary
    # SECURITY: credentials must not be hard-coded; load them from
    # environment variables instead of these placeholder strings.
    cloudinary.config(
        cloud_name='cloud name',
        api_key='key',
        api_secret='api'
    )
    import cloudinary.uploader
    import cloudinary.api
    # The browser saves the MP3 into the default Downloads folder.
    url = "C:/Users/dell/Downloads/"
    file = name + '.mp3'
    url = url + file
    f = cloudinary.uploader.upload(url,
                                   resource_type="video")
    ws.write(i,3,f['url'])  # column 3: uploaded audio URL
    print(f['url'])
    wb.save('example.xls')  # save after every row so progress survives crashes
    i+=1
|
######################################################################
# **Medial axis skeletonization**
#
# The medial axis of an object is the set of all points having more than one
# closest point on the object's boundary. It is often called the *topological
# skeleton*, because it is a 1-pixel wide skeleton of the object, with the same
# connectivity as the original object.
#
# Here, we use the medial axis transform to compute the width of the foreground
# objects. As the function ``medial_axis`` returns the distance transform in
# addition to the medial axis (with the keyword argument ``return_distance=True``),
# it is possible to compute the distance to the background for all points of
# the medial axis with this function. This gives an estimate of the local width
# of the objects.
#
# For a skeleton with fewer branches, ``skeletonize`` or ``skeletonize_3d``
# should be preferred.
from skimage.morphology import medial_axis, skeletonize, skeletonize_3d
from skimage.io import imread
from skimage.filters import threshold_otsu
import matplotlib.pyplot as plt
# Load the character image and binarize it with Otsu's global threshold.
data = imread('CharImage/hangul_32.jpeg')
global_thresh = threshold_otsu(data)
binary_global = data > global_thresh
data = binary_global
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Compare with other skeletonization algorithms
skeleton = skeletonize(data)
skeleton3d = skeletonize_3d(data)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
# Show original, medial axis (colored by local width), and both skeletons.
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(dist_on_skel, cmap='magma', interpolation='nearest')
ax[1].contour(data, [0.5], colors='w')
ax[1].set_title('medial_axis')
ax[1].axis('off')
ax[2].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest')
ax[2].set_title('skeletonize')
ax[2].axis('off')
ax[3].imshow(skeleton3d, cmap=plt.cm.gray, interpolation='nearest')
ax[3].set_title('skeletonize_3d')
ax[3].axis('off')
fig.tight_layout()
plt.show()
|
import logging
import os
from collections import OrderedDict
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtGui
import io
from matplotlib.colors import LogNorm
from module.Module import ProcModule
from module.OneDScanProc import OneDScanProc
from module.RawFile import RawFile
# Physical constants used by the intensity calculations below.
RO2 = 7.94E-30  # scattering cross section of the electron
LAMBDA = 1.5418E-10  # Cu-1 wavelength
F_GAP = 3249.001406  # structure factor; NOTE(review): i_theory() uses a different local value — confirm which is correct
L = 1.846012265  # Lorentz factor
P = 0.853276107  # Polarization factor
V_A = 5.4505E-10 ** 3  # GaP cell volume
U = 1000000 / 37.6416  # mu (absorption) — TODO confirm units
def _bragg_angle_cal(lattice, xtal_hkl):
    """
    Calculate the Bragg angle (2-theta, degrees) for the crystal plane
    given by its Miller index.
    >>> hkl_l = [(0, 0, 2), (0, 0, 4), (0, 0, 6), (2, 2, -4)]
    >>> hkl_d = {i: _bragg_angle_cal(0.54505, i) for i in hkl_l}
    >>> assert abs(hkl_d[(0, 0, 2)]-32.8) < 0.1
    """
    # Interplanar spacing d = a / |hkl|, lattice constant given in nm.
    hkl_norm = np.linalg.norm(np.asarray(xtal_hkl))
    spacing = lattice * 1E-9 / hkl_norm
    theta = np.arcsin(LAMBDA / (2 * spacing))
    return np.rad2deg(theta) * 2
class PolesFigureProc(ProcModule):
refresh_canvas = QtCore.pyqtSignal(bool)
    def __init__(self, *args):
        """Create the poles-figure processing module with default parameters."""
        super(PolesFigureProc, self).__init__(*args)
        self.figure = plt.figure()
        # Default processing parameters, shown in the config dialog in order.
        self.param = OrderedDict([
            ('ADVANCED_SELECTION', False),  # polygon instead of square selection
            ('POLAR_AXIS', True),  # draw on a polar axis
            ('V_MIN', "10"),  # colour-norm minimum
            ('V_MAX', "1000"),  # colour-norm maximum
            ('THICKNESS', "900"),  # sample thickness (angstrom)
            ('SQUARE_SX', "16"),  # selection-square width
            ('SQUARE_SY', "16"),  # selection-square height
            ('PHI_OFFSET', "0"),  # phi rotation offset (degrees, 0-360)
            ('BEAM_INT', "100000"),  # beam intensity — TODO confirm usage
        ])
        self.xi = None
        self.yi = None
        self._gridded_flag = False  # not updated in the visible code
        self._build_plot_widget()
@property
def name(self):
return __name__.split('.')[-1]
@property
def supp_type(self):
return "PolesFigure",
    def _build_plot_widget(self):
        """Extend the base plot widget with peak-search and alignment actions."""
        super(PolesFigureProc, self)._build_plot_widget()
        # Toolbar action: automatic peak search.
        self._toolbar.addAction(
            QtGui.QIcon(QtGui.QPixmap('icons/search.png')),
            "Auto search peak...",
            self._pk_search,
        )
        # Toolbar action: convert peak intensities to volume fractions.
        self._toolbar.addAction(
            QtGui.QIcon(QtGui.QPixmap('icons/vertical-alignment.png')),
            "Horizontal align...",
            self._int2fraction,
        )
        # Stack toolbar, canvas and status bar vertically.
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addWidget(self._toolbar)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self._status_bar)
        self.plot_widget.setLayout(self.layout)
        self.plot_widget.resize(1000, 400)
        self.plot_widget.closeEvent = self.closeEvent
        self.refresh_canvas.connect(self.repaint)
def _build_config_widget(self):
config_widget = QtWidgets.QWidget(self.plot_widget)
config_layout = QtWidgets.QVBoxLayout()
config_widget.setLayout(config_layout)
"""
If use advanced selection mode, then polygon instead of rectangle will
be used for peak selection.
"""
advanced_selection_q_checkbox = QtWidgets.QCheckBox(
"Use Advanced Selection")
advanced_selection_q_checkbox.setChecked(
self.param["ADVANCED_SELECTION"])
advanced_selection_q_checkbox.toggled.connect(
partial(self._upt_param, "ADVANCED_SELECTION"))
polar_draw_q_checkbox = QtWidgets.QCheckBox("POLAR_AXIS")
polar_draw_q_checkbox.setChecked(
self.param["POLAR_AXIS"])
polar_draw_q_checkbox.toggled.connect(
partial(self._upt_param, "POLAR_AXIS"))
intensity_input_layout = IntensityInputWidget(self.param)
v_min_input_layout = QtWidgets.QVBoxLayout()
v_min_input_layout.addWidget(QtWidgets.QLabel('Norm Minimum:'))
v_min_line_edit = QtWidgets.QLineEdit()
v_min_line_edit.setText(self.param['V_MIN'])
v_min_line_edit.setInputMask("999999999")
v_min_line_edit.textChanged.connect(
partial(self._upt_param, "V_MIN")
)
v_min_input_layout.addWidget(v_min_line_edit)
v_max_input_layout = QtWidgets.QVBoxLayout()
v_max_input_layout.addWidget(QtWidgets.QLabel('Norm Maximum:'))
v_max_line_edit = QtWidgets.QLineEdit()
v_max_line_edit.setText(self.param['V_MAX'])
v_max_line_edit.setInputMask("999999999")
v_max_line_edit.textChanged.connect(
partial(self._upt_param, "V_MAX")
)
v_max_input_layout.addWidget(v_max_line_edit)
thickness_input_layout = QtWidgets.QVBoxLayout()
thickness_input_layout.addWidget(
QtWidgets.QLabel('Thickness of Sample(\u212B):'))
thickness_line_edit = QtWidgets.QLineEdit()
thickness_line_edit.setText(self.param['THICKNESS'])
thickness_line_edit.setInputMask("999999999")
thickness_line_edit.textChanged.connect(
partial(self._upt_param, "THICKNESS")
)
thickness_input_layout.addWidget(thickness_line_edit)
square_sx_input_layout = QtWidgets.QVBoxLayout()
square_sx_input_layout.addWidget(
QtWidgets.QLabel('Square Sx:'))
square_sx_line_edit = QtWidgets.QLineEdit()
square_sx_line_edit.setText(self.param['SQUARE_SX'])
square_sx_line_edit.setInputMask("99")
square_sx_line_edit.textChanged.connect(
partial(self._upt_param, "SQUARE_SX")
)
square_sx_input_layout.addWidget(square_sx_line_edit)
square_sy_input_layout = QtWidgets.QVBoxLayout()
square_sy_input_layout.addWidget(
QtWidgets.QLabel('Square Sy:'))
square_sy_line_edit = QtWidgets.QLineEdit()
square_sy_line_edit.setText(self.param['SQUARE_SY'])
square_sy_line_edit.setInputMask("99")
square_sy_line_edit.textChanged.connect(
partial(self._upt_param, "SQUARE_SY")
)
square_sy_input_layout.addWidget(square_sy_line_edit)
phi_offset_input_layout = QtWidgets.QVBoxLayout()
phi_offset_input_layout.addWidget(
QtWidgets.QLabel('Phi offset:'))
phi_offset_line_edit = QtWidgets.QLineEdit()
phi_offset_line_edit.setText(self.param['PHI_OFFSET'])
phi_offset_line_edit.setValidator(
QtGui.QIntValidator(0, 360, phi_offset_line_edit))
phi_offset_line_edit.textChanged.connect(
partial(self._upt_param, 'PHI_OFFSET')
)
phi_offset_input_layout.addWidget(phi_offset_line_edit)
config_layout.addWidget(advanced_selection_q_checkbox)
config_layout.addWidget(polar_draw_q_checkbox)
config_layout.addLayout(intensity_input_layout)
config_layout.addLayout(v_min_input_layout)
config_layout.addLayout(v_max_input_layout)
config_layout.addLayout(thickness_input_layout)
config_layout.addLayout(square_sx_input_layout)
config_layout.addLayout(square_sy_input_layout)
config_layout.addLayout(phi_offset_input_layout)
return config_widget
def _configuration(self):
self._configuration_wd = self._build_config_widget()
self.q_tab_widget = QtWidgets.QTabWidget()
self.q_tab_widget.addTab(self._configuration_wd, "Poles Figure")
self.q_tab_widget.closeEvent = self._configuration_close
self.q_tab_widget.show()
    @QtCore.pyqtSlot(bool)
    def repaint(self, message):
        from scipy.interpolate import griddata
        """
        This function is called when the canvas need repainting(including the
        first time painting).
        :param message:
        :return:
        """
        self.figure.clf()
        # Colour-norm limits; fall back to defaults if not configured yet.
        try:
            v_max = self.attr['V_MAX']
            v_min = self.attr['V_MIN']
        except KeyError:
            v_min = 10
            v_max = 10000
        # Axis limits come either from the DRV_1/DRV_2 motor arrays or from
        # explicit phi/khi bounds, depending on the data source.
        try:
            ver_min = int(self.attr['DRV_2'].min())
            ver_max = int(self.attr['DRV_2'].max())
            hor_min = int(self.attr['DRV_1'].min())
            hor_max = int(self.attr['DRV_1'].max())
            phi_offset = int(self.param['PHI_OFFSET'])
        except KeyError:
            ver_min = np.int64(self.attr['phi_min'])
            ver_max = np.int64(self.attr['phi_max'])
            hor_min = np.int64(self.attr['khi_min'])
            hor_max = np.int64(self.attr['khi_max'])
            phi_offset = np.int64(self.param['PHI_OFFSET'])
        plt.figure(self.figure.number)
        h, v = self.data.shape
        # Re-grid the measured data onto a regular 1-degree mesh.
        x = np.arange(ver_min, ver_max + 1, 1)
        y = np.arange(hor_min, hor_max + 1, 1)
        xx, yy = np.meshgrid(
            x,
            y,
        )
        x_r = np.linspace(ver_min, ver_max, v)
        y_r = np.linspace(hor_min, hor_max, h)
        xx_r, yy_r = np.meshgrid(
            x_r,
            y_r
        )
        self._gridded_data = griddata(
            (xx_r.flatten(), yy_r.flatten()),
            self.data.flatten(),
            (xx, yy),
            method='nearest',
        )
        logging.info("Gridded")
        if self.param["POLAR_AXIS"]:
            # Polar plot: phi (radians, shifted by phi_offset) vs khi.
            ax2d = plt.gcf().add_subplot(111, polar=True)
            xx, yy = np.meshgrid(
                np.radians(np.arange(ver_min, ver_max + 1, 1) + phi_offset),
                np.arange(hor_min, hor_max + 1, 1),
            )
            # NOTE(review): the polar branch hard-codes the colour range and
            # ignores v_min/v_max above — confirm whether that is intended.
            minim = 1
            maxim = 10000
            im = ax2d.pcolormesh(
                xx,
                yy,
                self._gridded_data,
                norm=LogNorm(vmin=minim, vmax=maxim)
            )
            ax2d.tick_params(
                axis="y",
                labelsize=14,
                labelcolor="white"
            )
            ax2d.tick_params(
                axis="x",
                labelsize=22,
                pad=15
            )
            ax2d.set_rmin(0.)
            ax2d.grid(color="white")
            ax2d.tick_params(axis='both', which='major', labelsize=16)
            cbar = plt.colorbar(
                im,
                fraction=0.04,
                format="%.e",
                extend='max',
                ticks=np.logspace(
                    1,
                    np.log10(float(maxim)),
                    np.log10(float(maxim)),
                ),
                orientation='horizontal',
            )
            cbar.ax.tick_params(labelsize=22)
        else:
            # Cartesian plot using the configured colour-norm limits.
            ax2d = plt.gcf().add_subplot(111)
            x = np.arange(ver_min, ver_max + 1, 1)
            y = np.arange(hor_min, hor_max + 1, 1)
            xx, yy = np.meshgrid(
                x,
                y,
            )
            im = ax2d.pcolormesh(
                xx,
                yy,
                self._gridded_data,
                norm=LogNorm(vmin=v_min, vmax=v_max),
                alpha=0.5,
            )
            ax2d.tick_params(axis='both', which='major', labelsize=10)
            plt.colorbar(
                im,
                # fraction=0.012,
                # pad=0.04,
                format="%.e", extend='max',
                ticks=np.logspace(1, np.log10(int(v_max)),
                                  np.log10(int(v_max))),
                orientation='horizontal',
            )
        self.canvas.draw()
        # Track the cursor so the status bar can display coordinates.
        self.cidmotion = self.canvas.mpl_connect(
            'motion_notify_event', self.on_motion_show_data)
# External methods.
def plot(self):
"""Plot Image."""
self.repaint("")
self.plot_widget.show()
return self.plot_widget
@staticmethod
def i_theory(i_0, v, theta, omega, th, index):
"""
:param i_0: The source intensity
:param v: angular velocity
:param theta: tth/2(emergence angle)
:param omega: omega(incident angle)
:param th: thickness of sample
:param index: correction coefficient
:return:
"""
RO2 = 7.94E-30 # scattering cross section of electron
LAMBDA = 1.5418E-10 # X-ray beam length
F_GAP = 12684.62554 # unit cell structure factor (GaP)
L = 1 / np.sin(2 * theta) # Lorentz factor
P = (1 + np.cos(2 * theta) ** 2) / 2 # Polarization factor
V_A = 5.4506E-10 ** 3 # volume of the crystal
U = 1000000 / 37.6416 # mu
c_0 = (
np.sin(2 * theta - omega) /
(np.sin(2 * theta - omega) + np.sin(omega))
)
c_1 = (
1 -
np.exp(
- U * th / 1E10 *
(
1 / np.sin(omega) + 1 / np.sin(2 * theta - omega)
)
)
)
c_2 = RO2 * LAMBDA ** 3 * F_GAP * P * L / V_A ** 2
i_theo = i_0 * c_0 * c_1 * c_2 * index / (v * U)
return i_theo
# Canvas Event
def _on_press(self, event):
ax_list = self.figure.axes
self._selected_square = []
# If square have not been added, return
if (not hasattr(self, 'res')) or len(self.res) < 3:
return
if event.inaxes != ax_list[0]:
return
for square in self.res[2]:
if [event.xdata, event.ydata] in square:
self._selected_square.append(square)
def on_motion_show_data(self, event):
if event.inaxes != plt.gca():
return
if self.param['POLAR_AXIS']:
self._status_bar.showMessage(
"({0:.2f}\u00B0, {1:.2f}\u00B0)".format(
np.rad2deg(event.xdata),
event.ydata
))
else:
self._status_bar.showMessage(
"({0:.2f}\u00B0, {1:.2f}\u00B0)".format(event.xdata, event.ydata))
    def _on_motion(self, event):
        """Mouse-drag handler: move every selected square with the cursor."""
        if not hasattr(self, '_selected_square'):
            return
        if event.inaxes != self.figure.gca():
            return
        # cr_l is presumably the square's reference corner — TODO confirm.
        [init_mouse_x, init_mouse_y] = self._selected_square[0].cr_l
        for square in self._selected_square:
            # NOTE(review): the fixed +30 x-offset looks suspicious — confirm
            # whether it is intentional.
            dx = event.xdata - init_mouse_x + 30
            dy = event.ydata - init_mouse_y
            square.move((dx, dy))
        self.canvas.draw()
def _on_release(self, event):
# If square have not been added, return
if (not hasattr(self, 'res')) or len(self.res) < 3:
return
# Clear selected items.
del self._selected_square
outer_index_list = [i.cr_l for i in self.res[2][:4]]
self._sq_pk_integrate(
repaint=False,
outer_index_list=outer_index_list
)
# Peak Detection.
def _pk_search(self):
try:
is_advanced = self.param['ADVANCED_SELECTION']
except KeyError:
is_advanced = self.param['Advanced Selection']
if is_advanced:
self.res = self._poly_pk_integrate()
else:
self.res = self._sq_pk_integrate()
    def _poly_pk_integrate(self, repaint=True):
        """
        Integrate Peak intensity with polygon method.
        :param
        repaint: if re-draw the canvas.
        :return:
        int_vsot_bg_m: The intensity of the peak without background 1*4 matrix
        ind_l: The middle position of square. Same format as outer_index_list.
        Format: [[chi1, phi1], [chi2, phi2], [chi3, phi3], [chi4, phi4]]
        """
        from scipy.ndimage.filters import gaussian_filter
        from skimage import img_as_float
        from skimage.morphology import reconstruction
        from skimage.measure import label, regionprops
        from skimage.filters import threshold_niblack
        from skimage.segmentation import clear_border
        from skimage.morphology import closing, square
        from skimage import feature
        # Estimate the background level as the most common intensity value.
        n, bins = np.histogram(
            self._gridded_data.ravel(),
            bins=int(self._gridded_data.max() - self._gridded_data.min()),
        )
        bk_int = bins[np.argmax(n)]
        image = img_as_float(self._gridded_data)
        # Axis minima/maxima; key names differ between data sources.
        try:
            ver_min = int(self.attr['DRV_2'].min())
            ver_max = int(self.attr['DRV_2'].max())
            hor_min = int(self.attr['DRV_1'].min())
            hor_max = int(self.attr['DRV_1'].max())
        except KeyError:
            ver_min = np.int64(self.attr['phi_min'])
            ver_max = np.int64(self.attr['phi_max'])
            hor_min = np.int64(self.attr['khi_min'])
            hor_max = np.int64(self.attr['khi_max'])
        image = gaussian_filter(image, 1, mode='nearest')
        h = 0.2
        seed = image - h
        mask = image
        # Dilate image to remove noise.
        dilated = reconstruction(seed, mask, method='dilation')
        # Use local threshold to identify all the close area.
        thresh = threshold_niblack(dilated, window_size=27, k=0.05)
        # Select large closed area.
        bw = closing(image > thresh, square(3))
        # Remove area connected to bord.
        cleared = clear_border(bw)
        # label area.
        label_image = label(cleared)
        l, w = image.shape
        binary_img = np.zeros(shape=(l, w))
        int_vsot_bg_m = []
        ind_l = []
        for i in regionprops(label_image, self._gridded_data):
            if i.area >= 100:
                for k in i.coords:
                    # NOTE(review): int_sum is region-wide, yet it is appended
                    # once per coordinate of the region, so int_vsot_bg_m and
                    # ind_l will contain duplicates — confirm this is intended.
                    int_sum = np.sum(i.intensity_image) - i.area * bk_int
                    if int_sum > 0:
                        binary_img[k[0] + hor_min, k[1] + ver_min] = 1
                        int_vsot_bg_m.append(int_sum)
                        ind_l.append(i.weighted_centroid)
        int_vsot_bg_m = np.asarray(int_vsot_bg_m)
        # Draw edge of peaks.
        edges2 = feature.canny(binary_img, sigma=2)  # Find the edge.
        edge = np.full([l, w], np.nan)  # Create new image and fill with nan.
        edge[np.where(edges2 > 1e-2)] = 100000000  # Mark edge pixels.
        plt.figure(self.figure.number)
        # plt.clf()
        plt.imshow(
            np.roll(np.roll(edge, -hor_min, axis=0), -ver_min, axis=1),
            origin='lower',
            extent=[ver_min, ver_max, hor_min, hor_max],
        )
        self.canvas.draw()
        return int_vsot_bg_m, ind_l
    def _sq_pk_integrate(self, repaint=True, **kwargs):
        """
        Integrate Peak intensity with square method.
        :param
        repaint: if re-draw the canvas.
        param(optional):
        :outer_index_list: The middle position of square. Used for set up
        Square manually.
        Format: [[chi1, phi1], [chi2, phi2], [chi3, phi3], [chi4, phi4]]
        :return:
        int_vsot_bg_m: The intensity of the peak without background 1*4 matrix
        ind_l: The middle position of square. Same format as outer_index_list.
        sq_ins_l: The square plot handle.
        """
        int_data = self._gridded_data
        # Axis minima and square size; key names differ between data sources.
        try:
            ver_min = np.int64(self.attr['phi_min'])
            hor_min = np.int64(self.attr['khi_min'])
            sq_sz_l = [int(self.param['Square Sx']),
                       int(self.param['Square Sy'])]
        except KeyError:
            ver_min = int(self.attr['DRV_2'].min())
            hor_min = int(self.attr['DRV_1'].min())
            sq_sz_l = [int(self.param['SQUARE_SX']),
                       int(self.param['SQUARE_SY'])]
        # Square positions: caller-supplied, or found automatically.
        if 'outer_index_list' in kwargs:
            ind_l = kwargs['outer_index_list']
        else:
            ind_l = self._sq_pk_search()
        # Create Square instances.
        in_sq_l = [
            Square(
                i,
                sq_sz_l,
                int_m=int_data,
                lm_t=(ver_min, hor_min),
                color='C3',
            )
            for i in ind_l
        ]
        # Draw squares.
        if repaint:
            [i.plot() for i in in_sq_l]
        logging.debug("Square size - {0}".format(sq_sz_l))
        logging.debug("Square centre - {0}".format(ind_l))
        # Background level: the most frequent intensity value.
        n, bins = np.histogram(
            self._gridded_data.ravel(),
            bins=int(self._gridded_data.max() - self._gridded_data.min()),
        )
        bk_int = bins[np.argmax(n)]
        logging.info("Background Intensity: {0}".format(bk_int))
        # Background-subtracted value for each square.
        int_vsot_bg_m = np.asarray([i - bk_int for i in in_sq_l])
        sq_ins_l = in_sq_l
        if repaint:
            self.canvas.draw()
        # Re-wire the mouse handlers so the squares can be dragged.
        try:
            self.canvas.mpl_disconnect(self.cid_press)
            self.canvas.mpl_disconnect(self.cid_release)
            self.canvas.mpl_disconnect(self.cid_motion)
        except AttributeError:
            pass
        self.cid_press = self.canvas.mpl_connect(
            'button_press_event', self._on_press
        )
        self.cid_release = self.canvas.mpl_connect(
            'button_release_event', self._on_release
        )
        self.cid_motion = self.canvas.mpl_connect(
            'motion_notify_event', self._on_motion
        )
        return int_vsot_bg_m, ind_l, sq_ins_l
    def _sq_pk_search(self):
        """Automatically locate up to four peak positions in the gridded data.

        :return: list of four index pairs (padded with [0, 0] if fewer peaks
            are found), ordered by sort_index_list below.
        """
        from scipy.ndimage.filters import gaussian_filter, maximum_filter
        from scipy.ndimage.morphology import generate_binary_structure
        def sort_index_list(index_list):
            """
            Sort index list to fit ABCD micro-Twins, where chi of A is max.
            :param index_list: list for each point with form [chi, khi]
            :return: sorted list.
            """
            phi_sorted_l = sorted(index_list, key=lambda pair: pair[0])
            chi_index_list = [l[1] for l in phi_sorted_l]
            shifted_index_int = chi_index_list.index(max(chi_index_list))
            from collections import deque
            phi_deque = deque(phi_sorted_l)
            phi_deque.rotate(-shifted_index_int)
            sorted_index_list = list(phi_deque)
            logging.debug("index list before sort:{0}".format(index_list))
            logging.debug(
                "index list after sort:{0}".format(sorted_index_list)
            )
            return sorted_index_list
        int_data_m = self._gridded_data
        try:
            ver_min = np.int64(self.attr['phi_min'])
            hor_min = np.int64(self.attr['khi_min'])
        except KeyError:
            ver_min = int(self.attr['DRV_2'].min())
            hor_min = int(self.attr['DRV_1'].min())
        # Smooth repeatedly, then find local maxima of the smoothed data.
        neighborhood = generate_binary_structure(2, 2)
        for i in range(3):
            int_data_m = gaussian_filter(int_data_m, 4, mode='nearest')
        local_max = (
            maximum_filter(int_data_m,
                           footprint=neighborhood) == int_data_m
        )
        index = np.asarray(np.where(local_max))
        ft_index_list = [[i, j] for (i, j) in zip(index[1, :], index[0, :])]
        # Discard maxima at high chi.
        chi_threshold = 40
        ft_index_list = [i for i in ft_index_list if i[1] < chi_threshold]
        # Score each candidate by the intensity difference between a 20x20
        # square and its inner 10x10 square, keeping the four best.
        in_sq_l = [
            Square(i, [10, 10], int_data_m, (ver_min, hor_min))
            for i in ft_index_list
        ]
        ot_sq_l = [
            Square(i, [20, 20], int_data_m, (ver_min, hor_min))
            for i in ft_index_list
        ]
        int_list = [k - i for (i, k) in zip(in_sq_l, ot_sq_l)]
        ft_index_list = [
            x for (y, x)
            in sorted(zip(int_list, ft_index_list),
                      key=lambda pair: pair[0])
        ][-4:]
        ft_index_list = sort_index_list(ft_index_list)
        # Pad with dummy positions when fewer than four peaks were found.
        while len(ft_index_list) < 4:
            ft_index_list.append([0, 0])
        return ft_index_list
# Intensity to volume fraction
def _int2fraction(self):
    """
    Change the peak intensity to volume fraction.

    Reads the (intensity, index) results stored in ``self.res`` by a
    previous measurement step, asks the user for thickness/beam
    parameters on first use, converts the peak intensities to volume
    fractions and shows them in a result dialog.
    """
    # Nothing to convert until a previous step has filled self.res.
    if (not hasattr(self, 'res')) or len(self.res) < 2:
        return
    int_vsot_bg_m = self.res[0]  # background-subtracted peak intensities
    ind_l = self.res[1]  # [phi, chi] centre of each measured square
    if not hasattr(self, 'q_dialog'):
        # Lazily build the parameter dialog and reuse it afterwards.
        self.q_dialog = self._fraction_calculation_param_dialog()
    self.q_dialog.exec_()
    try:
        th = int(self.param['Thickness of sample'])
        bm_int = int(self.param['Beam Int'])
        v = abs(float(self.attr['vit_ang']))
    except KeyError:
        # Fall back to the upper-case parameter keys used elsewhere.
        th = int(self.param['THICKNESS'])
        bm_int = float(self.param['BEAM_INT'])
        v = abs(float(self.attr['VIT_ANGLE']))
    # Incidence angle per peak; 14.22 deg is presumably a
    # reflection-specific offset -- TODO confirm.
    omega = [
        (np.pi / 2 - np.arccos(
            np.cos(np.deg2rad(chi[1])) * np.sin(np.deg2rad(14.22))))
        for chi in ind_l]
    # Empirical correction factors for the four twin variants (A, D, C, B).
    cor_eff = [334.3835417, 437.8887181, 702.504497, 583.5963464]
    i_theo_l = [
        self.i_theory(bm_int, v, i, i, th, k) for i, k in zip(omega, cor_eff)]
    # volume_fraction_matrix = int_vsot_bg_m / i_theo_l * 100
    # NOTE(review): fractions use hard-coded calibration constants
    # instead of the theoretical intensities computed above -- confirm
    # this is intended.
    volume_fraction_matrix = int_vsot_bg_m * np.asarray([5.741E-05, 4.828E-05, 3.203E-05, 3.537E-05])
    self._show_res_wd = self._res_dialog(
        int_vsot_bg_m,
        volume_fraction_matrix
    )
    self._show_res_wd.show()
    logging.info("Theorical Intensity is {0}".format(i_theo_l))
    logging.debug("Sample th is {0}\n".format(th))
    logging.info("Beam intensity is {0}".format(bm_int))
    logging.info("VF is {0}".format(volume_fraction_matrix))
    logging.info("Chi is {0}".format(ind_l))
def _fraction_calculation_param_dialog(self):
    """Build the modal dialog asking for sample thickness and beam intensity."""
    dialog = QtWidgets.QDialog()

    # Thickness input: labelled line edit restricted to digits, kept
    # in sync with the shared parameter dict.
    thickness_layout = QtWidgets.QVBoxLayout()
    thickness_layout.addWidget(
        QtWidgets.QLabel('Thickness of Sample(\u212B):'))
    thickness_edit = QtWidgets.QLineEdit()
    thickness_edit.setText(self.param['THICKNESS'])
    thickness_edit.setInputMask("999999999")
    thickness_edit.textChanged.connect(
        partial(self._upt_param, "THICKNESS"))
    thickness_layout.addWidget(thickness_edit)

    # Beam-intensity input is a reusable compound layout.
    intensity_layout = IntensityInputWidget(self.param)

    ok_button = QtWidgets.QPushButton("OK")
    ok_button.clicked.connect(dialog.close)

    main_layout = QtWidgets.QVBoxLayout()
    main_layout.addLayout(thickness_layout)
    main_layout.addLayout(intensity_layout)
    main_layout.addWidget(ok_button)
    dialog.setLayout(main_layout)
    return dialog
def _res_dialog(self, int_vsot_bg_m, volume_fraction_matrix):
    """Build the result window showing intensities and volume fractions.

    :param int_vsot_bg_m: background-subtracted intensities of the four
        twin peaks (A, D, C, B order -- see the table/csv headers).
    :param volume_fraction_matrix: corresponding volume fractions (%).
    :return: the assembled (not yet shown) QWidget.
    """
    def _save2csv(int_list, fraction_list):
        """Save data to csv file.
        :param int_list: List contains intensity of peaks
        :param fraction_list: List contains volume fraction of peaks.
        :return: Csv file name.
        """
        file_name = QtWidgets.QFileDialog.getSaveFileName(
            caption='Save to csv file...',
            directory="/",
            filter="Comma-separated values file (*.csv)"
        )
        file_name = str(file_name[0])
        if not file_name:
            # User cancelled the save dialog.
            return
        import csv
        with open(file_name, 'w') as fp:
            # Semicolon-delimited "excel" dialect.
            spam_writer = csv.writer(fp, dialect='excel', delimiter=";")
            spam_writer.writerow(
                ["", 'MT-A', 'MT-D', 'MT-C', 'MT-B', 'MT'])
            spam_writer.writerow(["Intensity"] + int_list)
            spam_writer.writerow(["Volume fraction"] + fraction_list)
        return file_name,

    q_table = QtWidgets.QTableWidget()
    q_table.resize(700, 200)
    q_table.setColumnCount(5)
    q_table.setRowCount(2)
    q_table.setHorizontalHeaderLabels(
        ['MT-A', 'MT-D', 'MT-C', 'MT-B', 'MT'])
    q_table.setVerticalHeaderLabels(
        ["Abs int", "Volume Fraction(%)"])
    # Row 0: absolute intensities plus their sum in the last column.
    i_l = list(int_vsot_bg_m.tolist())
    i_l.append(np.sum(int_vsot_bg_m))
    i_l = list(map(partial(round, ndigits=4), i_l))
    for i in range(len(i_l)):
        q_table.setItem(
            0, i,
            QtWidgets.QTableWidgetItem(str(i_l[i]))
        )
    # Row 1: volume fractions plus their sum.
    f_l = list(volume_fraction_matrix.tolist())
    f_l.append(np.sum(volume_fraction_matrix))
    f_l = list(map(partial(round, ndigits=2), f_l))
    for i in range(len(f_l)):
        q_table.setItem(
            1, i,
            QtWidgets.QTableWidgetItem(str(f_l[i]))
        )
    q_table.resizeColumnsToContents()
    q_table.resizeRowsToContents()
    # Save to Csv Button
    _save2csv_button = QtWidgets.QPushButton("Save to Csv...")
    _save2csv_button.clicked.connect(lambda: _save2csv(i_l, f_l))
    # Button Group sub layout
    _show_res_wd_sub_layout = QtWidgets.QHBoxLayout()
    _show_res_wd_sub_layout.addWidget(
        _save2csv_button, alignment=QtCore.Qt.AlignRight)
    # Main layout
    _show_res_wd_layout = QtWidgets.QVBoxLayout()
    _show_res_wd_layout.addWidget(q_table)
    _show_res_wd_layout.addLayout(_show_res_wd_sub_layout)
    # Main widget
    _show_res_wd = QtWidgets.QWidget()
    _show_res_wd.setLayout(_show_res_wd_layout)
    # Resize the window to fit the table contents (fixed chrome margins).
    w = 140
    for i in range(q_table.columnCount()):
        w += q_table.columnWidth(i)
    h = 76
    for i in range(q_table.rowCount()):
        h += q_table.rowHeight(i)
    _show_res_wd.resize(w, h)
    return _show_res_wd
class Square(object):
    """Axis-aligned square region over an intensity matrix.

    Used to integrate peak intensity around a centre point and to draw
    the region on the current matplotlib axes.
    """

    def __init__(
            self,
            cr_l,
            sz_t,
            int_m=None,
            lm_t=(0, 0),
            color='b',
    ):
        """
        :param cr_l: centre position of the square, [x, y].
        :param sz_t: size of the square, (width, height).
        :param int_m: intensity matrix the square is measured on.
        :param lm_t: (x, y) offset applied when plotting / hit-testing.
        :param color: matplotlib color of the drawn rectangle.
        """
        self.cr_l = cr_l  # Centre position of the square
        self.sz_t = sz_t  # Size of the square
        self.int_m = int_m
        self.lm_t = lm_t
        self.color = color
        self._fh = None  # matplotlib patch handle once plotted

    def lim(self):
        """Return (x_min, x_max, y_min, y_max) bounds of the square.

        Bounds are floored to ints and clipped to the intensity matrix
        when one is attached; otherwise they stay float and unclipped.
        """
        if self.int_m is None:
            x_min = self.cr_l[0] - self.sz_t[0] / 2
            x_max = self.cr_l[0] + self.sz_t[0] / 2
            y_min = self.cr_l[1] - self.sz_t[1] / 2
            y_max = self.cr_l[1] + self.sz_t[1] / 2
        else:
            w, h = self.int_m.shape
            x_min = max(0, int(np.floor(self.cr_l[0] - self.sz_t[0] / 2)))
            x_max = min(int(np.floor(self.cr_l[0] + self.sz_t[0] / 2)), h)
            y_min = max(0, int(np.floor(self.cr_l[1] - self.sz_t[1] / 2)))
            y_max = min(int(np.floor(self.cr_l[1] + self.sz_t[1] / 2)), w)
        return x_min, x_max, y_min, y_max

    def plot(self, **kwargs):
        """Draw the square on the current axes as an unfilled rectangle."""
        from matplotlib.patches import Rectangle
        x_min, x_max, y_min, y_max = self.lim()
        (x_lmt, y_lmt) = self.lm_t
        self._fh = plt.gca().add_patch(
            Rectangle(
                xy=(x_min + x_lmt, y_min + y_lmt),
                width=x_max - x_min,
                height=y_max - y_min,
                linewidth=1,
                fill=False,
                color=self.color,
                **kwargs
            )
        )

    def move(self, direction_tuple):
        """Shift the square by (dx, dy) and redraw it."""
        self.remove()
        self.cr_l = [i + j for (i, j) in zip(self.cr_l, list(direction_tuple))]
        self.plot()

    @property
    def intensity_image(self):
        """Summed intensity of the matrix region covered by the square.

        :raises AttributeError: if no intensity matrix is attached.
        """
        x_min, x_max, y_min, y_max = self.lim()
        if self.int_m is None:
            raise AttributeError("Need intensity matrix.")
        intensity_result_matrix = self.int_m[y_min:y_max, x_min:x_max]
        peak_intensity_int = np.sum(intensity_result_matrix)
        return peak_intensity_int

    @property
    def points(self):
        """Number of matrix points covered by the square."""
        x_min, x_max, y_min, y_max = self.lim()
        return (y_max - y_min) * (x_max - x_min)

    def remove(self):
        """
        Remove the square's rectangle from the plot, if it was drawn.
        :return: None
        """
        if self._fh is None:
            # BUGFIX: calling remove() before plot() used to raise
            # AttributeError on the None handle.
            return
        try:
            self._fh.remove()
        except ValueError as e:
            logging.debug(str(e))
            logging.debug("Could not find the square.")

    def __sub__(self, x):
        """Background-subtracted intensity.

        ``self - other_square`` estimates per-point background noise
        from the non-overlapping area of a larger square and subtracts
        it; ``self - number`` treats the number as a per-point
        background level. Returns NotImplemented for other operands.
        """
        if isinstance(x, Square):
            pk_int = self.intensity_image
            pk_pt = self.points
            x_pk_int = x.intensity_image
            x_pk_pt = x.points
            # NOTE(review): assumes the two squares cover a different
            # number of points; equal sizes would divide by zero.
            bg_noise_float = (pk_int - x_pk_int) / (pk_pt - x_pk_pt)
            return pk_int - pk_pt * bg_noise_float
        elif isinstance(x, (float, int, np.integer, np.floating)):
            # BUGFIX: np.int / np.float were removed in NumPy >= 1.20;
            # np.integer / np.floating cover all NumPy scalar widths.
            pk_int = self.intensity_image
            pk_pt = self.points
            return pk_int - pk_pt * x
        else:
            return NotImplemented

    def __contains__(self, item):
        """
        Check if the point is in the square.
        :param item: the position of point [x,y].
        :return: The boolean value.
        """
        x_min, x_max, y_min, y_max = self.lim()
        (x_limit, y_limit) = self.lm_t
        return (x_min + x_limit < item[0] < x_max + x_limit and
                y_min + y_limit < item[1] < y_max + y_limit)
class IntensityInputWidget(QtWidgets.QVBoxLayout):
    """Labelled line edit for the beam intensity.

    Keeps the shared parameter dict's BEAM_INT entry in sync with the
    edit, and offers a trailing icon action to derive the value from
    user-selected raw scan files.
    """

    def __init__(self, linked_param):
        # linked_param: shared parameter mapping; edits write BEAM_INT back.
        super().__init__()
        self._int_line_edit = QtWidgets.QLineEdit(
            str(linked_param['BEAM_INT']))
        self._int_line_edit.textChanged.connect(
            partial(linked_param.__setitem__, 'BEAM_INT'))
        # Trailing icon opens a file picker to measure intensity directly.
        q_int_button = self._int_line_edit.addAction(
            QtGui.QIcon(QtGui.QPixmap('icons/more.png')),
            QtWidgets.QLineEdit.TrailingPosition
        )
        q_int_button.triggered.connect(self._get_beam_intensity)
        self.addWidget(QtWidgets.QLabel("Beam Intensity"))
        self.addWidget(self._int_line_edit)

    def _get_beam_intensity(self):
        """Average the peak intensity of user-chosen raw scan files and
        write the scaled result into the line edit."""
        def file2int(i):
            # Load one raw file and return its maximum 'direct' intensity.
            file_instance = RawFile()
            file_instance.get_file(i)
            data, attr = file_instance.get_data()
            del file_instance
            scan_instance = OneDScanProc()
            scan_instance.set_data(data, attr)
            maxmium_int = scan_instance.get_max(mode='direct')
            return maxmium_int

        file_names = QtWidgets.QFileDialog.getOpenFileNames(
            caption='Open intensity file...',
            directory="/",
            filter="Raw file (*.raw)"
        )
        source_file_list = file_names[0]
        if not source_file_list:
            # User cancelled the dialog.
            return
        int_l = [file2int(str(i)) for i in source_file_list]
        # 8940: scaling factor -- presumably an instrument/attenuator
        # constant; TODO confirm.
        beam_int = np.mean(np.asarray(int_l))*8940
        self._int_line_edit.setText(str(beam_int))
|
from sqlmodel import Session, select
from warehouse import engine
from warehouse.models import Product, Supplier
from warehouse.ultis import update_attr
def get_all():
    """Return all (Product, Supplier) rows, suppliers outer-joined."""
    with Session(engine) as session:
        query = select(Product, Supplier).join(Supplier, isouter=True)
        return session.exec(query).fetchall()
def get_by_id(product_id: int):
    """Return the (Product, Supplier) row for *product_id*, or None."""
    with Session(engine) as session:
        query = (
            select(Product, Supplier)
            .join(Supplier, isouter=True)
            .where(Product.id == product_id)
        )
        return session.exec(query).one_or_none()
def create(product: Product):
    """Persist a new product and return it refreshed from the DB."""
    with Session(engine) as session:
        session.add(product)
        session.commit()
        session.refresh(product)
    return product
def delete(product_id: int):
    """Delete the product with *product_id*; True if a row was removed."""
    with Session(engine) as session:
        product = session.exec(
            select(Product).where(Product.id == product_id)
        ).one_or_none()
        if product is None:
            return False
        session.delete(product)
        session.commit()
        return True
def update(product_id: int, payload: Product):
    """Apply *payload*'s attributes to the stored product.

    Returns the refreshed product, or None when the id is unknown.
    """
    with Session(engine) as session:
        product = session.exec(
            select(Product).where(Product.id == product_id)
        ).one_or_none()
        if product is None:
            return None
        update_attr(product, payload)
        session.add(product)
        session.commit()
        session.refresh(product)
        return product
def get_without_supplier(product_id: int):
    """Return only the Product row for *product_id* (no join), or None."""
    with Session(engine) as session:
        return session.exec(
            select(Product).where(Product.id == product_id)
        ).one_or_none()
|
#!/usr/bin/python3
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
# run as follows:
# python myssl.py
# then in your browser/curl -k, visit:
# https://localhost:5000
#
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import ssl
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Tiny HTTPS endpoint serving canned cluster-inspection JSON."""

    def inspect_master(self):
        """Answer /inspect/master with the static skip-rule list."""
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'{"skip_rules":["hiya","hiyb","helo","test_service"]}')

    def info(self):
        """Answer / with the node list and service map."""
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'{"nodes":["127.0.0.1"], "services":{"test_service": ["127.0.0.1"]}}')

    def do_GET(self):
        # expect: curl -ks https://127.0.0.1:5000/inspect/master
        path = urlparse(self.path).path
        print(f"path is {path}")
        routes = {
            "/": self.info,
            "/inspect/master": self.inspect_master,
        }
        handler = routes.get(path)
        if handler is not None:
            handler()
        else:
            self.send_response(404)
            self.end_headers()
            self.wfile.write(b'{"error":"Not Found"}')
# note: don't bind to localhost(or 127.0.0.1) when using docker; use 0.0.0.0 instead.
# NOTE(review): this binds port 443, not the 5000 mentioned in the header comment.
httpd = HTTPServer(('0.0.0.0', 443), SimpleHTTPRequestHandler)
# BUGFIX: ssl.wrap_socket() was deprecated in 3.7 and removed in Python 3.12;
# build an SSLContext and wrap the listening socket through it instead.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile='./server.pem')
httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
|
# Read the square's side length and report its perimeter and area.
lado = input("Digite o valor correspondente ao lado de um quadrado: ")
side = float(lado)
perimeter = side * 4  # perimeter of a square: 4 * side
area = side ** 2      # area of a square: side squared
# Values are truncated to int for display, matching the original output.
print("perímetro:", int(perimeter), "-", "área:", int(area))
import numpy as np
import tensorflow as tf
import cv2
import time
import os
class DetectorAPI:
    """Thin wrapper around a frozen TF object-detection graph.

    NOTE(review): uses the TensorFlow 1.x API (tf.GraphDef, tf.Session,
    tf.gfile); under TF2 these live in tf.compat.v1 -- confirm the
    intended TF version before upgrading.
    """

    def __init__(self, path_to_ckpt):
        # path_to_ckpt: path to the frozen_inference_graph.pb file.
        self.path_to_ckpt = path_to_ckpt

        # Deserialize the frozen graph into a fresh tf.Graph.
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.default_graph = self.detection_graph.as_default()
        # Long-lived session; released in close().
        self.sess = tf.Session(graph=self.detection_graph)

        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        """Run one detection pass on a single image.

        :param image: HxWx3 array (BGR or RGB as the model expects --
            not enforced here; TODO confirm expected channel order).
        :return: (boxes, scores, classes, num) where boxes are
            (top, left, bottom, right) pixel tuples.
        """
        # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()

        print("Elapsed Time For Detection:", end_time-start_time)

        # Convert normalized box coordinates to pixel coordinates.
        im_height, im_width,_ = image.shape
        boxes_list = [None for i in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0,i,0] * im_height),
                             int(boxes[0,i,1]*im_width),
                             int(boxes[0,i,2] * im_height),
                             int(boxes[0,i,3]*im_width))

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        """Release the TF session and exit the default-graph context."""
        self.sess.close()
        self.default_graph.close()
# main flow
# NOTE: the model is loaded at import time as a module-level singleton,
# so importing this module requires frozen_inference_graph.pb to sit
# next to this file.
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = dir_path + '/frozen_inference_graph.pb'
odapi = DetectorAPI(path_to_ckpt=model_path)
def detectPerson(imgframe, confthreshold=0.7):
    """Detect persons (COCO class 1) in a frame.

    :param imgframe: input image as a numpy array.
    :param confthreshold: minimum detection score to keep a box.
    :return: (bboxes, bscores) -- boxes as (top, left, bottom, right)
        pixel tuples and their matching scores, persons only.
    """
    imgcpy = imgframe.copy()
    boxes, scores, classes, num = odapi.processFrame(imgcpy)
    bboxes, bscores = [], []
    # Keep only class 1 (person) above the confidence threshold.
    # (Removed the unused locals final_score/count from the original.)
    for box, score, cls in zip(boxes, scores, classes):
        if cls == 1 and score > confthreshold:
            bboxes.append(box)
            bscores.append(score)
    # bboxes(ltrb)
    return bboxes, bscores
def releaseModel():
    """Release the module-level detector's TF session and graph context."""
    odapi.close()
if __name__ == "__main__":
    # Demo: detect and display persons frame-by-frame in a local video.
    cap = cv2.VideoCapture('E:\\_WORK_Upwork_Evgenii\\Asad Riaz\\002/(4).mp4')
    while cap.isOpened():
        r, img = cap.read()
        if not r:
            # BUGFIX: cap.read() returns (False, None) at end of stream;
            # the original crashed trying to resize None.
            break
        img = cv2.resize(img, (1200, 800))
        bboxes, bscores = detectPerson(img)
        for ii, bx in enumerate(bboxes):
            # Boxes are (top, left, bottom, right); OpenCV wants (x, y) points.
            cv2.rectangle(img, (bx[1], bx[0]), (bx[3], bx[2]),
                          (0, 0, 255), 5)
            croped = img[bx[0]:bx[2], bx[1]:bx[3]]
            cv2.imshow(str(ii), croped)
        cv2.imshow("main", img)
        cv2.waitKey(1)
    # Release the capture and window resources on exit.
    cap.release()
    cv2.destroyAllWindows()
#
# Tested on following pretrained models:
# mask_rcnn_inception_v2_coco_2018_01_28
#
#
import cv2
import sys
import argparse
import imutils
from detection_boxes import DetectBoxes
def arg_parse():
    """Parse CLI arguments for the Mask R-CNN OpenCV-DNN demo.

    :return: argparse.Namespace with video/pbtxt/frozen paths, float
        confidence and mask thresholds, and the webcam flag.
    """
    parser = argparse.ArgumentParser(description='Pytorch Yolov3')
    parser.add_argument("--video", help="Path where video is located",
                        default="assets/cars.mp4", type=str)
    parser.add_argument("--pbtxt", help="pbtxt file", default="mask_rcnn_inception_v2_coco_2018_01_28/graph.pbtxt")
    parser.add_argument("--frozen", help="Frozen inference pb file", default="mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb")
    # BUGFIX: without type=float these arrived as *strings* when passed
    # on the command line, breaking threshold comparisons downstream.
    parser.add_argument("--conf", dest="confidence", help="Confidence threshold for predictions", default=0.5, type=float)
    parser.add_argument("--mask", help="Mask threshold for predictions", default=0.3, type=float)
    # NOTE(review): any non-empty value (even "False") parses truthy
    # here; consider action='store_true' if the CLI contract may change.
    parser.add_argument("--webcam", help="Detect with web camera", default=False)
    return parser.parse_args()
def main():
    """Run Mask R-CNN detection over a video file (or webcam) with OpenCV DNN."""
    args = arg_parse()

    # Webcam flag switches the capture source to device 0.
    VIDEO_PATH = args.video if not args.webcam else 0

    print("Loading network.....")
    net = cv2.dnn.readNetFromTensorflow(args.frozen, args.pbtxt)
    # CPU / OpenCV backend keeps the demo portable (no CUDA required).
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    print("Network successfully loaded")

    # class names ex) person, car, truck, and etc.
    PATH_TO_LABELS = "labels/mscoco_labels.names"

    # load detection class, default confidence threshold is 0.5
    detect = DetectBoxes(PATH_TO_LABELS, confidence_threshold=args.confidence, mask_threshold=args.mask, has_mask=True)

    # Set window
    winName = 'Mask-RCNN-Opencv-DNN'

    try:
        # Read Video file
        cap = cv2.VideoCapture(VIDEO_PATH)
    except IOError:
        # NOTE(review): cv2.VideoCapture does not raise IOError on a
        # missing file -- it returns an unopened capture, so this branch
        # is likely dead; checking cap.isOpened() would be more reliable.
        print("Input video file", VIDEO_PATH, "doesn't exist")
        sys.exit(1)

    while cap.isOpened():
        hasFrame, frame = cap.read()
        # if end of frame, program is terminated
        if not hasFrame:
            break

        # Resizing given frame to increase process time
        # frame = imutils.resize(frame, width=450)

        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
        # Set the input to the network
        net.setInput(blob)

        # Runs the forward pass
        network_output, masks = net.forward(['detection_out_final', 'detection_masks'])

        # Extract the bounding box and draw rectangles
        detect.detect_bounding_boxes(frame, network_output, masks)

        # Efficiency information
        t, _ = net.getPerfProfile()
        elapsed = abs(t * 1000.0 / cv2.getTickFrequency())
        label = 'Time per frame : %0.0f ms' % elapsed
        cv2.putText(frame, label, (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
        cv2.imshow(winName, frame)
        # NOTE(review): elapsed can be 0.0 on very fast frames, which
        # would raise ZeroDivisionError here -- confirm before relying
        # on the FPS print.
        print("FPS {:5.2f}".format(1000/elapsed))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print("Video ended")

    # releases video and removes all windows generated by the program
    cap.release()
    cv2.destroyAllWindows()
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import common_pb2 as common__pb2
import ether_service_pb2 as ether__service__pb2
class ether_serviceStub(object):
    """Generated client-side stub for the ``ether_service`` gRPC service."""
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.rpc_get_ether_service = channel.unary_unary(
            '/ether_service/rpc_get_ether_service',
            request_serializer=ether__service__pb2.req_ether_service_key.SerializeToString,
            response_deserializer=ether__service__pb2.res_ether_service.FromString,
        )
        self.rpc_list_ether_services = channel.stream_stream(
            '/ether_service/rpc_list_ether_services',
            request_serializer=ether__service__pb2.req_ether_service_key.SerializeToString,
            response_deserializer=ether__service__pb2.res_ether_service.FromString,
        )
        self.rpc_register_ether_service = channel.unary_unary(
            '/ether_service/rpc_register_ether_service',
            request_serializer=ether__service__pb2.res_ether_service.SerializeToString,
            response_deserializer=ether__service__pb2.res_ether_service.FromString,
        )
        self.rpc_unregister_ether_service = channel.unary_unary(
            '/ether_service/rpc_unregister_ether_service',
            request_serializer=ether__service__pb2.req_ether_service_key.SerializeToString,
            response_deserializer=common__pb2.null.FromString,
        )
        self.rpc_taskflow_create_ether_service = channel.unary_unary(
            '/ether_service/rpc_taskflow_create_ether_service',
            request_serializer=ether__service__pb2.req_service_create_spec.SerializeToString,
            response_deserializer=common__pb2.null.FromString,
        )
        self.rpc_taskflow_delete_ether_service = channel.unary_unary(
            '/ether_service/rpc_taskflow_delete_ether_service',
            request_serializer=ether__service__pb2.req_service_delete_spec.SerializeToString,
            response_deserializer=common__pb2.null.FromString,
        )
        self.rpc_taslflow_update_ether_lan_service = channel.unary_unary(
            '/ether_service/rpc_taslflow_update_ether_lan_service',
            request_serializer=ether__service__pb2.req_lan_service_update_spec.SerializeToString,
            response_deserializer=common__pb2.null.FromString,
        )
        self.rpc_push_ether_services = channel.stream_unary(
            '/ether_service/rpc_push_ether_services',
            request_serializer=ether__service__pb2.res_ether_service.SerializeToString,
            response_deserializer=common__pb2.null.FromString,
        )
        self.rpc_pull_ether_services = channel.unary_stream(
            '/ether_service/rpc_pull_ether_services',
            request_serializer=ether__service__pb2.req_ether_service_key.SerializeToString,
            response_deserializer=ether__service__pb2.res_ether_service.FromString,
        )
class ether_serviceServicer(object):
    """Generated service base class; subclass and override the rpc_*
    methods to implement the ether_service server."""
    # missing associated documentation comment in .proto file
    pass

    def rpc_get_ether_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_list_ether_services(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_register_ether_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_unregister_ether_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_taskflow_create_ether_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_taskflow_delete_ether_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_taslflow_update_ether_lan_service(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_push_ether_services(self, request_iterator, context):
        """rpc for e3neta&e3netd interaction
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def rpc_pull_ether_services(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ether_serviceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* (generated helper)."""
    rpc_method_handlers = {
        'rpc_get_ether_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_get_ether_service,
            request_deserializer=ether__service__pb2.req_ether_service_key.FromString,
            response_serializer=ether__service__pb2.res_ether_service.SerializeToString,
        ),
        'rpc_list_ether_services': grpc.stream_stream_rpc_method_handler(
            servicer.rpc_list_ether_services,
            request_deserializer=ether__service__pb2.req_ether_service_key.FromString,
            response_serializer=ether__service__pb2.res_ether_service.SerializeToString,
        ),
        'rpc_register_ether_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_register_ether_service,
            request_deserializer=ether__service__pb2.res_ether_service.FromString,
            response_serializer=ether__service__pb2.res_ether_service.SerializeToString,
        ),
        'rpc_unregister_ether_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_unregister_ether_service,
            request_deserializer=ether__service__pb2.req_ether_service_key.FromString,
            response_serializer=common__pb2.null.SerializeToString,
        ),
        'rpc_taskflow_create_ether_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_taskflow_create_ether_service,
            request_deserializer=ether__service__pb2.req_service_create_spec.FromString,
            response_serializer=common__pb2.null.SerializeToString,
        ),
        'rpc_taskflow_delete_ether_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_taskflow_delete_ether_service,
            request_deserializer=ether__service__pb2.req_service_delete_spec.FromString,
            response_serializer=common__pb2.null.SerializeToString,
        ),
        'rpc_taslflow_update_ether_lan_service': grpc.unary_unary_rpc_method_handler(
            servicer.rpc_taslflow_update_ether_lan_service,
            request_deserializer=ether__service__pb2.req_lan_service_update_spec.FromString,
            response_serializer=common__pb2.null.SerializeToString,
        ),
        'rpc_push_ether_services': grpc.stream_unary_rpc_method_handler(
            servicer.rpc_push_ether_services,
            request_deserializer=ether__service__pb2.res_ether_service.FromString,
            response_serializer=common__pb2.null.SerializeToString,
        ),
        'rpc_pull_ether_services': grpc.unary_stream_rpc_method_handler(
            servicer.rpc_pull_ether_services,
            request_deserializer=ether__service__pb2.req_ether_service_key.FromString,
            response_serializer=ether__service__pb2.res_ether_service.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'ether_service', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
#_*_ coding:utf-8 _*_
'''
map
'''
# list01 = [1,2,3,4,5,6,7,8,9]
# list02 = [1,2,3,4,5,6,7,8,9]
# map = map(lambda x,y : x * y,list01,list02)
# print(list(map))
'''
filter
'''
# list01 = [1,2,3,4,5,6,7,8,9]
# filter = filter(lambda x : x > 5,list01)
# print(list(filter))
'''
reduce
'''
from functools import reduce

list02 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# reduce with "+" is just a running sum; the builtin sum() is the
# idiomatic (and C-speed) equivalent of reduce(lambda x, y: x + y, ..., 0).
new_list = sum(list02)
print(new_list)
"""
AbstractStack is an abstract data type for stack implementation.
CONSTANTS
POP_NIL = 0 # pop() not called yet
POP_OK = 1 # last pop() call completed successfully
POP_ERR = 2 # stack storage is empty
PEEK_NIL = 0 # peek() not called yet
PEEK_OK = 1 # last peek() call returned correct item
PEEK_ERR = 2 # stack storage is empty
CONSTRUCTOR
__new__(cls) -> new stack instance
Post-condition:
created a new empty stack instance.
__init__(self):
Initializing the instance after it's been created.
COMMANDS
push(self, value)
Post-condition:
new item added into stack storage.
pop(self)
Pre-condition:
stack storage is not empty.
Post-condition:
last added item removed from stack storage.
clear(self)
Post-condition:
stack storage is empty.
REQUESTS
peek(self) -> last pushed item
Pre-condition:
stack storage is not empty.
size(self) -> number of items in the stack storage
ADDITIONAL REQUESTS
get_pop_status(self) -> status of last pop() call (POP_* constant)
get_peek_status(self) -> status of last peek() call (PEEK_* constant)
"""
from abc import ABCMeta, abstractmethod
class AbstractStack(metaclass=ABCMeta):
    """Abstract data type (ADT) specification of a LIFO stack.

    Concrete subclasses implement all abstract commands/requests; the
    *_NIL/*_OK/*_ERR constants encode the status of the last pop() and
    peek() calls, as described in the module docstring.
    """

    POP_NIL = 0  # pop() not called yet
    POP_OK = 1  # last pop() call completed successfully
    POP_ERR = 2  # stack storage is empty

    PEEK_NIL = 0  # peek() not called yet
    PEEK_OK = 1  # last peek() call returned correct item
    PEEK_ERR = 2  # stack storage is empty

    # constructor
    def __new__(cls) -> object:
        """
        Create a class instance
        Post-condition:
            created a new empty stack instance
        NOTE: __new__ is NOT abstract method. You shouldn't
        need to override __new__. See:
        https://mail.python.org/pipermail/tutor/2008-April/061426.html
        """
        new_instance = super().__new__(cls)
        return new_instance

    @abstractmethod
    def __init__(self):
        """Initializing the instance after it's been created"""

    # commands
    @abstractmethod
    def push(self, value: object):
        """Post-condition: new item added into stack storage"""

    @abstractmethod
    def pop(self):
        """
        Pre-condition: stack storage is not empty.
        Post-condition: last added item removed from stack storage.
        """

    @abstractmethod
    def clear(self):
        """Post-condition: stack storage is empty."""

    # requests:
    @abstractmethod
    def peek(self) -> object:
        """
        Return last pushed item.
        Pre-condition: stack storage is not empty.
        """
        return 0

    @abstractmethod
    def size(self) -> int:
        """Return the number of items in the stack storage"""
        return 0

    # additional requests:
    @abstractmethod
    def get_pop_status(self) -> int:
        """Return status of last pop() call:
        one of the POP_* constants"""

    @abstractmethod
    def get_peek_status(self) -> int:
        """Return status of last peek() call:
        one of the PEEK_* constants"""
|
from django.views.generic import TemplateView
from prices.models import PriceTable, SecondPriceTable
from ceilings.models import Ceiling
class PriceView(TemplateView):
    """Render the price page with ceilings and both price tables."""

    template_name = 'prices/price.html'

    def get_context_data(self, **kwargs):
        """Extend the template context with ceiling and price querysets."""
        # Py3-style zero-argument super() -- equivalent to
        # super(PriceView, self) but not tied to the class name.
        context = super().get_context_data(**kwargs)
        context['ceilings'] = Ceiling.objects.all()
        context['price_one'] = PriceTable.objects.all()
        context['fields_two'] = SecondPriceTable.objects.all()
        return context
|
import cashtag_analyzer # Import the modules from the __init__ script.
import ccxt # Import ccxt to connect to exchange APIs.
import collections # Import collections to create lists within dictionaries on the fly.
import datetime # Import datetime for the timedelta and utcfromtimestamp functions.
import numpy # Import numpy to compare the contents of lists.
import re # Import re to split up the lists of symbols into individual items.
import sqlalchemy # Import sqlalchemy to do specific data selection from the MySQL database.
# Determines what symbols in the cashtag list are traded on the selected exchange.
def create_match_list(exchange, twitter_base_list, twitter_dict):
    """Match tweeted cashtag bases against the exchange's traded bases.

    :param exchange: ccxt-style exchange with .name and .load_markets().
    :param twitter_base_list: unique base symbols seen in tweets.
    :param twitter_dict: base -> list of tweet created_at timestamps.
    :return: list of [created_at, base, symbols] entries, one per tweet
        mentioning a base that the exchange trades.
    """
    print('Checking list of cashtags against supported symbols in {}...'.format(exchange.name))
    match_list = []
    base_set = set()
    base_dict = collections.defaultdict(list)
    markets = exchange.load_markets()
    for symbol in markets:
        base = markets[symbol]['base']
        base_set.add(base)
        base_dict[base].append(symbol)
    base_list = list(base_set)
    # Vectorized membership test of exchange bases against tweeted bases.
    match = numpy.isin(base_list, twitter_base_list, assume_unique=True)
    for base, matched in zip(base_list, match):
        if matched:
            for created_at in twitter_dict[base]:
                match_list.append([created_at, base, base_dict[base]])
    print('Supported symbols check complete.')
    return match_list
# Queries the exchange for market data for the time period around the Tweet each symbol in the match list.
def create_market_data_list(exchange, match_list, limit=2, timeframe='1d'):
    """Fetch OHLCV candles around each matched cashtag's tweet time.

    For every (tweet time, base, symbols) entry, request ``limit``
    candles starting one day before the tweet; an entry contributes
    rows only when exactly two candles come back (hard-coded check).
    """
    print('Getting market data for each cashtag...')
    market_data_list = []
    for created_at, base, symbols in match_list:
        # Candle window starts one day before the tweet (epoch millis).
        since = int((created_at - datetime.timedelta(days=1)).timestamp() * 1000)
        for symbol in symbols:
            candles = exchange.fetch_ohlcv(symbol, limit=limit, since=since, timeframe=timeframe)
            if not candles or len(candles) != 2:
                continue
            for uohlcv in candles:
                print(since, uohlcv)
                market_data_list.append({
                    'base': base,
                    'candle_ts': datetime.datetime.utcfromtimestamp(uohlcv[0] // 1000),
                    'close': float(uohlcv[4]),
                    'high': float(uohlcv[2]),
                    'low': float(uohlcv[3]),
                    'open': float(uohlcv[1]),
                    'symbol': symbol,
                    'tweet_ts': created_at,
                    'volume': float(uohlcv[5]),
                })
    print('Market data collection complete.')
    return market_data_list
# Get a list of cashtags for the current screen name and turn it into a list (for direct processing) and a dictionary
# (for lookup purposes during the direct processing).
def create_twitter_lists(screen_name, table):
    """Collect the cashtags tweeted by *screen_name*.

    Returns (twitter_base_list, twitter_dict) where the list holds unique
    cashtag strings and the dict maps each cashtag to its Tweet timestamps.
    """
    print('Creating list of cashtags...')
    # NOTE(review): the whereclause is built with string formatting; screen_name
    # comes from our own DB here, but a bound parameter would be safer against
    # SQL injection if the source of screen_name ever changes.
    select_query = table.select(whereclause="`screen_name` = '{}'".format(screen_name))
    results = db_connection.execute(select_query)
    twitter_base_set = set()
    twitter_dict = collections.defaultdict(list)
    for result in results.fetchall():
        # Raw string avoids the invalid-escape warning of '(\w+)'.
        for cashtag in re.findall(r'(\w+)', result[0]):
            twitter_base_set.add(cashtag)
            twitter_dict[cashtag].append(result['created_at'])
    twitter_base_list = list(twitter_base_set)
    print('Cashtag list created.')
    return twitter_base_list, twitter_dict
# Load the settings from the settings file and turn them into variables.
# Load the settings from the settings file and turn them into variables.
settings = cashtag_analyzer.load_settings()
exchange_id = settings['exchange_options']['exchange_id']
limit = settings['exchange_options']['limit']
results_table = settings['mysql_connection']['results_table']  # NOTE(review): read but never used below
timeframe = settings['exchange_options']['timeframe']
tweets_table = settings['mysql_connection']['tweets_table']
# Dynamically load the exchange method from the ccxt module.
exchange_method = getattr(ccxt, exchange_id)
exchange = exchange_method()
# Connect to the database.
db_connection = cashtag_analyzer.connect_to_db(settings['mysql_connection'])
table = cashtag_analyzer.get_table(db_connection, tweets_table)
# Select a list of screen names from the database.
select_query = sqlalchemy.select([table.c['screen_name']]).distinct()
results = db_connection.execute(select_query)
# Loop through the screen name list and collect market data for each cashtag.
for result in results:
    screen_name = result[0]
    print('Getting results for screen name {}...'.format(screen_name))
    twitter_base_list, twitter_dict = create_twitter_lists(screen_name, table)
    match_list = create_match_list(exchange, twitter_base_list, twitter_dict)
    market_data_list = create_market_data_list(exchange, match_list, limit=limit, timeframe=timeframe)
    # As a sanity check, get the number of rows in the table before executing the INSERT statement and print the results.
    results_text = 'Pre-INSERT row count: ' + cashtag_analyzer.get_row_count(db_connection, table)
    print(results_text)
    # Insert the market data into the database.
    # NOTE(review): data is inserted into the tweets table, not results_table — confirm intended target.
    cashtag_analyzer.insert_data(db_connection, market_data_list, table)
|
#!/usr/bin/env python
import json
import visit
"""Visits ns_server metadata and emits flat metadata
on the fast-changing time-series metrics to stdout as JSON."""
# True until the first metric has been emitted; used to place the comma
# separator between JSON members.
first = True
def store_conv_fast(root, parents, data, meta, coll,
                    key, val, meta_val, meta_inf, level):
    """Emit one fast-changing metric as a member of the surrounding JSON
    object printed by __main__ ("ns-<name>": {...}).

    Python 2 script (print statement, dict.iteritems).  Also counts the
    metric in the shared run state (root["run"]["tot_fast"]).
    """
    global first
    if not first:
        print ","
    first = False
    # Join the metadata path and the key into a single dashed metric name.
    name = '-'.join(parents + [key])
    print ' "ns-%s": {' % name
    print ' "name": "%s"' % name
    if meta_inf:
        i = 0  # NOTE(review): counter is incremented but never read
        for k, v in meta_inf.iteritems():
            print ' ,'
            print ' "%s": "%s"' % (k, v)
            i = i + 1
    print ' }'
    root["run"]["tot_fast"] += 1
def store_conv_slow(root, parents, data, meta, coll,
                    key, val, meta_val, meta_inf, level):
    """Count a slow-changing metric; nothing is emitted for these."""
    root["run"]["tot_slow"] += 1
def url_conv_before(context, path):
    """Hook called before each URL visit; passes context/path through unchanged."""
    return context, path
def url_conv_after(context, path, root):
    """Hook called after each URL visit; intentionally a no-op."""
    return
if __name__ == '__main__':
    # Wrap all emitted metrics in one JSON object; the store_conv_* and
    # url_conv_* callbacks fill in the members while visit.main walks
    # the ns_server REST tree.
    print "{"
    visit.main("127.0.0.1", 8091, "/pools/default",
               {"fast": store_conv_fast,
                "slow": store_conv_slow},
               {"url_before": url_conv_before,
                "url_after": url_conv_after})
    print "}"
|
# -*- coding:utf-8 -*-
__author__ = "leo"
# Given the head node of a linked list, return the value of every node from tail to head (as a list).
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x        # node payload
        self.next = None    # next node, or None at the tail
class Solution:
    """Recursive approach: the reversed tail followed by the current value."""
    def reverse_print(self, head):
        if not head:
            return []
        return self.reverse_print(head.next) + [head.val]
class Solution1:
    """Iterative approach: collect values front-to-back, then reverse."""
    def reverse_print(self, head):
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        values.reverse()
        return values
class Solution2:
    """Recursive helper that appends values while unwinding the call stack."""
    def reverse_print(self, head):
        collected = []
        def walk(node):
            if node is None:
                return
            walk(node.next)
            collected.append(node.val)
        walk(head)
        return collected
|
import unittest
import os
from robot.utils import *
from robot.utils.asserts import *
class TestNormalizing(unittest.TestCase):
    """Tests for robot.utils normalization helpers (normpath, normalize,
    normalize_tags).  Python 2 era code: assert_equals/assert_equal are the
    robot.utils.asserts spellings."""
    def test_normpath(self):
        # Platform-dependent expectations: POSIX paths keep case; Windows
        # paths (else branch) are lower-cased by normpath.
        if os.sep == '/':
            inputs = [ ('/tmp/', '/tmp'),
                       ('/tmp', '/tmp'),
                       ('/tmp/foo/..', '/tmp'),
                       ('/tmp//', '/tmp'),
                       ('/tmp/./', '/tmp'),
                       ('/var/../opt/../tmp/.', '/tmp'),
                       ('/non/Existing/..', '/non'),
                       ('/', '/') ]
        else:
            inputs = [ ('c:\\temp', 'c:\\temp'),
                       ('C:\\TEMP\\', 'c:\\temp'),
                       ('c:\\Temp\\foo\..', 'c:\\temp'),
                       ('c:\\temp\\\\', 'c:\\temp'),
                       ('c:\\temp\\.\\', 'c:\\temp'),
                       ('C:\\xxx\\..\\yyy\\..\\temp\\.', 'c:\\temp'),
                       ('c:\\Non\\Existing\\..', 'c:\\non') ]
            # Every drive letter should normalize to its lower-case form.
            for x in 'ABCDEFGHIJKLMNOPQRSTUVXYZ':
                base = '%s:\\' % x
                exp = base.lower()
                inputs.append((base, exp))
                inputs.append((base[:2], exp))
                inputs.append((base + '\\foo\\..\\.\\BAR\\\\', exp + 'bar'))
        for inp, exp in inputs:
            assert_equal(normpath(inp), exp, inp)
    def test_normalize_with_defaults(self):
        # Default normalize(): case-folded and all whitespace removed.
        for inp, exp in [ ('', ''),
                          (' ', ''),
                          (' \n\t\r', ''),
                          ('foo', 'foo'),
                          (' f o o ', 'foo'),
                          ('_BAR', '_bar'),
                          ('Fo OBar\r\n', 'foobar'),
                          ('foo\tbar', 'foobar'),
                          ('\n \n \n \n F o O \t\tBaR \r \r \r ', 'foobar') ]:
            assert_equals(exp, normalize(inp))
    def test_normalize_with_caseless(self):
        assert_equals(normalize('Fo o BaR', caseless=False), 'FooBaR')
        assert_equals(normalize('Fo O B AR', caseless=True), 'foobar')
    def test_normalize_with_spaceless(self):
        assert_equals(normalize('Fo o BaR', spaceless=False), 'fo o bar')
        assert_equals(normalize('Fo O B AR', spaceless=True), 'foobar')
    def test_normalize_with_ignore(self):
        # Characters listed in ignore are dropped before normalizing.
        assert_equals(normalize('Foo_ bar', ignore=['_']), 'foobar')
        assert_equals(normalize('Foo_ bar', ignore=['_', 'f', 'o']), 'bar')
        assert_equals(normalize('Foo_ bar', ignore=['_', 'F', 'o']), 'bar')
        assert_equals(normalize('Foo_ bar', ignore=['_', 'f', 'o'],
                                caseless=False), 'Fbar')
        assert_equals(normalize('Foo_\n bar\n', ignore=['\n'],
                                spaceless=False), 'foo_ bar')
    def test_normalize_tags(self):
        # Tags are deduplicated (case/space-insensitively), empties removed,
        # and the result sorted.
        for inp, exp in [ ([], []),
                          (['lower'], ['lower']),
                          (['UPPER', 'MiXeD'], ['MiXeD', 'UPPER']),
                          (['Some spaces here'], ['Some spaces here']),
                          (['remove empty', '', ' ', '\n'], ['remove empty']),
                          (['dupes', 'DUPES', 'DuPeS', 'd u p e s'],['dupes']),
                          (['SORT','1','B','2','a'], ['1','2','a','B','SORT']),
                          (['ALL', 'all', '10', '1', 'A', 'a', '', 'A L L'],
                           ['1', '10', 'A', 'ALL']) ]:
            assert_equals(normalize_tags(inp), exp)
class TestNormalizedDict(unittest.TestCase):
    """Tests for robot.utils.NormalizedDict (case/space-normalized keys).

    NOTE: relies on Python 2 dict semantics — keys()/items()/values()
    return lists (sortable, comparable with zip()) and has_key() exists."""
    def test_default_constructor(self):
        nd = NormalizedDict()
        nd['foo bar'] = 'value'
        assert_equals(nd['foobar'], 'value')
        assert_equals(nd['F oo\nBar'], 'value')
    def test_initial_values(self):
        nd = NormalizedDict({'key': 'value', 'F O\tO': 'bar'})
        assert_equals(nd['key'], 'value')
        assert_equals(nd['K EY'], 'value')
        assert_equals(nd['foo'], 'bar')
    def test_ignore(self):
        nd = NormalizedDict(ignore=['_'])
        nd['foo_bar'] = 'value'
        assert_equals(nd['foobar'], 'value')
        assert_equals(nd['F oo\nB ___a r'], 'value')
    def test_caseless_and_spaceless(self):
        # With both normalizations off, only the exact key matches.
        nd = NormalizedDict(caseless=False, spaceless=False)
        nd['F o o B AR'] = 'value'
        for key in ['foobar', 'f o o b ar', 'FooBAR']:
            assert_false(nd.has_key(key))
        assert_equals(nd['F o o B AR'], 'value')
    def test_has_key_and_contains(self):
        nd = NormalizedDict({'Foo': 'bar'})
        fail_unless(nd.has_key('Foo') and nd.has_key(' f O o '))
        fail_unless('Foo' in nd and 'foo' in nd and 'FOO' in nd)
    def test_original_keys_are_kept(self):
        nd = NormalizedDict()
        nd['Foo'] = nd['a b c'] = nd['UP'] = 1
        keys = nd.keys()
        items = nd.items()
        keys.sort()
        items.sort()
        assert_equals(keys, ['Foo', 'UP', 'a b c'])
        assert_equals(items, [('Foo', 1), ('UP', 1), ('a b c', 1)])
    def test_removing_values(self):
        nd = NormalizedDict({'A':1, 'b':2})
        del nd['a']
        del nd['B']
        assert_equals(nd.data, {})
        assert_false(nd.has_key('a') or nd.has_key('b'))
    def test_removing_values_removes_also_original_keys(self):
        nd = NormalizedDict({'a':1})
        del nd['a']
        assert_equals(nd.data, {})
        assert_equals(nd.keys(), [])
    def test_keys_values_and_items_are_returned_in_same_order(self):
        nd = NormalizedDict()
        for i, c in enumerate('abcdefghijklmnopqrstuvwxyz'):
            nd[c.upper()] = i
            nd[c+str(i)] = 1
        items = nd.items()
        keys = nd.keys()
        values = nd.values()
        assert_equals(items, zip(keys, values))
    def test_len(self):
        nd = NormalizedDict()
        assert_equals(len(nd), 0)
        nd['a'] = nd['b'] = nd['c'] = 1
        assert_equals(len(nd), 3)
    def test_true_and_false(self):
        assert_false(NormalizedDict())
        assert_true(NormalizedDict({'a': 1}))
    def test_copy(self):
        nd = NormalizedDict({'a': 1, 'B': 1})
        cd = nd.copy()
        assert_equals(nd, cd)
        assert_equals(nd.data, cd.data)
        assert_equals(nd._keys, cd._keys)
        assert_equals(nd._normalize, cd._normalize)
        # A copy must not share key bookkeeping with the original.
        nd['C'] = 1
        cd['b'] = 2
        assert_equals(nd._keys, {'a': 'a', 'b': 'B', 'c': 'C'})
        assert_equals(cd._keys, {'a': 'a', 'b': 'B'})
    def test_str(self):
        nd = NormalizedDict({'a': 1, 'B': 1})
        assert_equals(str(nd), "{'a': 1, 'B': 1}")
    def test_update(self):
        nd = NormalizedDict({'a': 1})
        nd.update({'b': 2})
        assert_equals(nd['b'], 2)
        assert_true('b' in nd.keys())
    def test_update_with_kwargs(self):
        # Keyword arguments given to update() win over the mapping.
        nd = NormalizedDict({'a': 0, 'c': 1})
        nd.update({'b': 2, 'c': 3}, b=4, d=5)
        for k, v in [('a', 0), ('b', 4), ('c', 3), ('d', 5)]:
            assert_equals(nd[k], v)
            assert_true(k in nd)
            assert_true(k in nd.keys())
    def test_iter(self):
        nd = NormalizedDict({'a': 0, 'B': 1, 'c': 2})
        assert_equals(sorted(list(nd)), ['B', 'a', 'c'])
        keys = []
        for key in nd:
            keys.append(key)
        assert_equals(sorted(keys), ['B', 'a', 'c'])
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
# _*_coding: utf-8_*_
# Created by yls on 2020/10/27 21:16
import tools.DoExcel
# class ReplaceRelyOnValue():
# def replace_rely_on_value(self): |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Compare arctan(x) with its truncated Maclaurin series on (-1, 1).
x = np.arange(-1, 1, 1e-3)
x1 = np.arange(-0.999, 0.9999, 1e-1)
f = np.arctan(x)
f1 = []
for x2 in x1:
    y = 0
    for j in range(1, 1000):
        # arctan(x) = sum_{j>=1} (-1)**(j+1) * x**(2j-1) / (2j-1)
        # BUG FIX: the original used x2**j, which is not the arctan series.
        y = y + ((-1) ** (j + 1)) * (x2 ** (2 * j - 1) / (2 * j - 1))
    f1.append(y)
plt.plot(x, f)
plt.plot(x1, f1, 'o')
plt.grid()
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.savefig('../figs/7.eps')
plt.show()
|
#!/usr/bin/env python3
import logging
import argparse
import os
from pathlib import Path
import re
import time
import traceback
import glob
from unittest import result
import psycopg2
import psycopg2.extras
import psycopg2.pool
from datetime import datetime, timedelta
from filenames import filename_parser
from image_tools import makeThumb
from image_tools import read_tiff_info
import settings as imgdb_settings
__connection_pool = None
def get_connection():
    """Return a connection from the lazily created module-level pool.

    The pool is built on first use (1-2 connections) with credentials from
    imgdb_settings; callers must hand the connection back via put_connection().
    """
    global __connection_pool
    if __connection_pool is None:
        __connection_pool = psycopg2.pool.SimpleConnectionPool(1, 2, user = imgdb_settings.DB_USER,
                                                                     password = imgdb_settings.DB_PASS,
                                                                     host = imgdb_settings.DB_HOSTNAME,
                                                                     port = imgdb_settings.DB_PORT,
                                                                     database = imgdb_settings.DB_NAME)
    return __connection_pool.getconn()
def put_connection(pooled_connection):
    """Return a connection to the pool (no-op if the pool was never created)."""
    global __connection_pool
    if __connection_pool:
        __connection_pool.putconn(pooled_connection)
def get_modtime(file):
    """Return the last-modification time of *file* (seconds since the epoch)."""
    return os.path.getmtime(file)
def delete_image_from_db(path, dry_run=False):
    """Delete the images row whose path column equals *path*.

    path    -- image path used as the lookup key
    dry_run -- when True, only log the query and return without touching the DB
    """
    logging.debug("Inside delete_image_from_db, path: " + str(path))
    conn = None
    try:
        query = ("DELETE FROM images WHERE path = %s")
        logging.debug("query" + str(query))
        if dry_run:
            logging.info("dry_run=True, return here")
            return
        conn = get_connection()
        # Cursor as context manager guarantees it is closed even on error;
        # the original leaked the cursor when execute/commit raised, and
        # bound execute()'s None return to an unused variable.
        with conn.cursor() as cursor:
            cursor.execute(query, (path,))
        conn.commit()
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        if conn is not None:
            put_connection(conn)
def select_image_path(plate_acquisition_id, well, site, channel):
    """Return all (path, well) rows for the given acquisition/well/site/channel."""
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        sql = ("SELECT path, well "
               "FROM images "
               "WHERE plate_acquisition_id = %s "
               "AND well = %s "
               "AND site = %s "
               "AND channel = %s")
        cursor.execute(sql, (plate_acquisition_id, well, site, channel))
        rows = cursor.fetchall()
        cursor.close()
        return rows
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def get_duplicate_channel_images():
    """Return rows (plate_acquisition_id, plate_barcode, well, site, channel,
    dupecount) for every image coordinate that occurs more than once in the
    images table."""
    logging.info("Inside get_duplicate_channel_images()")
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        dupe_query = ("SELECT plate_acquisition_id, plate_barcode, well, site, channel, count(*) as dupecount"
                      " FROM images"
                      " GROUP BY plate_acquisition_id, plate_barcode, well, site, channel HAVING count(*)> 1")
        cursor.execute(dupe_query)
        dupes = cursor.fetchall()
        cursor.close()
        return dupes
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def deal_with_orfans():
    """Find DB rows whose image file is missing on disk, log them to
    orfans.txt, then delete those rows from the database."""
    missing_paths = find_orfan_images()
    # Keep a record of everything that is about to be removed.
    with open('orfans.txt', 'w+') as report:
        for missing in missing_paths:
            report.write('%s\n' % missing)
    # Delete the orphaned image entries from the database.
    for missing in missing_paths:
        delete_image_from_db(missing, dry_run=False)
def deal_with_dupes():
    """Log which of each duplicated image pair should be kept and deleted.

    For every (acquisition, well, site, channel) with more than one row,
    compare the file modification times of the first two paths and log the
    newer one as the keeper.  NOTE(review): this only logs — no row or file
    is actually deleted here; confirm whether deletion was intended.
    """
    dupes = get_duplicate_channel_images()
    for dupe in dupes:
        paths = select_image_path(
            dupe['plate_acquisition_id'],
            dupe['well'],
            dupe['site'],
            dupe['channel'])
        # Only deal with the first two; if there are more, just run everything again.
        path_0 = paths[0]['path']
        modtime_path_0 = get_modtime(path_0)
        path_1 = paths[1]['path']
        modtime_path_1 = get_modtime(path_1)
        # Keep the file with the latest modification time (seconds since 1970-01-01).
        # Typo fix: "Leavin" -> "Leaving" in the log messages.
        if modtime_path_0 > modtime_path_1:
            logging.info("Leaving image path_0, modtime: " + str(modtime_path_0) + ", path: " + str(path_0))
            logging.info("Delete image path_1, modtime: " + str(modtime_path_1) + ", path: " + str(path_1))
        else:
            logging.info("Leaving image path_1, modtime: " + str(modtime_path_1) + ", path: " + str(path_1))
            logging.info("Delete image path_0, modtime: " + str(modtime_path_0) + ", path: " + str(path_0))
def find_orfan_images():
    """Return all paths stored in the images table that no longer exist on disk."""
    logging.info("Inside find_orfan_images()")
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cursor.execute("SELECT path"
                       " FROM images"
                       " ORDER BY plate_acquisition_id")
        missing = []
        for index, row in enumerate(cursor):
            # Progress logging every 10000 rows.
            if index % 10000 == 0:
                logging.debug("files verified counter: " + str(index))
            image_path = row['path']
            if not os.path.exists(image_path):
                logging.debug(image_path)
                missing.append(image_path)
        return missing
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def add_more_plate_acq():
    """Backfill the plate_acquisition table from the images table.

    Iterates every image row and, via select_or_insert_plate_acq, inserts a
    plate_acquisition entry for any (barcode, timepoint, folder) combination
    that is missing.  Microscope is hard-coded to 'ImageXpress'.
    """
    logging.info("Inside add_more_plate_acq()")
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        query = ("SELECT plate_barcode, timepoint, imaged, folder"
                 " FROM images")
        cursor.execute(query)
        counter = 0
        for row in cursor:
            plate_barcode = row['plate_barcode']
            timepoint = row['timepoint']
            imaged_timepoint = row['imaged']
            folder = row['folder']
            # Progress logging every 10000 rows.
            if counter % 10000 == 0:
                logging.debug("files verified counter: " + str(counter))
            select_or_insert_plate_acq(plate_barcode, 'ImageXpress', timepoint, imaged_timepoint, folder)
            counter += 1
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def select_or_insert_plate_acq(plate_barcode, microscope, timepoint, imaged_timepoint, folder):
    """Return the id of an existing plate acquisition, inserting one if absent."""
    existing = select_plate_acq_id(plate_barcode, timepoint, folder)
    if existing is not None:
        return existing
    return insert_plate_acq(plate_barcode, microscope, timepoint, imaged_timepoint, folder)
def select_channel_map_id(plate_barcode, timepoint):
    """Return the channel_map_id of the acquisition matching barcode+timepoint.

    Returns fetchone()'s result (a one-element row), or None when no row matches.
    """
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor()
        cursor.execute("SELECT channel_map_id "
                       "FROM plate_acquisition "
                       "WHERE plate_barcode = %s "
                       "AND timepoint = %s ",
                       (plate_barcode, timepoint))
        row = cursor.fetchone()
        cursor.close()
        return row
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def select_plate_acq_id(plate_barcode, timepoint, folder):
    """Return the id row of the acquisition matching barcode, timepoint and
    folder, or None when no such acquisition exists."""
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor()
        cursor.execute("SELECT id "
                       "FROM plate_acquisition "
                       "WHERE plate_barcode = %s "
                       "AND timepoint = %s "
                       "AND folder = %s ",
                       (plate_barcode, timepoint, folder))
        row = cursor.fetchone()
        cursor.close()
        return row
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def select_finished_plate_acq_folder():
    """Return the folder of every plate acquisition marked as finished."""
    conn = None
    try:
        conn = get_connection()
        cursor = conn.cursor()
        cursor.execute("SELECT folder "
                       "FROM plate_acquisition "
                       "WHERE finished IS NOT NULL")
        # Flatten the single-column result tuples into a plain list.
        folders = [row[0] for row in cursor.fetchall()]
        cursor.close()
        return folders
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def insert_plate_acq(plate_barcode, microscope, timepoint, imaged_timepoint, folder):
    """Insert a plate_acquisition row and return its generated id.

    Looks up the channel_map_id for barcode/timepoint first; the insert is
    committed before returning.
    """
    conn = None
    try:
        channel_map_id = select_channel_map_id(plate_barcode, timepoint)
        query = "INSERT INTO plate_acquisition(plate_barcode, imaged, microscope, channel_map_id, timepoint, folder) VALUES(%s, %s, %s, %s, %s, %s) RETURNING id"
        conn = get_connection()
        logging.info("query: " + query)
        logging.info("folder: " + str(folder))
        cursor = conn.cursor()
        cursor.execute(query, (plate_barcode,
                               imaged_timepoint,
                               microscope,
                               channel_map_id,
                               timepoint,
                               folder
                               ))
        # RETURNING id yields the new primary key as the first column.
        plate_acq_id = cursor.fetchone()[0]
        cursor.close()
        conn.commit()
        return plate_acq_id
    except Exception as err:
        logging.exception("Message")
        raise err
    finally:
        put_connection(conn)
def find_dirs_containing_img_files_recursive(path):
    """Yield the lowest-level directories under *path* that contain image files.

    Directories whose name starts with '.' are skipped.  A directory is
    yielded (as a Path) at most once: the scan of its entries stops at the
    first image file found.  Recurses into subdirectories first.
    """
    for entry in os.scandir(path):
        # recurse into (non-hidden) directories
        if not entry.name.startswith('.') and entry.is_dir():
            yield from find_dirs_containing_img_files_recursive(entry.path)
        if entry.is_file():
            # BUG FIX: '.tiff' was written as 'tiff' (missing dot), which
            # also matched any filename merely ending in the letters "tiff".
            if entry.path.lower().endswith(('.png', '.tif', '.tiff')):
                yield Path(entry.path).parent
                break
#
# Main entry for script
#
try:
    #
    # Configure logging
    #
    logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)
    rootLogger = logging.getLogger()
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-targ', '--testarg', help='Description for xxx argument',
                        default='default-value')
    args = parser.parse_args()
    logging.debug("all args" + str(args))
    logging.debug("args.testarg" + str(args.testarg))
    start = time.time()
    # All maintenance jobs are currently disabled; uncomment the one to run.
    # deal_with_orfans()
    # deal_with_dupes()
    #update_plate_acq()
    # add_more_plate_acq()
    # get all image dirs within root dirs
    #img_dirs = set(find_dirs_containing_img_files_recursive("/share/mikro/IMX/MDC_pharmbio/"))
    print("elapsed: " + str(time.time() - start))
except Exception as e:
    # Catch-all so the elapsed-time/traceback is always reported on failure.
    print(traceback.format_exc())
    logging.info("Exception out of script")
|
import random
class Game:
    """Simulates a single game between two teams.

    Team objects must provide: name (str), opponents (list of opponent
    names), stats ({stat name: {"value": str}}), getResult(opponent_name),
    HOS (hardness-of-schedule total) and totGames (games played).
    A None team is treated as a bye and the other team advances.
    """
    def __init__(self, teamA, teamB, extra=True):
        self.teamA = teamA
        self.teamB = teamB
        self.extra = extra  # True -> print intermediate scoring details
        # Schedule hardness can only be normalized when both teams exist.
        if (teamA != None and teamB != None):
            self.teamAHOS, self.teamBHOS = self.getNormalHardness()
    def play(self):
        """Play the game and return the winning team (team B wins exact ties)."""
        if (self.teamA == None):
            return self.teamB
        if (self.teamB == None):
            return self.teamA
        scores = self.getScores()
        if scores[0] > scores[1]:
            print(self.teamA.name + " won the Game.")
            return self.teamA
        else:
            print(self.teamB.name + " won the Game.")
            return self.teamB
    def getScores(self):
        """Return (scoreA, scoreB): a randomized linear combination of
        common-opponent results and schedule-normalized stat points."""
        bOpponents = self.teamB.opponents
        aOpponents = self.teamA.opponents
        # Opponents both teams have played (deduplicated).
        similar = []
        for i in bOpponents:
            if i in aOpponents:
                if i not in similar:
                    similar += [i]
        aSimPoints = 0
        bSimPoints = 0
        for i in similar:
            aSimPoints += self.teamA.getResult(i)
            bSimPoints += self.teamB.getResult(i)
        # Head-to-head results count as well.
        if (self.teamA.name in bOpponents):
            aSimPoints += self.teamA.getResult(self.teamB.name)
            bSimPoints += self.teamB.getResult(self.teamA.name)
        aStats = 0
        bStats = 0
        # Each stat is centered on a reference value and weighted.
        for key in self.teamA.stats.keys():
            if (key == "Scoring Offense"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 88.8) * -1) + 100) * 1.8
                bStats += (((float(self.teamB.stats[key]["value"]) - 88.8) * -1) + 100) * 1.8
            if (key == "Scoring Defense"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 55.1) * -1) + 100) * 1.7
                bStats += (((float(self.teamB.stats[key]["value"]) - 55.1) * -1) + 100) * 1.7
            if (key == "Turnover Margin"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 5.8) * -1) + 100) * 1.5
                bStats += (((float(self.teamB.stats[key]["value"]) - 5.8) * -1) + 100) * 1.5
            if (key == "Assist Turnover Ratio"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 1.76) * -1) + 100) * 2
                # BUG FIX: the B line was missing parentheses and computed
                # (...) + 100*2 instead of ((...) + 100) * 2 like the A line.
                bStats += (((float(self.teamB.stats[key]["value"]) - 1.76) * -1) + 100) * 2
            if (key == "Field-Goal Percentage"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 53.2) * -1) + 100) * 2
                bStats += (((float(self.teamB.stats[key]["value"]) - 53.2) * -1) + 100) * 2
            if (key == "Field-Goal Percentage Defense"):
                aStats += (((float(self.teamA.stats[key]["value"]) - 36.7) * -1) + 100) * 2
                bStats += (((float(self.teamB.stats[key]["value"]) - 36.7) * -1) + 100) * 2
            if (key == "Three-Point Field-Goal Percentage"):
                aStats += float(self.teamA.stats[key]["value"]) * (float(self.teamA.stats["Three-Point Field Goals Per Game"]["value"])/4) * 1.5
                bStats += float(self.teamB.stats[key]["value"]) * (float(self.teamB.stats["Three-Point Field Goals Per Game"]["value"])/4) * 1.5
        # Scale the stat points down by schedule hardness.
        aNormHard = aStats * (1/(self.teamAHOS * 2.5)) * 1000
        bNormHard = bStats * (1/(self.teamBHOS * 2.5)) * 1000
        if (self.extra):
            print(self.teamA.name, aStats, self.teamAHOS)
            print(self.teamB.name, bStats, self.teamBHOS)
        if (self.extra):
            print(self.teamA.name)
        aFinal = self.linearCombo(aSimPoints, aNormHard)
        if (self.extra):
            print(self.teamB.name)
        bFinal = self.linearCombo(bSimPoints, bNormHard)
        return aFinal, bFinal
    def getNormalHardness(self):
        """Return both teams' HOS totals scaled to the same number of games.

        The team with more games has its average-per-game hardness removed
        once per extra game so the totals are comparable.
        """
        teamAHardness = self.teamA.HOS
        teamBHardness = self.teamB.HOS
        totA = self.teamA.totGames
        totB = self.teamB.totGames
        avgA = teamAHardness/totA
        avgB = teamBHardness/totB
        if totA > totB:
            while(totA > totB):
                teamAHardness = teamAHardness - avgA
                totA = totA - 1
        if totB > totA:
            while(totB > totA):
                teamBHardness = teamBHardness - avgB
                totB = totB - 1
        return teamAHardness, teamBHardness
    def linearCombo(self, sim, stat):
        """Combine sim and stat points with small random variation
        (stat weight varies +/-10%, sim weight +/-5%)."""
        varChangeStat = (random.randint(-10, 10)/100) + 1
        varChangeSim = (random.randint(-5, 5)/100) + 1
        result = varChangeSim*sim + varChangeStat*stat
        if (self.extra):
            print(sim, "*", varChangeSim, "+", stat, "*", varChangeStat, "=", result)
        return result
|
biggest = 0  # largest divisor count seen so far across triangle() calls
def triangle(z):
    """Count the divisors of *z*, track the running maximum, and report it.

    Divisors are found in pairs up to sqrt(z); a perfect-square root is
    counted only once.  Returns the divisor count (the original returned
    None; the return value is a backward-compatible addition).
    """
    global biggest
    divisors = []  # renamed from "list", which shadowed the builtin
    for c in range(1, int(z ** .5) + 1):
        if z % c == 0:
            divisors.append(c)
            if c ** 2 != z:
                # Integer division keeps the paired divisor an int on
                # Python 3 (the original "z / c" targeted Python 2).
                divisors.append(z // c)
    length = len(divisors)
    if length >= biggest:
        biggest = length
        # print() works on both Python 2 and 3 (the original used the
        # Python 2 print statement).
        print("%r is the triangle with the most divisors (at %r) so far" % (z, length))
    return length
# Walk triangular numbers starting at T(2000) = 2000*2001/2 = 2001000;
# adding the next natural number (b) yields the following triangular number.
a = 2001000
b = 2001
while a < 100000000:
    triangle(a)
    a = a + b
    b = b + 1
# The answer is 76,576,500
import datetime
from unittest import mock
import pytest
import pytz
from django.core import mail
from django.utils import timezone
from freezegun import freeze_time
from customers.services import SMSNotificationService
from payments.enums import OrderStatus
from payments.models import Order
@freeze_time("2020-10-20T08:00:00Z")
@pytest.mark.parametrize(
    "order", ["berth_order", "winter_storage_order"], indirect=True
)
@pytest.mark.parametrize(
    "notification_sent,until_due_date,should_be_sent",
    [
        (None, 8, False),
        (None, 7, True),
        (None, 6, True),
        (None, 3, True),
        (None, 1, True),
        (None, -1, False),
        # 7 days before and after
        (datetime.datetime(2020, 10, 19, 7), 8, False),  # Before 7 day threshold
        (datetime.datetime(2020, 10, 19, 7), 7, True),  # Should send 7 day reminder
        (datetime.datetime(2020, 10, 20, 7), 7, False),  # Already sent 7 day reminder
        (datetime.datetime(2020, 10, 18, 7), 6, True),  # Should send 7 day reminder
        (datetime.datetime(2020, 10, 20, 7), 6, False),  # Already sent 7 day reminder
        # 3 days before and after
        (datetime.datetime(2020, 10, 19, 7), 3, True),  # Should send 3 day reminder
        (datetime.datetime(2020, 10, 20, 7), 3, False),  # Already sent 3 day reminder
        (datetime.datetime(2020, 10, 18, 7), 2, True),  # Should send 3 day reminder
        (datetime.datetime(2020, 10, 20, 7), 2, False),  # Already sent 3 day reminder
        # 1 days before and after
        (datetime.datetime(2020, 10, 19, 7), 1, True),  # Should send 1 day reminder
        (datetime.datetime(2020, 10, 20, 7), 1, False),  # Already sent 1 day reminder
        (datetime.datetime(2020, 10, 18, 7), 0, True),  # Should send 1 day reminder
        (datetime.datetime(2020, 10, 20, 7), 0, False),  # Already sent 1 day reminder
        # Past due date
        (datetime.datetime(2020, 10, 1, 7), -1, False),
    ],
)
def test_payment_reminder_is_sent(
    notification_sent,
    until_due_date,
    should_be_sent,
    order: Order,
    notification_template_orders_approved,
):
    """A payment reminder should be sent 7, 3 and 1 days before the due date.

    notification_sent -- naive timestamp of a previously sent reminder, or None
    until_due_date    -- days from the frozen "now" to the order's due date
    should_be_sent    -- whether a reminder (email + SMS) is expected
    """
    # Stored timestamps are timezone-aware; the parametrized values are naive.
    sent_timestamp = (
        notification_sent.replace(tzinfo=pytz.UTC) if notification_sent else None
    )
    order.payment_notification_sent = sent_timestamp
    order.status = OrderStatus.OFFERED
    order.due_date = timezone.localdate() + datetime.timedelta(days=until_due_date)
    order.save()
    with mock.patch.object(
        SMSNotificationService, "send", return_value=None
    ) as mock_send_sms:
        changes = Order.objects.send_payment_reminders_for_unpaid_orders()
    # Re-fetch to observe the changes made by the manager method.
    order = Order.objects.get(pk=order.pk)
    if should_be_sent:
        assert order.payment_notification_sent == timezone.now()
        assert changes == 1
        assert len(mail.outbox) == 1
        mock_send_sms.assert_called_once()
    else:
        assert order.payment_notification_sent == sent_timestamp
        assert changes == 0
        assert len(mail.outbox) == 0
        mock_send_sms.assert_not_called()
@freeze_time("2020-10-20T08:00:00Z")
@pytest.mark.parametrize(
    "order", ["berth_order", "winter_storage_order"], indirect=True
)
@pytest.mark.parametrize("order_status", [status for status in OrderStatus])
def test_payment_reminder_is_sent_only_to_offered_orders(
    order: Order, order_status: OrderStatus, notification_template_orders_approved
):
    """Only orders in OFFERED status get a payment reminder, even when the
    due date is exactly at the 7-day threshold."""
    order.status = order_status
    order.due_date = timezone.localdate() + datetime.timedelta(days=7)
    order.save()
    with mock.patch.object(
        SMSNotificationService, "send", return_value=None
    ) as mock_send_sms:
        changes = Order.objects.send_payment_reminders_for_unpaid_orders()
    # Re-fetch to observe the changes made by the manager method.
    order = Order.objects.get(pk=order.pk)
    if order_status == OrderStatus.OFFERED:
        assert order.payment_notification_sent == timezone.now()
        assert changes == 1
        assert len(mail.outbox) == 1
        mock_send_sms.assert_called_once()
    else:
        assert order.payment_notification_sent is None
        assert changes == 0
        assert len(mail.outbox) == 0
        mock_send_sms.assert_not_called()
|
from flask import Flask, render_template
import os
import threading , time
import cv2
import numpy as np
# Shared webcam handle, opened once at import time (device index 0).
cap = cv2.VideoCapture(0)
# Signal flag polled by the capture loop; set True (via /stop) to stop it.
webcam_flag = False
def webcamCap():
    """Capture frames and show them as grayscale until stopped.

    Stops when the module-level webcam_flag becomes True (set by /stop) or
    the user presses 'q' in the preview window.  Releases the camera and
    closes the window on exit.
    """
    # Declared once at function scope; the original repeated the
    # "global webcam_flag" declaration inside the loop.
    global webcam_flag
    while True:
        ret, frame = cap.read()
        # RGB -> grey conversion (optional)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('Grey SelfCam', gray_frame)
        print(webcam_flag)
        if webcam_flag:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# Background thread for the capture loop.  NOTE(review): a Thread object can
# only be started once, so /start can work only a single time per process.
webcam_thread = threading.Thread(target = webcamCap)
app = Flask(__name__)
app.secret_key = os.urandom(24)
@app.route('/start')
def startRender():
    """Start the webcam capture loop in its background thread."""
    global webcam_flag
    webcam_flag = False
    # BUG FIX: run() executed the capture loop synchronously inside the
    # request handler (blocking Flask); start() launches the thread.
    webcam_thread.start()
    print('[STATUS] Thread running...')
    # Flask views must return a response; the original returned None,
    # which makes Flask raise an error.
    return 'started'
@app.route('/stop')
def stopRender():
    """Signal the capture loop to stop."""
    global webcam_flag
    webcam_flag = True
    print('[STATUS] Thread stops...')
    # Flask views must return a response; the original returned None.
    return 'stopped'
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
if __name__ == "__main__":
    app.run(debug = True)  # development server; debug enables the reloader
import http.client
import importlib
import os
import re
import urllib.parse
from wsgiref.headers import Headers
from wsgiref.simple_server import make_server
class Request:
    """Thin read-only wrapper around a WSGI environ dict."""
    def __init__(self, environ):
        self.environ = environ
    @property
    def args(self):
        """Query-string parameters as a flat dict (first value wins)."""
        parsed = urllib.parse.parse_qs(self.environ['QUERY_STRING'])
        return {name: values[0] for name, values in parsed.items()}
    @property
    def path(self):
        """The request path (PATH_INFO)."""
        return self.environ['PATH_INFO']
class Response:
    """A WSGI response: iterable of encoded body chunks plus status/headers."""
    def __init__(self, response=None, status=200, charset='utf-8', content_type='text/html'):
        # Normalize the body to a list of chunks: None -> empty,
        # single str/bytes -> one-element list, otherwise keep as given.
        if response is None:
            self.response = []
        elif isinstance(response, (str, bytes)):
            self.response = [response]
        else:
            self.response = response
        self.charset = charset
        self.headers = Headers()
        self.headers.add_header('content-type', f'{content_type}; charset={charset}')
        self._status = status
    @property
    def status(self):
        """Status line such as '200 OK'."""
        reason = http.client.responses.get(self._status, 'UNKNOWN STATUS')
        return f'{self._status} {reason}'
    def __iter__(self):
        """Yield every body chunk as bytes (WSGI requires an iterable of bytes)."""
        for chunk in self.response:
            yield chunk if isinstance(chunk, bytes) else chunk.encode(self.charset)
class NotFound(Exception):
    """Raised by Router.match when no route pattern matches the path."""
    pass
class Router:
    """Maps regex path patterns to view callables, in registration order."""
    def __init__(self):
        self.routing_table = []
    def add_route(self, pattern, callback):
        """Register *callback* for paths matching the regex *pattern*."""
        self.routing_table.append((pattern, callback))
    def match(self, path):
        """Return (callback, captured groups) for the first matching pattern.

        Raises NotFound when no registered pattern matches *path*.
        """
        for pattern, callback in self.routing_table:
            found = re.match(pattern, path)
            if found:
                return (callback, found.groups())
        raise NotFound()
def application(environ, start_response):
    """WSGI entry point: dispatch the request via the app's route table.

    The application module is named by the YOWF_APP environment variable and
    must expose a Router named `routes`.  Unmatched paths get a 404 page.
    """
    # Module specified in the command line while running the server
    module = os.environ['YOWF_APP']
    # Import the module
    module = importlib.import_module(module)
    # Get the router module specified in the app
    router = getattr(module, 'routes')
    try:
        request = Request(environ)
        # Lookup routing table to call the view function
        callback, args = router.match(request.path)
        response = callback(request, *args)
    except NotFound:
        response = Response("<h1>Not Found!</h1>", status=404)
    # WSGI wants you to do this.
    start_response(response.status, response.headers.items())
    return iter(response)
if __name__ == '__main__':
    # Serve the app on all interfaces, port 5000, until interrupted.
    with make_server('', 5000, application) as server:
        server.serve_forever()
|
import cv2 as cv
def input_boolean(prompt):
    """Prompt until the user answers yes or no; return the answer as a bool.

    Accepts y/yes/n/no in any letter case and ignores surrounding whitespace
    (the original only matched exact lowercase input); re-prompts otherwise.
    """
    answers = {'y': True, 'yes': True, 'n': False, 'no': False}
    while True:
        print(prompt)
        reply = input('> ').strip().lower()
        if reply in answers:
            return answers[reply]
def print_prediction(input_image, ground_truth, predicted_image):
    """Display input, ground truth and prediction side by side until a key press."""
    windows = (('Input image', input_image),
               ('Ground truth', ground_truth),
               ('Predicted image', predicted_image))
    # Create all (freely resizable) windows first, then show the images.
    for title, _ in windows:
        cv.namedWindow(title, cv.WINDOW_FREERATIO)
    for title, image in windows:
        cv.imshow(title, image)
    cv.waitKey(0)
    cv.destroyAllWindows()
|
"""
HSC-specific overrides for FgcmBuildStarsTable
"""
import os.path
from lsst.utils import getPackageDir
# Minimum number of observations per band for a star to be considered for calibration
config.minPerBand = 2
# Match radius to associate stars from src catalogs (arcseconds)
config.matchRadius = 1.0
# Isolation radius: stars must be at least this far from a neighbor to be considered (arcseconds)
config.isolationRadius = 2.0
# Measure the stellar density with healpix nside=densityCutNside
config.densityCutNside = 128
# If there are more than densityCutMaxPerPixel stars per pixel, sample them
config.densityCutMaxPerPixel = 1500
# Dictionary that maps "filters" (instrumental configurations) to "bands"
# (abstract names). All filters must be listed in the LUT.
config.filterMap = {'g': 'g', 'r': 'r', 'i': 'i', 'z': 'z', 'y': 'y'}
# Which bands are required to be observed to be considered a calibration star
config.requiredBands = ['g', 'r', 'i', 'z']
# The reference CCD is a good CCD used to select visit to speed up the scanning
config.referenceCCD = 40
# If smatch matching is available, use this nside. Not used with default LSST stack.
config.matchNside = 4096
# A star must be observed in one of these bands to be considered as a calibration star
config.primaryBands = ['i']
# Match reference catalog as additional constraint on calibration
config.doReferenceMatches = True
# Subtract the local background before performing calibration?
config.doSubtractLocalBackground = True
# Number of visits read between checkpoints
config.nVisitsPerCheckpoint = 100
# Reference object loader configuration parameters
config.fgcmLoadReferenceCatalog.refObjLoader.ref_dataset_name = 'ps1_pv3_3pi_20170110'
config.fgcmLoadReferenceCatalog.refFilterMap = {'g': 'g', 'r': 'r', 'i': 'i', 'z': 'z', 'y': 'y'}
config.fgcmLoadReferenceCatalog.applyColorTerms = True
hscConfigDir = os.path.join(getPackageDir('obs_subaru'), 'config')
config.fgcmLoadReferenceCatalog.colorterms.load(os.path.join(hscConfigDir, 'colorterms.py'))
config.fgcmLoadReferenceCatalog.referenceSelector.doSignalToNoise = True
config.fgcmLoadReferenceCatalog.referenceSelector.signalToNoise.fluxField = 'i_flux'
config.fgcmLoadReferenceCatalog.referenceSelector.signalToNoise.errField = 'i_fluxErr'
config.fgcmLoadReferenceCatalog.referenceSelector.signalToNoise.minimum = 10.0
|
#!/usr/bin/env python3
# coding=utf-8
"""
githubAutomatic takeover
"""
import json
import base64
import requests
from config import settings
# Default browser-like headers.
# NOTE(review): HEADERS appears unused below — github_takeover builds its own
# auth headers; confirm whether other callers rely on it before removing.
HEADERS = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) "
                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/63.0.3239.84 Safari/537.36",
}
def github_takeover(url):
    """Attempt an automatic GitHub Pages subdomain takeover for ``url``.

    Creates a repository named after the dangling domain, commits an
    index.html and a CNAME file pointing at ``url``, then enables GitHub
    Pages on the new repository. Credentials come from ``config.settings``.
    """
    # Read config configuration file
    repo_name = url  # the takeover repo is named after the target domain
    print('[*]Reading configuration file')
    user = settings.github_api_user
    token = settings.github_api_token
    headers = {
        "Authorization": 'token ' + token,
        # switcheroo-preview media type enables the (preview) Pages API.
        "Accept": "application/vnd.github.switcheroo-preview+json"
    }
    repos_url = 'https://api.github.com/repos/' + user + '/' + repo_name
    repos_r = requests.get(url=repos_url, headers=headers)
    # Verify that the token is correct
    if 'message' in repos_r.json():
        if repos_r.json()['message'] == 'Bad credentials':
            print('[*]Please check if the Token is correct')
        elif repos_r.json()['message'] == 'Not Found':
            # 'Not Found' -> the repository does not exist yet, create it.
            print('[*]Building takeover library')  # Generate takeover library
            creat_repo_dict = {
                "name": repo_name,
                "description": "This is a subdomain takeover Repository",
            }
            creat_repo_url = 'https://api.github.com/user/repos'
            creat_repo_r = requests.post(url=creat_repo_url,
                                         headers=headers,
                                         data=json.dumps(creat_repo_dict))
            creat_repo_status = creat_repo_r.status_code
            # 201 Created -> repo exists, now push the takeover payload.
            if creat_repo_status == 201:
                print('[*]Create a takeover library' + repo_name + 'Success, automatic takeover is in progress')
                # Take over file generation
                # index.html file
                html = b'''
                <html>
                <p>Subdomain Takerover Test!</>
                </html>
                '''
                # Contents API requires base64-encoded file bodies.
                html64 = base64.b64encode(html).decode('utf-8')
                html_dict = {
                    "message": "my commit message",
                    "committer": {
                        "name": "user",  # Submit id, optional item
                        "email": "user@163.com"  # Same as above
                    },
                    "content": html64
                }
                # CNAME file
                cname_url = bytes(url, encoding='utf-8')
                cname_url64 = base64.b64encode(cname_url).decode('utf-8')
                url_dict = {
                    "message": "my commit message",
                    "committer": {
                        "name": "user",
                        "email": "user@163.com"
                    },
                    "content": cname_url64
                }
                base_url = 'https://api.github.com/repos/'
                html_url = base_url + user + '/' + repo_name + '/contents/index.html'
                url_url = base_url + user + '/' + repo_name + '/contents/CNAME'
                html_r = requests.put(url=html_url, data=json.dumps(html_dict),
                                      headers=headers)  # Upload index.html
                cname_r = requests.put(url=url_url, data=json.dumps(url_dict),
                                       headers=headers)  # Upload CNAME
                rs = cname_r.status_code
                if rs == 201:
                    print('[*]The takeover library was successfully generated and Github pages is being opened')
                    page_url = "https://api.github.com/repos/" + user + "/" + url + "/pages"
                    page_dict = {
                        "source": {
                            "branch": "master"
                        }
                    }
                    page_r = requests.post(url=page_url,
                                           data=json.dumps(page_dict),
                                           headers=headers)  # Open page
                    if page_r.status_code == 201:
                        print('[+]Automatic takeover succeeded, please visit http later://' + str(url) + 'View Results')
                    else:
                        print('[+] Failed to open Github pages, please check the network or try again later')
                else:
                    print('[+] Failed to generate the takeover library, please check the network or try again later')
    # No 'message' key -> the repo already exists; warn about the name clash.
    elif url in repos_r.json()['name']:
        print('[*]Failed to generate takeover library, please check https://github.com/' + user +
              '?tab=repositories whether there is a takeover library with the same name')
|
def handler(doc):
    """Register the swagger documentation routes on a Quart application.

    Builds a blueprint (serving static assets from ``doc.static_dir``) with:
    the rendered doc page, optionally the editor page, and — when no external
    config URL is set — a JSON endpoint exposing ``doc.get_config``.
    Registers the blueprint on ``doc.app`` as a side effect.
    """
    # Imported lazily so this module loads even when quart is absent
    # (see match() below, which gates on the import).
    from quart import Blueprint, request
    from quart.json import jsonify
    swagger_blueprint = Blueprint(
        doc.blueprint_name,
        __name__,
        url_prefix=doc.url_prefix,
        static_url_path=doc.static_uri_relative,
        static_folder=doc.static_dir,
        root_path=doc.static_dir
    )
    # Main documentation page.
    @swagger_blueprint.route(
        doc.root_uri_relative(slashes=True), methods=['GET'])
    async def swagger_blueprint_doc_handler():
        return doc.doc_html
    # Optional interactive editor page.
    if doc.editor:
        @swagger_blueprint.route(
            doc.editor_uri_relative(slashes=True), methods=['GET'])
        async def swagger_blueprint_editor_handler():
            return doc.editor_html
    # Serve the swagger config ourselves only when no external URL is given.
    if doc.config_rel_url is None:
        @swagger_blueprint.route(doc.swagger_json_uri_relative, methods=['GET'])
        async def swagger_blueprint_config_handler():
            return jsonify(doc.get_config(request.host))
    doc.app.register_blueprint(
        blueprint=swagger_blueprint, url_prefix=doc.url_prefix)
def match(doc):
    """Return the quart handler when ``doc.app`` is a Quart app, else None.

    Also returns None when quart is not installed.
    """
    try:
        from quart import Quart
    except ImportError:
        return None
    return handler if isinstance(doc.app, Quart) else None
|
#! /usr/bin/env python
##########################################################################
# Hopla - Copyright (C) AGrigis, 2015
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import unittest
import os
import tempfile
import shutil
import multiprocessing
import time
# Hopla import
# Apparently the 'hopla' modules must be imported after coverage is started
import hopla as root
from hopla.converter import hopla
from hopla.workers import worker
from hopla.workers import qsub_worker
from hopla.signals import FLAG_ALL_DONE
from hopla.signals import FLAG_WORKER_FINISHED_PROCESSING
class TestSchedulerHopla(unittest.TestCase):
    """Exercise the hopla scheduler: local worker, qsub worker, and the
    high-level ``hopla`` entry point in local and cluster modes.
    """
    def setUp(self):
        """Locate the demo script shipped with the hopla package."""
        self.demodir = os.path.abspath(os.path.dirname(root.__file__))
        self.script = os.path.join(self.demodir, "demo", "my_ls_script.py")
    def create_jobs(self):
        """Fill a task queue with five identical demo commands.

        Returns the (tasks, returncodes) queue pair; the tasks queue is
        terminated with FLAG_ALL_DONE.
        """
        tasks = multiprocessing.Queue()
        returncodes = multiprocessing.Queue()
        command = [self.script, "-d", self.demodir, "-l", "1", "2"]
        for index in range(5):
            tasks.put(("job_{0}".format(index), list(command)))
        tasks.put(FLAG_ALL_DONE)
        return tasks, returncodes
    def test_worker_execution(self):
        """The local worker processes every job and signals completion."""
        tasks, returncodes = self.create_jobs()
        worker(tasks, returncodes)
        time.sleep(1)
        results = []
        while not returncodes.empty():
            results.append(returncodes.get())
        # Five job results plus the terminating sentinel.
        self.assertEqual(6, len(results))
        self.assertEqual(FLAG_WORKER_FINISHED_PROCESSING, results[-1])
    def test_qsubworker_execution(self):
        """The cluster qsub worker behaves like the local one."""
        logdir = tempfile.mkdtemp()
        tasks, returncodes = self.create_jobs()
        qsub_worker(tasks, returncodes, logdir, "DUMMY")
        time.sleep(1)
        results = []
        while not returncodes.empty():
            results.append(returncodes.get())
        self.assertEqual(6, len(results))
        self.assertEqual(FLAG_WORKER_FINISHED_PROCESSING, results[-1])
        shutil.rmtree(logdir)
    def test_local_execution(self):
        """Smoke-test the hopla entry point with local CPUs."""
        logfile = tempfile.NamedTemporaryFile(suffix=".log").name
        hopla(self.script, d=[self.demodir, self.demodir], l=[2, 3],
              fbreak=[False, True], verbose=0,
              hopla_iterative_kwargs=["d", "fbreak"], hopla_logfile=logfile,
              hopla_cpus=4, hopla_optional=["fbreak", "verbose"])
        os.remove(logfile)
    def test_cluster_execution(self):
        """Smoke-test the hopla entry point in cluster (qsub) mode."""
        logfile = tempfile.NamedTemporaryFile(suffix=".log").name
        logdir = tempfile.mkdtemp()
        hopla(self.script, d=[self.demodir, self.demodir], l=[2, 3],
              fbreak=[False, True], verbose=0,
              hopla_iterative_kwargs=["d", "fbreak"], hopla_logfile=logfile,
              hopla_cpus=4, hopla_optional=["fbreak", "verbose"],
              hopla_cluster=True, hopla_cluster_logdir=logdir,
              hopla_cluster_queue="DUMMY")
        shutil.rmtree(logdir)
        os.remove(logfile)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python3
"""Numerical integration in python using the Lotka-Volterra model"""
__appname__ = "LV1.py"
__author__ = "Joseph Palmer <joseph.palmer18@imperial.ac.uk>"
__version__ = "0.0.1"
__license__ = "License for this code/"
__date__ = "Nov-2018"
## imports ##
import sys

import numpy as np
import scipy as sc
import scipy.stats
import scipy.integrate as intergrate
import matplotlib.pylab as p
def dCR_dt(pops, t=0, params=None):
    """Growth rates of the consumer and resource populations.

    Implements the Lotka-Volterra equations
    dR/dt = r*R - a*R*C and dC/dt = -z*C + e*a*R*C.

    :param pops: sequence (R, C) of current population densities
    :param t: float, current time (unused; required by odeint's signature)
    :param params: optional (r, a, z, e) tuple; defaults to the module-level
        parameter values, keeping the original call signature working.
    :return: numpy array [dR/dt, dC/dt]
    """
    if params is None:
        # Fall back to the module-level parameters defined below.
        params = (r, a, z, e)
    growth, attack, death, efficiency = params
    R = pops[0]
    C = pops[1]
    dRdt = growth * R - attack * R * C
    dCdt = -death * C + efficiency * attack * R * C
    # BUG FIX: modern SciPy removed its re-exported NumPy aliases
    # (sc.array); use numpy directly.
    return np.array([dRdt, dCdt])
# assign some parameter values
r = 1.     # resource intrinsic growth rate
a = 0.1    # consumer attack/search rate
z = 1.5    # consumer mortality rate
e = 0.75   # consumer production efficiency
# define the time vector
# BUG FIX: modern SciPy removed its re-exported NumPy aliases
# (sc.linspace / sc.array); use numpy directly.
t = np.linspace(0, 15, 1000)
# set the inital conditions for the two populations
R0 = 10
C0 = 5
RC0 = np.array([R0, C0])
# now, numerically integrate this system forwards from these conditions
pops, infodict = intergrate.odeint(dCR_dt, RC0, t, full_output=True)
# plot densities over time
f1 = p.figure()
p.plot(t, pops[:, 0], 'g-', label='Resource density')
p.plot(t, pops[:, 1], 'b-', label='Consumer density')
p.grid()
p.legend(loc='best')
p.xlabel('Time')
p.ylabel('Population density')
p.title('Consumer-Resource population dynamics')
# save the figure as a pdf
f1.savefig('../Results/Plots/LV_model.pdf')
# phase-plane plot: consumer density against resource density
f2 = p.figure()
p.plot(pops[:, 0], pops[:, 1], 'r-')
p.grid()
p.legend(loc='best')
p.xlabel('Resource density')
p.ylabel('Consumer density')
p.title('Consumer-Resource population dynamics')
# save as pdf
f2.savefig("../Results/Plots/LV_FacePlot.pdf")
|
from typing import Any, Dict, List, Union
from app.interpreter.interpreter import ALL_ASSERTIONS, ALL_RULES, Interpreter
from app.parser import AST
def ast_to_string(ast_dict: Dict[str, List[AST]]) -> Dict[str, List[Union[str, List[Any]]]]:
    """Convert a mapping of ASTs into a mapping of nested token-value lists."""
    def flatten(tree: AST) -> List[Union[str, List[Any]]]:
        # A node is either a sub-tree (list) or a token carrying a .value.
        return [flatten(node) if isinstance(node, list) else node.value
                for node in tree]
    return {key: flatten(tree) for key, tree in ast_dict.items()}
def run_commands(commands: List[str]) -> List[str]:
    """Run each command through a fresh Interpreter, collecting output lines."""
    collected: List[str] = []
    interpreter = Interpreter(collected.append)
    for command in commands:
        interpreter.run(command)
    return collected
# A plain pattern with one variable matches every stored assertion.
def test_simple():
    assert run_commands([
        "(@new (hello world))",
        "(@new (hello (Pichugin Vladislav)))",
        "(hello $x)"
    ]) == [
        "(hello world)",
        "(hello (Pichugin Vladislav))"
    ]
# A literal sub-list in the query constrains which assertions match.
def test_simple2():
    assert run_commands([
        "(@new (position (Pichugin Vladislav) (junior developer)))",
        "(@new (position (Ivan Ivanov) (senior developer)))",
        "(@new (position Jack (junior developer)))",
        "(position $name (junior developer))"
    ]) == [
        "(position (Pichugin Vladislav) (junior developer))",
        "(position Jack (junior developer))"
    ]
# Each variable may bind an atom or an arbitrary sub-list.
def test_simple3():
    assert run_commands([
        "(@new (address Vlad (Moscow 9)))",
        "(@new (address (Ivan Ivanov) Spb))",
        "(@new (nonAddress (Some Name) empty))",
        "(@new (address (Nikita Nikitin) (Nizhnevartovsk (Mira street) 123)))",
        "(address $x $y)"
    ]) == [
        "(address Vlad (Moscow 9))",
        "(address (Ivan Ivanov) Spb)",
        "(address (Nikita Nikitin) (Nizhnevartovsk (Mira street) 123))"
    ]
# The same variable used twice must bind the same value in both places.
def test_simple_identical_vars():
    assert run_commands([
        "(@new (boss Mike Jack))",
        "(@new (boss Bob Jack))",
        "(@new (boss Jack Jack))",
        "(boss $x $x)"
    ]) == [
        "(boss Jack Jack)"
    ]
# A dotted tail variable matches the rest of a list (possibly empty).
def test_simple_dot_tail():
    assert run_commands([
        "(@new (position (Pichugin Vladislav) (developer frontend backend)))",
        "(@new (position (Ivan Ivanov) (developer android)))",
        "(@new (position Ekaterina (HR)))",
        "(@new (position Nikita (developer)))",
        "(position $x (developer . $type))"
    ]) == [
        "(position (Pichugin Vladislav) (developer frontend backend))",
        "(position (Ivan Ivanov) (developer android))",
        "(position Nikita (developer))"
    ]
# Without the dot, the variable must match exactly one element.
def test_simple_without_dot_tail():
    assert run_commands([
        "(@new (position (Pichugin Vladislav) (developer frontend backend)))",
        "(@new (position (Ivan Ivanov) (developer android)))",
        "(@new (position Ekaterina (HR)))",
        "(@new (position Nikita (developer)))",
        "(position $x (developer $type))"
    ]) == [
        "(position (Ivan Ivanov) (developer android))"
    ]
# @and: both sub-queries must match with consistent variable bindings.
def test_compound_and():
    assert run_commands([
        "(@new (position Vlad developer))",
        "(@new (position Ekaterina HR))",
        "(@new (position Anna developer))",
        "(@new (address Vlad (Moscow (street 9) 20)))",
        "(@new (address Ekaterina (Spb 13)))",
        "(@new (address Anna (Nizhnevartovsk (Lenin street 22) 19)))",
        "(@and (position $person developer) (address $person $where))"
    ]) == [
        "(@and (position Vlad developer) (address Vlad (Moscow (street 9) 20)))",
        "(@and (position Anna developer) (address Anna (Nizhnevartovsk (Lenin street 22) 19)))"
    ]
# @or: a binding satisfying either alternative is reported.
def test_compound_or():
    assert run_commands([
        "(@new (boss Vlad Denis))",
        "(@new (boss Nikita Boris))",
        "(@new (boss Oleg Sergey))",
        "(@new (boss Alexander Boris))",
        "(@new (boss Anna Vlad))",
        "(@or (boss $x Denis) (boss $x Boris))"
    ]) == [
        "(@or (boss Vlad Denis) (boss Vlad Boris))",
        "(@or (boss Nikita Denis) (boss Nikita Boris))",
        "(@or (boss Alexander Denis) (boss Alexander Boris))",
    ]
# @not filters out bindings for which the inner query succeeds.
def test_compound_not():
    assert run_commands([
        "(@new (boss Vlad Denis))",
        "(@new (position Vlad developer))",
        "(@new (position Ekaterina HR))",
        "(@new (boss Ekaterina Denis))",
        "(@new (boss Alex Denis))",
        "(@new (position Alex developer))",
        "(@new (position Nikita analyst))",
        "(@new (boss Nikita Denis))",
        "(@and (boss $person Denis) (@not (position $person developer)))"
    ]) == [
        "(@and (boss Ekaterina Denis) (@not (position Ekaterina developer)))",
        "(@and (boss Nikita Denis) (@not (position Nikita developer)))"
    ]
# @apply evaluates a host predicate on the instantiated arguments.
def test_compound_apply():
    assert run_commands([
        "(@new (salary Vlad 90))",
        "(@new (salary John 330))",
        "(@new (salary Sergey 12))",
        "(@new (salary Viktor 66))",
        "(@new (salary Ekaterina 5))",
        "(@and (salary $person $amount) (@apply > $amount 50))"
    ]) == [
        "(@and (salary Vlad 90) (@apply > 90 50))",
        "(@and (salary John 330) (@apply > 330 50))",
        "(@and (salary Viktor 66) (@apply > 66 50))"
    ]
# Combinators nest: @apply inside @or inside @and.
def test_compound_apply2():
    assert run_commands([
        "(@new (salary Vlad 90))",
        "(@new (salary John 330))",
        "(@new (salary Sergey 12))",
        "(@new (salary Viktor 66))",
        "(@new (salary Ekaterina 5))",
        """
        (@and
            (salary $person $amount)
            (@or
                (@apply > $amount 70)
                (@apply < $amount 10)
            )
        )
        """
    ]) == [
        "(@and (salary Vlad 90) (@or (@apply > 90 70) (@apply < 90 10)))",
        "(@and (salary John 330) (@or (@apply > 330 70) (@apply < 330 10)))",
        "(@and (salary Ekaterina 5) (@or (@apply > 5 70) (@apply < 5 10)))"
    ]
# Rules with bodies: livesAbout derives pairs sharing a town, using a
# helper rule `same` to exclude a person paired with themselves.
def test_rule1():
    assert run_commands([
        """
        (@new (@rule (livesAbout $person1 $person2)
                (@and (address $person1 ($town . $rest1))
                      (address $person2 ($town . $rest2))
                      (@not (same $person1 $person2)))))
        """,
        "(@new (@rule (same $x $x)))",
        "(@new (address Vlad (Moscow (street sample) 322)))",
        "(@new (address Darya (Moscow Arbat 1337)))",
        "(@new (address Andrey (Spb (street 1) 22)))",
        "(livesAbout $x $y)"
    ]) == [
        "(livesAbout Vlad Darya)",
        "(livesAbout Darya Vlad)"
    ]
# Rule conclusions can be combined with plain queries in @and.
def test_rule2():
    assert run_commands([
        """
        (@new (@rule (bigBoss $person)
                (@and (boss $middleManager $person)
                      (boss $x $middleManager))))
        """,
        "(@new (position Denis developer))",
        "(@new (boss Vlad Denis))",
        "(@new (position Vlad developer))",
        "(@new (boss Alex Vlad))",
        "(@new (position Alex developer))",
        "(@new (position Nika HR))",
        "(@new (boss Alla Nika))",
        "(@new (position Alla HR))",
        "(@new (boss Ekaterina Alla))",
        "(@new (position Ekaterina HR))",
        "(@and (position $person developer) (bigBoss $person))"
    ]) == [
        "(@and (position Denis developer) (bigBoss Denis))"
    ]
# Classic recursive append: forward direction.
def test_rule_append():
    assert run_commands([
        "(@new (@rule (append () $y $y)))",
        "(@new (@rule (append ($u . $v) $y ($u . $z)) (append $v $y $z)))",
        "(append (a b) (c d) $z)"
    ]) == [
        "(append (a b) (c d) (a b c d))"
    ]
# append run "backwards": solve for the middle argument.
def test_rule_append2():
    assert run_commands([
        "(@new (@rule (append () $y $y)))",
        "(@new (@rule (append ($u . $v) $y ($u . $z)) (append $v $y $z)))",
        "(append (a b) $y (a b c d))"
    ]) == [
        "(append (a b) (c d) (a b c d))"
    ]
# append enumerating every split of the result list.
def test_rule_append3():
    assert run_commands([
        "(@new (@rule (append () $y $y)))",
        "(@new (@rule (append ($u . $v) $y ($u . $z)) (append $v $y $z)))",
        "(append $x $y (a b c d))"
    ]) == [
        "(append () (a b c d) (a b c d))",
        "(append (a) (b c d) (a b c d))",
        "(append (a b) (c d) (a b c d))",
        "(append (a b c) (d) (a b c d))",
        "(append (a b c d) () (a b c d))"
    ]
# Adjacency rule over a list containing nested elements.
def test_rule_next():
    assert run_commands([
        "(@new (@rule ($x nextTo $y in ($x $y . $u))))",
        "(@new (@rule ($x nextTo $y in ($v . $z)) ($x nextTo $y in $z)))",
        "($x nextTo $y in (1 (2 3) 4))"
    ]) == [
        "(1 nextTo (2 3) in (1 (2 3) 4))",
        "((2 3) nextTo 4 in (1 (2 3) 4))"
    ]
# Adjacency rule with a fixed right-hand neighbour.
def test_rule_next2():
    assert run_commands([
        "(@new (@rule ($x nextTo $y in ($x $y . $u))))",
        "(@new (@rule ($x nextTo $y in ($v . $z)) ($x nextTo $y in $z)))",
        "($x nextTo 1 in (2 1 3 1))"
    ]) == [
        "(2 nextTo 1 in (2 1 3 1))",
        "(3 nextTo 1 in (2 1 3 1))"
    ]
# Assertions and rules are indexed by their leading token; variables and
# list-headed items go into the catch-all / "$" buckets.
def test_indexing():
    i = Interpreter(None)
    i.run("(@new (position (Pichugin Vladislav) developer))")
    i.run("(@new (@rule (selfBoss $x) (boss $x $x)))")
    i.run("(@new ((birth date) Vlad (19 April)))")
    i.run("(@new (@rule ($x nextTo $y in ($x $y . $u))))")
    i.run("(@new (position Ekaterina HR))")
    i.run("(@new (@rule ($x nextTo $y in ($v . $z)) ($x nextTo $y in $z)))")
    i.run("(@new (city Vlad Nizhnevartovsk))")
    i.run("(@new (@rule ((not index) $x) (test $x)))")
    i.run("(@new (3 follows 2))")
    assert ast_to_string(i.assertions) == {
        ALL_ASSERTIONS: [
            ["position", ["Pichugin", "Vladislav"], "developer"],
            [["birth", "date"], "Vlad", ["19", "April"]],
            ["position", "Ekaterina", "HR"],
            ["city", "Vlad", "Nizhnevartovsk"],
            ["3", "follows", "2"]
        ],
        "position": [
            ["position", ["Pichugin", "Vladislav"], "developer"],
            ["position", "Ekaterina", "HR"]
        ],
        "city": [
            ["city", "Vlad", "Nizhnevartovsk"]
        ],
        "3": [
            ["3", "follows", "2"]
        ]
    }
    assert ast_to_string(i.rules) == {
        ALL_RULES: [
            ["@rule", ["selfBoss", "$x"], ["boss", "$x", "$x"]],
            ["@rule", ["$x", "nextTo", "$y", "in", ["$x", "$y", ".", "$u"]]],
            ["@rule", ["$x", "nextTo", "$y", "in", ["$v", ".", "$z"]], ["$x", "nextTo", "$y", "in", "$z"]],
            ["@rule", [["not", "index"], "$x"], ["test", "$x"]]
        ],
        "selfBoss": [
            ["@rule", ["selfBoss", "$x"], ["boss", "$x", "$x"]]
        ],
        "$": [
            ["@rule", ["$x", "nextTo", "$y", "in", ["$x", "$y", ".", "$u"]]],
            ["@rule", ["$x", "nextTo", "$y", "in", ["$v", ".", "$z"]], ["$x", "nextTo", "$y", "in", "$z"]]
        ]
    }
# The occurs/depends-on check rejects self-referential bindings.
def test_depends_on():
    assert run_commands([
        "(@new (@rule (testDepends $y ($z $y))))",
        "(testDepends $x $x)"
    ]) == []
# @apply on a variable that never got bound yields no results.
def test_apply_without_binding():
    assert run_commands([
        "(@new (salary Vlad 90))",
        "(@new (salary John 330))",
        "(@new (salary Sergey 12))",
        "(@new (salary Viktor 66))",
        "(@new (salary Ekaterina 5))",
        "(@and (salary $person $amount) (@apply > $nonBind 50))"
    ]) == []
# @apply on variables bound only to other variables also fails.
def test_apply_nested_vars():
    assert run_commands([
        "(@new (@rule (testHelper $x $y)))",
        "(@new (@rule (test $x $y) (@and (testHelper $x $y) (@apply > $x $y))))",
        "(test $x $y)"
    ]) == []
# "(. $all)" dumps every assertion plus every derivable conclusion.
def test_get_all():
    assert run_commands([
        """
        (@new (@rule (bigBoss $person)
                (@and (boss $middleManager $person)
                      (boss $x $middleManager))))
        """,
        "(@new (position Denis developer))",
        "(@new (boss Vlad Denis))",
        "(@new (position Vlad developer))",
        "(@new (boss Alex Vlad))",
        "(@new (position Alex developer))",
        "(@new (position Nika HR))",
        "(@new (boss Alla Nika))",
        "(@new (position Alla HR))",
        "(@new (boss Ekaterina Alla))",
        "(@new (position Ekaterina HR))",
        "(. $all)"
    ]) == [
        "(position Denis developer)",
        "(boss Vlad Denis)",
        "(position Vlad developer)",
        "(boss Alex Vlad)",
        "(position Alex developer)",
        "(position Nika HR)",
        "(boss Alla Nika)",
        "(position Alla HR)",
        "(boss Ekaterina Alla)",
        "(position Ekaterina HR)",
        "(bigBoss Denis)",
        "(bigBoss Nika)"
    ]
# The empty query "()" matches nothing.
def test_empty_select():
    assert run_commands([
        """
        (@new (@rule (bigBoss $person)
                (@and (boss $middleManager $person)
                      (boss $x $middleManager))))
        """,
        "(@new (position Denis developer))",
        "(@new (boss Vlad Denis))",
        "(@new (position Vlad developer))",
        "(@new (boss Alex Vlad))",
        "(@new (position Alex developer))",
        "(@new (position Nika HR))",
        "(@new (boss Alla Nika))",
        "(@new (position Alla HR))",
        "(@new (boss Ekaterina Alla))",
        "(@new (position Ekaterina HR))",
        "()"
    ]) == []
# Instantiation keeps the rule's own variable when nothing binds it.
def test_instantiate_with_non_bind():
    assert run_commands([
        "(@new (@rule (test $y)))",
        "(test $x)"
    ]) == [
        "(test $y)"
    ]
# Unknown @apply predicates simply produce no matches.
def test_unknown_apply():
    assert run_commands([
        "(@new (salary Vlad 90))",
        "(@new (salary John 330))",
        "(@new (salary Sergey 12))",
        "(@new (salary Viktor 66))",
        "(@new (salary Ekaterina 5))",
        "(@and (salary $person $amount) (@apply newPredicate $amount 50))"
    ]) == []
# A single @new may install several assertions and rules at once.
def test_batch_new():
    assert run_commands([
        """
        (@new
            (@rule (bigBoss $person)
                (@and
                    (boss $middleManager $person)
                    (boss $x $middleManager)
                )
            )
            (position Denis developer)
            (boss Vlad Denis)
            (position Vlad developer)
            (boss Alex Vlad)
            (position Alex developer)
            (position Nika HR)
            (boss Alla Nika)
            (position Alla HR)
            (boss Ekaterina Alla)
            (position Ekaterina HR)
        )
        """,
        "(. $all)"
    ]) == [
        "(position Denis developer)",
        "(boss Vlad Denis)",
        "(position Vlad developer)",
        "(boss Alex Vlad)",
        "(position Alex developer)",
        "(position Nika HR)",
        "(boss Alla Nika)",
        "(position Alla HR)",
        "(boss Ekaterina Alla)",
        "(position Ekaterina HR)",
        "(bigBoss Denis)",
        "(bigBoss Nika)"
    ]
|
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import scipy.io as scio
import h5py
import time
# Training log for the full-SGD run (learning-rate schedule lr0002).
LOG = open("/home/hyli/Data/InternData/log_full_sgd_lr0002.txt", "w")
# Fixed seed for reproducible weight initialisation.
rng = np.random.RandomState(1234)
# NOTE(review): random_state appears unused below — confirm before removing.
random_state = 42
# Global mean/std of the noisy features, precomputed offline.
par = scio.loadmat('/home/hyli/Data/InternData/mvn_store.mat')
mean_noisy = np.array(par['global_mean'], dtype='float32')
std_noisy = np.array(par['global_std'], dtype='float32')
# Flatten the stored (1, k) rows to 1-D vectors.
mean_noisy = mean_noisy[0, :]
std_noisy = std_noisy[0, :]
def make_window_buffer(x, neighbor=3):
    """Stack each frame with its +/-`neighbor` context frames.

    For an (m, n) input, returns an (m, n * (2*neighbor + 1)) float32 array
    where column-block k holds the frame at offset (k - neighbor); offsets
    running past either end are clamped to the first/last frame.
    """
    m, n = x.shape
    width = 2 * neighbor + 1
    out = np.zeros((m, n * width), dtype='float32')
    rows = np.arange(m)
    for block in range(width):
        # Source frame index for this context slot, clamped at the edges.
        src = np.clip(rows + block - neighbor, 0, m - 1)
        out[:, block * n:(block + 1) * n] = x[src]
    return out
def Normalize_data(x, mean_noisy, std_noisy):
    """Whiten context windows: per-bin stats tiled across the 8 column blocks."""
    reps = 8
    mean_row = np.tile(mean_noisy, [1, reps])[0, :]
    std_row = np.tile(std_noisy, [1, reps])[0, :]
    return ((x - mean_row) / std_row[np.newaxis, :]).astype('float32')
def Normalize_label(x, mean_noisy, std_noisy):
    """Whiten label frames: per-bin stats tiled across the 2 column blocks."""
    reps = 2
    mean_row = np.tile(mean_noisy, [1, reps])[0, :]
    std_row = np.tile(std_noisy, [1, reps])[0, :]
    return ((x - mean_row) / std_row[np.newaxis, :]).astype('float32')
def gen_context(x, nat, sentence_id, neighbor, global_mean, global_std):
    """Build normalized context windows sentence by sentence.

    `sentence_id` holds the frame offsets of sentence boundaries; windowing
    is done per sentence so context never leaks across utterances.
    """
    total_frames = x.shape[0]
    out = np.zeros([total_frames, 257 * 8])
    for begin, end in zip(sentence_id[:-1], sentence_id[1:]):
        windows = make_window_buffer(x[begin:end, :], neighbor)
        # Append the noise-aware (nat) features as the eighth block.
        windows = np.c_[windows, nat[begin:end]]
        out[begin:end] = Normalize_data(windows, global_mean, global_std)
    return out
class Autoencoder:
    """Tied-weight denoising autoencoder used for layer-wise pretraining."""
    def __init__(self, vis_dim, hid_dim, W, function=lambda x: x):
        # Weight matrix shared with the owning Dense layer (tied weights).
        self.W = W
        # Visible-side and hidden-side biases.
        self.a = tf.Variable(np.zeros(vis_dim).astype('float32'), name='a')
        self.b = tf.Variable(np.zeros(hid_dim).astype('float32'), name='b')
        self.function = function
        self.params = [self.W, self.a, self.b]
    def encode(self, x):
        """Map visible units to hidden activations."""
        return self.function(tf.matmul(x, self.W) + self.b)
    def decode(self, x):
        """Map hidden activations back through the transposed weights."""
        return self.function(tf.matmul(x, tf.transpose(self.W)) + self.a)
    def f_prop(self, x):
        """Full reconstruction pass: encode then decode."""
        return self.decode(self.encode(x))
    def reconst_error(self, x, noise):
        """Return (mean squared reconstruction error, reconstruction).

        The input is corrupted by elementwise multiplication with `noise`
        before reconstruction (denoising objective).
        """
        corrupted = x * noise
        reconstruction = self.f_prop(corrupted)
        error = tf.reduce_mean(tf.reduce_sum((x - reconstruction) ** 2, 1))
        return error, reconstruction
class Dense:
    """Fully connected layer with an attached autoencoder for pretraining."""
    def __init__(self, in_dim, out_dim, function=lambda x: x):
        initial = rng.uniform(low=-0.1, high=0.1, size=(in_dim, out_dim))
        self.W = tf.Variable(initial.astype('float32'), name='W')
        self.b = tf.Variable(np.zeros([out_dim]).astype('float32'))
        self.function = function
        self.params = [self.W, self.b]
        # Tied-weight autoencoder sharing this layer's weight matrix.
        self.ae = Autoencoder(in_dim, out_dim, self.W, self.function)
    def f_prop(self, x):
        """Affine transform + activation; the output is cached in self.z."""
        self.z = self.function(tf.matmul(x, self.W) + self.b)
        return self.z
    def pretrain(self, x, noise):
        """Delegate the denoising reconstruction objective to the autoencoder."""
        return self.ae.reconst_error(x, noise)
# 257 log-power bins x 8 context blocks in; 257 clean bins out.
layers = [
    Dense(257*8, 2048, tf.nn.sigmoid),
    Dense(2048, 2048, tf.nn.sigmoid),
    Dense(2048, 2048, tf.nn.sigmoid),
    Dense(2048, 257)
]
# Dropout keep probability (1.0 at evaluation time).
keep_prob = tf.placeholder(tf.float32)
# Noisy context-window input and clean target frames.
x = tf.placeholder(tf.float32, [None, 257*8])
t = tf.placeholder(tf.float32, [None, 257])
def f_props(layers, x):
    """Forward through all layers, with dropout after each hidden layer."""
    last = len(layers) - 1
    for idx, layer in enumerate(layers):
        x = layer.f_prop(x)
        if idx != last:
            x = tf.nn.dropout(x, keep_prob)
    return x
y = f_props(layers, x)
# Mean squared error between enhanced and clean log-power frames.
cost_fine = tf.reduce_mean(tf.reduce_sum((y - t)**2, 1))
# Learning rate and momentum are fed per step so they can be scheduled.
lrate_p = tf.placeholder(tf.float32)
mt_p = tf.placeholder(tf.float32)
train_fine = tf.train.MomentumOptimizer(
    learning_rate=lrate_p, momentum=mt_p).minimize(cost_fine)
saver = tf.train.Saver()
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
n_epochs = 50
batch_size = 128
part_num_total = 230
# Part 888 is the held-out validation set.
data_file = h5py.File(
    '/home/hyli/Data/InternData/trainDB_lps/RawData_Part888.mat')
data_valid = np.array(data_file['data'], dtype='float32').transpose()
nat = np.array(data_file['nat'], dtype='float32').transpose()
sentence_id = np.array(data_file['sentence_id'], dtype='int32').transpose()
sentence_id = sentence_id[:,0]
data_valid = gen_context(data_valid, nat, sentence_id,
                         3, mean_noisy, std_noisy)
label_valid = np.array(data_file['label'], dtype='float32').transpose()
label_valid = Normalize_label(label_valid, mean_noisy, std_noisy)
# Keep only the first 257 label bins (clean spectrum).
label_valid = label_valid[:,:257]
del data_file
del nat
del sentence_id
#saver.restore(sess,'/home/hyli/Data/InterData/DNN_full_sgd_lr0002_model')
print("FineTuning begin")
# Validation cost before any training (epoch 0 baseline).
Cost_validation = sess.run(cost_fine,
                           feed_dict={x: data_valid, t: label_valid, keep_prob: 1.0})
print('EPOCH: 0, Validation cost: %.3f ' % (Cost_validation))
cost_valid_best = 1000000
for epoch in range(n_epochs):
    # Learning-rate schedule: 0.001, then 0.0005 after epoch 10.
    lrate = 0.001
    #if(epoch>3):
    # lrate = 0.0005
    #if(epoch>10):
    # lrate = 0.0002
    # if(epoch>20):
    # lrate = 0.0001
    if(epoch>10):
        lrate = 0.0005
    mt = 0.9
    time_start = time.time()
    # Visit the pre-shuffled data parts in a fresh random order each epoch.
    part_num_list = shuffle(range(part_num_total))
    for part_num in part_num_list:
        # Free the previous part's arrays before loading the next one.
        try:
            del data_part
            del _data
            del _label
            del _nat
            del sentence_id
        except:
            pass
        data_part = scio.loadmat(
            '/home/hyli/Data/InternData/trainDB_lps_shuffle/NormContextData_Part'+str(part_num+1)+'.mat')
        _data = np.array(data_part['data'], dtype='float32')
        _label = np.array(data_part['label'], dtype='float32')
        del data_part
        # doing normalization
        _label = _label[:,:257]
        _data, _label = shuffle(_data, _label)
        n_batches = _data.shape[0] // batch_size
        for i in range(n_batches):
            start = i * batch_size
            end = start + batch_size
            sess.run(train_fine,
                     feed_dict={x: _data[start:end],
                                t: _label[start:end],
                                keep_prob: 0.8,
                                lrate_p : lrate,
                                mt_p: mt})
        #print('part %i finished'%(part_num+1))
    # End-of-epoch validation pass (no dropout).
    Cost_validation = sess.run(cost_fine,
                               feed_dict={x: data_valid, t: label_valid, keep_prob: 1.0})
    time_end = time.time()
    print('EPOCH: %i, Validation cost: %.3f ' % (epoch + 1, Cost_validation))
    print('Elapsed time for one epoch is %.3f' % (time_end-time_start))
    LOG.write('EPOCH: %i, Validation cost: %.3f \n' %
              (epoch + 1, Cost_validation))
    LOG.flush()
    # Checkpoint only when validation improves.
    if(Cost_validation < cost_valid_best):
        save_dict = {}
        save_dict['W1'] = sess.run(layers[0].W)
        save_dict['b1'] = sess.run(layers[0].b)
        save_dict['W2'] = sess.run(layers[1].W)
        save_dict['b2'] = sess.run(layers[1].b)
        save_dict['W3'] = sess.run(layers[2].W)
        save_dict['b3'] = sess.run(layers[2].b)
        save_dict['W4'] = sess.run(layers[3].W)
        save_dict['b4'] = sess.run(layers[3].b)
        MATFILE = '/home/hyli/Data/InternData/DNN_full_sgd_lr0002.mat'
        scio.savemat(MATFILE, save_dict)
        cost_valid_best = Cost_validation
        print('Model in EPOCH:%d is saved' % (epoch+1))
        LOG.write('Model in EPOCH:%d is saved' % (epoch+1))
        saver.save(sess,'/home/hyli/Data/InterData/DNN_full_sgd_lr0002_next_model')
LOG.close()
del data_valid
del label_valid
del _data
del _label
sess.close()
# # Python program to find maximum product of two
# # non-intersecting paths
# # Returns maximum length path in subtree rooted
# # at u after removing edge connecting u and v
# def dfs(g, curMax, u, v):
# # To find lengths of first and second maximum
# # in subtrees. currMax is to store overall
# # maximum.
# max1 = 0
# max2 = 0
# total = 0
# # loop through all neighbors of u
# for i in range(len(g[u])):
# # if neighbor is v, then skip it
# if (g[u][i] == v):
# continue
# # call recursively with current neighbor as root
# total = max(total, dfs(g, curMax, g[u][i], u))
# # get max from one side and update
# if (curMax[0] > max1):
# max2 = max1
# max1 = curMax[0]
# else:
# max2 = max(max2, curMax[0])
# # store total length by adding max
# # and second max
# total = max(total, max1 + max2)
# # update current max by adding 1, i.e.
# # current node is included
# curMax[0] = max1 + 1
# return total
# # method returns maximum product of length of
# # two non-intersecting paths
# def maxProductOfTwoPaths(g, N):
# res = -999999999999
# path1, path2 = None, None
# # one by one removing all edges and calling
# # dfs on both subtrees
# for i in range(N):
# for j in range(len(g[i])):
# # print(g[i][j], i, '-', i, g[i][j])
# # calling dfs on subtree rooted at
# # g[i][j], excluding edge from g[i][j]
# # to i.
# curMax = [0]
# path1 = dfs(g, curMax, g[i][j], i)
# # calling dfs on subtree rooted at
# # i, edge from i to g[i][j]
# curMax = [0]
# path2 = dfs(g, curMax, i, g[i][j])
# print(path1, path2)
# res = max(res, path1 * path2)
# return res
# # Utility function to add an undirected edge (u,v)
# def addEdge(g, u, v):
# g[u].append(v)
# g[v].append(u)
# # Driver code
# if __name__ == '__main__':
# edges = [[1, 8], [2, 6], [3, 1], [5, 3], [7, 8], [8, 4], [8, 6]]
# N = len(edges)
# # there are N edges, so +1 for nodes and +1
# # for 1-based indexing
# g = [[] for i in range(N + 2)]
# for i in range(N):
# addEdge(g, edges[i][0], edges[i][1])
# print(g)
# print(maxProductOfTwoPaths(g, N))
# # This code is contributed by PranchalK
from Graph import UnDirectedGraph
import sys
class MaxProductNInterPath(UnDirectedGraph):
    """Search for the maximum product of the lengths of two paths obtained
    by removing one edge of the (tree-shaped) graph and exploring each side.

    NOTE(review): DFS here returns the maximum depth reached, mirroring the
    original code; confirm against the commented reference implementation
    above if exact path-length semantics matter.
    """
    def __init__(self):
        UnDirectedGraph.__init__(self)
    def findMax(self):
        """Try every edge (src, dest), measure both sides, and return the
        best product of the two path lengths.

        Returns the maximum product (also printed, as before).
        """
        print(self.graph)
        res = -sys.maxsize
        for src in self.graph:
            for dest in self.graph[src]:
                # BUG FIX: a fresh visited map per traversal — the original
                # reused one dict, so every DFS after the first saw a fully
                # visited graph and returned 0.
                path1 = self.DFS(src, {key: 0 for key in self.graph}, 0, dest)
                path2 = self.DFS(dest, {key: 0 for key in self.graph}, 0, src)
                print(path1, path2)
                # BUG FIX: res was never accumulated before.
                res = max(res, path1 * path2)
        print(res)
        return res
    def DFS(self, src, visited, max1, dest=None):
        """Depth-first search from `src`, skipping the edge toward `dest`.

        `visited` marks each node with its depth; returns the largest depth
        increment found below `src`.
        """
        visited[src] = 1
        for i in self.graph[src]:
            # BUG FIX: use value equality — `is` on ints only works for
            # CPython's small-integer cache and is unreliable in general.
            if i == dest:
                continue
            if not visited[i]:
                visited[i] = visited[src] + 1
                max2 = self.DFS(i, visited, max1 + 1)
                if max1 < max2:
                    max1 = max2
        return max1
# Build the sample tree and run the max-product search.
graph = MaxProductNInterPath()
edges = [(8, 4), (8, 7), (8, 6), (8, 1), (6, 2), (1, 3), (3, 5)]
for src, dest in edges:
    graph.addEdge(src, dest)
graph.findMax()
#!/usr/bin/env python
# license removed for brevity
import rospy
import time
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
#import numpy as np
import cv2
def talker():
    """Capture frames from the module-level camera and publish the top half
    of each frame on the `image_raw` topic at the configured frequency.

    Reads the globals `cap`, `frequency` and `video_resolution` set in the
    __main__ block below.
    """
    global cap
    global frequency
    global video_resolution
    # BUG FIX: integer division — on Python 3, `/` yields a float and
    # `frame[0:image_size]` below would raise TypeError.
    image_size = video_resolution // 2
    bridge = CvBridge()
    pub = rospy.Publisher('image_raw', Image, queue_size=10)
    rate = rospy.Rate(frequency)
    while not rospy.is_shutdown():
        cap.grab()
        # Timestamp as close to the actual grab as possible.
        stamp = rospy.Time.from_sec(time.time())
        ret, frame = cap.retrieve()
        # Keep only the top half of the frame.
        # NOTE(review): assumes frame height >= image_size — confirm the
        # camera mode matches the configured resolution.
        image = frame[0:image_size, :]
        image_message = bridge.cv2_to_imgmsg(image, encoding="bgr8")
        image_message.header.stamp = stamp
        pub.publish(image_message)
        rate.sleep()
if __name__ == '__main__':
    rospy.init_node('capture', anonymous=True)
    # Camera device index (default 0).
    if rospy.has_param('~video_input'):
        video_input = rospy.get_param('~video_input')
    else:
        video_input = 0
    # Horizontal resolution, validated against the supported set.
    supported_resolutions = [1280, 1920]
    if rospy.has_param('~resolution'):
        video_resolution = rospy.get_param('~resolution')
        if not video_resolution in supported_resolutions:
            print("Unsupported resolution %d: valid options %s" % (video_resolution, supported_resolutions))
            video_resolution = supported_resolutions[0]
            print("Using default: %d" % video_resolution)
    else:
        video_resolution = supported_resolutions[0]
    # Publish frequency in Hz, clamped to [1, 30].
    if rospy.has_param('~frequency'):
        frequency = rospy.get_param('~frequency')
        if frequency < 1 or frequency > 30:
            print("Unsupported frequency %d: valid interval [1, 30]" % frequency)
            frequency = 30
            print("Using default: %d" % frequency)
    else:
        frequency = 30
    # Map the supported widths to their 16:9 heights.
    video_height = {1280: 720, 1920: 1080}
    cap = cv2.VideoCapture(video_input)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, video_resolution)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,video_height[video_resolution])
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
    # Always release the camera on exit.
    cap.release()
    cv2.destroyAllWindows()
|
from src.Public import Public
class GamePlay(object):
    """Drives a bot through the game flow: login -> coin match -> ready ->
    enable robot -> game over, each stage gated on ``Public.find()``.

    NOTE(review): every stage uses ``while <flag> is True:`` where the flag
    is never re-evaluated inside the loop, so a True result repeats the next
    stage forever (and gameOver busy-waits on ``pass``). Presumably
    intentional for a long-running bot, but confirm -- ``if`` may have been
    meant.
    """
    def __init__(self, status, message):
        # status/message are forwarded verbatim to Public.find() at each stage.
        self.pb = Public()
        self.status = status
        self.message = message
    def login(self):
        self.pb.color("login......", "cyan")
        lss = self.pb.find(self.status, self.message)
        # login success
        while lss is True:
            self.coinMatch()
    def coinMatch(self):
        self.pb.color("join coinMatch......", "cyan")
        cms = self.pb.find(self.status, self.message)
        # room join success
        while cms is True:
            self.ready()
    def ready(self):
        self.pb.color("ready play......", "cyan")
        rs = self.pb.find(self.status, self.message)
        # ready success
        while rs is True:
            self.enableRobot()
    def enableRobot(self):
        self.pb.color("enableRobot......", "cyan")
        ers = self.pb.find(self.status, self.message)
        #
        while ers is True:
            self.gameOver()
    def gameOver(self):
        self.pb.color("gameOver......","cyan")
        gos = self.pb.find(self.status,self.message)
        while gos is True:
            pass
|
from functools import lru_cache
class LargePower:
    """Compute a**b modulo 1e9+7 for a pair of numeric strings.

    `n` is a two-item sequence: n[0] is the base, n[1] the exponent.
    """

    _MOD = 10 ** 9 + 7  # standard competitive-programming prime modulus

    def __init__(self, n):
        self.a = int(n[0])
        self.b = int(n[1])
        self._result = None  # lazily computed cache of pow(a, b, _MOD)

    def compute(self):
        """Return pow(a, b, 1e9+7), cached per instance.

        The original decorated this with @lru_cache, which keys the cache on
        `self` and keeps every instance alive for the process lifetime
        (ruff B019); a plain instance attribute avoids that leak.
        """
        if self._result is None:
            self._result = pow(self.a, self.b, self._MOD)
        return self._result

    def get(self):
        """Return the result as a string, short-circuiting trivial cases.

        Note: when a == 0 the answer is "0" even for b == 0, preserving the
        original check ordering.
        """
        if self.a == 0:
            return str(0)
        if self.b == 0:
            return str(1)
        return str(self.compute())
|
import shutil
import os
# for line in open(r"G:\list_bbox_celeba2.txt"):
# print(line)
# exit()
import glob
import os
# path = r"G:\list_bbox_celeba2.txt"
# f = r"G:\list_bbox_celeba3.txt"
#
# file = open(path)
# for line in file.readlines():
# print(line)
# strs = line.strip().split(" ")
# print(strs)
# # shutil.copyfile(path + '/' + line, f + '/' + line)
# shutil.copy(path + "strs", f + "strs")
# exit()
# _*_ coding:utf-8 _*_
# 开发人员 : lenovo
# 开发时间 :2019/12/2920:44
# 文件名称 :Processing.py
# 开发工具 : PyCharm
# 对实现收集好的数据进行预处理(将单独的名词,换成句子(定义类))
# _*_ coding:utf-8 _*_
# Copy the first 1002 lines of the CelebA bbox list into a new file.
# NOTE(review): the output is named "CelebA_5K.txt" but only 1002 lines are
# copied (the loop breaks once count reaches 1002) -- confirm intent.
with open(r"D:\CelebA\list_bbox_celeba.txt", "r") as f1:  # source path
    Read_data = f1.readlines()  # read all lines; `with` closes the file afterwards
# Mode 'w' creates the file if missing and truncates it on each run. Using
# 'wb' would raise "TypeError: a bytes-like object is required, not 'str'"
# because str lines are written below. Opening via `with` (instead of a bare
# open()/close() pair) guarantees the file is closed even if a write fails.
with open(r"D:\ACelebA\CelebA_5K.txt", "w") as f2:  # destination path
    count = 0
    for x in Read_data:
        f2.write(x)  # copy the line verbatim
        count += 1
        if count == 1002:
            break
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from json import loads as str_to_json
from cafe.engine.models.base import AutoMarshallingModel
class RouteTarget(AutoMarshallingModel):
    """A single worker endpoint behind a route: addresses plus status."""

    def __init__(self, worker_id, ip_address_v4, ip_address_v6, status):
        super(RouteTarget, self).__init__()
        self.worker_id = worker_id
        self.ip_address_v4 = ip_address_v4
        self.ip_address_v6 = ip_address_v6
        self.status = status

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize a JSON string into a RouteTarget."""
        return cls._dict_to_obj(str_to_json(serialized_str))

    @classmethod
    def _dict_to_obj(cls, json_dict):
        """Build a RouteTarget from an already-parsed dict."""
        return RouteTarget(**json_dict)
class Route(AutoMarshallingModel):
    """A service domain together with its list of RouteTarget entries."""

    def __init__(self, service_domain, targets):
        super(Route, self).__init__()
        self.service_domain = service_domain
        self.targets = targets

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize a JSON string into a Route."""
        return cls._dict_to_obj(str_to_json(serialized_str))

    @classmethod
    def _dict_to_obj(cls, json_dict):
        """Build a Route from an already-parsed dict."""
        parsed_targets = []
        for child in json_dict.get('targets'):
            parsed_targets.append(RouteTarget._dict_to_obj(child))
        return Route(service_domain=json_dict.get('service_domain'),
                     targets=parsed_targets)
class AllRoutes(AutoMarshallingModel):
    """Collection model for the top-level ``routes`` JSON document."""

    ROOT_TAG = 'routes'

    def __init__(self, routes=None):
        super(AllRoutes, self).__init__()
        self.routes = routes

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize the full document, descending into ROOT_TAG first."""
        document = str_to_json(serialized_str)
        return cls._dict_to_obj(document.get(cls.ROOT_TAG))

    @classmethod
    def _dict_to_obj(cls, json_dict):
        """Build an AllRoutes from the list found under ROOT_TAG."""
        return AllRoutes(routes=[Route._dict_to_obj(child)
                                 for child in json_dict])
|
def spiralOrder(matrix):
    """Return the elements of `matrix` in clockwise spiral order."""
    if not matrix:
        return []
    top, bottom = 0, len(matrix)       # active row band [top, bottom)
    left, right = 0, len(matrix[0])    # active column band [left, right)
    order = []
    while bottom > top and right > left:
        # Top edge, left -> right.
        for col in range(left, right):
            order.append(matrix[top][col])
        # Right edge, top+1 -> bottom-2 (corners handled by the other edges).
        for row in range(top + 1, bottom - 1):
            order.append(matrix[row][right - 1])
        # Bottom edge, right -> left, only if distinct from the top edge.
        if bottom != top + 1:
            for col in range(right - 1, left - 1, -1):
                order.append(matrix[bottom - 1][col])
        # Left edge, bottom-2 -> top+1, only if distinct from the right edge.
        if left != right - 1:
            for row in range(bottom - 2, top, -1):
                order.append(matrix[row][left])
        top += 1
        bottom -= 1
        left += 1
        right -= 1
    return order
# Demo fixture: 4x4 matrix whose spiral traversal is printed below.
mymatrix=[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
print(spiralOrder(mymatrix)) |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a testing timestamp to Link and widen the short_base URL choices."""
    dependencies = [
        ('urly_app', '0016_auto_20161130_2154'),
    ]
    operations = [
        # New nullable field recording when a link was last tested.
        migrations.AddField(
            model_name='link',
            name='testing_date',
            field=models.DateTimeField(default=None, null=True, blank=True),
        ),
        # Extended choice list of short-URL base domains (bytes literals are
        # kept as-is: this file targets Python 2 era Django).
        migrations.AlterField(
            model_name='link',
            name='short_base',
            field=models.URLField(default=b'http://www.spe.org/go/', max_length=28, choices=[(b'http://2s.pe/', b'2s.pe'), (b'http://4s.pe/', b'4s.pe'), (b'http://go.spe.org/', b'GO.spe.org'), (b'http://www.spe.org/go/', b'www.SPE.org/go'), (b'http://go.iptcnet.org/', b'GO.IPTCnet.org'), (b'http://www.iptcnet.org/go/', b'www.IPTCnet.org/go'), (b'http://go.otcnet.org/', b'GO.OTCnet.org'), (b'http://www.otcnet.org/go/', b'www.OTCnet.org/go'), (b'http://go.otcbrasil.org/', b'GO.OTCBrasil.org'), (b'http://www.otcbrasil.org/go/', b'www.OTCBrasil.org/go'), (b'http://go.otcasia.org/', b'GO.OTCAsia.org'), (b'http://www.otcasia.org/go/', b'www.OTCAsia.org/go')]),
        ),
    ]
|
#!/usr/bin/env python3
"""xev output data parser to get mouse polling rate.
Usage:
$ chmod a+x mouserate
$ xev | mouserate
"""
import sys
import re
# Timestamp of the previous event in ms (0 means "none seen yet").
previous = 0
# Raw string so \d is a regex digit class, not a string escape: the non-raw
# form '\d' is an invalid escape sequence (DeprecationWarning, an error in
# future Python versions).
time_re = re.compile(r'time (\d+)')
while True:
    line = sys.stdin.readline()
    match = time_re.search(line)
    if match:
        time = int(match.group(1))
        if previous:
            try:
                # xev timestamps are in milliseconds, so 1000/delta is Hz.
                rate = round(1000 / (time - previous), 2)
                print("{:.2f}Hz".format(rate))
                sys.stdout.flush()  # pipe is block-buffered; show rate live
            except ZeroDivisionError:
                # Two events within the same millisecond -- skip the sample.
                pass
        previous = time
|
# Sample input: small non-negative integers (counting sort's required domain).
array = [7, 5, 9, 0, 3, 1, 6, 2, 9, 1, 4, 8, 0, 5, 2]
def count_sort(array):
    """Counting sort for non-negative integers; returns a new sorted list.

    Runs in O(n + k) time where k = max(array). The input must contain only
    integers >= 0 (negative values would index count[] incorrectly).
    """
    if not array:  # max() on an empty sequence raises ValueError
        return []
    # count[v] = number of occurrences of value v in the input.
    count = [0] * (max(array) + 1)
    for value in array:
        count[value] += 1
    # Emit each value as many times as it was seen, in ascending order.
    sorted_list = []
    for value, occurrences in enumerate(count):
        sorted_list.extend([value] * occurrences)
    return sorted_list
print(count_sort(array)) |
# Problem Code: CCISLAND
# Reads T test cases; each gives bounds (X, Y), per-step stride (x, y) and
# step count D, and reports whether D steps stay within the bounds.
# NOTE(review): semantics inferred from variable names -- confirm against
# the original problem statement.
T=int(input())
while T!=0:
    X,Y,x,y,D = map(int,input().split())
    # Final position after D steps is (D*x, D*y); it must not exceed (X, Y).
    if D*x <=X and D*y<=Y:
        print("YES")
    else:
        print("NO")
T-=1 |
# pylint: disable=W0613
# w0613 coresponde a "unused argument"
import random
import pygame
import collision_bullshit as Fuck
import math
import numopers as ops
import sfx as sfx
# Badball grace period in frames (presumably ~1.5 s at 60 fps -- confirm):
# while lifetime is below this, the ball blinks (bbDaemon) and cannot kill
# the player (plyColl).
_bbGrace = 60*1.5
def _moveBounce(ent, gs, momx, momy):
if ent.x + ent.width + momx > gs.scrsq - gs.hbound:
ent.momx = -ent.momx
ent.x = gs.scrsq - gs.hbound - ent.width
if ent.x + momx < gs.hbound:
ent.momx = -ent.momx
ent.x = gs.hbound
if ent.y + ent.width + momy > gs.scrsq - gs.vbound:
ent.momy = -ent.momy
ent.y = gs.scrsq - gs.vbound - ent.width
if ent.y + momy < gs.vbound:
ent.momy = -ent.momy
ent.y = gs.vbound
def _plyDaemon(ent, gs):
    """Per-frame player controller: arrow keys accelerate, idle axes decay.

    Momentum on each axis grows by `accel` while a key on that axis is held,
    slides back toward zero when the axis is idle, and is always clamped to
    [-top_speed, top_speed].
    """
    pressed = pygame.key.get_pressed()
    top_speed = 8
    accel = 2
    holding_horizontal = bool(pressed[pygame.K_RIGHT] or pressed[pygame.K_LEFT])
    holding_vertical = bool(pressed[pygame.K_UP] or pressed[pygame.K_DOWN])
    if pressed[pygame.K_RIGHT]:
        ent.momx += accel
    if pressed[pygame.K_LEFT]:
        ent.momx -= accel
    if pressed[pygame.K_UP]:
        ent.momy -= accel
    if pressed[pygame.K_DOWN]:
        ent.momy += accel
    # Idle axes decay toward zero at half the acceleration rate.
    if not holding_horizontal:
        ent.momx = ops.slideToZero(ent.momx, accel / 2)
    if not holding_vertical:
        ent.momy = ops.slideToZero(ent.momy, accel / 2)
    ent.momx = ops.clamp(ent.momx, -top_speed, top_speed)
    ent.momy = ops.clamp(ent.momy, -top_speed, top_speed)
def plyColl(ent, gs, oent):
    """Player collision handler: collecting a coin scores and spawns a new
    badball; touching a badball past its grace period kills the player."""
    if ent.kind == "player":
        if oent.kind == "coin":
            sfx.playSfx("coin")
            gs.score += 1
            # Respawn the coin at a random spot inside the 768px arena margins.
            oent.x = random.randint(0+32, 768-32-16)
            oent.y = random.randint(0+32, 768-32-16)
            # Each collected coin spawns one more hazard ball.
            bb = gs.spawnEnt("badball")
            bb.x = random.randint(0+32, 768-32-16)
            bb.y = random.randint(0+32, 768-32-16)
            rnd = random.random()
            poss = (rnd*gs.score)/2  # NOTE(review): debug value, only printed
            print(poss)
            # Launch speed scales with score, clamped to [2, 12].
            force = max(2, min(12, (rnd*gs.score)/2))
            # Every 5th coin uses a fully random angle; otherwise axis-aligned.
            ohmy = not (gs.score % 5 == 0)
            mdir = 0
            if ohmy:
                mdir = random.choice([0,90,180,270])
            else:
                mdir = random.randint(0,360)
            # NOTE(review): overrides the "redball" default sprite for every
            # spawned ball -- confirm it was not meant only for the else branch.
            bb.sprite = "yelball"
            bb.momx = math.sin( math.radians(mdir) )*force
            bb.momy = math.cos( math.radians(mdir) )*force
            #bb.momy = 8
        # Freshly spawned badballs (lifetime <= _bbGrace) cannot kill.
        if oent.kind == "badball" and oent.lifetime > _bbGrace:
            sfx.playSfx("boom")
            ent.valid = False
    if not ent.valid: sfx.stopSong()
def bbDaemon(ent, gs):
    """Blink a newly spawned badball during its grace period.

    While lifetime < _bbGrace the ball is hidden on alternating 4-frame
    windows; once the grace period ends it is always drawn.
    """
    in_grace = ent.lifetime < _bbGrace
    blink_off = int(ent.lifetime / 4) % 2 == 0
    ent.dodraw = not (blink_off and in_grace)
# pylint: disable=W
# Shared no-op daemon for entities needing no per-frame or collision logic.
def _noDaemon(*args): pass
# pylint: enable=W
def commonDaemon(ent, gs):
    """Shared per-frame physics: integrate momentum, then bounce off walls."""
    if not ent.valid:
        return
    # Integrate position by the current momentum...
    ent.x += ent.momx
    ent.y += ent.momy
    # ...then reflect off the playfield bounds if that move crossed them.
    _moveBounce(ent, gs, ent.momx, ent.momy)
# Entity archetype table, keyed by kind name. Read via get()/getprop();
# getprop() falls back to the "default" entry for keys a kind omits.
entdefs = {
    # Fallback values for any prop a specific kind does not define.
    "default": {
        "width": 0,
        "height": 0,
        "gravity": 0,
        "sprite": "error",
        "solid": False,
        "daemon": _noDaemon,
        "colldaemon": _noDaemon
    },
    # Arrow-key controlled player ball.
    "player": {
        "width": 16,
        "height": 16,
        "gravity": 0,
        "sprite": "ball",
        "daemon": _plyDaemon,
        "colldaemon": plyColl
    },
    # NOTE(review): defined but not spawned anywhere in this file excerpt.
    "bad_ortho": {
        "width": 16,
        "height": 16,
        "sprite": "ball",
        "daemon": _noDaemon
    },
    # Collectible that raises the score (see plyColl).
    "coin": {
        "width": 16,
        "height": 16,
        "sprite": "vbuck",
        "daemon": _noDaemon
    },
    # Hazard ball; blinks during its spawn grace period (see bbDaemon).
    "badball": {
        "width": 16,
        "height": 16,
        "sprite": "redball",
        "daemon": bbDaemon
    }
}
def get(ent):
    """Return the archetype dict for entity kind `ent`; raise if unknown."""
    try:
        return entdefs[ent]
    except KeyError:
        raise Exception("ENT DOES NOT EXIST LMAO")
def getprop(ent, prop):
    """Look up `prop` for entity kind `ent`, falling back to "default".

    Raises if the prop exists in neither the kind's dict nor the default.
    """
    archetype = get(ent)
    if prop in archetype:
        return archetype[prop]
    fallback = get("default")
    if prop in fallback:
        return fallback[prop]
    raise Exception("WEIRD prop DOES NOT EXIST")
|
# Net-salary calculator: reads hourly wage and hours worked, then itemizes
# the IR (11%), INSS (8%) and union (5%) deductions from the gross salary.
ganhoPorHora = float(input('Digite seu ganho por hora: '))
horasTrabalhadas = int(input('Digite a quantidade de horas trabalhas: '))
salarioBruto = ganhoPorHora * horasTrabalhadas
ir = salarioBruto * 11 / 100        # income tax, 11% of gross
inss = salarioBruto * 8 / 100       # social security, 8% of gross
sindicato = salarioBruto * 5 / 100  # union dues, 5% of gross
salarioLiquido = salarioBruto - ir - inss - sindicato
print(f'- Seu salário bruto é: {salarioBruto:.2f}')
# Fixed: ':.2' formats to 2 *significant digits* (e.g. 1234.5 -> '1.2e+03');
# ':.2f' gives 2 decimal places like every other line here.
print(f'- IR: R${ir:.2f}')
print(f'- INSS: R${inss:.2f}')
print(f'- Sindicato: R${sindicato:.2f}')
print(f'- Seu salário líquido é de: R${salarioLiquido:.2f}')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add linked-list ordering fields to SpeakerList and relax the
    extra-deadline help text on Event."""
    dependencies = [
        ('events', '0026_auto_20151108_1530'),
    ]
    operations = [
        migrations.AddField(
            model_name='speakerlist',
            name='first',
            field=models.BooleanField(default=True),
        ),
        # Self-referential link forming a chain of speakers.
        migrations.AddField(
            model_name='speakerlist',
            name='next_speaker',
            field=models.ForeignKey(to='events.SpeakerList', blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='extra_deadline_text',
            field=models.CharField(verbose_name='beskrivning till det extra anmälningsstoppet', max_length=255,
                                   help_text='Ex. få mat, garanteras fika osv. '
                                             'Lämna tomt om extra anmälningsstopp ej angivits.',
                                   blank=True, null=True),
        ),
    ]
|
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from helperFunctions import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# Make newly created tensors default to CUDA floats; importing this module
# therefore requires a CUDA-capable GPU.
torch.set_default_tensor_type('torch.cuda.FloatTensor')
def to_categorical(y, num_classes):
    """One-hot encode `y` (an array of class indices) as a uint8 matrix."""
    indices = y.astype(int)
    identity = np.eye(num_classes, dtype='uint8')
    return identity[indices]
class LSTMClassifier(nn.Module):
    """Classify a (seq_len, 1, 75) joint sequence: FC front-end + LSTM + head.

    Each 75-dim frame (presumably 25 joints x 3 coords -- confirm upstream)
    is projected to `modified_input_dim` features, run through a single-layer
    LSTM, and the final step's output is mapped to `label_size`
    log-probabilities.
    """

    def __init__(self, hidden_dim=64, label_size=49, modified_input_dim=64):
        super(LSTMClassifier, self).__init__()
        self.hidden_dim = hidden_dim
        # Frame-wise projection 75 -> 70 -> modified_input_dim. The last
        # Linear was hard-coded to 64, silently breaking any caller that
        # passed modified_input_dim != 64 (the LSTM below expects it);
        # parameterizing it is behavior-identical for the default.
        self.fully_connected = nn.Sequential(nn.Linear(75, 70), nn.ReLU(),
                                             nn.Linear(70, modified_input_dim), nn.ReLU())
        self.lstm = nn.LSTM(input_size=modified_input_dim, hidden_size=hidden_dim)
        self.hidden2label = nn.Linear(hidden_dim, label_size)
        # Persistent (h, c) state; callers detach/reset it between sequences.
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Zero (num_layers=1, batch=1, hidden_dim) tensors for hidden h and cell c.
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))

    def forward(self, joint_3d_vec):
        """Return log-probabilities of shape (1, label_size) for one sequence.

        `joint_3d_vec` is expected to be (seq_len, 1, 75) -- TODO confirm.
        """
        x = joint_3d_vec
        # Drop the batch dim for the per-frame FC, then restore it for the LSTM.
        x = self.fully_connected(x.view(x.size()[0], x.size()[2]))
        x = x.view(x.size()[0], 1, x.size()[1])
        lstm_out, self.hidden = self.lstm(x, self.hidden)
        y = self.hidden2label(lstm_out[-1])
        # dim=1 made explicit: the implicit-dim form is deprecated and
        # resolves to dim=1 for this 2-D input, so behavior is unchanged.
        log_probs = F.log_softmax(y, dim=1)
        return log_probs
# Load the full dataset and labels (helpers imported from helperFunctions).
trainingData = torch.from_numpy(getData())
labels = getLabels()
#indices = torch.from_numpy((labels.reshape(labels.shape[0])<5).dtype()).type(torch.LongTensor)
#indices = (torch.from_numpy(labels)<5).numpy()
# Number of samples whose label is < 5 (only the first five classes are kept).
number = int((labels<5).sum())
#indices = (labels<5)
# labels = labels[indices,:]
# trainingData = trainingData[indices,:,:,:]
# Pre-allocate the filtered tensor; presumably (samples, 300 frames,
# 25 joints, 3 coords) -- TODO confirm layout against getData().
neededData = torch.randn(number, 300, 25, 3)
neededLabels = np.zeros((number,1))
currentIndex = 0
# Copy over only the samples belonging to the first five classes.
for i in range(labels.shape[0]):
    if labels[i, 0] < 5:
        neededData[currentIndex,:,:,:] = trainingData[i,:,:,:]
        neededLabels[currentIndex,:] = labels[i,:]
        currentIndex+=1
#labels = torch.from_numpy(to_categorical((neededLabels),5)).view(number,-1)
# Class indices as a CUDA LongTensor (requires a GPU).
labels = torch.from_numpy(neededLabels).view(number,-1).type(torch.cuda.LongTensor)
trainingData = neededData
def checkAcc(model0,data,labels):
    """Return the fraction of correct predictions on the first 1000 samples.

    NOTE(review): the `data` parameter is ignored -- the loop reads the
    module-level `trainingData` -- and `l` is overwritten to 1000 right after
    being set to len(labels). Presumably deliberate subsampling of the first
    1000 items; confirm before reusing with other data.
    """
    l = labels.size()[0]
    labelsdash = autograd.Variable(labels.view(l))
    l = 1000
    out_labels = autograd.Variable(torch.zeros(l))
    for i in range(l):
        # (300, 1, 75): one full sequence presented as a single-item batch.
        temp = model0(autograd.Variable(trainingData[i,:,:,:].view(300,1,75)))
        # print(temp)
        # print(temp.size(), type(temp))
        out_labels[i] = temp.max(1)[1]  # argmax over the label dimension
    return(torch.mean((labelsdash[0:l].type(torch.cuda.LongTensor)==out_labels.type(torch.cuda.LongTensor)).type(torch.cuda.FloatTensor)))
# Five-class model instance, placed on the GPU.
model0 = LSTMClassifier(label_size=5).cuda()
def TrainAcc():
    """Print the training-set accuracy of the module-level `model0`."""
    print(checkAcc(model0,trainingData,labels))
#print(labels.size())
def train(model, num_epoch, num_iter, lr=1e-3,rec_interval=2, disp_interval=10):
    """Train `model` with Adam on random single sequences from trainingData.

    Runs num_epoch epochs of num_iter steps each, recording the loss every
    rec_interval steps and printing it every disp_interval steps. Returns
    the list of recorded loss values.

    NOTE(review): avg_loss is divided by n_samples (sum of sequence lengths,
    300 per step), not by num_iter -- confirm that scaling is intended.
    """
    optimizer = optim.Adam(model.parameters(), lr)
    loss_values = []
    rec_step = 0
    for eph in range(num_epoch):
        print('epoch {} starting ...'.format(eph))
        avg_loss = 0
        n_samples = 0
        # Random subset of sequence indices used for this epoch.
        randpermed = torch.randperm(trainingData.size()[0])[:num_iter]
        for i in range(num_iter):
            # Detach the persistent LSTM state so gradients do not flow
            # across sequence boundaries.
            model.hidden = (model.hidden[0].detach(), model.hidden[1].detach())
            model.zero_grad()
            j = randpermed[i]
            X,Y = trainingData[j,:,:,:].view(300,1,75),labels[j,:]
            #print(X.size())
            n_samples += len(X)
            X = autograd.Variable(X)
            #print(X)
            Y = autograd.Variable(Y.view(1))
            y_hat = model(X)
            loss = F.cross_entropy(y_hat, Y)
            avg_loss += loss.data[0]
            if i % disp_interval == 0:
                print('epoch: %d iterations: %d loss :%g' % (eph, i, loss.data[0]))
            if rec_step%rec_interval==0:
                loss_values.append(loss.data[0])
            loss.backward()
            optimizer.step()
            rec_step += 1
        avg_loss /= n_samples
        #evaluating model accuracy
        #TrainAcc()
        print('epoch: {} <====train track===> avg_loss: {} \n'.format(eph, avg_loss))
    return loss_values
#l = train(model0, 10, 100, 2, 20)
def PlotLoss(l,name):
    """Plot loss curve `l` and save it to file `name`."""
    plt.plot(l)
    plt.show()  # no-op under the Agg backend selected at import time
    plt.savefig(name)
def Scheduler():
    """Run the full training schedule with stepped-down learning rates,
    saving loss plots and printing training accuracy along the way."""
    # Warm-up at a higher learning rate (its loss is intentionally discarded).
    loss0 = train(model0,3,3300,6e-3)
    loss1 = train(model0,20,3300,1e-3)
    PlotLoss(loss1,'loss1.png')
    TrainAcc()
    loss2 = train(model0,20,3300,1e-3)
    TrainAcc()
    loss3 = train(model0,20,3300,1e-4)
    PlotLoss(loss1+loss2+loss3,'loss2.png')
    TrainAcc()
    loss4 = train(model0,20,3300,1e-4)
    TrainAcc()
    loss5 = train(model0,50,3300,1e-5)
    PlotLoss(loss1+loss2+loss3+loss4+loss5,'loss3.png')
TrainAcc() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.