text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# generated
# Metadata for the generated Data Dragon payload this script loads.
type = "champion"
format = "standAloneComplex"
version = v"11.17.1"
# Pull in the generated module file for every supported locale; the
# directory layout is <this dir>/11.17.1/generated/<locale>/module.jl.
for locale in ("en_US", "ko_KR")
    gendir = normpath(@__DIR__, "11.17.1", "generated", locale)
    include(normpath(gendir, "module.jl"))
end
|
{"hexsha": "1025e9a10b58886f0765c39af15713e3bd05aaa9", "size": 224, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "gen/locales.jl", "max_stars_repo_name": "wookay/LOLToolsDataDragon.jl", "max_stars_repo_head_hexsha": "37194b9ac1ea230e18b7db606f1cd6005ddf2f1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gen/locales.jl", "max_issues_repo_name": "wookay/LOLToolsDataDragon.jl", "max_issues_repo_head_hexsha": "37194b9ac1ea230e18b7db606f1cd6005ddf2f1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gen/locales.jl", "max_forks_repo_name": "wookay/LOLToolsDataDragon.jl", "max_forks_repo_head_hexsha": "37194b9ac1ea230e18b7db606f1cd6005ddf2f1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8888888889, "max_line_length": 63, "alphanum_fraction": 0.6741071429, "num_tokens": 75}
|
import sys
import nett_python as nett
from float_vector_message_pb2 import *
from float_message_pb2 import *
from color_table_message_pb2 import *
import pyqtgraph as pg
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import helper
# View-mode configuration resolved from the command line.
# use_ip_endpoint: nett endpoint to initialize; fixed_selection: when not
# None, pin the chart to a single region id and ignore selection updates.
use_ip_endpoint = None
fixed_selection = None
if len(sys.argv) != 3:
    # No endpoint/region supplied: fall back to the local default endpoint.
    print 'switching to default view mode'
    use_ip_endpoint = 'tcp://127.0.0.1:2001'
else:
    # argv = [script, endpoint, region-id]: fixed single-region mode.
    print 'using fixed view mode'
    use_ip_endpoint = sys.argv[1]
    fixed_selection = int(sys.argv[2])
nett.initialize(use_ip_endpoint)
class MainWindow(QtGui.QMainWindow):
    """Main window of the activity chart.

    Plots one excitatory ('e') and one inhibitory ('i') firing-rate curve
    per selected region, fed by the monitor_* worker threads via old-style
    Qt signals.  In fixed mode (module global `fixed_selection` is set) the
    window shows exactly one region and ignores selection updates.
    """

    def __init__(self, parent = None):
        super(MainWindow, self).__init__(parent)
        self.setDockOptions(QtGui.QMainWindow.AnimatedDocks)
        self.setWindowTitle('Activity Chart')
        if fixed_selection != None:
            self.setWindowTitle('Firing Rate: ' +str(fixed_selection))
        # White background / black foreground for the pyqtgraph canvas.
        pg.setConfigOption('background', 'w')
        pg.setConfigOption('foreground', 'k')
        win = pg.PlotWidget()
        self.setCentralWidget(win)
        self.plot = win.getPlotItem()
        self.plot_legend = self.plot.addLegend()
        # Per-region sample histories and the currently plotted curve items.
        self.ca_e_data = {}
        self.ca_i_data = {}
        self.curves_e = {}
        self.curves_i = {}
        # Initial color table read from disk; may be replaced at runtime by
        # color_update() when a new table arrives over the network.
        f = open('color_table.bin', "rb")
        msg = color_table_message()
        msg.ParseFromString(f.read())
        f.close()
        self.color_table = msg
        # NOTE(review): region count 68 is hard-coded (marked "hack" by the
        # original author) — confirm it matches the simulation's region count.
        for i in range(0,68):
            self.ca_e_data[i] = np.array( [] )
            self.ca_i_data[i] = np.array( [] )
        self.setup_color()
        self.init_actions()
        self.init_menus()
        # Data feed thread: streams per-region e/i firing-rate vectors.
        self.monitor_feed_ = monitor_feed()
        self.connect(self.monitor_feed_, self.monitor_feed_.signal_ca_e, self.update_data_e)
        self.connect(self.monitor_feed_, self.monitor_feed_.signal_ca_i, self.update_data_i)
        self.monitor_feed_.start()
        # Reset thread: clears all plotted curves on demand.
        self.monitor_reset = monitor_reset()
        self.connect(self.monitor_reset, self.monitor_reset.signal, self.reset_data)
        self.monitor_reset.start()
        self.monitor_selection = monitor_selection()
        self.connect(self.monitor_selection, self.monitor_selection.signal, self.set_selection)
        self.monitor_color = monitor_color()
        self.connect(self.monitor_color, self.monitor_color.signal, self.color_update)
        self.monitor_color.start()
        #only listen to update when in non fixed mode
        if fixed_selection == None:
            self.monitor_selection.start()
        else:
            self.set_selection(float_vector_message()) #fake selection message

    def color_update(self, msg):
        """Replace the color table with a freshly received one."""
        print 'color update!'
        self.color_table = msg

    def setup_color(self):
        """Enable a light grid on the plot canvas."""
        self.plot.showGrid(x=True, y=True, alpha=0.3)

    def init_actions(self):
        """Create the Ctrl+Q quit action."""
        self.exit_action = QtGui.QAction('Quit', self)
        self.exit_action.setShortcut('Ctrl+Q')
        self.exit_action.setStatusTip('Exit application')
        self.connect(self.exit_action, QtCore.SIGNAL('triggered()'), self.close)

    def init_menus(self):
        """Attach window-level actions (no menu bar is created)."""
        self.addAction(self.exit_action)

    def update_data_i(self, data):
        """Append one inhibitory sample per region, redraw selected curves."""
        for x in range(0, len(data.value)):
            self.ca_i_data[x] = np.append(self.ca_i_data[x], data.value[x])
        #only update values in current selection:
        for keys in self.curves_i:
            pen = self.create_pen(keys, False)
            self.plot.removeItem(self.curves_i[keys])
            self.curves_i[keys] = self.plot.plot(self.ca_i_data[keys], pen = pen, name = 'i' + str(int(keys)) + ': ' +"{0:.3f}".format(self.ca_i_data[keys][-1]))

    def update_data_e(self, data):
        """Append one excitatory sample per region, redraw selected curves.

        Also rebuilds the legend so the 'latest value' labels stay current.
        """
        self.plot_legend.scene().removeItem(self.plot_legend)
        self.plot_legend = self.plot.addLegend()
        for x in range(0, len(data.value)):
            self.ca_e_data[x] = np.append(self.ca_e_data[x], data.value[x])
        #only update values in current selection:
        for keys in self.curves_e:
            pen = self.create_pen(keys, True)
            self.plot.removeItem(self.curves_e[keys])
            self.curves_e[keys] = self.plot.plot(self.ca_e_data[keys], pen = pen, name = 'e' + str(int(keys)) + ': ' + "{0:.3f}".format(self.ca_e_data[keys][-1]))

    def create_pen(self, key, is_e):
        """Build the pen for region `key`; `is_e` picks the e/i styling.

        Falls back to a plain black pen when the region has no entry in
        the color table.
        """
        pen = pg.mkPen(color=(0,0,0))
        if self.color_table != None:
            for x in range(0, len(self.color_table.value)):
                if self.color_table.value[x].region_number == key:
                    if is_e == True:
                        pen = pg.mkPen(color = (self.color_table.value[x].color_e_r, self.color_table.value[x].color_e_g, self.color_table.value[x].color_e_b))
                        pen.setWidth(int(self.color_table.value[x].thickness_e))
                        if self.color_table.value[x].style_e == "SolidLine":
                            pen.setStyle(QtCore.Qt.SolidLine)
                        elif self.color_table.value[x].style_e == "DashLine":
                            pen.setStyle(QtCore.Qt.DashLine)
                        elif self.color_table.value[x].style_e == "DashDotLine":
                            pen.setStyle(QtCore.Qt.DashDotLine)
                        elif self.color_table.value[x].style_e == "DashDotDotLine":
                            pen.setStyle(QtCore.Qt.DashDotDotLine)
                    else:
                        pen = pg.mkPen(color = (self.color_table.value[x].color_i_r, self.color_table.value[x].color_i_g, self.color_table.value[x].color_i_b))
                        pen.setWidth(int(self.color_table.value[x].thickness_i))
                        if self.color_table.value[x].style_i == "SolidLine":
                            pen.setStyle(QtCore.Qt.SolidLine)
                        elif self.color_table.value[x].style_i == "DashLine":
                            pen.setStyle(QtCore.Qt.DashLine)
                        elif self.color_table.value[x].style_i == "DashDotLine":
                            pen.setStyle(QtCore.Qt.DashDotLine)
                        elif self.color_table.value[x].style_i == "DashDotDotLine":
                            pen.setStyle(QtCore.Qt.DashDotDotLine)
        return pen

    def reset_data(self):
        """Slot for the reset signal: drop all plotted curves."""
        self.clear_selection()

    def clear_selection(self):
        """Remove every curve from the plot and rebuild an empty legend."""
        self.plot_legend.scene().removeItem(self.plot_legend)
        for keys in self.curves_i:
            self.plot.removeItem(self.curves_i[keys])
        self.curves_i = {}
        for keys in self.curves_e:
            self.plot.removeItem(self.curves_e[keys])
        self.curves_e = {}
        self.plot_legend = self.plot.addLegend()

    def set_selection(self, msg):
        """Replace the plotted region set with the regions listed in `msg`.

        In fixed mode the pinned region is appended to the (possibly empty)
        incoming selection before plotting.
        """
        if fixed_selection != None:
            msg.value.append(fixed_selection)
        self.clear_selection()
        for value in msg.value:
            pen = self.create_pen(value, False)
            self.curves_i[value] = self.plot.plot(self.ca_i_data[value], pen = pen, name='i' + str(int(value)) + ': ' +"{0:.3f}".format(self.ca_i_data[value][-1]))
            pen = self.create_pen(value, True)
            self.curves_e[value] = self.plot.plot(self.ca_e_data[value], pen = pen, name = 'e' + str(int(value)) + ': ' +"{0:.3f}".format(self.ca_e_data[value][-1]))
class monitor_feed(QtCore.QThread):
    """Worker thread that receives excitatory ('fr_e') and inhibitory
    ('fr_i') firing-rate vectors over nett and re-emits each parsed
    message as an old-style Qt signal for the GUI thread."""

    def __init__(self):
        QtCore.QThread.__init__(self)
        # Signal handles the main window connects its update slots to.
        self.signal_ca_e = QtCore.SIGNAL("signal_e")
        self.signal_ca_i = QtCore.SIGNAL("signal_i")
        self.ca_e_slot_in = nett.slot_in_float_vector_message()
        self.ca_i_slot_in = nett.slot_in_float_vector_message()
        # Both streams come from the same compute-node endpoint.
        ip = helper.obtain_ip_address_compute()
        endpoint = 'tcp://' + ip + ':8000'
        self.ca_e_slot_in.connect(endpoint, 'fr_e')
        self.ca_i_slot_in.connect(endpoint, 'fr_i')

    def run(self):
        # Alternate between the two streams forever: one excitatory
        # message, then one inhibitory message per loop iteration.
        message = float_vector_message()
        while True:
            message.ParseFromString(self.ca_e_slot_in.receive())
            self.emit(self.signal_ca_e, message)
            message = float_vector_message()
            message.ParseFromString(self.ca_i_slot_in.receive())
            self.emit(self.signal_ca_i, message)
class monitor_reset(QtCore.QThread):
    """Worker thread that listens for simulation reset events and emits a
    parameterless Qt signal for each one."""

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.signal = QtCore.SIGNAL("signal")
        self.reset_slot_in = nett.slot_in_float_message()
        self.reset_slot_in.connect('tcp://127.0.0.1:2003', 'reset')

    def run(self):
        notification = float_message()
        while True:
            # The payload is ignored; receiving it is the event itself.
            notification.ParseFromString(self.reset_slot_in.receive())
            self.emit(self.signal)
class monitor_selection(QtCore.QThread):
    """Worker thread that forwards region-selection updates
    ('regions_selected') to the GUI as Qt signals."""

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.signal = QtCore.SIGNAL("signal")
        self.area_list_slot_in = nett.slot_in_float_vector_message()
        self.area_list_slot_in.connect('tcp://127.0.0.1:2014', 'regions_selected')

    def run(self):
        selection = float_vector_message()
        while True:
            selection.ParseFromString(self.area_list_slot_in.receive())
            self.emit(self.signal, selection)
class monitor_color(QtCore.QThread):
    """Worker thread that forwards color-table updates ('color_table')
    to the GUI as Qt signals."""

    def __init__(self):
        QtCore.QThread.__init__(self)
        self.signal = QtCore.SIGNAL("signal")
        self.color_slot_in = nett.slot_in_color_table_message()
        self.color_slot_in.connect('tcp://127.0.0.1:2008', 'color_table')

    def run(self):
        table = color_table_message()
        while True:
            table.ParseFromString(self.color_slot_in.receive())
            self.emit(self.signal, table)
# Qt bootstrap: build the window and hand control to the event loop.
app = QtGui.QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
|
{"hexsha": "b4d631e0b50463797ce2f0c4e2df74b71b8e4809", "size": 8853, "ext": "py", "lang": "Python", "max_stars_repo_path": "ca_plotter.py", "max_stars_repo_name": "jeliason/isv_neuroscience", "max_stars_repo_head_hexsha": "ce4cf35e57ce2e517cca249ee10d2c302c5c2901", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-15T15:16:47.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-15T15:16:47.000Z", "max_issues_repo_path": "ca_plotter.py", "max_issues_repo_name": "jeliason/isv_neuroscience", "max_issues_repo_head_hexsha": "ce4cf35e57ce2e517cca249ee10d2c302c5c2901", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ca_plotter.py", "max_forks_repo_name": "jeliason/isv_neuroscience", "max_forks_repo_head_hexsha": "ce4cf35e57ce2e517cca249ee10d2c302c5c2901", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-19T18:13:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-19T18:13:22.000Z", "avg_line_length": 34.3139534884, "max_line_length": 159, "alphanum_fraction": 0.6694905682, "include": true, "reason": "import numpy", "num_tokens": 2137}
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
import math
import random
import pandas as pd
import csv
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def rot90(m, k=1, axis=2):
    """Rotate `m` by k*90 degrees counter-clockwise around `axis`.

    Implemented by swapping the target axis into position 2, applying
    numpy's planar rot90 (which acts on the first two axes), and swapping
    back.  With axis=2 the swap is a no-op and this matches np.rot90.
    """
    swapped = np.swapaxes(m, 2, axis)
    rotated = np.rot90(swapped, k)
    return np.swapaxes(rotated, 2, axis)
def rotate_oasis():
    """Re-orient every OASIS volume (ids 1..316) on disk, in place.

    Each volume gets three quarter-turns about axis 0 followed by one
    quarter-turn about axis 2, then overwrites its own file.  File names
    depend on the module-level IMG_SIZE_PX and SLICE_COUNT.
    """
    for x in range(1, 317):
        fname = "data2\\" + str(x) + "#(" + str(IMG_SIZE_PX) + ", " + str(IMG_SIZE_PX) + ", " + str(SLICE_COUNT) + ").npy"
        volume = np.load(fname)
        volume = rot90(volume, 3, 0)
        volume = rot90(volume, 1, 2)
        print("img" + str(x) + " rotated: ")
        np.save(fname, volume)
def shape_oasis():
    """Pad/crop every OASIS volume (ids 1..316) to 65 x 65 x 55, in place.

    Pads the first axis by 5 voxels on each end, then takes the leading
    65x65 window of the first two axes and a centre crop of 55 slices on
    the third, overwriting the original file.
    """
    for x in range(1, 317):
        fname = "data2\\" + str(x) + "#(" + str(IMG_SIZE_PX) + ", " + str(IMG_SIZE_PX) + ", " + str(SLICE_COUNT) + ").npy"
        volume = np.load(fname)
        # Zero-pad only the first axis (5 voxels each side).
        volume = np.pad(volume, pad_width=((5, 5), (0, 0), (0, 0)), mode='constant', constant_values=0)
        # Centre-crop the third axis: start at 65//2 - 55//2 == 5.
        z0 = 65 // 2 - (55 // 2)
        volume = volume[0:65, 0:65, z0:z0 + 55]
        print("img" + str(x) + " shaped: ")
        np.save(fname, volume)
def calculate_mean():
    """Compute the voxel-wise integer mean over shuffled volumes 1..1200.

    Loads each volume from shuffled2\\ (file names depend on the
    module-level IMG_SIZE_PX and SLICE_COUNT), stacks them, and writes
    the mean image to mean_img2.npy for later mean-centring.
    """
    only_img = []
    for x in range(1, 1201):
        img_data = np.load("shuffled2\\" + str(x) + "#(" + str(IMG_SIZE_PX)+ ", " + str(IMG_SIZE_PX) + ", " +str(SLICE_COUNT)+ ").npy")
        print("img" + str(x) + " appended: ")
        only_img.append(img_data)
    # Bug fix: `np.int` was removed in NumPy 1.24.  The builtin `int` is
    # the exact alias it used to resolve to, so results are unchanged.
    mean_img = np.mean(only_img, dtype=int, axis=0)
    np.save('mean_img2.npy', mean_img)
def shuffle():
    """Shuffle dataset file ids and copy volumes to their new positions.

    Builds the list of source ids (1..316 plus 907..2072), compacts the
    second range down by 590 so ids become contiguous, shuffles the
    compacted ids, writes the permutation to index_reference2.csv, and
    copies each data2\\ volume to shuffled2\\ under its new id.
    """
    index = [i for j in (range(1,317), range(907, 2073)) for i in j]
    #index = [x for x in range(1,2073)]
    original = index[:]
    # Compact ids > 906 so the two id ranges form one contiguous block.
    for n,i in enumerate(index):
        if i>906:
            index[n]=index[n]-590
    temp = index[:]
    random.shuffle(index)
    print(original)
    print("------------------------")
    print(temp)
    print("------------------------")
    print(index)
    # Persist the permutation (one id per row) for later label lookup.
    csvfile = "index_reference2.csv"
    with open(csvfile, "w", newline='') as output:
        writer = csv.writer(output)
        for val in index:
            writer.writerows([[val]])
    # Copy each original volume to its shuffled position on disk.
    for n,i in enumerate(original):
        img_data = np.load("data2\\" + str(i) + "#(" + str(IMG_SIZE_PX)+ ", " + str(IMG_SIZE_PX) + ", " +str(SLICE_COUNT)+ ").npy")
        new_index = index[n]
        np.save("shuffled2\\" + str(new_index) + "#(" + str(IMG_SIZE_PX)+ ", " + str(IMG_SIZE_PX) + ", " +str(SLICE_COUNT)+ ").npy", img_data)
        print ("saved " + str(i))
def combine_preprocess (start, end):
    """Build one (image, label) batch file for shuffled ids start..end.

    For each shuffled volume, looks up its row in the module-level
    labels_file by shuffledID, mean-centres the volume with mean_img2.npy,
    and appends [image, age-label] to the batch, saved under combined2\\.
    """
    combined = []
    mean_image = np.load('mean_img2.npy')
    for x in range (start, end+1):
        img_data = np.load("shuffled2\\" + str(x) + "#(" + str(IMG_SIZE_PX)+ ", " + str(IMG_SIZE_PX) + ", " +str(SLICE_COUNT)+ ").npy")
        selected = labels_file[labels_file.shuffledID == x]
        for index, row in selected.iterrows():
            print (str(row['ID']) + " 's age is " + str(row['AGE_AT_SCAN']))
            age = row['AGE_AT_SCAN']
            label = np.array([age])
            # NOTE(review): if shuffledID ever matched several rows, the
            # mean would be subtracted once per row — confirm ids are unique.
            img_data -= mean_image
            combined.append([img_data,label])
    np.save('combined2\\' + 'batch({},{})-{}-{}-{}.npy'.format(start,end,IMG_SIZE_PX,IMG_SIZE_PX,SLICE_COUNT), combined)
def conv3d(x, W):
    """3-D convolution of `x` with filters `W`, stride 1, 'SAME' padding."""
    return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='SAME')
def maxpool3d(x):
    """2x2x2 max pooling with stride 2 and 'SAME' padding (halves each
    spatial dimension, rounding up)."""
    # size of window movement of window
    return tf.nn.max_pool3d(x, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='SAME')
def convolutional_neural_network(x):
    """Two-layer 3-D CNN producing `n_classes` raw logits per volume.

    Architecture: conv3d(32) -> maxpool -> conv3d(64) -> maxpool ->
    fully-connected(1024) with dropout -> linear output.  Uses the
    module-level IMG_SIZE_PX, SLICE_COUNT, n_classes and keep_rate.
    """
    weights = {'W_conv1':tf.Variable(tf.random_normal([3,3,3,1,32])),
               'W_conv2':tf.Variable(tf.random_normal([3,3,3,32,64])),
               'W_fc':tf.Variable(tf.random_normal([258944,1024])),
               'out':tf.Variable(tf.random_normal([1024, n_classes]))}
    biases = {'b_conv1':tf.Variable(tf.random_normal([32])),
              'b_conv2':tf.Variable(tf.random_normal([64])),
              'b_fc':tf.Variable(tf.random_normal([1024])),
              'out':tf.Variable(tf.random_normal([n_classes]))}
    # Reshape flat input into a single-channel 5-D volume batch.
    x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])
    conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool3d(conv1)
    conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool3d(conv2)
    # 258944 = 17*17*14*64: 65 -> 33 -> 17 and 55 -> 28 -> 14 after the two
    # 'SAME' 2x poolings, times 64 channels.
    fc = tf.reshape(conv2,[-1, 258944])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc'])+biases['b_fc'])
    fc = tf.nn.dropout(fc, keep_rate)
    output = tf.matmul(fc, weights['out'])+biases['out']
    return output
def train_neural_network(x):
    """Train the 3-D CNN on pre-combined batch files and save the model.

    x: the tf placeholder fed with image data.  Uses the module-level
    placeholder `y` and config globals (train_batch, validation_batch,
    batch_size, IMG_SIZE_PX, SLICE_COUNT).  After each epoch it reports
    loss and validation accuracy, then saves the session as 'fyp_model'.
    """
    prediction = convolutional_neural_network(x)
    # NOTE(review): softmax cross-entropy with n_classes == 1 is always
    # zero-gradient; confirm the intended loss for age regression.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    saver = tf.train.Saver()
    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for current_batch in range(0, train_batch):
                batch_data = np.load('/home/lvruyi/combined/' + 'batch({},{})-{}-{}-{}.npy'.format(current_batch*batch_size+1, current_batch*batch_size+batch_size, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT))
                for data in batch_data:
                    X = data[0]
                    Y = data[1]
                    _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                    epoch_loss += c
            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            print('Epoch', epoch+1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
            # Bug fix: the running total must be initialised before the
            # validation loop (it was previously read before assignment).
            evaluated_accuracy = 0
            for current_validation in range(train_batch, train_batch+validation_batch):
                validation_data = np.load('/home/lvruyi/combined/' + 'batch({},{})-{}-{}-{}.npy'.format(current_validation*batch_size+1, current_validation*batch_size+batch_size, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT))
                evaluated_accuracy += accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]})
            # Bug fix: 'batch_val' was undefined; average over the number
            # of validation batches actually evaluated.
            print('Accuracy:', evaluated_accuracy / validation_batch)
        saver.save(sess, 'fyp_model')
# Volume geometry and training configuration shared by the helpers above.
IMG_SIZE_PX = 65
SLICE_COUNT = 55
n_classes = 1
train_batch = 75
batch_size = 16
validation_batch = 18
# Phenotype table; must contain shuffledID, ID and AGE_AT_SCAN columns
# (read by combine_preprocess).
labels_file = pd.read_csv('FYP_Phenotypic2.csv')
x = tf.placeholder('float')
y = tf.placeholder('float')
keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)
# Earlier pipeline stages, kept for reference; run them once as needed.
#rotate_oasis ()
#shape_oasis()
#shuffle()
#calculate_mean()
# Build the combined (image, label) batch files for training + validation.
# NOTE(review): the range stops at train_batch+validation_batch-2, so the
# final regular batch is skipped — confirm whether the explicit
# combine_preprocess(1473, 1482) call below is meant to cover the remainder.
for batch in range (0, train_batch+validation_batch-1):
    combine_preprocess(batch*batch_size+1, batch*batch_size+batch_size)
combine_preprocess(1473, 1482)
#train_neural_network(x)
|
{"hexsha": "cb9139fc6d01279199427ff577e02eb2a61f9b92", "size": 7217, "ext": "py", "lang": "Python", "max_stars_repo_path": "Codes/preprocess.py", "max_stars_repo_name": "bijiuni/brain_age", "max_stars_repo_head_hexsha": "8a768e29046d525fdef3d57a58c742b52ed6f8e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-09-07T03:37:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-25T11:34:09.000Z", "max_issues_repo_path": "Codes/preprocess.py", "max_issues_repo_name": "bijiuni/brain_age", "max_issues_repo_head_hexsha": "8a768e29046d525fdef3d57a58c742b52ed6f8e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-03T05:03:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-18T13:27:05.000Z", "max_forks_repo_path": "Codes/preprocess.py", "max_forks_repo_name": "bijiuni/brain_age", "max_forks_repo_head_hexsha": "8a768e29046d525fdef3d57a58c742b52ed6f8e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-04-25T23:12:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-20T07:55:36.000Z", "avg_line_length": 35.0339805825, "max_line_length": 214, "alphanum_fraction": 0.5838991271, "include": true, "reason": "import numpy", "num_tokens": 2077}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from divmachines.classifiers import MF
from divmachines.logging import TrainingLogger as TLogger
# MovieLens ua.base is tab-separated: user, item, rating, timestamp.
cols = ['user', 'item', 'rating', 'timestamp']
train = pd.read_csv('../../../../data/ua.base', delimiter='\t', names=cols)
# map_user = train.groupby('user').count().reset_index()[['user']].reset_index()
# map_user.columns = ['u_idx', 'user']
# map_item = train.groupby('item').count().reset_index()[['item']].reset_index()
# map_item.columns = ['i_idx', 'item']
# train = pd.merge(pd.merge(train, map_user, on="user"), map_item, on="item")
logger = TLogger()
# First model: train from scratch on the first 100 interactions.
model = MF(n_iter=100,
           n_jobs=2,
           batch_size=1000,
           learning_rate=0.60653066,
           use_cuda=False,
           logger=logger,
           early_stopping=True,
           verbose=True)
interactions = train[['user', 'item', 'rating']].values
n_users = np.unique(train[["user"]].values).shape[0]
n_items = np.unique(train[["item"]].values).shape[0]
print("Number of users: %s" % n_users)
print("Number of items: %s" % n_items)
# Features are (user, item) pairs; target is the rating.
x = interactions[:100, :-1]
y = interactions[:100, -1]
model.fit(x,
          y,
          dic={'users': 0, 'items': 1},
          n_users=n_users, n_items=n_items)
print(model.predict(x))
model.save("./time.pth.tar")
# Second model: restore the saved weights and predict without refitting.
# NOTE(review): assumes MF(model=path) loads a ready-to-predict state —
# confirm against divmachines' MF documentation.
model = MF(n_iter=1,
           n_jobs=8,
           batch_size=10,
           learning_rate=0.60653066,
           use_cuda=False,
           logger=logger,
           early_stopping=True,
           model="./time.pth.tar",
           verbose=True)
x = interactions[:100, :-1]
y = interactions[:100, -1]
print(model.predict(x))
# Plot the training-loss curve recorded by the logger during fit().
plt.plot(logger.epochs, logger.losses)
plt.show()
|
{"hexsha": "5eca414b16b8dd26e80a2feb5bc457ead64b1bce", "size": 1696, "ext": "py", "lang": "Python", "max_stars_repo_path": "divmachines/demo/classifiers/mf/movielens.py", "max_stars_repo_name": "DanielMorales9/FactorizationPyTorch", "max_stars_repo_head_hexsha": "50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-12-14T22:34:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-12T17:18:34.000Z", "max_issues_repo_path": "divmachines/demo/classifiers/mf/movielens.py", "max_issues_repo_name": "DanielMorales9/FactorizationPyTorch", "max_issues_repo_head_hexsha": "50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "divmachines/demo/classifiers/mf/movielens.py", "max_forks_repo_name": "DanielMorales9/FactorizationPyTorch", "max_forks_repo_head_hexsha": "50f0644fdb4a903550fb3f1ba78fb9fb8649ceb1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-12-14T22:35:00.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-14T22:35:00.000Z", "avg_line_length": 27.8032786885, "max_line_length": 80, "alphanum_fraction": 0.6185141509, "include": true, "reason": "import numpy", "num_tokens": 444}
|
import os
import numpy as np
from utils import calculate_iou
import matplotlib.pyplot as plt
def main():
    """Collect per-frame IoU between predictions and ground truth across
    OTB100 sequences, then plot a simple success-rate curve.

    Sequences without a pred_rect_sl2.txt file are skipped.  Ground-truth
    boxes are (x, y, w, h) and are converted to corner form before the
    IoU computation.
    """
    root_dir = '../data/OTB100'
    sequences = os.listdir(root_dir)
    iou_list = []
    for name in sequences:
        pred_path = os.path.join(root_dir, name, 'pred_rect_sl2.txt')
        gt_path = os.path.join(root_dir, name, 'groundtruth_rect.txt')
        if not os.path.isfile(pred_path):
            continue
        print (name)
        pred_bbox = np.loadtxt(pred_path)
        gt_bbox = np.genfromtxt(gt_path, delimiter=',')
        if len(gt_bbox.shape) == 1:
            # Comma parsing failed: this sequence uses whitespace separators.
            gt_bbox = np.genfromtxt(gt_path)
        for i in range(len(pred_bbox)):
            pb = pred_bbox[i, :]
            gb = gt_bbox[i, :]
            # Convert (x, y, w, h) -> (x1, y1, x2, y2) in place.
            gb[2] = gb[0] + gb[2]
            gb[3] = gb[1] + gb[3]
            iou_list.append(calculate_iou(pb, gb))
    iou_list.sort()
    iou_list = np.array(iou_list)
    total = len(iou_list)
    # Success rate at IoU thresholds 0.0, 0.2, ..., 1.0.
    precision = np.zeros(6)
    for i in range(6):
        thresh = i * 0.2
        precision[i] = (iou_list >= thresh).sum() / total
    print(precision)
    x = np.arange(0, 1.2, 0.2)
    plt.plot(x, precision, 'r--')
    t = plt.xlabel('IoU [AUC: %.3f]' % precision.mean(), fontsize=14, color='black')
    t = plt.ylabel('success_rate', fontsize=14, color='black')
    plt.show()


if __name__ == '__main__':
    main()
|
{"hexsha": "4d1a6df12e59bf095629b911bc60dfb0e68bbb97", "size": 1424, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch/plot_curve.py", "max_stars_repo_name": "jiweeo/RL-Tracking", "max_stars_repo_head_hexsha": "ef038569ab6b5663a36f6c3843ca17169ea2f0fe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-08-23T18:27:00.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-29T07:06:07.000Z", "max_issues_repo_path": "pytorch/plot_curve.py", "max_issues_repo_name": "jiweeo/RL-Tracking", "max_issues_repo_head_hexsha": "ef038569ab6b5663a36f6c3843ca17169ea2f0fe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch/plot_curve.py", "max_forks_repo_name": "jiweeo/RL-Tracking", "max_forks_repo_head_hexsha": "ef038569ab6b5663a36f6c3843ca17169ea2f0fe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2978723404, "max_line_length": 84, "alphanum_fraction": 0.5470505618, "include": true, "reason": "import numpy", "num_tokens": 396}
|
import os
import numpy as np
import torch
from transformers import glue_compute_metrics
from utils.miscellaneous import progress_bar
def evaluate(task_name, model, eval_dataloader, model_type, output_mode = 'classification', device='cuda'):
    """Run a full evaluation pass and return GLUE metrics for `task_name`.

    Accumulates logits and labels over every batch in `eval_dataloader`,
    then reduces predictions with argmax (classification) or squeeze
    (regression) before scoring with glue_compute_metrics.
    """
    total_loss = 0.0
    step_count = 0
    preds = None
    out_label_ids = None
    for batch_idx, batch in enumerate(eval_dataloader):
        model.eval()
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if model_type != "distilbert":
                # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                uses_segments = model_type in ["bert", "xlnet", "albert"]
                inputs["token_type_ids"] = batch[2] if uses_segments else None
            outputs = model(**inputs)
            batch_loss, logits = outputs[:2]
            total_loss += batch_loss.mean().item()
        step_count += 1
        logits_np = logits.detach().cpu().numpy()
        labels_np = inputs["labels"].detach().cpu().numpy()
        if preds is None:
            preds = logits_np
            out_label_ids = labels_np
        else:
            preds = np.append(preds, logits_np, axis=0)
            out_label_ids = np.append(out_label_ids, labels_np, axis=0)
        progress_bar(batch_idx, len(eval_dataloader), 'Evaluating...')
    eval_loss = total_loss / step_count
    if output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif output_mode == "regression":
        preds = np.squeeze(preds)
    return glue_compute_metrics(task_name, preds, out_label_ids)
|
{"hexsha": "46cdf026990743a0c39e2f1590a24db0b6b526b2", "size": 1766, "ext": "py", "lang": "Python", "max_stars_repo_path": "WorkSpace/utils/train.py", "max_stars_repo_name": "csyhhu/transformers", "max_stars_repo_head_hexsha": "87b779d521092e138dc8cd18aa36fd5325b52fd7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-03T09:14:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-03T09:14:17.000Z", "max_issues_repo_path": "WorkSpace/utils/train.py", "max_issues_repo_name": "csyhhu/transformers", "max_issues_repo_head_hexsha": "87b779d521092e138dc8cd18aa36fd5325b52fd7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "WorkSpace/utils/train.py", "max_forks_repo_name": "csyhhu/transformers", "max_forks_repo_head_hexsha": "87b779d521092e138dc8cd18aa36fd5325b52fd7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0408163265, "max_line_length": 107, "alphanum_fraction": 0.6189127973, "include": true, "reason": "import numpy", "num_tokens": 427}
|
import os
from src.data.make_dataset import read_params
import numpy as np
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from urllib.parse import urlparse
import argparse
import joblib
import json
import pandas as pd
def eval_metrics(actual,prediction):
    """Return the (rmse, mae, r2) regression scores for `prediction`
    against the ground-truth `actual`."""
    mse = mean_squared_error(actual, prediction)
    return (
        np.sqrt(mse),
        mean_absolute_error(actual, prediction),
        r2_score(actual, prediction),
    )
def train_and_evaluate(config_path):
    """Train an ElasticNet on the configured split, report metrics, save model.

    Reads data paths, the target column, ElasticNet hyper-parameters and
    the model output directory from the YAML at `config_path`, fits on the
    train split, prints RMSE/MAE/R2 on the test split, and dumps the model
    to <model_dir>/model.joblib.
    """
    config = read_params(config_path)
    test_data_path = config['split_data']['test_path']
    train_data_path = config['split_data']['train_path']
    random_state = config['base']['random_state']
    model_dir = config['saved_models']['model_dir']
    alpha = config['estimators']['ElasticNet']['params']['alpha']
    l1_ratio = config['estimators']['ElasticNet']['params']['l1_ratio']
    target = config['base']['target_col']
    train = pd.read_csv(train_data_path,sep=',')
    test = pd.read_csv(test_data_path,sep=',')
    train_y = train[target]
    test_y = test[target]
    train_x = train.drop(target,axis=1)
    test_x = test.drop(target,axis=1)
    lr = ElasticNet(alpha=alpha,l1_ratio=l1_ratio,random_state=random_state)
    lr.fit(train_x,train_y)
    predicted_qualities = lr.predict(test_x)
    (rmse,mae,r2) = eval_metrics(test_y,predicted_qualities)
    print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
    print("  RMSE: %s" % rmse)
    print("  MAE: %s" % mae)
    print("  R2: %s" % r2)
    os.makedirs(model_dir, exist_ok=True)
    model_path = os.path.join(model_dir, "model.joblib")
    joblib.dump(lr, model_path)
if __name__ == '__main__':
    # Bug fix: the ArgumentParser was constructed but never used, so the
    # config path could not be overridden.  Expose it as --config while
    # keeping the original default, so plain invocation behaves as before.
    parser = argparse.ArgumentParser()
    default_config_path = os.path.join('config','params.yaml')
    parser.add_argument('--config', default=default_config_path,
                        help='path to the params YAML file')
    parsed = parser.parse_args()
    train_and_evaluate(parsed.config)
|
{"hexsha": "f6e8bdfdb7d29e07c7c775404c0f3e864a99e0ad", "size": 1950, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train_model.py", "max_stars_repo_name": "anjibabupalla/mlops_aks_gitworklow", "max_stars_repo_head_hexsha": "b53619876ac4a5aa10e46db2cb40aebfce09f637", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/train_model.py", "max_issues_repo_name": "anjibabupalla/mlops_aks_gitworklow", "max_issues_repo_head_hexsha": "b53619876ac4a5aa10e46db2cb40aebfce09f637", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/train_model.py", "max_forks_repo_name": "anjibabupalla/mlops_aks_gitworklow", "max_forks_repo_head_hexsha": "b53619876ac4a5aa10e46db2cb40aebfce09f637", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5, "max_line_length": 76, "alphanum_fraction": 0.7148717949, "include": true, "reason": "import numpy", "num_tokens": 502}
|
#include <boost/mpl/set/aux_/numbered.hpp>
|
{"hexsha": "460055f5a8263b255609c192c79bf188ab5a468a", "size": 43, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_set_aux__numbered.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_set_aux__numbered.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_set_aux__numbered.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 21.5, "max_line_length": 42, "alphanum_fraction": 0.7674418605, "num_tokens": 13}
|
import cv2
import numpy as np
import random
# Last left-button press position (-1 until the first click).
oldx = oldy = -1
# White 480x640 BGR canvas that the mouse callback draws on.
img = np.ones((480, 640, 3), dtype=np.uint8) * 255
def on_mouse(event, x, y, flags, param):
    """Mouse callback: remember the last left-press position and stamp a
    randomly sized, randomly colored filled circle on double-click."""
    global oldx, oldy
    if event == cv2.EVENT_LBUTTONDOWN:
        oldx, oldy = x, y
    if event == cv2.EVENT_LBUTTONDBLCLK:
        radius = random.randint(10,100)
        color = (random.randint(10,255), random.randint(10,255), random.randint(10,255))
        cv2.circle(img, (x, y), radius, color, -1)
        cv2.imshow('image', img)
# Show the canvas, register the mouse callback, then block until a key press.
cv2.imshow('image', img)
cv2.setMouseCallback('image', on_mouse, img)
cv2.waitKey()
|
{"hexsha": "f8f805a83e08a0336f516b01b85537d2aafda49d", "size": 531, "ext": "py", "lang": "Python", "max_stars_repo_path": "ocv06-02.py", "max_stars_repo_name": "LeeCheahyun/20210823-0930", "max_stars_repo_head_hexsha": "12927fe918452b3e7da5fcd4d31da6095c400106", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ocv06-02.py", "max_issues_repo_name": "LeeCheahyun/20210823-0930", "max_issues_repo_head_hexsha": "12927fe918452b3e7da5fcd4d31da6095c400106", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ocv06-02.py", "max_forks_repo_name": "LeeCheahyun/20210823-0930", "max_forks_repo_head_hexsha": "12927fe918452b3e7da5fcd4d31da6095c400106", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2352941176, "max_line_length": 131, "alphanum_fraction": 0.65913371, "include": true, "reason": "import numpy", "num_tokens": 175}
|
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
import matplotlib.pyplot as plt
class NumericalSolver():
    """
    Explicit finite-difference solver for the specialised Fisher equation
    u_t = eps * u_xx + u * (1 - u**alpha) on x in [-15, 15), with a known
    travelling-wave analytical solution used as the initial condition and as
    the reference for error plots.
    """
    def __init__(self):
        self.eps = 1                             # Multiplier unique to specific biological systems
        self.h = 0.05                            # Spacial step
        self.alpha = 3.0                         # Exponent
        self.k = 0.4 * self.h ** 2 / self.eps    # Time step (scaled by h**2 for stability)
        self.Tf = 42.0                           # Total time frame
        self.x = np.arange(-15, 15, self.h)      # 1D spacial coordinate
        self.N = len(self.x)                     # Size of x-space
        # Application of the Neumann boundary conditions
        self.bc = np.concatenate(([1], np.zeros(self.N - 1))) / self.h ** 2
        # Coarse 1D grid for the scatter plot of the analytical solution
        self.x_anal = np.linspace(-15, 15, 10)

    def Analytical_Solution_Special(self, x, t):
        """
        Analytical travelling-wave solution of the specialised Fisher equation.

        :param x: x space (scalar or array)
        :param t: time
        :return: analytical solution with respect to x at time t
        """
        return (-(1/2)*np.tanh(self.alpha/(2*(2*self.alpha+4)**(1/2))*(x-((self.alpha+4)*t)/((2*self.alpha + 4)**(1/2))))+1/2)**(2/self.alpha)

    def construct_laplace_matrix_1d(self):
        """
        Build the N x N 1D Laplace (second-difference) matrix scaled by 1/h**2.

        :return: sparse N x N 1D Laplace matrix
        """
        e = np.ones(self.N)
        diagonals = [e, -2*e, e]
        offsets = [-1, 0, 1]
        L = scipy.sparse.spdiags(diagonals, offsets, self.N, self.N) / self.h**2
        return L

    def get_sols(self, u, L, out):
        """
        March the explicit Euler scheme forward over the whole time frame.

        :param u: current state (updated in place each step)
        :param L: Laplace matrix from construct_laplace_matrix_1d
        :param out: list that receives a [u] snapshot after every step
        :return: final state and the snapshot list
        """
        for i in range(int(self.Tf / self.k)):
            u_new = u + self.k*(self.eps * (L * u + self.bc) + u*(1 - u**self.alpha))
            out.append([u_new])
            u[:] = u_new
        return u, out

    def solve_turing(self):
        """
        Solve the specialised Fisher equation with the instance parameters.

        :return: final state, the x grid, and the list of per-step snapshots
        """
        # Bug fix: the original built throw-away NumericalSolver() instances
        # here (and in plot_fig), silently discarding any configuration set on
        # `self`; the instance's own methods are used instead.
        L = self.construct_laplace_matrix_1d()
        u = self.Analytical_Solution_Special(self.x, 0)
        out = []
        u, out = self.get_sols(u, L, out)
        out.append([u])
        return u, self.x, out

    def plot_fig(self):
        """
        Plot the numerical vs analytical solutions at t=0, t=2 and t=4, then
        the squared error between them as a function of x at the same times.
        """
        u, x, out = self.solve_turing()
        # Analytical solution at t=0, t=2 and t=4 respectively
        u_analstart = self.Analytical_Solution_Special(self.x_anal, 0)
        u_analhalf = self.Analytical_Solution_Special(self.x_anal, 2)
        u_analend = self.Analytical_Solution_Special(self.x_anal, 4)
        plt.figure()
        plt.plot(x, out[0][0], label='Numerical t=0', color='blue')
        plt.plot(x, out[int(len(out) / self.Tf) * 2][0], label='Numerical t=2', color='green')
        plt.plot(x, out[int(len(out) / self.Tf) * 4][0], label='Numerical t=4', color='red')
        plt.scatter(self.x_anal, u_analstart, label='Analytical t=0', color='blue')
        plt.scatter(self.x_anal, u_analhalf, label='Analytical t=2', color='green')
        plt.scatter(self.x_anal, u_analend, label='Analytical t=4', color='red')
        plt.xlabel('x')
        plt.ylabel('Solution')
        plt.legend()
        plt.show()
        # Dense analytical solutions for the pointwise error curves.
        u_anal0_error = self.Analytical_Solution_Special(x, 0)
        u_anal2_error = self.Analytical_Solution_Special(x, 2)
        u_anal4_error = self.Analytical_Solution_Special(x, 4)
        # Initialise arrays
        error0 = []
        error2 = []
        error4 = []
        for i in range(len(x)):
            error0.append((u_anal0_error[i] - out[0][0][i]) ** 2)
            error2.append((u_anal2_error[i] - out[int(len(out) / self.Tf) * 2][0][i]) ** 2)
            error4.append((u_anal4_error[i] - out[int(len(out) / self.Tf) * 4][0][i]) ** 2)
        plt.plot(x, error0, label='Error t=0', color='blue')
        plt.plot(x, error2, label='Error t=2', color='green')
        plt.plot(x, error4, label='Error t=4', color='red')
        plt.xlabel('x')
        plt.ylabel('Error')
        plt.show()
# a = NumericalSolver()
# a.plot_fig()
|
{"hexsha": "c42481b9dd5373bee9ea3e3b3a619407e0c8b7c1", "size": 4731, "ext": "py", "lang": "Python", "max_stars_repo_path": "turing_class.py", "max_stars_repo_name": "turimang/turingpatterns", "max_stars_repo_head_hexsha": "570fd2e441e0ab5f3e37bce99e554017886f9c33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-20T14:55:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-20T14:55:11.000Z", "max_issues_repo_path": "turing_class.py", "max_issues_repo_name": "turimang/turingpatterns", "max_issues_repo_head_hexsha": "570fd2e441e0ab5f3e37bce99e554017886f9c33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "turing_class.py", "max_forks_repo_name": "turimang/turingpatterns", "max_forks_repo_head_hexsha": "570fd2e441e0ab5f3e37bce99e554017886f9c33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-28T10:05:40.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-28T10:05:40.000Z", "avg_line_length": 41.8672566372, "max_line_length": 158, "alphanum_fraction": 0.6049461002, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1320}
|
import pandas as pd
import numpy as np
from datetime import datetime
from arch import arch_model
from volatility.utils import get_percent_chg, Option
import statsmodels.api as sm
from sklearn import linear_model
def get_IV_predict(df, df_option, test_size, keyList, ir_free):
    """
    For the last `test_size` rows of `df`, forecast next-step volatility with
    an ARCH-family model (arch_model, p=2, q=2) and convert it into predicted
    30-day at-the-money call and put implied volatilities using option quotes.

    :param df: DataFrame with 'Date', 'Date_str' and the return columns in keyList.
    :param df_option: option quotes with 'Date_str', 'Strike', 'Expiration',
        'Close', 'Type' and 'RootClose' columns.
    :param test_size: number of trailing rows of `df` to evaluate.
    :param keyList: return-column names to process.
    :param ir_free: risk-free rate passed to the Option pricer.
    :return: DataFrame indexed by 'Date' with, per key, 'predict_<key>' and the
        fitted 30-day IVs 'IV_predict_c_<key>' / 'IV_predict_p_<key>'.
    """
    df_ret = pd.DataFrame()
    df_ret['Date'] = df['Date'][len(df)-test_size:]
    df_ret['Date_str'] = df['Date_str'][len(df)-test_size:]
    lm = linear_model.LinearRegression()
    dates = list(df_ret['Date_str'])
    for key in keyList:
        df_ret[key] = df[key]
        # Percentage returns used to fit the volatility model.
        returns = 100 * df[key].dropna()
        predictions = []
        predictions_c_iv = []
        predictions_p_iv = []
        print('key', key)
        for i in range(test_size):
            date_str = dates[i].replace('-', '')
            df_option_ = df_option[df_option['Date_str']==date_str]
            # Fit on all returns before the i-th evaluation day and forecast
            # one step ahead; p_val is the forecast volatility (sqrt of variance).
            train = returns[:-(test_size-i)]
            model = arch_model(train, p=2, q=2)
            model_fit = model.fit(disp='off')
            pred_val = model_fit.forecast(horizon=1)
            p_val = np.sqrt(pred_val.variance.values[-1,:][0])
            rows_iv = []
            s = 0
            for ii, row in df_option_.iterrows():
                k = row['Strike']
                exp_date = row['Expiration']
                price = row['Close']
                type_ = row['Type'][0]
                s = row['RootClose']
                d1 = datetime.strptime(date_str, "%Y%m%d")
                d2 = datetime.strptime(exp_date, "%Y%m%d")
                days_exp = (d2 - d1).days
                # Keep only contracts 10-50 days from expiry and within 20% of the money.
                if days_exp > 50 or days_exp < 10: continue
                if float(abs(s-k))/k > 0.2: continue
                opt = Option(s=s, k=k, eval_date=date_str, exp_date=exp_date, price=price, rf=ir_free, vol=0.01*p_val, right=type_)
                iv = opt.get_implied_vol()*100
                rows_iv.append({'Strike':k, 'Days_exp':days_exp, 'Type':type_, 'IV':iv})
            df_iv = pd.DataFrame(rows_iv)
            df_iv_c, df_iv_p = df_iv[df_iv['Type'] == 'C'], df_iv[df_iv['Type'] == 'P']
            # Fit IV linearly in (strike, days-to-expiry) for calls, then
            # evaluate at the spot price and a 30-day horizon.
            X = np.array(df_iv_c[['Strike', 'Days_exp']])
            y = np.array(df_iv_c['IV'])
            model = lm.fit(X, y)
            x_ = np.array(pd.DataFrame([{'Strike':s, 'Days_exp':30}]))
            iv_am_c = model.predict(x_)[0]
            # Same linear fit for puts (the estimator object is re-fit in place).
            X = np.array(df_iv_p[['Strike', 'Days_exp']])
            y = np.array(df_iv_p['IV'])
            model = lm.fit(X, y)
            x_ = np.array(pd.DataFrame([{'Strike':s, 'Days_exp':30}]))
            iv_am_p = model.predict(x_)[0]
            predictions.append(p_val)
            predictions_c_iv.append(iv_am_c)
            predictions_p_iv.append(iv_am_p)
        df_ret['predict_'+key] = predictions
        df_ret['IV_predict_c_'+key] = predictions_c_iv
        df_ret['IV_predict_p_'+key] = predictions_p_iv
    df_ret.set_index('Date', inplace=True)
    return df_ret
def get_IV(df, df_option, test_size, ir_free, keyList=[], keyList_vol=[], keyList_ATR=[]):
    """
    Build 30-day call/put implied volatilities at the spot, +10% and -10%
    strikes for the last `test_size` rows of `df`, one column set per key.

    :param df: DataFrame with 'Date', 'Date_str' and the columns named in
        keyList / keyList_vol.
    :param df_option: option quotes with 'Date_str', 'Strike', 'Expiration',
        'Close', 'Type' and 'RootClose' columns.
    :param test_size: number of trailing rows of `df` to evaluate.
    :param ir_free: risk-free rate passed to the Option pricer.
    :param keyList: return-column names to process.
    :param keyList_vol: parallel list of realised-volatility column names used
        as the IV seed.
    :param keyList_ATR: accepted for interface compatibility; read but unused.
    :return: DataFrame indexed by 'Date' with, per key, the vol column and
        'IV_{c,p}_{0,u,d}_<key>' columns (ATM, +10% and -10% strikes).
    """
    df_ret = pd.DataFrame()
    df_ret['Date'] = df['Date'][len(df)-test_size:]
    df_ret['Date_str'] = df['Date_str'][len(df)-test_size:]
    dates = list(df_ret['Date_str'])
    for idx in range(len(keyList)):
        # Bug fix: the original used `k` both as this loop index and as the
        # per-option strike in the inner loop; names are now distinct.
        key, key_vol, key_ATR = keyList[idx], keyList_vol[idx], keyList_ATR[idx]
        df_ret[key] = df[key]
        vols = []
        c_ivs = [[], [], []]
        p_ivs = [[], [], []]
        print('key', key)
        for i in range(test_size):
            date_str = dates[i].replace('-', '')
            df_option_ = df_option[df_option['Date_str']==date_str]
            df_ = df[df['Date_str']==date_str]
            # Realised-vol seed for the implied-vol solver.
            # NOTE(review): this is a pandas Series (the day's row selection),
            # not a scalar -- presumably df_[key_vol].iloc[0] was intended;
            # confirm before changing, downstream math broadcasts over it.
            vol = df_[key_vol]
            rows_iv = []
            s = 0
            for ii, row in df_option_.iterrows():
                strike = row['Strike']
                exp_date = row['Expiration']
                price = row['Close']
                type_ = row['Type'][0]
                s = row['RootClose']
                d1 = datetime.strptime(date_str, "%Y%m%d")
                d2 = datetime.strptime(exp_date, "%Y%m%d")
                days_exp = (d2 - d1).days
                # Keep only contracts 10-50 days from expiry and within 20% of the money.
                if days_exp > 50 or days_exp < 10: continue
                if float(abs(s-strike))/strike > 0.2: continue
                opt = Option(s=s, k=strike, eval_date=date_str, exp_date=exp_date, price=price, rf=ir_free, vol=0.01*vol, right=type_)
                iv = opt.get_implied_vol()*100
                rows_iv.append({'Strike':strike, 'Days_exp':days_exp, 'Type':type_, 'IV':iv})
            df_iv = pd.DataFrame(rows_iv)
            df_iv_c, df_iv_p = df_iv[df_iv['Type'] == 'C'], df_iv[df_iv['Type'] == 'P']
            # Fit IV ~ (strike, days-to-expiry) planes separately for calls and puts.
            X_c, y_c = np.array(df_iv_c[['Strike', 'Days_exp']]), np.array(df_iv_c['IV'])
            lm_c = linear_model.LinearRegression()
            model_c = lm_c.fit(X_c, y_c)
            X_p, y_p = np.array(df_iv_p[['Strike', 'Days_exp']]), np.array(df_iv_p['IV'])
            lm_p = linear_model.LinearRegression()
            model_p = lm_p.fit(X_p, y_p)
            # Evaluate each plane 30 days out at the spot, +10% and -10% strikes.
            x_, x_10u, x_10d = np.array(pd.DataFrame([{'Strike':s, 'Days_exp':30}])), np.array(pd.DataFrame([{'Strike':s*1.1, 'Days_exp':30}])), np.array(pd.DataFrame([{'Strike':s*0.9, 'Days_exp':30}]))
            iv_am_c, iv_am_c10u, iv_am_c10d = model_c.predict(x_)[0], model_c.predict(x_10u)[0], model_c.predict(x_10d)[0]
            iv_am_p, iv_am_p10u, iv_am_p10d = model_p.predict(x_)[0], model_p.predict(x_10u)[0], model_p.predict(x_10d)[0]
            vols.append(vol)
            c_ivs[0].append(iv_am_c)
            p_ivs[0].append(iv_am_p)
            c_ivs[1].append(iv_am_c10u)
            p_ivs[1].append(iv_am_p10u)
            c_ivs[2].append(iv_am_c10d)
            p_ivs[2].append(iv_am_p10d)
        df_ret[key_vol] = vols
        df_ret['IV_c_0_'+key] = c_ivs[0]
        df_ret['IV_p_0_'+key] = p_ivs[0]
        df_ret['IV_c_u_'+key] = c_ivs[1]
        df_ret['IV_p_u_'+key] = p_ivs[1]
        df_ret['IV_c_d_'+key] = c_ivs[2]
        df_ret['IV_p_d_'+key] = p_ivs[2]
    df_ret.set_index('Date', inplace=True)
    return df_ret
|
{"hexsha": "7855a7236b34a67441f9a402e3fce4d7610c197f", "size": 6364, "ext": "py", "lang": "Python", "max_stars_repo_path": "volatility/models_IV.py", "max_stars_repo_name": "larrys54321/quant_corner", "max_stars_repo_head_hexsha": "3dc6f3f3d1ce1fa002c226bd5c5f845b91710687", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "volatility/models_IV.py", "max_issues_repo_name": "larrys54321/quant_corner", "max_issues_repo_head_hexsha": "3dc6f3f3d1ce1fa002c226bd5c5f845b91710687", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "volatility/models_IV.py", "max_forks_repo_name": "larrys54321/quant_corner", "max_forks_repo_head_hexsha": "3dc6f3f3d1ce1fa002c226bd5c5f845b91710687", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1407407407, "max_line_length": 202, "alphanum_fraction": 0.5477686989, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1788}
|
import Data.List1
import Data.Nat
import Data.String.Parser
import System.File
-- A snailfish number (AoC 2021 day 18): a binary tree whose leaves are
-- natural numbers.
data SnailfishNum = Regular Nat | Pair SnailfishNum SnailfishNum

-- Render in the puzzle's bracket notation, e.g. "[1,[2,3]]".
Show SnailfishNum where
  show (Regular k) = show k
  show (Pair x y) = "[" ++ show x ++ "," ++ show y ++ "]"

-- Outcome of one explode pass: None = nothing exploded; Done = explosion
-- fully absorbed; Add k j / AddL k / AddR j = exploded halves still
-- propagating outwards through the tree.
data ReduceResult = None | Done | Add Nat Nat | AddL Nat | AddR Nat

-- Puzzle input: a non-empty list of snailfish numbers.
Input : Type
Input = List1 SnailfishNum
-- Parse "x,y" using p for both components.
pairParser : Parser a -> Parser (a, a)
pairParser p = do fst <- p
                  skip $ char ','
                  snd <- p
                  pure (fst, snd)

-- A snailfish number: either "[x,y]" (recursively) or a bare natural.
numParser : Parser SnailfishNum
numParser = (char '[' *> (uncurry Pair <$> pairParser numParser) <* char ']') <|>
            Regular <$> natural

-- One or more whitespace-separated snailfish numbers; fails on empty input.
parser : Parser Input
parser = do Just l <- fromList <$> some (numParser <* spaces)
              | Nothing => fail "empty list"
            pure l
-- Fully reduce a snailfish number: keep exploding (leftmost pair at depth 4)
-- until nothing explodes, then split the leftmost regular >= 10 and, if one
-- was split, start over.  Stops when neither rule applies.
reduce : SnailfishNum -> SnailfishNum
reduce n = case explode 0 n of
                (r, None) => case split r of
                                  (r, True) => reduce r
                                  (r, False) => r
                (r, _ ) => reduce r
  where
    -- Replace the leftmost Regular k with k > 9 by the pair
    -- [floor(k/2), ceil(k/2)]; the Bool reports whether a split happened.
    split : SnailfishNum -> (SnailfishNum, Bool)
    split (Regular k) =
      if k > 9
         then (Pair (Regular $ divNatNZ k 2 SIsNonZero) (Regular $ divCeilNZ k 2 SIsNonZero), True)
         else (Regular k, False)
    split (Pair x y) = case split x of
                            (z, True) => (Pair z y, True)
                            (z, False) => let (w, b) = split y in
                                              (Pair z w, b)

    -- Add n to the leftmost regular number of the tree.
    addLeftMost : Nat -> SnailfishNum -> SnailfishNum
    addLeftMost n (Regular k) = Regular $ n + k
    addLeftMost n (Pair x y) = Pair (addLeftMost n x) y

    -- Add n to the rightmost regular number of the tree.
    addRightMost : Nat -> SnailfishNum -> SnailfishNum
    addRightMost n (Regular k) = Regular $ n + k
    addRightMost n (Pair x y) = Pair x (addRightMost n y)

    -- Explode the leftmost pair of regulars found at depth 4, replacing it by
    -- Regular 0 and threading the two halves outwards via ReduceResult
    -- (Add = both halves pending, AddL/AddR = one half pending, Done = both
    -- halves absorbed into neighbouring regulars).
    explode : Nat -> SnailfishNum -> (SnailfishNum, ReduceResult)
    explode _ (Regular k) = (Regular k, None)
    explode 4 (Pair (Regular k) (Regular j)) = (Regular 0, Add k j)
    explode d (Pair x y) = case explode (min (d + 1) 4) x of
            (z, Done) => (Pair z y, Done)
            (z, (Add k j)) => (Pair z (addLeftMost j y), AddL k)
            (z, (AddL k)) => (Pair z y, AddL k)
            (z, (AddR j)) => (Pair z (addLeftMost j y), Done)
            (z, None) => case explode (min (d + 1) 4) y of
                    (w, (Add k j)) => (Pair (addRightMost k z) w, AddR j)
                    (w, (AddL k)) => (Pair (addRightMost k z) w, Done)
                    (w, r) => (Pair z w, r)

-- Snailfish addition: pair the operands, then fully reduce.
add : SnailfishNum -> SnailfishNum -> SnailfishNum
add n1 n2 = reduce $ Pair n1 n2
-- Magnitude of a snailfish number: 3 * left + 2 * right, recursively.
magnitude : SnailfishNum -> Nat
magnitude (Regular n) = n
magnitude (Pair x y) = 3 * (magnitude x) + 2 * (magnitude y)

-- Part 1: magnitude of the left-fold sum of all input numbers.
part1 : Input -> IO String
part1 = pure . show . magnitude . foldl1 add

-- Part 2: largest magnitude obtainable by adding any two input numbers
-- (both orders are tried, since snailfish addition is not commutative).
part2 : Input -> IO String
part2 a = pure $ show $ foldl1 max $ (\n1 => foldl1 max $ (\n2 => (magnitude $ add n1 n2)) <$> a) <$> a

-- Read input.txt, parse it, and print both answers.
main : IO ()
main = do Right input <- readFile "input.txt"
            | Left err => printLn err
          Right (a, _) <- pure $ parse parser input
            | Left err => printLn err
          part1 a >>= putStrLn
          part2 a >>= putStrLn
|
{"hexsha": "5218134f57b9e208c722cf17938bf1a855f62dbf", "size": 3416, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "18/Main.idr", "max_stars_repo_name": "Olavhaasie/aoc-2021", "max_stars_repo_head_hexsha": "0a0b293bd9c41da785f4a1a0207a72a823944b1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "18/Main.idr", "max_issues_repo_name": "Olavhaasie/aoc-2021", "max_issues_repo_head_hexsha": "0a0b293bd9c41da785f4a1a0207a72a823944b1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "18/Main.idr", "max_forks_repo_name": "Olavhaasie/aoc-2021", "max_forks_repo_head_hexsha": "0a0b293bd9c41da785f4a1a0207a72a823944b1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7311827957, "max_line_length": 103, "alphanum_fraction": 0.5029274005, "num_tokens": 965}
|
# using Revise
using Test

# Top-level test driver: each inner @testset groups the include()d test files
# by feature area, so failures are reported per area.
@testset "BifurcationKit" begin
    @testset "Linear Solvers" begin
        include("precond.jl")
        include("test_linear.jl")
    end

    @testset "Newton" begin
        include("test_newton.jl")
        include("test-bordered-problem.jl")
    end

    @testset "Continuation" begin
        include("test_bif_detection.jl")
        include("test-cont-non-vector.jl")
        include("simple_continuation.jl")
        include("testNF.jl")
    end

    @testset "Events / User function" begin
        include("event.jl")
    end

    @testset "Fold Codim 2" begin
        include("testJacobianFoldDeflation.jl")
        include("codim2.jl")
    end

    @testset "Hopf Codim 2" begin
        include("testHopfMA.jl")
        include("lorenz84.jl")
        include("COModel.jl")
    end

    @testset "Periodic orbits" begin
        # Shooting (SH/SS), trapezoid and collocation discretisations.
        include("test_potrap.jl")
        include("test_SS.jl")
        include("poincareMap.jl")
        include("stuartLandauSH.jl")
        include("stuartLandauTrap.jl")
        include("stuartLandauCollocation.jl")
        include("testLure.jl")
    end

    @testset "Wave" begin
        include("test_wave.jl")
    end
end
|
{"hexsha": "bddb7e21a1456953f65b7c52138d5b9bbe3582b6", "size": 1007, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "free-Gift-card/BifurcationKit.jl", "max_stars_repo_head_hexsha": "07938db6909fa00b10736f916750d19f92b87e22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2019-01-25T04:31:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-02T11:44:04.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "free-Gift-card/BifurcationKit.jl", "max_issues_repo_head_hexsha": "07938db6909fa00b10736f916750d19f92b87e22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2019-01-25T04:43:33.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-04T13:04:19.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "rveltz/PseudoArcLengthContinuation.jl", "max_forks_repo_head_hexsha": "61935866594ac0b669a343895b294eec1c93bed6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-01-25T04:34:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-24T18:53:31.000Z", "avg_line_length": 19.3653846154, "max_line_length": 41, "alphanum_fraction": 0.7169811321, "num_tokens": 324}
|
from math import exp
from scipy import optimize
'''
References
-----------
[1] D. Sera, R. Teodorescu, and P. Rodriguez, "PV panel model based on datasheet values," in Industrial Electronics, 2007. ISIE 2007. IEEE International Symposium on, 2007, pp. 2392-2396.
'''
class ParameterExtraction(object):
    """
    Extract single-diode-model parameters of a PV panel -- series resistance,
    shunt resistance and diode quality factor -- from datasheet values,
    following Sera et al. [1]: the three unknowns are solved simultaneously
    from the short-circuit, open-circuit and maximum-power-point conditions.

    References
    ----------
    [1] D. Sera, R. Teodorescu, and P. Rodriguez, "PV panel model based on
        datasheet values," ISIE 2007, pp. 2392-2396.
    """

    boltzmann_constant = 1.38065e-23  # [J/K]
    charge_of_electron = 1.602e-19    # [C]
    nominal_temperature = 25 + 273    # STC cell temperature [K]

    def __init__(self, short_circuit_current, open_circuit_voltage,
                 maximum_power_point_current, maximum_power_point_voltage,
                 number_of_cells_in_series = 1,
                 **optional_keyword_arguments):
        """
        :param short_circuit_current: datasheet Isc [A]
        :param open_circuit_voltage: datasheet Voc [V]
        :param maximum_power_point_current: datasheet Impp [A]
        :param maximum_power_point_voltage: datasheet Vmpp [V]
        :param number_of_cells_in_series: cell count of the panel
        :param optional_keyword_arguments: 'number_of_iterations' limits the
            solver's function evaluations (passed as scipy root 'maxfev').
        """
        self.__short_circuit_current = short_circuit_current
        self.__open_circuit_voltage = open_circuit_voltage
        self.__maximum_power_point_current = maximum_power_point_current
        self.__maximum_power_point_voltage = maximum_power_point_voltage
        self.__number_of_cells_in_series = number_of_cells_in_series
        self.number_of_iterations = optional_keyword_arguments.get('number_of_iterations', None)

    #
    # Alias methods in order to make long equations readable:
    #
    def isc(self):
        return self.__short_circuit_current

    def voc(self):
        return self.__open_circuit_voltage

    def impp(self):
        return self.__maximum_power_point_current

    def vmpp(self):
        return self.__maximum_power_point_voltage

    def ns(self):
        return self.__number_of_cells_in_series
    #
    # End of alias methods definition
    #

    def calculate(self, parameter_estimates = (1, 1, 1)):
        """
        Solve the three simultaneous equations for the unknown parameters.

        :param parameter_estimates: initial guesses
            (series_resistance, shunt_resistance, diode_quality_factor).
            Note: the third element is the diode quality factor, but the third
            unknown actually solved for is the thermal voltage derived from it.
        :return: the scipy OptimizeResult; on return series_resistance,
            shunt_resistance, thermal_voltage and diode_quality_factor are
            also stored on the instance.
        """
        thermal_voltage_estimate = self.__thermal_voltage_estimate(parameter_estimates[2])
        initial_guess = [parameter_estimates[0], parameter_estimates[1], thermal_voltage_estimate]
        if self.number_of_iterations is None:  # bug fix: was `== None`
            solution = optimize.root(self.__function_of_three_equations, initial_guess)
        else:
            solution = optimize.root(self.__function_of_three_equations, initial_guess,
                                     options={'maxfev': self.number_of_iterations})
        self.series_resistance = solution.x[0]
        self.shunt_resistance = solution.x[1]
        self.thermal_voltage = solution.x[2]
        self.diode_quality_factor = self.__diode_quality_factor()
        return solution

    def __diode_quality_factor(self):
        # Invert Vt = A*k*T/q to recover the quality factor A.
        return (self.thermal_voltage * self.charge_of_electron) / (self.boltzmann_constant * self.nominal_temperature)

    def __thermal_voltage_estimate(self, diode_quality_factor_estimate):
        # Vt = A*k*T/q for the given quality-factor estimate A.
        return (diode_quality_factor_estimate * self.boltzmann_constant * self.nominal_temperature) / self.charge_of_electron

    def __function_of_three_equations(self, unknown_parameters_vector):
        """
        Residuals of the three simultaneous equations of [1].

        First element: eq. (12) of [1] with Impp moved to the right-hand side.
        Second element: eq. (18) of [1] with dP/dV = 0 at the maximum power
        point (dP/dV = 0 there by definition).
        Third element: eq. (19) of [1] with -1/Rsh moved to the right-hand side.

        :param unknown_parameters_vector: [series_resistance,
            shunt_resistance, thermal_voltage]
        :return: list of the three residuals (all zero at the solution)
        """
        rs, rsh, vt = unknown_parameters_vector
        # Named sub-expressions shared by the (previously inlined) formulas.
        exp_mpp = exp((self.vmpp() + self.impp() * rs - self.voc()) / (self.ns() * vt))
        exp_sc = exp((self.isc() * rs - self.voc()) / (self.ns() * vt))
        common = self.isc() * rsh - self.voc() + self.isc() * rs
        a_mpp = common * exp_mpp / (self.ns() * vt * rsh)
        a_sc = common * exp_sc / (self.ns() * vt * rsh)
        equation_1 = (self.isc()
                      - (self.vmpp() + self.impp() * rs - self.isc() * rs) / rsh
                      - (self.isc() - (self.voc() - self.isc() * rs) / rsh) * exp_mpp
                      - self.impp())
        equation_2 = self.impp() + self.vmpp() * ((-a_mpp - 1 / rsh) / (1 + a_mpp + rs / rsh))
        equation_3 = (-a_sc - 1 / rsh) / (1 + a_sc + rs / rsh) + 1 / rsh
        return [equation_1, equation_2, equation_3]

    # Note: the Jacobian is left to scipy's numerical estimate; a partially-done
    # analytical Jacobian previously lived here as commented-out code and was
    # removed.
|
{"hexsha": "73e5293816f85dc84fbad570d4a98c48e7f15b30", "size": 11575, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/lib/python3.5/site-packages/photovoltaic_modeling/parameter_extraction.py", "max_stars_repo_name": "tadatoshi/photovoltaic_modeling_python", "max_stars_repo_head_hexsha": "65affdc07497e592ec1fedc386ccd26932587284", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2017-12-25T23:22:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T00:23:37.000Z", "max_issues_repo_path": "venv/lib/python3.5/site-packages/photovoltaic_modeling/parameter_extraction.py", "max_issues_repo_name": "tadatoshi/photovoltaic_modeling_python", "max_issues_repo_head_hexsha": "65affdc07497e592ec1fedc386ccd26932587284", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-01-12T09:51:20.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-09T23:19:57.000Z", "max_forks_repo_path": "venv/lib/python3.5/site-packages/photovoltaic_modeling/parameter_extraction.py", "max_forks_repo_name": "tadatoshi/photovoltaic_modeling_python", "max_forks_repo_head_hexsha": "65affdc07497e592ec1fedc386ccd26932587284", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-01-11T15:40:52.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-25T16:03:52.000Z", "avg_line_length": 69.7289156627, "max_line_length": 559, "alphanum_fraction": 0.6586609071, "include": true, "reason": "from scipy", "num_tokens": 2581}
|
\subsection{Savings}
Alice starts cutting back on meat to save shells for the future. This gives Bob a dilemma; he has less income so he can either:
\begin{itemize}
\item Continue spending, drawing down on his shells; or
\item Spend less (for simplicity, on fish).
\end{itemize}
In the first case, Alice’s savings are equal to Bob’s “borrowing”, so net saving is zero. In the second case, Alice can’t obtain the extra shells to save, because Bob is no longer buying her fish, so net saving is again zero.
This is cash saving. In the real world total savings can be above zero because of stockpiling and investment.
|
{"hexsha": "f30be6bb37e6458de27d5db3e3ddd3fb8b7530f5", "size": 610, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/pug/theory/economics/saving/01-01-saving.tex", "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pug/theory/economics/saving/01-01-saving.tex", "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_forks_repo_path": "src/pug/theory/economics/saving/01-01-saving.tex", "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6666666667, "max_line_length": 214, "alphanum_fraction": 0.7672131148, "num_tokens": 145}
|
import librosa
import os
import numpy as np
import scipy.io.wavfile as wavfile

# Index range (inclusive start, exclusive end) of the trimmed clips to process.
RANGE = (0, 2000)

# Peak-normalise each existing trimmed training clip to [-1, 1] and write it
# to norm_audio_train/ at 16 kHz.
if not os.path.isdir('norm_audio_train'):
    os.mkdir('norm_audio_train')
for num in range(RANGE[0], RANGE[1]):
    path = 'audio_train/trim_audio_train%s.wav' % num
    norm_path = 'norm_audio_train/trim_audio_train%s.wav' % num
    if os.path.exists(path):
        audio, _ = librosa.load(path, sr=16000)  # resampled to 16 kHz on load
        # Bug fix: the original bound the builtin name `max`; also guard an
        # all-silent clip, which would otherwise divide by zero and emit NaNs.
        peak = np.max(np.abs(audio))
        norm_audio = np.divide(audio, peak) if peak > 0 else audio
        wavfile.write(norm_path, 16000, norm_audio)
|
{"hexsha": "2d4042208b7cd471769bb75de22e64d087e31ab6", "size": 549, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/audio/audio_norm.py", "max_stars_repo_name": "SutirthaChakraborty/speech_separation", "max_stars_repo_head_hexsha": "20bf0d26e8af24948f59e6894d8e9f5ab7631a39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-24T03:52:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T08:12:35.000Z", "max_issues_repo_path": "data/audio/audio_norm.py", "max_issues_repo_name": "SutirthaChakraborty/speech_separation", "max_issues_repo_head_hexsha": "20bf0d26e8af24948f59e6894d8e9f5ab7631a39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/audio/audio_norm.py", "max_forks_repo_name": "SutirthaChakraborty/speech_separation", "max_forks_repo_head_hexsha": "20bf0d26e8af24948f59e6894d8e9f5ab7631a39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-18T03:37:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-18T03:37:20.000Z", "avg_line_length": 15.25, "max_line_length": 62, "alphanum_fraction": 0.6684881603, "include": true, "reason": "import numpy,import scipy", "num_tokens": 148}
|
# You can use this code to evaluate the trained model of CSPN on VOC validation data, adapted from SEC
import numpy as np
import pylab
import scipy.ndimage as nd
import imageio
from matplotlib import pyplot as plt
from matplotlib import colors as mpl_colors
import krahenbuhl2013
import sys
sys.path.insert(0,'/home/briq/libs/caffe/python')
import caffe
import scipy
# Run the network on GPU 0.
caffe.set_device(0)
caffe.set_mode_gpu()
# The 21 PASCAL VOC segmentation classes; index 0 is background.
voc_classes = [ 'background',
                'aeroplane',
                'bicycle',
                'bird',
                'boat',
                'bottle',
                'bus',
                'car',
                'cat',
                'chair',
                'cow',
                'diningtable',
                'dog',
                'horse',
                'motorbike',
                'person',
                'pottedplant',
                'sheep',
                'sofa',
                'train',
                'tvmonitor',
                ]
# Highest foreground label id (labels range over 0..20).
max_label = 20
# Per-channel mean subtracted in preprocess(); applied after the RGB->BGR
# swap there, so these values are in BGR order.
mean_pixel = np.array([104.0, 117.0, 123.0])
# VOC-style color palette (RGB triples in [0, 1]) for the 21 classes.
palette = [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (0.0, 0.5, 0.0), (0.5, 0.5, 0.0),
           (0.0, 0.0, 0.5), (0.5, 0.0, 0.5), (0.0, 0.5, 0.5), (0.5, 0.5, 0.5),
           (0.25, 0.0, 0.0), (0.75, 0.0, 0.0), (0.25, 0.5, 0.0), (0.75, 0.5, 0.0),
           (0.25, 0.0, 0.5), (0.75, 0.0, 0.5), (0.25, 0.5, 0.5), (0.75, 0.5, 0.5),
           (0.0, 0.25, 0.0), (0.5, 0.25, 0.0), (0.0, 0.75, 0.0), (0.5, 0.75, 0.0),
           (0.0, 0.25, 0.5)]
# Colormap used when drawing ground truth and predictions with VOC colors.
my_cmap = mpl_colors.LinearSegmentedColormap.from_list('Custom cmap', palette, 21)
def preprocess(image, size, mean_pixel=mean_pixel):
    """Prepare one image for the network: resize to (size, size), swap the
    channels RGB -> BGR, subtract the per-channel mean, and return the
    result in CHW layout."""
    data = np.array(image).astype('float32')
    # Bilinear (order=1) resize to a square size x size image.
    factors = (size / float(data.shape[0]), size / float(data.shape[1]), 1.0)
    data = nd.zoom(data, factors, order=1)
    data = data[:, :, ::-1]          # RGB -> BGR
    data = data - mean_pixel
    return data.transpose([2, 0, 1])  # HWC -> CHW
def predict_mask(image_file, net, smooth=True):
    """Run the network on one image file and return the per-pixel label map.

    When `smooth` is True the softmax probabilities are refined with a
    dense CRF (krahenbuhl2013) before the per-pixel argmax is taken.
    """
    img = pylab.imread(image_file)
    net.blobs['images'].data[0] = preprocess(img, 321)
    net.forward()
    raw_scores = np.transpose(net.blobs['fc8-SEC'].data[0], [1, 2, 0])

    # Numerically stable softmax over the class axis.
    shifted = np.exp(raw_scores - np.max(raw_scores, axis=2, keepdims=True))
    probs = shifted / np.sum(shifted, axis=2, keepdims=True)

    # Upsample the probability map back to the original image resolution.
    h, w = float(img.shape[0]), float(img.shape[1])
    probs = nd.zoom(probs, (h / probs.shape[0], w / probs.shape[1], 1.0), order=1)

    # Clamp tiny probabilities so the log below stays finite.
    eps = 0.00001
    probs[probs < eps] = eps

    if smooth:
        return np.argmax(krahenbuhl2013.CRF(img, np.log(probs), scale_factor=1.0), axis=2)
    return np.argmax(probs, axis=2)
def evaluate(res, gt_img):
    """Foreground intersection-over-union between a predicted label map and
    the ground-truth map (the background label 0 is excluded from both).

    Returns 1.0 when both maps contain only background; the original code
    divided by the empty union (nan/inf with numpy scalars, or
    ZeroDivisionError with plain ints).
    """
    intersect_gt_res = np.sum((res == gt_img) & (res != 0) & (gt_img != 0))
    union_gt_res = np.sum((res != 0) | (gt_img != 0))
    if union_gt_res == 0:
        # Both prediction and ground truth are all background: perfect match.
        return 1.0
    return float(intersect_gt_res) / union_gt_res
# Trained CSPN weights to evaluate.
model = '/home/briq/libs/CSPN/training/models/model_iter_3000.caffemodel'
# If True, display image / ground truth / prediction for every sample.
draw = False
# If True, refine each prediction with the dense CRF (see predict_mask).
smoothing = True
if __name__ == "__main__":
    num_classes = len(voc_classes)
    # VOC ground-truth masks, JPEG images, and the validation id list
    # (one image id per line).
    gt_path = '/media/datasets/VOC2012/SegmentationClassAug/'
    orig_img_path = '/media/datasets/VOC2012/JPEGImages/'
    img_list_path = '/home/briq/libs/CSPN/list/val_id.txt'
    with open(img_list_path) as f:
        content = f.readlines()
    f.close()  # redundant: the with-block has already closed the file
    content = [x.strip() for x in content]
    num_ims = 0
    # Load the deploy network definition with the trained weights, test mode.
    cspn_net = caffe.Net('deploy.prototxt', model, caffe.TEST)
    for line in content:
        img_name = line.strip()
        # Ground-truth mask lives at <gt_path>/<id>.png
        gt_name = gt_path + img_name
        gt_name = gt_name + '.png'
        gt_img = imageio.imread(gt_name)
        # Input image lives at <orig_img_path>/<id>.jpg
        orig_img_name = orig_img_path + img_name
        orig_img_name = orig_img_name + '.jpg'
        res = predict_mask(orig_img_name, cspn_net, smooth=smoothing)
        num_ims += 1
        # Progress marker every 100 images.
        if(num_ims%100==0):
            print '-----------------im:{}---------------------\n'.format(num_ims)
        # Per-image foreground IoU of this prediction against ground truth.
        acc = evaluate(res, gt_img)
        print img_name, str(num_ims), "{}%\n".format(acc*100)
        if draw:
            # 2x2 figure: original image, ground truth, CSPN prediction.
            fig = plt.figure()
            ax = fig.add_subplot('221')
            ax.imshow(pylab.imread(orig_img_name))
            plt.title('image')
            ax = fig.add_subplot('222')
            ax.matshow(gt_img, vmin=0, vmax=21, cmap=my_cmap)
            plt.title('GT')
            ax = fig.add_subplot('223')
            ax.matshow(res, vmin=0, vmax=21, cmap=my_cmap)
            plt.title('CSPN')
            plt.show()
|
{"hexsha": "5192727e7ef6bada3e0ab12f97b06319e6263dad", "size": 4540, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluate_VOC_val.py", "max_stars_repo_name": "briqr/CSPN", "max_stars_repo_head_hexsha": "d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-07-25T05:50:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-06T23:28:25.000Z", "max_issues_repo_path": "evaluate_VOC_val.py", "max_issues_repo_name": "briqr/CSPN", "max_issues_repo_head_hexsha": "d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-20T16:22:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-31T08:57:25.000Z", "max_forks_repo_path": "evaluate_VOC_val.py", "max_forks_repo_name": "briqr/CSPN", "max_forks_repo_head_hexsha": "d3d01e5a4e29d0c2ee4f1dfda1f2e7815163d346", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-07-26T03:48:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-25T14:35:22.000Z", "avg_line_length": 26.5497076023, "max_line_length": 102, "alphanum_fraction": 0.5341409692, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1418}
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Documentation file, introduction to the abstract specification.
*)
chapter "Introduction"
(*<*)
theory Intro_Doc
imports Main
begin
(*>*)
text \<open>
The seL4 microkernel is an operating system kernel designed to be a
secure, safe, and reliable foundation for systems in a wide variety of
application domains. As a microkernel, seL4 provides a minimal number of
services to applications. This small number of services directly
translates to a small implementation of approximately $8700$ lines of C code,
which has allowed the kernel to be formally proven in the Isabelle/HOL
theorem prover to adhere to a formal specification.
This document gives the text version of the formal Isabelle/HOL
specification used in this proof. The document starts by giving a brief
overview of the seL4 microkernel design, followed by text generated from
the Isabelle/HOL definitions.
This document is not a user manual to seL4, nor is it intended to
be read as such. Instead, it is a precise reference to the
behaviour of the seL4 kernel.
Further information on the models and verification techniques
can be found in previous publications~\cite{Boyton_09,Cock_08,Cock_KS_08,Derrin_EKCC_06,Elkaduwe_GE_07,Elkaduwe_GE_08,Elphinstone_KDRH_07,Heiser_EKKP_07,Klein_09,Klein_DE_09,Klein_EHACDEEKNSTW_09,Klein_EHACDEEKNSTW_10,Tuch_08,Tuch_09,Tuch_KH_05,Tuch_KN_07,Tuch_Klein_05,Tuch:phd,Winwood_KSACN_09}.
\section{The seL4 Microkernel}
The seL4 microkernel is a small operating system kernel of the L4
family. SeL4 provides a minimal number of services to applications, such
as abstractions for virtual address spaces, threads, and inter-process
communication (IPC).
SeL4 uses a capability-based access-control model. All memory, devices,
and microkernel-provided services require an associated
\emph{capability} (access right) to utilise them
\cite{Dennis_VanHorn_66}. The set of capabilities an application
possesses determines what resources that application can directly
access. SeL4 enforces this access control by using the hardware's memory
management unit (MMU) to ensure that userspace applications only have
access to memory they possess capabilities to.
\autoref{fig:sample} shows a representative seL4-based system. It
depicts the microkernel executing on top of the hardware as the only
software running in privileged mode of the processor. The first
application to execute is the supervisor OS. The supervisor OS (also
termed a \emph{booter} for simple scenarios) is responsible for
initialising, configuring and delegating authority to the specific
system layered on top.
\begin{figure}[tb]
\centering
\includegraphics[width=6cm]{imgs/seL4-background_01}
\caption{Sample seL4 based system}
\label{fig:sample}
\end{figure}
In \autoref{fig:sample}, the example system set up by the supervisor consists
of an instance of Linux on the left, and several instances of trusted or
sensitive applications on the right. The group of applications on the left and
the group on the right are unable to directly communicate or interfere with
each other without explicit involvement of the supervisor (and the
microkernel) --- a barrier is thus created between the untrusted left and the
trusted right, as indicated in the figure. The supervisor has a
kernel-provided mechanism to determine the relationship between applications
and the presence or absence of any such barriers.
\subsection{Kernel Services}
\label{s:kernel_services}
A limited number of service primitives are provided by the
microkernel; more complex services may be implemented as applications
on top of these primitives. In this way, the functionality of the
system can be extended without increasing the code and complexity in
privileged mode, while still supporting a potentially wide number of
services for varied application domains.
The basic services the microkernel provides are as follows:
\begin{description}
\item[Threads] are an abstraction of CPU execution that support running
software;
\item[Address Spaces] are virtual memory spaces that each contain an
application. Applications are limited to accessing memory in their
address space;
\item[Interprocess Communication] (IPC) via \emph{endpoints} allows
threads to communicate using message passing;
\item[Device Primitives] allow device drivers to be implemented as unprivileged
applications. The kernel exports hardware device interrupts
via IPC messages; and
\item[Capability Spaces] store capabilities (i.e., access rights) to kernel services along
with their book-keeping information.
\end{description}
All kernel services are accessed using kernel-provided system calls
that \emph{invoke} a capability;
the semantics of the system call depends upon the type of the
capability invoked. For example, invoking the \meth{Call} system call on a
thread control block (TCB) with certain arguments will suspend the
target thread, while invoking \meth{Call} on an endpoint will result
in a message being sent. In general, the message sent to a capability
will have an entry indicating the desired operation, along with any
arguments.
The kernel provides to clients the following system calls:
\begin{description}
\item[\meth{Send}] delivers the system call arguments to the target object
and allows the application to continue. If the
target object is unable to receive and/or process the arguments
immediately, the sending application will be blocked until the arguments
can be delivered.
\item[\meth{NBSend}] performs non-blocking send in a similar fashion
to \meth{Send} except that if the object is unable to receive the
arguments immediately, the message is silently dropped.
\item[\meth{Call}] is a \meth{Send} that blocks the application until the
object provides a response, or the receiving application replies. In
the case of delivery to an application (via an \obj{Endpoint}), an
additional capability is added to the arguments and delivered to the
receiver to give it the right to respond to the sender.
\item[\meth{Recv}] is used by an application to block until the target
object is ready.
\item[\meth{Reply}] is used to respond to a \meth{Call}, using the
capability generated by the \meth{Call} operation.
\item[\meth{ReplyRecv}] is a combination of \meth{Reply} and
\meth{Recv}. It exists for efficiency reasons: the common case of
replying to a request and waiting for the next can be performed in a
single kernel system call instead of two.
\end{description}
\subsection{Capability-based Access Control}
\label{s:sel4_cap_access_control}
The seL4 microkernel provides a capability-based access control model.
Access control governs all kernel services; in order to perform any
system call, an application must invoke a capability in its possession
that has sufficient access rights for the requested service. With
this, the system can be configured to isolate software components from
each other, and also to enable authorised controlled communication
between components by selectively granting specific communication
capabilities. This enables software component isolation with a high
degree of assurance, as only those operations explicitly authorised by
capability possession are permitted.
A capability is an unforgeable token that references a specific kernel
object (such as a thread control block) and carries access
rights that control what operations may be performed when it is invoked.
Conceptually, a capability resides in an application's
\emph{capability space}; an address in this space refers to a
\emph{slot} which may or may not contain a capability. An application
may refer to a capability --- to request a kernel service, for example
--- using the address of the slot holding that capability. The seL4
capability model is an instance of a \emph{segregated} (or
\emph{partitioned}) capability
model, where capabilities are managed by the kernel.
Capability spaces are implemented as a directed graph of kernel-managed
\emph{capability nodes} (\obj{CNode}s). A \obj{CNode} is a table of
slots, where each slot may contain further \obj{CNode} capabilities.
An address in a capability space is then the concatenation of the
indices of the \obj{CNode} capabilities forming the path to the
destination slot; we discuss \obj{CNode} objects further in
\autoref{s:cnode_obj}.
Capabilities can be copied and moved within capability spaces, and
also sent via IPC. This allows creation of applications with specific
access rights, the delegation of authority to another application, and
passing to an application authority to a newly created (or selected)
kernel service. Furthermore, capabilities can be \emph{minted} to
create a derived capability with a subset of the rights of the
original capability (never with more rights). A newly minted
capability can be used for partial delegation of authority.
Capabilities can also be revoked in their entirety to withdraw
authority. Revocation includes any capabilities that may have
been derived from the original capabilities. The propagation of
capabilities through the system is controlled by a
\emph{take-grant}-based model~\cite{Elkaduwe_GE_07}.
\subsection{Kernel Objects}
\label{s:sel4_internals}
In this section we give a brief overview of the kernel implemented
objects that can be invoked by applications. The interface to these
objects forms the interface to the kernel itself. The creation and use
of the high-level kernel services is achieved by the creation,
manipulation, and combination of these kernel objects.
\subsubsection{\obj{CNodes}}
\label{s:cnode_obj}
As mentioned in the previous section, capabilities in seL4 are stored
in kernel objects called \obj{CNodes}. A \obj{CNode} has a fixed
number of slots, always a power of two, determined when the
\obj{CNode} is created. Slots can be empty or contain a capability.
\begin{figure}[htb]
\centering
\includegraphics[height=5cm]{imgs/sel4objects_01.pdf}
\caption{\obj{CNodes} forming a \obj{CSpace}}
\label{fig:cnode}
\end{figure}
\obj{CNodes} have the following operations:
\begin{description}
\item[\meth{Mint}] creates a new capability in a specified \obj{CNode} slot
from an existing capability. The newly created capability may have fewer rights than the original.
\item[\meth{Copy}] is similar to \meth{Mint}, but the newly created capability
has the same rights as the original.
\item[\meth{Move}] moves a capability between two specified capability slots.
\item[\meth{Mutate}] is an atomic combination of \meth{Move} and
\meth{Mint}. It is a performance optimisation.
\item[\meth{Rotate}] moves two capabilities between three specified
capability slots. It is essentially two \meth{Move} operations: one
from the second specified slot to the first, and one from the third
to the second. The first and third specified slots may be the same,
in which case the capability in it is swapped with the capability in
the second slot. The operation is atomic; either both or neither capabilities
are moved.
\item[\meth{Delete}] removes a capability from the specified slot.
\item[\meth{Revoke}] is equivalent to calling \meth{Delete} on each
derived child of the specified capability. It has no effect on the
capability itself.
\item[\meth{SaveCaller}] moves a kernel-generated reply capability of the
current thread from the special \obj{TCB} slot it was created in, into
the designated \obj{CSpace} slot.
\item[\meth{Recycle}] is equivalent to \meth{Revoke}, except that it
also resets most aspects of the object to its initial state.
\end{description}
\subsubsection{IPC Endpoints and Notifications}
The seL4 microkernel supports \emph{synchronous} IPC (\obj{EP}) endpoints,
used to facilitate
interprocess communication between threads. Capabilities to endpoints
can be restricted to be send-only or receive-only. They can also
specify whether capabilities can be passed through the endpoint.
Endpoints allow both data and capabilities to be
transferred between threads, depending on the rights on the endpoint
capability. Sending a message will block the sender until the message
has been received; similarly, a waiting thread will be blocked until a
message is available (but see \meth{NBSend} above).
When only notification of an event is required, notification objects can
be used. These have the following invocations:
%
\begin{description}
\item[\meth{Notify}] simply sets the given set of semaphore bits in the
notification object.
Multiple \meth{Notify} system calls without an intervening \meth{Recv}
result in the bits being ``or-ed'' with any bits already set. As such,
\meth{Notify} is always non-blocking, and has no indication of whether
a receiver has received the notification.
\end{description}
%
Additionally, the \meth{Recv} system call may be used with an
notification object, allowing the calling thread to retrieve all set
bits from the notification object. By default, if no \meth{Notify}
operations have taken place since the last \meth{Recv} call, the
calling thread will block until the next \meth{Notify} takes place.
There is also a non-blocking (polling) variant of this invocation.
\subsubsection{\obj{TCB}}
The \emph{thread control block} (\obj{TCB}) object represents a thread
of execution in seL4. Threads are the unit of execution that is
scheduled, blocked, unblocked, etc., depending on the applications
interaction with other threads.
As illustrated in
\autoref{fig:sel4_internals}, a thread needs both a \obj{CSpace} and a
\obj{VSpace} in which to execute to form an application (plus some
additional information not represented here). The \obj{CSpace}
provides the capabilities (authority) required to manipulated kernel
objects, in order to send messages to another application for example. The
\obj{VSpace} provides the virtual memory environment required to
contain the code and data of the application. A \obj{CSpace} is
associated with a thread by installing a capability to the root
\obj{CNode} of a \obj{CSpace} into the \obj{TCB}. Likewise, a
\obj{VSpace} is associated with a thread by installing a capability to
a \obj{Page Directory} (described shortly) into the \obj{TCB}. Note that multiple threads
can share the same \obj{CSpace} and \obj{VSpace}.
\begin{figure}[htb]
\centering
\includegraphics[width=0.8\textwidth]{imgs/sel4_internals_01}
\caption{Internal representation of an application in seL4}
\label{fig:sel4_internals}
\end{figure}
The TCB object has the following methods:
\begin{description}
\item[\meth{CopyRegisters}] is used for copying the state of a
thread. The method is given an additional capability argument, which
must refer to a \obj{TCB} that will be used as the source of the
transfer; the invoked thread is the destination. The caller may
select which of several subsets of the register context will be
transferred between the threads. The operation may also suspend the
source thread, and resume the destination thread.
Two subsets of the context that might be copied (if indicated by the
caller) include: firstly, the parts of the register state that are used or preserved
by system calls, including the instruction and stack pointers, and
the argument and message registers; and secondly, the
remaining integer registers. Other subsets are architecture-defined,
and typically include coprocessor registers such as the floating
point registers. Note that many integer registers are modified or
destroyed by system calls, so it is not generally useful to use
\meth{CopyRegisters} to copy integer registers to or from the
current thread.
\item[\meth{ReadRegisters}] is a variant of \meth{CopyRegisters} for
which the destination is the calling thread. It uses the message
registers to transfer the two subsets of the integer registers; the
message format has the more commonly transferred instruction
pointer, stack pointer and argument registers at the start, and will
be truncated at the caller's request if the other registers are not
required.
\item[\meth{WriteRegisters}] is a variant of \meth{CopyRegisters} for
which the source is the calling thread. It uses the message
registers to transfer the integer registers, in the same order used
by \meth{ReadRegisters}. It may be truncated if the later registers
are not required; an explicit length argument is given to allow
error detection when the message is inadvertently truncated by a
missing IPC buffer.
\item[\meth{SetPriority}] configures the thread's scheduling
parameters. In the current version of seL4, this is simply a
priority for the round-robin scheduler.
\item[\meth{SetIPCBuffer}] configures the thread's local storage,
particularly the IPC buffer used for sending parts of the message
payload that don't fit in hardware registers.
\item[\meth{SetSpace}] configures the thread's virtual memory and
capability address spaces. It sets the roots of the trees (or other
architecture-specific page table structures) that represent the two
address spaces, and also nominates the \obj{Endpoint} that the kernel uses
to notify the thread's pager\footnote{A \emph{pager} is a term for a
thread that manages the \obj{VSpace} of another application. For
example, Linux would be called the pager of its applications.} of
faults and exceptions.
\item[\meth{Configure}] is a batched version of the three
configuration system calls: \meth{SetPriority}, \meth{SetIPCBuffer},
and \meth{SetSpace}. \meth{Configure} is simply a performance
optimisation.
\item[\meth{Suspend}] makes a thread inactive. The thread will not
be scheduled again until a \meth{Resume} operation is performed on
it. A \meth{CopyRegisters} or \meth{ReadRegisters} operation may
optionally include a \meth{Suspend} operation on the source thread.
\item[\meth{Resume}] resumes execution of a thread that is
inactive or waiting for a kernel operation to complete. If the
invoked thread is waiting for a kernel operation, \meth{Resume} will
modify the thread's state so that it will attempt to perform the
faulting or aborted operation again. \meth{Resume}-ing a thread that
is already ready has no effect. \meth{Resume}-ing a thread that is
in the waiting phase of a \meth{Call} operation may cause the
sending phase to be performed again, even if it has previously
succeeded.
A \meth{CopyRegisters} or \meth{WriteRegisters} operation may
optionally include a \meth{Resume} operation on the destination
thread.
\end{description}
\subsubsection{Virtual Memory}
A virtual address space in seL4 is called a \obj{VSpace}. In a similar
way to \obj{CSpaces}, a \obj{VSpace} is composed of objects provided
by the microkernel. Unlike \obj{CSpaces}, these objects for managing
virtual memory largely directly correspond to those of the hardware,
that is, a page directory pointing to page tables, which in turn map
physical frames. The kernel also includes \obj{ASID Pool} and
\obj{ASID Control} objects for tracking the status of address spaces.
\autoref{fig:vspace} illustrates a \obj{VSpace} with the requisite
components required to implement a virtual address space.
\begin{figure}[htb]
\centering
\includegraphics[height=5cm]{imgs/sel4objects_05.pdf}
\caption{Virtual Memory in seL4.}
\label{fig:vspace}
\end{figure}
These \obj{VSpace}-related objects are sufficient to implement the
hardware data structures required to create, manipulate, and destroy
virtual memory address spaces. It should be noted that, as usual, the
manipulator of a virtual memory space needs the appropriate
capabilities to the required objects.
\paragraph{\obj{Page Directory}}
The \obj{Page Directory} (PD) is the top-level page table of the ARM
two-level page table structure. It has a hardware defined format, but
conceptually contains 1024 page directory entries (PDE), which are one
of a pointer to a page table, a 4 megabyte \obj{Page}, or an invalid
entry. The \obj{Page Directory} has no methods itself, but it is used
as an argument to several other virtual memory related object calls.
\paragraph{\obj{Page Table}} The \obj{Page Table} object forms the
second level of the ARM page table. It contains 1024 slots, each of which
contains a page table entry (PTE). A page table entry contains either an
invalid entry, or a pointer to a 4 kilobyte \obj{Page}.
\obj{Page Table} objects possess only a single method:
\begin{description}
\item[\meth{Map}] takes a \obj{Page Directory}
capability as an argument, and installs a reference to the invoked
\obj{Page Table} to a specified slot in the \obj{Page
Directory}.
\end{description}
\paragraph{\obj{Page}}
A \obj{Page} object is a region of physical memory that is used to
implement virtual memory pages in a virtual address space. The
\obj{Page} object has the following methods:
\begin{description}
\item[\meth{Map}] takes a
\obj{Page Directory} or a \obj{Page Table} capability as an argument
and installs a PDE or PTE referring to the \obj{Page} in the
specified location, respectively. In addition, \meth{Map} has a
remapping mode which is used to change the access permissions on an
existing mapping.
\item[\meth{Unmap}] removes an existing mapping.
\end{description}
\paragraph{\obj{ASID Control}}
For internal kernel book-keeping purposes, there is a fixed maximum
number of applications the system can support. In order to manage
this limited resource, the microkernel provides an \obj{ASID Control}
capability. The \obj{ASID Control} capability is used to generate a
capability that authorises the use of a subset of available address
space identifiers. This newly created capability is called an
\obj{ASID Pool}. \obj{ASID Control} only has a single method:
\begin{description}
\item[\meth{MakePool}] together with a capability to
\obj{Untyped Memory} (described shortly) as argument creates an \obj{ASID Pool}.
\end{description}
\paragraph{\obj{ASID Pool}}
An \obj{ASID Pool} confers the right to create a subset of the available
maximum applications. For a \obj{VSpace} to be usable by an application, it
must be assigned to an ASID. This is done using a capability to an
\obj{ASID Pool}. The \obj{ASID Pool} object has a single method:
\begin{description}
\item[\meth{Assign}] assigns an ASID to the \obj{VSpace}
associated with the \obj{Page Directory} passed in as an argument.
\end{description}
\subsubsection{Interrupt Objects}
Device driver applications need the ability to receive and acknowledge
interrupts from hardware devices.
A capability to \obj{IRQControl} has the ability to create a new
capability to manage a specific interrupt source associated with a
specific device. The new capability is then delegated to a device
driver to access an interrupt source. \obj{IRQControl} has one method:
\begin{description}
\item[\meth{Get}] creates an \obj{IRQHandler} capability for the
specified interrupt source.
\end{description}
An \obj{IRQHandler} object is used by a driver application to handle
interrupts for the device it manages. It has three methods:
\begin{description}
\item[\meth{SetEndpoint}] specifies the \obj{NTFN} that a
\meth{Notify} should be sent to when an interrupt occurs. The driver
application usually \meth{Recv}-s on this endpoint for interrupts to
process.
\item[\meth{Ack}] informs the kernel that the userspace driver has finished
processing the interrupt and the microkernel can send further pending
or new interrupts to the application.
\item[\meth{Clear}] de-registers the \obj{NTFN} from the
\obj{IRQHandler} object.
\end{description}
\subsubsection{\obj{Untyped Memory}}
The \obj{Untyped Memory} object is the foundation of memory allocation
in the seL4 kernel. Untyped memory capabilities have a single method:
\begin{description}
\item[\meth{Retype}] creates a number of new kernel objects. If this
method succeeds, it returns capabilities to the newly-created objects.
\end{description}
In particular, untyped memory objects can be divided into a group of
smaller untyped memory objects. We discuss memory management in
general in the following section.
\subsection{Kernel Memory Allocation}
\label{sec:kernmemalloc}
The seL4 microkernel has no internal memory allocator: all kernel
objects must be explicitly created from application controlled memory
regions via \obj{Untyped Memory} capabilities. Applications must have
explicit authority to memory (via \obj{Untyped Memory} capabilities)
in order to create other services, and services consume no extra
memory once created (other than the amount of untyped memory from
which they were originally created). The mechanisms can be used to
precisely control the specific amount of physical memory available to
applications, including being able to enforce isolation of physical
memory access between applications or a device. Thus, there are no
arbitrary resource limits in the kernel apart from those dictated by
the hardware\footnote{The treatment of virtual ASIDs imposes a fixed
number of address spaces, but this limitation is to be removed in
future versions of seL4.}, and so many denial-of-service attacks via
resource exhaustion are obviated.
At boot time, seL4 pre-allocates all the memory required for the
kernel itself, including the code, data, and stack sections (seL4 is a
single kernel-stack operating system). The remainder of the memory is
given to the first task in the form of capabilities to \obj{Untyped
Memory}, and some additional capabilities to kernel objects that were
required to bootstrap the supervisor task. These objects can then be
split into smaller untyped memory regions or other kernel objects
using the \meth{Retype} method; the created objects are termed
\emph{children} of the original untyped memory object.
See \autoref{fig:alloc2} for an
example.
\begin{figure}[htb]
\centering
\includegraphics[width=7cm]{imgs/seL4-background_03}
\caption{Memory layout at boot time}
\label{fig:alloc2}
\end{figure}
\begin{figure}[htb]
\centering
\includegraphics[width=7cm]{imgs/seL4-background_04}
\caption{Memory layout after supervisor creates kernel services.}
\label{fig:alloc-sup}
\end{figure}
The user-level application that creates an object using \meth{Retype}
receives full authority over the resulting object. It can then
delegate all or part of the authority it possesses over this object to
one or more of its clients. This is done by selectively granting each
client a capability to the kernel object, thereby allowing the client
to obtain kernel services by invoking the object.
For obvious security reasons, kernel data must be protected from user
access. The seL4 kernel prevents such access by using two mechanisms.
First, the above allocation policy guarantees that typed objects never
overlap. Second, the kernel ensures that each physical frame mapped
by the MMU at a user-accessible address corresponds to a
\obj{Page} object (described above); \obj{Page} objects contain no kernel
data, so direct
user access to kernel data is not possible. All other kernel objects
are only indirectly manipulated via their corresponding capabilities.
\subsubsection{Re-using Memory}
\label{s:memRevoke}
The model described thus far is sufficient for applications to
allocate kernel objects, distribute authority among client
applications, and obtain various kernel services provided by these
objects. This alone is sufficient for a simple static system
configuration.
The seL4 kernel also allows memory re-use. Reusing a region of memory
is sound only when there are no dangling references (e.g.\
capabilities) left to the objects implemented by that memory. The
kernel tracks \emph{capability derivations}, that is, the children
generated by various \obj{CNode} methods (\meth{Retype}, \meth{Mint},
\meth{Copy}, and \meth{Mutate}). Whenever a user requests that the
kernel create new objects in an untyped memory region, the kernel uses
this information to check that there are no children in the region,
and thus no live capability references.
The tree structure so generated is termed the \emph{capability
derivation tree} (CDT)\footnote{Although we model the CDT as a
separate data structure, it is implemented as part of the CNode object
and so requires no additional kernel meta-data.}. For example, when a
user creates new kernel objects by retyping untyped memory, the newly
created capabilities would be inserted into the CDT as children of the
untyped memory capability.
Finally, recall that the \meth{Revoke} operation destroys all
capabilities derived from the argument capability. Revoking the last
capability to a kernel object is easily detectable, and triggers the
\emph{destroy} operation on the now unreferenced object. Destroy
simply deactivates the object if it was active, and cleans up any
in-kernel dependencies between it and other objects.
By calling \meth{Revoke} on the original
capability to an untyped memory object, the user removes all of the
untyped memory object's children --- that is, all capabilities pointing to
objects in the untyped memory region.
Thus, after this operation there are no valid references
to any object within the untyped region, and the region may be
safely retyped and reused.
\section{Summary}
\label{s:backsum}
This chapter has given an overview of the seL4 microkernel. The
following chapters are generated from the formal Isabelle/HOL
definitions that comprise the formal specification of the seL4 kernel
on the ARM11 architecture. The specification does not cover any other
architectures or platforms.
The order of definitions in this document is as processed by
Isabelle/HOL: bottom up. All concepts are defined before first used.
This means the first chapters mainly introduce basic data types and
structures while the top-level kernel entry point is defined in the
last chapter (\autoref{c:syscall}). The following section shows
the dependency graph between the theory modules in this specification.
We assume a familiarity with Isabelle syntax; see Nipkow et
al.~\cite{LNCS2283} for an introduction. In addition to the standard
Isabelle/HOL notation, we sometimes write @{text "f $ x"} for
@{text "(f x)"} and use monadic do-notation extensively. The latter
is defined in \autoref{c:monads}.
\section{Theory Dependencies}
\centerline{\includegraphics[height=0.8\textheight]{session_graph}}
\<close>
(*<*)
end
(*>*)
|
{"author": "seL4", "repo": "l4v", "sha": "9ba34e269008732d4f89fb7a7e32337ffdd09ff9", "save_path": "github-repos/isabelle/seL4-l4v", "path": "github-repos/isabelle/seL4-l4v/l4v-9ba34e269008732d4f89fb7a7e32337ffdd09ff9/spec/abstract/Intro_Doc.thy"}
|
from collections import MutableMapping
import numpy as np
import pytest
from hrv.rri import (RRi, _validate_rri, _create_time_array, _validate_time,
_prepare_table)
from tests.test_utils import FAKE_RRI
class TestRRiClassArguments:
    # Tests for RRi construction and validation helpers (_validate_rri,
    # _create_time_array, _validate_time) and for the arithmetic/comparison
    # dunder methods of RRi. FAKE_RRI is a short fixture series shared by all
    # tests (its values are consistent with [800, 810, 815, 750] ms — see the
    # statistics expected in TestRRiClassMethods).
    def test_transform_rri_to_numpy_array(self):
        validated_rri = _validate_rri(FAKE_RRI)
        np.testing.assert_array_equal(validated_rri, np.array(FAKE_RRI))
    def test_transform_rri_in_seconds_to_miliseconds(self):
        # Small values are interpreted as seconds and rescaled to ms
        # (assumption based on the expected [800, 900, 1200] output —
        # TODO confirm the exact threshold in _validate_rri).
        rri_in_seconds = [0.8, 0.9, 1.2]
        validated_rri = _validate_rri(rri_in_seconds)
        assert isinstance(validated_rri, np.ndarray)
        np.testing.assert_array_equal(_validate_rri(rri_in_seconds),
                                      [800, 900, 1200])
    def test_rri_values(self):
        rri = RRi(FAKE_RRI).values
        assert isinstance(rri, np.ndarray)
        np.testing.assert_array_equal(rri, np.array(FAKE_RRI))
    def test_create_time_array(self):
        # Time axis is the cumulative sum of the intervals in seconds,
        # shifted so the series starts at t = 0.
        rri_time = _create_time_array(FAKE_RRI)
        assert isinstance(rri_time, np.ndarray)
        expected = np.cumsum(FAKE_RRI) / 1000
        expected -= expected[0]
        np.testing.assert_array_equal(rri_time, expected)
    def test_rri_time_auto_creation(self):
        # When no time argument is given, RRi builds the time axis itself.
        rri = RRi(FAKE_RRI)
        expected = np.cumsum(FAKE_RRI) / 1000
        expected -= expected[0]
        np.testing.assert_array_equal(
            rri.time,
            expected
        )
    def test_rri_time_passed_as_argument(self):
        rri_time = [1, 2, 3, 4]
        rri = RRi(FAKE_RRI, rri_time)
        assert isinstance(rri.time, np.ndarray)
        np.testing.assert_array_equal(rri.time, np.array([1, 2, 3, 4]))
    def test_raises_exception_if_rri_and_time_havent_same_length(self):
        # The error message is taken from the first raises-block; the second
        # block only checks that the RRi constructor validates lengths too.
        with pytest.raises(ValueError) as e:
            _validate_time(FAKE_RRI, [1, 2, 3])
        with pytest.raises(ValueError):
            RRi(FAKE_RRI, [1, 2, 3])
        msg = 'rri and time series must have the same length'
        assert e.value.args[0] == msg
    def test_rri_and_time_have_same_length_in_class_construction(self):
        rri = RRi(FAKE_RRI, [1, 2, 3, 4])
        np.testing.assert_array_equal(rri.time, np.array([1, 2, 3, 4]))
    def test_time_has_no_zero_value_besides_in_first_position(self):
        with pytest.raises(ValueError) as e:
            _validate_time(FAKE_RRI, [1, 2, 0, 3])
        msg = 'time series cannot have 0 values after first position'
        assert e.value.args[0] == msg
    def test_time_is_monotonically_increasing(self):
        with pytest.raises(ValueError) as e:
            _validate_time(FAKE_RRI, [0, 1, 4, 3])
        msg = 'time series must be monotonically increasing'
        assert e.value.args[0] == msg
    def test_time_series_have_no_negative_values(self):
        with pytest.raises(ValueError) as e:
            _validate_time(FAKE_RRI, [-1, 1, 2, 3])
        assert e.value.args[0] == ('time series cannot have negative values')
    def test_rri_series_have_no_negative_values(self):
        # Zero is rejected as well: rri values must be strictly positive.
        with pytest.raises(ValueError) as e:
            _validate_rri([0.0, 1.0, 2.0, 3.0])
        with pytest.raises(ValueError):
            _validate_rri([1.0, 2.0, -3.0, 4.0])
        assert e.value.args[0] == ('rri series can only have positive values')
    def test_rri_class_encapsulation(self):
        # rri and time must be read-only: direct assignment has to fail.
        rri = RRi(FAKE_RRI)
        with pytest.raises(AttributeError):
            rri.rri = [1, 2, 3, 4]
        with pytest.raises(AttributeError):
            rri.time = [1, 2, 3, 4]
    def test_class_repr_short_array(self):
        rri = RRi([1, 2, 3, 4])
        assert rri.__repr__() == 'RRi array([1000., 2000., 3000., 4000.])'
    def test_class_repr_long_array(self):
        # Long arrays fall back to numpy's summarised repr (ellipsis).
        rri = RRi(range(1, 100000))
        assert rri.__repr__() == (
            'RRi array([1.0000e+00, 2.0000e+00, 3.0000e+00, ..., '
            '9.9997e+04, 9.9998e+04,\n 9.9999e+04])'
        )
    def test__mul__method(self):
        # Arithmetic operators delegate to the underlying values array and
        # wrap the result back into an RRi instance.
        rri = RRi(FAKE_RRI)
        result = rri * 10
        assert isinstance(result, RRi)
        np.testing.assert_equal(result, rri.values * 10)
    def test__add__method(self):
        rri = RRi(FAKE_RRI)
        result = rri + 10
        assert isinstance(result, RRi)
        np.testing.assert_equal(result, rri.values + 10)
    def test__sub__method(self):
        rri = RRi(FAKE_RRI)
        result = rri - 10
        assert isinstance(result, RRi)
        np.testing.assert_equal(result, rri.values - 10)
    def test__truediv__method(self):
        rri = RRi(FAKE_RRI)
        result = rri / 10
        assert isinstance(result, RRi)
        np.testing.assert_equal(result, rri.values / 10)
    def test__abs__method(self):
        rri = RRi(FAKE_RRI)
        result = abs(rri)
        assert isinstance(result, RRi)
        np.testing.assert_equal(result, abs(rri.values))
    def test__eq__method(self):
        # Comparison operators are only checked element-wise against the
        # values array; no RRi wrapping is asserted for them.
        rri = RRi(FAKE_RRI)
        result = rri == 810
        np.testing.assert_equal(result, rri.values == 810)
    def test__ne__method(self):
        rri = RRi(FAKE_RRI)
        result = rri != 810
        np.testing.assert_equal(result, rri.values != 810)
    def test__gt__method(self):
        rri = RRi(FAKE_RRI)
        result = rri > 810
        np.testing.assert_equal(result, rri.values > 810)
    def test__ge__method(self):
        rri = RRi(FAKE_RRI)
        result = rri >= 810
        np.testing.assert_equal(result, rri.values >= 810)
    def test__lt__method(self):
        rri = RRi(FAKE_RRI)
        result = rri < 810
        np.testing.assert_equal(result, rri.values < 810)
    def test__le__method(self):
        rri = RRi(FAKE_RRI)
        result = rri <= 810
        np.testing.assert_equal(result, rri.values <= 810)
    def test__pow__method(self):
        rri = RRi(FAKE_RRI)
        result = rri ** 2
        np.testing.assert_equal(result, rri.values ** 2)
class TestRRiClassMethods:
    # Tests for RRi's statistical summaries, describe() table, heart-rate
    # conversion and time-manipulation helpers.
    def test_rri_statistical_values(self):
        # Each statistic must agree with the corresponding numpy reduction
        # applied directly to the raw series.
        rri = RRi(FAKE_RRI)
        np.testing.assert_array_equal(rri.mean(), np.mean(FAKE_RRI))
        np.testing.assert_array_equal(rri.var(), np.var(FAKE_RRI))
        np.testing.assert_array_equal(rri.std(), np.std(FAKE_RRI))
        np.testing.assert_array_equal(rri.median(), np.median(FAKE_RRI))
        np.testing.assert_array_equal(rri.max(), np.max(FAKE_RRI))
        np.testing.assert_array_equal(rri.min(), np.min(FAKE_RRI))
        np.testing.assert_array_equal(
            rri.amplitude(),
            np.max(FAKE_RRI) - np.min(FAKE_RRI),
        )
        np.testing.assert_array_equal(
            rri.rms(),
            np.sqrt(np.mean(np.square(FAKE_RRI))),
        )
    def test_prepare_rri_description_table(self):
        # Row order of the table is not asserted, hence membership checks
        # instead of a full-table equality.
        rri = RRi(FAKE_RRI)
        descr_table = _prepare_table(rri)
        expected = [
            ['', 'rri', 'hr'],
            ['min', 750.0, 73.61963190184049],
            ['max', 815.0, 80.0],
            ['amplitude', 65.0, 6.380368098159508],
            ['mean', 793.75, 75.67342649397864],
            ['median', 805.0, 74.53703703703704],
            ['var', 667.1875, 6.487185483887203],
            ['std', 25.829972899714782, 2.546995383562209],
        ]
        for row in descr_table:
            assert row in expected
    def test_rri_describe(self):
        # NOTE(review): the top-of-file `from collections import
        # MutableMapping` is removed in Python 3.10+; it should be imported
        # from `collections.abc` instead.
        rri = RRi(FAKE_RRI)
        rri_descr = rri.describe()
        assert isinstance(rri_descr, MutableMapping)
        expected = [
            ['', 'rri', 'hr'],
            ['min', 750.0, 73.61963190184049],
            ['max', 815.0, 80.0],
            ['amplitude', 65.0, 6.380368098159508],
            ['mean', 793.75, 75.67342649397864],
            ['median', 805.0, 74.53703703703704],
            ['var', 667.1875, 6.487185483887203],
            ['std', 25.829972899714782, 2.546995383562209],
        ]
        # Each line of the textual table must appear somewhere in the repr;
        # exact ordering is not enforced.
        expected__repr__ = (
            '----------------------------------------\n',
            ' rri hr\n',
            '----------------------------------------\n',
            'min 750.00 73.62\n',
            'max 815.00 80.00\n',
            'mean 793.75 75.67\n',
            'var 667.19 6.49\n',
            'std 25.83 2.55\n',
            'median 805.00 74.54\n',
            'amplitude 65.00 6.38\n'
        )
        for field in expected[1:]:
            assert rri_descr[field[0]]['rri'] == field[1]
            assert rri_descr[field[0]]['hr'] == field[2]
        rri_descr_rep = rri_descr.__repr__()
        for table_row in expected__repr__:
            assert table_row in rri_descr_rep
    def test_rri_to_heart_rate(self):
        # Expected values are consistent with hr = 60000 / rri(ms) for
        # FAKE_RRI == [800, 810, 815, 750] — TODO confirm against hrv.rri.
        rri = RRi(FAKE_RRI)
        heart_rate = rri.to_hr()
        expected = np.array([75., 74.07407407, 73.6196319, 80.])
        np.testing.assert_array_almost_equal(heart_rate, expected)
    def test_get_rri_time_interval(self):
        # time_range slices the series by time stamps, inclusive at both
        # ends in this example.
        rri = RRi(FAKE_RRI + [817, 785, 910], time=[2, 4, 6, 8, 10, 12, 14])
        rri_interval = rri.time_range(start=10, end=14)
        expected = RRi([817, 785, 910], time=[10, 12, 14])
        assert isinstance(rri_interval, RRi)
        np.testing.assert_array_equal(rri_interval.values, expected.values)
        np.testing.assert_array_equal(rri_interval.time, expected.time)
    def test_reset_time_offset(self):
        # reset_time() returns a new RRi whose time axis starts at zero.
        rri = RRi(FAKE_RRI, time=[4, 5, 6, 7])
        rri_reset = rri.reset_time()
        expected = RRi(FAKE_RRI, time=[0, 1, 2, 3])
        assert isinstance(rri_reset, RRi)
        np.testing.assert_array_equal(rri_reset.values, expected.values)
        np.testing.assert_array_equal(rri_reset.time, expected.time)
    def test_reset_time_offset_inplace(self):
        # With inplace=True the object itself is mutated instead.
        rri = RRi(FAKE_RRI, time=[4, 5, 6, 7])
        rri.reset_time(inplace=True)
        expected = RRi(FAKE_RRI, time=[0, 1, 2, 3])
        assert isinstance(rri, RRi)
        np.testing.assert_array_equal(rri.values, expected.values)
        np.testing.assert_array_equal(rri.time, expected.time)
|
{"hexsha": "17529738ecff70a83ca6ca7a9606fd48877740d5", "size": 10057, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_rri.py", "max_stars_repo_name": "raphaelvallat/hrv", "max_stars_repo_head_hexsha": "6d8c9f0a0187b382697b90f39362cf91c7ea3a76", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-20T01:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-20T01:05:45.000Z", "max_issues_repo_path": "tests/test_rri.py", "max_issues_repo_name": "raphaelvallat/hrv", "max_issues_repo_head_hexsha": "6d8c9f0a0187b382697b90f39362cf91c7ea3a76", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_rri.py", "max_forks_repo_name": "raphaelvallat/hrv", "max_forks_repo_head_hexsha": "6d8c9f0a0187b382697b90f39362cf91c7ea3a76", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-26T10:20:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-26T10:20:09.000Z", "avg_line_length": 31.3302180685, "max_line_length": 78, "alphanum_fraction": 0.59033509, "include": true, "reason": "import numpy", "num_tokens": 2888}
|
[STATEMENT]
lemma heap_is_wellformed_one_disc_parent: "heap_is_wellformed h \<Longrightarrow>
h \<turnstile> get_disconnected_nodes document_ptr \<rightarrow>\<^sub>r disc_nodes \<Longrightarrow>
h \<turnstile> get_disconnected_nodes document_ptr' \<rightarrow>\<^sub>r disc_nodes' \<Longrightarrow> set disc_nodes \<inter> set disc_nodes' \<noteq> {} \<Longrightarrow>
document_ptr = document_ptr'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>heap_is_wellformed h; h \<turnstile> get_disconnected_nodes document_ptr \<rightarrow>\<^sub>r disc_nodes; h \<turnstile> get_disconnected_nodes document_ptr' \<rightarrow>\<^sub>r disc_nodes'; set disc_nodes \<inter> set disc_nodes' \<noteq> {}\<rbrakk> \<Longrightarrow> document_ptr = document_ptr'
[PROOF STEP]
using CD.heap_is_wellformed_one_disc_parent local.heap_is_wellformed_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>heap_is_wellformed\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M ?h; ?h \<turnstile> get_disconnected_nodes ?document_ptr \<rightarrow>\<^sub>r ?disc_nodes; ?h \<turnstile> get_disconnected_nodes ?document_ptr' \<rightarrow>\<^sub>r ?disc_nodes'; set ?disc_nodes \<inter> set ?disc_nodes' \<noteq> {}\<rbrakk> \<Longrightarrow> ?document_ptr = ?document_ptr'
heap_is_wellformed = (\<lambda>h. heap_is_wellformed\<^sub>C\<^sub>o\<^sub>r\<^sub>e\<^sub>_\<^sub>D\<^sub>O\<^sub>M h \<and> acyclic (parent_child_rel h \<union> local.a_host_shadow_root_rel h \<union> local.a_ptr_disconnected_node_rel h) \<and> local.a_all_ptrs_in_heap h \<and> local.a_distinct_lists h \<and> local.a_shadow_root_valid h)
goal (1 subgoal):
1. \<lbrakk>heap_is_wellformed h; h \<turnstile> get_disconnected_nodes document_ptr \<rightarrow>\<^sub>r disc_nodes; h \<turnstile> get_disconnected_nodes document_ptr' \<rightarrow>\<^sub>r disc_nodes'; set disc_nodes \<inter> set disc_nodes' \<noteq> {}\<rbrakk> \<Longrightarrow> document_ptr = document_ptr'
[PROOF STEP]
by blast
|
{"llama_tokens": 709, "file": "Shadow_SC_DOM_Shadow_DOM", "length": 2}
|
import numpy as np
import torch
import torch.nn.functional as F
# DETR imports
from detr.util.box_ops import box_cxcywh_to_xyxy
# Detectron Imports
from detectron2.structures import Boxes
# Project Imports
from probabilistic_inference import inference_utils
from probabilistic_inference.inference_core import ProbabilisticPredictor
from probabilistic_modeling.modeling_utils import covariance_output_to_cholesky, clamp_log_variance
class DetrProbabilisticPredictor(ProbabilisticPredictor):
    """Probabilistic inference for DETR-based detectors.

    Converts raw DETR outputs into boxes (xyxy, in inference-image
    coordinates), optional box covariance matrices, and class probability
    vectors, and implements the ensemble / MC-dropout post-processing entry
    points expected by ProbabilisticPredictor.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # These are mock variables to stay compatible with the probabilistic
        # detectron library; no NMS is performed for DETR. They are only
        # consumed by the ensemble merging methods below.
        self.test_nms_thresh = 0.5
        self.test_topk_per_image = self.model.detr.num_queries

    def detr_probabilistic_inference(self,
                                     input_im):
        """Run one DETR forward pass and post-process the raw outputs.

        Args:
            input_im (list): input image list generated from a dataset
                handler; only the first entry is used.

        Returns:
            tuple: (predicted_boxes, predicted_boxes_covariance,
            predicted_prob, classes_idxs, predicted_prob_vectors).
            ``predicted_boxes_covariance`` is an empty list when the model
            does not output box covariances.
        """
        outputs = self.model(input_im,
                             return_raw_results=True,
                             is_mc_dropout=self.mc_dropout_enabled)

        image_width = input_im[0]['image'].shape[2]
        image_height = input_im[0]['image'].shape[1]

        # Handle logits and classes. When the model also predicts logit
        # variances, sample logits from N(mean, exp(var)) and average the
        # resulting softmax vectors (Monte-Carlo estimate of the expected
        # class probabilities).
        predicted_logits = outputs['pred_logits'][0]
        if 'pred_logits_var' in outputs.keys():
            predicted_logits_var = outputs['pred_logits_var'][0]
            box_cls_dists = torch.distributions.normal.Normal(
                predicted_logits, scale=torch.sqrt(
                    torch.exp(predicted_logits_var)))
            predicted_logits = box_cls_dists.rsample(
                (self.model.cls_var_num_samples,))
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
            predicted_prob_vectors = predicted_prob_vectors.mean(0)
        else:
            predicted_prob_vectors = F.softmax(predicted_logits, dim=-1)
        # The last entry of the probability vector is excluded when taking
        # the most likely class (presumably the no-object class — standard
        # for DETR; confirm against the model head).
        predicted_prob, classes_idxs = predicted_prob_vectors[:, :-1].max(-1)

        # Handle boxes: convert cxcywh -> xyxy and rescale to the inference
        # image size (not the COCO original size).
        predicted_boxes = outputs['pred_boxes'][0]
        pred_boxes = Boxes(box_cxcywh_to_xyxy(predicted_boxes))
        pred_boxes.scale(scale_x=image_width, scale_y=image_height)
        predicted_boxes = pred_boxes.tensor

        # Handle covariance matrices: rebuild Sigma from its Cholesky
        # factor (L L^T), then push it through the same linear
        # cxcywh -> xyxy transform and image-size scaling
        # (for a linear map A: Sigma' = A Sigma A^T).
        if 'pred_boxes_cov' in outputs.keys():
            predicted_boxes_covariance = covariance_output_to_cholesky(
                outputs['pred_boxes_cov'][0])
            predicted_boxes_covariance = torch.matmul(
                predicted_boxes_covariance, predicted_boxes_covariance.transpose(
                    1, 2))
            # Linear map taking (cx, cy, w, h) to (x1, y1, x2, y2).
            transform_mat = torch.tensor([[[1.0, 0.0, -0.5, 0.0],
                                           [0.0, 1.0, 0.0, -0.5],
                                           [1.0, 0.0, 0.5, 0.0],
                                           [0.0, 1.0, 0.0, 0.5]]]).to(self.model.device)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    transform_mat,
                    predicted_boxes_covariance),
                transform_mat.transpose(
                    1,
                    2))
            scale_mat = torch.diag_embed(
                torch.as_tensor(
                    (image_width,
                     image_height,
                     image_width,
                     image_height),
                    dtype=torch.float32)).to(
                self.model.device).unsqueeze(0)
            predicted_boxes_covariance = torch.matmul(
                torch.matmul(
                    scale_mat,
                    predicted_boxes_covariance),
                torch.transpose(scale_mat, 2, 1))
        else:
            predicted_boxes_covariance = []

        return predicted_boxes, predicted_boxes_covariance, predicted_prob, classes_idxs, predicted_prob_vectors

    def post_processing_standard_nms(self, input_im):
        """
        This function produces results using standard non-maximum suppression. The function takes into
        account any probabilistic modeling method when computing the results.

        Args:
            input_im (list): an input im list generated from dataset handler.

        Returns:
            result (instances): object instances
        """
        outputs = self.detr_probabilistic_inference(input_im)
        return inference_utils.general_standard_nms_postprocessing(
            input_im, outputs)

    def post_processing_output_statistics(self, input_im):
        """
        Output statistics does not make much sense for DETR architecture. There is some redundancy due to forced 100
        detections per image, but cluster sizes would be too small for meaningful estimates. Might implement it later
        on.
        """
        # Removed the unreachable `pass` that followed this raise.
        raise NotImplementedError

    def post_processing_mc_dropout_ensembles(self, input_im):
        """Run the model num_mc_dropout_runs times and merge the outputs
        with the black-box ensemble post-processing."""
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            # Merge results:
            results = [
                inference_utils.general_standard_nms_postprocessing(
                    input_im,
                    self.detr_probabilistic_inference(input_im),
                    self.test_nms_thresh,
                    self.test_topk_per_image) for _ in range(
                    self.num_mc_dropout_runs)]

            # Append per-ensemble outputs after NMS has been performed.
            ensemble_pred_box_list = [
                result.pred_boxes.tensor for result in results]
            ensemble_pred_prob_vectors_list = [
                result.pred_cls_probs for result in results]
            ensembles_class_idxs_list = [
                result.pred_classes for result in results]
            ensembles_pred_box_covariance_list = [
                result.pred_boxes_covariance for result in results]

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_ensembles(self, input_im, model_dict):
        """Run every model in model_dict on the input and merge the per-model
        post-NMS outputs with the black-box ensemble post-processing.

        Note: self.model is reassigned to each ensemble member in turn.
        """
        if self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE == 'pre_nms':
            raise NotImplementedError
        else:
            outputs_list = []
            for model in model_dict:
                self.model = model
                outputs_list.append(
                    self.post_processing_standard_nms(input_im))

            # Merge results:
            ensemble_pred_box_list = []
            ensemble_pred_prob_vectors_list = []
            ensembles_class_idxs_list = []
            ensembles_pred_box_covariance_list = []
            for results in outputs_list:
                # Append per-ensemble outputs after NMS has been performed.
                ensemble_pred_box_list.append(results.pred_boxes.tensor)
                ensemble_pred_prob_vectors_list.append(results.pred_cls_probs)
                ensembles_class_idxs_list.append(results.pred_classes)
                ensembles_pred_box_covariance_list.append(
                    results.pred_boxes_covariance)

            return inference_utils.general_black_box_ensembles_post_processing(
                input_im,
                ensemble_pred_box_list,
                ensembles_class_idxs_list,
                ensemble_pred_prob_vectors_list,
                ensembles_pred_box_covariance_list,
                self.test_nms_thresh,
                self.test_topk_per_image,
                self.cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD,
                is_generalized_rcnn=True,
                merging_method=self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE)

    def post_processing_bayes_od(self, input_im):
        """
        Since there is no NMS step in DETR, bayesod is not implemented. Although possible to add NMS
        and implement it later on.
        """
        # Removed the unreachable `pass` that followed this raise.
        raise NotImplementedError
|
{"hexsha": "6cbcd1678b5da0a60855f3fe01e38b272ecb9d81", "size": 8454, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/probabilistic_inference/probabilistic_detr_predictor.py", "max_stars_repo_name": "jskhu/probdet-1", "max_stars_repo_head_hexsha": "b8bda3bd7cdd573aa9f70a62453d147664211af6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2021-01-14T03:44:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T12:27:22.000Z", "max_issues_repo_path": "src/probabilistic_inference/probabilistic_detr_predictor.py", "max_issues_repo_name": "jskhu/probdet-1", "max_issues_repo_head_hexsha": "b8bda3bd7cdd573aa9f70a62453d147664211af6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-01-15T22:39:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-22T15:52:03.000Z", "max_forks_repo_path": "src/probabilistic_inference/probabilistic_detr_predictor.py", "max_forks_repo_name": "jskhu/probdet-1", "max_forks_repo_head_hexsha": "b8bda3bd7cdd573aa9f70a62453d147664211af6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-02-03T02:55:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T14:30:31.000Z", "avg_line_length": 42.27, "max_line_length": 119, "alphanum_fraction": 0.6219541046, "include": true, "reason": "import numpy", "num_tokens": 1637}
|
[STATEMENT]
lemma Reals_cases [cases set: Reals]:
assumes "q \<in> \<real>"
obtains (of_real) r where "q = of_real r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding Reals_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
from \<open>q \<in> \<real>\<close>
[PROOF STATE]
proof (chain)
picking this:
q \<in> \<real>
[PROOF STEP]
have "q \<in> range of_real"
[PROOF STATE]
proof (prove)
using this:
q \<in> \<real>
goal (1 subgoal):
1. q \<in> range of_real
[PROOF STEP]
unfolding Reals_def
[PROOF STATE]
proof (prove)
using this:
q \<in> range of_real
goal (1 subgoal):
1. q \<in> range of_real
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
q \<in> range of_real
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
q \<in> range of_real
[PROOF STEP]
obtain r where "q = of_real r"
[PROOF STATE]
proof (prove)
using this:
q \<in> range of_real
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
q = of_real r
goal (1 subgoal):
1. (\<And>r. q = of_real r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
q = of_real r
[PROOF STEP]
show thesis
[PROOF STATE]
proof (prove)
using this:
q = of_real r
goal (1 subgoal):
1. thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 768, "file": null, "length": 13}
|
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
(*
* Test force/prevent heap abstraction.
*)
theory heap_lift_force_prevent
imports "AutoCorres.AutoCorres"
begin

external_file "heap_lift_force_prevent.c"
install_C_file "heap_lift_force_prevent.c"

(* Abstract the same C file in two modes at once: heap abstraction is
   explicitly prevented for unlifted_a/unlifted_b and forced for
   lifted_a/lifted_b, so the generated functions can be reasoned about in
   both the raw-heap and the lifted-heap view below. *)
autocorres [
    no_heap_abs = unlifted_a unlifted_b,
    force_heap_abs = lifted_a lifted_b,
    ts_force nondet = unlifted_a unlifted_b lifted_a lifted_b
  ] "heap_lift_force_prevent.c"

context heap_lift_force_prevent begin

(* A valid w32 pointer read through the lifted heap agrees with h_val on the
   underlying concrete heap. *)
lemma heap_w32_hrs_mem [simp]:
    "\<lbrakk> is_valid_w32 (lift_global_heap s) p; heap_w32 (lift_global_heap s) p = a \<rbrakk>
        \<Longrightarrow> h_val (hrs_mem (t_hrs_' s)) p = a"
  by (fastforce simp: lifted_globals_ext_simps(3) lifted_globals_ext_simps(4) h_val_simple_lift)

(* wp rule for the heap-lifted lifted_a': it returns the w32 stored at p. *)
lemma lifted_a_wp [wp]:
    "\<lbrace> \<lambda>s. is_valid_w32 s p \<and> (\<exists>a. heap_w32 s p = a \<and> P a s) \<rbrace> lifted_a' p \<lbrace> \<lambda>r s. P r s \<rbrace>"
  by (clarsimp simp: lifted_a'_def, wp, auto)

(* wp rule for the unlifted unlifted_a': stated over the raw heap via h_val. *)
lemma unlifted_a_wp [wp]:
    "\<lbrace> \<lambda>s. c_guard p \<and> P (h_val (hrs_mem (t_hrs_' s)) p) s \<rbrace>
       unlifted_a' p \<lbrace> \<lambda>r s. P r s \<rbrace>"
  by (clarsimp simp: unlifted_a'_def, wp, auto)

(* wp rule for lifted_b': its result is three times the stored value. *)
lemma lifted_b_wp [wp]:
    "\<lbrace> \<lambda>s. is_valid_w32 s p \<and> (\<exists>a. heap_w32 s p = a \<and> P (a * 3) s) \<rbrace> lifted_b' p \<lbrace> \<lambda>r s. P r s \<rbrace>"
  apply (clarsimp simp: lifted_b'_def)
  including no_pre apply wp
  apply (auto simp: simple_lift_c_guard lift_global_heap_def field_simps)
  done

(* wp rule for unlifted_b'. The postcondition is quantified over all concrete
   states t with the same lifted view as s, since the raw-heap body is only
   determined up to the lifted-heap abstraction. *)
lemma unlifted_b_wp [wp]:
    "\<lbrace> \<lambda>s. heap_ptr_valid (hrs_htd (t_hrs_' s)) p
         \<and> (\<forall>t. lift_global_heap t = lift_global_heap s \<longrightarrow> P (h_val (hrs_mem (t_hrs_' t)) p * 3) t) \<rbrace>
       unlifted_b' p \<lbrace> \<lambda>r s. P r s \<rbrace>"
  apply (clarsimp simp: unlifted_b'_def)
  apply wp
  apply (rule conjI)
   apply (metis simple_lift_c_guard simple_lift_def)
  apply clarsimp
  apply (rule conjI)
   apply (clarsimp simp: lift_global_heap_def )
  apply (unfold simple_lift_def)
  apply (clarsimp split: option.splits)
  apply (clarsimp simp: field_simps)
  apply (erule allE, erule (1) impE)
  apply (subgoal_tac "h_val (hrs_mem (t_hrs_' s)) p = h_val (hrs_mem (t_hrs_' t)) p"
                     "heap_w32 (lift_global_heap s) p = h_val (hrs_mem (t_hrs_' t)) p")
    apply clarsimp
   apply (clarsimp simp: lift_global_heap_def)
  apply (drule fun_cong [where x = p])
  apply (clarsimp simp: simple_lift_def)
  apply (metis heap_w32_hrs_mem lifted_globals_ext_simps(4) simple_lift_def)
  done

end

end
|
{"author": "seL4", "repo": "l4v", "sha": "9ba34e269008732d4f89fb7a7e32337ffdd09ff9", "save_path": "github-repos/isabelle/seL4-l4v", "path": "github-repos/isabelle/seL4-l4v/l4v-9ba34e269008732d4f89fb7a7e32337ffdd09ff9/tools/autocorres/tests/proof-tests/heap_lift_force_prevent.thy"}
|
import networkx as nx
import matplotlib.pyplot as plt

filename = 'SCC.txt'

# Build a directed graph from an edge list file with one "tail head" pair
# per line.
DG = nx.DiGraph()
with open(filename) as f:
    for line in f:
        # split() with no separator is robust to repeated spaces, tabs and
        # the trailing newline; rsplit(' ') would keep '\n' attached to the
        # last token and crash on blank lines.
        parsed_line = line.split()
        if len(parsed_line) < 2:
            # Skip blank or malformed lines instead of raising IndexError.
            continue
        DG.add_edge(int(parsed_line[0]), int(parsed_line[1]))
|
{"hexsha": "2d524706d09a77b7a12e17756cea002fa62b181d", "size": 269, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms_01_stanford/4_week/play.py", "max_stars_repo_name": "h-mayorquin/coursera", "max_stars_repo_head_hexsha": "508e800566f3a2ceba759656bb5ee407de927b14", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "algorithms_01_stanford/4_week/play.py", "max_issues_repo_name": "h-mayorquin/coursera", "max_issues_repo_head_hexsha": "508e800566f3a2ceba759656bb5ee407de927b14", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms_01_stanford/4_week/play.py", "max_forks_repo_name": "h-mayorquin/coursera", "max_forks_repo_head_hexsha": "508e800566f3a2ceba759656bb5ee407de927b14", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.9333333333, "max_line_length": 61, "alphanum_fraction": 0.6394052045, "include": true, "reason": "import networkx", "num_tokens": 70}
|
[STATEMENT]
lemma fv_subterms_substI[intro]: "y \<in> fv t \<Longrightarrow> \<theta> y \<in> subterms t \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<in> fv t \<Longrightarrow> \<theta> y \<in> subterms t \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
[PROOF STEP]
using image_iff vars_iff_subtermeq
[PROOF STATE]
proof (prove)
using this:
(?z \<in> ?f ` ?A) = (\<exists>x\<in>?A. ?z = ?f x)
(?x \<in> fv ?t) = (Var ?x \<sqsubseteq> ?t)
goal (1 subgoal):
1. y \<in> fv t \<Longrightarrow> \<theta> y \<in> subterms t \<cdot>\<^sub>s\<^sub>e\<^sub>t \<theta>
[PROOF STEP]
by fastforce
|
{"llama_tokens": 272, "file": "Stateful_Protocol_Composition_and_Typing_More_Unification", "length": 2}
|
import argparse, os, time, json
import numpy as np
from os import path
from evaluation.common import precision_recall_curve
from pairwise_models import restore_definition
from rule_based.most_followers import MostFollowers
from utils.common import Scaler
def f1(prec: float, rec: float) -> float:
    """Harmonic mean of precision and recall (F1 score).

    Returns 0.0 when both precision and recall are zero, where the
    harmonic mean is undefined; the original raised ZeroDivisionError.
    """
    denom = prec + rec
    if denom == 0:
        return 0.0
    return 2 * prec * rec / denom
def present(workdir, file):
    """Check that `file` exists inside `workdir`.

    Prints a confirmation when found. Raises FileNotFoundError otherwise
    (a subclass of Exception, so callers catching Exception — as the
    original generic `raise Exception` implied — keep working).
    """
    if path.exists(path.join(workdir, file)):
        print(" ", "%s exists" % file)
    else:
        raise FileNotFoundError("%s does not exist" % file)
def main(workdir):
    """Evaluate every trained pairwise model plus a most-followers baseline.

    `workdir` must contain dataset.json, splits/ and manifest.json.  For each
    split x model combination the trained model is restored from disk, scored
    on that split's test items, and raw score dumps plus precision/recall
    tables are written to `workdir`/evaluation.
    """
    print("Performing checks")
    for file in ["dataset.json", "splits", "manifest.json"]:
        present(workdir, file)
    # Preparing the evaluation directory
    evaluation_dir = path.join(workdir, "evaluation")
    if not path.exists(evaluation_dir):
        os.mkdir(evaluation_dir)
    splits = []
    # Detecting splits.  Bug fix: use the `workdir` parameter instead of the
    # global `args`, so main() also works when imported and called directly.
    splits_dir = path.join(workdir, "splits")
    for split in os.listdir(splits_dir):
        split_dir = path.join(splits_dir, split)
        if split.startswith(".") or not path.isdir(split_dir):
            continue
        if not path.exists(path.join(split_dir, "models")):
            print("Models are not trained for split %s. Halting" % split)
            return
        splits.append(split_dir)
    print("Detected %d splits" % len(splits))
    if len(splits) == 0:
        print("Train the models first. Halting")
        return
    # Model inventory is taken from the first split; every split is assumed
    # to have been trained with the same model/feature-set grid.
    models_dir = path.join(splits[0], "models")
    print("\nDetected models:")
    models = dict()
    for model in os.listdir(models_dir):
        model_dir = path.join(models_dir, model)
        if model.startswith(".") or not path.isdir(model_dir):
            continue
        for feature_set in os.listdir(model_dir):
            if feature_set.startswith("."):
                continue
            model_name = "%s@%s" % (model, feature_set)
            models[model_name] = (model, feature_set)
            print(" ", model_name)
    if len(models) == 0:
        print(" ", "No models detected. Halting")
        return
    print("\nDeserialising dataset")
    timestamp = time.time()
    test_set = []
    counter = 0
    with open(path.join(workdir, "dataset.json"), 'r') as reader:
        for line in reader:
            sample = json.loads(line)
            # The raw resource payload is large and unused during evaluation.
            del sample["resource"]
            test_set.append(sample)
            counter += 1
            if counter % 5000 == 0:
                print("  Loaded %d samples" % counter)
    print("Done in %.2fs, loaded %d samples" % (time.time() - timestamp, len(test_set)))
    print("\nDeserialising test splits")
    test_ids = {}
    test_stats = {}
    for split_id, split in enumerate(splits):
        # Deserializing test sets remembering from which split they came from
        with open(path.join(split, "test.csv"), 'r') as reader:
            ids = []
            test_stats[split_id] = 0
            for line in reader:
                ids.append(line.rstrip().split(',')[0])
            # ids[0] is the CSV header row and is skipped.
            for entity_id in ids[1:]:
                test_ids[entity_id] = split_id
                test_stats[split_id] += 1
    print("Loaded %d test items with following counts: [%s]" % (len(test_ids), ", ".join([str(stat) for stat in test_stats.values()])))
    print("\nDeserialising scalers")
    test_scalers = []
    for split_id, split in enumerate(splits):
        with open(path.join(split, "scaler.json"), 'r') as scaler_reader:
            test_scalers.append(Scaler.from_dict(json.load(scaler_reader)))
    print("\nDumping baseline:")
    baseline = MostFollowers()
    debug_writer = open(path.join(evaluation_dir, "most_followers.dump"), 'w')
    for sample in test_set:
        debug_writer.write("Entry: %s\n" % sample["entry"]["resourceId"])
        debug_writer.write("Query: -\n")
        correct = -1
        predicted = baseline.predict(sample)
        # First pass: locate the gold candidate by case-insensitive handle.
        for i, candidate in enumerate(sample["candidates"]):
            if sample["entry"]["twitterId"].casefold() == candidate["profile"]["screenName"].casefold():
                correct = i
        # Second pass: emit hard 0/1 scores for the baseline's single pick.
        for i, candidate in enumerate(sample["candidates"]):
            if predicted == i:
                scores = [0.0, 1.0]
            else:
                scores = [1.0, 0.0]
            debug_writer.write("%.6f\t%.6f\t%d\t%d\t%s\t%s\n" % (scores[0], scores[1],
                                                                 int(correct == i), int(i == 0),
                                                                 sample["entry"]["twitterId"],
                                                                 sample["candidates"][i]["profile"]["screenName"]))
    debug_writer.close()
    print("\nEvaluation:")
    for model_name in sorted(models.keys()):
        print("Evaluating model %s" % model_name)
        # Invoking a trained model from disk for each split
        model_instances = []
        model_type, feature_set = models[model_name]
        try:
            for split_id, split in enumerate(splits):
                model_location = path.join(split, "models", model_type, feature_set, "model")
                model = restore_definition(model_location)
                model.restore_from_file(model_location)
                model_instances.append(model)
        except Exception as e:
            print("Error happened while restoring model:", e)
            continue
        debug_writer = open(path.join(evaluation_dir, model_name + ".dump"), 'w')
        expected = []
        # One prediction list per abstention threshold on the score margin.
        predicted = {}
        for i in np.arange(0.0, 0.5, 0.1):
            predicted[i] = []
        scores = []
        counter = 0
        check_interval = 1000
        timestamp = time.time()
        for sample in test_set:
            counter += 1
            highest_score = -1.0
            predicted_id = -1
            candidate_id = -1
            correct_id = -1
            second_best = -1.0
            sample_features = None
            for features in sample["features"]:
                candidate_id += 1
                if sample_features is None:
                    sample_features = dict()
                    for subspace in features:
                        sample_features[subspace] = []
                # Scale each subspace with the scaler of the split this
                # entity was held out in.
                for subspace in features:
                    cur_vector = test_scalers[test_ids[sample["entry"]["resourceId"]]].fit_subspace(features[subspace], subspace)
                    sample_features[subspace].append(cur_vector)
                is_current_correct = sample["entry"]["twitterId"].casefold() == sample["candidates"][candidate_id]["profile"]["screenName"].casefold()
                if is_current_correct:
                    if correct_id >= 0:
                        print(" ", "Duplicate correct candidate found")
                    else:
                        correct_id = candidate_id
            debug_writer.write("Entry: %s\n" % sample["entry"]["resourceId"])
            debug_writer.write("Query: -\n")
            if len(sample["features"]) > 0:
                for subspace in sample_features:
                    sample_features[subspace] = np.vstack(sample_features[subspace])
                sample_scores = model_instances[test_ids[sample["entry"]["resourceId"]]].predict(features=sample_features)
                for i in range(sample_scores.shape[0]):
                    debug_writer.write("%.6f\t%.6f\t%d\t%d\t%s\t%s\n" % (sample_scores[i][0], sample_scores[i][1],
                                                                         int(correct_id == i), int(i == 0),
                                                                         sample["entry"]["twitterId"],
                                                                         sample["candidates"][i]["profile"]["screenName"]))
                # Column 1 is the positive-class probability.
                sample_scores = sample_scores[::, 1]
                top_2 = np.argsort(sample_scores)[-2::][::-1].tolist()
                predicted_id = top_2[0]
                highest_score = sample_scores[top_2[0]]
                if len(top_2) > 1:
                    second_best = sample_scores[top_2[1]]
            # Abstain (-1) when the margin between the two best candidates is
            # below the threshold.
            for threshold in predicted:
                if highest_score - second_best < threshold:
                    predicted[threshold].append(-1)
                else:
                    predicted[threshold].append(predicted_id)
            expected.append(correct_id)
            scores.append(highest_score)
            if counter % check_interval == 0:
                print(" ", "%d samples processed (%.2fs)" % (counter, (time.time() - timestamp)))
        debug_writer.close()
        with open(path.join(evaluation_dir, model_name+".txt"), 'w') as writer:
            writer.write("Selection:\n")
            writer.write("All")
            for threshold in predicted:
                p, r, s = precision_recall_curve(expected, predicted[threshold], scores)
                for i in range(len(p)):
                    writer.write("\nDNN\t%.4f\t%.4f\t%.4f\t%.2f\t%.2f" % (p[i], r[i], f1(p[i], r[i]), threshold, s[i]))
if __name__ == "__main__":
    # Command-line entry point: a single required --workdir argument pointing
    # at the pipeline output with the pretrained models.
    arg_parser = argparse.ArgumentParser(description='Simple API that returns predictions from a model')
    arg_parser.add_argument('--workdir', required=True, help='Folder with the pipeline result and pretrained models', metavar='#')
    args = arg_parser.parse_args()
    print("Initialized with settings:")
    print(vars(args))
    main(args.workdir)
|
{"hexsha": "1250e0f49a0c8d7576846e62af6c23cd91d0e5e4", "size": 9328, "ext": "py", "lang": "Python", "max_stars_repo_path": "align-train/evaluate.py", "max_stars_repo_name": "Remper/alignments", "max_stars_repo_head_hexsha": "517becd115999914901b4503baa108849058be2c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2017-05-17T16:45:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T03:24:39.000Z", "max_issues_repo_path": "align-train/evaluate.py", "max_issues_repo_name": "Remper/alignments", "max_issues_repo_head_hexsha": "517becd115999914901b4503baa108849058be2c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-09T22:56:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-18T11:26:19.000Z", "max_forks_repo_path": "align-train/evaluate.py", "max_forks_repo_name": "Remper/alignments", "max_forks_repo_head_hexsha": "517becd115999914901b4503baa108849058be2c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-06-01T07:44:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-13T02:19:08.000Z", "avg_line_length": 39.8632478632, "max_line_length": 150, "alphanum_fraction": 0.5534948542, "include": true, "reason": "import numpy", "num_tokens": 2012}
|
import numpy as np
# Load the data in csv format
# Each row contains an instance (case)
# The values included in each row are separated by a string given by the parameter sep, e.g., ","
# Each column corresponds to the values of a (discrete) random variable
# name (string): file name containing the data
# sep (string): separates the different values of the data
# return numpy.array[instances, vars]
def loadCsv(name, sep, numInter=3, maxDiscVals=5):
    """Load a delimited text file and integer-code every column.

    Columns recognised as discrete are re-encoded to consecutive integers;
    continuous columns are equal-frequency discretized into `numInter` bins
    (or kept raw when numInter is None).

    Fixes for modern Python/NumPy: np.str / np.float (removed in NumPy 1.24)
    replaced by the builtins, and the cut-point index computation now uses
    integer division (plain / yields a float index under Python 3).
    """
    text = np.loadtxt(name, dtype=str, delimiter=sep)
    (N, n) = text.shape
    # Determine the nature of the variables (discrete vs continuous)
    disc = [True for i in range(n)]
    for i in range(n):
        if str.isalpha(text[0, i]) or str.isdigit(
                text[0, i]):  # all-alphabetic or all-digit first token
            vals = np.unique(text[:, i])
            # Many distinct values that all parse as numbers -> continuous.
            if vals.size > maxDiscVals:
                try:
                    for v in vals:
                        float(v)
                    disc[i] = False
                except ValueError:
                    disc[i] = True
        else:
            try:
                float(text[0, i])
                disc[i] = False
            except ValueError:
                disc[i] = True
    data = [[] for i in range(n)]
    for i in range(n):
        if disc[i]:
            # Map the distinct symbols onto 0..k-1.
            data[i] = np.unique(text[:, i], return_inverse=True)[1]
        else:
            varData = [float(x) for x in text[:, i]]
            if numInter is not None:  # Discretize with equal frequency
                ordered = np.sort(varData)
                # // keeps the cut-point indices integral (Python 3 fix).
                cut = [ordered[(j + 1) * N // numInter - 1] for j in range(numInter)]
                cut[numInter - 1] = ordered[N - 1]
                data[i] = [0 for j in range(N)]
                for j in range(N):
                    for k in range(numInter):
                        if varData[j] <= cut[k]:
                            break
                    data[i][j] = k
            else:  # Not discretize
                data[i] = varData
    return np.transpose(data)
def saveAsCSV(name, D, delimiter=','):
    """Write integer matrix ``D`` to file ``name`` as delimited text."""
    np.savetxt(name, D, fmt='%1d', delimiter=delimiter)
def card(D):
    """Cardinality of each variable: the per-row maximum value plus one."""
    return np.amax(D, axis=1) + 1
def sufficientTreeStatistics(r, D):
    """Pairwise sufficient statistics: N[u][v][xu, xv] counts rows of D.

    r: per-variable cardinalities; D: data, one instance per row.

    Fixes: the original comprehension order built N[u][v] with shape
    (r[v], r[u]) — an IndexError for variables of unequal cardinality — and
    used np.int, which was removed in NumPy 1.24.
    """
    N = [[np.zeros(shape=(r[u], r[v]), dtype=int) for v in range(len(r))]
         for u in range(len(r))]
    for x in D:
        for u in range(len(r)):
            for v in range(len(r)):
                N[u][v][x[u], x[v]] += 1
    return N
def sufficient3statistics(r, D):
    """Triple-wise sufficient statistics: N[u][v][w][xu, xv, xw] counts rows.

    Fixes: the comprehension order now yields N[u][v][w] with the expected
    shape (r[u], r[v], r[w]) (the original transposed the first two axes and
    crashed for unequal cardinalities), and np.int (removed in NumPy 1.24)
    is replaced by the builtin int.
    """
    N = [[[np.zeros(shape=(r[u], r[v], r[w]), dtype=int)
           for w in range(len(r))]
          for v in range(len(r))]
         for u in range(len(r))]
    for x in D:
        for u in range(len(r)):
            for v in range(len(r)):
                for w in range(len(r)):
                    N[u][v][w][x[u], x[v], x[w]] += 1
    return N
def checkStatistics(stat, r, D):
    """Return True iff every count table in ``stat`` sums to len(D)."""
    total = len(D)
    for entry in stat:
        for table in entry:
            if np.sum(table) != total:
                return False
    return True
def indToInst(ind, card):
    """Decode flat index ``ind`` into a mixed-radix instance over ``card``.

    Digit i is the value of variable i (least-significant digit first).
    Fix: uses integer division; under Python 3 the original / left a float
    remainder that corrupted every subsequent digit (e.g. ind=5, card=[2,3]
    yielded [1, 2.5] instead of [1, 2]).  The unused weight accumulator was
    also dropped.
    """
    inst = np.zeros((len(card),))
    res = ind
    for i in range(len(card)):
        inst[i] = res % card[i]
        res = res // card[i]
    return inst
def instToInd(inst, card):
    """Encode mixed-radix instance ``inst`` (radices ``card``) as a flat index."""
    index = inst[0]
    weight = card[0]
    for pos in range(1, len(card)):
        index += weight * inst[pos]
        weight *= card[pos]
    return index
'''
Generate a stratified partition of the data of (almost) the same size
indC: index of the class variable, int
D: data set, np.array((instances,variables))
k: number of partitions
'''
def stratifiedPartitions(indC, D, k, seed=None):
    """Split D into k class-stratified partitions of (almost) equal size.

    indC: column index of the class variable
    D: data set, np.array((instances, variables))
    k: number of partitions
    seed: optional RNG seed for reproducible shuffling

    Fixes: np.float (removed in NumPy 1.24) replaced by float, and the
    generator previously fed to np.row_stack replaced by a list — modern
    NumPy rejects generators in the stacking functions.
    """
    if seed is not None:
        np.random.seed(seed)
    C = D[:, indC]
    valsC, Nc = np.unique(C, return_counts=True)
    ordC = np.argsort(C)
    # Get the randomized indices to instances of each class
    indc = list()
    ini = 0
    for i, c in enumerate(valsC):
        indc.append(np.random.permutation(ordC[ini:(ini + Nc[i])]))
        ini += Nc[i]
    # Per-class chunk size for each partition
    deltaC = Nc / float(k)
    Dk = list()
    for j in range(k):
        Dk.append(np.vstack(
            [D[indc[c][np.arange(int(deltaC[c] * j), int(deltaC[c] * (j + 1)))], :]
             for c in range(valsC.size)]))
    return Dk
def stratifiedHoldoutTrainTest(indC, D, percTest=0.3, seed=None):
    """Class-stratified holdout split of D into (Train, Test).

    indC: column index of the class variable
    percTest: fraction of each class assigned to the test set
    seed: optional RNG seed for reproducible shuffling

    Fix: the generators previously fed to np.row_stack are now lists —
    modern NumPy rejects generators in the stacking functions.
    """
    if seed is not None:
        np.random.seed(seed)
    C = D[:, indC]
    valsC, Nc = np.unique(C, return_counts=True)
    ordC = np.argsort(C)
    # Get the randomized indices to instances of each class
    indc = list()
    ini = 0
    for i, c in enumerate(valsC):
        indc.append(np.random.permutation(ordC[ini:(ini + Nc[i])]))
        ini += Nc[i]
    # First int(Nc*percTest) shuffled instances of each class form Test,
    # the remainder form Train.
    Test = np.vstack([D[indc[c][np.arange(int(Nc[c] * percTest))], :]
                      for c in range(valsC.size)])
    Train = np.vstack([D[indc[c][np.arange(int(Nc[c] * percTest), Nc[c])], :]
                       for c in range(valsC.size)])
    return (Train, Test)
def stratifiedCVtrainingTest(indC, D, k, seed=None):
    """k-fold stratified cross-validation folds as (Training, Test) lists.

    Test[i] is the i-th stratified partition; Training[i] stacks every other
    partition.  Seeding happens here so stratifiedPartitions reuses the
    already-seeded global RNG.

    Fix: the generator previously fed to np.row_stack is now a list —
    modern NumPy rejects generators in the stacking functions.
    """
    if seed is not None:
        np.random.seed(seed)
    Test = stratifiedPartitions(indC, D, k)
    Training = [np.vstack([Test[j] for j in range(k) if j != i])
                for i in range(k)]
    return (Training, Test)
####################
# Weak Supervision #
####################
'''
Creates a weak supervised dataset given in term of bags with label proportions
D: fully supervised data
indC: index of the class variable. If None is the last variable
minSizeBag: minimum size of a bag
maxSizeBag: maximum size of a bag
numBags: number of bags. If None a partition of the data is returned, else
each bag is obtained from a random set of indexes
seed: random seed
return (Bags,Props) where
Bags: is a set of Bags of instances given in terms of
the values of the predictor variables, list(np.array)
Props: is the proportion of the class labels in each bag, np.array(int)
'''
def createBagsWithProps(D, indC=None, minSizeBag=2, maxSizeBag=10, numBags=None, seed=None):
    """Build (Bags, Props) weak supervision from fully labeled data D.

    When numBags is None the data is partitioned into bags of random size;
    otherwise numBags bags are drawn from random index sets (see the module
    comment above for the full contract).

    Fixes: `is None` identity checks and the builtin int dtype (np.int was
    removed in NumPy 1.24).
    """
    if seed is not None:
        np.random.seed(seed)
    (N, d) = D.shape
    if indC is None:
        indC = d - 1
    cardC = np.max(D[:, indC]) + 1
    Dc = D[:, indC]
    # Predictor columns: everything except the class column
    if indC == d - 1:
        Dx = D[:, range(indC)]
    else:
        Dx = np.hstack((D[:, range(indC)], D[:, range(indC + 1, d)]))
    if numBags is None:
        # Partition of the dataset
        indx = np.random.permutation(N)
        total = 0
        bagSize = list()
        while total < N:
            # Draw a bag size; the bounds keep the last bag from exceeding
            # the remaining number of instances.
            bagSize.append(np.random.choice(
                range(np.min([maxSizeBag + 1, N - total, minSizeBag]), np.min([maxSizeBag + 1, N - total + 1]))))
            total += bagSize[-1]
        ini = 0
        Bags = list()
        Props = list()
        for s in bagSize:
            Bags.append(Dx[indx[range(ini, ini + s)], :])
            (c, Nc) = np.unique(Dc[indx[range(ini, ini + s)]], return_counts=True)
            prp = np.zeros(cardC, dtype=int)
            prp[c] = Nc
            Props.append(prp)
            ini += s
    else:
        Bags = list()
        Props = list()
        for b in range(numBags):
            # Sample a random index set (with possible repeats across bags)
            indx = np.random.choice(N, np.random.choice(range(minSizeBag, maxSizeBag + 1)))
            Bags.append(Dx[indx, :])
            (c, Nc) = np.unique(Dc[indx], return_counts=True)
            prp = np.zeros(cardC, dtype=int)
            prp[c] = Nc
            Props.append(prp)
    return (Bags, Props)
def fromBagsToWeights(Bags, Props):
    '''
    Transform a dataset given in terms of Bags with label proportions into
    a dataset given in terms of replicated fully supervised instances with
    weights corresponding to the labels proportions of the associated class
    Return (D,w) where
    D: fully labeled data, with the class variable in the last column, np.array(instance,variable)
    w: per-instance weights equal to the label proportion in the bag, np.array(instance,1)
    '''
    # np.float / np.int were removed in NumPy 1.24; builtins are equivalent.
    cardC = len(Props[0])
    D = list()
    w = list()
    for ind, B in enumerate(Bags):
        for c in range(cardC):
            N = float(np.sum(Props[ind]))
            # Only replicate the bag for class labels actually present in it.
            if Props[ind][c] > 0:
                D.append(np.hstack((B, c * np.ones(shape=(B.shape[0], 1), dtype=int))))
                w.append(Props[ind][c] / N * np.ones(shape=(B.shape[0], 1)))
    D = np.vstack(D)
    w = np.vstack(w)
    return (D, w)
def fromBagsToProbs(Bags, Props):
    '''
    Transform a dataset given in terms of Bags with label proportions into
    a dataset given in terms of probabilistic instances with class probabilities
    Return (Dx,pc) where
    Dx: unlabeled data, np.array(instance,variable)
    pc: probability of the class, np.array(instance,class label)
    '''
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    cardC = len(Props[0])
    Dx = np.vstack(Bags)
    pc = list()
    for ind, B in enumerate(Bags):
        N = float(np.sum(Props[ind]))
        # Every instance of a bag shares the bag's label distribution.
        p = np.hstack([Props[ind][c] / N * np.ones(shape=(B.shape[0], 1)) for c in range(cardC)])
        pc.append(p)
    pc = np.vstack(pc)
    return (Dx, pc)
def supervisedWTraining(D):
    """Fully supervised data: unit weight per instance, data unchanged."""
    weights = np.ones(D.shape[0])
    return (weights, D)
def missingWTraining(M, ind, card):
    # Expand data with a missing feature at position `ind` into `card`
    # weighted copies, one per possible value, each with weight 1/card.
    # NOTE(review): M[:ind] / M[ind:] slice *rows*, not columns; for 2-D M
    # the hstack mixes a 1-D ones vector with 2-D row slices, which looks
    # dimension-inconsistent. Presumably column slicing (M[:, :ind]) was
    # intended -- confirm against callers before relying on D's layout.
    n = M.shape[0]
    D = np.vstack([np.hstack((M[:ind], np.ones(n) * x, M[ind:])) for x in range(card)])
    return (np.ones(n * card) / float(card), D)
def corruptedWTraining(C, rho, ind, card):
    # Weighted training set under label noise: each instance is replicated
    # once per possible value of feature `ind`; the observed value keeps
    # weight 1-rho, every alternative shares the remaining rho mass evenly.
    # NOTE(review): as in missingWTraining, C[:ind] / C[ind:] slice rows
    # rather than columns -- verify the intended layout of C.
    if rho == 0:
        # No corruption: plain unit-weight supervision.
        return supervisedWTraining(C)
    n = C.shape[0]
    D = np.vstack([np.hstack((C[:ind], np.ones(n) * x, C[ind:])) for x in range(card)])
    w = np.zeros(n * card)
    for i in range(n):
        for x in range(card):
            if x == D[i, ind]:
                # Probability the observed label is the true one.
                w[i + x * n] = 1 - rho
            else:
                # Alternatives split the corruption probability evenly.
                w[i + x * n] = rho / (card - 1.0)
    return (w, D)
def multilabelWTraining(U, L):
    # Replicate each unlabeled instance once per candidate label in L[i],
    # weighting every copy by 1/|L[i]| so each instance contributes unit mass.
    # NOTE(review): a generator expression is passed to np.hstack inside a
    # one-element list for np.vstack; confirm this actually builds one row
    # per (instance, label) pair -- the nesting looks accidental.
    n = U.shape[0]
    D = np.vstack([np.hstack((U[i, :], np.array([l])) for i in range(n) for l in L[i])])
    w = np.array([1.0 / len(L[i]) for i in range(n) for l in L[i]])
    return (w, D)
def missingTransform(D, ind):
    """Return a copy of 2-D array ``D`` with column ``ind`` removed.

    Fix: the general branch used D[:, ind] (the 1-D column being removed)
    as the left half of the hstack, which raised for interior columns; the
    left half is D[:, :ind].
    """
    if ind == D.shape[1] - 1:
        return np.array(D[:, :ind])
    elif ind == 0:
        return np.array(D[:, ind + 1:])
    else:
        return np.hstack((D[:, :ind], D[:, ind + 1:]))
def corruptedTransform(D, ind, rho, card):
    '''
    Return a copy of D where feature `ind` of each row is replaced, with
    probability rho, by a different uniformly-chosen value.
    D: data, np.array
    ind: index of the corrupted feature
    rho: probability of corrupting each instance
    card: cardinality of the corrupted feature
    '''
    corrupted = np.array(D)
    for row in range(corrupted.shape[0]):
        draw = np.random.uniform()
        if draw < rho:
            alternatives = [c for c in range(card) if c != D[row, ind]]
            corrupted[row, ind] = np.random.choice(alternatives)
    return corrupted
def multilabelTransform(D, alpha, beta, card):
    """Turn supervised data into multi-label data (U, L).

    The true label of each row is kept in its label set with probability
    alpha; every other label is added independently with probability beta.
    U is D without its last (class) column; L is a list of label lists.
    """
    n_rows, n_cols = D.shape
    U = D[:, :(n_cols - 1)]
    L = list()
    for row in range(n_rows):
        labels = list()
        for c in range(card):
            draw = np.random.uniform()
            if c == D[row, -1]:
                if draw < alpha:
                    labels.append(c)
            elif draw < beta:
                labels.append(c)
        L.append(labels)
    return (U, L)
|
{"hexsha": "c0943bc0bcf6621256d9097eb3d2dd344746bd3e", "size": 10862, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/Parser.py", "max_stars_repo_name": "MachineLearningBCAM/minimax-risk-classifier", "max_stars_repo_head_hexsha": "82586c632268c103de269bcbffa5f7849b174a29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-28T01:36:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T08:24:17.000Z", "max_issues_repo_path": "scripts/Parser.py", "max_issues_repo_name": "MachineLearningBCAM/minimax-risk-classifier", "max_issues_repo_head_hexsha": "82586c632268c103de269bcbffa5f7849b174a29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/Parser.py", "max_forks_repo_name": "MachineLearningBCAM/minimax-risk-classifier", "max_forks_repo_head_hexsha": "82586c632268c103de269bcbffa5f7849b174a29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-08T10:36:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T10:36:30.000Z", "avg_line_length": 28.212987013, "max_line_length": 116, "alphanum_fraction": 0.5475050635, "include": true, "reason": "import numpy", "num_tokens": 3014}
|
[STATEMENT]
lemma connect[unfolded \<I>_adv_core_def \<I>_usr_core_def]:
fixes \<I>_adv_restk \<I>_adv_resta \<I>_usr_restk \<I>_usr_resta
defines "\<I> \<equiv> (\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta))"
assumes [WT_intro]: "WT_rest \<I>_adv_restk \<I>_usr_restk I_key_rest key_rest"
and [WT_intro]: "WT_rest \<I>_adv_resta \<I>_usr_resta I_auth_rest auth_rest"
and "exception_\<I> \<I> \<turnstile>g D \<surd>"
shows "connect D (obsf_resource ideal_resource) = connect D (obsf_resource real_resource)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
note I_defs = \<I>_adv_core_def \<I>_usr_core_def
[PROOF STATE]
proof (state)
this:
\<I>_adv_core \<equiv> \<I>_full \<oplus>\<^sub>\<I> (\<I>_full \<oplus>\<^sub>\<I> (\<I>_full \<oplus>\<^sub>\<I> \<I>_uniform (sec.Inp_Fedit ` carrier \<L>) UNIV))
\<I>_usr_core \<equiv> \<I>_uniform (sec.Inp_Send ` carrier \<L>) UNIV \<oplus>\<^sub>\<I> \<I>_uniform UNIV (sec.Out_Recv ` carrier \<L>)
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
have fact1: "\<I> \<turnstile>res RES (fused_resource.fuse ideal_core' ideal_rest') s \<surd>"
if "pred_prod I_key_rest I_auth_rest (snd (snd s))" "invar_ideal' (fst s)"
for s
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<I> \<turnstile>res RES (fused_resource.fuse ideal_core' ideal_rest') s \<surd>
[PROOF STEP]
unfolding assms(1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)) \<turnstile>res RES (fused_resource.fuse ideal_core' ideal_rest') s \<surd>
[PROOF STEP]
apply(rule callee_invariant_on.WT_resource_of_oracle[where I="pred_prod invar_ideal' (\<lambda>(_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. callee_invariant_on (fused_resource.fuse ideal_core' ideal_rest') (pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)) ((\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)))
2. pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. callee_invariant_on (fused_resource.fuse ideal_core' ideal_rest') (pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)) ((\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)))
[PROOF STEP]
by(rule fused_resource.callee_invariant_on_fuse)(rule WT_intro)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
pred_prod I_key_rest I_auth_rest (snd (snd s))
invar_ideal' (fst s)
goal (1 subgoal):
1. pred_prod invar_ideal' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
by(cases s)(simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<lbrakk>pred_prod I_key_rest I_auth_rest (snd (snd ?s7)); invar_ideal' (fst ?s7)\<rbrakk> \<Longrightarrow> \<I> \<turnstile>res RES (fused_resource.fuse ideal_core' ideal_rest') ?s7 \<surd>
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
have fact2: "\<I> \<turnstile>res RES (fused_resource.fuse real_core' real_rest') s \<surd>"
if "pred_prod I_key_rest I_auth_rest (snd (snd s))" "invar_real' (fst s)"
for s
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<I> \<turnstile>res RES (fused_resource.fuse real_core' real_rest') s \<surd>
[PROOF STEP]
unfolding real_rest'_def assms(1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)) \<turnstile>res RES (fused_resource.fuse real_core' ideal_rest') s \<surd>
[PROOF STEP]
apply(rule callee_invariant_on.WT_resource_of_oracle[where I="pred_prod invar_real' (\<lambda>(_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. callee_invariant_on (fused_resource.fuse real_core' ideal_rest') (pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)) ((\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)))
2. pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. callee_invariant_on (fused_resource.fuse real_core' ideal_rest') (pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest)) ((\<I>_adv_core \<oplus>\<^sub>\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) \<oplus>\<^sub>\<I> (\<I>_usr_core \<oplus>\<^sub>\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)))
[PROOF STEP]
by(rule fused_resource.callee_invariant_on_fuse)(rule WT_intro)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
pred_prod I_key_rest I_auth_rest (snd (snd s))
invar_real' (fst s)
goal (1 subgoal):
1. pred_prod invar_real' (\<lambda>(uu_, s_rest). pred_prod I_key_rest I_auth_rest s_rest) s
[PROOF STEP]
by(cases s)(simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
\<lbrakk>pred_prod I_key_rest I_auth_rest (snd (snd ?s7)); invar_real' (fst ?s7)\<rbrakk> \<Longrightarrow> \<I> \<turnstile>res RES (fused_resource.fuse real_core' real_rest') ?s7 \<surd>
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
[PROOF STEP]
unfolding attach_ideal attach_real
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connect_obsf D (obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest'))) = connect_obsf D (obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')))
[PROOF STEP]
apply (rule connect_cong_trace[where \<I>="exception_\<I> \<I>"])
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. ?A \<turnstile>\<^sub>R obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<approx> obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest'))
2. exception_\<I> \<I> \<turnstile>g D \<surd>
3. outs_gpv (exception_\<I> \<I>) D \<subseteq> ?A
4. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
5. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
apply (rule trace_eq_obsf_resourceI, subst trace_eq'_resource_of_oracle)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. ?A \<turnstile>\<^sub>C fused_resource.fuse ideal_core' ideal_rest'((ideal_s_core', ideal_s_rest')) \<approx> fused_resource.fuse real_core' real_rest'((real_s_core', real_s_rest'))
2. exception_\<I> \<I> \<turnstile>g D \<surd>
3. outs_gpv (exception_\<I> \<I>) D \<subseteq> ?A
4. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
5. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
apply (rule trace_eq_sec[OF assms(2) assms(3)])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. exception_\<I> \<I> \<turnstile>g D \<surd>
2. outs_gpv (exception_\<I> \<I>) D \<subseteq> ((UNIV <+> UNIV <+> UNIV <+> sec.Inp_Fedit ` carrier \<L>) <+> outs_\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) <+> (sec.Inp_Send ` carrier \<L> <+> UNIV) <+> outs_\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)
3. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
4. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>g D \<surd>
[PROOF STEP]
by (rule assms(4))
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. outs_gpv (exception_\<I> \<I>) D \<subseteq> ((UNIV <+> UNIV <+> UNIV <+> sec.Inp_Fedit ` carrier \<L>) <+> outs_\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) <+> (sec.Inp_Send ` carrier \<L> <+> UNIV) <+> outs_\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)
2. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
3. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. outs_gpv (exception_\<I> \<I>) D \<subseteq> ((UNIV <+> UNIV <+> UNIV <+> sec.Inp_Fedit ` carrier \<L>) <+> outs_\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) <+> (sec.Inp_Send ` carrier \<L> <+> UNIV) <+> outs_\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)
[PROOF STEP]
using WT_gpv_outs_gpv[OF assms(4)]
[PROOF STATE]
proof (prove)
using this:
outs_gpv (exception_\<I> \<I>) D \<subseteq> outs_\<I> (exception_\<I> \<I>)
goal (1 subgoal):
1. outs_gpv (exception_\<I> \<I>) D \<subseteq> ((UNIV <+> UNIV <+> UNIV <+> sec.Inp_Fedit ` carrier \<L>) <+> outs_\<I> (\<I>_adv_restk \<oplus>\<^sub>\<I> \<I>_adv_resta)) <+> (sec.Inp_Send ` carrier \<L> <+> UNIV) <+> outs_\<I> (\<I>_usr_restk \<oplus>\<^sub>\<I> \<I>_usr_resta)
[PROOF STEP]
by(simp add: I_defs assms(1) nempty_carrier)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
2. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
[PROOF STEP]
using assms(2,3)[THEN WT_restD_rinit]
[PROOF STATE]
proof (prove)
using this:
I_key_rest (rinit key_rest)
I_auth_rest (rinit auth_rest)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse ideal_core' ideal_rest') (ideal_s_core', ideal_s_rest')) \<surd>
[PROOF STEP]
by (intro WT_obsf_resource)(rule fact1; simp add: ideal_s_rest'_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
using assms(2,3)[THEN WT_restD_rinit]
[PROOF STATE]
proof (prove)
using this:
I_key_rest (rinit key_rest)
I_auth_rest (rinit auth_rest)
goal (1 subgoal):
1. exception_\<I> \<I> \<turnstile>res obsf_resource (RES (fused_resource.fuse real_core' real_rest') (real_s_core', real_s_rest')) \<surd>
[PROOF STEP]
by (intro WT_obsf_resource)(rule fact2; simp add: real_s_rest'_def ideal_s_rest'_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
connect_obsf D (obsf_resource ideal_resource) = connect_obsf D (obsf_resource real_resource)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5782, "file": "Constructive_Cryptography_CM_Constructions_One_Time_Pad", "length": 38}
|
c-----------------------------------------------------------------------
      subroutine bl_proffortfuncstart(str)
c     Begin a named profiling region from Fortran code.  The character
c     string is packed into an integer array via blstr2int so it can be
c     handed to the C++ profiler entry point.
c     NOTE(review): NSTR=128 caps the encoded name length; longer names
c     are presumably truncated by blstr2int -- confirm.
      character*(*) str
      integer NSTR
      parameter (NSTR = 128)
      integer istr(NSTR)
      call blstr2int(istr, NSTR, str)
      call bl_proffortfuncstart_cpp(istr, NSTR)
      end
c-----------------------------------------------------------------------
      subroutine bl_proffortfuncstop(str)
c     End a named profiling region from Fortran code.  The name must match
c     the one passed to bl_proffortfuncstart; it is encoded the same way
c     (blstr2int into a fixed 128-integer buffer) before the C++ call.
      character*(*) str
      integer NSTR
      parameter (NSTR = 128)
      integer istr(NSTR)
      call blstr2int(istr, NSTR, str)
      call bl_proffortfuncstop_cpp(istr, NSTR)
      end
c-----------------------------------------------------------------------
      subroutine bl_proffortfuncstart_int(i)
c     Integer-keyed variant: begin a profiling region identified by `i`,
c     avoiding the string-encoding overhead of bl_proffortfuncstart.
      integer i
      call bl_proffortfuncstart_cpp_int(i)
      end
c-----------------------------------------------------------------------
      subroutine bl_proffortfuncstop_int(i)
c     Integer-keyed variant: end the profiling region identified by `i`
c     (must pair with a prior bl_proffortfuncstart_int with the same key).
      integer i
      call bl_proffortfuncstop_cpp_int(i)
      end
|
{"hexsha": "1db8e208c882aafa14d74591fbce7f1c27292441", "size": 988, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Src/C_BaseLib/BLProfiler_F.f", "max_stars_repo_name": "memmett/BoxLib", "max_stars_repo_head_hexsha": "a235af87d30cbfc721d4d7eb4da9b8daadeded7d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2015-08-03T18:29:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T11:42:40.000Z", "max_issues_repo_path": "Src/C_BaseLib/BLProfiler_F.f", "max_issues_repo_name": "memmett/BoxLib", "max_issues_repo_head_hexsha": "a235af87d30cbfc721d4d7eb4da9b8daadeded7d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2016-06-15T20:46:49.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-10T21:33:10.000Z", "max_forks_repo_path": "Src/C_BaseLib/BLProfiler_F.f", "max_forks_repo_name": "memmett/BoxLib", "max_forks_repo_head_hexsha": "a235af87d30cbfc721d4d7eb4da9b8daadeded7d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2015-08-05T02:19:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-18T12:33:14.000Z", "avg_line_length": 34.0689655172, "max_line_length": 72, "alphanum_fraction": 0.463562753, "num_tokens": 210}
|
# -*- coding: latin-1 -*-
# Copyright (c) 2008 Pycircuit Development Team
# See LICENSE for details.
"""The waveform module contains classes for handling simulation results in
the form of X-Y data. The classes can handle results from multi-dimensional
sweeps.
"""
import numpy as np
from numpy import array,concatenate,alltrue,max,min,log10,arange,pi,sin, \
sign, where, newaxis, r_, vstack, apply_along_axis, nan, isscalar, rank, \
inf, isscalar
import scipy as sp
import scipy.interpolate as interpolate
import types
import operator
from copy import copy
from pycircuit.utilities import remove_index
class Waveform(object):
    """The Waveform class handles swept signals. The sweep can be multi
    dimensional.
    The waveform class can handle both n-dimensional gridded data or data
    where the inner dimension has variable length.
    Examples:
    Initiating N-dimensional gridded data:
    >>> w = Waveform([array([1,2]), array([3,4])], array([[3,4],[4,5]]))
    Initiating N-dimensional data where the inner dimension has variable length
    >>> x0 = array([array([1,1]), array([2])], dtype=object)
    >>> x1 = array([array([1,2]), array([1])], dtype=object)
    >>> w = Waveform([x0, x1], array([array([3,4]),array([4])], dtype = object))
    """
    def __init__(self, x=array([],), y=array([]),
                 xlabels=None, ylabel=None,
                 xunits=None, yunit=None):
        # A single ndarray x is treated as a one-dimensional sweep.
        if type(x) is np.ndarray:
            x = [x]
        if type(y) is not np.ndarray:
            y = array(y)
        xlist = [array(xelement) for xelement in x]
        # "Ragged" data: object-dtype y where every x array shares y's
        # outer shape, so the inner dimension may vary in length.
        self.ragged = (y.dtype == object) and \
            set([xe.shape for xe in xlist + [y]]) == set([y.shape]) and \
            len(xlist) > 1
        dim = len(x)
        if not self.ragged and dim != len(y.shape):
            raise ValueError("Dimension of x (%s) does not match dimension of"
                             " y (%s)"%(map(len, x), y.shape))
        self._xlist = xlist
        self._y = y
        self._dim = dim
        self.xlabels = xlabels
        self.ylabel = ylabel
        self.xunits = xunits
        self.yunit = yunit
    ## numpy array interface
    # High priority lets Waveform win over ndarray in mixed expressions.
    __array_priority__ = 100.0
    def __array__(self, t=None): return self._y
    def __array_wrap__(self, arr, context=None):
        # Re-wrap the result of a numpy ufunc as a Waveform with the same
        # sweeps and labels.
        return Waveform(list(self._xlist), arr,
                        xlabels=self.xlabels, ylabel=self.ylabel,
                        xunits=self.xunits, yunit=self.yunit)
    @property
    def ndim(self):
        """Return the number of nested sweeps"""
        return self._y.ndim
    @property
    def shape(self): return tuple((len(x) for x in self.x))
    def get_x(self, axis=None):
        """Get X vector of the given sweep dimension
        If no dimension is given, a list of the sweeps in all dimensions
        is returned
        >>> w=Waveform([array([1,2]), array([1.5,2.5])], array([[3,4],[4,5]]))
        >>> w.get_x(0)
        array([1, 2])
        >>> w.get_x(1)
        array([ 1.5, 2.5])
        """
        if axis == None:
            return self._xlist
        else:
            axis = self.getaxis(axis)
            return self._xlist[axis]
    def get_y(self):
        """Get Y vector or n-dimensional array if sweep dimension > 1"""
        return self._y
    def set_x(self,value, axis=-1):
        "Set X vector"
        axis = self.getaxis(axis)
        ## Swap order if new xvalues are falling
        if value[0] > value [-1]:
            self._xlist[axis] = value[-1::-1]
            if axis != -1:
                raise Exception("Can only swap order if axis=-1")
            # Reverse y along the inner axis to keep x/y pairing intact.
            self._y = self._y[..., -1::-1]
        else:
            self._xlist[axis] = value
    def set_y(self,value):
        "Set Y multi-dimensional array"
        self._y = value
    def map_x(self, func, axis=-1, xlabel = ''):
        """Apply function func on x-values of the given axis"""
        newxlist = copy(self._xlist)
        newxlist[axis] = func(newxlist[axis])
        newxlabels = list(self._xlabels)
        newxlabels[axis] = xlabel
        return Waveform(newxlist, copy(self._y), xlabels = newxlabels,
                        yunit = self.yunit, ylabel = self.ylabel)
    def map_xaxes(self, func, axes, xlabel = None, xunit = None):
        """Apply function func on x-values from the given axes
        When the number of axes is greater than 1 the output waveform
        has a lower sweep dimension. The axis of the lowest order is
        preserved.
        """
        axes = sorted(list(axes))
        newxlist = copy(self._xlist)
        # Combine x-values of all selected axes and map them onto the
        # lowest-order axis; the other selected axes are removed below.
        xargs = cartesian([self._xlist[axis] for axis in axes])
        newxlist[axes[0]] = map(func, *zip(*xargs))
        newxlabels = list(self.xlabels)
        newxlabels[axes[0]] = xlabel
        newxunits = list(self.xunits)
        newxunits[axes[0]] = xunit
        newyshape = list(self._y.shape)
        newyshape[axes[0]] = len(newxlist[axes[0]])
        for axis in reversed(axes[1:]):
            del newyshape[axis]
            del newxlist[axis]
            del newxlabels[axis]
            del newxunits[axis]
        newy = copy(self._y).reshape(newyshape)
        return Waveform(newxlist, newy,
                        xlabels = newxlabels, xunits = newxunits,
                        yunit = self.yunit, ylabel = self.ylabel)
    ## Operations on Waveform objects
    def binaryop(self, op, a, ylabel = None, yunit = None, reverse = False,
                 sameunit = False):
        """Apply binary operator between self and a"""
        if isinstance(a, Waveform):
            if not compatible(self, a):
                raise ValueError("Waveforms are not compatible")
        if reverse:
            return _broadcast_apply(op, (a, self),
                                    ylabel=ylabel, yunit=yunit,
                                    sameunit=sameunit)
        else:
            return _broadcast_apply(op, (self, a),
                                    ylabel=ylabel, yunit=yunit,
                                    sameunit=sameunit)
    ## Unary operators
    def __abs__(self):
        return Waveform(self._xlist, np.abs(self._y), xlabels = self.xlabels,
                        xunits = self.xunits,
                        ylabel = 'abs(%s)'%self.ylabel, yunit = self.yunit)
    def __neg__(self):
        return Waveform(self._xlist, -self._y, xlabels = self.xlabels,
                        xunits = self.xunits,
                        ylabel = '-%s'%self.ylabel, yunit = self.yunit)
    ## Binary operators
    def __add__(self, a): return self.binaryop(operator.__add__, a, sameunit=True)
    def __radd__(self, a):
        return self.binaryop(operator.__add__, a, reverse=True, sameunit=True)
    def __sub__(self, a):
        return self.binaryop(operator.__sub__, a, sameunit=True)
    def __rsub__(self, a):
        return self.binaryop(operator.__sub__, a, reverse=True, sameunit=True)
    def __mul__(self, a):
        if iswave(a):
            ylabel = '%s * %s'%(self.ylabel, a.ylabel)
            yunit = '%s * %s'%(self.yunit, a.yunit)
        else:
            ylabel = self.ylabel
            yunit = self.yunit
        return self.binaryop(operator.__mul__, a, ylabel = ylabel, yunit=yunit)
    def __rmul__(self, a):
        if iswave(a):
            ylabel = '%s * %s'%(a.ylabel, self.ylabel)
            yunit = '%s * %s'%(a.yunit, self.yunit)
        else:
            ylabel = self.ylabel
            yunit = self.yunit
        return self.binaryop(operator.__mul__, a, reverse=True, ylabel=ylabel,
                             yunit=yunit)
    def __div__(self, a):
        if iswave(a):
            ylabel = '%s / %s'%(self.ylabel, a.ylabel)
            yunit = '%s / %s'%(self.yunit, a.yunit)
        else:
            ylabel = self.ylabel
            yunit = self.yunit
        return self.binaryop(operator.__div__, a, ylabel=ylabel, yunit=yunit)
    def __rdiv__(self, a):
        if iswave(a):
            ylabel = '%s / %s'%(a.ylabel, self.ylabel)
            yunit = '%s / %s'%(a.yunit, self.yunit)
        else:
            ylabel = self.ylabel
            yunit = self.yunit
        # NOTE(review): unlike __div__, the ylabel/yunit computed above are
        # not passed to binaryop here -- confirm whether this is intentional.
        return self.binaryop(operator.__div__, a, reverse=True)
    def __pow__(self, a): return self.binaryop(operator.__pow__, a)
    def __rpow__(self, a):
        return self.binaryop(operator.__pow__, a, reverse=True)
    def __eq__(self, x): return self.binaryop(operator.__eq__, x)
    def __lt__(self, x): return self.binaryop(operator.__lt__, x)
    def __gt__(self, x): return self.binaryop(operator.__gt__, x)
    def __le__(self, x): return self.binaryop(operator.__le__, x)
    def __ge__(self, x): return self.binaryop(operator.__ge__, x)
    def xmax(self, axis=-1):
        """Returns the maximum x-value over one axis
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.xmax()
        4
        """
        return np.max(self._xlist[axis])
    def xmin(self, axis=-1):
        """Returns the minimum x-value over one axis
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.xmin()
        2
        """
        return np.min(self._xlist[axis])
    def ymax(self, axis=-1):
        """Returns the maximum y-value over one axis
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.ymax()
        Waveform(array([1, 2]), array([6, 7]))
        """
        return reducedim(self, np.max(self._y, axis=self.getaxis(axis)),
                         axis=self.getaxis(axis))
    def ymin(self, axis=-1):
        """Returns the minimum y-value over one axis
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.ymin()
        Waveform(array([1, 2]), array([3, 4]))
        """
        return reducedim(self, np.min(self._y, axis=self.getaxis(axis)),
                         axis=self.getaxis(axis))
    def argmax(self, axis=-1):
        """Returns the x-value where the y-value attains it maximum
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.argmax()
        Waveform(array([1, 2]), array([4, 4]))
        """
        return reducedim(self, self.x[axis][np.argmax(self._y, axis=self.getaxis(axis))],
                         axis=self.getaxis(axis))
    def argmin(self, axis=-1):
        """Returns the x-value where the y-value attains it minimum
        Examples:
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.argmin()
        Waveform(array([1, 2]), array([2, 2]))
        """
        return reducedim(self, self.x[axis][np.argmin(self._y, axis=self.getaxis(axis))],
                         axis=self.getaxis(axis))
    def value(self, x, axis = -1):
        """Returns and interpolated at the given x-value
        *x* can be a number or a waveform where the number of dimensions of x is
        is one less than the waveform it is operating on
        Examples:
        1-d waveform
        >>> w1=Waveform(array([1,2,3]),array([3,5,6]))
        >>> w1.value(1.5)
        4.0
        2-d waveform
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.value(2.5)
        Waveform(array([1, 2]), array([ 4., 5.]))
        `x` is a waveform
        >>> w2=Waveform([[1,2],[2,3,4]], array([[3,5,6], [4,6,7]]))
        >>> w2.value(Waveform([[1, 2]], array([2.5, 3.5])))
        Waveform(array([1, 2]), array([ 4. , 6.5]))
        """
        axis = self.getaxis(axis)
        # Interpolate a single 1-D slice at a scalar x.
        def findvalue(y):
            if len(self._xlist[axis]) == 1 and axis == -1:
                return y[-1]
            res = sp.interpolate.interp1d(self._xlist[axis], y)(x)
            try:
                return np.asscalar(res)
            except TypeError:
                return res
        # As findvalue, but x varies per outer index (x is a waveform).
        def findvalue_mdimindex(y, i):
            xindex = list(i)
            del xindex[axis]
            xindex = tuple(xindex)
            res = sp.interpolate.interp1d(self._xlist[axis], y)(x._y[xindex])
            try:
                return np.asscalar(res)
            except TypeError:
                return res
        if iswave(x):
            newyshape = list(self._y.shape)
            del newyshape[axis]
            newyshape = tuple(newyshape)
            newy = apply_along_axis_with_idx(findvalue_mdimindex,
                                             axis,
                                             self._y).reshape(newyshape)
            return reducedim(self, newy, axis=axis)
        outw = applyfunc_and_reducedim(findvalue, self, axis = axis)
        if outw and not isscalar(outw):
            outw.ylabel = self.ylabel
            outw.yunit = self.yunit
            outw.xunits = remove_index(self.xunits, axis)
            outw.xlabels = remove_index(self.xlabels, axis)
        return outw
    def clip(self, xfrom, xto = None, axis=-1):
        """Restrict the waveform to the range defined by xfrom and xto
        >>> w1 = Waveform(array([1.,2.,3.]), array([8., 6., 1.]), ylabel='a')
        >>> w1.clip(2,3)
        Waveform(array([ 2., 3.]), array([ 6., 1.]))
        >>> w1.clip(1.5, 3)
        Waveform(array([ 1.5, 2. , 3. ]), array([ 7., 6., 1.]))
        """
        axis = self.getaxis(axis)
        ifrom = self._xlist[axis].searchsorted(xfrom)
        # NOTE(review): when xto is None (or falsy) ito becomes 0 and the
        # slice below is empty -- looks suspicious; confirm the intended
        # behaviour for a one-sided clip.
        if xto:
            ito = self._xlist[axis].searchsorted(xto, side='right')
        else:
            ito = 0
        newxlist = copy(self._xlist)
        newxlist[axis] = newxlist[axis][ifrom:ito]
        newy = self._y[onedim_index(slice(ifrom,ito), axis, self.ndim)]
        ## Add new items on left and right side if the clip limit
        ## does not coincidence with an already present x-value
        if self._xlist[axis][ifrom] != xfrom:
            newxlist[axis] = np.insert(newxlist[axis], ifrom-1, xfrom)
            func = lambda y: [np.interp(xfrom, self._xlist[axis], y)]
            yfrom = np.apply_along_axis(func, axis, self._y)
            newy = np.concatenate((yfrom, newy),axis=axis)
        if self._xlist[axis][ito-1] != xto:
            newxlist[axis] = np.insert(newxlist[axis], ito, xto)
            func = lambda y: [np.interp(xto, self._xlist[axis], y)]
            yto = np.apply_along_axis(func, axis, self._y)
            newy = np.concatenate((newy, yto),axis=axis)
        return Waveform(newxlist, newy, xunits=self.xunits, yunit=self.yunit,
                        xlabels=self.xlabels, ylabel=self.ylabel)
    # Mathematical functions
    def real(self): return applyfunc(np.real, self, 'real')
    def imag(self): return applyfunc(np.imag, self, 'imag')
    def conjugate(self): return applyfunc(np.conjugate, self, 'conjugate')
    def deriv(self):
        """Calculate derivative of a waveform with respect to the inner x-axis"""
        newxlist = copy(self._xlist)
        # Forward difference: result has one fewer point on the inner axis.
        newxlist[-1] = newxlist[-1][:-1]
        dydx = np.diff(self.y, axis=-1) / np.diff(self._xlist[-1])
        return Waveform(newxlist, dydx, xlabels = self.xlabels)
    def xval(self, axis=-1):
        """Return a waveform with the x-values from the given dimension"""
        y = np.ones(self._y.shape)
        axis = self.getaxis(axis) % self.ndim
        slices = [np.newaxis] * axis + [slice(0, len(self._xlist[axis]))]
        ## Broadcast dimension 0 to axis-1 by transpose
        y.T[:] = self._xlist[axis][slices].T
        newxlist = copy(self._xlist)
        return Waveform(newxlist, y, xunits=self.xunits, yunit=self.xunits[axis],
                        xlabels=self.xlabels, ylabel=self.xlabels[axis])
    # Plotting (wrapper around matplotlib)
    def _plot(self, plotfunc, *args, **kvargs):
        import pylab
        # Auto-generate a legend label unless the caller supplied one
        # without a '%s' placeholder.
        set_label = 'label' not in kvargs or '%s' in kvargs['label']
        if 'label' in kvargs:
            labelarg = kvargs['label']
        else:
            labelarg = None
        if 'axis' in kvargs:
            axis = kvargs['axis']
            del kvargs['axis']
        else:
            axis = -1
        axis = self.getaxis(axis)
        # NOTE(review): pylab.hold() was removed in Matplotlib 3.0 -- this
        # module targets the old Matplotlib API.
        pylab.hold(True)
        indexshape = list(self._y.shape)
        indexshape[axis] = 1
        for i in np.ndindex(*indexshape):
            ## Select all elments in axis 'axis'
            i = list(i)
            i[axis] = slice(None)
            if set_label:
                label = ','.join([self.xlabels[iaxis] + '=' + \
                    str(self._xlist[iaxis][ix]) for iaxis, ix in enumerate(i)
                                  if ix != slice(None)])
                if labelarg != None:
                    label = labelarg % (label,)
                kvargs['label'] = label
            # Limit infinite values
            y = self.y[i]
            y[where(y == inf)] = 1e20
            y[where(y == -inf)] = -1e20
            p=plotfunc(self.x[axis], y, *args, **kvargs)
        pylab.hold(False)
        xlabel = self.xlabels[axis]
        if self.xunits[axis] != '':
            xlabel += ' [%s]'%self.xunits[axis]
        pylab.xlabel(xlabel)
        ylabel = self.ylabel
        if self.yunit != '':
            ylabel += ' [%s]'%self.yunit
        pylab.ylabel(ylabel)
    def plot(self, *args, **kvargs):
        import pylab
        self._plot(pylab.plot, *args, **kvargs)
    def semilogx(self, *args, **kvargs):
        import pylab
        self._plot(pylab.semilogx, *args, **kvargs)
    def semilogy(self, *args, **kvargs):
        import pylab
        self._plot(pylab.semilogy, *args, **kvargs)
    def loglog(self, *args, **kvargs):
        import pylab
        self._plot(pylab.loglog, *args, **kvargs)
    def stem(self, *args, **kvargs):
        import pylab
        self._plot(pylab.stem, *args, **kvargs)
    @property
    def astable(self):
        """Return a table in text format
        >>> print Waveform(array([1,2,3]),array([3,4,5])).astable
        ==== ===
        x0 y
        ==== ===
        1 3
        2 4
        3 5
        ==== ===
        >>> print Waveform(array([1,2]),array([3,4]), xlabels = ('X',), \
                           ylabel = 'Y').astable
        === ===
        X Y
        === ===
        1 3
        2 4
        === ===
        >>> t=Waveform(array([1,2]),array([3,4]), xlabels = ['X'], \
                       ylabel = 'Y').astable
        """
        return astable(self)
    def getaxis(self, axis):
        """Look up axis index by name of xlabel names"""
        # basestring / types.IntType are Python-2-only names.
        if isinstance(axis, basestring):
            if axis not in self.xlabels:
                raise Exception('No axis with xlabel %s (%s)'%(axis, str(self.xlabels)))
            return list(self.xlabels).index(axis)
        elif type(axis) is types.IntType:
            if axis >= self.ndim:
                raise ValueError('axis %d >= number of dimensions'%axis)
            elif axis < 0:
                # Normalise negative axis indices.
                axis = self.ndim + axis
            return axis
        else:
            raise ValueError('Axis %s must be a string or an integer'%str(axis))
    def reducedimension(self, axes):
        """Reduce given axes by selecting the first element
        >>> w = Waveform([[1,2],[3,4]], array([[1,2],[3,4]]))
        >>> w.reducedimension([0])
        Waveform([array([3, 4])], array([1, 2]))
        """
        axes = [self.getaxis(axis) for axis in axes]
        theslice = list(np.index_exp[:] * self.ndim)
        for axis in axes: theslice[axis] = 0
        w = copy(self)
        w._y = self._y[theslice]
        newxlist = []
        newxlabels = []
        newxunits = []
        for axis in range(self.ndim):
            if axis not in axes:
                newxlist.append(w._xlist[axis])
                if w._xunits:
                    newxunits.append(w._xunits[axis])
                if w._xlabels:
                    newxlabels.append(w._xlabels[axis])
        w._xlist = newxlist
        if w._xunits:
            w._xunits = newxunits
        if w._xlabels:
            w._xlabels = newxlabels
        return w
    def get_xunits(self):
        return self._xunits
    def set_xunits(self, units):
        # None means "no units": default to empty strings per dimension.
        if units == None:
            self._xunits = len(self._xlist) * ['']
        else:
            self._xunits = self.__checklabels(units)
    def get_yunit(self):
        if self._yunit != None:
            return self._yunit
        else:
            return ''
    def set_yunit(self, s):
        if not isinstance(s, basestring) and s != None:
            raise ValueError('Unit must be a string')
        self._yunit = s
    def get_xlabels(self):
        return self._xlabels
    def set_xlabels(self, labels):
        # None means "no labels": default to x0, x1, ... per dimension.
        if labels == None:
            self._xlabels = list(['x%d'%i for i in range(len(self._xlist))])
        else:
            self._xlabels = self.__checklabels(labels, unique=True)
    def get_ylabel(self):
        if self._ylabel != None:
            return self._ylabel
        else:
            return 'y'
    def set_ylabel(self, s):
        if not isinstance(s, basestring) and s != None:
            raise ValueError('Label must be a string')
        self._ylabel = s
    x = property(get_x, set_x, doc = 'x values')
    y = property(get_y, set_y, doc = 'y values')
    xlabels = property(get_xlabels, set_xlabels, \
                       doc = 'x-axis list of labels for each dimension')
    ylabel = property(get_ylabel, set_ylabel, doc = 'y-axis label')
    xunits = property(get_xunits, set_xunits,
                      doc = 'x-axis list of units for each dimension')
    yunit = property(get_yunit, set_yunit,
                     doc = 'y-axis unit')
    def __getitem__(self, index):
        if type(index) in (types.IntType, slice, types.EllipsisType):
            index = (index,)
        if index[0] == Ellipsis:
            index = (self.ndim - len(index)) * (slice(None),) + tuple(index)
        else:
            index = tuple(index) + (self.ndim - len(index)) * (slice(None),)
        ## Extend index from right to have same length as dimension
        # NOTE(review): the index was already extended in both branches
        # above, so this extension is a no-op -- confirm.
        index = tuple(index) + (self.ndim - len(index)) * (slice(None),)
        ## Return value if index selects a scalar
        if np.isscalar(self._y[index]):
            return self._y[index]
        if len(index) > self.ndim:
            raise IndexError('Index order exceeds the number of dimensions')
        # Integer indices collapse a dimension; drop its x-values, label
        # and unit from the result.
        newxlist = [x[indexpart]
                    for x, indexpart in zip(self._xlist, index)
                    if type(indexpart) is not types.IntType]
        newxlabels = [label for label,indexpart in zip(self.xlabels, index)
                      if type(indexpart) is not types.IntType]
        newxunits = [label for label,indexpart in zip(self.xunits, index)
                     if type(indexpart) is not types.IntType]
        newy = self._y[index]
        return Waveform(tuple(newxlist), newy,
                        xlabels = newxlabels,
                        xunits = newxunits,
                        ylabel = self.ylabel,
                        yunit = self.yunit)
    def getitem(self, key):
        """Get item of each Y-value
        >>> wave = Waveform([[1,2]], array([{'a':1, 'b':2}, {'a':3, 'b':4}]))
        >>> wave.getitem('a')
        Waveform(array([1, 2]), array([1, 3]))
        """
        def getitem(d):
            return d[key]
        ufuncgetitem = np.vectorize(getitem)
        return Waveform(self._xlist, ufuncgetitem(self._y),
                        xlabels = self.xlabels)
    def swapaxes(self, i, j):
        # Swap two sweep dimensions (x-lists, labels, units and y-axes).
        def list_swap(x, i, j):
            x = list(x)
            x[i], x[j] = (x[j], x[i])
            return x
        i, j = [self.getaxis(axis) for axis in i,j]
        w = copy(self)
        w._xlist = list_swap(w._xlist, i, j)
        if w._xlabels != None:
            w._xlabels = tuple(list_swap(w._xlabels, i, j))
        if w._xunits != None:
            w._xunits = tuple(list_swap(w._xunits, i, j))
        w._y = w._y.swapaxes(i,j)
        return w
    def reorder_axes(self, neworder):
        # Permute sweep dimensions into neworder via successive swaps.
        neworder = [self.getaxis(axis) for axis in neworder]
        result = self
        curorder = range(self.ndim)
        for axis in range(self.ndim):
            if curorder[axis] != neworder[axis]:
                newpos = curorder.index(neworder[axis])
                result = result.swapaxes(axis, newpos)
                curorder[axis], curorder[newpos] = curorder[newpos], curorder[axis]
        return result
    def axesiterator(self, axes):
        """Iterate over all combinations of given axes and return sub waveforms
        Values of xlabels, xunits and x-values are also returned along with
        each sub waveform
        Each iteration yields a tuple (subwave, xlabels, xvalues, xunits)
        """
        ## Translate axis name or number to number
        axes = [self.getaxis(axis) for axis in axes]
        if not set(axes).issubset(range(self.ndim)):
            raise ValueError("invalid axes argument")
        xlabels = [self.xlabels[axis] for axis in axes]
        xunits = [self.xunits[axis] for axis in axes]
        xlist = [self.get_x(axis) for axis in axes]
        xindex = [range(len(x)) for x in xlist]
        for xindices in cartesian(xindex):
            xvalues = [xlist[i][xindices[i]] for i in range(len(axes))]
            # Selected axes get a concrete index; others keep everything.
            def calc_index(axis):
                if axis in axes:
                    return xindices[axes.index(axis)]
                else:
                    return Ellipsis
            subw = self[tuple(calc_index(axis) for axis in range(self.ndim))]
            yield subw, xlabels, xvalues, xunits
    def apply_along_axis(self, func1d, axis=-1, ylabel=None, yunit=None):
        """Apply a function to 1-D slices along the given axis.
        Execute `func1d(a, x)` where `func1d` operates on 1-D arrays and `a`
        is a 1-D slice of `w` along `axis`
        """
        axis = self.getaxis(axis)
        newy = np.apply_along_axis(func1d, axis, self._y, self.x[axis])
        if newy.shape == self.shape:
            # Shape preserved: keep all sweeps.
            return Waveform(self.x, newy, xlabels=self.xlabels,
                            xunits=self.xunits,
                            ylabel=ylabel, yunit=yunit)
        else:
            if len(newy.shape) == 0:
                return newy
            elif newy.shape == tuple(remove_index(self.shape, axis)):
                # func1d reduced each slice to a scalar: drop the axis.
                return Waveform(remove_index(self.x, axis), newy,
                                xlabels=remove_index(self.xlabels, axis),
                                xunits=remove_index(self.xunits, axis),
                                ylabel=ylabel, yunit=yunit)
            else:
                raise ValueError('Shape mismatch')
    def __repr__(self):
        if self._dim > 1:
            xlist = self._xlist
        else:
            xlist = self._xlist[0]
        return self.__class__.__name__ + "(" + repr(xlist) + ", " + \
            repr(self.y) + ")"
    def __checklabels(self, labels, unique=False):
        # Validate and normalise a label/unit list for the sweep dimensions.
        if not labels == None:
            try:
                labels = list(labels)
            except:
                raise ValueError('Cannot convert labels to list')
            if len(labels) != self._dim:
                raise ValueError('Label list should have the same length (%d)'
                                 ' as the number of dimensions (%d)'%
                                 (len(labels), self._dim))
            for label in labels:
                if not isinstance(label, basestring):
                    raise ValueError('Labels should be of type string')
            if unique and len(set(labels)) != len(labels):
                raise ValueError('Labels must be unique')
        return labels
## Utility functions
def iswave(w):
    """Returns true if argument is a waveform"""
    # Simple type test used throughout this module to branch between
    # waveform-aware and plain-numeric code paths.
    return isinstance(w, Waveform)
def assert_waveform(w):
    # Guard helper: fail fast with a readable message when a Waveform is
    # required.  Note that `assert` is stripped under `python -O`.
    assert iswave(w), "%s is not a waveform object"%str(w)
def applyfunc(func, w, funcname = None):
    """Apply ``func`` element-wise to the y-values of ``w``.

    If ``w`` is a Waveform a new Waveform with the same sweeps is
    returned, with its ylabel set to ``funcname(old label)`` (falling
    back to ``func.__name__`` when ``funcname`` is not given).  For any
    other argument ``func(w)`` is returned directly.
    """
    if not iswave(w):
        return func(w)
    result = Waveform(w.x, w.y, xlabels=w.xlabels, xunits=w.xunits)
    result.y = func(result._y)
    if w.ylabel:
        shown_name = funcname if funcname else func.__name__
        result.ylabel = shown_name + '(' + w.ylabel + ')'
    return result
def _broadcast_apply(func, args,
                     ylabel = None, yunit = None,
                     sameunit = False):
    """Re-order axes so numpy broadcasting can be used and apply function

    ``args`` is a pair where at least one element is expected to be a
    Waveform; the result takes its sweeps, labels and units from the
    argument with the most dimensions.
    """
    if len(args) > 2:
        # Bug fix: `raise NotImplemented(...)` raised a non-exception
        # object (a TypeError at runtime); NotImplementedError is the
        # correct exception type here.
        raise NotImplementedError("Broadcast applies with > 2 args not "
                                  "implemented")
    ## Find argument with highest number of dimensions
    def key_func(i):
        if iswave(args[i]):
            return args[i].ndim
        else:
            return -1
    ihidim = sorted(range(len(args)), key = key_func, reverse=True)[0]
    bothwaves = iswave(args[0]) and iswave(args[1])
    if bothwaves:
        original_order = args[ihidim].xlabels
        commonaxes = set.intersection(*[set(arg.xlabels) for arg in args])
        ## Reorder axes to make the argument conform to numpy broadcast rules
        reordered_args = []
        for arg in args:
            neworder = list(set(arg.xlabels)-commonaxes) + list(commonaxes)
            reordered_args.append(arg.reorder_axes(neworder))
        args = reordered_args
    ## Get y array or argument itself if not a waveform
    argsy = []
    for arg in args:
        if iswave(arg):
            argsy.append(arg._y)
        else:
            argsy.append(arg)
    # Bug fix: the Python-2-only builtin apply() is replaced by direct
    # argument unpacking.
    newy = func(*argsy)
    if sameunit:
        yunit = yunit or args[ihidim].yunit
        ylabel = ylabel or args[ihidim].ylabel
    result = Waveform(args[ihidim]._xlist, newy,
                      xlabels = args[ihidim].xlabels,
                      xunits = args[ihidim].xunits,
                      ylabel = ylabel, yunit = yunit)
    ## Reorder axes to the original order of the arg with highest dimension
    if bothwaves:
        result = result.reorder_axes(original_order)
    return result
def applyfunc_and_reducedim(func, w, axis = -1, ylabel = None, yunit = None):
    """Apply a function that reduces the dimension by one and return a new waveform or float if zero-rank
    """
    axis = w.getaxis(axis)
    # The reduced axis is removed from the output shape.
    newyshape = list(w._y.shape)
    del newyshape[axis]
    # apply_along_axis comes from the numpy import at module top; func must
    # map a 1-D slice to a scalar for the reshape below to be valid.
    newy = apply_along_axis(func, axis, w._y).reshape(newyshape)
    if ylabel != None:
        ylabel = func.__name__ + '(' + ylabel + ')'
    return reducedim(w, newy, axis=axis, ylabel=ylabel, yunit=yunit)
def reducedim(w, newy, axis=-1, ylabel=None, yunit=None):
    """Reduce the dimension by one and return a new waveform or float if zero-rank

    ``newy`` is the already-reduced y data; ``axis`` names the sweep of
    ``w`` that was removed.  Labels/units default to those of ``w``.
    """
    # Bug fix: numpy removed `rank` (use np.ndim) and `np.asscalar`
    # (use ndarray.item); both old names raise in modern numpy.
    if np.ndim(newy) == 0:
        return np.asarray(newy).item()
    if ylabel is None:
        ylabel = w.ylabel
    if yunit is None:
        yunit = w.yunit
    # Drop the reduced axis from the sweeps, labels and units.
    newxlist = list(w._xlist)
    del(newxlist[axis])
    newxlabels = list(w.xlabels)
    del(newxlabels[axis])
    newxunits = list(w.xunits)
    del(newxunits[axis])
    return Waveform(newxlist, newy, xlabels = newxlabels, ylabel = ylabel,
                    xunits = newxunits, yunit = yunit)
def to_xy_matrices(*waveforms):
    """Return x and y matrices"""
    if not compatible(*waveforms):
        raise ValueError('arguments are not compatible')
    if waveforms[0].ragged:
        # Ragged data: recursively flatten nested object arrays before
        # pairing x- and y-values row by row.
        def flatten_ragged(a):
            if hasattr(a,'dtype') and a.dtype == 'object':
                return concatenate(map(flatten_ragged, a.tolist()))
            else:
                return a
        xvalues = zip(*map(flatten_ragged, waveforms[0]._xlist))
        yvalues = zip(*[flatten_ragged(w._y) for w in waveforms])
    else:
        # Gridded data: x rows are the cartesian product of all sweeps.
        xvalues = cartesian(waveforms[0]._xlist)
        yvalues = zip(*[list(w._y.flat) for w in waveforms])
    ## Filter NaN values
    # NOTE(review): len()/indexing on zip() results is Python-2 only;
    # under Python 3 this silently falls through via the TypeError branch.
    try:
        indices = [i for i in range(len(yvalues)) if not np.isnan(yvalues[i]).all()]
        xvalues = [xvalues[i] for i in indices]
        yvalues = [yvalues[i] for i in indices]
    except TypeError:
        pass
    return np.array(xvalues), np.array(yvalues)
def astable(*waveforms):
    """Return a table of one or more waveforms with the same sweeps in text format
    Examples:
    >>> w1 = Waveform([range(2)], array([3,4]), ylabel='V1')
    >>> w2 = Waveform([range(2)], array([4,6]), ylabel='V2')
    >>> print astable(w1,w2)
    ==== ==== ====
    x0 V1 V2
    ==== ==== ====
    0 3 4
    1 4 6
    ==== ==== ====
    """
    from pycircuit.utilities import rst
    xvalues, yvalues = [a.tolist() for a in to_xy_matrices(*waveforms)]
    xlabels = waveforms[0].xlabels
    ylabels = [w.ylabel for w in waveforms]
    xunits = waveforms[0].xunits
    yunits = [w.yunit for w in waveforms]
    # A units header row is emitted only if at least one waveform has a
    # non-empty y-unit.  (reduce is the Python-2 builtin here.)
    hasunits = not reduce(operator.__and__, [yunit == '' for yunit in yunits])
    if hasunits:
        # Two header rows: labels and units.
        return rst.table(map(lambda x,y: list(x) + list(y),
                             [xlabels] + [xunits] + xvalues,
                             [ylabels] + [yunits] + yvalues),
                         headerrows = 2)
    else:
        return rst.table(map(lambda x,y: list(x) + list(y),
                             [xlabels] + xvalues,
                             [ylabels] + yvalues))
def apply_along_axis_with_idx(func1d,axis,arr,*args):
    """ Execute func1d(arr[i], i, *args) where func1d takes 1-D arrays
    and arr is an N-d array. i varies so as to apply the function
    along the given axis for each 1-d subarray in arr.

    This mirrors numpy.apply_along_axis, additionally passing the index
    tuple of each slice to func1d.
    """
    arr = np.asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis,nd))
    # `ind` walks the outer (non-axis) indices; `i` is the full index with
    # a slice in the axis position.
    ind = [0]*(nd-1)
    i = np.zeros(nd,'O')
    indlist = range(nd)
    indlist.remove(axis)
    i[axis] = slice(None,None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # Evaluate once to learn the output element type/shape.
    res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = np.zeros(outshape,np.asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Carry the increment through the outer dimensions.
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist,ind)
            res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # Vector-valued func1d: the axis dimension becomes len(res).
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = np.zeros(outshape,np.asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def compatible(*args):
    """Return True if the given waveforms have the same x-values
    Examples:
    >>> w1 = Waveform(array([1,2,3]),array([3,5,6]))
    >>> w2 = Waveform(array([1,2,3]),array([1,1.5,2]))
    >>> w3 = Waveform(array([1,2]),array([3,5,6]))
    >>> compatible(w1, w2)
    True
    >>> compatible(w1, w3)
    False
    """
    # NOTE(review): the check below is disabled -- the early `return True`
    # makes this function always report compatibility, so the docstring's
    # `False` example does not hold.  Presumably disabled deliberately
    # (the shape comparison would not work for ragged waveforms); confirm
    # before re-enabling, since binaryop/compose rely on this behaviour.
    return True
    return set([w.shape for w in args]) == set([args[0].shape])
def compose(wlist, x = None, xlabel = None):
    """Compose list of waveforms into a new waveform where the
    waveform list becomes the outer sweep
    Examples:
    >>> wlist=[Waveform(array([1,2,3]),array([3,5,6])), \
               Waveform(array([1,2,3]),array([1,1.5,2]))]
    >>> w = compose(wlist, x = array([1,2]), xlabel = 'index')
    >>> w
    Waveform([array([1, 2]), array([1, 2, 3])], array([[ 3. , 5. , 6. ],
           [ 1. , 1.5, 2. ]]))
    >>> w.xlabels
    ('index', 'x0')
    """
    if not compatible(*wlist):
        # Bug fix: the exceptions below were previously *returned*, not
        # raised, so callers never saw the error.
        raise ValueError('Waveforms in wlist are not compatible')
    # `is (not) None` avoids numpy's elementwise `!=`/`==` comparison when
    # x is an ndarray (which would make the truth value ambiguous).
    if x is not None and len(wlist) != len(x):
        raise ValueError('Number of x-values must be the same '
                         'as the number of waveforms')
    # Stack the y data; the waveform list index becomes the outer sweep.
    newy = np.array([w.y for w in wlist])
    if x is None:
        newx = [range(len(wlist))] + wlist[0].x
    else:
        newx = [x] + wlist[0].x
    if xlabel is None:
        xlabel = 'composite index'
    return Waveform(newx, newy,
                    xlabels = [xlabel] + list(wlist[0].xlabels),
                    ylabel = wlist[0].ylabel,
                    xunits = [''] + list(wlist[0].xunits),
                    yunit = wlist[0].yunit)
# Cartesian product operator of list of lists
def cartesian(listList):
if listList:
result = []
prod = cartesian(listList[:-1])
for x in prod:
for y in listList[-1]:
result.append(x + (y,))
return result
return [()]
def onedim_index(index, axis, ndim):
    """Return an index from a 1-dimensional index along the given axis
    >>> index = onedim_index(np.index_exp[1,2], 1, 3)
    >>> A=np.eye(3)
    >>> A[index]
    array([[ 0., 0.],
           [ 1., 0.],
           [ 0., 1.]])
    """
    # Normalise a possibly negative axis, then pad with full slices so the
    # given index lands in the requested position.
    nleading = axis % ndim
    full_slice = slice(None)
    return tuple([full_slice] * nleading) + (index,)
def wavefunc(func):
    """Decorator for creating free functions from waveform methods
    If the first argument is a waveform the waveform method with the same
    name as the function will be called otherwise the decorated function
    is called instead.
    """
    def wrapper(*args, **kvargs):
        if iswave(args[0]):
            method = getattr(Waveform, func.__name__)
            return method(*args, **kvargs)
        return func(*args, **kvargs)
    wrapper.__name__ = func.__name__
    ## Copy docstring from waveform method
    if func.__doc__:
        wrapper.__doc__ = func.__doc__
    else:
        wrapper.__doc__ = getattr(Waveform, func.__name__).__doc__
    return wrapper
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
{"hexsha": "f3ada4fee34080a2dd261df958f8aabeab9109fa", "size": 39046, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycircuit/post/waveform.py", "max_stars_repo_name": "michaelnt/pycircuit", "max_stars_repo_head_hexsha": "ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pycircuit/post/waveform.py", "max_issues_repo_name": "michaelnt/pycircuit", "max_issues_repo_head_hexsha": "ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pycircuit/post/waveform.py", "max_forks_repo_name": "michaelnt/pycircuit", "max_forks_repo_head_hexsha": "ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9224283305, "max_line_length": 105, "alphanum_fraction": 0.5294268299, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 10182}
|
import sys
import re
import glob
import os
import h5py
import pdb
import pandas as pd
import scipy as sp
import numpy as np
import statsmodels
# import limix.stats.fdr as fdr
def smartAppend(table,name,value):
    """ helper function for appending in a dictionary

    Appends ``value`` to the list stored under ``table[name]``, creating
    an empty list on first use so callers can accumulate per-key lists
    without pre-initialising the dict.
    """
    # dict.setdefault replaces the non-idiomatic `name not in table.keys()`
    # membership test plus explicit initialisation.
    table.setdefault(name, []).append(value)
def dumpDictHdf5(RV,o):
    """ Dump a dictionary where each page is a list or an array

    Each key of ``RV`` becomes a gzip-compressed, chunked dataset in the
    h5py file/group ``o``.
    """
    for key in RV.keys():
        # Bug fix: sp.array used the numpy alias re-exported by old scipy
        # versions; it has been removed from scipy, so use numpy directly.
        o.create_dataset(name=key,data=np.array(RV[key]),chunks=True,compression='gzip')
def smartDumpDictHdf5(RV,o):
    """ Dump a dictionary where each page is a list or an array or still a dictionary (in this case, it iterates)

    Nested dictionaries become h5py sub-groups; leaf values become
    gzip-compressed, chunked datasets.
    """
    for key in RV.keys():
        if type(RV[key])==dict:
            g = o.create_group(key)
            smartDumpDictHdf5(RV[key],g)
        else:
            # Bug fix: sp.array (numpy alias removed from scipy) replaced
            # by np.array.
            o.create_dataset(name=key,data=np.array(RV[key]),chunks=True,compression='gzip')
def fdr(p_vals):
    """Benjamini-Hochberg-style FDR values: p * m / rank, clipped at 1.

    ``p_vals`` is a numpy array of raw p-values; an array of the same
    shape is returned.  No monotonicity enforcement is applied.
    """
    from scipy.stats import rankdata
    m = len(p_vals)
    ranks = rankdata(p_vals)
    adjusted = p_vals * m / ranks
    # Clip to the valid p-value range instead of mask-assignment.
    return np.minimum(adjusted, 1)
# Directory holding the per-gene CellRegMap association result .tsv files
# that the __main__ block below globs over.
path_results = "/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/debug_May2021/REVISION/CRM_association/"
# Collect per-gene association results (*.tsv in path_results), apply a
# per-gene Bonferroni adjustment to each SNP p-value, and write a single
# summary.csv table.
if __name__ == '__main__':
    fname = os.path.join(path_results,"*.tsv")
    #prinm(outfilename)
    files = glob.glob(fname)
    # print (files)
    #import pdb; pdb.set_trace()
    x = 0
    table = {}
    count_success = 0
    count_failed = 0
    #breakpoint()
    for file in files:
        #breakpoint()
        # Skip permutation result files; only observed-data results are
        # summarised.
        if re.search("perm",file) is not None:
            continue
        x += 1
        if x%500 == 0: print (x)
        df = pd.read_csv(file, index_col=0)
        nsnps = int(len(df))
        if nsnps==0:
            continue
        # Gene name is taken from the file name (last path component,
        # before the first dot).
        line = str(file).split("/")
        gene = str(line[-1]).split(".")[0]
        chrom = df['chrom'].values[0]
        #print(gene)
        pval = df['pv'].values
        #pval[np.isnan(pval)]=1
        # Missing p-values are treated as non-significant (set to 1).
        pval[pd.isnull(pval)]=1
        for i in range(nsnps):
            try:
                #import pdb; pdb.set_trace()
                temp={}
                temp['gene'] = gene
                temp['n_snps'] = nsnps
                temp['chrom'] = chrom
                #print(nsnps)
                temp['pv_raw'] = df['pv'].values[i]
                temp['snpID'] = df['variant'].values[i]
                #FWER adjusted (gene-level) pvalue
                # Bonferroni: multiply by the number of SNPs tested for
                # this gene, then clamp into [0, 1].
                temp['pv'] = nsnps*temp['pv_raw']
                if temp['pv']>1: temp['pv'] = 1
                if temp['pv']<0: temp['pv'] = 0
                count_success+=1
            except:
                # NOTE(review): bare except silently skips malformed rows
                # (e.g. missing columns) -- deliberate best-effort here,
                # but it also hides unexpected errors.
                count_failed+=1
                continue
            for key in temp.keys():
                smartAppend(table,key,temp[key])
        #import pdb; pdb.set_trace()
    for key in table.keys():
        table[key] = sp.array(table[key])
    print (count_success)
    df = pd.DataFrame.from_dict(table)
    import os
    outfile = "summary.csv"
    myp = os.path.join(path_results,outfile)
    df.to_csv(myp)
|
{"hexsha": "0a6bb43913a108f0e319cc0c324a2e7cd0a326d1", "size": 3110, "ext": "py", "lang": "Python", "max_stars_repo_path": "endodiff/usage/scripts/summarise_associations.py", "max_stars_repo_name": "annacuomo/CellRegMap_analyses", "max_stars_repo_head_hexsha": "942dac12c376675a1fd06de872e82b4b038d1c31", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "endodiff/usage/scripts/summarise_associations.py", "max_issues_repo_name": "annacuomo/CellRegMap_analyses", "max_issues_repo_head_hexsha": "942dac12c376675a1fd06de872e82b4b038d1c31", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "endodiff/usage/scripts/summarise_associations.py", "max_forks_repo_name": "annacuomo/CellRegMap_analyses", "max_forks_repo_head_hexsha": "942dac12c376675a1fd06de872e82b4b038d1c31", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7962962963, "max_line_length": 126, "alphanum_fraction": 0.5588424437, "include": true, "reason": "import numpy,import scipy,from scipy,import statsmodels", "num_tokens": 828}
|
import os
import numpy as np
import json
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import tqdm
def normalize(matrix):
    """Flatten ``matrix``, z-score the entries, and rescale to a unit vector.

    Returns the resulting 1-D numpy array (unit Euclidean norm).
    """
    flat = matrix.flatten()
    zscored = (flat - np.mean(flat)) / np.std(flat)
    return zscored / np.linalg.norm(zscored)
def detect_red_light(I):
    '''
    This function takes a numpy array <I> and returns a list <bounding_boxes>.
    The list <bounding_boxes> should have one element for each red light in the
    image. Each element of <bounding_boxes> should itself be a list, containing
    four integers that specify a bounding box: the row and column index of the
    top left corner and the row and column index of the bottom right corner (in
    that order). See the code below for an example.
    Note that PIL loads images in RGB order, so:
    I[:,:,0] is the red channel
    I[:,:,1] is the green channel
    I[:,:,2] is the blue channel
    '''
    # NOTE(review): this function relies on module-level globals set by the
    # driver loop below: ker_height, ker_width (template size) and kernel_norm
    # (the normalized template vector).  It cannot be called before those exist.
    bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below.
    '''
    BEGIN YOUR CODE
    '''
    I_height, I_width, I_dims = I.shape
    # Cosine-similarity threshold for declaring a match between the window
    # and the template.
    threshold = 0.60
    top_left = [0, 0] # height, width
    # Slide a ker_height x ker_width window over the image.
    while ((top_left[0] + ker_height) <= I_height):
        # print(top_left)
        # slide the moving box from left to right
        # if I hit the end of the row, start again a little further down
        if ((top_left[1] + ker_width) > I_width):
            # Row exhausted: move down by ~10% of the template height (min 2 px)
            # and restart at the left edge.
            top_left[0] = top_left[0] + max(2, int(ker_height / 10))
            top_left[1] = 0
        else:
            h, w = top_left
            box = I[h:(h + ker_height), w:(w + ker_width), :]
            box = normalize(box)
            # Inner product of two unit vectors = cosine similarity.
            prod = np.inner(box, kernel_norm)
            if (prod > threshold):
                bounding_boxes.append([h, w, (h + ker_height), (w + ker_width)])
                # Skip a full template width after a hit to avoid duplicates.
                top_left[1] = top_left[1] + ker_width
            else:
                top_left[1] += 1
    '''
    END YOUR CODE
    '''
    for i in range(len(bounding_boxes)):
        assert len(bounding_boxes[i]) == 4
    return bounding_boxes
# Load template ("kernel") image names; only files with 'kernel' in the name count.
kernel_path = './kernels/'
file_names = sorted(os.listdir(kernel_path))
kernel_names = [k for k in file_names if 'kernel' in k]
preds = {}
data_path = '../data/RedLights2011_Medium'
# set a path for saving predictions:
preds_path = '../data/hw01_preds'
os.makedirs(preds_path, exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
# 50 because the code takes a very long time over the entire dataset
# for i in range(len(file_names)):
for i in range(50):
    I = Image.open(os.path.join(data_path, file_names[i]))
    I = np.asarray(I)
    boxes_list = []
    # Run the detector once per template; detections from all templates are
    # accumulated for this image.  Note: the loop sets the module-level globals
    # (ker_height, ker_width, kernel_norm) that detect_red_light reads.
    for k in range(len(kernel_names)):
        kernel = Image.open(os.path.join(kernel_path, kernel_names[k]))
        kernel = np.asarray(kernel)
        ker_height, ker_width, ker_dims = kernel.shape
        kernel_norm = normalize(kernel)
        boxes = detect_red_light(I)
        boxes_list.extend(boxes)
    preds[file_names[i]] = boxes_list
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path, 'preds2.json'), 'w') as f:
    json.dump(preds, f)
|
{"hexsha": "409b35b11531e71d94ad3a537c1beaec1e6cdfb3", "size": 3488, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_predictions.py", "max_stars_repo_name": "kvnmei/caltech-ee148-spring2020-hw01", "max_stars_repo_head_hexsha": "a96e31a0479f4a56eb9d709c3069b1b4aee768d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_predictions.py", "max_issues_repo_name": "kvnmei/caltech-ee148-spring2020-hw01", "max_issues_repo_head_hexsha": "a96e31a0479f4a56eb9d709c3069b1b4aee768d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_predictions.py", "max_forks_repo_name": "kvnmei/caltech-ee148-spring2020-hw01", "max_forks_repo_head_hexsha": "a96e31a0479f4a56eb9d709c3069b1b4aee768d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7090909091, "max_line_length": 102, "alphanum_fraction": 0.6430619266, "include": true, "reason": "import numpy", "num_tokens": 886}
|
The Center for Cognitive Liberty & Ethics, aka the CCLE, is a nonprofit organization that works solely to advance sustainable social policies that protect freedom of thought. The CCLE was founded to promote public awareness and legal recognition of cognitive liberty and the right of each individual to think independently, to have decision-making authority over matters affecting his or her mind, and to engage in the full spectrum of possible thought. The CCLE bases its policies on the guiding values of privacy, autonomy, and choice.
|
{"hexsha": "6eaff473611e31bf3b9d05d3c0c0c7db4f3678df", "size": 556, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Center_for_Cognitive_Liberty_%26_Ethics.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Center_for_Cognitive_Liberty_%26_Ethics.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Center_for_Cognitive_Liberty_%26_Ethics.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 111.2, "max_line_length": 552, "alphanum_fraction": 0.8237410072, "num_tokens": 100}
|
# Just a script to make recognizing faces easier with few functions
# LBPH + HAAR recognizer combo is capable of identifying person from another, and training runtime
# DNN can be used in place for HAAR for detecting initial faces before LBPH recognition
import os
import time
import numpy as np
import cv2
import pathlib
import pickle
from sys import platform
class Facer:
	"""Static helper around OpenCV face capture, detection and LBPH recognition.

	Typical flow: camOn() -> take_faces() (capture labelled samples from the
	webcam) -> train_faces_lbph() -> recognize_faces_lbph().  Face detection
	uses either a Haar cascade or a Caffe DNN model, both loaded from files
	located next to this script.  All state lives in class attributes; the
	class is used as a namespace and never instantiated.
	"""
	face_recognizer_lbph = None  # cv2.face LBPH recognizer, created lazily
	face_recognizer_dnn = None   # Caffe DNN, loaded lazily in detect_faces_dnn
	people = {}                  # label index -> list of gray face images
	nameIndex = {}               # person name -> label index
	onCam = None                 # cv2.VideoCapture while the camera is open
	realPath = os.path.dirname(os.path.realpath(__file__))
	face_cascade = cv2.CascadeClassifier(realPath + '/' + "haarcascade_frontalface_default.xml")
	@staticmethod
	def camOn():
		"""Open the default webcam (device 0) once; return True when it is usable."""
		if Facer.onCam is None:
			Facer.onCam = cv2.VideoCapture(0)
			if platform != "win32": # Assume all UNIX OS'es need this
				Facer.onCam.set(cv2.CAP_PROP_BUFFERSIZE, 5)
			if not Facer.onCam.isOpened():
				print("Unable to open camera")
				Facer.onCam = None
				return False
		return True
	@staticmethod
	def camOff():
		"""Release the webcam; return True if it was closed or already closed."""
		if Facer.onCam is None:
			return True
		if Facer.onCam.isOpened():
			Facer.onCam.release()
			Facer.onCam = None
			return True
		return False
	# Raised by camFrame when the frame's mean brightness is below the requested minimum.
	class LightLevelLow(Exception):
		pass
	# Raised by take_faces when no face could be detected before the forced timeout.
	class NoFacesFound(Exception):
		pass
	# Returns frame on success, None if camera cannot be opened or None if reading frame failed
	# Parameter minLightLevel specifies the minimum brightness allowed, if brightness is too low LightLevelLow is thrown
	@staticmethod
	def camFrame(minLightLevel = 0):
		if not Facer.onCam or not Facer.onCam.isOpened():
			print("Error: Camera not open")
			raise Exception("Camera not open")
		ret, frame = Facer.onCam.read()
		if not ret or frame is None:
			return None
		if minLightLevel > 0:
			# Mean over all pixels/channels serves as a cheap brightness estimate.
			if np.mean(frame) < minLightLevel:
				raise Facer.LightLevelLow
		return frame
	@staticmethod
	def camClearBuffer():
		"""Drop a few stale frames from the capture buffer (non-Windows only)."""
		if platform != "win32": # Same here
			for i in range(5): # Clear old frame
				Facer.onCam.grab()
	# Takes pictures with webcam, used for training, if count is 0 timeout is used as main count
	# If minLightLevel is greater than 0 and it's not hit, LightLevelLow is thrown
	@staticmethod
	def take_faces(personName, count = 100, timeout = 3, savePicturePath = None, recreate = False, useDNN = False, minLightLevel = 0):
		"""Capture up to ``count`` face samples for ``personName`` from the webcam.

		Stores the captured gray face images in Facer.people under the label
		index for ``personName`` (extending Facer.nameIndex when new).  When
		``savePicturePath`` is set, each face is also written to disk.  Returns
		True on success, False on failure; may raise LightLevelLow/NoFacesFound.
		"""
		completed = 0
		if recreate:
			Facer.people = {}
			Facer.nameIndex = {}
		Facer.camClearBuffer()
		#print(f"Taking faces, useDNN:{useDNN}")
		facelist = []
		lastTime = time.time()
		startTime = time.time()
		forcedTimeout = timeout * 2 # Forced timeout limit, if we can't find faces by then we fail
		while time.time() - startTime < timeout:
			if count > 0 and completed >= count:
				break
			if count > 0:
				print (f"Taking frame: {completed + 1}/{count}", end="\r")
			else:
				print (f"Taking frame: {completed + 1}", end="\r")
			try:
				frame = Facer.camFrame(minLightLevel)
			except Facer.LightLevelLow:
				raise
			if frame is None:
				continue
			# Use DNN for taking faces always, way more accurate even if it takes longer
			faces = None
			if useDNN:
				faces = Facer.detect_faces_dnn(frame)
			else:
				faces = Facer.detect_faces_haar(frame)
			if faces is None or len(faces) == 0:
				if time.time() - lastTime > 0.1: # Raise timeout each 100ms if face isn't found
					timeout += 0.1
					lastTime = time.time()
					if timeout >= forcedTimeout: # We stop trying if faces simply can't be found
						raise Facer.NoFacesFound
				continue
			facelist.append(faces[0]) # Append first found face
			if savePicturePath:
				try:
					cv2.imwrite(os.path.join(savePicturePath, f"{personName}/face_{completed}.png"), faces[0])
				except Exception as e:
					print(f"Failed to save frame: {e}")
					return False
			completed += 1
		print("\n")
		if len(facelist) > 0:
			# Reuse the person's existing label index, or assign the next free one.
			idx = Facer.nameIndex.get(personName, len(Facer.people))
			Facer.people[idx] = facelist
			Facer.nameIndex[personName] = idx
		else:
			print("List empty after take")
			return False
		if count > 0:
			return completed == count
		else:
			return True
	# Detect any faces inside an image using haar cascades
	# returns the face area images in gray and face rectangles
	@staticmethod
	def detect_faces_haar(img, sceneGray = True):
		# Get the gray from image and equalize it
		gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
		gray = cv2.equalizeHist(gray)
		# Detect faces with multiscale haar
		faces = Facer.face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
		# No faces? Return nothing
		if len(faces) == 0:
			return None
		grays = []
		for face in faces:
			(x, y, w, h) = face
			grayArea = None
			# NOTE(review): the slices below use y:y+w and x:x+h — w and h look
			# swapped (expected y:y+h, x:x+w); harmless for the usual square
			# detections but worth confirming.
			if sceneGray:
				grayArea = gray[y:y+w, x:x+h]
			else:
				area = img[y:y+w, x:x+h]
				grayArea = cv2.cvtColor(area, cv2.COLOR_BGR2GRAY)
				grayArea = cv2.equalizeHist(grayArea)
			if grayArea is not None:
				grays.append(grayArea)
		return grays
	# Detect any faces inside an image using deep neural networks
	# uses pre-trained caffe models and protos
	# returns the face area images in gray and face rectangles
	@staticmethod
	def detect_faces_dnn(img, sceneGray = True):
		# Load pre-trained model
		if Facer.face_recognizer_dnn is None:
			print ("Loading caffe model")
			Facer.face_recognizer_dnn = cv2.dnn.readNetFromCaffe(Facer.realPath + '/' + "MobileNet-SSD.prototxt", Facer.realPath + '/' + "MobileNet-SSD.caffemodel")
		# Resize image and blob it
		(h, w) = img.shape[:2]
		resizedImg = cv2.resize(img, (224, 224))
		blobbed = cv2.dnn.blobFromImage(resizedImg, 1, (224, 224), (104, 117, 123))
		# Set the blobbed image as input for neural network, then forward it
		Facer.face_recognizer_dnn.setInput(blobbed)
		faces = Facer.face_recognizer_dnn.forward()
		# No faces? Return nothing
		if len(faces) == 0:
			return None
		areas = []
		for i in range(0, faces.shape[2]):
			confidence = faces[0, 0, i, 2]
			if confidence < 0.2: # skip low confidence faces
				continue
			# Detection box is normalized [startX, startY, endX, endY];
			# scale back to the original image size.
			box = faces[0, 0, i, 3:7] * np.array([w, h, w, h])
			(sX, sY, eX, eY) = box.astype("int")
			grayed = None
			if sceneGray:
				grayed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
				grayed = cv2.equalizeHist(grayed)
				grayed = grayed[sY:eY, sX:eX]
			else:
				cropped = img[sY:eY, sX:eX]
				grayed = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
				grayed = cv2.equalizeHist(grayed)
			if grayed is not None:
				areas.append(grayed)
		return areas
	# Trains using taken data or images inside a folder, images must be in their own subfolder named after the person
	# ex. images/MyName, and you give it "images" directory as the data_folder
	@staticmethod
	def train_faces_lbph(data_folder = None, recreate = False):
		"""Train (or incrementally update) the LBPH recognizer.

		Data comes either from Facer.people (filled by take_faces) or, when
		``data_folder`` is given, from per-person image subfolders loaded into
		Facer.people first.  Returns True on success, False otherwise.
		"""
		requireOneTime = False
		if Facer.face_recognizer_lbph is None or recreate is True:
			requireOneTime = True
			Facer.face_recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()
		if data_folder:
			subdirs = os.listdir(data_folder)
			for subdir in subdirs:
				print("Preparing LBPH from images of {}".format(subdir))
				fileDir = os.path.join(data_folder, subdir)
				files = os.listdir(fileDir)
				pSize = len(files)
				facelist = []
				for i, file in enumerate(files):
					# Prevent issues with files starting with a dot and non-image files
					if file.startswith(".") or not file.lower().endswith((".png", ".jpg", ".jpeg")):
						continue
					image_path = os.path.join(fileDir, file)
					image = cv2.imread(image_path)
					print(f"Progress: {i+1}/{pSize} images", end="\r")
					if image is not None:
						facelist.append(image)
				if len(facelist) > 0:
					idx = Facer.nameIndex.get(subdir, len(Facer.people))
					Facer.people[idx] = facelist
					Facer.nameIndex[subdir] = idx
				else:
					print("Error: facelist empty loading from folder")
				print('\n')
		else:
			print("Preparing LBPH from data")
		if len(Facer.people) == 0:
			print("Failed to prepare data")
			return False
		print("Training LBPH.. ", end="")
		try:
			#print(f"\nPeople length: {len(Facer.people)}")
			for index, data in Facer.people.items():
				#print(f"Index: {index}, Data length: {len(data)}")
				labels = []
				datalist = []
				for d in data:
					labels.append(index)
					datalist.append(d)
				#print(f"Labels length: {len(labels)}, Datalist length: {len(datalist)}")
				if len(labels) > 0 and len(datalist) > 0 and len(labels) == len(datalist):
					# First batch trains from scratch; subsequent batches update.
					if requireOneTime is True:
						requireOneTime = False
						Facer.face_recognizer_lbph.train(datalist, np.array(labels))
						print("Trained")
					else:
						Facer.face_recognizer_lbph.update(datalist, np.array(labels))
						print("Updated")
				else:
					print("Error: Empty or mismatched data")
					return False
		except Exception as e:
			print(f"Failed, reason: {e}")
			return False
		return True
	# Save trained LBPH recognition data
	# requires that you train LBPH first
	@staticmethod
	def save_trained_lbph(lbph_path, names_path):
		"""Persist the trained LBPH model and the name->label map to disk."""
		print("Saving LBPH.. ", end="")
		if not Facer.face_recognizer_lbph:
			print("Unable to save. LBPH recognizer not trained yet")
			return
		try:
			Facer.face_recognizer_lbph.write(lbph_path)
			with open(names_path, "wb") as f:
				pickle.dump(Facer.nameIndex, f)
			print("Success")
		except Exception as e:
			print(f"Unable to save. Reason: {e}")
	# Load trained LBPH recognition data
	@staticmethod
	def load_trained_lbph(lbph_path, names_path):
		"""Load a previously saved LBPH model and name->label map from disk."""
		print("Loading LBPH.. ", end="")
		if not Facer.face_recognizer_lbph:
			Facer.face_recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()
		try:
			Facer.face_recognizer_lbph.read(lbph_path)
			with open(names_path, "rb") as f:
				Facer.nameIndex = pickle.load(f)
			print(f"Success, indexes: {Facer.nameIndex}")
		except Exception as e:
			print(f"Exception on load: {e}")
	# Attempts to recognize lbph trained faces within input image
	# returns boolean if any face is detected and tuple of recognized faces
	# tuple contains index and rectangle of face for each person (name is None if not recognized)
	# argument threshold is the threshold for faces being recognized
	# higher value is lower tolerance, meaning random faces
	# could be recognized as other people
	@staticmethod
	def recognize_faces_lbph(image, threshold = 0.8, useDNN = False):
		if image is not None:
			try:
				if useDNN:
					faces = Facer.detect_faces_dnn(image)
				else:
					faces = Facer.detect_faces_haar(image)
			except Exception as e:
				print(f"Detection error: {e}")
				return False, None
			else:
				if faces is not None:
					found = False
					recognized = []
					for face in faces:
						try:
							# predict returns (label, distance): lower distance = better match.
							label, difference = Facer.face_recognizer_lbph.predict(face)
						except Exception as e:
							print(f"Could not predict: {e}")
							return False, None
						else:
							try:
								found = True
								# Accept the match only when the LBPH distance is
								# below threshold*100; otherwise report the face
								# as detected-but-unrecognized (name None).
								if difference < (threshold * 100):
									name = None
									for k, v in Facer.nameIndex.items():
										if v == label:
											name = k
											break
									recognized.append((name, face))
								else:
									recognized.append((None, face))
							except Exception as e:
								print(f"Exception on recognized append: {e}")
								return False, None
					return found, recognized
				return False, None
			return False, None
		return False, None
|
{"hexsha": "7c776a08b3757419cfc892502bc0e9bb6b896bda", "size": 11164, "ext": "py", "lang": "Python", "max_stars_repo_path": "Submods/MAS Additions/MASM/scripts/facer/facer.py", "max_stars_repo_name": "QuadILOP1/MAS-Additions", "max_stars_repo_head_hexsha": "9275d0c0aac1ae0a9245ada9a2c97ef4147222f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Submods/MAS Additions/MASM/scripts/facer/facer.py", "max_issues_repo_name": "QuadILOP1/MAS-Additions", "max_issues_repo_head_hexsha": "9275d0c0aac1ae0a9245ada9a2c97ef4147222f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Submods/MAS Additions/MASM/scripts/facer/facer.py", "max_forks_repo_name": "QuadILOP1/MAS-Additions", "max_forks_repo_head_hexsha": "9275d0c0aac1ae0a9245ada9a2c97ef4147222f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4564643799, "max_line_length": 155, "alphanum_fraction": 0.6828197779, "include": true, "reason": "import numpy", "num_tokens": 3146}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 12:38:44 2017
@author: ahefny, zmarinho
"""
from theano.tensor.shared_randomstreams import RandomStreams
'''
decorator of noisy_model.
'''
class NoisyModel(object):
    """Adds Gaussian noise to a model's observations and latent state.

    Observation noise is drawn per call from the numpy RNG; state noise is a
    Theano shared random stream built once at construction time.

    Parameters
    ----------
    obs_noise : float
        Std. dev. of the observation noise (0 disables it).
    obs_loc : float
        Mean of the observation noise.
    state_noise : float
        Std. dev. of the state noise (0 disables it).
    state_loc : float
        Mean of the state noise.
    state_dim : int
        Dimensionality of the state vector.
    rng : numpy.random.RandomState
        Source RNG; its seed also seeds the Theano stream.
    """
    def __init__(self, obs_noise=0.0, obs_loc=0.0, state_noise=0.0, state_loc=0.0,
                 state_dim=0, rng=None):
        self._srng = RandomStreams(seed=rng.seed())
        self.rng = rng
        self._obs_loc = obs_loc
        self._state_loc = state_loc
        self._obs_std = obs_noise
        self._state_std = state_noise
        self._state_dim = state_dim
        # Bug fix: the state-noise stream previously used std=obs_noise even
        # though its magnitude is tracked in self._state_std; use state_noise.
        self._state_noise = self._srng.normal(size=[self._state_dim],
                                              std=state_noise, avg=state_loc)

    def _noisy_state(self, state):
        """Return ``state`` plus the symbolic state-noise term when enabled."""
        if self._state_std > 0:
            state = state + self._state_noise
        return state

    def _noisy_obs(self, obs):
        """Return ``obs`` plus freshly drawn numpy Gaussian noise when enabled."""
        noise = 0.0
        if self._obs_std > 0:
            noise = self.rng.normal(loc=self._obs_loc, scale=self._obs_std, size=obs.shape)
        o = obs + noise
        return o
|
{"hexsha": "c10fffaa4e8dfe7a111bf7f0d0350046817cf477", "size": 1086, "ext": "py", "lang": "Python", "max_stars_repo_path": "rpsp/rpspnets/psr_lite/noisy_model.py", "max_stars_repo_name": "ahefnycmu/rpsp", "max_stars_repo_head_hexsha": "ff3aa3e89a91bb4afb7bad932d2c04691a727a63", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-11-03T12:04:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T08:55:54.000Z", "max_issues_repo_path": "rpsp/rpspnets/psr_lite/noisy_model.py", "max_issues_repo_name": "ahefnycmu/rpsp", "max_issues_repo_head_hexsha": "ff3aa3e89a91bb4afb7bad932d2c04691a727a63", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rpsp/rpspnets/psr_lite/noisy_model.py", "max_forks_repo_name": "ahefnycmu/rpsp", "max_forks_repo_head_hexsha": "ff3aa3e89a91bb4afb7bad932d2c04691a727a63", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3513513514, "max_line_length": 100, "alphanum_fraction": 0.6187845304, "include": true, "reason": "from theano", "num_tokens": 305}
|
# import the necessary packages
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets
from nolearn.dbn import DBN
import numpy as np
import cv2
import scipy.io as sio
import pickle
# grab the MNIST dataset (if this is the first time you are running
# this script, this make take a minute -- the 55mb MNIST digit dataset
# will be downloaded)
# NOTE(review): Python 2 script; fetch_mldata and nolearn's DBN are legacy APIs.
print "[X] downloading data..."
dataset = datasets.fetch_mldata("MNIST Original")
# scale the data to the range [0, 1] and then construct the training
# and testing splits
(trainX, testX, trainY, testY) = train_test_split(
	dataset.data / 255.0, dataset.target.astype("int0"), test_size = 0.33)
print trainX.shape, trainY.shape
print type(trainY), trainY
# train the Deep Belief Network with 784 input units (the flattened,
# 28x28 grayscale image), 300 hidden units, 10 output units (one for
# each possible output classification, which are the digits 1-10)
# Load a cached trained model from data.pkl if present; otherwise train
# a fresh DBN (784 -> 900 -> 60 units) and cache it.
try:
	with open('data.pkl', 'rb') as input:
		dbn = pickle.load(input)
except:
	dbn = DBN(
		[trainX.shape[1], 900, 60],
		learn_rates = 0.3,
		learn_rate_decays = 0.9,
		epochs = 10,
		verbose = 1)
	dbn.fit(trainX, trainY)
	with open('data.pkl', 'wb') as output:
		pickle.dump(dbn, output, pickle.HIGHEST_PROTOCOL)
# # compute the predictions for the test data and show a classification
# # report
preds = dbn.predict(testX)
print classification_report(testY, preds)
# Class labels for the recognizer: Devanagari digits (0-9) followed by the
# romanized names of the Nepali vowels and consonants, indexed by predicted label.
nepali = ["0",
	"1",
	"2",
	"3",
	"4",
	"5",
	"6",
	"7",
	"8",
	"9",
	"a",
	"aa",
	"i",
	"ii",
	"u",
	"uu",
	"ri",
	"ai",
	"aii",
	"o",
	"ou",
	"am",
	"a:",
	"ka",
	"kha",
	"ga",
	"gha",
	"nha",
	"cha",
	"chha",
	"ja",
	"jha",
	"ya",
	"ta",
	"tha",
	"da",
	"dha",
	"ara",
	"ta:",
	"tha:",
	"da:",
	"dha:",
	"na",
	"pa",
	"pha",
	"bha",
	"ma",
	"ye",
	"ra",
	"la",
	"wa",
	"sa",
	"kha",
	"sa",
	"sha-kha",
	"sha",
	"ha",
	"gya",
	"tra"
	]
# # randomly select a few of the test instances
# for i in np.random.choice(np.arange(0, len(testY)), size = (10,)):
# 	# classify the digit
# 	pred = dbn.predict(np.atleast_2d(testX[i]))
# 	# reshape the feature vector to be a 28x28 pixel image, then change
# 	# the data type to be an unsigned 8-bit integer
# 	image = (testX[i] * 255).reshape((28, 28)).astype("uint8")
# 	# show the image and prediction
# 	print "Actual digit is {0}, predicted {1}".format(testY[i], pred[0])
# 	cv2.imshow("Digit", image)
# 	cv2.waitKey(0)
# Load the user-supplied character image and invert it so ink is bright.
img = cv2.imread("./input.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
inv = 255-gray
# Scan for the bounding box of all pixels brighter than 100.
# NOTE(review): x_top and y_top start at 0 and are only ever decreased, so
# they stay 0 unless indices could be negative — likely should start at the
# image extents; confirm intended cropping behavior.
x_top = 0
y_top = 0
x_bottom = 0
y_bottom = 0
for x,row in enumerate(inv):
	for y,pix in enumerate(row):
		if pix>100:
			if x<x_top:
				x_top = x
			if x>x_bottom:
				x_bottom = x
			if y<y_top:
				y_top = y
			if y>y_bottom:
				y_bottom = y
# Crop to the detected region, pad to a square canvas, and center the glyph.
img_croped = inv[x_top:x_bottom, y_top:y_bottom]
if img_croped.shape[0] > img_croped.shape[1]:
	size_max = img_croped.shape[0]
else:
	size_max = img_croped.shape[1]
padding = 3
size_max = size_max + 2*padding
blank_image = np.zeros((size_max,size_max), np.uint8)
height_offset = (size_max - img_croped.shape[0])/2
width_offset = (size_max - img_croped.shape[1])/2
blank_image[height_offset:height_offset + img_croped.shape[0],width_offset:width_offset + img_croped.shape[1]] = img_croped
# Downscale to the 28x28 input size the DBN was trained on and classify.
final = cv2.resize(blank_image, (28, 28))
print final.shape
final_image = np.ravel(final)/255
pred = dbn.predict(np.atleast_2d(final_image))
print "The input image is ", nepali[int(pred[0])]
cv2.imshow('img',final)
cv2.waitKey(0)
|
{"hexsha": "6070a37ba77a11984c77ccd8a7b01de054bdcee5", "size": 3773, "ext": "py", "lang": "Python", "max_stars_repo_path": "nepali.py", "max_stars_repo_name": "sujitmhj/devanagari-handwritting-recognition", "max_stars_repo_head_hexsha": "c503fd5b05077eb59fc834e8b6942222c117f172", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-20T12:38:49.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-20T12:38:49.000Z", "max_issues_repo_path": "nepali.py", "max_issues_repo_name": "sujitmhj/devanagari-handwritting-recognition", "max_issues_repo_head_hexsha": "c503fd5b05077eb59fc834e8b6942222c117f172", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nepali.py", "max_forks_repo_name": "sujitmhj/devanagari-handwritting-recognition", "max_forks_repo_head_hexsha": "c503fd5b05077eb59fc834e8b6942222c117f172", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-04-13T07:24:43.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-30T02:05:02.000Z", "avg_line_length": 22.7289156627, "max_line_length": 123, "alphanum_fraction": 0.6183408428, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1171}
|
from keras.models import load_model
import numpy as np
from keras.datasets import mnist
import numpy as np  # NOTE(review): duplicate import; harmless but redundant
# Load MNIST, scale to [0, 1], and add a trailing channel axis for the conv net.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32')/255.
x_test = x_test.astype('float32')/255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
# Load the previously trained convolutional autoencoder and reconstruct the test set.
autoencoder = load_model('./conv_ae_model.h5')
decoded_imgs = autoencoder.predict(x_test,batch_size=128)
import matplotlib.pyplot as plt
# Plot the first n originals (top row) against their reconstructions (bottom row).
n = 10
plt.figure(figsize=(20,4))
for i in range(n):
    #original
    ax = plt.subplot(2, n, i+1)
    plt.imshow(x_test[i].reshape(28,28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # reconstruction
    ax = plt.subplot(2, n, i + 1+ n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
# NOTE(review): savefig is called after show(); with interactive backends the
# figure may be cleared on close, producing an empty PNG — confirm intended order.
plt.show()
plt.savefig('./conv_ae_reconstr.png')
plt.close()
# plt.figure(figsize=(20,8))
# for i in range(n):
#     ax = plt.subplot(1, n, i+1)
#     plt.imshow(encoded[i].reshape(4, 4*8).T)
#     plt.gray()
#     ax.get_xaxis().set_visible(False)
#     ax.get_yaxis().set_visible(False)
# plt.show()
|
{"hexsha": "d9cdd3ffa8b7a99cd6fffa12fcd10253b258d598", "size": 1166, "ext": "py", "lang": "Python", "max_stars_repo_path": "framework/load_conv_ae.py", "max_stars_repo_name": "mullachv/causal_notes", "max_stars_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "framework/load_conv_ae.py", "max_issues_repo_name": "mullachv/causal_notes", "max_issues_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "framework/load_conv_ae.py", "max_forks_repo_name": "mullachv/causal_notes", "max_forks_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9111111111, "max_line_length": 57, "alphanum_fraction": 0.7024013722, "include": true, "reason": "import numpy", "num_tokens": 361}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 19:20:14 2018
@author: nemec
"""
import numpy as np
from multiprocessing import Pool
#calculating the life time of the spots according to the choosen decay rate
def decay(spot_area, time, D):
    """Return the time at which a spot of area ``spot_area`` (emerging at
    ``time``) has fully decayed, assuming a linear decay rate ``D``."""
    return spot_area / D + time
#calculate the meridional flow
def merflow(lat):
    """Meridional flow speed at colatitude ``lat`` (degrees).

    Follows a sin profile within 75 degrees of the equator line used here
    (abs(lat - 90) <= 75) and is zero closer to the poles.
    """
    if abs(lat - 90) <= 75:
        return 22 * np.sin(2.4 * (90 - lat) * np.pi / 180) * 7.0922e-3
    return 0
#calculate the differential rotation
def diffrot(lat):
    """Differential rotation rate at colatitude ``lat`` (degrees),
    as a quartic in sin of the latitude angle (90 - lat)."""
    s = np.sin((90 - lat) * np.pi / 180)
    return 0.1813 - 2.3 * s ** 2. - 1.62 * s ** 4.
#define the decay rate
D = 30.9 #MHS per day
#setting up the grid on which to mask the spots
#in this case pixels have size of 0.1 by 0.1 degree
factor = 10.
# NOTE(review): num=360*factor is a float (3600.0); newer numpy requires an
# integer ``num`` for linspace — confirm the numpy version in use.
xvalues = np.around(np.linspace(0,359,num=360*factor),decimals=1)
yvalues = np.around(np.linspace(0,180,num=180*factor),decimals=1)
x,y = np.meshgrid(xvalues,yvalues)
#for doing the sin/cos calculations
conv = np.pi/180.
# =============================================================================
# if you want to experiment with random parameters for the positions and areas,
#just comment out the line, where I read in the input file and define the coordinates
# and area yourself
# =============================================================================
#reading in the file
data = np.loadtxt("AR-mod.txt")
#defining the coordinates
# Columns of AR-mod.txt as used below: 0 = time, 1/2 = positive-polarity
# latitude/longitude, 3/4 = negative-polarity latitude/longitude, 5 = area.
long_pos = data[:, 2]
long_neg = data[:,4]
#need to redifine grid so that north pole is a + 90 degree and south pole at -90 degree
lat_pos = 90-data[:,1]
lat_neg = 90 -data[:,3]
#define the area at the time of emergence
spot = data[:,5]
#define which part of the input should then be used
start = 70724
end = 74519
grid_max = 359
#only use this for calculations that should not be run parallel!
#positivx= open("positivx3.txt","w")
#positivy= open("positivy3.txt","w")
#negativx= open("negativx3.txt","w")
#negativy= open("negativy3.txt","w")
#for i in range(start,end):
#starting doing the spot masking parallel
def f(i):
    """Mask both polarity patches of active region `i` onto the (long, lat)
    grid for every day from emergence until the region has decayed, appending
    masked pixel coordinates to four per-region text files:
    positivx/positivy (positive polarity) and negativx/negativy (negative
    polarity).  Each output row is "value \\t patch-radius \\t day".

    Reads module-level globals (x, y, data, spot, lat_pos, lat_neg, long_pos,
    long_neg, grid_max, conv, D) and calls the helpers decay() and merflow(),
    which are defined earlier in the file (outside this excerpt).

    NOTE(review): the four file handles are never closed/flushed explicitly;
    under multiprocessing this relies on worker-process exit.
    NOTE(review): np.int was removed in NumPy >= 1.24 -- replace with int()
    when upgrading.
    """
    #for i in range(start,end):
    #print(i)
    positivx= open("positivx{}.txt".format(i),"w")
    positivy= open("positivy{}.txt".format(i),"w")
    negativx= open("negativx{}.txt".format(i),"w")
    negativy= open("negativy{}.txt".format(i),"w")
    spot_area = spot[i]
    time = data[i,0]
    # decay() (defined above this excerpt) presumably returns the day at which
    # the region has fully decayed, given area, emergence day and rate D.
    t = decay(spot_area,time,D)
    phi_pos = 90-lat_pos[i]
    phi_neg = 90-lat_neg[i]
    #print(t)
    # --- Case 1: region decays within its emergence day --------------------
    # Mask each polarity patch once, handling longitude wrap-around at 0/360.
    if np.int(t-time) == 0:
        # convert area (MHS) to an equivalent square patch in degrees
        area = spot[i]/(30.81*np.pi/2.*np.pi/2.)
        r = area**(1./2.)
        #define positive polarity patch
        # 1/cos(phi) stretches longitude extent toward the poles
        x_min_pos = long_pos[i]-r/2.*1./np.cos(phi_pos*conv)
        x_max_pos = long_pos[i]+r/2.*1./np.cos(phi_pos*conv)
        y_min_pos = lat_pos[i]-r/2.
        y_max_pos = lat_pos[i]+r/2.
        #define negative polarity patch
        x_min_neg = long_neg[i]-r/2.*1./np.cos(phi_neg*conv)
        x_max_neg = long_neg[i]+r/2.*1./np.cos(phi_neg*conv)
        y_min_neg = lat_neg[i]-r/2.
        y_max_neg = lat_neg[i]+r/2.
        # patch straddles longitude 0: write the two wrapped halves separately
        if x_min_pos < 0 and x_max_pos >0:
            x_min_pos1= grid_max+x_min_pos
            x_pos_pos = x[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos = y[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
            x_pos_pos1 = x[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos1 = y[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos1:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos1:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        # patch entirely below longitude 0: shift both bounds up by grid_max
        if x_min_pos < 0. and x_max_pos <0:
            x_min_pos2= grid_max+x_min_pos
            x_max_pos2 = grid_max+x_max_pos
            x_pos_pos2 = x[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos2 = y[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos2:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos2:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        #
        # patch fully inside the grid on the left edge: no wrap needed
        if x_min_pos > 0.:
            x_pos_pos3 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos3 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos3:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos3:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        # same three low-edge cases for the negative patch
        if x_min_neg < 0. and x_max_neg >0:
            x_min_neg1= grid_max+x_min_neg
            x_pos_neg1 = x[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg= y[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg1:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
            x_pos_neg1 = x[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg1 = y[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg1:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg1:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
        if x_min_neg < 0. and x_max_neg <0:
            x_min_neg2= grid_max+x_min_neg
            x_max_neg2 = grid_max +x_max_neg
            x_pos_neg2 = x[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg2 = y[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg2:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg2:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
        if x_min_neg > 0.:
            x_pos_neg3 = x[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg3 = y[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg3:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg3:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
        # wrap-around at the high longitude edge (x_max beyond grid_max)
        if x_max_pos >grid_max and x_min_pos <grid_max:
            x_max_pos4= x_max_pos-grid_max
            x_pos_pos = x[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos = y[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
            x_pos_pos4 = x[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos4 = y[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos4:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos4:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        if x_max_pos >grid_max and x_min_pos >grid_max:
            x_min_pos5= x_min_pos-grid_max
            x_max_pos5 =x_max_pos-grid_max
            x_pos_pos5 = x[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos5 = y[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos5:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos5:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        #
        # NOTE(review): this branch also fires whenever either of the two
        # branches above did, so those pixels get written a second time; it
        # has no counterpart for the negative patch and does not appear in
        # the decay loop below -- confirm whether it is intentional.
        if x_max_pos > grid_max:
            x_pos_pos6 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            y_pos_pos6 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
            for item in x_pos_pos6:
                positivx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_pos6:
                positivy.write("%f \t %f \t %f\n" % (item,r,time))
        if x_max_neg >grid_max and x_min_neg <grid_max:
            # NOTE(review): uses x_max_pos here although this is the negative
            # patch -- likely a copy-paste slip; presumably should be
            # x_max_neg-grid_max (confirm).
            x_max_neg4= x_max_pos-grid_max
            x_pos_neg = x[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg = y[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
            x_pos_neg4 = x[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg4 = y[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg4:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg4:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
        if x_max_neg >grid_max and x_min_neg >grid_max:
            x_min_neg5= x_min_neg-grid_max
            x_max_neg5 =x_max_neg-grid_max
            x_pos_neg5 = x[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
            y_pos_neg5 = y[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
            for item in x_pos_neg5:
                negativx.write("%f \t %f \t %f\n" % (item,r,time))
            for item in y_pos_neg5:
                negativy.write("%f \t %f \t %f\n" % (item,r,time))
    # --- Case 2: region survives past the emergence day --------------------
    # Track patch positions day by day (meridional flow + differential
    # rotation) while the area decays linearly at rate D.
    if np.int(t-time) > 0:
        lat_pos_new = np.zeros(np.int(t-time)+1)
        lat_neg_new = np.zeros(np.int(t-time)+1)
        long_pos_new = np.zeros(np.int(t-time)+1)
        long_neg_new = np.zeros(np.int(t-time)+1)
        lat_pos_new[0]= lat_pos[i]
        lat_neg_new[0]= lat_neg[i]
        long_pos_new[0]= long_pos[i]
        long_neg_new[0]= long_neg[i]
        # NOTE(review): this assignment is immediately overwritten by the loop
        n = time
        for n in range(np.int(time),np.int(t)+1):
            #update the are according to the decay law
            #in that case it's a linear!
            area =(spot[i]-D*(n-time))/(30.81*np.pi/2.*np.pi/2.) #in degree
            if area <= 0.:
                r = 0.
            else:
                r = area**(1./2.)
            # Emergence day: same masking as case 1 (rows stamped with `time`,
            # which equals n in this branch).
            if n == time:
                #define positive polarity patch
                x_min_pos = long_pos[i]-r/2.*1./np.cos(phi_pos*conv)
                x_max_pos = long_pos[i]+r/2.*1./np.cos(phi_pos*conv)
                y_min_pos = lat_pos[i]-r/2.
                y_max_pos = lat_pos[i]+r/2.
                #define negative polarity patch
                x_min_neg = long_neg[i]-r/2.*1./np.cos(phi_neg*conv)
                x_max_neg = long_neg[i]+r/2.*1./np.cos(phi_neg*conv)
                y_min_neg = lat_neg[i]-r/2.
                y_max_neg = lat_neg[i]+r/2.
                if x_min_pos < 0 and x_max_pos >0:
                    x_min_pos1= grid_max+x_min_pos
                    x_pos_pos = x[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos = y[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                    x_pos_pos1 = x[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos1 = y[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos1:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos1:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_min_pos < 0. and x_max_pos <0:
                    x_min_pos2= grid_max+x_min_pos
                    x_max_pos2 = grid_max +x_max_pos
                    x_pos_pos2 = x[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos2 = y[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos2:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos2:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_min_pos > 0.:
                    x_pos_pos3 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos3 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos3:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos3:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_min_neg < 0. and x_max_neg >0:
                    #print(x_min_neg)
                    x_min_neg1= grid_max+x_min_neg
                    #print(x_min_neg1)
                    x_pos_neg1 = x[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg= y[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg1:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                    x_pos_neg1 = x[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg1 = y[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg1:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg1:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_min_neg < 0. and x_max_neg <0:
                    x_min_neg2= grid_max+x_min_neg
                    x_max_neg2 = grid_max +x_max_neg
                    x_pos_neg2 = x[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg2 = y[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg2:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg2:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_min_neg > 0.:
                    x_pos_neg3 = x[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg3 = y[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg3:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg3:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_max_pos >grid_max and x_min_pos <grid_max:
                    x_max_pos4= x_max_pos-grid_max
                    x_pos_pos = x[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos = y[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                    x_pos_pos4 = x[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos4 = y[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos4:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos4:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_max_pos >grid_max and x_min_pos >grid_max:
                    x_min_pos5= x_min_pos-grid_max
                    x_max_pos5 =x_max_pos-grid_max
                    x_pos_pos5 = x[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos5 = y[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos5:
                        positivx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_pos5:
                        positivy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_max_neg >grid_max and x_min_neg <grid_max:
                    # NOTE(review): x_max_pos used for the negative patch --
                    # likely should be x_max_neg-grid_max (same slip as above).
                    x_max_neg4= x_max_pos-grid_max
                    x_pos_neg = x[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg = y[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                    x_pos_neg4 = x[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg4 = y[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg4:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg4:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
                if x_max_neg >grid_max and x_min_neg >grid_max:
                    x_min_neg5= x_min_neg-grid_max
                    x_max_neg5 =x_max_neg-grid_max
                    x_pos_neg5 = x[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg5 = y[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg5:
                        negativx.write("%f \t %f \t %f\n" % (item,r,time))
                    for item in y_pos_neg5:
                        negativy.write("%f \t %f \t %f\n" % (item,r,time))
            # Subsequent days: advect the patch centres, then mask (rows
            # stamped with the current day n).
            if n >time:
                a = np.int(n-time-1)
                b = np.int(n-time)
                #I think that this if-statements could be skipped and the positions just be
                #updated, but I need to think about that again....
                # NOTE(review): the <=90 and >90 branches below execute
                # identical updates, so the split is indeed redundant.
                if lat_pos_new[a] <= 90:
                    u = merflow(lat_pos_new[a])
                    lat_pos_new[b] = lat_pos_new[a]-u
                    rot= diffrot(lat_pos_new[a])
                    long_pos_new[b] = long_pos_new[a]+rot
                if lat_neg_new[a] <= 90:
                    u = merflow(lat_neg_new[a])
                    lat_neg_new[b] = lat_neg_new[a]-u
                    rot= diffrot(lat_neg_new[a])
                    long_neg_new[b] = long_neg_new[a]+rot
                if lat_pos_new[a] > 90:
                    u = merflow(lat_pos_new[a])
                    lat_pos_new[b] = lat_pos_new[a]-u
                    rot= diffrot(lat_pos_new[a])
                    long_pos_new[b] = long_pos_new[a]+rot
                if lat_neg_new[a] > 90:
                    u = merflow(lat_neg_new[a])
                    lat_neg_new[b] = lat_neg_new[a]-u
                    rot= diffrot(lat_neg_new[a])
                    long_neg_new[b] = long_neg_new[a]+rot
                x_min_pos = long_pos_new[b]-r/2.*1./np.cos(phi_pos*conv)
                x_max_pos = long_pos_new[b]+r/2.*1./np.cos(phi_pos*conv)
                y_min_pos = lat_pos_new[b]-r/2.
                y_max_pos = lat_pos_new[b]+r/2.
                x_min_neg = long_neg_new[b]-r/2.*1./np.cos(phi_neg*conv)
                x_max_neg = long_neg_new[b]+r/2.*1./np.cos(phi_neg*conv)
                y_min_neg = lat_neg_new[b]-r/2.
                y_max_neg = lat_neg_new[b]+r/2.
                if x_min_pos < 0 and x_max_pos >0:
                    x_min_pos1= grid_max+x_min_pos
                    x_pos_pos = x[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos = y[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                    x_pos_pos1 = x[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos1 = y[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos1:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos1:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_min_pos < 0. and x_max_pos <0:
                    x_min_pos2= grid_max+x_min_pos
                    x_max_pos2 =grid_max+x_max_pos
                    x_pos_pos2 = x[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos2 = y[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos2:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos2:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                #
                if x_min_pos > 0.:
                    x_pos_pos3 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos3 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos3:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos3:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_min_neg < 0. and x_max_neg >0:
                    #print(x_min_neg,n)
                    #print(x_max_neg)
                    x_min_neg1= grid_max+x_min_neg
                    x_pos_neg1 = x[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg= y[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg1:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                    x_pos_neg1 = x[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg1 = y[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg1:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg1:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_min_neg < 0. and x_max_neg <0:
                    # print(x_min_neg,n)
                    x_min_neg2= grid_max+x_min_neg
                    x_max_neg2 = grid_max +x_max_neg
                    #print(x_min_neg2,x_max_neg2,n)
                    x_pos_neg2 = x[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg2 = y[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg2:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg2:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_min_neg > 0.:
                    x_pos_neg3 = x[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg3 = y[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg3:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg3:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_max_pos >grid_max and x_min_pos <grid_max:
                    x_max_pos4= x_max_pos-grid_max
                    x_pos_pos = x[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos = y[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                    x_pos_pos4 = x[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos4 = y[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos4:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos4:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_max_pos >grid_max and x_min_pos >grid_max:
                    x_min_pos5= x_min_pos-grid_max
                    x_max_pos5 =x_max_pos-grid_max
                    x_pos_pos5 = x[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
                    y_pos_pos5 = y[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
                    for item in x_pos_pos5:
                        positivx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_pos5:
                        positivy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_max_neg >grid_max and x_min_neg <grid_max:
                    # NOTE(review): x_max_pos used for the negative patch --
                    # likely should be x_max_neg-grid_max (same slip as above).
                    x_max_neg4= x_max_pos-grid_max
                    x_pos_neg = x[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg = y[np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                    x_pos_neg4 = x[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg4 = y[np.where((x<=x_max_neg4) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg4:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg4:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
                if x_max_neg >grid_max and x_min_neg >grid_max:
                    x_min_neg5= x_min_neg-grid_max
                    x_max_neg5 =x_max_neg-grid_max
                    x_pos_neg5 = x[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
                    y_pos_neg5 = y[np.where((x >= x_min_neg5) & (x<=x_max_neg5) & (y>=y_min_neg) & (y <=y_max_neg))]
                    for item in x_pos_neg5:
                        negativx.write("%f \t %f \t %f\n" % (item,r,n))
                    for item in y_pos_neg5:
                        negativy.write("%f \t %f \t %f\n" % (item,r,n))
#
# Mask all selected regions in parallel with 4 worker processes.
# NOTE(review): Pool is presumably multiprocessing.Pool imported above this
# excerpt -- confirm; also note end+1 includes row `end` itself, unlike the
# commented-out serial loop above which used range(start, end).
# NOTE(review): the pool is never close()d/join()ed; results are discarded.
pool = Pool(4)
pool.map(f, range(start, end + 1))
#
#
#
|
{"hexsha": "690be628bd23115d69407135a6241d8e5d043542", "size": 29569, "ext": "py", "lang": "Python", "max_stars_repo_path": "decay_spots.py", "max_stars_repo_name": "rtagirov/ff-sftm", "max_stars_repo_head_hexsha": "b899440c980ec827486b596f237279851f3be428", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "decay_spots.py", "max_issues_repo_name": "rtagirov/ff-sftm", "max_issues_repo_head_hexsha": "b899440c980ec827486b596f237279851f3be428", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "decay_spots.py", "max_forks_repo_name": "rtagirov/ff-sftm", "max_forks_repo_head_hexsha": "b899440c980ec827486b596f237279851f3be428", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3730834753, "max_line_length": 117, "alphanum_fraction": 0.4677533904, "include": true, "reason": "import numpy", "num_tokens": 8679}
|
from __future__ import print_function, division
import sys
import time
from copy import copy, deepcopy
from os.path import join, exists
from collections import Counter
from math import log
import itertools
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from ikelos.data import Vocabulary
from keras.layers import LSTM, Dense, Embedding, Distribute, Dropout, Input
from keras.callbacks import Callback, ProgbarLogger, ModelCheckpoint, ProgbarV2, LearningRateScheduler
from keras.utils.generic_utils import Progbar
from keras.regularizers import l2
from keras.optimizers import Adam, SGD
from keras.engine import Model
import keras.backend as K
# --- Python 2/3 compatibility shims ----------------------------------------
try:
    input = raw_input  # Python 2: use raw_input for interactive prompts
except NameError:
    pass  # Python 3: built-in input() already behaves this way
try:
    import cPickle as pickle  # Python 2: faster C implementation
except ImportError:
    # Fixed: on Python 3 the old bare `except: pass` left the name `pickle`
    # unbound (examine() would then raise NameError); fall back to stdlib.
    import pickle
from ..common import make_logger
from .igor import Igor
# Deep/recursive Keras model graphs can exceed the default recursion limit.
sys.setrecursionlimit(40000)
def compose(*layers):
    """Right-to-left function composition.

    compose(f, g, h)(x) == f(g(h(x))); with no layers it is the identity.
    Used to stack Keras layers in the order they would be written
    mathematically.
    """
    def composed(value):
        for apply_layer in reversed(layers):
            value = apply_layer(value)
        return value
    return composed
class LanguageModel(object):
    """LSTM word-level language model driven by an Igor configuration.

    Builds a two-layer LSTM over pretrained embeddings, trains it with the
    generators supplied by `igor`, and provides utilities to evaluate,
    interactively sample, and inspect per-sentence perplexities.
    """

    def __init__(self, igor):
        # The run name encodes the start timestamp so log files don't collide.
        now = datetime.now()
        self.run_name = "fergusr_{}mo_{}day_{}hr_{}min".format(now.month, now.day,
                                                               now.hour, now.minute)
        log_location = join(igor.log_dir, self.run_name+".log")
        self.logger = igor.logger = make_logger(igor, log_location)
        self.igor = igor

    @classmethod
    def from_config(cls, config):
        """Construct, prep, and build a ready-to-train model from a config."""
        igor = Igor(config)
        igor.prep()
        model = cls(igor)
        model.make()
        return model

    def make(self):
        """Assemble and compile the Keras graph: embed -> LSTM -> LSTM -> softmax."""
        B = self.igor.batch_size
        R = self.igor.rnn_size
        S = self.igor.max_sequence_len
        V = self.igor.vocab_size
        E = self.igor.embedding_size
        emb_W = self.igor.embeddings.astype(K.floatx())
        ## dropout parameters
        p_emb = self.igor.p_emb_dropout
        p_W = self.igor.p_W_dropout
        p_U = self.igor.p_U_dropout
        p_dense = self.igor.p_dense_dropout
        w_decay = self.igor.weight_decay

        def embedding_parameters():
            return {"W_regularizer": l2(w_decay),
                    "weights": [emb_W],
                    "mask_zero": True,
                    "dropout": p_emb}

        def sequence_parameters():
            return {"return_sequences": True,
                    "dropout_W": p_W,
                    "dropout_U": p_U,
                    "U_regularizer": l2(w_decay),
                    "W_regularizer": l2(w_decay)}

        def predict_parameters():
            return {"activation": 'softmax',
                    "W_regularizer": l2(w_decay),
                    "b_regularizer": l2(w_decay)}

        F_embed = Embedding(V, E, **embedding_parameters())
        F_seq1 = LSTM(R, **sequence_parameters())
        # The second LSTM is widened to compensate for the dense-layer dropout.
        F_seq2 = LSTM(R*int(1/p_dense), **sequence_parameters())
        F_drop = Dropout(p_dense)
        F_predict = Distribute(Dense(V, **predict_parameters()))

        words_in = Input(batch_shape=(B,S), dtype='int32')
        predictions = compose(F_predict,
                              F_drop,
                              F_seq2,
                              F_drop,
                              F_seq1,
                              F_embed)(words_in)
        # NOTE(review): eval_sentence() depends on self.F_p, which is left
        # disabled here; re-enable the line below before using it.
        #self.F_p = K.Function([words_in, K.learning_phase()], predictions)
        optimizer = Adam(self.igor.LR, clipnorm=self.igor.max_grad_norm,
                         clipvalue=self.igor.max_grad_value)
        self.model = Model(input=[words_in],
                           output=[predictions])
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy', 'perplexity'])
        if self.igor.from_checkpoint:
            self.load_checkpoint_weights()

    def load_checkpoint_weights(self):
        """Load weights from the configured checkpoint file, if it exists."""
        weight_file = join(self.igor.model_location,
                           self.igor.saving_prefix,
                           self.igor.checkpoint_weights)
        if exists(weight_file):
            self.logger.info("+ Loading checkpoint weights")
            self.model.load_weights(weight_file, by_name=True)
        else:
            self.logger.warning("- Checkpoint weights do not exist; {}".format(weight_file))

    def train(self):
        """Train with igor's generators, checkpointing the best dev model."""
        train_data = self.igor.train_gen(forever=True)
        dev_data = self.igor.dev_gen(forever=True)
        N = self.igor.num_train_samples
        E = self.igor.num_epochs
        # generator, samplers per epoch, number epochs
        callbacks = [ProgbarV2(3, 10)]
        checkpoint_fp = join(self.igor.model_location,
                             self.igor.saving_prefix,
                             self.igor.checkpoint_weights)
        self.logger.info("+ Model Checkpoint: {}".format(checkpoint_fp))
        callbacks += [ModelCheckpoint(filepath=checkpoint_fp, verbose=1, save_best_only=True)]
        # Decay LR by 5% per epoch, restarting the schedule every 15 epochs.
        callbacks += [LearningRateScheduler(lambda epoch: self.igor.LR * 0.95 ** (epoch % 15))]
        self.model.fit_generator(generator=train_data, samples_per_epoch=N, nb_epoch=E,
                                 callbacks=callbacks, verbose=1,
                                 validation_data=dev_data,
                                 nb_val_samples=self.igor.num_dev_samples)

    def test(self, num_samples=None):
        """Evaluate on the test generator and print each metric as `name: value`."""
        num_samples = num_samples or 100
        test_data = self.igor.test_gen()
        out = self.model.evaluate_generator(test_data, num_samples)
        try:
            # Fixed: the Keras attribute is `metrics_names` (not
            # `metric_names`), and the label belongs before the value.
            for o, label in zip(out, self.model.metrics_names):
                print("{}: {}".format(label, o))
        except Exception as e:
            print("some sort of error.. {}".format(e))
            import pdb
            pdb.set_trace()

    def format_sentence(self, sentence):
        ''' turn into indices here '''
        if not isinstance(sentence, list):
            sentence = sentence.split(" ")
        sentence = [self.igor.vocabs.words[w] for w in sentence]
        # Fixed: max_sequence_len lives on the igor config, not on the model
        # (self.max_sequence_len raised AttributeError).
        in_X = np.zeros(self.igor.max_sequence_len)
        out_Y = np.zeros(self.igor.max_sequence_len, dtype=np.int32)
        # Input/target pairs are consecutive words (next-word prediction).
        bigram_data = zip(sentence[0:-1], sentence[1:])
        for datum_j,(datum_in, datum_out) in enumerate(bigram_data):
            in_X[datum_j] = datum_in
            out_Y[datum_j] = datum_out
        return in_X, out_Y

    def eval_sentence(self, sentence):
        """Run the raw prediction function on one sentence.

        NOTE(review): requires self.F_p, which is currently commented out in
        make(); re-enable it there before calling this method.
        """
        X, y = self.format_sentence(sentence)
        yout = self.F_p([X[None,:]]+[0.])
        yout = yout[0]
        return X, y, yout

    def sample(self):
        """Interactively print per-word predictions and perplexity on dev data."""
        L = self.igor.train_vocab.lookup
        for dev_datum in self.igor.dev_gen():
            X, y = dev_datum      # X.shape = (b,s); y.shape = (b,s,V)
            Px = self.model.predict_proba(X)   # Px.shape = (b,s,V)
            for i in range(X.shape[0]):
                w_in = []
                w_true = []
                w_tprob = []
                w_pprob = []
                w_pred = []
                for j in range(X.shape[1]):
                    if L(X[i][j]) == "<MASK>": continue
                    w_in.append(L(X[i][j]))
                    w_true.append(L(y[i][j].argmax()))
                    w_pred.append(L(Px[i][j].argmax()))
                    w_tprob.append(Px[i][j][y[i][j].argmax()])
                    w_pprob.append(Px[i][j].max())
                # column width sized to the longest word plus padding
                n = max([len(w) for w in w_true+w_pred]) + 6
                for wt,wi,pwt,wp,pwp in zip(w_true, w_in, w_tprob, w_pred,w_pprob):
                    s = "|\t{:0.6f}\t|{:>%d} => {:<%d}|{:^%d}|\t{:0.6f}\t|" % (n,n,n)
                    print(s.format(pwt, wi,wt, wp, pwp))
                perp = 2**(-sum([log(p,2) for p in w_tprob]) / (len(w_tprob)-1))
                print("Per word perplexity of sentence: {:0.3f}".format(perp))
                prompt = input("<enter to continue, y to enter pdb, exit to exit>")
                if prompt == "y":
                    import pdb
                    pdb.set_trace()
                elif prompt == "exit":
                    import sys
                    sys.exit(0)

    def examine(self):
        """Compute per-sentence dev perplexities; pickle and histogram them."""
        L = self.igor.train_vocab.lookup
        sent_ppls = []
        sent_lls = []
        count = 0
        for dev_datum in self.igor.dev_gen(False):
            X, y = dev_datum      # X.shape = (b,s); y.shape = (b,s,V)
            Px = self.model.predict_proba(X)   # Px.shape = (b,s,V)
            for i in range(X.shape[0]):
                word_probs = []
                for j in range(X.shape[1]):
                    if L(X[i][j]) == "<MASK>": continue
                    word_probs.append(Px[i][j][y[i][j].argmax()])
                perp = 2**(-sum([log(p,2) for p in word_probs]) / (len(word_probs)-1))
                sent_lls.append(-sum([log(p,2) for p in word_probs]))
                count += len(word_probs)
                sent_ppls.append(perp)
        # Fixed: pickle requires a binary-mode file handle on Python 3.
        with open("ppls.pkl", "wb") as fp:
            pickle.dump(sent_ppls, fp)
        print("PERPLEXITIES")
        print("Mean: {}".format(np.mean(sent_ppls)))
        print("Median: {}".format(np.median(sent_ppls)))
        ent = sum(sent_lls) / (count-1.0)
        print("from sent lls and then calculated after: {:0.5f}".format(2**ent))
        plot = plt.hist(sent_ppls, bins=20)
        plt.show()
|
{"hexsha": "d5641f1dada029af5abfd41574db3fc17c811bed", "size": 9434, "ext": "py", "lang": "Python", "max_stars_repo_path": "fergus/models/language_model/model.py", "max_stars_repo_name": "braingineer/neural_tree_grammar", "max_stars_repo_head_hexsha": "e0534b733e9a6815e97e9ab28434dae7b94a632f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-10-11T06:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-11T03:39:35.000Z", "max_issues_repo_path": "fergus/models/language_model/model.py", "max_issues_repo_name": "braingineer/neural_tree_grammar", "max_issues_repo_head_hexsha": "e0534b733e9a6815e97e9ab28434dae7b94a632f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fergus/models/language_model/model.py", "max_forks_repo_name": "braingineer/neural_tree_grammar", "max_forks_repo_head_hexsha": "e0534b733e9a6815e97e9ab28434dae7b94a632f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8515625, "max_line_length": 102, "alphanum_fraction": 0.5381598474, "include": true, "reason": "import numpy", "num_tokens": 2167}
|
import control
import planning
import airsimneurips as asim
import numpy
import time
# Ideas:
# include drag:https://github.com/microsoft/AirSim/blob/18b36c7e3ea3d1e705c3938a7b8462d44bd81297/AirLib/include/vehicles/multirotor/MultiRotor.hpp#L191
# linear_drag_coefficient = 1.3f / 4.0f; air_density = 1.225f;
# Inclination limit makes big difference
# predictive over goal
# Input constraints modify at mpc
# increase resolution
if __name__ == "__main__":
# Extending to new goal: [ 14.24904251 -155.98028564 10.91945133 0.34202044 0.32139261
# 0.88302254]
# Current state: <Vector3r> { 'x_val': 13.273137092590332,
# 'y_val': -156.78729248046875,
# 'z_val': 9.852869033813477} orient <Quaternionr> { 'w_val': 0.8830069899559021,
# 'x_val': -0.2556034326553345,
# 'y_val': 0.3794059455394745,
# 'z_val': 0.10496046394109726}
# Special case with set vz: [1.02606132 0.96417784 2.64906762]
# planner = planning.RapidPlanner(0,19.72,6.28,0.02,0.05)
# planner.getShortestPath(state, numpy.array( [ 6.3731294 , 81.437416 , -43.879955 ]))
#
# Current state: <KinematicsState> { 'angular_acceleration': <Vector3r> { 'x_val': 166.14730834960938,
# 'y_val': -1.4056991338729858,
# 'z_val': 1.1516906023025513},
# 'angular_velocity': <Vector3r> { 'x_val': 2.1352334022521973,
# 'y_val': 0.6188440322875977,
# 'z_val': 0.017196346074342728},
# 'linear_acceleration': <Vector3r> { 'x_val': 0.6800534725189209,
# 'y_val': -0.46433937549591064,
# 'z_val': 1.6281375885009766},
# 'linear_velocity': <Vector3r> { 'x_val': -6.9823994636535645,
# 'y_val': 4.4047698974609375,
# 'z_val': -0.12542438507080078},
# 'orientation': <Quaternionr> { 'w_val': 0.9962534308433533,
# 'x_val': -0.01565200462937355,
# 'y_val': -0.020513707771897316,
# 'z_val': -0.08254285156726837},
# 'position': <Vector3r> { 'x_val': -17.122417449951172,
# 'y_val': 45.57592010498047,
# 'z_val': -47.230995178222656}}
planner = planning.RapidPlanner(0,19.72,6.28,0.05,0.05)
state = asim.KinematicsState()
state.position = asim.Vector3r(77.43971252, -96.87151337, -5.48000002)
# state.linear_velocity = asim.Vector3r(7.5843505859375, 0.7305274605751038, 6.088780403137207)
# state.linear_acceleration = asim.Vector3r(-2.003018379211426, -0.30256304144859314, -5.39830207824707)
# state.orientation = asim.Quaternionr(-0.012809118255972862, 0.05772315710783005, -0.03406976908445358,0.9976689219474792)
# --- MPC trajectory-tracking experiment (script fragment) ---
# NOTE(review): this fragment depends on `planner`, `control`, `asim`, `state`
# and `time` set up earlier in the script; comments below only describe what is
# visible here and hedge everything else.
# state.position.x_val = 6.373129367828369
# state.position.y_val = 81.43741607666016
# state.position.z_val = -42.87995529174805
# path = planner.getShortestPath(state, numpy.array( [ 6.373129367828369 , 81.43741607666016 , -43.87995529174805]))
# Plan a path from the current state to a fixed goal; the 6-vector is
# presumably [x, y, z] position followed by a direction/orientation triple —
# TODO confirm against planner.getShortestPath's signature.
path = planner.getShortestPath(state, numpy.array([-111.93122864, 120.21295929 , -46.08000031 , -0.64279248 , 0.76604036,0.]))
# path2 = planner.getExtendedPath(numpy.array([ 0, -1 , -20]))
# raw_path = numpy.concatenate((path, path2))
# path2 = planner.getExtendedPath(numpy.array([ 10.388415, 80.77406, -43.579998]))
# path3 = planner.getExtendedPath(numpy.array([ 18.110466 ,76.26078, -43.579998]))
# Instantiate the MPC controller; the meaning of the argument 20 (horizon
# length? control rate?) is not visible here — TODO confirm in control.MpcControl.
mpc_control = control.MpcControl(20)
# Fresh kinematic state object; the assignments that once populated it are
# kept below (commented out) as a record of a previously used test state.
state_read = asim.KinematicsState()
# state_read.orientation.w_val = 0.70710678118
# state_read.orientation.z_val = 0.70710678118
# state_read.orientation.w_val = 0.984807550907135
# state_read.orientation.x_val = 0.0008127406472340226
# state_read.orientation.y_val = -0.0021525132469832897
# state_read.orientation.z_val = -0.17364919185638428
# state_read.angular_velocity.x_val = -0.030123945325613022
# state_read.angular_velocity.y_val = 0.0011088978499174118
# state_read.angular_velocity.z_val = 7.625947910128161e-05
# state_read.linear_velocity.x_val = 0.0
# state_read.linear_velocity.y_val = 0.0
# state_read.linear_velocity.z_val = -0.24393419921398163
# state_read.position.z_val = -0.07
# state_read.angular_velocity.y_val = 1.946580171585083
# state_read.angular_velocity.z_val = -0.0002931684139184654
# state_read.position.x_val = 6.373129367828369
# state_read.position.y_val = 81.43741607666016
# state_read.position.z_val = -42.87995529174805 #-43.689579010009766
# Load the planned trajectory (with the current orientation) into the controller.
mpc_control.set_traj(path, state.orientation)
# mpc_control.append_traj(path2)
# NOTE(review): `assert` is stripped under `python -O`; fine for a test script,
# but do not rely on it in production code.
assert(mpc_control.tracking_status(state)==0)
# cur = time.time()
# for i in range(1000):
#     u = mpc_control.getInput(state)
# print("Spent time", (time.time()-cur)/1000)
# Query the reference trajectory and the full MPC solution (states X, inputs U).
ref,_ = mpc_control.getReference(state)
X, U = mpc_control.getFullMpcOutput(state)
# print(ref[:,3:7]-X[:20,3:7])
import visualization
visualization.draw(path, X, U, 10.0)
|
{"hexsha": "66b964138f6402bc5c46f89a5baa622d8e09e10a", "size": 4832, "ext": "py", "lang": "Python", "max_stars_repo_path": "Daniel/test.py", "max_stars_repo_name": "JD-ETH/AirSimNeurIPS", "max_stars_repo_head_hexsha": "0eb80f12fe6e65c508418dfc208ab0029f6c7c87", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-01-13T21:17:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T09:13:46.000Z", "max_issues_repo_path": "Daniel/test.py", "max_issues_repo_name": "JD-ETH/AirSimNeurIPS", "max_issues_repo_head_hexsha": "0eb80f12fe6e65c508418dfc208ab0029f6c7c87", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-03-23T18:27:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T08:11:02.000Z", "max_forks_repo_path": "Daniel/test.py", "max_forks_repo_name": "JD-ETH/AirSimNeurIPS", "max_forks_repo_head_hexsha": "0eb80f12fe6e65c508418dfc208ab0029f6c7c87", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-17T22:47:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-09T07:02:23.000Z", "avg_line_length": 45.1588785047, "max_line_length": 152, "alphanum_fraction": 0.7005380795, "include": true, "reason": "import numpy", "num_tokens": 1732}
|
from autograd import numpy as np
from sklearn.covariance import LedoitWolf
import warnings
class DensityEstimator:
    """Abstract base class for estimators of the density of projected data.

    Concrete subclasses estimate the 1D density of the projections ``<v, X>``
    of a data matrix ``X`` (rows are samples) onto a direction ``v``.
    """

    def init(self, X):
        """Optional one-time initialization from the full data matrix ``X``.

        Subclasses may precompute projection-independent statistics here
        (e.g. a covariance estimate); the default is a no-op.
        """
        pass

    def fit(self, v, X):
        """
        Fits density estimator to <v, X>
        """
        # NotImplementedError (a subclass of Exception) is the idiomatic way to
        # mark abstract methods; callers catching Exception still work.
        raise NotImplementedError('DensityEstimator is an abstract class')

    def predict(self, v, X):
        """
        Returns estimates on <v, X>
        """
        raise NotImplementedError('DensityEstimator is an abstract class')

    def fit_predict(self, v, Xfit, X):
        """
        Fits density estimator to <v, Xfit> and returns estimates on <v, X>
        """
        self.fit(v, Xfit)
        return self.predict(v, X)

    def toGMM(self):
        """Return the fitted density as a Gaussian mixture.

        Rows of the returned array are ``[weight, mean, variance]``.
        """
        raise NotImplementedError('DensityEstimator is an abstract class')
class KDEEstimator1D(DensityEstimator):
    """
    Simple 1D Gaussian kernel density estimator with the bandwidth chosen by
    Silverman's rule of thumb: h = gamma * 1.06 * sigma * N**(-1/5), where
    sigma is the standard deviation of the projected sample and gamma is a
    user-supplied scaling factor (gamma > 1 over-smooths, gamma < 1
    under-smooths).
    """

    def __init__(self, gamma=1.0):
        self.gamma = gamma

    def fit(self, v, X):
        """Fit the KDE to the projected sample <v, X>."""
        self.means = np.dot(X, v)
        mean = np.mean(self.means)
        # BUGFIX: the variance is the *mean* of squared deviations; the
        # previous `.sum()` inflated the bandwidth by a factor of sqrt(N).
        self.var = ((self.means - mean) ** 2).mean()
        self.N = X.shape[0]
        # Silverman's rule of thumb bandwidth.
        self.h = np.sqrt(self.var) * 1.06 * self.N ** (-0.2) * self.gamma

    def K(self, u):
        """Standard Gaussian kernel N(0, 1) evaluated at u."""
        # BUGFIX: the Gaussian kernel exponent is -u^2/2; the previous -0.2
        # coefficient was inconsistent with the 1/sqrt(2*pi) normalization
        # (likely a typo copied from the N**(-0.2) factor above).
        return 1. / np.sqrt(2. * np.pi) * np.exp(-0.5 * u ** 2)

    def predict(self, v, X):
        """Return KDE values at the projections <v, X>."""
        X = np.dot(X, v)
        pred = 1. / (self.N * self.h) * self.K((X.reshape(-1, 1) - self.means.reshape(1, -1)) / self.h).sum(axis=1)
        return pred

    def toGMM(self):
        """Return the KDE as an equal-weight Gaussian mixture.

        Rows are [weight, mean, variance]; since `h` is a standard-deviation
        bandwidth, the component variance is h**2 (previously `h` itself was
        stored, which mismatches the `var` convention used by
        NormalEstimator1D.toGMM).
        """
        weights = np.array([1. / self.N] * self.N)
        variances = np.array([self.h ** 2] * self.N)
        gmm = np.vstack((weights, self.means, variances)).T  # weight, mean, var
        return gmm
class NormalEstimator1D(DensityEstimator):
    """Regularized maximum-likelihood estimator of a 1D normal density.

    The mean and a regularized covariance (LedoitWolf by default) are computed
    once in the input space by `init`; fitting in a projected direction then
    reduces to two inner products, so `fit` does not depend on the size of the
    training set.
    """

    def __init__(self, gamma=1.0, cov_estimator=LedoitWolf):
        self.gamma = gamma
        self.cov_estimator = cov_estimator

    def init(self, X):
        """Precompute the mean and the gamma-scaled regularized covariance of X."""
        self.mean = X.mean(axis=0)
        fitted = self.cov_estimator(store_precision=False).fit(X)
        self.cov = fitted.covariance_ * self.gamma ** 2

    def fit(self, v, X):
        """Project the precomputed moments onto the direction v."""
        self.mu = np.dot(v, self.mean)
        self.var = np.dot(np.dot(v, self.cov), v.T)

    def predict(self, v, X):
        """Evaluate the fitted normal pdf at the projections <v, X>."""
        centered = np.dot(X, v) - self.mu
        return 1. / np.sqrt(2 * np.pi * self.var) * np.exp(-centered ** 2 / (2 * self.var))

    def toGMM(self):
        """Return the single-component mixture [[weight, mean, variance]]."""
        return np.array([[1.0, self.mu, self.var]])  # weight, mean, var
|
{"hexsha": "6783a581e25b48e9f9a4fcd3cc08f028ccb25053", "size": 2705, "ext": "py", "lang": "Python", "max_stars_repo_path": "pmlm/densities.py", "max_stars_repo_name": "gmum/PMLM", "max_stars_repo_head_hexsha": "9a5912b3836a74ac06cc8b5e2eaaa38ea719c437", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pmlm/densities.py", "max_issues_repo_name": "gmum/PMLM", "max_issues_repo_head_hexsha": "9a5912b3836a74ac06cc8b5e2eaaa38ea719c437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pmlm/densities.py", "max_forks_repo_name": "gmum/PMLM", "max_forks_repo_head_hexsha": "9a5912b3836a74ac06cc8b5e2eaaa38ea719c437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.091954023, "max_line_length": 115, "alphanum_fraction": 0.5900184843, "include": true, "reason": "import numpy", "num_tokens": 751}
|
import numpy as np
import keras.backend as K
import keras.layers as kl
import keras.losses as kloss
from concise.utils.helper import get_from_module
# Sentinel value in y_true marking entries to be excluded from the loss.
MASK_VALUE = -1


def mask_loss(loss, mask_value=MASK_VALUE):
    """Generates a new loss function that ignores values where `y_true == mask_value`.

    # Arguments
        loss: str; name of the keras loss function from `keras.losses`
        mask_value: int; which values should be masked

    # Returns
        function; Masked version of the `loss`

    # Example
    ```python
    categorical_crossentropy_masked = mask_loss("categorical_crossentropy")
    ```
    """
    base_loss = kloss.deserialize(loss)

    def masked_loss_fn(y_true, y_pred):
        # NaN targets are currently not supported:
        # - keras.backend provides no is_nan implementation
        # - https://github.com/fchollet/keras/issues/1628
        valid = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        # Dividing by the mean of the mask rescales the loss for the fraction
        # of entries that actually contributed.
        return base_loss(y_true * valid, y_pred * valid) / K.mean(valid)

    masked_loss_fn.__name__ = loss + "_masked"
    return masked_loss_fn
# Bellow would be the most general case and wouldn't reqire hard-coding the values
# However, it doesn't work with the classes as the current serialization is with .__name__
# and not with serialize_keras_object
# https://github.com/fchollet/keras/blob/master/keras/metrics.py#L48
#
# class MaskLoss:
# __name__ = "MaskLoss"
# def __init__(self, loss, mask_value=MASK_VALUE):
# """
# Compile masked loss function
# This function ignores values where y_true == mask_value.
# Arguments:
# loss = loss function from keras.losses
# mask_value = numeric value to be masked away (np.nan not supported for now)
# Inspired by: https://github.com/fchollet/keras/issues/3893
# """
# self.loss = kloss.deserialize(loss) # TODO - add the ability to create your own loss functions
# self.mask_value = mask_value
# def __call__(self, y_true, y_pred):
# # currently not suppoerd with NA's:
# # - there is no K.is_nan impolementation in keras.backend
# # - https://github.com/fchollet/keras/issues/1628
# mask = K.cast(K.not_equal(y_true, self.mask_value), K.floatx())
# # we divide by the mean to correct for the number of done loss evaluations
# return self.loss(y_true * mask, y_pred * mask) / K.mean(mask)
# def get_config(self):
# return {"loss": kloss.serialize(self.loss),
# "mask_value": self.mask_value
# }
# masked loss functions
# Registry of masked-loss names exposed by this module; losses that are
# deliberately not wrapped are kept commented out for documentation.
AVAILABLE = [  # "mean_squared_error_masked",
    # "mean_absolute_error_masked",
    # "mean_absolute_percentage_error_masked",
    # "mean_squared_logarithmic_error_masked",
    # "squared_hinge_masked",
    # "hinge_masked",
    "categorical_crossentropy_masked",
    "sparse_categorical_crossentropy_masked",
    "binary_crossentropy_masked",
    "kullback_leibler_divergence_masked"]
# NOTE - name has to be <loss>_mask
# TODO - take care of which masking value you are using
#        - use nan for numeric values
# mean_squared_error_masked = mask_loss("mean_squared_error")
# mean_absolute_error_masked = mask_loss("mean_absolute_error")
# mean_absolute_percentage_error_masked = mask_loss("mean_absolute_percentage_error")
# mean_squared_logarithmic_error_masked = mask_loss("mean_squared_logarithmic_error")
# squared_hinge_masked = mask_loss("squared_hinge")
# hinge_masked = mask_loss("hinge")
# Module-level masked variants; `get()` below finds these via globals().
categorical_crossentropy_masked = mask_loss("categorical_crossentropy")
sparse_categorical_crossentropy_masked = mask_loss("sparse_categorical_crossentropy")
binary_crossentropy_masked = mask_loss("binary_crossentropy")
kullback_leibler_divergence_masked = mask_loss("kullback_leibler_divergence")
def get(name):
    """Resolve a loss function by name.

    Looks the name up in `keras.losses` first; if that fails with ValueError,
    falls back to the masked losses defined at module level in this file.
    """
    try:
        loss_fn = kloss.get(name)
    except ValueError:
        loss_fn = get_from_module(name, globals())
    return loss_fn
|
{"hexsha": "5cf86ef0f10089428b11abb99aa1c806e79d40b5", "size": 4049, "ext": "py", "lang": "Python", "max_stars_repo_path": "concise/losses.py", "max_stars_repo_name": "gagneurlab/concise", "max_stars_repo_head_hexsha": "12078d75f37fe176bb7d221134b8b14aeb48e11f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2017-07-25T08:42:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T18:11:38.000Z", "max_issues_repo_path": "concise/losses.py", "max_issues_repo_name": "Avsecz/concise", "max_issues_repo_head_hexsha": "4df8dd51e1eb69245cbdd6f006a64f6b056634c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2016-09-21T20:16:31.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-06T10:11:15.000Z", "max_forks_repo_path": "concise/losses.py", "max_forks_repo_name": "gagneurlab/concise", "max_forks_repo_head_hexsha": "12078d75f37fe176bb7d221134b8b14aeb48e11f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2017-11-16T04:23:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T12:40:29.000Z", "avg_line_length": 37.1467889908, "max_line_length": 105, "alphanum_fraction": 0.701407755, "include": true, "reason": "import numpy", "num_tokens": 977}
|
"""
poly2pm(PM; grade = k) -> P
Build a grade `k` matrix polynomial representation `P(λ)` from a polynomial matrix, polynomial vector or scalar polynomial `PM(λ)`.
`PM(λ)` is a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
`P(λ)` is a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, for which
the coefficient matrices `P_l`, `l = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,l]` contains the `l`-th coefficient matrix `P_l` (multiplying `λ**(l-1)`).
If `grade = missing`, then `k` is chosen the largest degree of the elements of `PM`.
The coefficients of the degree `d` element `(i,j)` of `PM(λ)` result in `P[i,j,1:d+1]`.
"""
function poly2pm(PM::Matrix{<:Polynomial}; grade::Union{Int,Missing} = missing)
p, m = size(PM)
degs = degree.(PM)
d = maximum(degs)
ismissing(grade) ? k = d+1 : k = max(d,grade)+1
T = eltype(eltype(PM))
k == 0 && (return zeros(T,p,m,1))
P = zeros(T,p,m,k)
for j = 1:m
for i = 1:p
degs[i,j] < 0 || (P[i,j,1:degs[i,j]+1] = coeffs(PM[i,j]))
end
end
return P
end
# Constant (degree-0) numeric matrix: embed PM as the first coefficient slice
# of a grade `grade` (default 0) representation.
function poly2pm(PM::Matrix{T}; grade::Union{Int,Missing} = missing) where T <: Number
    p, m = size(PM)
    k = ismissing(grade) ? 1 : max(0, grade) + 1
    # no padding needed: reshape shares the data of PM
    k <= 1 && return reshape(PM, p, m, 1)
    P = zeros(T, p, m, k)
    P[:, :, 1] = PM
    return P
end
# Polynomial vector: represented as an m×1 polynomial matrix whose coefficient
# slices hold the coefficients of each entry.
function poly2pm(PM::Vector{<:Polynomial}; grade::Union{Int,Missing} = missing)
    m = length(PM)
    degs = degree.(PM)
    dmax = maximum(degs)
    k = ismissing(grade) ? dmax + 1 : max(dmax, grade) + 1
    T = eltype(eltype(PM))
    k == 0 && return zeros(T, m, 1, 1)
    P = zeros(T, m, 1, k)
    for i in 1:m
        degs[i] < 0 && continue     # zero polynomial entry: leave slice zero
        P[i, 1, 1:degs[i]+1] = coeffs(PM[i])
    end
    return P
end
# Constant numeric vector: represented as an m×1 polynomial matrix of grade
# `grade` (default 0); the vector becomes the first coefficient slice.
function poly2pm(PM::Vector{T}; grade::Union{Int,Missing} = missing) where T <: Number
    m = length(PM)
    k = ismissing(grade) ? 1 : max(0, grade) + 1
    # no padding needed: reshape shares the data of PM
    k <= 1 && return reshape(PM, m, 1, 1)
    P = zeros(T, m, 1, k)
    P[:, :, 1] = PM
    return P
end
# Constant numeric row vector given as an adjoint/transpose of a vector:
# represented as a 1×m polynomial matrix of grade `grade` (default 0).
# For an adjoint input the entries are conjugated, consistent with collecting PM.
function poly2pm(PM::Union{Adjoint{T,Vector{T}},Transpose{T,Vector{T}}}; grade::Union{Int,Missing} = missing) where T <: Number
    m = length(PM)
    k = ismissing(grade) ? 1 : max(0, grade) + 1
    row = PM isa Adjoint ? conj(PM.parent) : PM.parent
    k <= 1 && return reshape(row, 1, m, 1)
    P = zeros(T, 1, m, k)
    P[:, :, 1] = reshape(row, 1, m, 1)
    return P
end
# Scalar polynomial: represented as a 1×1 polynomial matrix of grade `grade`.
function poly2pm(PM::Polynomial{T}; grade::Union{Int,Missing} = missing) where T
    d = degree(PM)
    k = ismissing(grade) ? d + 1 : max(d, grade) + 1
    # zero polynomial without a grade: a single zero coefficient slice
    k == 0 && return zeros(T, 1, 1, 1)
    P = zeros(T, 1, 1, k)
    d >= 0 && (P[1, 1, 1:d+1] = coeffs(PM))
    return P
end
# Scalar: represented as a 1×1 polynomial matrix of grade `grade`.
# A zero scalar has degree -1 by convention and yields an all-zero representation.
function poly2pm(PM::Number; grade::Union{Int,Missing} = missing)
    T = typeof(PM)
    d = PM == zero(T) ? -1 : 0
    k = ismissing(grade) ? d + 1 : max(d, grade) + 1
    k == 0 && return zeros(T, 1, 1, 1)
    P = zeros(T, 1, 1, k)
    d >= 0 && (P[1, 1, 1] = PM)
    return P
end
"""
pm2poly(P[,var = 'x']) -> PM
Build the polynomial matrix `PM(λ)` from its matrix polynomial representation `P(λ)`.
`P(λ)` is a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, for which
the coefficient matrices `P_l`, `l = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,l]` contains the `l`-th coefficient matrix `P_l` (multiplying `λ**(l-1)`).
`PM(λ)` is a matrix of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
The element `(i,j)` of `PM(λ)` is built from the coefficients contained in `P[i,j,1:k+1]`.
The symbol to be used for the indeterminate `λ` can be specified in the optional input variable `var`.
"""
function pm2poly(PM::AbstractArray{T,3},var::Union{Char, AbstractString, Symbol}='x') where T
m, n, k = size(PM)
P = zeros(Polynomial{T},m,n)
for i = 1:m
for j = 1:n
P[i,j] = Polynomial(PM[i,j,1:k],var)
end
end
return P
end
"""
pmdeg(P) -> deg
Determine the degree `deg` of a polynomial matrix `P(λ)`.
`P(λ)` is a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, for which
the coefficient matrices `P_i`, `i = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
The degree of `P(λ)` is `deg = j-1`, where `j` is the largest index for which `P[:,:,j]` is nonzero. The degree of
the zero polynomial matrix is defined to be `deg = -1`.
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
The degree of `P(λ)` is the largest degree of the elements of `P(λ)`.
The degree of the zero polynomial matrix is defined to be `-1`.
"""
function pmdeg(P::AbstractArray{T,3}) where T
for j = size(P,3):-1:1
norm(P[:,:,j],Inf) > 0 && return j-1
end
return -1
end
function pmdeg(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number})
    # scalar: degree 0 unless identically zero (degree -1 by convention)
    P isa Number && return (P == 0 ? -1 : 0)
    return maximum(degree.(P))
end
"""
pmeval(P,val) -> R
Evaluate `R = P(val)` for a polynomial matrix `P(λ)`, using Horner's scheme.
`P(λ)` is a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, for which
the coefficient matrices `P_i`, `i = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
"""
function pmeval(P::AbstractArray{T,3},val::Number) where {T}
# Horner's scheme
p, m, k1 = size(P)
nd = pmdeg(P)+1
S = typeof(val)
nd == 0 && return zeros(promote_type(T,S),p,m)
R = P[:,:,nd]*one(S)
for k = nd-1:-1:1
R = R*val+ P[:,:,k]
end
return R
end
pmeval(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial},val::Number) = pmeval(poly2pm(P),val)
pmeval(P::Union{Number,AbstractVecOrMat{<:Number}},val::Number = 0) = P
"""
pmreverse(P[,j]) -> Q
Build `Q(λ) = λ^j*P(1/λ)`, the `j`-reversal of a polynomial matrix `P(λ)` for `j ≥ deg(P(λ))`.
If `j` is not specified, the default value `j = deg(P(λ))` is used.
`P(λ)` can be specified as a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`,
for which the coefficient matrices `P_i`, `i = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
If deg(P(λ)), then `Q(λ)` is a grade `j` polynomial matrix of the form
`Q(λ) = Q_1 + λ Q_2 + ... + λ**j Q_(j+1)`, for which
the coefficient matrices `Q_i`, `i = 1, ..., j+1`, are stored in the 3-dimensional matrix `Q`,
where `Q[:,:,i]` contains the `i`-th coefficient matrix `Q_i` (multiplying `λ**(i-1)`).
The coefficient matrix `Q_i` is either `0` if `i ≤ j-d` or `Q_(j-d+i) = P_(d-i+1)` for `i = 1, ..., d`.
"""
function pmreverse(P::AbstractArray{T,3}, j::Int = pmdeg(P)) where T
d = pmdeg(P)
j < d && error("j must be at least $d")
m, n, k1 = size(P)
Q = zeros(eltype(P),m,n,j+1)
Q[:,:,j-d+1:j+1] = reverse(P[:,:,1:d+1],dims=3)
return Q
end
pmreverse(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number}; kwargs...) =
pmreverse(poly2pm(P); kwargs...)
"""
pmdivrem(N,D) -> (Q, R)
Compute the quotients in `Q(λ)` and remainders in `R(λ)` of the elementwise polynomial divisions `N(λ)./D(λ)`.
`N(λ)` is a polynomial matrix of the form `N(λ) = N_1 + λ N_2 + ... + λ**k N_(k+1)`, for which
the coefficient matrices `N_i`, `i = 1, ..., k+1` are stored in the 3-dimensional matrix `N`,
where `N[:,:,i]` contains the `i`-th coefficient matrix `N_i` (multiplying `λ**(i-1)`).
`D(λ)` is a polynomial matrix of the form `D(λ) = D_1 + λ D_2 + ... + λ**l D_(l+1)`, for which
the coefficient matrices `D_i`, `i = 1, ..., l+1`, are stored in the 3-dimensional matrix `D`,
where `D[:,:,i]` contain the `i`-th coefficient matrix `D_i` (multiplying `λ**(i-1)`).
Alternatively, `N(λ)` and `D(λ)` can be specified as matrices of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
The polynomial matrices of quotients `Q(λ)` and remainders `R(λ)` are stored in the 3-dimensional
matrices `Q` and `R`, respectively, where `Q[:,:,i]` and `R[:,:,i]` contain the `i`-th
coefficient matrix multiplying `λ**(i-1)`.
"""
function pmdivrem(N::AbstractArray{T1,3},D::AbstractArray{T2,3}) where {T1,T2}
p, m, k = size(N)
p1, m1, k1 = size(D)
(p,m) == (p1, m1) || error("Numerator and denominator polynomial matrices must have the same size")
degQ1 = 0
degD1 = 0
for j = 1:m
for i = 1:p
n1 = poldeg1(N[i,j,1:k])
d1 = poldeg1(D[i,j,1:k1])
d1 == 0 && error("DivideError: zero denominator polynomial")
n1 < d1 || (degQ1 = max(degQ1,n1-d1+1))
degD1 = max(degD1,d1)
end
end
T = eltype(one(T1)/one(T2))
R = zeros(T,p,m,max(degD1-1,1))
Q = zeros(T,p,m,max(degQ1,1))
for j = 1:m
for i = 1:p
q, r = poldivrem(N[i,j,1:k],D[i,j,1:k1])
nq1 = poldeg1(q)
nr1 = poldeg1(r)
nq1 == 0 || (Q[i,j,1:nq1] = copy_oftype(q[1:nq1],T))
nr1 == 0 || (R[i,j,1:nr1] = copy_oftype(r[1:nr1],T))
end
end
return Q[:,:,1:pmdeg(Q)+1], R[:,:,1:pmdeg(R)+1]
end
pmdivrem(N::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
D::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}}) =
pmdivrem(poly2pm(N),poly2pm(D))
"""
pm2lpCF1(P; grade = l) -> (M, N)
Build a strong linearization `M - λN` of a polynomial matrix `P(λ)` in the first companion Frobenius form.
`P(λ)` is a grade `k` polynomial matrix assumed of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, with
the coefficient matrices `P_i`, `i = 1, ..., k+1` stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
The effective grade `l` to be used for linearization can be specified via the keyword argument `grade` as
`grade = l`, where `l` must be chosen equal to or greater than the degree of `P(λ)`.
The default value used for `l` is `l = deg(P(λ))`.
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
If `P(λ)` is a `m x n` polynomial matrix of effective grade `l` and degree `d`,
then the resulting matrix pencil `M - λN` satisfies the following conditions [1]:
(1) `M - λN` has dimension `(m+n*(l-1)) x n*l` and `M - λN` is regular if `P(λ)` is regular;
(2) `M - λN` and `P(λ)` have the same finite eigenvalues;
(3) the partial multiplicities of infinite eigenvalues of `M - λN` are in excess with `l-d` to the
partial multiplicities of the infinite eigenvalues of `P(λ)`;
(4) `M - λN` and `P(λ)` have the same number of right Kronecker indices and the right
Kronecker indices of `M - λN` are in excess with `l-1` to the right Kronecker indices of `P(λ)`;
(5) `M - λN` and `P(λ)` have the same left Kronecker structure (i.e., the same left Kronecker indices).
[1] F. De Terán, F. M. Dopico, D. S. Mackey, Spectral equivalence of polynomial matrices and
the Index Sum Theorem, Linear Algebra and Its Applications, vol. 459, pp. 264-333, 2014.
"""
function pm2lpCF1(P::AbstractArray{T,3}; grade::Int = pmdeg(P)) where T
m, n, k1 = size(P)
deg = pmdeg(P)
grade < deg && error("The selected grade must be at least $deg")
grade == -1 && (return zeros(T,m,n), nothing )
grade == 0 && (return P[:,:,1], nothing )
grade == 1 && (grade > deg ? (return P[:,:,1], zeros(T,m,n)) : (return P[:,:,1], -P[:,:,2]) )
nd = n*grade
nd1 = nd-n
grade > deg ? (N = [ zeros(T,nd1+m,n) [zeros(T,m,nd1); I] ]) :
(N = [ [P[:,:,grade+1]; zeros(T,nd1,n)] [zeros(T,m,nd1); I] ])
M = [ zeros(T,m,nd); [I zeros(T,nd1,n)] ]
deg == -1 && (return M, N)
k = nd
it = 1:m
grade > deg ? ne = deg+1 : ne = max(1,deg)
for i = 1:ne
M[it,k-n+1:k] = -P[:,:,i]
k -= n
end
return M, N
end
pm2lpCF1(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial,AbstractVecOrMat{<:Number}, Number}; kwargs...) =
pm2lpCF1(poly2pm(P); kwargs...)
"""
pm2lpCF2(P; grade = l) -> (M, N)
Build a strong linearization `M - λN` of a polynomial matrix `P(λ)` in the second companion Frobenius form.
`P(λ)` is a grade `k` polynomial matrix assumed of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`, with
the coefficient matrices `P_i`, `i = 1, ..., k+1` stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
The effective grade `l` to be used for linearization can be specified via the keyword argument `grade` as
`grade = l`, where `l` must be chosen equal to or greater than the degree of `P(λ)`.
The default value used for `l` is `l = deg(P(λ))`.
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
If `P(λ)` is a `m x n` polynomial matrix of effective grade `l` and degree `d`, then the resulting matrix pencil
`M - λN` satisfies the following conditions [1]:
(1) `M - λN` has dimension `l*m x (n+(l-1)*m)` and `M - λN` is regular if `P(λ)` is regular;
(2) `M - λN` and `P(λ)` have the same finite eigenvalues;
(3) the partial multiplicities of infinite eigenvalues of `M - λN` are in excess with `l-d` to the
partial multiplicities of the infinite eigenvalues of `P(λ)`;
(4) `M - λN` and `P(λ)` have the same right Kronecker structure (i.e., the same right Kronecker indices);
(5) `M - λN` and `P(λ)` have the same number of left Kronecker indices and the left
Kronecker indices of `M - λN` are in excess with `l-1` to the left Kronecker indices of `P(λ)`.
[1] F. De Terán, F. M. Dopico, D. S. Mackey, Spectral equivalence of polynomial matrices and
the Index Sum Theorem, Linear Algebra and Its Applications, vol. 459, pp. 264-333, 2014.
"""
function pm2lpCF2(P::AbstractArray{T,3}; grade::Int = pmdeg(P)) where T
m, n, k1 = size(P)
deg = pmdeg(P)
grade < deg && error("The selected grade must be at least $deg")
grade == -1 && (return zeros(T,m,n), nothing )
grade == 0 && (return P[:,:,1], nothing )
grade == 1 && (grade > deg ? (return P[:,:,1], zeros(T,m,n)) : (return P[:,:,1], -P[:,:,2]) )
md = m*grade
md1 = md-m
grade > deg ? (N = [ zeros(T,md,n) [zeros(T,m,md1); I] ]) :
(N = [ [P[:,:,grade+1]; zeros(T,md1,n)] [zeros(T,m,md1); I] ])
M = [ zeros(T,md,n) [I; zeros(T,m,md1)] ]
deg == -1 && (return M, N)
k = md
it = 1:n
grade > deg ? ne = deg+1 : ne = max(1,deg)
for i = 1:ne
M[k-m+1:k,it] = -P[:,:,i]
k -= m
end
return M, N
end
pm2lpCF2(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial,AbstractVecOrMat{<:Number}, Number}; kwargs...) =
pm2lpCF2(poly2pm(P); kwargs...)
"""
pm2ls(P; contr = false, obs = false, noseig = false, minimal = false,
fast = true, atol = 0, rtol) -> (A, E, B, C, D)
Build a structured linearization as a system matrix `S(λ)` of the form
| A-λE | B |
S(λ) = |------|---|
| C | D |
of the polynomial matrix `P(λ)` which preserves a part of the Kronecker structure of `P(λ)`.
`P(λ)` can be specified as a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`,
for which the coefficient matrices `P_i`, `i = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
If `d` is the degree of `P(λ)` and `n` is the order of `A-λE`, then the computed linearization satisfies:
(1) `A-λE` is regular and `P(λ) = C*inv(λE-A)*B+D`;
(2) `rank[B A-λE] = n` (controllability) if `minimal = true` or `contr = true`, in which case
the right Kronecker structure is preserved;
(3) `rank[A-λE; C] = n` (observability) if `minimal = true` or `contr = true`, in which case
the left Kronecker structure is preserved;
(4) `A-λE` has no non-dynamic modes if `minimal = true` or `noseig = true`.
If conditions (1)-(4) are satisfied, the linearization is called `minimal` and the resulting order `n`
is the least achievable order. If conditions (1)-(3) are satisfied, the linearization is called `irreducible`
and the resulting order `n` is the least achievable order using orthogonal similarity transformations.
For an irreducible linearization `S(λ)` preserves the pole-zero structure (finite and infinite) and the
left and right Kronecker structures of `P(λ)`.
The underlying pencil manipulation algorithms [1] and [2] to compute reduced order linearizations
employ rank determinations based on either the use of
rank revealing QR-decomposition with column pivoting, if `fast = true`, or the SVD-decomposition, if `fast = false`.
The rank decision based on the SVD-decomposition is generally more reliable, but the involved computational effort is higher.
The keyword arguments `atol` and `rtol`, specify the absolute and relative tolerances for the
nonzero coefficients of `P(λ)`, respectively.
[1] P. Van Dooreen, The generalized eigenstructure problem in linear system theory,
IEEE Transactions on Automatic Control, vol. AC-26, pp. 111-129, 1981.
[2] A. Varga, Solving Fault Diagnosis Problems - Linear Synthesis Techniques, Springer Verlag, 2017.
"""
function pm2ls(P::AbstractArray{T,3}; minimal::Bool = false, contr::Bool = false, obs::Bool = false, noseig::Bool = false,
fast::Bool = true, atol::Real = zero(real(T)),
rtol::Real = (min(size(P)...)*eps(real(float(one(T)))))*iszero(atol)) where T
minimal && (contr = true; obs = true; noseig = true)
p, m, k1 = size(P)
nd = pmdeg(P)+1
nd == 0 && (return zeros(T,0,0), zeros(T,0,0), zeros(T,0,m), zeros(T,p,0), zeros(T,p,m))
D = P[:,:,1]
nd == 1 && (return zeros(T,0,0), zeros(T,0,0), zeros(T,0,m), zeros(T,p,0), D)
if xor(contr,obs)
if obs
# build an observable linearization
n = p*nd
E = [zeros(T,n,p) [I; zeros(T,p,p*(nd-1))]]
B = zeros(T,n,m)
k = p
for i = 2:nd
B[k+1:k+p,:] = P[:,:,i]
k += p
end
C = [ -I zeros(T,p,p*(nd-1)) ]
else
# build a controllable linearization
n = m*nd
E = [zeros(T,n,m) [I; zeros(T,m,m*(nd-1))]]
B = [zeros(T,m*(nd-1),m); -I ]
C = zeros(T,p,n)
k = 0
for i = 1:nd-1
C[:,k+1:k+m] = P[:,:,nd-i+1]
k += m
end
end
A = Matrix{T}(I,n,n)
else
if p <= m
# build an observable linearization
n = p*nd
E = [zeros(T,n,p) [I; zeros(T,p,p*(nd-1))]]
B = zeros(T,n,m)
k = p
for i = 2:nd
B[k+1:k+p,:] = P[:,:,i]
k += p
end
C = [ -I zeros(T,p,p*(nd-1)) ]
if contr
# remove uncontrollable part
T <: BlasFloat ? T1 = T : T1 = promote_type(Float64,T)
Er = copy_oftype(E,T1)
Br = copy_oftype(B,T1)
Cr = copy_oftype(C,T1)
_, _, nr, nuc = sklf_right!(Er, Br, Cr; fast = fast, atol1 = atol, atol2 = atol, rtol = rtol, withQ = false)
if nuc > 0
ir = 1:nr
# save intermediary results
E = Er[ir,ir]
B = Br[ir,:]
C = Cr[:,ir]
end
A = Matrix{T1}(I,nr,nr)
else
A = Matrix{T}(I,n,n)
end
else
# build a controllable linearization
n = m*nd
E = [zeros(T,n,m) [I; zeros(T,m,m*(nd-1))]]
B = [zeros(T,m*(nd-1),m); -I ]
C = zeros(T,p,n)
k = 0
for i = 1:nd-1
C[:,k+1:k+m] = P[:,:,nd-i+1]
k += m
end
if obs
# remove unobservable part
T <: BlasFloat ? T1 = T : T1 = promote_type(Float64,T)
Er = copy_oftype(E,T1)
Br = copy_oftype(B,T1)
Cr = copy_oftype(C,T1)
_, _, nr, nuo = sklf_left!(Er, Cr, Br; fast = fast, atol1 = atol, atol2 = atol, rtol = rtol, withQ = false)
if nuo > 0
ir = n-nr+1:n
# save intermediary results
E = Er[ir,ir]
B = Br[ir,:]
C = Cr[:,ir]
end
A = Matrix{T1}(I,nr,nr)
else
A = Matrix{T}(I,n,n)
end
end
end
if noseig
A, E, B, C, D = lsminreal(A,E,B,C,D,contr = false, obs = false, fast = fast, atol1 = atol, atol2 = atol, rtol = rtol)
end
return A, E, B, C, D
end
pm2ls(P::Union{AbstractVecOrMat{Polynomial{T}},Polynomial{T},AbstractVecOrMat{Polynomial{T,X}},Polynomial{T,X}}; kwargs...) where {T,X} =
pm2ls(poly2pm(P); kwargs...)
pm2ls(P::Union{AbstractVecOrMat{T},Number}; kwargs...) where {T <: Number} =
pm2ls(poly2pm(P); kwargs...)
# pm2ls(P::Union{AbstractVecOrMat{Polynomial{T,X}},Polynomial{T,X}}; kwargs...) where {T,X} =
# pm2ls(poly2pm(P); kwargs...)
"""
pm2lps(P; contr = false, obs = false) -> (A, E, B, F, C, G, D, H)
Build a structured linearization as a system matrix `S(λ)` of the form
| A-λE | B-λF |
S(λ) = |------|------|
| C-λG | D-λH |
of the polynomial matrix `P(λ)` which preserves a part of the Kronecker structure of `P(λ)`.
`P(λ)` can be specified as a grade `k` polynomial matrix of the form `P(λ) = P_1 + λ P_2 + ... + λ**k P_(k+1)`,
for which the coefficient matrices `P_i`, `i = 1, ..., k+1`, are stored in the 3-dimensional matrix `P`,
where `P[:,:,i]` contains the `i`-th coefficient matrix `P_i` (multiplying `λ**(i-1)`).
`P(λ)` can also be specified as a matrix, vector or scalar of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
If `d` is the degree of the `p x m` polynomial matrix `P(λ)`, then the computed linearization satisfies:
(1) `A-λE` is a `n x n` regular pencil, where `n = p(d-1)` if `contr = false` and `p <= m`
and `n = m(d-1)` otherwise;
(2) `P(λ) = (C-λG)*inv(λE-A)*(B-λF)+D-λH`;
(3) `rank[B-λF A-λE] = n` for any finite and infinite `λ` (strong controllability) if `contr = true`, in which case
the right Kronecker structure is preserved;
(4) `rank[A-λE; C-λG] = n` for any finite and infinite `λ` (strong observability) if `obs = true`, in which case
the left Kronecker structure is preserved.
If conditions (1)-(4) are satisfied, the linearization is called `strongly minimal`, the resulting order `n`
is the least achievable order and `S(λ)` preserves the pole-zero structure (finite and infinite) and the
left and right Kronecker structures of `P(λ)`.
The pencil based linearization is built using the methods described in [1].
[1] A. Varga, On computing the Kronecker structure of polynomial and rational matrices using Julia, 2020,
[arXiv:2006.06825](https://arxiv.org/pdf/2006.06825).
"""
function pm2lps(P::AbstractArray{T,3}; contr::Bool = false, obs::Bool = false) where T
p, m, k1 = size(P)
d = pmdeg(P)
nd = d+1
H = zeros(T,p,m)
d == -1 && (return zeros(T,0,0), zeros(T,0,0), zeros(T,0,m), zeros(T,0,m), zeros(T,p,0), zeros(T,p,0), H, H)
D = P[:,:,1]
d == 0 && (return zeros(T,0,0), zeros(T,0,0), zeros(T,0,m), zeros(T,0,m), zeros(T,p,0), zeros(T,p,0), D, H)
d == 1 && (return zeros(T,0,0), zeros(T,0,0), zeros(T,0,m), zeros(T,0,m), zeros(T,p,0), zeros(T,p,0), D, -P[:,:,2])
if obs || (!contr && p <= m)
# build a strongly observable linearization
n = p*(d-1)
E = [zeros(T,n,p) [I; zeros(T,p,n-p)]]
B = zeros(T,n,m)
F = zeros(T,n,m)
k = 0
for i = 2:d
B[k+1:k+p,:] = P[:,:,i]
k += p
end
F[n-p+1:n,:] = -P[:,:,nd]
C = zeros(T,p,n)
G = [I zeros(T,p,n-p) ]
A = Matrix{T}(I,n,n)
else
# build a strongly controllable linearization
n = m*(d-1)
E = [zeros(T,n,m) [I; zeros(T,m,n-m)]]
B = zeros(T,n,m)
F = [zeros(T,n-m,m); I]
C = zeros(T,p,n)
G = [ -P[:,:,nd] zeros(T,p,n-m)]
k = n
for i = 2:d
#C[:,k-m+1:k] = P[:,:,nd-i]
C[:,k-m+1:k] = P[:,:,i]
k -= m
end
A = Matrix{T}(I,n,n)
end
return A, E, B, F, C, G, D, H
end
# Convenience method: convert `P` given as a `Polynomial` matrix/vector/scalar (or a plain
# number) to the 3-dimensional coefficient representation and delegate to the main method.
# The unused static parameter `where {T}` of the original definition has been removed:
# it never occurred in the signature and only triggered a method-definition warning.
pm2lps(P::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number}; kwargs...) =
    pm2lps(poly2pm(P); kwargs...)
"""
ls2pm(A, E, B, C, D; fast = true, atol1 = 0, atol2 = 0, gaintol = 0, rtol = min(atol1,atol2) > 0 ? 0 : n*ϵ, val) -> P
Build the polynomial matrix `P(λ) = C*inv(λE-A)*B+D` corresponding to its structured linearization
| A-λE | B |
|------|---|
| C | D |
by explicitly determining for each polynomial entry, its coefficients from its roots and a corresponding gain.
The keyword arguments `atol1` and `atol2` specify the absolute tolerances for the elements of `A`, `B`, `C`, `D`, and,
respectively, of `E`, and `rtol` specifies the relative tolerances for the nonzero elements of `A`, `B`, `C`, `D` and `E`.
The default relative tolerance is `(n+1)*ϵ`, where `n` is the size of the size dimension of `A`, and `ϵ` is the
machine epsilon of the element type of coefficients of `A`.
The keyword argument `gaintol` specifies the threshold for the magnitude of the nonzero elements of the gain matrix
`C*inv(γE-A)*B+D`, where `γ = val` if `val` is a number or `γ` is a randomly chosen complex value of unit magnitude,
if `val = missing`. Generally, `val` should not be a root of any of entries of `P`.
"""
function ls2pm(A::AbstractMatrix, E::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, D::AbstractMatrix;
fast::Bool = true, atol1::Real = zero(real(eltype(A))), atol2::Real = zero(real(eltype(E))),
gaintol::Real = zero(real(eltype(A))), val::Union{Number,Missing} = missing,
rtol::Real = ((size(A,1)+1)*eps(real(float(one(eltype(A))))))*iszero(min(atol1,atol2)))
n = LinearAlgebra.checksquare(A)
(n,n) == size(E) || throw(DimensionMismatch("A and E must have the same dimensions"))
p, m = size(D)
(n,m) == size(B) || throw(DimensionMismatch("A, B and D must have compatible dimensions"))
(p,n) == size(C) || throw(DimensionMismatch("A, C and D must have compatible dimensions"))
T = promote_type(eltype(A),eltype(E), eltype(B),eltype(C),eltype(D))
T <: BlasFloat || (T = promote_type(Float64,T))
compl = T <: Complex
P = zeros(T,p,m,n+1)
ismissing(val) && (val = exp(rand()*im))
A1, E1, B1, C1, D1 = lsminreal2(A, E, B, C, D, infinite = false, noseig = false, atol1 = atol1, atol2 = atol2)
Pval = lseval(A1, E1, B1, C1, D1, val)
isunimodular(A1, E1, atol1 = atol1, atol2 = atol2, rtol = rtol) ||
error("The given linearization cannot be converted to a polynomial form")
for i = 1:p
for j = 1:m
if abs(Pval[i,j]) > gaintol
zer, iz, = spzeros(A1, E1, B1[:,j:j], C1[i:i,:], D1[i:i,j:j];
fast = fast, atol1 = atol1, atol2 = atol2, rtol = rtol)
c, pval = polcoeffval(zer[1:(length(zer)-sum(iz))],val)
P[i,j,1:length(c)] = compl ? c*(Pval[i,j]/pval) : real(c*(Pval[i,j]/pval))
end
end
end
return P[:,:,1:pmdeg(P)+1]
end
"""
lps2pm(A, E, B, F, C, G, D, H; fast = true, atol1 = 0, atol2 = 0, gaintol = 0, rtol = min(atol1,atol2) > 0 ? 0 : n*ϵ, val) -> P
Build the polynomial matrix `P(λ) = (C-λG)*inv(λE-A)*(B-λF)+D-λH` corresponding to its structured linearization
| A-λE | B-λF |
|------|------|
| C-λG | D-λH |
by explicitly determining for each polynomial entry, its coefficients from its roots and corresponding gain.
The keyword arguments `atol1` and `atol2` specify the absolute tolerances for the elements of `A`, `B`, `C`, `D`, and of
`E`, `F`, `G`, `H`, respectively, and `rtol` specifies the relative tolerances for the nonzero elements of
`A`, `B`, `C`, `D`, `E`, F`, `G`, `H`.
The default relative tolerance is `(n+2)*ϵ`, where `n` is the size of the size dimension of `A`, and `ϵ` is the
machine epsilon of the element type of coefficients of `A`.
The keyword argument `gaintol` specifies the threshold for the magnitude of the nonzero elements of the gain matrix
`C*inv(γE-A)*B+D`, where `γ = val` if `val` is a number or `γ` is a randomly chosen complex value of unit magnitude,
if `val = missing`. Generally, `val` should not be a root of any of entries of `P`.
"""
function lps2pm(A::AbstractMatrix, E::AbstractMatrix,B::AbstractMatrix, F::AbstractMatrix,
C::AbstractMatrix, G::AbstractMatrix, D::AbstractMatrix, H::AbstractMatrix;
fast::Bool = true, atol1::Real = zero(real(eltype(A))), atol2::Real = zero(real(eltype(E))),
gaintol::Real = zero(real(eltype(A))), val::Union{Number,Missing} = missing,
rtol::Real = ((size(A,1)+2)*eps(real(float(one(eltype(A))))))*iszero(min(atol1,atol2)))
n = LinearAlgebra.checksquare(A)
(n,n) == size(E) || throw(DimensionMismatch("A and E must have the same dimensions"))
p, m = size(D)
(n,m) == size(B) || throw(DimensionMismatch("A, B and D must have compatible dimensions"))
(p,n) == size(C) || throw(DimensionMismatch("A, C and D must have compatible dimensions"))
(n,m) == size(F) || throw(DimensionMismatch("B and F must have the same dimensions"))
(p,n) == size(G) || throw(DimensionMismatch("C and G must have the same dimensions"))
(p,m) == size(H) || throw(DimensionMismatch("D and H must have the same dimensions"))
T = promote_type(eltype(A), eltype(B), eltype(C), eltype(D), eltype(E), eltype(F), eltype(G), eltype(H))
T <: BlasFloat || (T = promote_type(Float64,T))
compl = T <: Complex
P = zeros(T,p,m,n+2)
ismissing(val) && (val = exp(rand()*im))
A1, E1, B1, F1, C1, G1, D1, H1, V1, W1 = lpsminreal(A, E, B, F, C, G, D, H, atol1 = atol1, atol2 = atol2, rtol = rtol)
Pval = V1'\lpseval(A1, E1, B1, F1, C1, G1, D1, H1, val)/W1
isunimodular(A1, E1, atol1 = atol1, atol2 = atol2, rtol = rtol) ||
error("The given linearization cannot be converted to a polynomial form")
M1 = [A1 B1/W1; V1'\C1 V1'\D1/W1]
N1 = [E1 F1/W1; V1'\G1 V1'\H1/W1]
n = size(A1,1)
indi = [1:n;[1]]
indj = [1:n;[1]]
n1 = n+1
for i = 1:p
indi[n1] = n+i
for j = 1:m
if abs(Pval[i,j]) > gaintol
indj[n1] = n+j
zer, iz, = pzeros(view(M1,indi,indj),view(N1,indi,indj);
fast = fast, atol1 = atol1, atol2 = atol2, rtol = rtol)
c, pval = polcoeffval(zer[1:(length(zer)-sum(iz))],val)
P[i,j,1:length(c)] = compl ? c*(Pval[i,j]/pval) : real(c*(Pval[i,j]/pval))
end
end
end
return P[:,:,1:pmdeg(P)+1]
end
"""
spm2ls(T, U, V, W; fast = true, contr = false, obs = false, minimal = false, atol = 0, rtol) -> (A, E, B, C, D)
Build a structured linearization as a system matrix `S(λ)` of the form
| A-λE | B |
S(λ) = |------|---|
| C | D |
of the structured polynomial matrix
| -T(λ) | U(λ) |
P(λ) = |-------|------|
| V(λ) | W(λ) |
such that `V(λ)*inv(T(λ))*U(λ)+W(λ) = C*inv(λE-A)*B+D`. The resulting linearization `S(λ)` preserves a part,
if `minimal = false`, or the complete Kronecker structure, if `minimal = true`, of `P(λ)`. In the latter case,
the order `n` of `A-λE` is the least possible one and `S(λ)` is a strong linearization of `P(λ)`.
`T(λ)`, `U(λ)`, `V(λ)`, and `W(λ)` can be specified as polynomial matrices of the form `X(λ) = X_1 + λ X_2 + ... + λ**k X_(k+1)`,
for `X = T`, `U`, `V`, and `W`, for which the coefficient matrices `X_i`, `i = 1, ..., k+1`, are stored in
the 3-dimensional matrices `X`, where `X[:,:,i]` contains the `i`-th coefficient matrix `X_i` (multiplying `λ**(i-1)`).
`T(λ)`, `U(λ)`, `V(λ)`, and `W(λ)` can also be specified as matrices, vectors or scalars of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
The computed structured linearization satisfies:
(1) `A-λE` is regular;
(2) `rank[B A-λE] = n` (controllability) if `minimal = true` or `contr = true`, in which case
the finite and right Kronecker structures are preserved;
(3) `rank[A-λE; C] = n` (observability) if `minimal = true` or `obs = true`, in which case
the finite and left Kronecker structures are preserved;
(4) `A-λE` has no simple infinite eigenvalues if `minimal = true`, in which case the complete Kronecker structure is preserved.
The keyword arguments `atol` and `rtol`, specify, respectively, the absolute and relative tolerance for the
nonzero coefficients of the matrices `T(λ)`, `U(λ)`, `V(λ)` and `W(λ)`. The default relative tolerance is `nt*ϵ`,
where `nt` is the size of the square matrix `T(λ)` and `ϵ` is the machine epsilon of the element type of its coefficients.
The structured linearization is built using the methods described in [1].
[1] A. Varga, On computing the Kronecker structure of polynomial and rational matrices using Julia, 2020,
[arXiv:2006.06825](https://arxiv.org/pdf/2006.06825).
"""
function spm2ls(T::Union{AbstractArray{T1,3},AbstractArray{T1,2}},U::Union{AbstractArray{T2,3},AbstractArray{T2,2}},
V::Union{AbstractArray{T3,3},AbstractArray{T3,2}},W::Union{AbstractArray{T4,3},AbstractArray{T4,2}};
contr::Bool = false, obs::Bool = false, minimal::Bool = false,
fast::Bool = true, atol::Real = zero(real(T1)),
rtol::Real = size(T,1)*eps(real(float(one(T1))))*iszero(atol)) where {T1, T2, T3, T4}
if ndims(T) == 2
n, nt = size(T)
n == nt || throw(DimensionMismatch("T(λ) must be a square polynomial matrix"))
n == rank(T, atol=atol, rtol=rtol) || error("T(λ) must be a regular square polynomial matrix")
ndT = 1
T = reshape(T,n,nt,ndT)
else
n, nt, ndT = size(T)
n == nt || throw(DimensionMismatch("T(λ) must be a square polynomial matrix"))
n == pmrank(T, atol=atol, rtol=rtol) || error("T(λ) must be a regular square polynomial matrix")
ndT = max(pmdeg(T)+1,1)
end
if ndims(U) == 2
nt, m = size(U)
ndU = 1
U = reshape(U,nt,m,ndU)
else
nt, m, ndU = size(U)
ndU = max(pmdeg(U)+1,1)
end
n == nt || throw(DimensionMismatch("T(λ) and U(λ) must have the same number of rows"))
if ndims(V) == 2
p, nt = size(V)
ndV = 1
V = reshape(V,p,nt,ndV)
else
p, nt, ndV = size(V)
ndV = max(pmdeg(V)+1,1)
end
n == nt || throw(DimensionMismatch("T(λ) and V(λ) must have the same number of columns"))
if ndims(W) == 2
pt, mt = size(W)
ndW = 1
W = reshape(W,pt, mt,ndW)
else
pt, mt, ndW = size(W)
ndW = max(pmdeg(W)+1,1)
end
pt == p || throw(DimensionMismatch("W(λ) and V(λ) must have the same number of rows"))
mt == m || throw(DimensionMismatch("W(λ) and U(λ) must have the same number of columns"))
nd = max(ndT,ndU,ndV,ndW)
TT = promote_type(T1, T2, T3, T4)
if nd == 1
if minimal
Ar,Er,Br,Cr,Dr = lsminreal(T[:,:,1], zeros(TT,n,n), U[:,:,1], V[:,:,1], W[:,:,1],
contr = false, obs = false, atol1 = atol, atol2 = atol, rtol = rtol)
return Ar,Er,Br,Cr,Dr
else
return T[:,:,1], zeros(TT,n,n), U[:,:,1], V[:,:,1], W[:,:,1]
end
end
# build the compound polynomial matrix [-T(λ) U(λ); V(λ) W(λ)]
P = zeros(TT,n+p,n+m,nd)
ia = 1:n
jb = n+1:n+m
ic = n+1:n+p
P[ia,ia,1:ndT] = -T[:,:,1:ndT]
P[ia,jb,1:ndU] = U[:,:,1:ndU]
P[ic,ia,1:ndV] = V[:,:,1:ndV]
P[ic,jb,1:ndW] = W[:,:,1:ndW]
# build a linearization of the compound polynomial matrix [-T(λ) U(λ); V(λ) W(λ)]
A, E, B, C, D = pm2ls(P, contr = contr, obs = obs, noseig = false,
fast = fast, atol = atol, rtol = rtol)
# form the linearization of P(λ) = V(λ)*inv(T(λ))*U(λ)+W(λ)
nr = size(A,1)
Ar = [A B[:,ia]; C[ia,:] D[ia,ia]]
Er = [E zeros(TT,nr,n); zeros(TT,n,n+nr)]
Br = [B[:,jb]; D[ia,jb]]
Cr = [C[ic,:] D[ic,ia]]
Dr = D[ic,jb]
if minimal
Ar,Er,Br,Cr,Dr = lsminreal(Ar,Er,Br,Cr,Dr, fast=fast, atol1 = atol, atol2 = atol, rtol = rtol)
end
return Ar, Er, Br, Cr, Dr
end
# Convenience method: convert each operand given as a `Polynomial` matrix/vector/scalar
# (or plain numbers/numeric arrays) to 3-D coefficient form and delegate to the main method.
# The original signature used invariant element types `Polynomial{T1}` etc., which never
# match arrays of two-parameter `Polynomial{T,X}` elements (Polynomials.jl v2+); the
# subtype unions below fix the dispatch and match the companion method of `spm2lps`.
spm2ls(T::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
       U::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
       V::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
       W::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}}; kwargs...) =
    spm2ls(poly2pm(T),poly2pm(U),poly2pm(V),poly2pm(W); kwargs...)
"""
spm2lps(T, U, V, W; fast = true, contr = false, obs = false, minimal = false, atol = 0, rtol) -> (A, E, B, F, C, G, D, H)
Build a structured linearization
| A-λE | B-λF |
M - λN = |------|------|
| C-λG | D-λH |
of the structured polynomial matrix
| -T(λ) | U(λ) |
P(λ) = |-------|------|
| V(λ) | W(λ) |
such that `V(λ)*inv(T(λ))*U(λ)+W(λ) = (C-λG))*inv(λE-A)*(B-λF)+D-λH`. The resulting linearization `M - λN` preserves a part,
if `minimal = false`, or the complete Kronecker structure, if `minimal = true`, of `P(λ)`. In the latter case,
the order `n` of `A-λE` is the least possible one and `M - λN` is a strong linearization of `P(λ)`.
`T(λ)`, `U(λ)`, `V(λ)`, and `W(λ)` can be specified as polynomial matrices of the form `X(λ) = X_1 + λ X_2 + ... + λ**k X_(k+1)`,
for `X = T`, `U`, `V`, and `W`, for which the coefficient matrices `X_i`, `i = 1, ..., k+1`, are stored in
the 3-dimensional matrices `X`, where `X[:,:,i]` contains the `i`-th coefficient matrix `X_i` (multiplying `λ**(i-1)`).
`T(λ)`, `U(λ)`, `V(λ)`, and `W(λ)` can also be specified as matrices, vectors or scalars of elements of the `Polynomial` type
provided by the [Polynomials](https://github.com/JuliaMath/Polynomials.jl) package.
The computed structured linearization satisfies:
(1) `A-λE` is regular;
(2) `rank[B-λF A-λE] = n` (strong controllability) if `minimal = true` or `contr = true`, in which case
the finite and right Kronecker structures are preserved;
(3) `rank[A-λE; C-λG] = n` (strong observability) if `minimal = true` or `obs = true`, in which case
the finite and left Kronecker structures are preserved.
The keyword arguments `atol` and `rtol`, specify, respectively, the absolute and relative tolerance for the
nonzero coefficients of the matrices `T(λ)`, `U(λ)`, `V(λ)` and `W(λ)`. The default relative tolerance is `nt*ϵ`,
where `nt` is the size of the square matrix `T(λ)` and `ϵ` is the machine epsilon of the element type of its coefficients.
"""
function spm2lps(T::Union{AbstractArray{T1,3},AbstractArray{T1,2}},U::Union{AbstractArray{T2,3},AbstractArray{T2,2}},
V::Union{AbstractArray{T3,3},AbstractArray{T3,2}},W::Union{AbstractArray{T4,3},AbstractArray{T4,2}};
contr::Bool = false, obs::Bool = false, minimal::Bool = false,
fast::Bool = true, atol::Real = zero(real(T1)),
rtol::Real = size(T,1)*eps(real(float(one(T1))))*iszero(atol)) where {T1, T2, T3, T4}
if ndims(T) == 2
n, nt = size(T)
n == nt || throw(DimensionMismatch("T(λ) must be a square polynomial matrix"))
n == rank(T, atol=atol, rtol=rtol) || error("T(λ) must be a regular square polynomial matrix")
ndT = 1
T = reshape(T,n,nt,ndT)
else
n, nt, ndT = size(T)
n == nt || throw(DimensionMismatch("T(λ) must be a square polynomial matrix"))
n == pmrank(T, atol=atol, rtol=rtol) || error("T(λ) must be a regular square polynomial matrix")
ndT = max(pmdeg(T)+1,1)
end
if ndims(U) == 2
nt, m = size(U)
ndU = 1
U = reshape(U,nt,m,ndU)
else
nt, m, ndU = size(U)
ndU = max(pmdeg(U)+1,1)
end
n == nt || throw(DimensionMismatch("T(λ) and U(λ) must have the same number of rows"))
if ndims(V) == 2
p, nt = size(V)
ndV = 1
V = reshape(V,p,nt,ndV)
else
p, nt, ndV = size(V)
ndV = max(pmdeg(V)+1,1)
end
n == nt || throw(DimensionMismatch("T(λ) and V(λ) must have the same number of columns"))
if ndims(W) == 2
pt, mt = size(W)
ndW = 1
W = reshape(W,pt, mt,ndW)
else
pt, mt, ndW = size(W)
ndW = max(pmdeg(W)+1,1)
end
pt == p || throw(DimensionMismatch("W(λ) and V(λ) must have the same number of rows"))
mt == m || throw(DimensionMismatch("W(λ) and U(λ) must have the same number of columns"))
nd = max(ndT,ndU,ndV,ndW)
TT = promote_type(T1, T2, T3, T4)
if nd == 1
if minimal
Ar,Er,Br,Cr,Dr = lsminreal(T[:,:,1], zeros(TT,n,n), U[:,:,1], V[:,:,1], W[:,:,1],
contr = false, obs = false, atol1 = atol, atol2 = atol, rtol = rtol)
nr = size(Ar,1)
TW = eltype(Ar)
return Ar,Er,Br,zeros(TW,nr,m),Cr,zeros(TW,p,nr),Dr,zeros(TW,p,m)
else
return T[:,:,1], zeros(TT,n,n), U[:,:,1], zeros(TT,n,m), V[:,:,1], zeros(TT,p,n), W[:,:,1], zeros(TT,p,m)
end
end
# build the compound polynomial matrix [-T(λ) U(λ); V(λ) W(λ)]
P = zeros(TT,n+p,n+m,nd)
ia = 1:n
jb = n+1:n+m
ic = n+1:n+p
P[ia,ia,1:ndT] = -T[:,:,1:ndT]
P[ia,jb,1:ndU] = U[:,:,1:ndU]
P[ic,ia,1:ndV] = V[:,:,1:ndV]
P[ic,jb,1:ndW] = W[:,:,1:ndW]
# build a linearization of the compound polynomial matrix [-T(λ) U(λ); V(λ) W(λ)]
A, E, B, F, C, G, D, H = pm2lps(P, contr = contr, obs = obs)
# form the linearization of P(λ) = V(λ)*inv(T(λ))*U(λ)+W(λ)
nr = size(A,1)
Ar = [A B[:,ia]; C[ia,:] D[ia,ia]]
Er = [E F[:,ia]; G[ia,:] H[ia,ia]]
Br = [B[:,jb]; D[ia,jb]]
Fr = [F[:,jb]; H[ia,jb]]
Cr = [C[ic,:] D[ic,ia]]
Gr = [G[ic,:] H[ic,ia]]
Dr = D[ic,jb]
Hr = H[ic,jb]
if minimal
Ar,Er,Br,Fr,Cr,Gr,Dr,Hr,Vr,Wr = lpsminreal(Ar,Er,Br,Fr,Cr,Gr,Dr,Hr, fast=fast, atol1 = atol, atol2 = atol, rtol = rtol)
return Ar,Er,Br/Wr,Fr/Wr,Vr'\Cr,Vr'\Gr,Vr'\Dr/Wr,Vr'\Hr/Wr
else
return Ar,Er,Br,Fr,Cr,Gr,Dr,Hr
end
end
# Convenience method: accepts the four operands as `Polynomial` matrices/vectors/scalars
# or plain numbers/numeric arrays, converts each to 3-D coefficient form and delegates
# to the main method.
function spm2lps(T::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
                 U::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
                 V::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}},
                 W::Union{AbstractVecOrMat{<:Polynomial},Polynomial,Number,AbstractVecOrMat{<:Number}}; kwargs...)
    # TODO: checking that all entries have the same variable
    Tc, Uc, Vc, Wc = poly2pm(T), poly2pm(U), poly2pm(V), poly2pm(W)
    return spm2lps(Tc, Uc, Vc, Wc; kwargs...)
end
|
{"hexsha": "555e441697da9031ae61221fbdf011a9c9071989", "size": 45272, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/pmtools.jl", "max_stars_repo_name": "baggepinnen/MatrixPencils.jl", "max_stars_repo_head_hexsha": "c16b7415bd2765b452f29b6977bcc4f0566003a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pmtools.jl", "max_issues_repo_name": "baggepinnen/MatrixPencils.jl", "max_issues_repo_head_hexsha": "c16b7415bd2765b452f29b6977bcc4f0566003a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pmtools.jl", "max_forks_repo_name": "baggepinnen/MatrixPencils.jl", "max_forks_repo_head_hexsha": "c16b7415bd2765b452f29b6977bcc4f0566003a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.408224674, "max_line_length": 217, "alphanum_fraction": 0.5758747128, "num_tokens": 15321}
|
"""
The wntr.epanet.util module contains unit conversion utilities based on EPANET units.
.. rubric:: Contents
- :class:`~wntr.epanet.util.FlowUnits`
- :class:`~wntr.epanet.util.MassUnits`
- :class:`~wntr.epanet.util.QualParam`
- :class:`~wntr.epanet.util.HydParam`
- :meth:`to_si`
- :meth:`from_si`
- :class:`~StatisticsType`
- :class:`~QualType`
- :class:`~SourceType`
- :class:`~PressureUnits`
- :class:`~FormulaType`
- :class:`~ControlType`
- :class:`~LinkTankStatus`
- :class:`~MixType`
- :class:`~ResultType`
- :class:`~wntr.epanet.util.EN`
----
"""
import numpy as np
import enum
import logging
logger = logging.getLogger(__name__)
__all__ = ["FlowUnits", "MassUnits", "QualParam", "HydParam", "to_si", "from_si",
"StatisticsType", "QualType", "SourceType", "PressureUnits", "FormulaType",
"ControlType", "LinkTankStatus",
"MixType", "ResultType", "EN"]
class FlowUnits(enum.Enum):
    u"""Epanet Units Enum class.
    EPANET has defined unit codes that are used in its INP input files.
    This enumerated type class provides the appropriate values, rather than
    setting up a large number of constants. Additionally, each Enum value has
    a property that identifies it as either `traditional` or `metric` flow unit.
    EPANET *does not* use fully SI units - these are provided for WNTR compatibilty.
    .. rubric:: Enum Members
    ============== ====================================  ========================
    :attr:`~CFS`   :math:`ft^3\,/\,s`                    :attr:`is_traditional`
    :attr:`~GPM`   :math:`gal\,/\,min`                   :attr:`is_traditional`
    :attr:`~MGD`   :math:`10^6\,gal\,/\,day`             :attr:`is_traditional`
    :attr:`~IMGD`  :math:`10^6\,Imp.\,gal\,/\,day`       :attr:`is_traditional`
    :attr:`~AFD`   :math:`acre\cdot\,ft\,/\,day`         :attr:`is_traditional`
    :attr:`~LPS`   :math:`L\,/\,s`                       :attr:`is_metric`
    :attr:`~LPM`   :math:`L\,/\,min`                     :attr:`is_metric`
    :attr:`~MLD`   :math:`ML\,/\,day`                    :attr:`is_metric`
    :attr:`~CMH`   :math:`m^3\,/\,hr`                    :attr:`is_metric`
    :attr:`~CMD`   :math:`m^3\,/\,day`                   :attr:`is_metric`
    :attr:`~SI`    :math:`m^3\,/\,s`
    ============== ====================================  ========================
    .. rubric:: Enum Member Attributes
    .. autosummary::
        factor
        is_traditional
        is_metric
    Examples
    --------
    >>> from wntr.epanet import FlowUnits
    >>> FlowUnits.GPM
    <FlowUnits.GPM: (1, 6.30901964e-05)>
    Units can be converted to the EPANET integer values by casting as an ``int`` and can be
    converted to a string by accessing the ``name`` property. The factor to convert to SI units
    is accessed using the ``factor`` property.
    >>> FlowUnits.LPS.name
    'LPS'
    >>> int(FlowUnits.LPS)
    5
    The reverse is also true, where an ``int`` from an EPANET run or the string from and input
    file can be used to get a ``FlowUnits`` object.
    >>> FlowUnits(4)
    <FlowUnits.AFD: (4, 0.014276410185185185)>
    >>> FlowUnits['CMD']
    <FlowUnits.CMD: (9, 1.1574074074074073e-05)>
    Units can be checked for metric or US customary status using the ``is_traditional`` or
    ``is_metric`` options.
    >>> FlowUnits.GPM.is_traditional
    True
    >>> FlowUnits.GPM.is_metric
    False
    Conversion can be done using the `factor` attribute. For example, to convert 10 AFD to SI units,
    and to convert 10 MGD to MLD,
    >>> 10 * FlowUnits.AFD.factor
    0.14276410185185184
    >>> 10 * FlowUnits.MGD.factor / FlowUnits.MLD.factor
    37.85411784000001
    .. note::
        This Enum uses a value of 0 for one of its members, and therefore
        acts in a non-standard way when evaluating truth values. Use ``None`` / ``is None``
        to check for truth values for variables storing a FlowUnits.
    """
    # Each member's value is (EPANET integer code, factor to convert to m^3/s).
    CFS = (0, 0.0283168466)
    GPM = (1, (0.003785411784/60.0))
    MGD = (2, (1e6*0.003785411784/86400.0))
    IMGD = (3, (1e6*0.00454609/86400.0))
    AFD = (4, (1233.48184/86400.0))
    LPS = (5, 0.001)
    LPM = (6, (0.001/60.0))
    MLD = (7, (1e6*0.001/86400.0))
    CMH = (8, (1.0/3600.0))
    CMD = (9, (1.0/86400.0))
    SI = (11, 1.0)

    def __init__(self, EN_id, flow_factor):
        # Register the bare EPANET integer code as an alias *value* (so that
        # FlowUnits(0) works without the full tuple) and the lowercase spelling
        # as an alias *name* (so that FlowUnits['cfs'] works).
        self._value2member_map_[EN_id] = self
        self._member_map_[self.name.lower()] = self

    def __int__(self):
        """Convert to an EPANET Toolkit enum number."""
        return int(self.value[0])

    def __str__(self):
        """Convert to a string for INP files."""
        return self.name

    @property
    def factor(self):
        """float: The conversion factor to convert units into SI units of :math:`m^3\,s^{-1}`.
        Letting values in the original units be :math:`v`, and the resulting values in SI units
        be :math:`s`, the conversion factor, :math:`f`, such that
        .. math::
            v f = s
        """
        return self.value[1]

    @property
    def is_traditional(self):
        """bool: True if flow unit is a US Customary (traditional) unit.
        Traditional units include CFS, GPM, MGD, IMGD and AFD.
        Examples
        --------
        >>> FlowUnits.MGD.is_traditional
        True
        >>> FlowUnits.MLD.is_traditional
        False
        >>> FlowUnits.SI.is_traditional
        False
        """
        return self in [FlowUnits.CFS, FlowUnits.GPM, FlowUnits.MGD, FlowUnits.IMGD, FlowUnits.AFD]

    @property
    def is_metric(self):
        """bool: True if flow unit is an SI Derived (metric) unit.
        Metric units include LPS, LPM, MLD, CMH, and CMD.
        This 'does not' include FlowUnits.SI itself, only 'derived' units.
        Examples
        --------
        >>> FlowUnits.MGD.is_metric
        False
        >>> FlowUnits.MLD.is_metric
        True
        >>> FlowUnits.SI.is_metric
        False
        """
        return self in [FlowUnits.LPS, FlowUnits.LPM, FlowUnits.MLD, FlowUnits.CMH, FlowUnits.CMD]
class MassUnits(enum.Enum):
    r"""Mass units recognized by EPANET, with their conversion factors to kg.

    The mass unit is taken from the QUALITY option of an EPANET INP file when
    the quality model is a chemical: the mass portion of the concentration
    units ("mg/L", "ug/min", ...) determines which member applies.

    .. rubric:: Enum Members

    ============ ============================================
    :attr:`~mg`  miligrams; EPANET as "mg/L" or "mg/min"
    :attr:`~ug`  micrograms; EPANET as "ug/L" or "ug/min"
    :attr:`~g`   grams
    :attr:`~kg`  kilograms; WNTR standard
    ============ ============================================

    .. rubric:: Enum Member Attributes

    .. autosummary::
        factor

    """
    # Member values are (integer id, multiplier that converts this unit to kg).
    mg = (1, 0.000001)
    ug = (2, 0.000000001)
    g = (3, 0.001)
    kg = (4, 1.0)

    @property
    def factor(self):
        """float : The scaling factor to convert to kg."""
        _, to_kg = self.value
        return to_kg
class QualParam(enum.Enum):
    u"""EPANET water quality parameters conversion.
    These parameters are separated from the HydParam parameters because they are related to a
    logically separate model in EPANET, but also because conversion to SI units requires additional
    information, namely, the MassUnits that were specified in the EPANET input file. Additionally,
    the reaction coefficient conversions require information about the reaction order that was
    specified. See the `to_si` and `from_si` functions for details.
    .. rubric:: Enum Members
    ==========================  ================================================================
    :attr:`~Concentration`      General concentration parameter
    :attr:`~Quality`            Nodal water quality
    :attr:`~LinkQuality`        Link water quality
    :attr:`~BulkReactionCoeff`  Bulk reaction coefficient (req. `reaction_order` to convert)
    :attr:`~WallReactionCoeff`  Wall reaction coefficient (req. `reaction_order` to convert)
    :attr:`~ReactionRate`       Average reaction rate within a link
    :attr:`~SourceMassInject`   Injection rate for water quality sources
    :attr:`~WaterAge`           Water age at a node
    ==========================  ================================================================
    """
    Quality = 4
    LinkQuality = 10
    ReactionRate = 13
    Concentration = 35
    BulkReactionCoeff = 36
    WallReactionCoeff = 37
    SourceMassInject = 38
    WaterAge = 39

    def __init__(self, value):
        # Register UPPERCASE and lowercase spellings as aliases, so that
        # QualParam['QUALITY'] and QualParam['quality'] both resolve to Quality.
        if self.name != self.name.upper():
            self._member_map_[self.name.upper()] = self
        if self.name != self.name.lower():
            self._member_map_[self.name.lower()] = self

    def _to_si(self, flow_units, data, mass_units=MassUnits.mg,
               reaction_order=0):
        """Convert a water quality parameter to SI units from EPANET units.
        By default, the mass units are the EPANET default of mg, and the reaction order is 0.
        Parameters
        ----------
        flow_units : ~FlowUnits
            The EPANET flow units to use in the conversion
        data : array-like
            The data to be converted (scalar, array or dictionary)
        mass_units : ~MassUnits
            The EPANET mass units to use in the conversion (mg or ug)
        reaction_order : int
            The reaction order for use converting reaction coefficients
        Returns
        -------
        float
            The data values converted to SI standard units
        """
        # Remember the container type so the same type can be returned.
        data_type = type(data)
        if data_type is dict:
            data_keys = data.keys()
            # list() is required here: in Python 3, dict.values() is a view and
            # np.array(view) produces a useless 0-d object array.  This matches
            # the handling in HydParam._to_si.
            data = np.array(list(data.values()))
        elif data_type is list:
            data = np.array(data)
        # Do conversions
        if self in [QualParam.Concentration, QualParam.Quality, QualParam.LinkQuality]:
            data = data * (mass_units.factor/0.001)  # MASS /L to kg/m3
        elif self in [QualParam.SourceMassInject]:
            data = data * (mass_units.factor/60.0)  # MASS /min to kg/s
        elif self in [QualParam.BulkReactionCoeff] and reaction_order == 1:
            data = data * (1/86400.0)  # per day to per second
        elif self in [QualParam.WallReactionCoeff] and reaction_order == 0:
            if flow_units.is_traditional:
                data = data * (mass_units.factor*0.092903/86400.0)  # M/ft2/d to SI
            else:
                data = data * (mass_units.factor/86400.0)  # M/m2/day to M/m2/s
        elif self in [QualParam.WallReactionCoeff] and reaction_order == 1:
            if flow_units.is_traditional:
                data = data * (0.3048/86400.0)  # ft/d to m/s
            else:
                data = data * (1.0/86400.0)  # m/day to m/s
        elif self in [QualParam.WaterAge]:
            data = data * 3600.0  # hr to s
        # (A duplicated, unreachable SourceMassInject branch was removed here;
        # the conversion is already handled by the earlier elif.)
        # Convert back to input data type
        if data_type is dict:
            data = dict(zip(data_keys, data))
        elif data_type is list:
            data = list(data)
        return data

    def _from_si(self, flow_units, data, mass_units=MassUnits.mg,
                 reaction_order=0):
        """Convert a water quality parameter back to EPANET units from SI units.
        Mass units defaults to :class:`MassUnits.mg`, as this is the EPANET default.
        Parameters
        ----------
        flow_units : ~FlowUnits
            The EPANET flow units to use in the conversion
        data : array-like
            The SI unit data to be converted (scalar, array or dictionary)
        mass_units : ~MassUnits
            The EPANET mass units to use in the conversion (mg or ug)
        reaction_order : int
            The reaction order for use converting reaction coefficients
        Returns
        -------
        float
            The data values converted to EPANET appropriate units, based on the flow units.
        """
        data_type = type(data)
        if data_type is dict:
            data_keys = data.keys()
            # See _to_si: list() is required for Python 3 dict views.
            data = np.array(list(data.values()))
        elif data_type is list:
            data = np.array(data)
        # Do conversions (each divides by the factor used in _to_si)
        if self in [QualParam.Concentration, QualParam.Quality, QualParam.LinkQuality]:
            data = data / (mass_units.factor/0.001)  # MASS /L fr kg/m3
        elif self in [QualParam.SourceMassInject]:
            data = data / (mass_units.factor/60.0)  # MASS /min fr kg/s
        elif self in [QualParam.BulkReactionCoeff] and reaction_order == 1:
            data = data / (1/86400.0)  # per day fr per second
        elif self in [QualParam.WallReactionCoeff] and reaction_order == 0:
            if flow_units.is_traditional:
                data = data / (mass_units.factor*0.092903/86400.0)  # M/ft2/d fr SI
            else:
                data = data / (mass_units.factor/86400.0)  # M/m2/day fr M/m2/s
        elif self in [QualParam.WallReactionCoeff] and reaction_order == 1:
            if flow_units.is_traditional:
                data = data / (0.3048/86400.0)  # ft/d fr m/s
            else:
                data = data / (1.0/86400.0)  # m/day fr m/s
        elif self in [QualParam.WaterAge]:
            data = data / 3600.0  # hr fr s
        # (A duplicated, unreachable SourceMassInject branch was removed here as well.)
        # Convert back to input data type
        if data_type is dict:
            data = dict(zip(data_keys, data))
        elif data_type is list:
            data = list(data)
        return data
class HydParam(enum.Enum):
    u"""EPANET hydraulics and energy parameter conversion.
    The hydraulic parameter enumerated type is used to perform unit conversion
    between EPANET internal units and SI units used by WNTR. The units for each
    parameter are determined based on the :class:`FlowUnits` used.
    Parameters that are unitless or otherwise require no conversion are not members of this
    Enum type.
    .. rubric:: Enum Members
    ========================== ===================================================================
    :attr:`~Elevation`         Nodal elevation
    :attr:`~Demand`            Nodal demand
    :attr:`~HydraulicHead`     Nodal head
    :attr:`~Pressure`          Nodal pressure
    :attr:`~EmitterCoeff`      Emitter coefficient
    :attr:`~TankDiameter`      Tank diameter
    :attr:`~Volume`            Tank volume
    :attr:`~Length`            Link length
    :attr:`~PipeDiameter`      Pipe diameter
    :attr:`~Flow`              Link flow
    :attr:`~Velocity`          Link velocity
    :attr:`~HeadLoss`          Link headloss (from start node to end node)
    :attr:`~RoughnessCoeff`    Link roughness (requires `darcy_weisbach` setting for conversion)
    :attr:`~Energy`            Pump energy
    :attr:`~Power`             Pump power
    ========================== ===================================================================
    """
    # Member values follow the EPANET output-variable codes; commented-out
    # entries are codes that exist in EPANET but need no unit conversion here.
    Elevation = 0
    Demand = 1
    HydraulicHead = 2
    Pressure = 3
    # Quality = 4
    Length = 5
    PipeDiameter = 6
    Flow = 7
    Velocity = 8
    HeadLoss = 9
    # Link Quality = 10
    # Status = 11
    # Setting = 12
    # Reaction Rate = 13
    # Friction factor = 14
    Power = 15
    # Time = 16
    Volume = 17
    # The following are not "output" network variables, and thus are defined separately
    EmitterCoeff = 31
    RoughnessCoeff = 32
    TankDiameter = 33
    Energy = 34
    def __init__(self, value):
        # Register upper- and lower-case spellings of the member name as lookup
        # aliases so that e.g. HydParam['ELEVATION'] and HydParam['elevation']
        # resolve to HydParam.Elevation.
        if self.name != self.name.upper():
            self._member_map_[self.name.upper()] = self
        if self.name != self.name.lower():
            self._member_map_[self.name.lower()] = self
    def _to_si(self, flow_units, data, darcy_weisbach=False):
        """Convert from EPANET units groups to SI units.
        If converting roughness, specify if the Darcy-Weisbach equation is
        used using the darcy_weisbach parameter. Otherwise, that parameter
        can be safely ignored/omitted for any other conversion.
        Parameters
        ----------
        flow_units : :class:`~FlowUnits`
            The flow units to use in the conversion
        data : array-like
            The EPANET-units data to be converted (scalar, array or dictionary)
        darcy_weisbach : bool, optional
            Set to ``True`` if converting roughness coefficients for use with Darcy-Weisbach
            formula.
        Returns
        -------
        float
            The data values converted to SI standard units.
        """
        # Convert to array for unit conversion; dict values/list entries are
        # converted in bulk and restored to the original container at the end.
        data_type = type(data)
        if data_type is dict:
            data_keys = data.keys()
            data = np.array(list(data.values()))
        elif data_type is list:
            data = np.array(data)
        # Do conversions (factors depend on whether the flow-units system is
        # US traditional or metric).
        if self in [HydParam.Demand, HydParam.Flow, HydParam.EmitterCoeff]:
            data = data * flow_units.factor
            if self is HydParam.EmitterCoeff:
                if flow_units.is_traditional:
                    data = data / 0.7032  # flowunit/psi0.5 to flowunit/m0.5
        elif self in [HydParam.PipeDiameter]:
            if flow_units.is_traditional:
                data = data * 0.0254  # in to m
            elif flow_units.is_metric:
                data = data * 0.001  # mm to m
        elif self in [HydParam.RoughnessCoeff] and darcy_weisbach:
            if flow_units.is_traditional:
                data = data * (1000.0*0.3048)  # 1e-3 ft to m
            elif flow_units.is_metric:
                data = data * 0.001  # mm to m
        elif self in [HydParam.TankDiameter, HydParam.Elevation, HydParam.HydraulicHead,
                      HydParam.Length, HydParam.HeadLoss]:
            if flow_units.is_traditional:
                data = data * 0.3048  # ft to m
        elif self in [HydParam.Velocity]:
            if flow_units.is_traditional:
                data = data * 0.3048  # ft/s to m/s
        elif self in [HydParam.Energy]:
            data = data * 3600000.0  # kW*hr to J
        elif self in [HydParam.Power]:
            if flow_units.is_traditional:
                data = data * 745.699872  # hp to W (Nm/s)
            elif flow_units.is_metric:
                data = data * 1000.0  # kW to W (Nm/s)
        elif self in [HydParam.Pressure]:
            if flow_units.is_traditional:
                data = data * 0.703249614902  # psi to m
        elif self in [HydParam.Volume]:
            if flow_units.is_traditional:
                data = data * np.power(0.3048, 3)  # ft3 to m3
        # Convert back to input data type
        if data_type is dict:
            data = dict(zip(data_keys, data))
        elif data_type is list:
            data = list(data)
        return data
    def _from_si(self, flow_units, data, darcy_weisbach=False):
        """Convert from SI units into EPANET specified units.
        If converting roughness, specify if the Darcy-Weisbach equation is
        used using the darcy_weisbach parameter. Otherwise, that parameter
        can be safely ignored/omitted for any other conversion.
        Parameters
        ----------
        flow_units : :class:`~FlowUnits`
            The flow units to use in the conversion
        data : array-like
            The SI unit data to be converted (scalar, array or dictionary)
        darcy_weisbach : bool, optional
            Set to ``True`` if converting roughness coefficients for use with Darcy-Weisbach
            formula.
        Returns
        -------
        float
            The data values converted to EPANET appropriate units based on the flow units.
        """
        # Convert to array for conversion; mirrors _to_si with division instead
        # of multiplication.
        data_type = type(data)
        if data_type is dict:
            data_keys = data.keys()
            data = np.array(list(data.values()))
        elif data_type is list:
            data = np.array(data)
        # Do conversions
        if self in [HydParam.Demand, HydParam.Flow, HydParam.EmitterCoeff]:
            data = data / flow_units.factor
            if self is HydParam.EmitterCoeff:
                if flow_units.is_traditional:
                    data = data / 0.7032  # flowunit/psi0.5 from flowunit/m0.5
        elif self in [HydParam.PipeDiameter]:
            if flow_units.is_traditional:
                data = data / 0.0254  # in from m
            elif flow_units.is_metric:
                data = data / 0.001  # mm from m
        elif self in [HydParam.RoughnessCoeff] and darcy_weisbach:
            if flow_units.is_traditional:
                data = data / (1000.0*0.3048)  # 1e-3 ft from m
            elif flow_units.is_metric:
                data = data / 0.001  # mm from m
        elif self in [HydParam.TankDiameter, HydParam.Elevation, HydParam.HydraulicHead,
                      HydParam.Length, HydParam.HeadLoss]:
            if flow_units.is_traditional:
                data = data / 0.3048  # ft from m
        elif self in [HydParam.Velocity]:
            if flow_units.is_traditional:
                data = data / 0.3048  # ft/s from m/s
        elif self in [HydParam.Energy]:
            data = data / 3600000.0  # kW*hr from J
        elif self in [HydParam.Power]:
            if flow_units.is_traditional:
                data = data / 745.699872  # hp from W (Nm/s)
            elif flow_units.is_metric:
                data = data / 1000.0  # kW from W (Nm/s)
        elif self in [HydParam.Pressure]:
            if flow_units.is_traditional:
                data = data / 0.703249614902  # psi from m
        elif self in [HydParam.Volume]:
            if flow_units.is_traditional:
                data = data / np.power(0.3048, 3)  # ft3 from m3
        # Put back into data format passed in
        if data_type is dict:
            data = dict(zip(data_keys, data))
        elif data_type is list:
            data = list(data)
        return data
def to_si(from_units, data, param,
          mass_units=MassUnits.mg, pressure_units=None,
          darcy_weisbach=False, reaction_order=0):
    """Convert an EPANET parameter value from internal units to SI units.

    Dispatches to the private converter of the given parameter type.

    Parameters
    ----------
    from_units : :class:`~FlowUnits`
        The EPANET flow units (and therefore units system) to use for conversion
    data : float, array-like, dict
        The data to be converted
    param : :class:`~HydParam` or :class:`~QualParam`
        The parameter type for the data
    mass_units : :class:`~MassUnits`, optional
        The EPANET mass units (mg or ug internal to EPANET)
    pressure_units : :class:`~PressureUnits`, optional
        The EPANET pressure units being used (based on `flow_units`, normally)
    darcy_weisbach : bool, optional
        For roughness coefficients, is this used in a Darcy-Weisbach formula?
    reaction_order : int, optional
        For reaction coefficients, what is the reaction order?

    Returns
    -------
    float, array-like, or dict
        The data values converted into SI standard units

    Raises
    ------
    RuntimeError
        If `param` is neither a :class:`~HydParam` nor a :class:`~QualParam`.
    """
    if isinstance(param, HydParam):
        return param._to_si(from_units, data, darcy_weisbach)
    if isinstance(param, QualParam):
        return param._to_si(from_units, data, mass_units, reaction_order)
    raise RuntimeError('Invalid parameter: %s' % param)
def from_si(to_units, data, param,
            mass_units=MassUnits.mg, pressure_units=None,
            darcy_weisbach=False, reaction_order=0):
    """Convert an EPANET parameter value from SI units back to internal units.

    Dispatches to the private converter of the given parameter type.

    Parameters
    ----------
    to_units : :class:`~FlowUnits`
        The EPANET flow units (and therefore units system) to use for conversion
    data : float, array-like, dict
        The data to be converted
    param : :class:`~HydParam` or :class:`~QualParam`
        The parameter type for the data
    mass_units : :class:`~MassUnits`, optional
        The EPANET mass units (mg or ug internal to EPANET)
    pressure_units : :class:`~PressureUnits`, optional
        The EPANET pressure units being used (based on `flow_units`, normally)
    darcy_weisbach : bool, optional
        For roughness coefficients, is this used in a Darcy-Weisbach formula?
    reaction_order : int, optional
        For reaction coefficients, what is the reaction order?

    Returns
    -------
    float, array-like, or dict
        The data values converted into EPANET internal units

    Raises
    ------
    RuntimeError
        If `param` is neither a :class:`~HydParam` nor a :class:`~QualParam`.
    """
    if isinstance(param, HydParam):
        return param._from_si(to_units, data, darcy_weisbach)
    if isinstance(param, QualParam):
        return param._from_si(to_units, data, mass_units, reaction_order)
    raise RuntimeError('Invalid parameter: %s' % param)
class StatisticsType(enum.Enum):
    """Time-series statistics processing options for EPANET reports.

    .. rubric:: Enum Members

    ================ =========================================================================
    :attr:`~none`    No processing; report instantaneous values at output time `t`.
    :attr:`~Average` Mean of the values across the report period ending at time `t`.
    :attr:`~Minimum` Smallest value across all complete reporting periods.
    :attr:`~Maximum` Largest value across all complete reporting periods.
    :attr:`~Range`   Spread (max - min) across all complete reporting periods.
    ================ =========================================================================
    """
    none = 0
    Average = 1
    Minimum = 2
    Maximum = 3
    Range = 4

    def __init__(self, val):
        # Register the all-upper and all-lower spellings of the member name as
        # lookup aliases, e.g. StatisticsType['AVERAGE'] and ['average'].
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class QualType(enum.Enum):
    """EPANET water quality simulation type.

    .. rubric:: Enum Members

    ================ =========================================================================
    :attr:`~none`    Skip the water quality simulation entirely.
    :attr:`~Chem`    Simulate chemical transport.
    :attr:`~Age`     Simulate water age.
    :attr:`~Trace`   Run a tracer test (percent of water originating at the trace node).
    ================ =========================================================================
    """
    none = 0
    Chem = 1
    Age = 2
    Trace = 3

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'CHEM', 'chem').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class SourceType(enum.Enum):
    """Type of EPANET chemical source.

    .. rubric:: Enum Members

    ================== =========================================================================
    :attr:`~Concen`    Concentration source -- cannot be used at nodes with non-zero demand.
    :attr:`~Mass`      Mass source -- mass-per-minute injection; usable at any node.
    :attr:`~Setpoint`  Setpoint source -- force node quality to a fixed concentration.
    :attr:`~FlowPaced` Flow-paced source -- variable mass injection proportional to flow.
    ================== =========================================================================
    """
    Concen = 0
    Mass = 1
    Setpoint = 2
    FlowPaced = 3

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'MASS', 'mass').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class PressureUnits(enum.Enum):
    """EPANET output pressure units.

    .. rubric:: Enum Members

    =============== ====================================================
    :attr:`~psi`    Pounds per square inch (used with traditional flow units)
    :attr:`~kPa`    Kilopascals (used with metric flow units)
    :attr:`~Meters` Meters of H2O
    =============== ====================================================
    """
    psi = 0
    kPa = 1
    Meters = 2

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'KPA', 'kpa').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class FormulaType(enum.Enum):
    """Head-loss formula used to account for pipe roughness.

    .. rubric:: Enum Members

    =============== ==================================================================
    :attr:`~HW`     Hazen-Williams headloss formula (:attr:`~str`="H-W")
    :attr:`~DW`     Darcy-Weisbach formula; roughness requires units conversion
                    (:attr:`~str`='D-W')
    :attr:`~CM`     Chezy-Manning formula (:attr:`~str`="C-M")
    =============== ==================================================================
    """
    # Each value is (EPANET integer id, INP-file code string).
    HW = (0, "H-W",)
    DW = (1, "D-W",)
    CM = (2, "C-M",)

    def __init__(self, eid, inpcode):
        # Allow lookup by the EPANET integer id (FormulaType(0)), by the
        # INP-file code (FormulaType['H-W']), and by case variants of the name.
        self._value2member_map_[eid] = self
        self._member_map_[inpcode] = self
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __int__(self):
        # The EPANET integer id.
        return self.value[0]

    def __str__(self):
        # The INP-file code string.
        return self.value[1]
class ControlType(enum.Enum):
    """Type of simple control.

    .. rubric:: Enum Members

    ================== ==================================================================
    :attr:`~LowLevel`  Act when grade drops below the set level
    :attr:`~HiLevel`   Act when grade rises above the set level
    :attr:`~Timer`     Act when a set time (from simulation start) is reached
    :attr:`~TimeOfDay` Act at a given time of day (every day)
    ================== ==================================================================
    """
    LowLevel = 0
    HiLevel = 1
    Timer = 2
    TimeOfDay = 3

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'TIMER', 'timer').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class LinkTankStatus(enum.Enum):
    """Link and tank status codes used by the EPANET solver."""
    XHead = 0       #: pump cannot deliver head (closed)
    TempClosed = 1  #: temporarily closed
    Closed = 2      #: closed
    Open = 3        #: open
    Active = 4      #: valve active (partially open)
    XFlow = 5       #: pump exceeds maximum flow
    XFCV = 6        #: FCV cannot supply flow
    XPressure = 7   #: valve cannot supply pressure
    Filling = 8     #: tank filling
    Emptying = 9    #: tank emptying

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'OPEN', 'open').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name.
        return self.name
class MixType(enum.Enum):
    """Tank mixing model type.

    .. rubric:: Enum Members

    =============== ==================================================================
    :attr:`~Mix1`   Single-compartment mixing model
    :attr:`~Mix2`   Two-compartment mixing model
    :attr:`~FIFO`   First-in/first-out plug-flow model
    :attr:`~LIFO`   Last-in/first-out plug-flow model
    =============== ==================================================================
    """
    Mix1 = 0
    Mix2 = 1
    FIFO = 2
    LIFO = 3
    # `Mixed` and `TwoComp` reuse the values of Mix1/Mix2, making them enum
    # aliases of those members (MixType.Mixed is MixType.Mix1).
    Mixed = 0
    TwoComp = 1

    def __init__(self, val):
        # Register case-variant spellings as lookup aliases (e.g. 'MIX1', 'fifo').
        for alias in (self.name.upper(), self.name.lower()):
            if alias != self.name:
                self._member_map_[alias] = self

    def __str__(self):
        # Render as the bare member name, as it appears in INP files.
        return self.name
class ResultType(enum.Enum):
    """Categories of extended period simulation results."""
    demand = 1
    head = 2
    pressure = 3
    quality = 4
    flowrate = 5
    velocity = 6
    headloss = 7
    linkquality = 8
    status = 9
    setting = 10
    rxnrate = 11
    frictionfact = 12

    @property
    def is_node(self):
        """``True`` when the result is a nodal property (values 1-4)."""
        return self.value <= 4

    @property
    def is_link(self):
        """``True`` when the result is a link property (values 5-12)."""
        return self.value >= 5

    @property
    def is_qual(self):
        """``True`` when the result is water-quality related."""
        return self.value in (4, 8, 11)

    @property
    def is_hyd(self):
        """``True`` when the result is hydraulics related."""
        return self.value in (1, 2, 3, 5, 6, 7, 12)
class EN(enum.IntEnum):
    """All the ``EN_`` constants for the EPANET toolkit.
    For example, ``EN_LENGTH`` is accessed as ``EN.LENGTH``, instead. Please see the EPANET
    toolkit documentation for the description of these enums. Several enums are duplicated
    in separate classes above for clarity during programming.
    The enums can be broken into the following groups.
    - Node parameters: :attr:`~ELEVATION`, :attr:`~BASEDEMAND`, :attr:`~PATTERN`, :attr:`~EMITTER`, :attr:`~INITQUAL`, :attr:`~SOURCEQUAL`, :attr:`~SOURCEPAT`, :attr:`~SOURCETYPE`, :attr:`~TANKLEVEL`, :attr:`~DEMAND`, :attr:`~HEAD`, :attr:`~PRESSURE`, :attr:`~QUALITY`, :attr:`~SOURCEMASS`, :attr:`~INITVOLUME`, :attr:`~MIXMODEL`, :attr:`~MIXZONEVOL`, :attr:`~TANKDIAM`, :attr:`~MINVOLUME`, :attr:`~VOLCURVE`, :attr:`~MINLEVEL,`, :attr:`~MAXLEVEL`, :attr:`~MIXFRACTION`, :attr:`~TANK_KBULK`, :attr:`~TANKVOLUME`, :attr:`~MAXVOLUME`
    - Link parameters: :attr:`~DIAMETER`, :attr:`~LENGTH`, :attr:`~ROUGHNESS`, :attr:`~MINORLOSS`, :attr:`~INITSTATUS`, :attr:`~INITSETTING`, :attr:`~KBULK`, :attr:`~KWALL`, :attr:`~FLOW`, :attr:`~VELOCITY`, :attr:`~HEADLOSS`, :attr:`~STATUS`, :attr:`~SETTING`, :attr:`~ENERGY`, :attr:`~LINKQUAL`, :attr:`~LINKPATTERN`
    - Time parameters: :attr:`~DURATION`, :attr:`~HYDSTEP`, :attr:`~QUALSTEP`, :attr:`~PATTERNSTEP`, :attr:`~PATTERNSTART`, :attr:`~REPORTSTEP`, :attr:`~REPORTSTART`, :attr:`~RULESTEP`, :attr:`~STATISTIC`, :attr:`~PERIODS`, :attr:`~STARTTIME`, :attr:`~HTIME`, :attr:`~HALTFLAG`, :attr:`~NEXTEVENT`
    - Solver parameters: :attr:`~ITERATIONS`, :attr:`~RELATIVEERROR`
    - Component counts: :attr:`~NODECOUNT`, :attr:`~TANKCOUNT`, :attr:`~LINKCOUNT`, :attr:`~PATCOUNT`, :attr:`~CURVECOUNT`, :attr:`~CONTROLCOUNT`
    - Node types: :attr:`~JUNCTION`, :attr:`~RESERVOIR`, :attr:`~TANK`
    - Link types: :attr:`~CVPIPE`, :attr:`~PIPE`, :attr:`~PUMP`, :attr:`~PRV`, :attr:`~PSV`, :attr:`~PBV`, :attr:`~FCV`, :attr:`~TCV`, :attr:`~GPV`
    - Quality analysis types: :attr:`~NONE`, :attr:`~CHEM`, :attr:`~AGE`, :attr:`~TRACE`
    - Source quality types: :attr:`~CONCEN`, :attr:`~MASS`, :attr:`~SETPOINT`, :attr:`~FLOWPACED`
    - Flow unit types: :attr:`~CFS`, :attr:`~GPM`, :attr:`~MGD`, :attr:`~IMGD`, :attr:`~AFD`, :attr:`~LPS`, :attr:`~LPM`, :attr:`~MLD`, :attr:`~CMH`, :attr:`~CMD`
    - Miscellaneous options: :attr:`~TRIALS`, :attr:`~ACCURACY`, :attr:`~TOLERANCE`, :attr:`~EMITEXPON`, :attr:`~DEMANDMULT`
    - Control types: :attr:`~LOWLEVEL`, :attr:`~HILEVEL`, :attr:`~TIMER`, :attr:`~TIMEOFDAY`
    - Time statistic types: :attr:`~NONE`, :attr:`~AVERAGE`, :attr:`~MINIMUM`, :attr:`~MAXIMUM`, :attr:`~RANGE`
    - Tank mixing model types: :attr:`~MIX1`, :attr:`~MIX2`, :attr:`~FIFO`, :attr:`~LIFO`
    - Save results flag: :attr:`~NOSAVE`, :attr:`~SAVE`, :attr:`~INITFLOW`
    - Pump behavior types: :attr:`~CONST_HP`, :attr:`~POWER_FUNC`, :attr:`~CUSTOM`

    .. note::
        Members from different groups intentionally share integer values
        (e.g. ``ELEVATION`` and ``DIAMETER`` are both 0). Python treats members
        with equal values as aliases of the first member defined with that
        value, so identity comparisons and ``EN(value)`` lookups resolve to the
        first such member, and iteration over ``EN`` skips the aliases.
        Attribute access (``EN.DIAMETER``) still yields the expected integer.
    """
    # Node parameters
    ELEVATION = 0
    BASEDEMAND = 1
    PATTERN = 2
    EMITTER = 3
    INITQUAL = 4
    SOURCEQUAL = 5
    SOURCEPAT = 6
    SOURCETYPE = 7
    TANKLEVEL = 8
    DEMAND = 9
    HEAD = 10
    PRESSURE = 11
    QUALITY = 12
    SOURCEMASS = 13
    INITVOLUME = 14
    MIXMODEL = 15
    MIXZONEVOL = 16
    TANKDIAM = 17
    MINVOLUME = 18
    VOLCURVE = 19
    MINLEVEL = 20
    MAXLEVEL = 21
    MIXFRACTION = 22
    TANK_KBULK = 23
    TANKVOLUME = 24
    MAXVOLUME = 25
    # Link parameters (values overlap node parameters; these are aliases)
    DIAMETER = 0
    LENGTH = 1
    ROUGHNESS = 2
    MINORLOSS = 3
    INITSTATUS = 4
    INITSETTING = 5
    KBULK = 6
    KWALL = 7
    FLOW = 8
    VELOCITY = 9
    HEADLOSS = 10
    STATUS = 11
    SETTING = 12
    ENERGY = 13
    LINKQUAL = 14
    LINKPATTERN = 15
    # Time parameters
    DURATION = 0
    HYDSTEP = 1
    QUALSTEP = 2
    PATTERNSTEP = 3
    PATTERNSTART = 4
    REPORTSTEP = 5
    REPORTSTART = 6
    RULESTEP = 7
    STATISTIC = 8
    PERIODS = 9
    STARTTIME = 10
    HTIME = 11
    HALTFLAG = 12
    NEXTEVENT = 13
    # Solver parameters
    ITERATIONS = 0
    RELATIVEERROR= 1
    # Component counts
    NODECOUNT = 0
    TANKCOUNT = 1
    LINKCOUNT = 2
    PATCOUNT = 3
    CURVECOUNT = 4
    CONTROLCOUNT = 5
    # Node types
    JUNCTION = 0
    RESERVOIR = 1
    TANK = 2
    # Link types
    CVPIPE = 0
    PIPE = 1
    PUMP = 2
    PRV = 3
    PSV = 4
    PBV = 5
    FCV = 6
    TCV = 7
    GPV = 8
    # Quality analysis types (NONE also serves the time statistic group)
    NONE = 0
    CHEM = 1
    AGE = 2
    TRACE = 3
    # Source quality types
    CONCEN = 0
    MASS = 1
    SETPOINT = 2
    FLOWPACED = 3
    # Flow unit types
    CFS = 0
    GPM = 1
    MGD = 2
    IMGD = 3
    AFD = 4
    LPS = 5
    LPM = 6
    MLD = 7
    CMH = 8
    CMD = 9
    # Miscellaneous options
    TRIALS = 0
    ACCURACY = 1
    TOLERANCE = 2
    EMITEXPON = 3
    DEMANDMULT = 4
    # Control types
    LOWLEVEL = 0
    HILEVEL = 1
    TIMER = 2
    TIMEOFDAY = 3
    # Time statistic types (NONE = 0 defined above)
    AVERAGE = 1
    MINIMUM = 2
    MAXIMUM = 3
    RANGE = 4
    # Tank mixing model types
    MIX1 = 0
    MIX2 = 1
    FIFO = 2
    LIFO = 3
    # Save results flags
    NOSAVE = 0
    SAVE = 1
    INITFLOW = 10
    # Pump behavior types
    CONST_HP = 0
    POWER_FUNC = 1
    CUSTOM = 2
|
{"hexsha": "e0d831576c8e15884d70b75a2f8b274176d40678", "size": 38407, "ext": "py", "lang": "Python", "max_stars_repo_path": "wntr/epanet/util.py", "max_stars_repo_name": "xiamo311/AquaSCALE", "max_stars_repo_head_hexsha": "28968d1b349c2370d8c20bda5b6675270e4ab65d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wntr/epanet/util.py", "max_issues_repo_name": "xiamo311/AquaSCALE", "max_issues_repo_head_hexsha": "28968d1b349c2370d8c20bda5b6675270e4ab65d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wntr/epanet/util.py", "max_forks_repo_name": "xiamo311/AquaSCALE", "max_forks_repo_head_hexsha": "28968d1b349c2370d8c20bda5b6675270e4ab65d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8387665198, "max_line_length": 531, "alphanum_fraction": 0.5569036894, "include": true, "reason": "import numpy", "num_tokens": 10172}
|
C @(#)ickikk.f 20.3 2/13/96
      function ickikk(kt,mt)
C
C     ICKIKK determines the status of the variable KT in the control
C     scheme.  MT is an output argument: the partner bus paired with
C     KT (0 when KT is not in the scheme).
C
C     ICKIKK      CONTROL
C     ------      -------
C        0        KT NOT IN CONTROL SCHEME
C        1        V(KT)-->V(MT)
C        2        V(KT)<--V(MT)
C        3        T(KT)-->V(MT)
C        4        V(KT)<--T(MT)
C       -N        ABOVE CONTROL IS FLAGGED BUT INACTIVE
C
      include 'ipfinc/parametr.inc'
      include 'ipfinc/ikk.inc'
C
      ickikk=0
      mt=0
C     Scan the INDX table starting at IKK(4,KT) for an entry whose
C     first field equals KT exactly; entries stored with a negated
C     first field for KT are stepped over, and the scan stops at the
C     first entry that does not belong to KT at all.
      ix=ikk(4,kt)
  100 if (ix.eq.0.or.ix.gt.nindxx) go to 120
      if (indx(1,ix).eq.kt) go to 110
      if (iabs(indx(1,ix)).ne.kt) go to 120
      ix=ix+1
      go to 100
C     Match found: INDX(2,*) is the control code, INDX(3,*) the partner
C     bus.  A zero IKK(2,KT) flag marks the control inactive, which is
C     signalled by negating the returned code.
  110 ickikk=indx(2,ix)
      mt=indx(3,ix)
      if(ikk(2,kt).eq.0) ickikk = -ickikk
  120 continue
      return
C
C     THIS ENTRY DETERMINES THE POSSIBILITY OF ESTABLISHING
C     CONTROLS KT-->MT.  The result is a bit code: +1 is added when an
C     INDX entry KT-->MT exists, +2 when an entry MT-->KT exists.
C
      entry icktmt(kt,mt)
      icktmt=0
      ix=ikk(4,kt)
  122 if(ix.eq.0.or.ix.gt.nindxx) go to 126
      if (iabs(indx(1,ix)).ne.kt) go to 126
      if (indx(3,ix).eq.mt) go to 124
      ix=ix+1
      go to 122
  124 icktmt=1
  126 ix=ikk(4,mt)
  127 if (ix.eq.0.or.ix.gt.nindxx) go to 130
      if (iabs(indx(1,ix)).ne.mt) go to 130
      if (indx(3,ix).eq.kt) go to 128
      ix=ix+1
      go to 127
  128 icktmt=icktmt+2
  130 return
C
C     THIS ENTRY RETURNS THE CONSTRAINT INDEX (from JNDX(3,*))
C     ASSOCIATED WITH CONSTRAINT TYPE JT for variable KT; 0 is
C     returned when KT has no constraint of that type.
C
C     JCKIKK      CONTROL
C     ------      --------
C        0        ILLEGAL
C        1        (NOT USED)
C        2        S(K)=F(TAP(M))
C        3        S(K)=F(AREA(M))
C        4        S(K)=F(PCNTVAR(M))
C        5        S(K)=F(TIE(M))
C        6        TAP(K)=U(M)
C        7        TAP(K)=F(TIE(M))
C
      entry jckikk(kt,jt)
      jckikk=0
C     Scan the JNDX table starting at IKK(5,KT) for an entry of type JT.
      ix = ikk(5,kt)
  140 if (ix.eq.0.or.ix.gt.njndxx) go to 180
      if (iabs(jndx(1,ix)).ne.kt) go to 180
      if (jndx(2,ix).ne.jt) go to 150
      jckikk = jndx(3,ix)
      go to 180
  150 ix=ix+1
      go to 140
  180 continue
      return
      end
|
{"hexsha": "f3211bd7a7956f654f65730a8ef5e51a49627fea", "size": 2186, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/ickikk.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/ickikk.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/ickikk.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 24.2888888889, "max_line_length": 68, "alphanum_fraction": 0.4661482159, "num_tokens": 829}
|
import gym
import numpy as np
import torch
import torch.optim as opt
from tqdm import tqdm
import gym_puzzle
from agent import Agent
# Hyperparameters
HIDDEN_NUM = 128 # number of neurons in the agent's hidden layer
EPISODE_NUM = 10000 # how many episodes to run
MAX_STEPS = 1000 # maximum number of actions within a single episode
GAMMA = .99 # discount factor for future rewards
env = gym.make('puzzle-v0')
agent = Agent(env.metadata['N'], HIDDEN_NUM)
optimizer = opt.Adam(agent.parameters())
# Roll out a single episode (acting until `done` becomes True) with the
# current policy and return the REINFORCE loss for it.
def do_episode():
    observation = env.reset()
    states, chosen_actions, step_rewards = [], [], []
    # --- Sample one trajectory under the current policy, without gradients ---
    agent.eval()
    with torch.no_grad():
        for _ in range(MAX_STEPS):
            # Wrap the observation as a single-element float batch and keep it.
            state = torch.tensor([observation], dtype=torch.float)
            states.append(state)
            # The agent outputs log-probabilities, hence the .exp() before sampling.
            action_probs = agent(state)[0].exp().numpy()
            action = np.random.choice(range(4), p=action_probs)
            observation, reward, done, _ = env.step(action)
            # Store the action one-hot encoded (used later to pick its log-prob).
            chosen_actions.append(torch.eye(4, dtype=torch.float)[action])
            step_rewards.append(reward)
            if done:
                break
    # --- Discounted returns G_t, computed back-to-front ---
    returns = []
    running = 0
    for r in reversed(step_rewards):
        running = GAMMA * running + r
        returns.append(running)
    returns.reverse()
    # --- REINFORCE loss: -(1/T) * sum_t G_t * log pi(a_t | s_t) ---
    agent.train()
    log_probs = [agent(s)[0] * a for s, a in zip(states, chosen_actions)]
    loss = 0
    for log_prob, g in zip(log_probs, returns):
        loss = loss - (log_prob * g).sum()
    return loss / len(states)
if __name__ == '__main__':
    # Training loop: one policy-gradient update per episode.
    for episode in tqdm(range(1, EPISODE_NUM + 1)):
        # Run one episode and obtain its loss
        loss = do_episode()
        # Update the policy using the loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
|
{"hexsha": "03060b863cfeaee84e9cee097d218bfbca7d1b50", "size": 1848, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "gpageinin/puzzle", "max_stars_repo_head_hexsha": "7aa12751bc0cb4d22aa91c2dd5b5fbc84ff65686", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "gpageinin/puzzle", "max_issues_repo_head_hexsha": "7aa12751bc0cb4d22aa91c2dd5b5fbc84ff65686", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "gpageinin/puzzle", "max_forks_repo_head_hexsha": "7aa12751bc0cb4d22aa91c2dd5b5fbc84ff65686", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.972972973, "max_line_length": 67, "alphanum_fraction": 0.6076839827, "include": true, "reason": "import numpy", "num_tokens": 670}
|
# -*- coding: utf-8 -*-
"""
Module implementing MainWindow.
"""
import sys
#import numpy as np
from math import pi, atan, sqrt
#import matplotlib.pyplot as plt
from datetime import datetime
import matplotlib
matplotlib.use("Qt5Agg") # 声明使用QT5
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Ui_MainWindow import Ui_MainWindow
# Module-level state shared between the GUI and the computation:
# x = [XA, XB] and y = [YA, YB] hold the coordinates of points A and B.
x = [0.0, 0.0]
y = [0.0, 0.0]
Sab = 0.0  # distance between A and B
Tab = 0.0  # azimuth of the direction A->B, in radians
# Compute the azimuth (bearing) angle.
def Azimuth(dx=None, dy=None):
    """Return the azimuth of the direction A->B in radians, in [0, 2*pi).

    Generalized: ``dx``/``dy`` may be passed explicitly to compute the azimuth
    of an arbitrary direction. When omitted (the original behavior), they are
    taken from the module-level points: dx = x[1]-x[0], dy = y[1]-y[0].

    The (dx, dy) == (0, 0) degenerate case returns pi/2 (callers are expected
    to reject coincident points before calling).
    """
    if dx is None:
        dx = x[1] - x[0]
    if dy is None:
        dy = y[1] - y[0]
    if dx == 0:
        # Direction lies on the y axis: straight "up" or "down".
        if dy >= 0:
            a = pi / 2
        else:
            a = pi * 3 / 2
    elif dy == 0:
        # Direction lies on the x axis: forward or backward.
        if dx >= 0:
            a = 0
        else:
            a = pi
    else:
        # atan gives (-pi/2, pi/2); shift into the correct quadrant.
        a = atan(dy / dx)
        if dx <= 0:
            a = a + pi        # quadrants II and III
        elif dy <= 0:
            a = a + 2 * pi    # quadrant IV
    return a
class Figure_Canvas(FigureCanvas):
    """Matplotlib canvas usable as a PyQt5 widget.

    Inheriting FigureCanvas makes this class simultaneously a Qt QWidget and a
    matplotlib drawing surface, which is what bridges PyQt5 and matplotlib.
    """
    def __init__(self, parent=None, width=5.1, height=4, dpi=10):
        # Build the Figure directly (matplotlib.figure.Figure, not pyplot) at a
        # fixed 80 dpi; the `dpi` argument is kept for signature compatibility.
        figure = Figure(figsize=(width, height), dpi=80)
        FigureCanvas.__init__(self, figure)
        self.setParent(parent)
        # A single subplot hosts all of the drawing.
        self.axes = figure.add_subplot(111)
    def StartPlot(self):
        """Plot points A and B, the segment between them, and the results."""
        ax = self.axes
        ax.set_xlabel('Y')
        ax.set_ylabel('X')
        # Draw A (red) first and B (yellow) second so the legend order matches.
        ax.scatter(y[0], x[0], c='red', marker='o')
        ax.scatter(y[1], x[1], c='yellow')
        ax.legend(('A', 'B'), loc='best')
        ax.set_title('Calculation Results', color='blue')
        ax.plot(y, x, c='blue', lw=0.5)
        # Label each point with its (x, y) coordinates.
        ax.annotate('(' + str(x[0]) + ',' + str(y[0]) + ')', xy=(y[0], x[0]), xytext=(-40, 6), textcoords='offset points', weight='heavy')
        ax.annotate('(' + str(x[1]) + ',' + str(y[1]) + ')', xy=(y[1], x[1]), xytext=(-40, 6), textcoords='offset points', weight='heavy')
        # Arrow from an offset label down to the segment midpoint, showing the
        # computed distance (Sab) and azimuth (Tab).
        mid_y = (y[0] + y[1]) / 2
        mid_x = (x[0] + x[1]) / 2
        ax.annotate('Sab = ' + str(Sab) + '; Tab = ' + str(Tab), xy=(mid_y, mid_x), xytext=(-80, 80), textcoords='offset points', color='blue', arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='b'))
class MainWindow(QMainWindow, Ui_MainWindow):
    """
    Main application window: reads the coordinates of two points (A, B),
    computes the distance and azimuth between them, plots the result, and
    can load the input from / save the output to text files.
    """
    def __init__(self, parent=None):
        """
        Constructor
        @param parent reference to the parent widget
        @type QWidget
        """
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        # Show an initial hint in the log pane (text is user-facing Chinese UI copy).
        self.plainTextEdit.setPlainText('[' + str(datetime.now()) + ']' + '输入数据或从文件打开来开始计算')
    @pyqtSlot()
    def on_action_Open_triggered(self):
        # Load A/B coordinates from a comma-separated text file: line 1 is
        # "XA,YA", line 2 is "XB,YB". Values are placed into the line edits.
        filename,_ = QFileDialog.getOpenFileName(self, '输入坐标数据', './', 'All Files (*);;Text Files (*.txt)');
        if filename == '':
            # User cancelled the dialog (empty filename); log and bail out.
            self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '打开失败 返回值为空')
            return 0
        f=open(filename,'r', encoding='utf-8')
        dic = []
        for line in f.readlines():
            line=line.strip('\n') # strip the trailing newline
            b=line.split(',') # split each line on ',' into a list of fields
            dic.append(b)
        self.lineEdit_XA.setText(dic[0][0])
        self.lineEdit_YA.setText(dic[0][1])
        self.lineEdit_XB.setText(dic[1][0])
        self.lineEdit_YB.setText(dic[1][1])
        self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '打开文件:' + str(filename))
        f.close()
    @pyqtSlot()
    def on_action_Save_triggered(self):
        """
        Append the current points and computed results to the output file.
        """
        # TODO: save the results
        with open('输出结果.txt','a') as f:
            f.write('[' + str(datetime.now()) + ']' + '\n')
            f.write('A:'+str([x[0], y[0]]) + ';B:' + str([x[1],y[1]]) + '\n')
            f.write('Sab = '+ str(Sab) + '; Tab = ' + str(Tab) + '\n')
            f.write('\n')
        self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '保存成功')
    @pyqtSlot()
    def on_action_Close_triggered(self):
        # Close the main window.
        self.close()
    @pyqtSlot()
    def on_action_Calculate_triggered(self):
        """
        Validate the inputs, compute distance and azimuth, and draw the plot.
        """
        # TODO: check for missing inputs, compute, and render the figure
        if self.lineEdit_XA.text() == '' or self.lineEdit_XB.text() == '' or self.lineEdit_YA.text() == '' or self.lineEdit_YB.text() == '': # empty line edits contain '' (empty string), not None
            self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '中断:参数为空')
            return 0
        XA = float(self.lineEdit_XA.text())
        XB = float(self.lineEdit_XB.text())
        YA = float(self.lineEdit_YA.text())
        YB = float(self.lineEdit_YB.text())
        if XA ==XB and YA == YB:
            # Coincident points: distance is 0 and the azimuth is undefined.
            self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '中断:两点重合')
            return 0
        global x, y, Sab, Tab # assign to the module-level globals
        x = [XA, XB]
        y = [YA, YB]
        Sab = sqrt((XA - XB) * (XA - XB) + (YA - YB) * (YA - YB) )
        Tab = Azimuth()
        self.lineEdit_Sab.setText(str(Sab))
        self.lineEdit_tab.setText(str(Tab))
        self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '计算完成:' + 'Sab = '+ str(Sab) + '; Tab = ' + str(Tab))
        ins = Figure_Canvas() # instantiate a FigureCanvas
        ins.StartPlot() # draw the plot
        graphicscene = QGraphicsScene() # a FigureCanvas cannot be put into a QGraphicsView directly; it must go into a QGraphicsScene first, and the scene into the view
        graphicscene.addWidget(ins) # the canvas is added to the scene as a QWidget
        # graphicscene=graphicscene.scaled(self.graphicsView.width()-10,self.graphicsView.height()-10)
        # NOTE(review): resizing the scene to the view is unresolved (kept from original).
        self.graphicsView.setScene(graphicscene) # put the QGraphicsScene into the QGraphicsView
        self.graphicsView.show() # call show to render the figure
    @pyqtSlot()
    def on_action_Quit_triggered(self):
        # Quit the application by closing the main window.
        self.close()
if __name__ == '__main__':
    # Application entry point: create the Qt application, show the main
    # window, and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    dlg = MainWindow()
    dlg.show()
    sys.exit(app.exec_())
|
{"hexsha": "c28223a477ecf0fd060b047010bd783b570f0e47", "size": 6441, "ext": "py", "lang": "Python", "max_stars_repo_path": "\u8ba1\u7b97\u8ddd\u79bb\u3001\u65b9\u4f4d\u89d2/MainWindow.py", "max_stars_repo_name": "Antrovirens/learn-surveying-software-designing", "max_stars_repo_head_hexsha": "96b492510b3a3ac970675ddffb25d2b4d2b9970c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-10-28T13:22:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T12:15:42.000Z", "max_issues_repo_path": "\u8ba1\u7b97\u8ddd\u79bb\u3001\u65b9\u4f4d\u89d2/MainWindow.py", "max_issues_repo_name": "Antrovirens/learn-surveying-software-designing", "max_issues_repo_head_hexsha": "96b492510b3a3ac970675ddffb25d2b4d2b9970c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-23T06:02:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-27T16:24:31.000Z", "max_forks_repo_path": "\u8ba1\u7b97\u8ddd\u79bb\u3001\u65b9\u4f4d\u89d2/MainWindow.py", "max_forks_repo_name": "Antrovirens/learn-surveying-software-designing", "max_forks_repo_head_hexsha": "96b492510b3a3ac970675ddffb25d2b4d2b9970c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-28T13:22:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-07T18:34:24.000Z", "avg_line_length": 33.0307692308, "max_line_length": 227, "alphanum_fraction": 0.552864462, "include": true, "reason": "import numpy", "num_tokens": 1923}
|
import sys # argv
import string
from collections import deque
import numpy as np
from heapdict import heapdict
# Parse the maze file given on the command line.
# grid[x, y] is True for walkable tiles; every non-wall, non-floor character
# (keys a-z, doors A-Z, start '@') is recorded in both direction maps.
grid = []
object_locs = {}
loc_objects = {}
with open(sys.argv[1]) as maze_file:
    for y, row in enumerate(maze_file):
        walkable_row = []
        for x, char in enumerate(row.strip()):
            walkable_row.append(char != '#')
            if char not in '.#':
                loc_objects[(x, y)] = char
                object_locs[char] = (x, y)
        grid.append(walkable_row)
# Transpose so the array is indexed grid[x, y], matching the (x, y) tuples above.
grid = np.array(grid).T
# make a more concise graph of shortest paths between keys/doors
def get_adjacent_locs(loc: tuple):
    """Return the four orthogonal neighbours of ``loc`` in +x, -x, +y, -y order."""
    x, y = loc
    deltas = ((1, 0), (-1, 0), (0, 1), (0, -1))
    return [(x + dx, y + dy) for dx, dy in deltas]
def find_adjacent_objects(start_loc: tuple):
    """BFS from ``start_loc`` to every key/door reachable without passing
    through another key/door; return ``{object char: walking distance}``.

    The search deliberately stops at each object found (other than '@'),
    so only *directly* reachable objects are reported.
    """
    frontier = deque([(start_loc, 0)])
    seen = {start_loc}
    found = {}
    while frontier:
        here, steps = frontier.popleft()
        for there in get_adjacent_locs(here):
            if not grid[there] or there in seen:
                continue  # wall, or already visited
            seen.add(there)
            obj = loc_objects.get(there)
            if obj is not None and obj != '@':
                # Record the object and do not expand past it.
                found[obj] = steps + 1
            else:
                frontier.append((there, steps + 1))
    return found
# Condensed graph over objects: for each key/door/start, the distance to
# every object that is directly reachable (no other object in between).
edges = {
    obj: find_adjacent_objects(loc)
    for obj, loc in object_locs.items()
}
def get_shortest_path_len():
    """Modified Dijkstra's algorithm for collecting all the keys.

    Search states are ``(current object, frozenset of keys held)``; the
    priority of a state is its accumulated walking distance.
    """
    objects_present = frozenset(object_locs.keys())
    all_keys = frozenset(string.ascii_lowercase) & objects_present
    all_doors = frozenset(string.ascii_uppercase) & objects_present
    frontier = heapdict()
    frontier[('@', frozenset())] = 0
    settled = set()
    while frontier:
        # heapdict.popitem yields the pending state with the smallest distance.
        (node, keys), dist = frontier.popitem()
        settled.add((node, keys))
        if keys == all_keys:
            return dist
        for neighbor, step_len in edges[node].items():
            # A door is passable only once its matching key is held.
            if neighbor in all_doors and neighbor.lower() not in keys:
                continue
            keys_after = keys | {neighbor} if neighbor in all_keys else keys
            candidate = (neighbor, keys_after)
            if candidate in settled:
                continue
            tentative = dist + step_len
            # Relax: insert or decrease-key, whichever applies.
            if candidate not in frontier or tentative < frontier[candidate]:
                frontier[candidate] = tentative
    raise RuntimeError("No path found")
print(get_shortest_path_len())  # length of the shortest walk that collects every key
|
{"hexsha": "a7d27795433654cde693d96e7895cf38aadbe622", "size": 2421, "ext": "py", "lang": "Python", "max_stars_repo_path": "18/first.py", "max_stars_repo_name": "qxzcode/aoc_2019", "max_stars_repo_head_hexsha": "5a6ae570d4ec62a1e05456b58562cb05d1c10f71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "18/first.py", "max_issues_repo_name": "qxzcode/aoc_2019", "max_issues_repo_head_hexsha": "5a6ae570d4ec62a1e05456b58562cb05d1c10f71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "18/first.py", "max_forks_repo_name": "qxzcode/aoc_2019", "max_forks_repo_head_hexsha": "5a6ae570d4ec62a1e05456b58562cb05d1c10f71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0384615385, "max_line_length": 81, "alphanum_fraction": 0.5708384965, "include": true, "reason": "import numpy", "num_tokens": 581}
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torchvision
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from common import evaluate
from common.utils import save_reconstructions
from data.mri_data import SliceData
class MRIModel(pl.LightningModule):
    """
    Abstract super class for Deep Learning based reconstruction models.
    This is a subclass of the LightningModule class from pytorch_lightning, with
    some additional functionality specific to fastMRI:
        - fastMRI data loaders
        - Evaluating reconstructions
        - Visualization
        - Saving test reconstructions
    To implement a new reconstruction model, inherit from this class and implement the
    following methods:
        - train_data_transform, val_data_transform, test_data_transform:
            Create and return data transformer objects for each data split
        - training_step, validation_step, test_step:
            Define what happens in one step of training, validation and testing respectively
        - configure_optimizers:
            Create and return the optimizers
    Other methods from LightningModule can be overridden as needed.
    """
    def __init__(self, hparams):
        super().__init__()
        # NOTE(review): direct assignment to self.hparams relies on older
        # pytorch_lightning behaviour — confirm against the pinned PL version.
        self.hparams = hparams
    def _create_data_loader(self, data_transform, data_partition, sample_rate=None):
        """Build a DataLoader over the SliceData set for one split.

        data_partition selects the '{challenge}_{partition}' directory;
        sample_rate falls back to the hparams value when not given.
        """
        sample_rate = sample_rate or self.hparams.sample_rate
        dataset = SliceData(
            root=self.hparams.data_path / f'{self.hparams.challenge}_{data_partition}',
            transform=data_transform,
            sample_rate=sample_rate,
            challenge=self.hparams.challenge
        )
        sampler = RandomSampler(dataset)
        # sampler = DistributedSampler(dataset)
        return DataLoader(
            dataset=dataset,
            batch_size=self.hparams.batch_size,
            num_workers=4,
            pin_memory=False,
            sampler=sampler,
        )
    def train_data_transform(self):
        # Subclasses must return the transform applied to training slices.
        raise NotImplementedError
    @pl.data_loader
    def train_dataloader(self):
        return self._create_data_loader(self.train_data_transform(), data_partition='train')
    def val_data_transform(self):
        # Subclasses must return the transform applied to validation slices.
        raise NotImplementedError
    @pl.data_loader
    def val_dataloader(self):
        return self._create_data_loader(self.val_data_transform(), data_partition='val')
    def test_data_transform(self):
        # Subclasses must return the transform applied to test slices.
        raise NotImplementedError
    @pl.data_loader
    def test_dataloader(self):
        # Test split always uses the full dataset (sample_rate=1.).
        return self._create_data_loader(self.test_data_transform(), data_partition='test', sample_rate=1.)
    def _evaluate(self, val_logs):
        """Aggregate per-step validation logs into volume-level NMSE/SSIM/PSNR.

        Slices are regrouped by filename, sorted by slice index, stacked into
        volumes, scored, and the mean of each metric is appended to a CSV.
        """
        losses = []
        outputs = defaultdict(list)
        targets = defaultdict(list)
        for log in val_logs:
            losses.append(log['val_loss'].cpu().numpy())
            # 'slice' here shadows the builtin; it holds the slice index used for sorting.
            for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
                outputs[fname].append((slice, log['output'][i]))
                targets[fname].append((slice, log['target'][i]))
        metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
        for fname in outputs:
            # Sorting the (slice, array) pairs orders slices within the volume.
            output = np.stack([out for _, out in sorted(outputs[fname])])
            target = np.stack([tgt for _, tgt in sorted(targets[fname])])
            metrics['nmse'].append(evaluate.nmse(target, output))
            metrics['ssim'].append(evaluate.ssim(target, output))
            metrics['psnr'].append(evaluate.psnr(target, output))
        # Collapse each per-volume list to its mean for logging.
        metrics = {metric: np.mean(values) for metric, values in metrics.items()}
        print(metrics, '\n')
        # save the metrics data (one CSV row per validation epoch)
        metric_file_path = Path(self.hparams.exp_dir) / self.hparams.exp / "validation_metrics"
        metric_file_path.mkdir(parents=True, exist_ok=True)
        metric_file_path = metric_file_path / "metrics.csv"
        df = pd.DataFrame([metrics])
        if metric_file_path.exists():
            df.to_csv(metric_file_path, mode="a", header=False, index=False)
        else:
            df.to_csv(metric_file_path, mode="w", header=True, index=False)
        return dict(log=metrics, **metrics)
    def _visualize(self, val_logs):
        """Save target/reconstruction/error image grids (up to 16 samples) as PNGs."""
        def _normalize(image):
            # Add a channel axis and rescale to [0, 1] for make_grid.
            image = image[np.newaxis]
            image -= image.min()
            return image / image.max()
        def _save_image(image, tag):
            grid = torchvision.utils.make_grid(torch.Tensor(image), nrow=4, pad_value=1)
            grid_path = Path(self.hparams.exp_dir) / self.hparams.exp / "image_validation_step"
            grid_path.mkdir(parents=True, exist_ok=True)
            grid_path = grid_path / tag
            # Convert the float grid to an HWC uint8 array for PIL.
            grid_np = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
            grid_pil = Image.fromarray(grid_np)
            try:
                grid_pil.save(grid_path, format="PNG")
            except ValueError as e:
                print(e)
        # Only process first size to simplify visualization.
        visualize_size = val_logs[0]['output'].shape
        val_logs = [x for x in val_logs if x['output'].shape == visualize_size]
        num_logs = len(val_logs)
        num_viz_images = 16
        # Stride through the logs so at most num_viz_images samples are shown.
        step = (num_logs + num_viz_images - 1) // num_viz_images
        outputs, targets = [], []
        for i in range(0, num_logs, step):
            outputs.append(_normalize(val_logs[i]['output'][0]))
            targets.append(_normalize(val_logs[i]['target'][0]))
        outputs = np.stack(outputs)
        targets = np.stack(targets)
        _save_image(targets, 'Target')
        _save_image(outputs, 'Reconstruction')
        _save_image(np.abs(targets - outputs), 'Error')
    def validation_epoch_end(self, val_logs):
        # Visualize first, then compute/log metrics for the epoch.
        self._visualize(val_logs)
        return self._evaluate(val_logs)
    def test_epoch_end(self, test_logs):
        """Regroup test outputs by filename into sorted volumes and save them to disk."""
        outputs = defaultdict(list)
        for log in test_logs:
            for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
                outputs[fname].append((slice, log['output'][i]))
        for fname in outputs:
            outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])])
        save_reconstructions(outputs, self.hparams.exp_dir / self.hparams.exp / 'reconstructions')
        return dict()
|
{"hexsha": "7917f6741b8b0ee6a80b493b6092a86f6f044089", "size": 6592, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/mri_model.py", "max_stars_repo_name": "ygrepo/fastMRI", "max_stars_repo_head_hexsha": "cb9a2019f1833bfffe4969023113189abcbad0f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/mri_model.py", "max_issues_repo_name": "ygrepo/fastMRI", "max_issues_repo_head_hexsha": "cb9a2019f1833bfffe4969023113189abcbad0f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/mri_model.py", "max_forks_repo_name": "ygrepo/fastMRI", "max_forks_repo_head_hexsha": "cb9a2019f1833bfffe4969023113189abcbad0f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-11T07:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-11T07:15:41.000Z", "avg_line_length": 39.9515151515, "max_line_length": 109, "alphanum_fraction": 0.6513956311, "include": true, "reason": "import numpy", "num_tokens": 1416}
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# load tensorflow and keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers, optimizers, datasets
from tensorflow.keras.layers.experimental import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, roc_curve, auc
from sklearn.model_selection import cross_val_score, KFold, StratifiedKFold
from sklearn.inspection import permutation_importance
from sklearn.metrics import precision_recall_curve, f1_score
#helper libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Make numpy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)  # compact, non-scientific numpy printouts
print(tf.__version__)  # record the TensorFlow version used for this run
# In[ ]:
#Plotting function
def plot_history(model_history, model_name):
    """Show train/test loss curves for a Keras History, raw and log-scaled, side by side."""
    train_loss = model_history.history['loss']
    test_loss = model_history.history['val_loss']
    fig = plt.figure(figsize=(15, 5), facecolor='w')
    panels = [
        (121, train_loss, test_loss, ': Model loss', 'Loss'),
        (122, np.log(train_loss), np.log(test_loss), ': Log model loss', 'Log loss'),
    ]
    for position, train_curve, test_curve, title_suffix, y_label in panels:
        ax = fig.add_subplot(position)
        ax.plot(train_curve)
        ax.plot(test_curve)
        ax.set(title=model_name + title_suffix, ylabel=y_label, xlabel='Epoch')
        ax.legend(['Train', 'Test'], loc='upper right')
    plt.show()
    plt.close()
# In[ ]:
# Define the cross-validator object for regression, which inherits from
# StratifiedKFold, overwritting the split method
#code source: https://colab.research.google.com/drive/1KnXujsQDvLZOgCRg_iis036cwffwZM2_?usp=sharing#scrollTo=2q_q9w8Jpmwd
# &https://github.com/scikit-learn/scikit-learn/issues/4757
class StratifiedKFoldReg(StratifiedKFold):
    """
    This class generates cross-validation partitions
    for regression setups, such that these partitions
    resemble the original sample distribution of the
    target variable.

    It works by rank-ordering the continuous target, chopping the ranks into
    quasi-equal-size bins, and handing those bin labels to StratifiedKFold.
    """
    def split(self, X, y, groups=None):
        n_samples = len(y)
        # Number of labels to discretize our target variable,
        # into bins of quasi equal size
        n_labels = int(np.round(n_samples/self.n_splits))
        # Assign a label to each bin of n_splits points
        y_labels_sorted = np.concatenate([np.repeat(ii, self.n_splits) for ii in range(n_labels)])
        # Get number of points that would fall
        # out of the equally-sized bins
        mod = np.mod(n_samples, self.n_splits)
        # Find unique idxs of first unique label's ocurrence
        _, labels_idx = np.unique(y_labels_sorted, return_index=True)
        # Randomly pick which bins absorb the `mod` leftover points.
        # NOTE(review): uses the global numpy RNG without a seed, so the
        # partitioning is not fully reproducible across runs — confirm intent.
        rand_label_ix = np.random.choice(labels_idx, mod, replace=False)
        # insert these at the beginning of the corresponding bin
        y_labels_sorted = np.insert(y_labels_sorted, rand_label_ix, y_labels_sorted[rand_label_ix])
        # Map each sample (via its rank in y) to its bin label ...
        map_labels_y = dict()
        for ix, label in zip(np.argsort(y), y_labels_sorted):
            map_labels_y[ix] = label
        # ... then put the labels back into the original order of y.
        y_labels = np.array([map_labels_y[ii] for ii in range(n_samples)])
        return super().split(X, y_labels, groups)
# In[ ]:
# Load the merged simeval dataset and drop identifier/bookkeeping columns
# that must not be used as model features.
raw_dataset = pd.read_csv('.spyder-py3/merged_datasets_for_simeval.csv')
dataset = raw_dataset.copy()
dataset.drop(['dofv', 'ID', 'Study_ID', 'Model_number', 'lin_model'], axis = 1, inplace=True)
dataset.head()  # notebook-style peek at the remaining columns
# In[ ]:
# Split features and labels: 'residual' is the regression target.
x = dataset.copy()
y = x.pop('residual')
# In[ ]:
# Normalization is handled by a Keras Normalization layer inside the model,
# so the earlier StandardScaler step is kept only for reference.
# scaler = StandardScaler().fit(x)
# X = scaler.transform(x)
X = x.values # Normalization is built into model now
# In[ ]:
##################################################################################################################################################
# In[ ]:
#ANN5: This is the final model
# Stratified 10-fold CV: per fold, build a fresh network (with an input
# Normalization layer adapted on that fold's training data), train it, and
# accumulate losses plus out-of-fold predictions for the global plot below.
n_splits = 10 #number of folds
loss_per_fold = [] #to store test loss value in each fold
Train_loss_per_fold = [] #to store training loss value in each fold
predcited_y = np.array([]) #to store predicted residual value from each CV fold
true_y = np.array([]) #to store true residual value from each CV fold
cv_stratified = StratifiedKFoldReg(n_splits=n_splits, shuffle=True, random_state=10) # Stratified CV
fold_no = 1
for ii, (train_index, test_index) in enumerate(cv_stratified.split(X, y)):
    y_train, y_test = y[train_index], y[test_index]
    X_train, X_test = X[train_index], X[test_index]
    #Define and summarize the model: Normalization -> 48 -> 24 -> 12 -> Dropout -> 1
    inps = layers.Input(shape=X_train[0].shape)
    norm_layer = layers.Normalization(axis=1)
    norm_layer.adapt(X_train)  # fit mean/variance on this fold's training data only
    x = norm_layer(inps)
    x = layers.Dense(48, activation='relu')(x)
    x = layers.Dense(24, activation='relu')(x)
    x = layers.Dense(12, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    preds = layers.Dense(1)(x)
    ANN5 = models.Model(inputs=inps, outputs=preds)
    #Compile the model
    # NOTE(review): the `lr=` keyword of RMSprop is deprecated in newer
    # TensorFlow (renamed `learning_rate`) — confirm against the pinned version.
    lr = 0.00007
    ANN5.compile(optimizer=optimizers.RMSprop(lr=lr), loss='mse')
    # Generate a print
    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')
    test_labels = y_test.to_list()
    test_labels = [round(num, 2) for num in test_labels]
    print(test_labels) #to have a look at the true residual values for test dataset
    #print histogram of y_test and y_train
    fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
    axs.hist(y_train, label="training")
    axs.hist(y_test, label="test")
    axs.legend()
    plt.tight_layout()
    # Fit data to model
    history = ANN5.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
    plot_history(history, 'ANN5')
    #to store values for plotting global predicted vs. true residual values
    test_predictions = ANN5.predict(X_test).flatten()
    predcited_y = np.append(predcited_y, test_predictions)
    y_test_array = y_test.values
    true_y = np.append(true_y, y_test_array)
    # Generate generalization metrics (loss is MSE for both splits)
    scores = ANN5.evaluate(X_test, y_test, verbose=0)
    print(f'Test Score for fold {fold_no}: {ANN5.metrics_names} of {scores}')
    scores_training = ANN5.evaluate(X_train, y_train, verbose=0)
    print(f'Training Score for fold {fold_no}: {ANN5.metrics_names} of {scores_training}')
    loss_per_fold.append(scores)
    Train_loss_per_fold.append(scores_training)
    # Increase fold number
    fold_no = fold_no + 1
# global plot true vs. predicted (pooled over all out-of-fold predictions)
a = plt.axes(aspect='equal')
plt.scatter(predcited_y, true_y)
plt.xlabel('Predictions [residual]')
plt.ylabel('True Values [residual]')
lims = [-5, 20]
plt.xlim(lims)
plt.ylim(lims)
prediction_plot = plt.plot(lims, lims)  # identity line for reference
# In[ ]:
# == Provide average scores ==
# Per-fold train/test losses from the CV run above, then their means.
divider = '------------------------------------------------------------------------'
print(divider)
print('Score per fold')
for fold_idx, (train_loss, test_loss) in enumerate(zip(Train_loss_per_fold, loss_per_fold), start=1):
    print(divider)
    print(f'> Fold {fold_idx} - Training Loss: {train_loss} - Testing Loss: {test_loss} -')
print(divider)
print('Average scores for all folds:')
print(f'> Test Loss: {np.mean(loss_per_fold)}')
print(f'> Training Loss: {np.mean(Train_loss_per_fold)}')
print(divider)
# In[ ]:
#permutation importance
# Column names in the same order as the columns of X.
feature_name = ['Model_subjects', 'Model_observations',
                'Obsi_Obs_Subj', 'Covariate_relations', 'Max_cov', 'Max_CWRESi', 'Median_CWRESi',
                'Max_EBEij_omegaj', 'OFVRatio', 'mean_ETC_omega']
# NOTE(review): sklearn's permutation_importance expects an sklearn-style
# estimator; passing a raw Keras model here may depend on the installed
# sklearn version accepting any object with predict() — verify it runs.
r = permutation_importance(ANN5, X, y,
                           n_repeats=30,
                           random_state=0, scoring='neg_mean_squared_error')
# Report features whose mean importance is significantly above zero
# (mean minus two standard deviations still positive), largest first.
for i in r.importances_mean.argsort()[::-1]:
    if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
        print(f"{feature_name[i]:<10}"
              f"{r.importances_mean[i]:.3f}"
              f" +/- {r.importances_std[i]:.3f}")
# In[ ]:
#sensitivity analysis: confusion counts and derived metrics at a fixed
#|true residual| cutoff of 3, for predicted-residual cutoffs 3.0, 2.5, 2.0.
def _confusion_counts(true_vals, pred_vals, pred_cutoff, true_cutoff=3):
    """Return (TP, FP, TN, FN) comparing |true| > true_cutoff (actual outlier)
    against |pred| > pred_cutoff (flagged outlier), element-wise."""
    tp = fp = tn = fn = 0
    for true_val, pred_val in zip(true_vals, pred_vals):
        actual_outlier = abs(true_val) > true_cutoff
        flagged_outlier = abs(pred_val) > pred_cutoff
        if actual_outlier and flagged_outlier:
            tp += 1
        elif not actual_outlier and flagged_outlier:
            fp += 1
        elif not actual_outlier and not flagged_outlier:
            tn += 1
        else:
            fn += 1
    return tp, fp, tn, fn

for _pred_cutoff in (3, 2.5, 2):
    true_positives, false_positives, true_negative, false_negative = _confusion_counts(
        true_y, predcited_y, _pred_cutoff)
    print(f'> TP: {true_positives}')
    print(f'> FP: {false_positives}')
    print(f'> TN: {true_negative}')
    print(f'> FN: {false_negative}')
    # NOTE: like the original cells, this divides by zero if a margin is empty.
    sensitivity = true_positives/(true_positives + false_negative)
    specificity = true_negative/(true_negative + false_positives)
    precision = true_positives/(true_positives + false_positives)
    print(f'> Sensitivity: {round(sensitivity, 3)}')
    print(f'> Specificity: {round(specificity, 3)}')
    print(f'> Precision: {round(precision, 3)}')
# In[ ]:
#Convert target values to binary to plot ROC & PR curves (cutoff=3)
# 1.0 marks an outlier (residual > 3), 0.0 otherwise; dtype of the source
# arrays is preserved, matching the element-wise overwrite it replaces.
true_outliers_binary = (true_y > 3).astype(true_y.dtype)
predicted_outliers_binary = (predcited_y > 3).astype(predcited_y.dtype)
# In[ ]:
# ROC curve over the binarized predictions.
# Note: because the "scores" are already 0/1, the curve has a single
# interior operating point rather than a smooth trade-off.
fpr, tpr, threshold = roc_curve(true_outliers_binary, predicted_outliers_binary)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# In[ ]:
# precision-recall curve and f1 over the binarized predictions
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
lr_precision, lr_recall, _ = precision_recall_curve(true_outliers_binary, predicted_outliers_binary)
lr_f1, lr_auc = f1_score(true_outliers_binary, predicted_outliers_binary), auc(lr_recall, lr_precision)
# summarize scores
print('f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
# plot the precision-recall curves
# "no skill" baseline = prevalence of the positive (outlier) class
no_skill = len(true_outliers_binary[true_outliers_binary==1]) / len(true_outliers_binary)
plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')
plt.plot(lr_recall, lr_precision, marker='.', label='model')
# axis labels
plt.xlabel('Recall')
plt.ylabel('Precision')
# show the legend
plt.legend()
# show the plot
plt.show()
# In[ ]:
#plot ROC curve at different cut-off values (0.5-3.0)
# The true labels are always binarized at 3; the predicted labels are
# binarized at 0.5, 1.0, ..., 3.0, giving six ROC curves on one axes.
cutoff_pred = 0 # will be increased gradually in the for loop (+0.5/loop)
cutoff_true = 3 # constant
true_outliers_binary = true_y.copy() #to convert continuous values into binary (using cutoff_true)
predicted_outliers_binary = predcited_y.copy() #to convert continuous values into binary (using cutoff_pred)
fpr = {} # to store fpr values from each loop with distinct variable name
tpr = {} # same
threshold = {} # same
roc_auc= {} # same
for k in range(0,6):
    # Fresh copies each iteration so the previous binarization is discarded.
    true_outliers_binary = true_y.copy()
    predicted_outliers_binary = predcited_y.copy()
    cutoff_pred= cutoff_pred + 0.5
    for i in range(0,len(true_outliers_binary)):
        if true_outliers_binary[i] > cutoff_true:
            true_outliers_binary[i] = 1
        else:
            true_outliers_binary[i] = 0
    for i in range(0,len(predicted_outliers_binary)):
        if predicted_outliers_binary[i] > cutoff_pred:
            predicted_outliers_binary[i] = 1
        else:
            predicted_outliers_binary[i] = 0
    print(cutoff_pred)
    # Keys like "fpr0".."fpr5" keep each cutoff's arrays separate.
    fpr["fpr%s" %k], tpr["tpr%s" %k],threshold["threshold%s" %k] = roc_curve(true_outliers_binary, predicted_outliers_binary)
    roc_auc["roc_auc%s" %k] = auc(fpr["fpr%s" %k], tpr["tpr%s" %k])
plt.style.use('seaborn')
plt.title('Receiver Operating Characteristic')
plt.plot(fpr['fpr0'], tpr['tpr0'], linestyle='--',color='orange', label='cutoff=0.5'+',AUC = %0.2f' % roc_auc['roc_auc0'])
plt.plot(fpr['fpr1'], tpr['tpr1'], linestyle='--',color='green', label='cutoff=1.0'+',AUC = %0.2f' % roc_auc['roc_auc1'])
plt.plot(fpr['fpr2'], tpr['tpr2'], linestyle='--',color='blue', label='cutoff=1.5'+',AUC = %0.2f' % roc_auc['roc_auc2'])
plt.plot(fpr['fpr3'], tpr['tpr3'], linestyle='--',color='red', label='cutoff=2.0'+',AUC = %0.2f' % roc_auc['roc_auc3'])
plt.plot(fpr['fpr4'], tpr['tpr4'], linestyle='--',color='black', label='cutoff=2.5'+',AUC = %0.2f' % roc_auc['roc_auc4'])
plt.plot(fpr['fpr5'], tpr['tpr5'], linestyle='--',color='yellow', label='cutoff=3.0'+',AUC = %0.2f' % roc_auc['roc_auc5'])
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r-')  # chance diagonal
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# In[ ]:
# precision-recall curve and f1 at different predicted-residual cutoffs
# (0.5-3.0, step 0.5); true labels are always binarized at 3.
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
cutoff_pred = 0
cutoff_true = 3
true_outliers_binary = true_y.copy()
predicted_outliers_binary = predcited_y.copy()
lr_precision = {}
lr_recall = {}
lr_f1 = {}
lr_auc= {}
for k in range(0,6):
    # Fresh copies each iteration so the previous binarization is discarded.
    true_outliers_binary = true_y.copy()
    predicted_outliers_binary = predcited_y.copy()
    cutoff_pred= cutoff_pred + 0.5
    for i in range(0,len(true_outliers_binary)):
        if true_outliers_binary[i] > cutoff_true:
            true_outliers_binary[i] = 1
        else:
            true_outliers_binary[i] = 0
    for i in range(0,len(predicted_outliers_binary)):
        if predicted_outliers_binary[i] > cutoff_pred:
            predicted_outliers_binary[i] = 1
        else:
            predicted_outliers_binary[i] = 0
    print(cutoff_pred)
    # Keys like "lr_precision0".."lr_precision5" keep each cutoff's results separate.
    lr_precision["lr_precision%s" %k], lr_recall["lr_recall%s" %k], _ = precision_recall_curve(true_outliers_binary, predicted_outliers_binary)
    lr_f1["lr_f1%s" %k], lr_auc["lr_auc%s" %k] = f1_score(true_outliers_binary, predicted_outliers_binary), auc(lr_recall["lr_recall%s" %k], lr_precision["lr_precision%s" %k])
plt.style.use('seaborn')
plt.title('PR plot')
plt.plot(lr_recall['lr_recall0'], lr_precision['lr_precision0'], linestyle='--',color='orange', label='cutoff=0.5'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc0'], lr_f1['lr_f10']))
plt.plot(lr_recall['lr_recall1'], lr_precision['lr_precision1'], linestyle='--',color='green', label='cutoff=1.0'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc1'], lr_f1['lr_f11']))
plt.plot(lr_recall['lr_recall2'], lr_precision['lr_precision2'], linestyle='--',color='blue', label='cutoff=1.5'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc2'], lr_f1['lr_f12']))
plt.plot(lr_recall['lr_recall3'], lr_precision['lr_precision3'], linestyle='--',color='red', label='cutoff=2.0'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc3'], lr_f1['lr_f13']))
plt.plot(lr_recall['lr_recall4'], lr_precision['lr_precision4'], linestyle='--',color='black', label='cutoff=2.5'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc4'], lr_f1['lr_f14']))
plt.plot(lr_recall['lr_recall5'], lr_precision['lr_precision5'], linestyle='--',color='yellow', label='cutoff=3.0'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc5'], lr_f1['lr_f15']))
plt.legend(loc = 'lower left')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.show()
# In[ ]:
#ANN5 with ROC curve for each fold
# Same stratified CV as above, but additionally binarizes each fold's
# out-of-fold results at cutoff 3 and draws one ROC and one PR curve per fold.
# NOTE(review): unlike the first CV block, this model has no input
# Normalization layer — confirm whether that omission is intentional.
n_splits = 10 #number of folds
loss_per_fold = [] #to store test loss value in each fold
Train_loss_per_fold = [] #to store training loss value in each fold
predcited_y = np.array([]) #to store predicted residual value from each CV fold
true_y = np.array([]) #to store true residual value from each CV fold
tprs = [] #to store values
fprs = []
aucs = []
fpr = {} #to store values with different variable name in each fold
tpr = {}
threshold = {}
roc_auc= {}
lr_precision = {}
lr_recall = {}
lr_f1 = {}
lr_auc= {}
cv_stratified = StratifiedKFoldReg(n_splits=n_splits, shuffle=True, random_state=10) # Stratified CV
fold_no = 1
for ii, (train_index, test_index) in enumerate(cv_stratified.split(X, y)):
    y_train, y_test = y[train_index], y[test_index]
    X_train, X_test = X[train_index], X[test_index]
    #Define and summarize the model: 48 -> 24 -> 12 -> Dropout -> 1
    inps = layers.Input(shape=X_train[0].shape)
    x = layers.Dense(48, activation='relu')(inps)
    x = layers.Dense(24, activation='relu')(x)
    x = layers.Dense(12, activation='relu')(x)
    x = layers.Dropout(0.2)(x)
    preds = layers.Dense(1)(x)
    ANN5 = models.Model(inputs=inps, outputs=preds)
    #Compile the model
    lr = 0.00007
    ANN5.compile(optimizer=optimizers.RMSprop(lr=lr), loss='mse')
    # Generate a print
    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')
    test_labels = y_test.to_list()
    test_labels = [round(num, 2) for num in test_labels]
    print(test_labels) #to have a look at the true residual values for test dataset
    #print histogram of y_test and y_train
    fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
    axs.hist(y_train, label="training")
    axs.hist(y_test, label="test")
    axs.legend()
    plt.tight_layout()
    # Fit data to model
    history = ANN5.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=200, verbose=0)
    plot_history(history, 'ANN5')
    #to store values for plotting global predicted vs. true residual values
    test_predictions = ANN5.predict(X_test).flatten()
    predcited_y = np.append(predcited_y, test_predictions)
    y_test_array = y_test.values
    true_y = np.append(true_y, y_test_array)
    # Generate generalization metrics
    scores = ANN5.evaluate(X_test, y_test, verbose=0)
    print(f'Test Score for fold {fold_no}: {ANN5.metrics_names} of {scores}')
    scores_training = ANN5.evaluate(X_train, y_train, verbose=0)
    print(f'Training Score for fold {fold_no}: {ANN5.metrics_names} of {scores_training}')
    loss_per_fold.append(scores)
    Train_loss_per_fold.append(scores_training)
    ## convert data of this fold to binary (outlier cutoff = 3)
    true_outliers_bi = y_test_array.copy()
    predicted_outliers_bi = test_predictions.copy()
    for i in range(0,len(true_outliers_bi)):
        if true_outliers_bi[i] > 3:
            true_outliers_bi[i] = 1
        else:
            true_outliers_bi[i] = 0
    for i in range(0,len(predicted_outliers_bi)):
        if predicted_outliers_bi[i] > 3:
            predicted_outliers_bi[i] = 1
        else:
            predicted_outliers_bi[i] = 0
    ## ROC
    fpr["fpr%s" %fold_no], tpr["tpr%s" %fold_no],threshold["threshold%s" %fold_no] = roc_curve(true_outliers_bi, predicted_outliers_bi)
    roc_auc["roc_auc%s" %fold_no] = auc(fpr["fpr%s" %fold_no], tpr["tpr%s" %fold_no])
    tprs.append(tpr["tpr%s" %fold_no])
    # BUGFIX: previously appended the key-name strings (["fpr..."], ["roc_auc..."])
    # instead of the stored arrays/values.
    fprs.append(fpr["fpr%s" %fold_no])
    aucs.append(roc_auc["roc_auc%s" %fold_no])
    ## PR
    lr_precision["lr_precision%s" %fold_no], lr_recall["lr_recall%s" %fold_no], _ = precision_recall_curve(true_outliers_bi, predicted_outliers_bi)
    lr_f1["lr_f1%s" %fold_no], lr_auc["lr_auc%s" %fold_no] = f1_score(true_outliers_bi, predicted_outliers_bi), auc(lr_recall["lr_recall%s" %fold_no], lr_precision["lr_precision%s" %fold_no])
    # Increase fold number
    fold_no = fold_no + 1
#ROC plots (one curve per fold)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr['fpr1'], tpr['tpr1'], linestyle='--',color='green', label='fold_No=1'+',AUC = %0.2f' % roc_auc['roc_auc1'])
plt.plot(fpr['fpr2'], tpr['tpr2'], linestyle='--',color='blue', label='fold_No=2'+',AUC = %0.2f' % roc_auc['roc_auc2'])
plt.plot(fpr['fpr3'], tpr['tpr3'], linestyle='--',color='red', label='fold_No=3'+',AUC = %0.2f' % roc_auc['roc_auc3'])
plt.plot(fpr['fpr4'], tpr['tpr4'], linestyle='--',color='black', label='fold_No=4'+',AUC = %0.2f' % roc_auc['roc_auc4'])
plt.plot(fpr['fpr5'], tpr['tpr5'], linestyle='--',color='yellow', label='fold_No=5'+',AUC = %0.2f' % roc_auc['roc_auc5'])
plt.plot(fpr['fpr6'], tpr['tpr6'], linestyle='-',color='green', label='fold_No=6'+',AUC = %0.2f' % roc_auc['roc_auc6'])
plt.plot(fpr['fpr7'], tpr['tpr7'], linestyle='-',color='blue', label='fold_No=7'+',AUC = %0.2f' % roc_auc['roc_auc7'])
plt.plot(fpr['fpr8'], tpr['tpr8'], linestyle='-',color='red', label='fold_No=8'+',AUC = %0.2f' % roc_auc['roc_auc8'])
plt.plot(fpr['fpr9'], tpr['tpr9'], linestyle='-',color='black', label='fold_No=9'+',AUC = %0.2f' % roc_auc['roc_auc9'])
plt.plot(fpr['fpr10'], tpr['tpr10'], linestyle='-',color='yellow', label='fold_No=10'+',AUC = %0.2f' % roc_auc['roc_auc10'])
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1], linestyle='-',color='orange')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
#PR plots (one curve per fold)
plt.title('PR plot')
plt.plot(lr_recall['lr_recall1'], lr_precision['lr_precision1'], linestyle='--',color='green', label='fold_No=1'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc1'], lr_f1['lr_f11']))
plt.plot(lr_recall['lr_recall2'], lr_precision['lr_precision2'], linestyle='--',color='blue', label='fold_No=2'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc2'], lr_f1['lr_f12']))
# BUGFIX: label previously said 'fold_No=2' for fold 3.
plt.plot(lr_recall['lr_recall3'], lr_precision['lr_precision3'], linestyle='--',color='red', label='fold_No=3'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc3'], lr_f1['lr_f13']))
plt.plot(lr_recall['lr_recall4'], lr_precision['lr_precision4'], linestyle='--',color='black', label='fold_No=4'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc4'], lr_f1['lr_f14']))
plt.plot(lr_recall['lr_recall5'], lr_precision['lr_precision5'], linestyle='--',color='yellow', label='fold_No=5'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc5'], lr_f1['lr_f15']))
plt.plot(lr_recall['lr_recall6'], lr_precision['lr_precision6'], linestyle='-',color='green', label='fold_No=6'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc6'], lr_f1['lr_f16']))
plt.plot(lr_recall['lr_recall7'], lr_precision['lr_precision7'], linestyle='-',color='blue', label='fold_No=7'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc7'], lr_f1['lr_f17']))
plt.plot(lr_recall['lr_recall8'], lr_precision['lr_precision8'], linestyle='-',color='red', label='fold_No=8'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc8'], lr_f1['lr_f18']))
plt.plot(lr_recall['lr_recall9'], lr_precision['lr_precision9'], linestyle='-',color='black', label='fold_No=9'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc9'], lr_f1['lr_f19']))
plt.plot(lr_recall['lr_recall10'], lr_precision['lr_precision10'], linestyle='-',color='yellow', label='fold_No=10'+',AUC = %0.2f f1=%.2f' % (lr_auc['lr_auc10'], lr_f1['lr_f110']))
plt.legend(loc = 'lower left')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.show()
# global plot true vs. predicted
#a = plt.axes(aspect='equal')
#plt.scatter(predcited_y, true_y)
#plt.xlabel('Predictions [residual]')
#plt.ylabel('True Values [residual]')
#lims = [-5, 20]
#plt.xlim(lims)
#plt.ylim(lims)
#prediction_plot = plt.plot(lims, lims)
|
{"hexsha": "ae03abbedf68f2a766c471590291c5fc8d88103d", "size": 25373, "ext": "py", "lang": "Python", "max_stars_repo_path": "outlier/Simeval_model_final_SC.py", "max_stars_repo_name": "pharmpy/ml-devel", "max_stars_repo_head_hexsha": "6c97bd71b58aee0deac207fce41c3e001786e779", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "outlier/Simeval_model_final_SC.py", "max_issues_repo_name": "pharmpy/ml-devel", "max_issues_repo_head_hexsha": "6c97bd71b58aee0deac207fce41c3e001786e779", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "outlier/Simeval_model_final_SC.py", "max_forks_repo_name": "pharmpy/ml-devel", "max_forks_repo_head_hexsha": "6c97bd71b58aee0deac207fce41c3e001786e779", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0924608819, "max_line_length": 189, "alphanum_fraction": 0.6681905963, "include": true, "reason": "import numpy", "num_tokens": 7393}
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name,missing-docstring
"""
Visualization functions for measurement counts.
"""
from collections import Counter
import warnings
import numpy as np
import matplotlib.pyplot as plt
from ._error import VisualizationError
def plot_histogram(data, number_to_keep=False, legend=None, options=None):
    """Plot a histogram of measurement-count data.

    Args:
        data (list or dict): This is either a list of dictionaries or a single
            dict containing the values to represent (ex {'001': 130})
        number_to_keep (int): DEPRECATED the number of terms to plot and rest
            is made into a single bar called other values
        legend (list): A list of strings to use for labels of the data.
            The number of entries must match the length of data (if data is a
            list or 1 if it's a dict)
        options (dict): Representation settings containing
            - number_to_keep (integer): groups max values
            - show_legend (bool): show legend of graph content
    Raises:
        VisualizationError: When legend is provided and the length doesn't
            match the input data.
    """
    if options is None:
        options = {}
    if number_to_keep is not False:
        warnings.warn("number_to_keep has been deprecated, use the options "
                      "dictionary and set a number_to_keep key instead",
                      DeprecationWarning)
    # BUG FIX: the old code tested options['number_to_keep'] in the loop
    # condition but still passed the deprecated argument (False) to
    # most_common(), which truncated every histogram to an empty dict plus a
    # single 'rest' bar.  Resolve the effective value up front instead.
    if number_to_keep is False and options.get('number_to_keep'):
        number_to_keep = options['number_to_keep']

    if isinstance(data, dict):
        data = [data]

    if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match "
                                 "number of input executions: %s" %
                                 (len(legend), len(data)))

    _, ax = plt.subplots()
    for item, execution in enumerate(data):
        if number_to_keep is not False:
            # Keep only the most frequent terms; everything else is lumped
            # into a single 'rest' bar.
            data_temp = dict(Counter(execution).most_common(number_to_keep))
            data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
            execution = data_temp
        labels = sorted(execution)
        values = np.array([execution[key] for key in labels], dtype=float)
        pvalues = values / sum(values)  # normalise counts to probabilities
        numelem = len(values)
        ind = np.arange(numelem)  # the x locations for the groups
        width = 0.35  # the width of the bars
        label = None
        if legend:
            label = legend[item]
        # shift each execution's bars sideways so multiple runs sit side by side
        adj = width * item
        rects = ax.bar(ind + adj, pvalues, width, label=label)
        # add some text for labels, title, and axes ticks
        ax.set_ylabel('Probabilities', fontsize=12)
        ax.set_xticks(ind)
        ax.set_xticklabels(labels, fontsize=12, rotation=70)
        ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])
        # attach the numeric probability above each bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                    '%f' % float(height),
                    ha='center', va='bottom')
    if legend and (
            'show_legend' not in options or options['show_legend'] is True):
        plt.legend()
    plt.show()
|
{"hexsha": "6aac7fcf2596891fd4484d6d9ac3de0f1639b555", "size": 3423, "ext": "py", "lang": "Python", "max_stars_repo_path": "qiskit/tools/visualization/_counts_visualization.py", "max_stars_repo_name": "kifumi/qiskit-terra", "max_stars_repo_head_hexsha": "203fca6d694a18824a6b12cbabd3dd2c64dd12ae", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-01T01:35:43.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-01T01:35:43.000Z", "max_issues_repo_path": "qiskit/tools/visualization/_counts_visualization.py", "max_issues_repo_name": "kifumi/qiskit-terra", "max_issues_repo_head_hexsha": "203fca6d694a18824a6b12cbabd3dd2c64dd12ae", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qiskit/tools/visualization/_counts_visualization.py", "max_forks_repo_name": "kifumi/qiskit-terra", "max_forks_repo_head_hexsha": "203fca6d694a18824a6b12cbabd3dd2c64dd12ae", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6153846154, "max_line_length": 81, "alphanum_fraction": 0.6123283669, "include": true, "reason": "import numpy", "num_tokens": 761}
|
using ABMExamples
using Test
using Statistics: mean
@testset "ABMExamples.jl" begin
    @testset "SchellingsSegregation.jl" begin
        # Run Schelling's segregation model for 20 steps and compare the
        # aggregated agent mood against the known reference value.
        schelling_data, schelling_filename = run_schelling_model!(20, "schelling")
        @show mean(schelling_data.sum_mood)
        @test mean(schelling_data.sum_mood) == 274.0
    end
    @testset "ForestFire.jl" begin
        _, fire_data = run_forest_fire_model!("forest_new.mp4")
        @test round(mean(fire_data.burnt_percentage), digits = 2) == 0.36
    end
    @testset "HKOpinionDynamics.jl" begin
        results = hk_model_run_and_plot!()
        # we are testing only ϵ = 0.3, which is the last in the list
        @test round(mean(results[end].new_opinion), digits = 2) == 0.54
    end
    @testset "Flocking.jl" begin
        # The flocking example only produces a video file; check it exists.
        run_flocking_example!()
        @test isfile("flocking.mp4")
    end
    @testset "PredatorPrey.jl" begin
        _, agent_df, model_df = run_predatorprey_model!()
        @test round(mean(agent_df.count_sheep), digits = 2) == 62.99
        @test round(mean(agent_df.count_wolves), digits = 2) == 15.09
    end
end
|
{"hexsha": "26e089e9255eaa69a1da623f2ffddfdf37d2d0b2", "size": 1270, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "codekomali/ABMExamples.jl", "max_stars_repo_head_hexsha": "0a297336198009345b837476c3f64bb1ab25d58d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "codekomali/ABMExamples.jl", "max_issues_repo_head_hexsha": "0a297336198009345b837476c3f64bb1ab25d58d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "codekomali/ABMExamples.jl", "max_forks_repo_head_hexsha": "0a297336198009345b837476c3f64bb1ab25d58d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4210526316, "max_line_length": 81, "alphanum_fraction": 0.6440944882, "num_tokens": 373}
|
#!/usr/bin/env python
# Title :loader.py
# Author :Venkatraman Narayanan, Bala Murali Manoghar, Vishnu Shashank Dorbala, Aniket Bera, Dinesh Manocha
# Copyright :"Copyright 2020, Proxemo project"
# Version :1.0
# License :"MIT"
# Maintainer :Venkatraman Narayanan, Bala Murali Manoghar
# Email :vnarayan@terpmail.umd.edu, bsaisudh@terpmail.umd.edu
#==============================================================================
import numpy as np
def augment3D(data_features, theta, trans, scale):
"""Function to transform the given gait points
theta should be given in degrees
Args:
data_features (np.array): gait cycle data
theta (int): Augmentation angle (wrt skeletal root)
trans (np.array): transformation matrix to be applied.
scale (int): scaling factor for 2D camera matrix
Returns:
[np.array]: augmented matrix
"""
theta = theta*np.pi/180
rotMat = np.array([[np.cos(theta), 0, -np.sin(theta)],
[0, 1, 0], [np.sin(theta), 0, np.cos(theta)]])
xyTrans = [trans*np.cos(theta), trans*np.sin(theta), 0]
xyTrans = np.array(xyTrans)
K = np.array([[scale, 0, 0, 0], [0, scale, 0, 0],
[0, 0, 1, 0], [0, 0, 0, 1]])
H = np.ones((4, 4))
H[0:3, 0:3] = rotMat
H[3, 0:3] = xyTrans
P = K @ H
data_features = np.reshape(data_features, (len(data_features), -1, 3))
df = np.ones((data_features.shape[0], data_features.shape[1], 4))
df[:, :, 0:3] = data_features
df = df @ P
df = df[:, :, 0:3]
df = np.reshape(df, (len(df), -1))
return df
|
{"hexsha": "a45f4e8de8bcaa3a5d8a09528e835f05d66965d3", "size": 1652, "ext": "py", "lang": "Python", "max_stars_repo_path": "emotion_classification/utils/transform3DPose.py", "max_stars_repo_name": "vijay4313/proxemo", "max_stars_repo_head_hexsha": "98c4e2133047aa8519cc2f482b59565d9160e81a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-08-18T17:31:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T10:37:31.000Z", "max_issues_repo_path": "emotion_classification/utils/transform3DPose.py", "max_issues_repo_name": "bsaisudh/proxemo", "max_issues_repo_head_hexsha": "7b09828c3b63b01617824c3b27a059584eb11ca4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-09T10:18:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T21:34:37.000Z", "max_forks_repo_path": "emotion_classification/utils/transform3DPose.py", "max_forks_repo_name": "bsaisudh/proxemo", "max_forks_repo_head_hexsha": "7b09828c3b63b01617824c3b27a059584eb11ca4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-08-15T16:46:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-08T06:54:03.000Z", "avg_line_length": 37.5454545455, "max_line_length": 116, "alphanum_fraction": 0.5623486683, "include": true, "reason": "import numpy", "num_tokens": 508}
|
from os.path import dirname, exists, splitext, basename
from os import makedirs
from math import ceil, floor
from matplotlib import pyplot as plt
from math import log10
import warnings
import numpy as np
from matplotlib.colors import LogNorm
from traitlets import Dict, List, Unicode
from ctapipe.core import Tool, Component
from ctapipe.analysis.camera.chargeresolution import ChargeResolutionCalculator
class ChargeResolutionVariationPlotter(Component):
    """Component that renders the 2D histogram of measured vs. true charge
    (as produced by a ChargeResolutionCalculator) and saves it to
    ``output_path``."""
    name = 'ChargeResolutionVariationPlotter'
    output_path = Unicode(None, allow_none=True,
                          help='Output path to save the '
                               'plot.').tag(config=True)

    def __init__(self, config, tool, **kwargs):
        """
        Plotter for the charge-resolution variation histogram.

        Parameters
        ----------
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        tool : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs

        Raises
        ------
        ValueError
            If ``output_path`` was not configured.
        """
        super().__init__(config=config, parent=tool, **kwargs)
        # output_path is mandatory; log and re-raise so the failure is visible
        # both in the log and to the caller
        try:
            if self.output_path is None:
                raise ValueError
        except ValueError:
            self.log.exception('Please specify an output path')
            raise
        # pre-build a two-panel figure; the axes are kept as attributes so
        # other methods can draw into them
        self.fig = plt.figure(figsize=(20, 8))
        self.ax_l = self.fig.add_subplot(121)
        self.ax_r = self.fig.add_subplot(122)
        self.fig.subplots_adjust(left=0.05, right=0.95, wspace=0.6)
        self.legend_handles = []
        self.legend_labels = []

    def plot_hist(self, hist, xedges, yedges):
        """Draw the log-log 2D histogram of measured vs. true charge and
        save the figure to ``self.output_path``.

        Parameters
        ----------
        hist : ndarray
            2D histogram counts.  NOTE: zero bins are set to NaN in place,
            i.e. the caller's array is mutated.
        xedges, yedges : ndarray
            Bin edges in log10 space (they are exponentiated before plotting).
        """
        hist[hist == 0.0] = np.nan  # blank empty bins so they render as gaps
        fig = plt.figure(figsize=(10, 7))
        ax = fig.add_subplot(111)
        # use the output filename (without extension) as the plot title
        ax.set_title(splitext(basename(self.output_path))[0])
        x, y = np.meshgrid(xedges, yedges)
        # edges are stored in log10 space; convert back to linear charge
        x = np.power(10, x)
        y = np.power(10, y)
        # mask NaNs so pcolormesh/LogNorm ignore the empty bins
        hist_mask = np.ma.masked_where(np.isnan(hist), hist)
        im = ax.pcolormesh(x, y, hist_mask, norm=LogNorm(),
                           cmap=plt.cm.viridis)
        cb = plt.colorbar(im)
        ax.set_aspect('equal')
        ax.grid()
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel(r'True Charge $Q_T$ (p.e.)')
        ax.set_ylabel(r'Measured Charge $Q_M$ (p.e.)')
        cb.ax.set_ylabel("Count")
        # reference line: measured == true
        line = np.linspace(*ax.get_xlim(), 100)
        ax.plot(line, line, c='0.75', ls='--')
        # Add minor ticks
        # build logarithmically spaced minor tick positions spanning the
        # colorbar's data range, then map them through the norm
        lmin = floor(log10(hist_mask.min()))
        lmax = ceil(log10(hist_mask.max()))
        logticks = np.tile(np.arange(lmin, 10), lmax) * (
            np.power(10, np.arange(lmax * 10) // 10))
        logticks = im.norm(logticks[(logticks != 0) &
                                    (logticks >= hist_mask.min()) &
                                    (logticks <= hist_mask.max())])
        cb.ax.yaxis.set_ticks(logticks, minor=True)
        cb.ax.yaxis.set_tick_params(which='minor', length=5)
        cb.ax.tick_params(length=10)
        # create the output directory on demand, then save
        output_dir = dirname(self.output_path)
        if not exists(output_dir):
            self.log.info("[output] Creating directory: {}".format(output_dir))
            makedirs(output_dir)
        self.log.info("[output] {}".format(self.output_path))
        # silence matplotlib warnings emitted during tight-bbox saving
        warnings.filterwarnings("ignore", module="matplotlib")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            fig.savefig(self.output_path, bbox_inches='tight')
class ChargeResolutionVariationViewer(Tool):
    """Tool that loads a saved ChargeResolutionCalculator hdf5 file and plots
    its variation histogram via ChargeResolutionVariationPlotter."""
    name = "ChargeResolutionVariationViewer"
    description = "Plot the charge resolution from " \
                  "ChargeResolutionCalculator objects restored via " \
                  "pickled dictionaries."

    input_path = Unicode(None, allow_none=True,
                         help='Path to the hdf5 file produced from'
                              'ChargeResolutionCalculator.save()'
                              '').tag(config=True)

    # command-line aliases: -f selects the input file, -O the output plot path
    aliases = Dict(dict(f='ChargeResolutionVariationViewer.input_path',
                        O='ChargeResolutionVariationPlotter.output_path',
                        ))
    classes = List([ChargeResolutionCalculator,
                    ChargeResolutionVariationPlotter
                    ])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # populated in setup()
        self.calculator = None
        self.plotter = None

    def setup(self):
        """Instantiate the calculator and plotter components."""
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
        kwargs = dict(config=self.config, tool=self)
        self.calculator = ChargeResolutionCalculator(**kwargs)
        self.plotter = ChargeResolutionVariationPlotter(**kwargs)

    def start(self):
        """Load the saved calculator state from the input hdf5 file."""
        self.calculator.load(self.input_path)

    def finish(self):
        """Plot the restored variation histogram."""
        hist = self.calculator.variation_hist
        xedges = self.calculator.variation_xedges
        yedges = self.calculator.variation_yedges
        self.plotter.plot_hist(hist, xedges, yedges)
def main():
    """Entry point: build the viewer tool and hand control to it."""
    tool = ChargeResolutionVariationViewer()
    tool.run()


if __name__ == '__main__':
    main()
|
{"hexsha": "addb58756edec3d4acd694750935fe076426120a", "size": 5527, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctapipe/tools/plot_charge_resolution_variation_hist.py", "max_stars_repo_name": "orelgueta/ctapipe", "max_stars_repo_head_hexsha": "ee28440e83cc283ccd57428d5fdad764a1e786f0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ctapipe/tools/plot_charge_resolution_variation_hist.py", "max_issues_repo_name": "orelgueta/ctapipe", "max_issues_repo_head_hexsha": "ee28440e83cc283ccd57428d5fdad764a1e786f0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ctapipe/tools/plot_charge_resolution_variation_hist.py", "max_forks_repo_name": "orelgueta/ctapipe", "max_forks_repo_head_hexsha": "ee28440e83cc283ccd57428d5fdad764a1e786f0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1241830065, "max_line_length": 79, "alphanum_fraction": 0.6028586937, "include": true, "reason": "import numpy", "num_tokens": 1217}
|
# -*- coding: utf-8 -*-
import numpy as np
import numpy.ma as ma
import cv2
import tables
from tierpsy.analysis.compress.selectVideoReader import selectVideoReader
class BackgroundSubtractorBase():
    """Common machinery for background-subtraction strategies.

    Subclasses decide how the background model is built (``init_buffer``,
    ``update_background``) and how it is removed from a frame
    (``subtract_bgnd``); this base class only orchestrates those hooks.
    """

    def __init__(self,
                 video_file,
                 buff_size=-1,
                 frame_gap=-1,
                 is_light_background=True):
        # input parameters
        self.video_file = video_file
        self.buff_size = buff_size
        self.frame_gap = frame_gap
        self.is_light_background = is_light_background
        # current background estimate; filled in by subclasses
        self.bgnd = None

    def init_buffer(self):
        '''Initialise the frame buffer; refreshed as video reading progresses.'''
        pass

    def subtract_bgnd(self, imgage):
        '''Remove the current background estimate from a single frame.'''
        # (parameter name kept as-is to preserve the public signature)
        pass

    def is_update_frame(self, current_frame):
        '''Return whether this frame should refresh the background model.'''
        pass

    def update_background(self, image, current_frame):
        '''Refresh the background model from the given frame.'''
        pass

    def _apply_single(self, image, current_frame):
        # refresh the model if due, then subtract it from this one frame
        if self.is_update_frame(current_frame):
            self.update_background(image, current_frame)
        return self.subtract_bgnd(image)

    def apply(self, image, current_frame=np.nan):
        # accept either a single 2-D frame or a 3-D stack of frames
        if image.ndim == 2:
            return self._apply_single(image, current_frame)
        if image.ndim == 3:
            frames = [self._apply_single(frame, current_frame + offset)
                      for offset, frame in enumerate(image)]
            return np.array(frames)
        raise ValueError

    def _subtract_bgnd_from_mask(self, img):
        # saturating subtraction in the direction implied by the illumination
        diff = np.zeros_like(img)
        if self.is_light_background:
            cv2.subtract(self.bgnd, img, diff)
        else:
            cv2.subtract(img, self.bgnd, diff)
        diff[img == 0] = 0  # keep masked-out pixels masked
        return diff
class BackgroundSubtractorStream(BackgroundSubtractorBase):
    """Background model maintained in a rolling (circular) buffer of frames.

    Every ``frame_gap`` frames a new frame is pushed into the buffer and the
    background estimate is re-derived from it via :meth:`calculate_bgnd`.
    """

    def __init__(self, *args, **argkws):
        super().__init__(*args, **argkws)
        # a streaming subtractor requires a real buffer size and sampling gap
        assert self.buff_size > 0
        assert self.frame_gap > 0

        # bright background -> per-pixel max; dark/fluorescence -> per-pixel min
        self.reduce_func = np.max if self.is_light_background else np.min

        # internal state
        self.last_frame = -1
        self.buffer = None
        self.buffer_ind = -1

        self.init_buffer()
        self.calculate_bgnd()

    def is_update_frame(self, current_frame):
        '''True when enough frames have elapsed since the last buffer update
        (or when no frame has been buffered yet).'''
        return (self.last_frame < 0) | \
               (current_frame - self.last_frame >= self.frame_gap)

    def update_background(self, image, current_frame):
        self.update_buffer(image, current_frame)
        self.calculate_bgnd()

    def calculate_bgnd(self):
        '''Derive the background estimate from the buffer (subclass hook).'''
        pass

    def update_buffer(self, image, current_frame):
        '''Insert a new frame into the circular buffer.'''
        if image.sum() == 0:
            # occasional all-black frames are acquisition artefacts; skip them
            return
        self.last_frame = current_frame
        # circular buffer: wrap back to the start once the end is reached
        self.buffer_ind = (self.buffer_ind + 1) % self.buffer.shape[0]
        self.buffer[self.buffer_ind] = image
class BackgroundSubtractorVideo(BackgroundSubtractorStream):
    """Streaming background subtractor fed directly from a video file."""

    def __init__(self, *args, **argkws):
        super().__init__(*args, **argkws)

    def init_buffer(self):
        """Read up to buff_size*frame_gap frames from the video and fill the
        circular buffer, sampling one frame every frame_gap frames."""
        ret = 1
        current_frame = 0
        vid = selectVideoReader(self.video_file)
        # initialise the buffer with the neutral value for the chosen
        # reduction: dtype min for max-reduction, dtype max for min-reduction
        d_info = np.iinfo(vid.dtype)
        if self.is_light_background:
            init_value = d_info.min
        else:
            init_value = d_info.max
        self.buffer = np.full((self.buff_size, vid.height, vid.width), init_value, vid.dtype)
        self.buffer_ind = -1
        max_frames = self.buff_size*self.frame_gap
        if vid.__class__.__name__ != 'readLoopBio':
            # for non-loopbio videos: sequential read, sampling via
            # is_update_frame
            while current_frame < max_frames:
                ret, image = vid.read()
                #if not valid frame is returned return.
                if ret == 0:
                    break
                if image.ndim == 3:
                    # colour frame -> greyscale before buffering
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                if self.is_update_frame(current_frame):
                    self.update_buffer(image, current_frame)
                current_frame += 1
            self.last_frame = current_frame - 1
        else:
            # loopbio videos: random access, read exactly every frame_gap-th
            # frame
            for fc in range(self.buff_size):
                frame_to_read = fc * self.frame_gap
                ret, image = vid.read_frame(frame_to_read)
                # if not valid frame is returned return.
                if ret == 0:
                    break
                self.update_buffer(image, frame_to_read)
            # NOTE(review): last_frame is set one gap *before* the last frame
            # read, which makes the next is_update_frame() true immediately —
            # confirm this is intended for the loopbio path
            self.last_frame = frame_to_read - self.frame_gap
        vid.release()
        if self.buffer_ind < 0:
            #no valid frames read
            self.buffer = None
        elif self.buffer_ind < (self.buff_size - 1):
            #not enough frames to fill the buffer, reduce its size
            self.buffer = self.buffer[:(self.buffer_ind+1)]

    def calculate_bgnd(self):
        '''Calculate the background from the buffer.'''
        # reduce over the time axis (max for bright, min for dark background)
        self.bgnd = self.reduce_func(self.buffer, axis=0)
        self.bgnd = self.bgnd.astype(np.int32)

    def subtract_bgnd(self, image):
        # new method using bitwise not
        def _remove_func(_img, _func, _bg):
            #the reason to use opencv2 instead of numpy is to avoid buffer overflow
            #https://stackoverflow.com/questions/45817037/opencv-image-subtraction-vs-numpy-subtraction/45817868
            new_img = np.zeros_like(_img); #maybe can do this in place
            if image.ndim == 2:
                _func(_img, _bg, new_img)
            else:
                # apply frame by frame to a 3-D stack
                for ii, this_frame in enumerate(_img):
                    _func(this_frame, _bg, new_img[ii])
            return new_img

        bg = self.bgnd.astype(np.uint8)
        if self.is_light_background:
            # adding the inverted background is equivalent to subtracting the
            # background with saturation at 255
            notbg = ~bg
            ss = _remove_func(image, cv2.add, notbg)
        else: # fluorescence
            ss = _remove_func(image, cv2.subtract, bg)
        # keep a floor of 1 so zero stays reserved for masked pixels
        ss = np.clip( ss ,1,255);
        return ss
class BackgroundSubtractorMasked(BackgroundSubtractorStream):
    '''
    Subtract the background from a masked (tierpsy hdf5 "/mask") video.

    Masked-out pixels are stored as zeros, so the buffer is kept as a numpy
    masked array and the background reduction ignores them.
    '''
    def __init__(self, *args, **argkws):
        self.full_img = None
        super().__init__(*args, **argkws)

    def init_buffer(self):
        """Fill the buffer with every frame_gap-th frame from '/mask'."""
        #we only accept masked files
        assert self.video_file.endswith('hdf5')
        with tables.File(self.video_file, 'r') as fid:
            masks = fid.get_node('/mask')
            # masked numpy array so zeroed (masked-out) pixels are ignored
            last_frame = self.buff_size * self.frame_gap - 1
            self.buffer = masks[:last_frame + 1:self.frame_gap]
            self.buffer = ma.masked_array(self.buffer, self.buffer == 0)

            self.buffer_ind = self.buffer.shape[0] - 1
            self.last_frame = last_frame

            if '/full_data' in fid:
                # canonical background: the reduction over all full frames
                full_data = fid.get_node('/full_data')
                self.full_img = self.reduce_func(full_data, axis=0)

    def calculate_bgnd(self):
        # fill masked pixels with the neutral value for the reduction
        fill_value = 0 if self.is_light_background else 255
        self.bgnd = self.reduce_func(self.buffer, axis=0).filled(fill_value)
        if self.full_img is not None:
            dd = (self.bgnd, self.full_img)
            self.bgnd = self.reduce_func(dd, axis=0)

    def update_background(self, image, frame_n):
        # BUG FIX: this override was previously named `_update_background`
        # (and called `super()._update_background`, which does not exist), so
        # it was never invoked by apply() and newly-buffered frames were never
        # re-masked.  Renaming it to `update_background` restores the intended
        # behaviour: buffer + background update, then re-mask the new entry.
        super().update_background(image, frame_n)
        dd = self.buffer[self.buffer_ind]
        self.buffer[self.buffer_ind] = ma.masked_array(dd, dd == 0)

    def subtract_bgnd(self, image):
        return self._subtract_bgnd_from_mask(image)
#%%
class BackgroundSubtractorPrecalculated(BackgroundSubtractorBase):
    """Serve background images that were pre-computed and stored in the
    masked hdf5 file under '/bgnd' (one image every `save_interval` frames)."""

    def __init__(self, *args, **argkws):
        self.save_interval = -1
        self.precalculated_bgnd = None
        self.full_img = None
        super().__init__(*args, **argkws)
        self.init_buffer()

    def init_buffer(self):
        # we only accept masked hdf5 files
        assert self.video_file.endswith('hdf5')
        with tables.File(self.video_file, 'r') as fid:
            bgnd_node = fid.get_node('/bgnd')
            self.save_interval = int(bgnd_node._v_attrs['save_interval'])
            self.precalculated_bgnd = bgnd_node[:]
        # start from the first stored background
        self.last_frame = 0
        self.bgnd = self.precalculated_bgnd[0]

    def subtract_bgnd(self, image):
        return self._subtract_bgnd_from_mask(image)

    def is_update_frame(self, current_frame):
        '''The lookup below is trivial, so refresh on every frame.'''
        return True

    def update_background(self, image, current_frame):
        # pick the stored background slice that covers this frame
        self.bgnd = self.precalculated_bgnd[current_frame // self.save_interval]
#%%
if __name__ == '__main__':
    # Ad-hoc smoke test / demo: subtract pre-calculated backgrounds from a
    # masked video and display frame / background / result side by side.
    import matplotlib.pylab as plt
    import tqdm

    #video_file = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11254_Ch1_05092017_075253.hdf5'
    #video_file = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11314_Ch2_01072017_093003.hdf5'
    video_file = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11314_Ch1_04072017_103259.hdf5'
    #%%
    #video_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/tierpsy_test_data/different_animals/worm_motel/MaskedVideos/Position6_Ch2_12012017_102810_s.hdf5'
    #_sub = BackgroundSubtractorMasked(video_file, buff_size = 5, frame_gap = 100)
    _sub = BackgroundSubtractorPrecalculated(video_file)

    with tables.File(video_file, 'r') as fid:
        masks = fid.get_node('/mask')
        tot = min(1000000, masks.shape[0])
        # sample every 1000th frame and plot frame, background and result
        for frame_number in tqdm.tqdm(range(0, tot, 1000)):
            img = masks[frame_number]
            img_s = _sub.apply(img, current_frame = frame_number)

            fig, axs = plt.subplots(1,3, sharex = True, sharey=True)
            axs[0].imshow(img)
            axs[1].imshow(_sub.bgnd)
            axs[2].imshow(img_s)
    #%%
    # Alternative demo using a plain video file (kept for reference):
#    from pathlib import Path
#    video_file = Path.home () / 'OneDrive - Imperial College London/documents/papers_in_progress/paper_tierpsy_tracker/figures_data/different_setups/CeLeST/RawVideos/Sample01/frame001.jpg'
#    video_file = str(video_file)
#    bngd_subtr = BackgroundSubtractorVideo(video_file, buff_size = 10, frame_gap = 50, is_light_background=False)
#    assert (bngd_subtr.bgnd).sum() > 0
#    print(np.mean(bngd_subtr.bgnd))
#
#    img_s = bngd_subtr.apply(bngd_subtr.buffer)
#
#    fig, axs = plt.subplots(1,2, sharex=True, sharey=True)
#    axs[0].imshow(bngd_subtr.buffer[0])
#    axs[1].imshow(img_s[0])
|
{"hexsha": "b94d60a51265f3957856612cefde40bcdbfea3fd", "size": 11412, "ext": "py", "lang": "Python", "max_stars_repo_path": "tierpsy/analysis/compress/BackgroundSubtractor.py", "max_stars_repo_name": "saulmoore1/tierpsy-tracker", "max_stars_repo_head_hexsha": "69630c90de2e8a0b70168790f9c1198a0a644b3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-01-11T10:49:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T15:48:00.000Z", "max_issues_repo_path": "tierpsy/analysis/compress/BackgroundSubtractor.py", "max_issues_repo_name": "saulmoore1/tierpsy-tracker", "max_issues_repo_head_hexsha": "69630c90de2e8a0b70168790f9c1198a0a644b3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-05-08T15:43:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T10:19:24.000Z", "max_forks_repo_path": "tierpsy/analysis/compress/BackgroundSubtractor.py", "max_forks_repo_name": "saulmoore1/tierpsy-tracker", "max_forks_repo_head_hexsha": "69630c90de2e8a0b70168790f9c1198a0a644b3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-12-18T12:10:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T09:12:47.000Z", "avg_line_length": 33.4662756598, "max_line_length": 189, "alphanum_fraction": 0.6255695759, "include": true, "reason": "import numpy", "num_tokens": 2726}
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import collections as cl
import json
from .util import *
class Waterbank():
def __init__(self, df, name, key):
    """Build a Waterbank from a simulation dataframe and its JSON properties.

    Args:
        df: DataFrame whose (datetime) index defines the simulation horizon;
            only the index is used here.
        name: human-readable bank name.
        key: short identifier; also selects 'cord/banks/<key>_properties.json',
            whose entries become attributes of this object.
    """
    self.T = len(df)
    self.index = df.index
    self.number_years = self.index.year[self.T - 1] - self.index.year[0]
    self.key = key
    self.name = name
    # load the static bank properties; use a context manager so the file
    # handle is closed (json.load(open(...)) leaked it before)
    with open('cord/banks/%s_properties.json' % key) as properties_file:
        for k, v in json.load(properties_file).items():
            setattr(self, k, v)
    self.recharge_rate = self.initial_recharge * cfs_tafd
    self.tot_current_storage = 0.0  # total above-ground storage being used in water bank
    self.loss_rate = 0.06  # how much of banked deliveries is lost during spreading
    # dictionaries for individual member use of the bank
    self.storage = {}  # how much water delivered to bank this time step
    self.recovery_use = {}  # how much recovery capacity is being used by a member this time step
    self.banked = {}  # how much water is stored in the groundwater banking account of the member
    # timeseries for export to csv
    self.bank_timeseries = {}  # daily
    self.annual_timeseries = {}  # annual
    self.recharge_rate_series = np.zeros(self.T)  # daily recharge rate
    for x in self.participant_list:
        self.storage[x] = 0.0
        self.bank_timeseries[x] = np.zeros(self.T)
        self.annual_timeseries[x] = np.zeros(self.number_years)
        self.recovery_use[x] = 0.0
        self.banked[x] = 0.0
    # counters to keep track of the duration of waterbank use
    # (recharge rate declines after continuous use)
    self.thismonthuse = 0
    self.monthusecounter = 0
    self.monthemptycounter = 0
def object_equals(self, other):
    """Compare two instances attribute by attribute.

    Returns True only when every attribute matches.  Dict attributes are
    compared per key; values whose `==` yields an array (e.g. numpy arrays)
    are reduced with `.all()`.

    NOTE(review): when the two objects have different attribute sets this
    returns the *string* 'Different Attributes', which is truthy — callers
    relying on a strict boolean should confirm this is intended.
    """
    ##This function compares two instances of an object, returns True if all attributes are identical.
    equality = {}
    if (self.__dict__.keys() != other.__dict__.keys()):
        return ('Different Attributes')
    else:
        differences = 0
        for i in self.__dict__.keys():
            if type(self.__getattribute__(i)) is dict:
                # dict attribute: compare each entry individually
                equality[i] = True
                for j in self.__getattribute__(i).keys():
                    # scalar comparison yields a bool; array comparison
                    # yields an array and must be reduced with .all()
                    if (type(self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) is bool):
                        if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) == False):
                            equality[i] = False
                            differences += 1
                    else:
                        if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]).all() == False):
                            equality[i] = False
                            differences += 1
            else:
                # non-dict attribute: same scalar/array distinction as above
                if (type(self.__getattribute__(i) == other.__getattribute__(i)) is bool):
                    equality[i] = (self.__getattribute__(i) == other.__getattribute__(i))
                    if equality[i] == False:
                        differences += 1
                else:
                    equality[i] = (self.__getattribute__(i) == other.__getattribute__(i)).all()
                    if equality[i] == False:
                        differences += 1
    return (differences == 0)
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################DETERMINE DELIVERIES ON CANAL######################################################
#####################################################################################################################
def find_node_demand(self, contract_list, xx, num_members, search_type):
    """Return the maximum 'demand' available at this waterbank node.

    In 'recovery' mode the demand is the unused recovery (pumping) capacity;
    in every other mode it is the remaining recharge (spreading basin) space.
    `contract_list`, `xx` and `num_members` are unused here but kept for
    interface compatibility with other node types' implementations.
    """
    if search_type == "recovery":
        # recovery mode - sum the (pumping) capacity use of each wb member
        current_recovery_use = sum(self.recovery_use[member] for member in self.recovery_use)
        return max(self.recovery - current_recovery_use, 0.0)
    # recharge mode - sum the (spreading basin) capacity use of each wb member.
    # (Fixed: the original loop reused the name `xx`, shadowing the parameter.)
    current_storage = sum(self.storage[member] for member in self.participant_list)
    return max(self.tot_storage - current_storage, 0.0)
def find_priority_space(self, num_members, xx, search_type):
    """Return how much 'priority' recharge/recovery capacity member `xx` still owns.

    Recovery mode: unused owned pumping capacity, capped by the member's banked
    balance. Any other mode: unused owned recharge space.
    """
    if search_type == "recovery":
        # owned pumping capacity not yet used this timestep...
        owned_capacity = self.recovery * self.ownership[xx] / num_members
        unused_capacity = max(owned_capacity - self.recovery_use[xx], 0.0)
        # ...but the member can only recover water it has actually banked
        return min(unused_capacity, self.banked[xx])
    else:
        owned_space = self.tot_storage * self.ownership[xx] / num_members
        return max(owned_space - self.storage[xx], 0.0)
def set_demand_priority(self, priority_list, contract_list, demand, delivery, demand_constraint, search_type, contract_canal, current_canal, member_contracts):
    # Build a dictionary (demand_dict) with one key per 'priority type' of the flow;
    # different flow types (flood, delivery, banking, recovery) use different keys.
    demand_dict = {}
    # For flood flows, determine if the wb members have contracts w/ the flooding reservoir - 1st priority.
    # If not, do they have turnouts on the 'priority' canals - 2nd priority.
    # If not, the demand is 'excess' - 3rd priority (so that flood waters only use
    # certain canals unless the flood releases are big enough).
    if search_type == 'flood':
        priority_toggle = 0
        contractor_toggle = 0
        canal_toggle = 0
        # is the canal carrying this contract one of the 'priority' canals?
        for yy in priority_list:
            if yy.name == contract_canal:
                priority_toggle = 1
        if priority_toggle == 1:
            # does any waterbank member hold a contract with the flooding reservoir?
            for y in contract_list:
                for yx in member_contracts:
                    if y.name == yx:
                        contractor_toggle = 1
            # does this waterbank have rights on the canal currently searched?
            for yy in self.get_iterable(self.canal_rights):
                if yy == current_canal:
                    canal_toggle = 1
            if contractor_toggle == 1 and canal_toggle == 1:
                demand_dict['contractor'] = demand
                demand_dict['alternate'] = 0.0
                demand_dict['turnout'] = 0.0
                demand_dict['excess'] = 0.0
            elif contractor_toggle == 1:
                demand_dict['contractor'] = 0.0
                demand_dict['alternate'] = demand
                demand_dict['turnout'] = 0.0
                demand_dict['excess'] = 0.0
            else:
                demand_dict['contractor'] = 0.0
                demand_dict['alternate'] = 0.0
                demand_dict['turnout'] = demand
                demand_dict['excess'] = 0.0
        else:
            # not a priority canal: all demand is 'excess'
            demand_dict['contractor'] = 0.0
            demand_dict['alternate'] = 0.0
            demand_dict['turnout'] = 0.0
            demand_dict['excess'] = demand
    # if the flows are for delivery, they don't come to a water bank
    elif search_type == 'delivery':
        demand_dict[contract_canal] = 0.0
    # Banking flows: 'priority' for flows taken by a wb member under their 'owned' capacity;
    # 'secondary' for districts using 'excess' wb space they do not own
    # (but the owner does not want to use).
    elif search_type == 'banking':
        canal_toggle = 0
        for yy in self.get_iterable(self.canal_rights):
            if yy == current_canal:
                canal_toggle = 1
        if canal_toggle == 1:
            # priority = deliveries within owned capacity; secondary = the remainder,
            # both capped by the node's physical demand_constraint
            demand_dict['priority'] = min(max(min(demand,delivery), 0.0), demand_constraint)
            demand_dict['secondary'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['priority'])
        else:
            demand_dict['priority'] = 0.0
            demand_dict['secondary'] = min(max(delivery, 0.0), demand_constraint)
    # Recovery flows mirror banking flows: first priority for wb members using owned
    # capacity, second priority for wb members using 'excess' capacity.
    elif search_type == 'recovery':
        demand_dict['initial'] = min(max(min(demand,delivery), 0.0), demand_constraint)
        demand_dict['supplemental'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['initial'])
    return demand_dict
def set_deliveries(self, priorities, type_fractions, type_list, member_name):
    """Deliver water of each priority type to this bank on behalf of `member_name`.

    Each priority level contributes priorities[type] * type_fractions[type].
    Delivered water occupies recharge space (self.storage) for the member.
    Returns the total amount delivered at this node.
    """
    delivered_total = 0.0
    for priority_type in type_list:
        # amount delivered at this priority level
        amount = priorities[priority_type] * type_fractions[priority_type]
        delivered_total += amount
        # delivered water sits in the recharge basin until absorbed
        self.storage[member_name] += amount
    return delivered_total
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
###################### UPDATE/SAVE STATE VARIABLES (BANK ACCOUT BALANCES) ################################
#####################################################################################################################
def adjust_recovery(self, deliveries, member_name, wateryear):
    """Update bank accounts and capacity usage after `member_name` recovers water.

    (`wateryear` is unused here; kept for interface compatibility.)
    """
    self.banked[member_name] -= deliveries        # debit the groundwater bank account
    self.recovery_use[member_name] += deliveries  # record pumping capacity used
def sum_storage(self):
    """Recompute the total recharge-basin capacity currently in use by all members."""
    self.tot_current_storage = sum(
        (self.storage[member] for member in self.participant_list), 0.0)
def absorb_storage(self):
    """Percolate ponded water in the recharge basin into the ground.

    Frees up basin capacity and credits each member's bank account with its
    share of the absorbed water (minus percolation losses).
    """
    # sustained heavy use degrades the recharge rate; flag this month as 'in use'
    if self.tot_current_storage > self.recharge_rate * 0.75:
        self.thismonthuse = 1
    if self.tot_current_storage > 0.0:
        # fraction of ponded water that percolates this step (at most all of it)
        fraction = min(self.recharge_rate / self.tot_current_storage, 1.0)
        self.tot_current_storage -= self.tot_current_storage * fraction
        for member in self.participant_list:
            absorbed = self.storage[member] * fraction
            # only a portion of the recharge is credited to the bank account
            self.banked[member] += absorbed * (1.0 - self.loss_rate)
            self.storage[member] -= absorbed
def accounting(self, t, m, da, wateryear):
    # Store daily bank account balances for export to csv. Balances are 'stacked'
    # cumulatively across members so the series plot as a stacked area chart.
    stacked_amount = 0.0
    self.recharge_rate_series[t] = self.recharge_rate
    for x in self.participant_list:
        self.bank_timeseries[x][t] = self.banked[x] + stacked_amount
        stacked_amount += self.banked[x]
    # end of the water year (Sept 29): record the annual change in gw bank balances
    if m == 9 and da == 29:
        # annual dictionary stores the annual change in gw bank balances
        for x in self.participant_list:
            sum_total = 0.0
            for year_counter in range(0, wateryear):
                sum_total += self.annual_timeseries[x][year_counter]
            # this year's change = current balance minus all prior years' changes
            self.annual_timeseries[x][wateryear] = self.banked[x] - sum_total
def bank_as_df(self, index):
    """Return the daily bank balances plus the running recharge-rate series as a
    DataFrame indexed by `index`, one column per waterbank participant."""
    df = pd.DataFrame()
    for member in self.participant_list:
        column_name = '%s_%s' % (self.key, member)
        df[column_name] = pd.Series(self.bank_timeseries[member], index=index)
    # append the recharge capacity series last
    df['%s_rate' % self.key] = pd.Series(self.recharge_rate_series, index=index)
    return df
def annual_bank_as_df(self):
    """Return annual bank-balance changes as a DataFrame (for export to csv)."""
    df = pd.DataFrame()
    for member in self.participant_list:
        df['%s_%s_leiu' % (self.key, member)] = pd.Series(self.annual_timeseries[member])
    return df
def get_iterable(self, x):
    """Wrap `x` in a 1-tuple unless it is already iterable.

    Note: strings are iterable and therefore pass through unchanged.
    """
    return x if isinstance(x, cl.Iterable) else (x,)
|
{"hexsha": "b5d7e9d00fcefd7d306bc7bb376f98e1f7cfc04c", "size": 12206, "ext": "py", "lang": "Python", "max_stars_repo_path": "Stochastic_engine/cord/waterbank.py", "max_stars_repo_name": "romulus97/HYDROWIRES", "max_stars_repo_head_hexsha": "115e534764d8f58d64340d99cf6cb8eb6598c4ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Stochastic_engine/cord/waterbank.py", "max_issues_repo_name": "romulus97/HYDROWIRES", "max_issues_repo_head_hexsha": "115e534764d8f58d64340d99cf6cb8eb6598c4ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stochastic_engine/cord/waterbank.py", "max_forks_repo_name": "romulus97/HYDROWIRES", "max_forks_repo_head_hexsha": "115e534764d8f58d64340d99cf6cb8eb6598c4ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2348484848, "max_line_length": 166, "alphanum_fraction": 0.6175651319, "include": true, "reason": "import numpy", "num_tokens": 2850}
|
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np

# Write TensorBoard event files into the ./logs directory
writer = SummaryWriter("logs")
image_path = 'dataset/cat_vs_dog/train/cat/cat.0.jpg'  # path to the sample image
img_PIL = Image.open(image_path)  # open the image file (PIL Image)
img_array = np.array(img_PIL)  # convert to a numpy array
print(type(img_array))
print(img_array.shape)  # (374, 500, 3)
# dataformats='HWC': the array layout is height x width x channels
writer.add_image('cat', img_array, 1, dataformats='HWC')

# log the scalar series y = 2x for steps 0..99
x = range(100)
for i in x:
    writer.add_scalar('y=2x', i * 2, i)
writer.close()

# from PIL import Image
# import numpy as np
#
# image_path = 'dataset/cat_vs_dog/train/cat/cat.0.jpg'
# img = Image.open(image_path)
# print(type(img))
# img_array = np.array(img)
# print(type(img_array))
|
{"hexsha": "3f6dd4e1ea4448019e402fbb40622136b86d5662", "size": 723, "ext": "py", "lang": "Python", "max_stars_repo_path": "note2_test_tb.py", "max_stars_repo_name": "AluminiumOxide/pytorch_base_-tutorial", "max_stars_repo_head_hexsha": "a6d3bea6070c7c774dcd7c55d94b0a1441548c8b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "note2_test_tb.py", "max_issues_repo_name": "AluminiumOxide/pytorch_base_-tutorial", "max_issues_repo_head_hexsha": "a6d3bea6070c7c774dcd7c55d94b0a1441548c8b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "note2_test_tb.py", "max_forks_repo_name": "AluminiumOxide/pytorch_base_-tutorial", "max_forks_repo_head_hexsha": "a6d3bea6070c7c774dcd7c55d94b0a1441548c8b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9310344828, "max_line_length": 61, "alphanum_fraction": 0.6901798064, "include": true, "reason": "import numpy", "num_tokens": 214}
|
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import networkx as nx
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
sys.path.append('%s/../../pytorch_structure2vec/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from pytorch_util import weights_init
sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__)))
from graph_embedding import EmbedMeanField, EmbedLoopyBP
from cmd_args import cmd_args
from modules.custom_mod import JaggedMaxModule
from rl_common import local_args
def greedy_actions(q_values, v_p, banned_list):
    # Pick the argmax action per graph from a jagged batch of q-values.
    #   q_values:    stacked per-node scores for every graph in the batch
    #   v_p:         prefix sums of node counts, delimiting each graph's segment
    #   banned_list: per-graph (local) node indices that must not be selected
    actions = []
    offset = 0
    banned_acts = []
    prefix_sum = v_p.data.cpu().numpy()
    for i in range(len(prefix_sum)):
        # n_nodes is not used below; the loop's purpose is to translate each
        # graph's banned local node ids into global row indices of q_values
        n_nodes = prefix_sum[i] - offset
        if banned_list is not None and banned_list[i] is not None:
            for j in banned_list[i]:
                banned_acts.append(offset + j)
        offset = prefix_sum[i]
    q_values = q_values.data.clone()
    if len(banned_acts):
        # mask banned rows with the most negative double so the max skips them
        q_values[banned_acts, :] = np.finfo(np.float64).min
    jmax = JaggedMaxModule()
    values, actions = jmax(Variable(q_values), v_p)
    return actions.data, values.data
class QNet(nn.Module):
    """Q-value network over graphs: a structure2vec embedding followed by a
    small MLP that scores each (state, action) pair."""

    def __init__(self, s2v_module = None):
        super(QNet, self).__init__()
        # select the graph embedding flavor from the command-line config
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()
        # embedding width: out_dim if set, otherwise the latent dimension
        if cmd_args.out_dim == 0:
            embed_dim = cmd_args.latent_dim
        else:
            embed_dim = cmd_args.out_dim
        # scoring head: concat(node embed, graph embed) -> scalar Q-value,
        # optionally through one hidden layer
        if local_args.mlp_hidden:
            self.linear_1 = nn.Linear(embed_dim * 2, local_args.mlp_hidden)
            self.linear_out = nn.Linear(local_args.mlp_hidden, 1)
        else:
            self.linear_out = nn.Linear(embed_dim * 2, 1)
        weights_init(self)
        # reuse an existing structure2vec module when provided (weight sharing)
        if s2v_module is None:
            self.s2v = model(latent_dim=cmd_args.latent_dim,
                            output_dim=cmd_args.out_dim,
                            num_node_feats=2,
                            num_edge_feats=0,
                            max_lv=cmd_args.max_lv)
        else:
            self.s2v = s2v_module

    def PrepareFeatures(self, batch_graph, picked_nodes):
        # Build 2-dim one-hot node features for the whole batch:
        # column 0 = ordinary node, column 1 = already-picked node.
        # Returns (node_feat, prefix sums of node counts per graph).
        n_nodes = 0
        prefix_sum = []
        picked_ones = []
        for i in range(len(batch_graph)):
            if picked_nodes is not None and picked_nodes[i] is not None:
                assert picked_nodes[i] >= 0 and picked_nodes[i] < batch_graph[i].num_nodes
                # translate the per-graph node id into a global row index
                picked_ones.append(n_nodes + picked_nodes[i])
            n_nodes += batch_graph[i].num_nodes
            prefix_sum.append(n_nodes)
        node_feat = torch.zeros(n_nodes, 2)
        node_feat[:, 0] = 1.0
        if len(picked_ones):
            # flip the one-hot for picked nodes (writes through to tensor storage)
            node_feat.numpy()[picked_ones, 1] = 1.0
            node_feat.numpy()[picked_ones, 0] = 0.0
        return node_feat, torch.LongTensor(prefix_sum)

    def add_offset(self, actions, v_p):
        # Shift each graph's local action index by that graph's node offset so
        # the actions index into the stacked (batched) embedding matrix.
        prefix_sum = v_p.data.cpu().numpy()
        shifted = []
        for i in range(len(prefix_sum)):
            if i > 0:
                offset = prefix_sum[i - 1]
            else:
                offset = 0
            shifted.append(actions[i] + offset)
        return shifted

    def rep_global_embed(self, graph_embed, v_p):
        # Repeat each graph-level embedding once per node of that graph,
        # aligning it row-by-row with the stacked node embeddings.
        prefix_sum = v_p.data.cpu().numpy()
        rep_idx = []
        for i in range(len(prefix_sum)):
            if i == 0:
                n_nodes = prefix_sum[i]
            else:
                n_nodes = prefix_sum[i] - prefix_sum[i - 1]
            rep_idx += [i] * n_nodes
        rep_idx = Variable(torch.LongTensor(rep_idx))
        if cmd_args.ctx == 'gpu':
            rep_idx = rep_idx.cuda()
        graph_embed = torch.index_select(graph_embed, 0, rep_idx)
        return graph_embed

    def forward(self, time_t, states, actions, greedy_acts = False):
        # states: list of (graph, picked_node, banned_nodes) triples.
        # With actions=None every node is scored (one Q-value per node);
        # otherwise only the chosen action of each graph is scored.
        batch_graph, picked_nodes, banned_list = zip(*states)
        node_feat, prefix_sum = self.PrepareFeatures(batch_graph, picked_nodes)
        if cmd_args.ctx == 'gpu':
            node_feat = node_feat.cuda()
            prefix_sum = prefix_sum.cuda()
        prefix_sum = Variable(prefix_sum)
        embed, graph_embed = self.s2v(batch_graph, node_feat, None, pool_global=True)
        if actions is None:
            # broadcast the graph embedding to every node row
            graph_embed = self.rep_global_embed(graph_embed, prefix_sum)
        else:
            # keep only the embedding rows of the selected actions
            shifted = self.add_offset(actions, prefix_sum)
            embed = embed[shifted, :]
        embed_s_a = torch.cat((embed, graph_embed), dim=1)
        if local_args.mlp_hidden:
            embed_s_a = F.relu( self.linear_1(embed_s_a) )
        raw_pred = self.linear_out(embed_s_a)
        if greedy_acts:
            actions, _ = greedy_actions(raw_pred, prefix_sum, banned_list)
        return actions, raw_pred, prefix_sum
class NStepQNet(nn.Module):
    """Time-conditioned Q-network: one QNet per decision step, all sharing the
    structure2vec embedding module of the first step's network."""

    def __init__(self, num_steps, s2v_module = None):
        super(NStepQNet, self).__init__()
        step_nets = [QNet(s2v_module)]
        for _ in range(1, num_steps):
            # later steps reuse the first step's s2v module (weight sharing)
            step_nets.append(QNet(step_nets[0].s2v))
        self.list_mod = nn.ModuleList(step_nets)
        self.num_steps = num_steps

    def forward(self, time_t, states, actions, greedy_acts = False):
        assert time_t >= 0 and time_t < self.num_steps
        # dispatch to the QNet dedicated to this time step
        return self.list_mod[time_t](time_t, states, actions, greedy_acts)
|
{"hexsha": "8f5ffcd81ca7c649c2360ea671ee197cf7cac64d", "size": 5720, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/graph_attack/q_net.py", "max_stars_repo_name": "HenryKenlay/graph_adversarial_attack", "max_stars_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 275, "max_stars_repo_stars_event_min_datetime": "2020-10-22T22:03:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T06:08:05.000Z", "max_issues_repo_path": "code/graph_attack/q_net.py", "max_issues_repo_name": "HenryKenlay/graph_adversarial_attack", "max_issues_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2020-10-30T08:28:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:55:12.000Z", "max_forks_repo_path": "code/graph_attack/q_net.py", "max_forks_repo_name": "HenryKenlay/graph_adversarial_attack", "max_forks_repo_head_hexsha": "5282d1269aa637ecafb0af239c53fa8396e5ef66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 70, "max_forks_repo_forks_event_min_datetime": "2020-10-28T19:14:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T06:11:51.000Z", "avg_line_length": 32.6857142857, "max_line_length": 103, "alphanum_fraction": 0.6034965035, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1369}
|
# A spin configuration: rows are spatial sites, columns are imaginary-time slices.
const PISpins = Matrix{Int16}

"""
    rand_pispins(nsite::Int, ntau::Int)

Generate a random ±1 spin configuration with `nsite` spatial sites and `ntau`
imaginary-time slices.
"""
rand_pispins(nsite::Int, ntau::Int) = rand([-Int16(1), Int16(1)], nsite, ntau)

"""
    ntau(spins::PISpins)

Number of imaginary time slices (second dimension of the configuration).
"""
ntau(spins::PISpins) = size(spins, 2)

"""
    nsite(spins::PISpins)

Number of spatial sites (first dimension of the configuration).
"""
nsite(spins::PISpins) = size(spins, 1)

"""
    flip!(spins::PISpins, i::Int, j::Int) -> PISpins

Flip the spin at site `i`, time slice `j` in place and return `spins`.
"""
flip!(spins::PISpins, i::Int, j::Int) = (spins[i, j] = -spins[i, j]; spins)
"""
PathIntegralIter{MT<:AbstractModel}
PathIntegralIter(model::MT, spins::PISpins) where {MT<:AbstractModel} -> PathIntegralIter{MT}
An iterator for sweeps in Path Integral MonteCarlo.
"""
struct PathIntegralIter{MT<:AbstractModel}
model::MT
spins::PISpins
PathIntegralIter(model::MT, spins::PISpins) where {MT<:AbstractModel} = new{MT}(model, spins)
end
"""
    Base.iterate(si::PathIntegralIter, state::Int=1)

Perform one Metropolis sweep over the whole space-time lattice (mutating
`si.spins`) and yield `((state, model, spins), state+1)`. The iterator never
terminates on its own; callers break out explicitly.
"""
function Base.iterate(si::PathIntegralIter, state::Int=1)
    model = si.model
    spins = si.spins
    lt = model.lattice
    NN = nsite(lt)
    NTAU = ntau(spins)
    beta = 1/model.temp
    dtau = beta/NTAU
    # Suzuki-Trotter mapping: the spatial coupling is split over the NTAU slices,
    # and a ferromagnetic coupling along imaginary time encodes the transverse field
    J_spatial = model.J0/NTAU
    J_Trotter = - log(tanh(dtau*model.Γ)) * model.temp / 2
    # visit each spin on the space-time lattice in sequential order
    for itau = 1:NTAU
        for ir = 1:NN
            # interaction energy with the neighbouring spins in real space and
            # the neighbouring spins in imaginary time
            exchange_field = J_spatial * sum(view(spins, neighbors(lt, ir), itau))
            # periodic boundary conditions in imaginary time
            itau_up = mod(itau, NTAU) + 1
            itau_down = mod(itau-2, NTAU) + 1
            exchange_field += J_Trotter * (spins[ir,itau_up] + spins[ir,itau_down])
            energy_diff = 2 * spins[ir,itau] * exchange_field # Note: J_spatial > 0 corresponds to FM interactions
            # Metropolis-Hastings update: flip the spin with probability min(1, exp(-beta*energy_diff))
            (energy_diff < 0 || rand() < exp(-beta*energy_diff)) && flip!(spins, ir, itau)
        end
    end
    ((state, model, spins), state+1)
end
"""
measure(model::AbstractModel, spins::PISpins) -> Tuple
Measure observables, returns a tuple of (E, Mz, Mz², Mz⁴).
"""
function measure(model::AbstractModel, spins::PISpins)
lt = model.lattice
NN = nsite(lt)
NTAU = ntau(spins)
beta = 1/model.temp
dtau = beta/NTAU
J_spatial = model.J0/NTAU
J_Trotter = - log(tanh(dtau*model.Γ)) * model.temp / 2
# Measure the total energy
energy_tot = 0.0
magnz = 0.0
for itau = 1:NTAU
for ir = 1:NN
magnz += spins[ir,itau]
energy_tot -= J_spatial * spins[ir,itau] * sum(view(spins, neighbors(lt, ir), itau)) / 2 # compenstate for double counting of bonds
# periodic boundary conditions in imaginary time
itau_up = mod(itau, NTAU) + 1
energy_tot -= J_Trotter * spins[ir,itau] * spins[ir,itau_up]
end
end
magnz /= (NTAU * NN)
energy_tot / (NTAU * NN), magnz, magnz^2, magnz^4
end
"""
runsse(sseiter, ntherm::Int, nmeas::Int; binsize::Int=200, seed::Int=2) -> DynamicBin
Run an PathIntegralIter application for calculating mean energy and magnetization, where
* sseiter: PathIntegralIter instance.
* ntherm: number of steps for heat bath.
* nmeas: number of measures.
* binsize: size of bin.
"""
function runsse(sseiter, ntherm::Int, nmeas::Int; binsize::Int=200)
println("thermalizing ...")
for (k, model, spins) in sseiter
k == ntherm && break
end
println("measuring ...")
println(" E Mz Mz² Mz⁴")
bin = DynamicBin(binsize, Float64, Float64, Float64, Float64)
for (k, model, spins) in sseiter
res = measure(model, spins)
push!(bin, res)
if fitted(bin)
print_lastbin(bin); println()
end
k == nmeas && break
end
bin
end
|
{"hexsha": "17918f19dc03c351fd2291a4cce2d48bda4e8d17", "size": 3919, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PathIntegral.jl", "max_stars_repo_name": "JuliaTagBot/Pathintegral-QMC.jl", "max_stars_repo_head_hexsha": "9e59f93452e4f289094a6945fe8981480b08521e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-09-27T02:42:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T14:47:36.000Z", "max_issues_repo_path": "src/PathIntegral.jl", "max_issues_repo_name": "JuliaTagBot/Pathintegral-QMC.jl", "max_issues_repo_head_hexsha": "9e59f93452e4f289094a6945fe8981480b08521e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-09-09T09:26:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-09T09:26:41.000Z", "max_forks_repo_path": "src/PathIntegral.jl", "max_forks_repo_name": "JuliaTagBot/Pathintegral-QMC.jl", "max_forks_repo_head_hexsha": "9e59f93452e4f289094a6945fe8981480b08521e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-25T03:33:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:57:35.000Z", "avg_line_length": 30.8582677165, "max_line_length": 144, "alphanum_fraction": 0.6236284767, "num_tokens": 1218}
|
(*
    Title:      Examples_Echelon_Form_IArrays.thy
    Author:     Jose Divasón <jose.divasonm at unirioja.es>
    Author:     Jesús Aransay <jesus-maria.aransay at unirioja.es>
*)

section\<open>Examples of computations using immutable arrays\<close>

theory Examples_Echelon_Form_IArrays
imports
  Echelon_Form_Inverse_IArrays
  "HOL-Library.Code_Target_Numeral"
  Gauss_Jordan.Examples_Gauss_Jordan_Abstract
  Examples_Echelon_Form_Abstract
begin

text\<open>The file @{file \<open>Examples_Echelon_Form_Abstract.thy\<close>} is only imported to
  include the definitions of matrices that we use in the following examples.
  Otherwise, it could be removed.\<close>

subsection\<open>Computing echelon forms, determinants, characteristic polynomials and so on using
  immutable arrays\<close>

subsubsection\<open>Serializing gcd\<close>

text\<open>First of all, we serialize the gcd to the ones of PolyML and MLton as we did in the
  Gauss-Jordan development.\<close>

context
includes integer.lifting
begin

(* lift the HOL gcd on int to the code-generator integer type *)
lift_definition gcd_integer :: "integer => integer => integer"
  is "gcd :: int => int => int" .

lemma gcd_integer_code [code]:
"gcd_integer l k = \<bar>if l = (0::integer) then k else gcd_integer l (\<bar>k\<bar> mod \<bar>l\<bar>)\<bar>"
by transfer (simp add: gcd_code_int [symmetric] ac_simps)
end

code_printing
constant "abs :: integer => _" \<rightharpoonup> (SML) "IntInf.abs"
| constant "gcd_integer :: integer => _ => _" \<rightharpoonup> (SML) "(PolyML.IntInf.gcd ((_),(_)))" (*Only for Poly/ML*)
(* | constant "gcd_integer :: integer => _ => _" \<rightharpoonup> (SML) "(MLton.IntInf.gcd ((_),(_)))"*) (*Only for MLton*)

lemma gcd_code [code]:
"gcd a b = int_of_integer (gcd_integer (of_int a) (of_int b))"
by (metis gcd_integer.abs_eq int_of_integer_integer_of_int integer_of_int_eq_of_int)

code_printing
constant "abs :: real => real" \<rightharpoonup>
(SML) "Real.abs"
declare [[code drop: "abs :: real \<Rightarrow> real"]]

code_printing
constant "divmod_integer :: integer => _ => _" \<rightharpoonup> (SML) "(IntInf.divMod ((_),(_)))"

subsubsection\<open>Examples\<close>

value "det test_int_3x3"
value "det test_int_3x3_03"
value "det test_int_6x6"
value "det test_int_8x8"
value "det test_int_20x20"
value "charpoly test_real_3x3"
value "charpoly test_real_6x6"
value "inverse_matrix test_int_3x3_02"
value "matrix_to_iarray (echelon_form_of test_int_3x3 euclid_ext2)"
value "matrix_to_iarray (echelon_form_of test_int_8x8 euclid_ext2)"

text\<open>The following computations are much faster when code is exported.\<close>
(*value "matrix_to_iarray (echelon_form_of_euclidean test_int_20x20)"*)
(*value "echelon_form_of_iarrays (matrix_to_iarray test_int_20x20) euclid_ext2"*)
(*value "matrix_to_iarray (echelon_form_of test_int_20x20 euclid_ext2)"*)

text\<open>The following matrix will have an integer inverse since its determinant is equal to one\<close>
value "det test_int_3x3_03"
value "the (matrix_to_iarray_option (inverse_matrix test_int_3x3_03))"

(* sanity check: A * A¯¹ and A¯¹ * A should both be the identity *)
text\<open>We check that the previous inverse has been correctly computed:\<close>
value "matrix_matrix_mult_iarray
(matrix_to_iarray test_int_3x3_03)
(the (matrix_to_iarray_option (inverse_matrix test_int_3x3_03)))"
value "matrix_matrix_mult_iarray
(the (matrix_to_iarray_option (inverse_matrix test_int_3x3_03)))
(matrix_to_iarray test_int_3x3_03)"

text\<open>The following matrices have determinant different from zero,
  and thus do not have an integer inverse\<close>
value "det test_int_6x6"
value "matrix_to_iarray_option (inverse_matrix test_int_6x6)"
value "det test_int_20x20"
value "matrix_to_iarray_option (inverse_matrix test_int_20x20)"

text\<open>The inverse in dimension 20 has (trivial) inverse.\<close>
value "the (matrix_to_iarray_option (inverse_matrix (mat 1::int^20^20)))"
value "the (matrix_to_iarray_option (inverse_matrix (mat 1::int^20^20))) = matrix_to_iarray (mat 1::int^20^20)"

definition "print_echelon_int (A::int^20^20) = echelon_form_of_iarrays (matrix_to_iarray A) euclid_ext2"

text\<open>Performance is better when code is exported. In addition, it depends on the growth of
the integer coefficients of the matrices. For instance, \<open>test_int_20x20\<close>
is a matrix of integer numbers between $-10$ and $10$. The computation of its echelon form (by means
of \<open>print_echelon_int\<close>) needs about 2 seconds. However, the matrix \<open>test_int_20x20_2\<close>
has elements between $0$ and $1010$. The computation of its echelon form (by means
of \<open>print_echelon_int\<close> too) needs about 0.310 seconds. These benchmarks have been carried
out in a laptop with an i5-3360M processor with 4 GB of RAM.\<close>

export_code charpoly det echelon_form_of test_int_8x8 test_int_20x20 test_int_20x20_2 print_echelon_int
  in SML module_name Echelon (*file "Echelon.sml"*)

(*
PolyML.use "Echelon.sml"; open Echelon; open Timer;
let val now=startCPUTimer (); in print_echelon_int (test_int_20x20); checkCPUTimes (now) end;
*)

(*Analogously, code can be exported to Haskell using the file Code_Rational presented in the
Gauss-Jordan AFP entry.*)

end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Echelon_Form/Examples_Echelon_Form_IArrays.thy"}
|
# --------------- AND Perceptron ---------------
import pandas as pd

# TODO: Set weight1, weight2, and bias
# Any (w1, w2, bias) where w1*x1 + w2*x2 + bias >= 0 only for (1, 1) implements AND.
weight1 = 0.2
weight2 = 0.8
bias = -1.0

# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]  # AND truth table
outputs = []

# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
    # perceptron: step activation applied to the weighted sum plus bias
    linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
    output = int(linear_combination >= 0)
    is_correct_string = 'Yes' if output == correct_output else 'No'
    outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])

# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
# --------------- NOT Perceptron --------------------
import pandas as pd

# TODO: Set weight1, weight2, and bias
# NOTE: this perceptron implements NOT on the *second* input and ignores the first,
# so weight1 is 0 and weight2 is negative.
weight1 = 0.0
weight2 = -1.0
bias = -0.0

# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]  # NOT of the second input
outputs = []

# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
    # perceptron: step activation applied to the weighted sum plus bias
    linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
    output = int(linear_combination >= 0)
    is_correct_string = 'Yes' if output == correct_output else 'No'
    outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])

# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
# --------------- Perceptron Step --------------------
import numpy as np

# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)

def stepFunction(t):
    # Heaviside step activation: 1 for t >= 0, else 0
    if t >= 0:
        return 1
    return 0

def prediction(X, W, b):
    # perceptron output for one sample X given weight column W and bias b
    return stepFunction((np.matmul(X,W)+b)[0])

# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.
def perceptronStep(X, y, W, b, learn_rate = 0.01):
    for i in range(len(X)):
        y_hat = prediction(X[i],W,b)
        if y[i]-y_hat == 1:
            # false negative: move the boundary toward the misclassified point
            W[0] += X[i][0]*learn_rate
            W[1] += X[i][1]*learn_rate
            b += learn_rate
        elif y[i]-y_hat == -1:
            # false positive: move the boundary away from the misclassified point
            W[0] -= X[i][0]*learn_rate
            W[1] -= X[i][1]*learn_rate
            b -= learn_rate
    return W, b

# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25):
    x_min, x_max = min(X.T[0]), max(X.T[0])
    # NOTE(review): y_min/y_max are unused -- kept from the course template
    y_min, y_max = min(X.T[1]), max(X.T[1])
    W = np.array(np.random.rand(2,1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for i in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        # slope/intercept of the boundary W[0]*x + W[1]*y + b = 0
        boundary_lines.append((-W[0]/W[1], -b/W[1]))
    return boundary_lines
# --------------- Softmax --------------------
import numpy as np

def softmax(L):
    """Return the softmax of the score list `L` as a list of probabilities."""
    exp_scores = np.exp(L)
    partition = sum(exp_scores)
    # normalize every exponentiated score by the partition sum
    return [score * 1.0 / partition for score in exp_scores]

# Note: The function np.divide can also be used here, as follows:
# def softmax(L):
#     expL = np.exp(L)
#     return np.divide (expL, expL.sum())
# --------------- Cross Entropy --------------------
import numpy as np

def cross_entropy(Y, P):
    """Summed binary cross-entropy between labels `Y` and probabilities `P`.

    Y -- iterable of 0/1 labels; P -- iterable of probabilities in (0, 1).
    """
    # np.float_ was removed in NumPy 2.0; np.asarray(..., dtype=float) is the
    # equivalent, version-safe conversion to float64 arrays.
    Y = np.asarray(Y, dtype=float)
    P = np.asarray(P, dtype=float)
    return -np.sum(Y * np.log(P) + (1 - Y) * np.log(1 - P))
# -------------------- Gradient Descent --------------------
# Sigmoid activation ( integral of log(e^x + 1) + C )
def sigmoid(x):
    """Sigmoid activation: maps any real input into the open interval (0, 1)."""
    decay = np.exp(-x)
    return 1 / (1 + decay)
# Output (prediction) formula
def output_formula(features, weights, bias):
    """Model prediction: sigmoid of the affine combination of the features."""
    weighted_sum = np.dot(features, weights)
    return sigmoid(weighted_sum + bias)
# Error Formula (Binary Cross-Entropy / Log Loss)
# y = probability
def error_formula(y, output):
    """Binary cross-entropy loss for one prediction `output` against label `y`."""
    loss_if_one = -y * np.log(output)
    loss_if_zero = -(1 - y) * np.log(1 - output)
    return loss_if_one + loss_if_zero
# Gradient descent step
def update_weights(x, y, weights, bias, learnrate):
    """Perform one gradient-descent step for log loss; mutates and
    returns (weights, bias)."""
    prediction = output_formula(x, weights, bias)
    # d(error)/d(prediction) for binary cross-entropy is prediction - y.
    gradient = -(y - prediction)
    weights -= learnrate * gradient * x
    bias -= learnrate * gradient
    return weights, bias
# -------------------- Gradient Descent 2 --------------------
import numpy as np
def sigmoid(x):
    """
    Logistic activation function.
    """
    return 1 / (1 + np.exp(-x))
def sigmoid_prime(x):
    """
    Derivative of the sigmoid: s(x) * (1 - s(x)).
    """
    s = sigmoid(x)
    return s * (1 - s)
# Worked example: one gradient-descent step for a single sigmoid unit.
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array(0.5)
# Initial weights
w = np.array([0.5, -0.5, 0.3, 0.1])
### Calculate one gradient descent step for each weight
### Note: Some steps have been consolidated, so there are
### fewer variable names than in the above sample code
# The node's linear combination of inputs and weights
h = np.dot(x, w)
# Output of the neural network: sigmoid of the linear combination
nn_output = sigmoid(h)
# Error: target minus prediction
error = y - nn_output
# Error term (delta): the error scaled by the output gradient f'(h)
# (sigmoid_prime is defined above in this file)
error_term = error * sigmoid_prime(h)
# Note: The sigmoid_prime function calculates sigmoid(h) twice,
# but you've already calculated it once. You can make this
# code more efficient by calculating the derivative directly
# rather than calling sigmoid_prime, like this:
# error_term = error * nn_output * (1 - nn_output)
# Change in weights: learning rate * delta * input vector
del_w = learnrate * error_term * x
print('Neural Network output:')
print(nn_output)
print('Amount of Error:')
print(error)
print('Change in Weights:')
print(del_w)
# --------------- Gradient Descent 3 --------------------
import numpy as np
from data_prep import features, targets, features_test, targets_test
def sigmoid(x):
    """
    Logistic activation: squashes x into the open interval (0, 1).
    """
    return (1 + np.exp(-x)) ** -1
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Activation of the output unit
# Notice we multiply the inputs and the weights here
# rather than storing h as a separate variable
output = sigmoid(np.dot(x, weights))
# The error, the target minus the network output
error = y - output
# The error term
# Notice we calulate f'(h) here instead of defining a separate
# sigmoid_prime function. This just makes it faster because we
# can re-use the result of the sigmoid function stored in
# the output variable
error_term = error * output * (1 - output)
# The gradient descent step, the error times the gradient times the inputs
del_w += error_term * x
# Update the weights here. The learning rate times the
# change in weights, divided by the number of records to average
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
# --------------- Multilayer Perceptrons (Hidden Layers) --------------------
import numpy as np
def sigmoid(x):
    """
    Logistic activation, applied element-wise.
    """
    return np.reciprocal(1.0 + np.exp(-x))
# Worked example: forward pass through a 4-3-2 multilayer perceptron.
# Network size
N_input = 4
N_hidden = 3
N_output = 2
# Fixed seed so the fake data and weights are reproducible.
np.random.seed(42)
# Make some fake data
X = np.random.randn(4)
# Weight matrices drawn from small zero-mean normals.
weights_input_to_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))
weights_hidden_to_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))
# Forward pass through the network: affine transform then sigmoid
# at each of the two layers.
hidden_layer_in = np.dot(X, weights_input_to_hidden)
hidden_layer_out = sigmoid(hidden_layer_in)
print('Hidden-layer Output:')
print(hidden_layer_out)
output_layer_in = np.dot(hidden_layer_out, weights_hidden_to_output)
output_layer_out = sigmoid(output_layer_in)
print('Output-layer Output:')
print(output_layer_out)
# -------------------- Backpropagation --------------------
import numpy as np
def sigmoid(x):
    """
    Logistic function: 1 / (1 + e^(-x)).
    """
    denom = 1 + np.exp(-x)
    return 1 / denom
# Worked example: one backpropagation step for a 3-2-1 network.
x = np.array([0.5, 0.1, -0.2])
target = 0.6
learnrate = 0.5
weights_input_hidden = np.array([[0.5, -0.6],
                                 [0.1, -0.2],
                                 [0.1, 0.7]])
weights_hidden_output = np.array([0.1, -0.3])
## Forward pass
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_output = sigmoid(hidden_layer_input)
output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
output = sigmoid(output_layer_in)
## Backwards pass
# Output error: target minus prediction
error = target - output
# Error term (delta) for the output unit: error * f'(output input)
output_error_term = error * output * (1 - output)
# Hidden-layer error term: backpropagate the output delta through the
# hidden->output weights, scaled by the hidden layer's sigmoid gradient
hidden_error_term = np.dot(output_error_term, weights_hidden_output) * \
                    hidden_layer_output * (1 - hidden_layer_output)
# Weight updates: learning rate * delta * layer input
delta_w_h_o = learnrate * output_error_term * hidden_layer_output
# x[:, None] reshapes the input to a column so the outer product yields
# one update per (input, hidden) weight
delta_w_i_h = learnrate * hidden_error_term * x[:, None]
print('Change in weights for hidden layer to output layer:')
print(delta_w_h_o)
print('Change in weights for input layer to hidden layer:')
print(delta_w_i_h)
# -------------------- Backpropagation 2 --------------------
import numpy as np
from data_prep import features, targets, features_test, targets_test
np.random.seed(21)
def sigmoid(x):
    """
    Logistic activation used by both network layers.
    """
    return np.power(1 + np.exp(-x), -1)
# Hyperparameters
n_hidden = 2  # number of hidden units
epochs = 900
learnrate = 0.005
n_records, n_features = features.shape
last_loss = None
# Initialize weights from zero-mean normals scaled by 1/sqrt(n_features)
# so early sigmoid activations stay in the sensitive range.
weights_input_hidden = np.random.normal(scale=1 / n_features ** .5,
                                        size=(n_features, n_hidden))
weights_hidden_output = np.random.normal(scale=1 / n_features ** .5,
                                         size=n_hidden)
# Batch gradient descent with one hidden layer.
# BUG FIX: the train loss below was previously computed from `x` -- the
# *last sample* of the inner loop -- instead of the full `features`
# matrix, so the printed "mean square error on the training set" was
# meaningless.  It now mirrors the final test-set evaluation.
for e in range(epochs):
    del_w_input_hidden = np.zeros(weights_input_hidden.shape)
    del_w_hidden_output = np.zeros(weights_hidden_output.shape)
    for x, y in zip(features.values, targets):
        ## Forward pass ##
        hidden_input = np.dot(x, weights_input_hidden)
        hidden_output = sigmoid(hidden_input)
        output = sigmoid(np.dot(hidden_output,
                                weights_hidden_output))
        ## Backward pass ##
        # Prediction error for this record
        error = y - output
        # Error term (delta) for the output unit
        output_error_term = error * output * (1 - output)
        # Propagate the error back to the hidden layer and scale by the
        # hidden layer's sigmoid gradient
        hidden_error = np.dot(output_error_term, weights_hidden_output)
        hidden_error_term = hidden_error * hidden_output * (1 - hidden_output)
        # Accumulate the weight updates over the epoch
        del_w_hidden_output += output_error_term * hidden_output
        del_w_input_hidden += hidden_error_term * x[:, None]
    # Apply the averaged updates once per epoch
    weights_input_hidden += learnrate * del_w_input_hidden / n_records
    weights_hidden_output += learnrate * del_w_hidden_output / n_records
    # Printing out the mean square error on the training set
    if e % (epochs / 10) == 0:
        hidden_output = sigmoid(np.dot(features, weights_input_hidden))
        out = sigmoid(np.dot(hidden_output,
                             weights_hidden_output))
        loss = np.mean((out - targets) ** 2)
        if last_loss and last_loss < loss:
            print("Train loss: ", loss, " WARNING - Loss Increasing")
        else:
            print("Train loss: ", loss)
        last_loss = loss
# Calculate accuracy on test data (threshold predictions at 0.5)
hidden = sigmoid(np.dot(features_test, weights_input_hidden))
out = sigmoid(np.dot(hidden, weights_hidden_output))
predictions = out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
|
{"hexsha": "9b02743cbdde3c0d3d4bac56ac38d5e9f1fa9742", "size": 14533, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai_notes.py", "max_stars_repo_name": "AlanACruz/aipnd-project", "max_stars_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ai_notes.py", "max_issues_repo_name": "AlanACruz/aipnd-project", "max_issues_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ai_notes.py", "max_forks_repo_name": "AlanACruz/aipnd-project", "max_forks_repo_head_hexsha": "e0d5dcb49865cced1a9e88f03adaf71f6d0bf1a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0534188034, "max_line_length": 133, "alphanum_fraction": 0.6501066538, "include": true, "reason": "import numpy", "num_tokens": 3645}
|
"""
glutils.py
Author: Mahesh Venkitachalam
Some OpenGL utilities.
"""
import OpenGL
from OpenGL.GL import *
from OpenGL.GL.shaders import *
import numpy, math
import numpy as np
from PIL import Image
def loadTexture(filename):
    """load OpenGL 2D texture from given image file"""
    img = Image.open(filename)
    # Flatten the pixel data into a numpy array for glTexImage2D.
    # NOTE(review): np.int8 overflows for channel values > 127; the raw
    # bytes handed to GL are the same, but np.uint8 would be the
    # conventional dtype for 8-bit image data -- confirm before changing.
    imgData = numpy.array(list(img.getdata()), np.int8)
    texture = glGenTextures(1)
    glPixelStorei(GL_UNPACK_ALIGNMENT,1)
    glBindTexture(GL_TEXTURE_2D, texture)
    # NOTE(review): this glPixelStorei call duplicates the one above.
    glPixelStorei(GL_UNPACK_ALIGNMENT,1)
    # Clamp at edges and use bilinear filtering (no mipmaps).
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    # Upload as RGBA bytes -- assumes the source image has four channels;
    # TODO confirm callers always pass RGBA images.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.size[0], img.size[1],
        0, GL_RGBA, GL_UNSIGNED_BYTE, imgData)
    # Unbind and return the GL texture handle.
    glBindTexture(GL_TEXTURE_2D, 0)
    return texture
def perspective(fov, aspect, zNear, zFar):
    """Return the flat (length-16, column-major) float32 matrix
    equivalent to gluPerspective(fov, aspect, zNear, zFar)."""
    cot_half = 1.0 / math.tan(math.radians(fov) / 2.0)
    depth = float(zNear - zFar)
    return numpy.array(
        [cot_half / float(aspect), 0.0, 0.0, 0.0,
         0.0, cot_half, 0.0, 0.0,
         0.0, 0.0, (zFar + zNear) / depth, -1.0,
         0.0, 0.0, 2.0 * zFar * zNear / depth, 0.0],
        numpy.float32)
def ortho(l, r, b, t, n, f):
    """Return the flat (length-16) float32 matrix equivalent of
    glOrtho(l, r, b, t, n, f)."""
    width, height, depth = float(r - l), float(t - b), float(f - n)
    return numpy.array(
        [2.0 / width, 0.0, 0.0, 0.0,
         0.0, 2.0 / height, 0.0, 0.0,
         0.0, 0.0, -2.0 / depth, 0.0,
         -(r + l) / width, -(t + b) / height,
         -(f + n) / depth, 1.0],
        numpy.float32)
def lookAt(eye, center, up):
    """Return the 4x4 view matrix equivalent of gluLookAt (based on the
    MESA implementation).

    Bug fix: the previous version normalised `up` in place (`up /= norm`),
    which mutated the caller's array and raised TypeError for lists,
    tuples and integer arrays.  All inputs are now copied into local
    float arrays first; callers' data is never modified.
    """
    eye = np.asarray(eye, dtype=np.float64)
    center = np.asarray(center, dtype=np.float64)
    up = np.array(up, dtype=np.float64)  # copy: normalised locally below
    # viewing direction, normalised
    forward = center - eye
    forward /= np.linalg.norm(forward)
    # normalize up vector
    up /= np.linalg.norm(up)
    # Side = forward x up
    side = np.cross(forward, up)
    # Recompute up as: up = side x forward
    up = np.cross(side, forward)
    # Rotation part: columns are (side, up, -forward).
    m = np.identity(4, np.float32)
    m[0][0], m[1][0], m[2][0] = side
    m[0][1], m[1][1], m[2][1] = up
    m[0][2], m[1][2], m[2][2] = -forward
    # Eye translation, applied before the rotation.
    t = np.identity(4, np.float32)
    t[3][0] -= eye[0]
    t[3][1] -= eye[1]
    t[3][2] -= eye[2]
    return t.dot(m)
def translate(tx, ty, tz):
    """Return the flat (length-16) float32 translation matrix, the
    equivalent of glTranslate(tx, ty, tz)."""
    mat = np.identity(4, np.float32)
    mat[3][0] = tx
    mat[3][1] = ty
    mat[3][2] = tz
    return mat.flatten()
def compileShader2(source, shaderType):
    """Compile shader source of given type
    source -- GLSL source-code for the shader
    shaderType -- GLenum GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, etc,
    returns GLuint compiled shader reference
    raises RuntimeError when a compilation failure occurs
    """
    # Normalise the source into the list-of-strings form that
    # glShaderSource expects.
    if isinstance(source, str):
        print('string shader')
        source = [source]
    elif isinstance(source, bytes):
        print('bytes shader')
        source = [source.decode('utf-8')]
    shader = glCreateShader(shaderType)
    glShaderSource(shader, source)
    glCompileShader(shader)
    result = glGetShaderiv(shader, GL_COMPILE_STATUS)
    if not(result):
        # TODO: this will be wrong if the user has
        # disabled traditional unpacking array support.
        # NOTE(review): RuntimeError is raised with three positional
        # arguments (message, source, shaderType); all of them appear in
        # e.args -- confirm callers rely on this before simplifying.
        raise RuntimeError(
            """Shader compile failure (%s): %s"""%(
                result,
                glGetShaderInfoLog( shader ),
            ),
            source,
            shaderType,
        )
    return shader
def loadShaders(strVS, strFS):
    """Compile and link a GLSL program from shader source strings.

    Parameters:
        strVS -- vertex shader source string
        strFS -- fragment shader source string
    Returns the linked GL program object.
    Raises RuntimeError on program-creation or link failure.

    Bug fix: the error paths previously raised the undefined name
    `RunTimeError`, so any failure crashed with NameError instead of
    reporting the actual problem; the link-failure message also passed
    the info log as a second argument (printf-style) instead of
    formatting it into the message.
    """
    # compile vertex shader
    shaderV = compileShader([strVS], GL_VERTEX_SHADER)
    # compile fragment shader
    shaderF = compileShader([strFS], GL_FRAGMENT_SHADER)
    # create the program object
    program = glCreateProgram()
    if not program:
        raise RuntimeError('glCreateProgram failed!')
    # attach shaders
    glAttachShader(program, shaderV)
    glAttachShader(program, shaderF)
    # Link the program
    glLinkProgram(program)
    # Check the link status
    linked = glGetProgramiv(program, GL_LINK_STATUS)
    if not linked:
        infoLen = glGetProgramiv(program, GL_INFO_LOG_LENGTH)
        infoLog = ""
        if infoLen > 1:
            infoLog = glGetProgramInfoLog(program, infoLen, None)
        glDeleteProgram(program)
        raise RuntimeError("Error linking program:\n%s" % infoLog)
    return program
|
{"hexsha": "34423d5acd7ef955213af563a2f64ea4d65d0233", "size": 4991, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/glutils.py", "max_stars_repo_name": "mkvenkit/pp2e", "max_stars_repo_head_hexsha": "b74aafd0f1a61fbb919b2b5e22dcccba6a13a35d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/glutils.py", "max_issues_repo_name": "mkvenkit/pp2e", "max_issues_repo_head_hexsha": "b74aafd0f1a61fbb919b2b5e22dcccba6a13a35d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/glutils.py", "max_forks_repo_name": "mkvenkit/pp2e", "max_forks_repo_head_hexsha": "b74aafd0f1a61fbb919b2b5e22dcccba6a13a35d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8086419753, "max_line_length": 79, "alphanum_fraction": 0.5934682428, "include": true, "reason": "import numpy", "num_tokens": 1420}
|
As concluded in Chapter \ref{ch:litReview}, with the rapid development of mobile devices and mobile computing, the literature review has showed the potential of combining different information sources, such as mobile sensors and social media, in a crowd monitoring approach. In our framework, the context data layer is designed to fulfill this objective and serves as the input for the other layers down the stack.
The context data layer collects information about the context, which is the mass gathering event. In the development of a domain ontology for mass gatherings, \citet{DelirHaghighi2013a} has summarised a set of related features that might have an effect on the safety of an event, such as environmental factors, event type, crowd size and venue. This knowledge can be retrieved from different sources. For this crowd monitoring framework, we propose three main sources:
\begin{inparaenum}[i)]
\item event information;
\item mobile sensing;
\item social media
\end{inparaenum}, each of which will be discussed in detail in the following sections.
\subsection{Event Information}
Event information includes information about the venue, expected number of participants or crowd size, and the type of event. This information is static and not required to be collected in real time, hence it can be prepared in the pre-event phase of the emergency management. Ideally, it can be either crawled from the events' homepages or provided by the event organizers and gathered at a centralized system for the monitoring phase.
One of the important features of a mass gathering in the context of emergency management is crowd density, which is the key factor leading to trampling and crushing incidents \citep{Lee2005}. This information can be estimated using the data collected above, namely the estimated crowd size, the venue capacity and the size of the venue, following the formula below.
\[
crowd\_density = crowd\_size / venue\_size
\]
where, \(crowd\_density\) is the estimated crowd density, \(crowd\_size\) is the number of participants or the venue capacity and \(venue\_size\) is the area of the venue holding the event.
\subsection{Mobile Sensing}
Mobile sensing paradigm has become an emerging topic among recent studies in mobile and context aware computing. It leverages the power of sensor-enhanced mobile devices to acquire information about the context \citep{guo2014participatory}. The mobile devices include modern smart-phones and devices, which are integrated with a wide range of sensors such as GPS receivers, accelerometers and ambient light sensors.
Using the built-in GPS receiver in mobile phones to collect information for crowd monitoring has been proposed in a related work by \citet{Wirz2012}. As mentioned in the literature review, in this study, GPS is used to obtain the current location of the phone bearer. The continuous sampling of the location can be used to estimate the movement of the crowd and the crowd density.
Another integrated sensor in mobile phones that can be used to gather context data is the accelerometer. Accelerometer is often used for the purpose of activity recognition \citep{ravi2005activity, kwapisz2011activity}. In our context of crowd monitoring, the literature review has noted a related work from \citet{Roggen2011}, where several crowd activities can be determined by analysing the pattern of accelerometer's data.
Interestingly, the microphone in mobile phones can also act as a sensor to count the number of devices in a crowd \citep{Kannan2012, Xu2013}, thus can be eventually used to roughly estimate the number of participants. Table \ref{table:mobileSensingCrowdFeature} summarises the possible sensing sources that can be integrated into our framework and the knowledge about a crowd that can be inferred from these sensors.
\begin{table}
\caption{Mobile sensors and Crowd features}
\label{table:mobileSensingCrowdFeature}
\centering
\begin{tabular}{|l|l|}
\hline
\textbf{Mobile sensor} & \textbf{Crowd features} \\
\hline
GPS receiver & Movement and density \\
\hline
Accelerometer & Activity \\
\hline
Microphone & Density \\
\hline
\end{tabular}
\end{table}
The role of mobile sensing in our crowd monitoring framework is to provide the real-time data about a mass gathering event. As mentioned in the previous chapter, the during event phase in emergency management requires real-time decision making, which requires continuously updated intelligence from the crowd monitoring system. The reason for this requirement is that, according to \citet{Berlonghi1995}, a particular crowd can change from one type to another type during the event. This dynamic nature of a crowd is the key factor that we want to address in our approach by integrating the mobile sensing as one of the sources of context data.
\subsection{Social Media}
Social media is another source of information that is capable of providing real-time data. In a related work, social media is considered as a special ``soft sensor'' in the mobile sensing techniques used in crowd monitoring \citep{Ramesh2014}. Social media is a generic term referring to a wide range of Internet-based tools that enable users to create and share content. These tools include social network sites, such as Twitter and Facebook, Internet forums and channels. Among those tools, Twitter is by far the most commonly utilized social media in research because of the huge volume of user base and the availability of the API which makes the data highly accessible.
Studies show that during emergency situation, there is significant use of social media to report about the incident. One of the most well known research is the potential of earthquake detection by Twitter by \citet{Sakaki2010}. In the context of crowd monitoring, \citet{DelirHaghighi2013} has also proposed an approach to analyse tweets and capture the bipolar crowd mood. This has proven the feasibility of probing the social media to detect emergency situation.
In our proposed framework, social media is also integrated as one of the input for contextual data. If mobile sensing mentioned in the previous section can be considered as a pro-active mechanism where data is continuously sampling and analysed to detect critical crowd condition, social media provides us a reactive channel where we can capture a crowd incident as soon as it is reported by the Internet user.
\section{Crowd Model}
The next layer in the crowd monitoring framework is the crowd model, which consists of a crowd typology and a set of crowd attributes for classification a crowd into a specific type.
\subsection{Comparison of different Crowd Models}
As mentioned in the literature review, there has been very limited work on crowd modelling. From the perspective of emergency planning, Berlonghi's model is among the most widely adopted by emergency management bureaus worldwide \citep{FEMA2005, EMA1999}. Our literature review also highlights several notable works in other disciplines that attempt to classify and describe different crowd types. Interestingly, in spite of having different disciplinary views, there are similarities and overlaps that can be observed between those works and the model proposed by Berlonghi. Berlonghi has proposed the largest number of crowd types, and his eleven crowd types can be mapped onto the crowd types mentioned in other studies and vice versa. Hence, in our approach, we will employ Berlonghi's model as the baseline for distinguishing different crowds.
Table \ref{table:crowdModelComparison} presents our chosen crowd types based on Berlonghi's work and compares each crowd type with the crowd types defined in the related works. From these related works, the definition and description of each crowd type are collected and gathered to construct a more detailed explanation of the crowd type.
\subsection{Crowd Types}
Explain each type
\begin{itemize}
\item Ambulatory crowd
\item Limited movement crowd
\item Crowd of spectators
\item Participatory crowd
\item Expressive crowd
\item Aggressive crowd
\item Crowd of demonstrator
\item Escaping crowd
\item Looting crowd
\item Rushing crowd
\item Violent crowd
\end{itemize}
\subsection{Crowd Attributes}
Explain why we need to add attributes: the definitions alone are not sufficient to classify a crowd and would require human judgement, hence the need for a systematic approach.
Discuss each attribute
\begin{itemize}
\item Level of Density
\item Level of Movement
\item Crowd Activities
\item Motivating Emotions
\end{itemize}
Table showing the crowd type and the attributes
Among those attributes, we focus on the motivating emotions and discuss further in the subsections
\subsubsection{Human Basic Emotion}
List work on human basic emotions
8 emotions
6 emotions
4 emotions
\subsubsection{Emotion and Collective Behaviour}
\citet{Lofland1985} and \citet{Smelser1998} identify joy, fear and anger can motivate the formation of collective behaviour.
\subsubsection{Mapping from Emotions to Crowd Types}
Insert the table here
\subsubsection{Social Media Analysis and Emotion Capture}
By probing social media, we can capture the emotion
\section{Crowd Monitoring}
|
{"hexsha": "15eb2f28234e17665725fd8fd5dfb3984351f1fe", "size": 9122, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Trash/approach.tex", "max_stars_repo_name": "romyngo/mcm-thesis", "max_stars_repo_head_hexsha": "b216c2a0d0f51fb5ddf840ca03ced4a514f6e9a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-02T06:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T06:10:35.000Z", "max_issues_repo_path": "Trash/approach.tex", "max_issues_repo_name": "romyngo/mcm-thesis", "max_issues_repo_head_hexsha": "b216c2a0d0f51fb5ddf840ca03ced4a514f6e9a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trash/approach.tex", "max_forks_repo_name": "romyngo/mcm-thesis", "max_forks_repo_head_hexsha": "b216c2a0d0f51fb5ddf840ca03ced4a514f6e9a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 82.1801801802, "max_line_length": 838, "alphanum_fraction": 0.8129796097, "num_tokens": 1900}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 17:27:33 2019
@author: zl
"""
import os
import argparse
import glob
import shutil
from collections import defaultdict
import tqdm
import numpy as np
import pandas as pd
from PIL import Image
import imagehash
def parse_args(argv=None):
    """Parse command-line options.

    Args:
        argv: optional list of argument strings; defaults to None, in
              which case argparse reads sys.argv[1:] (backward
              compatible).  Passing a list makes the function testable
              without touching the real command line.
    Returns:
        argparse.Namespace with a `data_dir` attribute.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', dest='data_dir',
                        help='the directory of the data',
                        default='data', type=str)
    return parser.parse_args(argv)
def main():
    """Write external_z.csv listing only the rows of external.csv whose
    *_green.png image has actually been downloaded into data/raw/external.

    Improvement: the downloaded-file names are collected into a set, so
    each membership test is O(1); the previous list made the row loop
    quadratic in the number of images.
    """
    args = parse_args()
    raw_images_dir = os.path.join(args.data_dir, 'raw')
    external_dir = os.path.join(raw_images_dir, 'external')
    external_filenames = set(glob.glob(os.path.join(external_dir, '*_green.png')))
    df_external = pd.read_csv(os.path.join(args.data_dir, 'external.csv'))
    records = []
    for _, row in tqdm.tqdm(df_external.iterrows()):
        # Reconstruct the expected path of this row's green-channel image.
        image_path = os.path.join(external_dir, row['Id']) + '_green.png'
        if image_path in external_filenames:
            records.append((row['Id'], row['Target']))
    df = pd.DataFrame.from_records(records, columns=['Id', 'Target'])
    output_filename = os.path.join(args.data_dir, 'external_z.csv')
    df.to_csv(output_filename, index=False)
# Entry point guard: run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
{"hexsha": "06072997000089e1a3a908ab4df97ca891c46a15", "size": 1455, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/find_undownloaded_images.py", "max_stars_repo_name": "rosaann/kaggle-hpa", "max_stars_repo_head_hexsha": "b59ddd3232d01484dea446bedcee9dfe0f461bac", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/find_undownloaded_images.py", "max_issues_repo_name": "rosaann/kaggle-hpa", "max_issues_repo_head_hexsha": "b59ddd3232d01484dea446bedcee9dfe0f461bac", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/find_undownloaded_images.py", "max_forks_repo_name": "rosaann/kaggle-hpa", "max_forks_repo_head_hexsha": "b59ddd3232d01484dea446bedcee9dfe0f461bac", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5294117647, "max_line_length": 83, "alphanum_fraction": 0.6481099656, "include": true, "reason": "import numpy", "num_tokens": 346}
|
'''MFCC.py
Calculation of MFCC coefficients from frequency-domain data
Adapted from the Vampy example plugin "PyMFCC" by Gyorgy Fazekas
http://code.soundsoftware.ac.uk/projects/vampy/repository/entry/Example%20VamPy%20plugins/PyMFCC.py
Centre for Digital Music, Queen Mary University of London.
Copyright (C) 2009 Gyorgy Fazekas, QMUL.
'''
import sys
import os
# Make the current working directory importable so that sibling modules
# resolve when this file is run from the project root.
module_path = os.path.abspath(os.getcwd())
if module_path not in sys.path:
    sys.path.append(module_path)
import sys,numpy
from numpy import abs,log,exp,floor,sum,sqrt,cos,hstack
from numpy.fft import *
class melScaling(object):
    """Mel-frequency warping and DCT for computing MFCC coefficients
    from a magnitude spectrum (adapted from the Vampy PyMFCC example).
    Call update() once after construction to build the filterbank."""
    def __init__(self,sampleRate,inputSize,numBands,minHz = 0,maxHz = None):
        '''Initialise frequency warping and DCT matrix.
        Parameters:
        sampleRate: audio sample rate
        inputSize: length of magnitude spectrum (half of FFT size assumed)
        numBands: number of mel Bands (MFCCs)
        minHz: lower bound of warping (default = DC)
        maxHz: higher bound of warping (default = Nyquist frequency)
        '''
        self.sampleRate = sampleRate
        self.NqHz = sampleRate / 2.0
        self.minHz = minHz
        if maxHz is None : maxHz = self.NqHz
        self.maxHz = maxHz
        self.inputSize = inputSize
        self.numBands = numBands
        # Filter matrices are computed lazily by update(); these flags
        # track whether update() has run and whether it succeeded.
        self.valid = False
        self.updated = False
    def update(self):
        '''Recompute the mel filterbank and the DCT matrix.
        Returns True when the configuration is valid.'''
        # make sure this will run only once
        # if called from a vamp process
        if self.updated: return self.valid
        self.updated = True
        self.valid = False
        print('Updating parameters and recalculating filters: ')
        print('Nyquist: ',self.NqHz)
        if self.maxHz > self.NqHz :
            raise Exception('Maximum frequency must be smaller than the Nyquist frequency')
        # Hz -> mel conversion (scaled so that 1000 Hz maps to 1000 mel).
        self.maxMel = 1000*log(1+self.maxHz/700.0)/log(1+1000.0/700.0)
        self.minMel = 1000*log(1+self.minHz/700.0)/log(1+1000.0/700.0)
        print('minHz:%s\nmaxHz:%s\nminMel:%s\nmaxMel:%s\n' \
        %(self.minHz,self.maxHz,self.minMel,self.maxMel))
        self.filterMatrix = self.getFilterMatrix(self.inputSize,self.numBands)
        self.DCTMatrix = self.getDCTMatrix(self.numBands)
        self.valid = True
        return self.valid
    def getFilterCentres(self,inputSize,numBands):
        '''Calculate Mel filter centres around FFT bins.
        This function calculates two extra bands at the edges for
        finding the starting and end point of the first and last
        actual filters.'''
        # Equally spaced centres on the mel scale, mapped back to
        # (rounded) FFT bin indices.
        centresMel = numpy.array(range(numBands+2)) * (self.maxMel-self.minMel)/(numBands+1) + self.minMel
        centresBin = numpy.floor(0.5 + 700.0*inputSize*(exp(centresMel*log(1+1000.0/700.0)/1000.0)-1)/self.NqHz)
        return numpy.array(centresBin,int)
    def getFilterMatrix(self,inputSize,numBands):
        '''Compose the Mel scaling matrix.'''
        filterMatrix = numpy.zeros((numBands,inputSize))
        self.filterCentres = self.getFilterCentres(inputSize,numBands)
        # Each row is a triangular filter spanning three consecutive
        # centres (start, peak, end).
        for i in range(numBands) :
            start,centre,end = self.filterCentres[i:i+3]
            self.setFilter(filterMatrix[i],start,centre,end)
        return filterMatrix.transpose()
    def setFilter(self,filt,filterStart,filterCentre,filterEnd):
        '''Calculate a single Mel filter.'''
        # Rising slope up to the centre bin, falling slope after it.
        k1 = numpy.float32(filterCentre-filterStart)
        k2 = numpy.float32(filterEnd-filterCentre)
        up = (numpy.array(range(filterStart,filterCentre))-filterStart)/k1
        dn = (filterEnd-numpy.array(range(filterCentre,filterEnd)))/k2
        filt[filterStart:filterCentre] = up
        filt[filterCentre:filterEnd] = dn
    def warpSpectrum(self,magnitudeSpectrum):
        '''Compute the Mel scaled spectrum.'''
        return numpy.dot(magnitudeSpectrum,self.filterMatrix)
    def getDCTMatrix(self,size):
        '''Calculate the square DCT transform matrix. Results are
        equivalent to Matlab dctmtx(n) with 64 bit precision.'''
        DCTmx = numpy.array(range(size),numpy.float64).repeat(size).reshape(size,size)
        DCTmxT = numpy.pi * (DCTmx.transpose()+0.5) / size
        DCTmxT = (1.0/sqrt( size / 2.0)) * cos(DCTmx * DCTmxT)
        # Scale the first basis vector for orthonormality (DCT-II).
        DCTmxT[0] = DCTmxT[0] * (sqrt(2.0)/2.0)
        return DCTmxT
    def dct(self,data_matrix):
        '''Compute DCT of input matrix.'''
        return numpy.dot(self.DCTMatrix,data_matrix)
    def getMFCCs(self,warpedSpectrum,cn=True):
        '''Compute MFCC coefficients from Mel warped magnitude spectrum.
        Passing cn=False zeroes the 0th (overall energy) coefficient.'''
        # Clip to avoid log(0) on silent bands.
        mfccs=self.dct(numpy.log(numpy.clip(warpedSpectrum, 1e-9, numpy.inf)))
        if cn is False : mfccs[0] = 0.0
        return mfccs
|
{"hexsha": "91da2a79e705bcc526b4fc7ba1c8fe38f7697b73", "size": 4187, "ext": "py", "lang": "Python", "max_stars_repo_path": "MFCC.py", "max_stars_repo_name": "Rutherford9191/audiolab", "max_stars_repo_head_hexsha": "f6d78d28a0be3ff77551b7f59f7113e74131f4e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-09T23:27:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-09T23:27:36.000Z", "max_issues_repo_path": "MFCC.py", "max_issues_repo_name": "Rutherford9191/audiolab", "max_issues_repo_head_hexsha": "f6d78d28a0be3ff77551b7f59f7113e74131f4e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-03-24T15:53:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:24:59.000Z", "max_forks_repo_path": "MFCC.py", "max_forks_repo_name": "Rutherford9191/audiolab", "max_forks_repo_head_hexsha": "f6d78d28a0be3ff77551b7f59f7113e74131f4e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4086956522, "max_line_length": 106, "alphanum_fraction": 0.7353713876, "include": true, "reason": "from numpy", "num_tokens": 1252}
|
{-# OPTIONS --type-in-type #-}
module dyn where
open import prelude
open import functors
open import poly0 public
open import prelude.Stream
open Stream
open import Data.List as L using (List)
-- A dynamical system: a hidden state set, a polynomial interface (body),
-- and a lens (pheno) from the state monomial (state , λ _ → state) into
-- that interface.  state and body are implicit fields inferred from pheno.
record Dyn : Set where
  constructor dyn
  field
    {state} : Set
    {body} : ∫
    pheno : ∫[ (state , λ _ → state) , body ]
open Dyn public
-- Run a dynamical system from an initial state s₀, given a lens
-- e : ∫[ body d , 𝒴 ] that supplies the system's input at each step;
-- the result is the stream of outputs.
-- hd: the current output, read from the state through the lens l.
-- tl: recurse with the state updated via the #/← update combinators.
-- NOTE(review): ★ and #/← come from poly0 (not visible here); the
-- read/update semantics above are assumed — confirm against poly0.
run : (d : Dyn) → ∫[ body d , 𝒴 ] → state d → Stream (π₁ (body d))
hd (run d@(dyn l) e s₀) = s₀ ★ l
tl (run d@(dyn l) e s₀) = run d e (s₀ # l ← hd (run d e s₀) # e ← tt)
-- Parallel composition of two dynamical systems: pair the phenotype
-- lenses with the ⟦⊠⟧ product from poly0.
module _ (d₁ d₂ : Dyn) where
  _⊠⊠⊠_ : Dyn
  _⊠⊠⊠_ = dyn (pheno d₁ ⟦⊠⟧ pheno d₂)
-- Rewire a dynamical system's interface: post-compose its phenotype
-- with a lens from body d to A (▸ is lens composition from poly0).
_⟫_ : (d : Dyn) → ∫[ body d , A ] → Dyn
d ⟫ l = dyn (pheno d ▸ l)
-- Lift a plain function a → b into a dynamical system whose lens
-- exposes the current value and updates the state with f.
fun→dyn : ∀ {a b} → (a → b) → Dyn
fun→dyn f = dyn (λ a⁺ → a⁺ , f)

-- One-step delay on s: the identity function viewed as a dynamical system.
delay : Set → Dyn
delay s = fun→dyn (id {A = s})
|
{"hexsha": "6abec05927920494116a74f2bbdac76cf205df92", "size": 783, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "code-examples/agda/dyn.agda", "max_stars_repo_name": "mstone/poly", "max_stars_repo_head_hexsha": "425de958985aacbd3284d3057fe21fd682e315ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2021-02-18T16:31:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T23:08:27.000Z", "max_issues_repo_path": "code-examples/agda/dyn.agda", "max_issues_repo_name": "dspivak/poly", "max_issues_repo_head_hexsha": "425de958985aacbd3284d3057fe21fd682e315ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-02T02:29:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T10:06:32.000Z", "max_forks_repo_path": "code-examples/agda/dyn.agda", "max_forks_repo_name": "dspivak/poly", "max_forks_repo_head_hexsha": "425de958985aacbd3284d3057fe21fd682e315ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-10T17:19:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-30T11:45:57.000Z", "avg_line_length": 22.3714285714, "max_line_length": 69, "alphanum_fraction": 0.558109834, "num_tokens": 330}
|
# -*- coding: utf-8 -*-
"""
Rast_bandArithmetic.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Leandro França'
__date__ = '2022-01-20'
__copyright__ = '(C) 2022, Leandro França'
from PyQt5.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsWkbTypes,
QgsFields,
QgsField,
QgsFeature,
QgsPointXY,
QgsGeometry,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterString,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterCrs,
QgsProcessingParameterEnum,
QgsFeatureRequest,
QgsExpression,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFileDestination,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsApplication,
QgsProject,
QgsRasterLayer,
QgsCoordinateTransform,
QgsCoordinateReferenceSystem)
import os
import re

import numpy as np
from lftools.geocapt.imgs import Imgs
from osgeo import osr, gdal_array, gdal
from qgis.PyQt.QtGui import QIcon
class BandArithmetic(QgsProcessingAlgorithm):
    """QGIS Processing algorithm that evaluates an arithmetic formula
    (e.g. GLI, NDVI, NDWI) over the bands of an input raster and writes
    the result as a single-band Float32 GeoTIFF with -9999 as NoData.

    Band k of the input is referenced in the formula as ``bk`` (b1, b2, ...).
    """

    # Two-letter locale code used by tr() to choose Portuguese strings.
    LOC = QgsApplication.locale()[:2]

    def translate(self, string):
        """Route a string through the QGIS 'Processing' translation context."""
        return QCoreApplication.translate('Processing', string)

    def tr(self, *string):
        # Translate to Portuguese: arg[0] - English (translated), arg[1] - Portuguese.
        if self.LOC == 'pt':
            if len(string) == 2:
                return string[1]
            else:
                return self.translate(string[0])
        else:
            return self.translate(string[0])

    def createInstance(self):
        return BandArithmetic()

    def name(self):
        return 'bandarithmetic'

    def displayName(self):
        return self.tr('Band Arithmetic', 'Aritmética de bandas')

    def group(self):
        return self.tr('Raster')

    def groupId(self):
        return 'raster'

    def tags(self):
        return self.tr('raster,rgb,bands,color,algebra,arithmetic,aritmética,ndvi,gli,ndwi,index,índice').split(',')

    def icon(self):
        return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/raster.png'))

    txt_en = '''Performs an arithmetic operation on the bands of a raster. The predefined formula is used to calculate the Green Leaf Index (GLI) for a RGB raster. However you can enter your own formula.
Examples:
NDVI with RGN raster: ( b3 - b1) / (b3 + b1)
NDWI with RGN raster: ( b3 - b2) / (b3 + b2)
GLI with RGB raster: (2*b2 - b1 - b3) / (2*b2 + b1 + b3)
Obs.:
The operators supported are: + , - , * , /'''
    txt_pt = '''Executa uma operação aritmética entre as bandas de um raster. A fórmula predefinida é usado para calcular o Green Leaf Index (GLI) para um raster RGB. No entanto, você pode inserir sua própria fórmula.
Exemplos:
NDVI com raster RGN: ( b3 - b1) / (b3 + b1)
NDWI com raster RGN: ( b3 - b2) / (b3 + b2)
GLI com raster RGB: (2*b2 - b1 - b3) / (2*b2 + b1 + b3)
Obs.:
Os operadores suportados são: + , - , * , /'''
    figure = 'images/tutorial/raster_bandArithmetic.jpg'

    def shortHelpString(self):
        """Build the HTML help text (localized body + author footer)."""
        social_BW = Imgs().social_BW
        footer = '''<div align="center">
<img src="''' + os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) + '''">
</div>
<div align="right">
<p align="right">
<b>''' + self.tr('Author: Leandro Franca', 'Autor: Leandro França') + '''</b>
</p>''' + social_BW + '''</div>
</div>'''
        return self.tr(self.txt_en, self.txt_pt) + footer

    # Parameter keys
    INPUT = 'INPUT'
    ALPHA = 'ALPHA'
    FORMULA = 'FORMULA'
    OUTPUT = 'OUTPUT'
    OPEN = 'OPEN'

    def initAlgorithm(self, config=None):
        # INPUT
        self.addParameter(
            QgsProcessingParameterRasterLayer(
                self.INPUT,
                self.tr('Input Raster', 'Raster de Entrada'),
                [QgsProcessing.TypeRaster]
            )
        )
        self.addParameter(
            QgsProcessingParameterBoolean(
                self.ALPHA,
                self.tr('Fourth band is transparency', 'Quarta banda é de transparência'),
                defaultValue=True
            )
        )
        self.addParameter(
            QgsProcessingParameterString(
                self.FORMULA,
                self.tr('Formula', 'Fórmula'),
                defaultValue='(2*b2 - b1 - b3)/(2*b2 + b1 + b3)'
            )
        )
        # OUTPUT
        self.addParameter(
            QgsProcessingParameterFileDestination(
                self.OUTPUT,
                self.tr('Calculated index', 'Índice calculado'),
                fileFilter='GeoTIFF (*.tif)'
            )
        )
        self.addParameter(
            QgsProcessingParameterBoolean(
                self.OPEN,
                self.tr('Load calculated index', 'Carregar índice calculado'),
                defaultValue=True
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """Read the raster bands, evaluate the formula and write the result.

        Raises QgsProcessingException for a missing input, a malformed
        formula (more than one '/'), or any evaluation error.
        """
        RasterIN = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if RasterIN is None:
            raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
        alfa = self.parameterAsBool(parameters, self.ALPHA, context)
        expr = self.parameterAsString(parameters, self.FORMULA, context)
        Output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
        Carregar = self.parameterAsBool(parameters, self.OPEN, context)

        # Input raster metadata
        image = gdal.Open(RasterIN.dataProvider().dataSourceUri())
        prj = image.GetProjection()
        geotransform = image.GetGeoTransform()
        Pixel_Nulo = image.GetRasterBand(1).GetNoDataValue()
        n_bands = image.RasterCount
        cols = image.RasterXSize
        rows = image.RasterYSize
        CRS = osr.SpatialReference(wkt=prj)

        # Read every band as float; band k is exposed to the formula as bk.
        dic = {}
        for k in range(n_bands):
            feedback.pushInfo(self.tr('Reading band {}...'.format(k+1), 'Lendo a banda {}...'.format(k+1)))
            dic['b{}'.format(k+1)] = image.GetRasterBand(k+1).ReadAsArray().astype('float')
        if n_bands == 4 and alfa:
            # Transparency mask from the alpha band: alpha == 0 means NoData.
            transp = dic['b4'] > 0
        image = None  # release the GDAL dataset before the heavy math

        # Normalize case first (so 'B1' also works), then rewrite band
        # tokens into dict lookups in a single pass.  The word-boundary
        # regex fixes the corruption the old sequential str.replace caused
        # for rasters with 10+ bands (replacing 'b1' inside 'b10').
        expr = re.sub(r'\bb(\d+)\b', lambda m: "dic['b" + m.group(1) + "']", expr.lower())
        # Only a single top-level division (numerator/denominator) is supported.
        lista = expr.split('/')
        try:
            feedback.pushInfo(self.tr('Carrying out the calculations...', 'Realizando os cálculos...'))
            if len(lista) == 2:
                NUM, DEN = lista
                NUM = eval(NUM)
                DEN = eval(DEN)
                # Pixels where DEN == 0 (or masked) get -9999; the
                # "+ (DEN == 0)*1" term avoids a division by zero there.
                if n_bands == 4 and alfa:
                    INDICE = -9999*((DEN == 0) | np.logical_not(transp)) + ((DEN != 0) & transp)*(NUM/(DEN + (DEN == 0)*1))
                else:
                    if isinstance(Pixel_Nulo, (int, float)):
                        # Fix: the band array lives in dic['b1']; the bare
                        # name b1 used previously raised a NameError that the
                        # broad except turned into a misleading formula error.
                        b1 = dic['b1']
                        INDICE = -9999*((DEN == 0) | (b1 == Pixel_Nulo)) + ((DEN != 0) & (b1 != Pixel_Nulo))*(NUM/(DEN + (DEN == 0)*1))
                    else:
                        INDICE = -9999*(DEN == 0) + (DEN != 0)*(NUM/(DEN + (DEN == 0)*1))
            elif len(lista) == 1:
                formula = eval(lista[0])
                if n_bands == 4 and alfa:
                    INDICE = -9999*(np.logical_not(transp)) + (transp)*formula
                else:
                    if isinstance(Pixel_Nulo, (int, float)):
                        b1 = dic['b1']  # same fix as above
                        INDICE = -9999*(b1 == Pixel_Nulo) + (b1 != Pixel_Nulo)*formula
                    else:
                        INDICE = formula
            else:
                raise QgsProcessingException(self.tr('Check the input formula!', 'Verifique a fórmula de entrada!'))
        except QgsProcessingException:
            # Fix: re-raise the specific message instead of masking it with
            # the generic one (the old bare "except:" swallowed it).
            raise
        except Exception:
            raise QgsProcessingException(self.tr('Check if your formula is correct!', 'Verifique se sua fórmula está correta!'))

        # Create the single-band Float32 GeoTIFF output.
        Driver = gdal.GetDriverByName('GTiff').Create(Output, cols, rows, 1, gdal.GDT_Float32)
        Driver.SetGeoTransform(geotransform)
        Driver.SetProjection(CRS.ExportToWkt())
        outband = Driver.GetRasterBand(1)
        feedback.pushInfo(self.tr('Writing results...', 'Escrevendo resultados...'))
        outband.SetNoDataValue(-9999)
        outband.WriteArray(INDICE)
        Driver.FlushCache()
        Driver = None

        feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
        feedback.pushInfo(self.tr('Leandro Franca - Cartographic Engineer', 'Leandro França - Eng Cart'))
        # Stash results for postProcessAlgorithm (runs on the main thread).
        self.CAMINHO = Output
        self.CARREGAR = Carregar
        return {self.OUTPUT: Output}

    def postProcessAlgorithm(self, context, feedback):
        """Optionally load the produced raster into the current project."""
        if self.CARREGAR:
            rlayer = QgsRasterLayer(self.CAMINHO, self.tr('Calculated index', 'Índice calculado'))
            QgsProject.instance().addMapLayer(rlayer)
        return {}
|
{"hexsha": "7b84ebd6f0fa8752347508f8eea9725e29ac6f5b", "size": 10373, "ext": "py", "lang": "Python", "max_stars_repo_path": "processing_provider/Rast_bandArithmetic.py", "max_stars_repo_name": "geodourados/lftools", "max_stars_repo_head_hexsha": "4b9d703513bd3d49ac7952014575bf95492a2d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-28T22:18:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:18:09.000Z", "max_issues_repo_path": "processing_provider/Rast_bandArithmetic.py", "max_issues_repo_name": "geodourados/lftools", "max_issues_repo_head_hexsha": "4b9d703513bd3d49ac7952014575bf95492a2d90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "processing_provider/Rast_bandArithmetic.py", "max_forks_repo_name": "geodourados/lftools", "max_forks_repo_head_hexsha": "4b9d703513bd3d49ac7952014575bf95492a2d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8576642336, "max_line_length": 217, "alphanum_fraction": 0.5308975224, "include": true, "reason": "import numpy", "num_tokens": 2356}
|
\subsubsection{Installation}
\begin{enumerate}
\item Download
\opt{iriverh10}{\url{http://download.rockbox.org/bootloader/iriver/H10_20GC.mi4}}
\opt{iriverh10_5gb}{
\begin{itemize}
\item \url{http://download.rockbox.org/bootloader/iriver/H10.mi4} if your \dap{} is UMS or
\item \url{http://download.rockbox.org/bootloader/iriver/H10_5GB-MTP/H10.mi4} if it is MTP.
\end{itemize}}
\item Connect your \playertype{} to the computer using UMS mode and the UMS trick%
\opt{iriverh10_5gb}{ if necessary}.
\item Rename the \opt{iriverh10}{\fname{H10\_20GC.mi4}}\opt{iriverh10_5gb}{\fname{H10.mi4}}
file to \fname{OF.mi4} in the \fname{System} directory on your \playertype{}.
\opt{iriverh10_5gb}{\note{If you have a Pure model \playertype{} (which
does not have an FM radio) it is possible that this file will be
called \fname{H10EMP.mi4} instead. If so, rename the \fname{H10.mi4}
you downloaded in step 1 to \fname{H10EMP.mi4}.}}
\note{You should keep a safe backup of this file for use if you ever wish
to switch back to the \playerman{} firmware.}
\note{If you cannot see the \fname{System} directory, you will need to make
sure your operating system is configured to show hidden files and
directories.}
\item Copy the \opt{iriverh10}{\fname{H10\_20GC.mi4}}\opt{iriverh10_5gb}{\fname{H10.mi4}
(or \fname{H10EMP.mi4} if you have a \playertype{} Pure)} file you
downloaded to the System directory on your \dap{}.
\end{enumerate}
|
{"hexsha": "38e780a1ca84e613cab3ee8be0e7ea9648f32cf5", "size": 1569, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/getting_started/h10_install.tex", "max_stars_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC", "max_stars_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2015-03-10T08:43:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T14:09:46.000Z", "max_issues_repo_path": "manual/getting_started/h10_install.tex", "max_issues_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC", "max_issues_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-07-04T18:15:33.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-18T05:33:33.000Z", "max_forks_repo_path": "manual/getting_started/h10_install.tex", "max_forks_repo_name": "Rockbox-Chinese-Community/Rockbox-RCC", "max_forks_repo_head_hexsha": "a701aefe45f03ca391a8e2f1a6e3da1b8774b2f2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2015-01-21T13:58:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-04T04:30:22.000Z", "avg_line_length": 56.0357142857, "max_line_length": 99, "alphanum_fraction": 0.6857871256, "num_tokens": 494}
|
[STATEMENT]
theorem cp_thm:
assumes lp: "iszlfm p (a #bs)"
and u: "d_\<beta> p 1"
and d: "d_\<delta> p d"
and dp: "d > 0"
shows "(\<exists> (x::int). Ifm (real_of_int x #bs) p) = (\<exists> j\<in> {1.. d}. Ifm (real_of_int j #bs) (minusinf p) \<or> (\<exists> b \<in> set (\<beta> p). Ifm ((Inum (a#bs) b + real_of_int j) #bs) p))"
(is "(\<exists> (x::int). ?P (real_of_int x)) = (\<exists> j\<in> ?D. ?M j \<or> (\<exists> b\<in> ?B. ?P (?I b + real_of_int j)))")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
from minusinf_inf[OF lp]
[PROOF STATE]
proof (chain)
picking this:
\<exists>z. \<forall>x<z. Ifm (real_of_int x # bs) (minusinf p) = Ifm (real_of_int x # bs) p
[PROOF STEP]
have th: "\<exists>(z::int). \<forall>x<z. ?P (real_of_int x) = ?M x"
[PROOF STATE]
proof (prove)
using this:
\<exists>z. \<forall>x<z. Ifm (real_of_int x # bs) (minusinf p) = Ifm (real_of_int x # bs) p
goal (1 subgoal):
1. \<exists>z. \<forall>x<z. Ifm (real_of_int x # bs) p = Ifm (real_of_int x # bs) (minusinf p)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>z. \<forall>x<z. Ifm (real_of_int x # bs) p = Ifm (real_of_int x # bs) (minusinf p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
let ?B' = "{\<lfloor>?I b\<rfloor> | b. b\<in> ?B}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
from \<beta>_int[OF lp] isint_iff[where bs="a # bs"]
[PROOF STATE]
proof (chain)
picking this:
\<forall>b\<in>set (\<beta> p). isint b (a # bs)
isint ?n (a # bs) = (real_of_int \<lfloor>Inum (a # bs) ?n\<rfloor> = Inum (a # bs) ?n)
[PROOF STEP]
have B: "\<forall> b\<in> ?B. real_of_int \<lfloor>?I b\<rfloor> = ?I b"
[PROOF STATE]
proof (prove)
using this:
\<forall>b\<in>set (\<beta> p). isint b (a # bs)
isint ?n (a # bs) = (real_of_int \<lfloor>Inum (a # bs) ?n\<rfloor> = Inum (a # bs) ?n)
goal (1 subgoal):
1. \<forall>b\<in>set (\<beta> p). real_of_int \<lfloor>Inum (a # bs) b\<rfloor> = Inum (a # bs) b
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>b\<in>set (\<beta> p). real_of_int \<lfloor>Inum (a # bs) b\<rfloor> = Inum (a # bs) b
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
from B[rule_format]
[PROOF STATE]
proof (chain)
picking this:
?b \<in> set (\<beta> p) \<Longrightarrow> real_of_int \<lfloor>Inum (a # bs) ?b\<rfloor> = Inum (a # bs) ?b
[PROOF STEP]
have "(\<exists>j\<in>?D. \<exists>b\<in> ?B. ?P (?I b + real_of_int j)) = (\<exists>j\<in>?D. \<exists>b\<in> ?B. ?P (real_of_int \<lfloor>?I b\<rfloor> + real_of_int j))"
[PROOF STATE]
proof (prove)
using this:
?b \<in> set (\<beta> p) \<Longrightarrow> real_of_int \<lfloor>Inum (a # bs) ?b\<rfloor> = Inum (a # bs) ?b
goal (1 subgoal):
1. (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
have "\<dots> = (\<exists>j\<in>?D. \<exists>b\<in> ?B. ?P (real_of_int (\<lfloor>?I b\<rfloor> + j)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm (real_of_int (\<lfloor>Inum (a # bs) b\<rfloor> + j) # bs) p)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm (real_of_int (\<lfloor>Inum (a # bs) b\<rfloor> + j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((real_of_int \<lfloor>Inum (a # bs) b\<rfloor> + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm (real_of_int (\<lfloor>Inum (a # bs) b\<rfloor> + j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
have"\<dots> = (\<exists> j \<in> ?D. \<exists> b \<in> ?B'. ?P (real_of_int (b + j)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm (real_of_int (\<lfloor>Inum (a # bs) b\<rfloor> + j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm (real_of_int (\<lfloor>Inum (a # bs) b\<rfloor> + j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
[PROOF STEP]
have BB':
"(\<exists>j\<in>?D. \<exists>b\<in> ?B. ?P (?I b + real_of_int j)) = (\<exists> j \<in> ?D. \<exists> b \<in> ?B'. ?P (real_of_int (b + j)))"
[PROOF STATE]
proof (prove)
using this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
goal (1 subgoal):
1. (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
hence th2: "\<forall> x. \<not> (\<exists> j \<in> ?D. \<exists> b \<in> ?B'. ?P (real_of_int (b + j))) \<longrightarrow> ?P (real_of_int x) \<longrightarrow> ?P (real_of_int (x - d))"
[PROOF STATE]
proof (prove)
using this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
goal (1 subgoal):
1. \<forall>x. \<not> (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p) \<longrightarrow> Ifm (real_of_int x # bs) p \<longrightarrow> Ifm (real_of_int (x - d) # bs) p
[PROOF STEP]
using \<beta>'[OF lp u d dp]
[PROOF STATE]
proof (prove)
using this:
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
\<forall>x. \<not> (\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) \<longrightarrow> Ifm (real_of_int x # bs) p \<longrightarrow> Ifm (real_of_int (x - d) # bs) p
goal (1 subgoal):
1. \<forall>x. \<not> (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p) \<longrightarrow> Ifm (real_of_int x # bs) p \<longrightarrow> Ifm (real_of_int (x - d) # bs) p
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<forall>x. \<not> (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p) \<longrightarrow> Ifm (real_of_int x # bs) p \<longrightarrow> Ifm (real_of_int (x - d) # bs) p
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
from minusinf_repeats[OF d lp]
[PROOF STATE]
proof (chain)
picking this:
Ifm (real_of_int (?x - ?k * d) # bs) (minusinf p) = Ifm (real_of_int ?x # bs) (minusinf p)
[PROOF STEP]
have th3: "\<forall> x k. ?M x = ?M (x-k*d)"
[PROOF STATE]
proof (prove)
using this:
Ifm (real_of_int (?x - ?k * d) # bs) (minusinf p) = Ifm (real_of_int ?x # bs) (minusinf p)
goal (1 subgoal):
1. \<forall>x k. Ifm (real_of_int x # bs) (minusinf p) = Ifm (real_of_int (x - k * d) # bs) (minusinf p)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>x k. Ifm (real_of_int x # bs) (minusinf p) = Ifm (real_of_int (x - k * d) # bs) (minusinf p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
from cpmi_eq[OF dp th th2 th3] BB'
[PROOF STATE]
proof (chain)
picking this:
(\<exists>x. Ifm (real_of_int x # bs) p) = ((\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p)) \<or> (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p))
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<exists>x. Ifm (real_of_int x # bs) p) = ((\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p)) \<or> (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p))
(\<exists>j\<in>{1..d}. \<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p) = (\<exists>j\<in>{1..d}. \<exists>b\<in>{\<lfloor>Inum (a # bs) b\<rfloor> |b. b \<in> set (\<beta> p)}. Ifm (real_of_int (b + j) # bs) p)
goal (1 subgoal):
1. (\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(\<exists>x. Ifm (real_of_int x # bs) p) = (\<exists>j\<in>{1..d}. Ifm (real_of_int j # bs) (minusinf p) \<or> (\<exists>b\<in>set (\<beta> p). Ifm ((Inum (a # bs) b + real_of_int j) # bs) p))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6340, "file": null, "length": 30}
|
/**
* @copyright Copyright 2016 The J-PET Framework Authors. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may find a copy of the License in the LICENCE file.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @file JPetCommonToolsTest.cpp
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE JPetCommonToolsTest
#include <boost/test/unit_test.hpp>
#include <map>
#include "JPetCommonTools.h"
BOOST_AUTO_TEST_SUITE(CommonToolsTestSuite)
BOOST_AUTO_TEST_CASE(findSubstringTest)
{
  // The needle occurs twice; findSubstring must return a valid position.
  std::string str("There are two needles in this haystack with needles.");
  std::string str2("needle");
  std::size_t found = JPetCommonTools::findSubstring(str, str2);
  // Assert the condition directly instead of BOOST_REQUIRE_EQUAL(cond, true):
  // the old form reduced every failure report to "false != true".
  BOOST_REQUIRE(found != std::string::npos);
}

BOOST_AUTO_TEST_CASE(ItoaTest)
{
  int testNumber = 64;
  std::string intAsASting = JPetCommonTools::Itoa(testNumber);
  // Compare operands directly so Boost prints both values on failure.
  BOOST_REQUIRE_EQUAL(intAsASting, "64");
}

BOOST_AUTO_TEST_CASE(intToStringTest)
{
  int testNumber = 64;
  std::string intAsASting = JPetCommonTools::intToString(testNumber);
  BOOST_REQUIRE_EQUAL(intAsASting, "64");
}

BOOST_AUTO_TEST_CASE(doubleToStringTest)
{
  double testNumber = 256.3264;
  std::string doubleAsASting = JPetCommonTools::doubleToString(testNumber);
  BOOST_REQUIRE_EQUAL(doubleAsASting, "256.326");
}

BOOST_AUTO_TEST_CASE(stringToIntTest)
{
  std::string testString = "1024";
  int stringAsAInt = JPetCommonTools::stringToInt(testString);
  BOOST_REQUIRE_EQUAL(stringAsAInt, 1024);
}

BOOST_AUTO_TEST_CASE(toBoolTest)
{
  std::string testString = "0";
  bool stringAsABool = JPetCommonTools::to_bool(testString);
  BOOST_REQUIRE(!stringAsABool);
}
BOOST_AUTO_TEST_CASE(ifFileExistingTest)
{
  // run_tests.pl is expected in the working directory of the test run.
  std::string fileTest = "run_tests.pl";
  // Assert the condition directly; BOOST_REQUIRE_EQUAL(cond, true) hides
  // which operand failed.
  BOOST_REQUIRE(JPetCommonTools::ifFileExisting(fileTest));
}

BOOST_AUTO_TEST_CASE(mapAreEqualTest)
{
  // Two default-constructed (empty) maps must compare equal.
  std::map<int, int> mapTestLeft, mapTestRight;
  BOOST_REQUIRE(JPetCommonTools::mapComparator(mapTestLeft, mapTestRight));
}

BOOST_AUTO_TEST_CASE(mapAreNotEqualTest)
{
  std::map<char, int> first;
  first['a'] = 10;
  first['b'] = 30;
  first['c'] = 50;
  first['d'] = 70;
  std::map<char, int> second;  // empty, so the maps must differ
  BOOST_REQUIRE(!JPetCommonTools::mapComparator(first, second));
}
BOOST_AUTO_TEST_CASE(stripFileNameSuffixTest)
{
  std::string fileTest = "run_tests.pl";
  // Compare operands directly so Boost prints both strings on failure
  // (the old BOOST_REQUIRE_EQUAL(cond, true) form reported only a bool).
  BOOST_REQUIRE_EQUAL(JPetCommonTools::stripFileNameSuffix(fileTest), "run_tests");
}

BOOST_AUTO_TEST_CASE(currentFullPathTest)
{
  std::string currentFullPathTest = boost::filesystem::path(boost::filesystem::current_path()).string();
  BOOST_REQUIRE_EQUAL(JPetCommonTools::currentFullPath(), currentFullPathTest);
}

BOOST_AUTO_TEST_CASE(extractPathFromFileTest)
{
  std::string currentFullPathTest = boost::filesystem::path(boost::filesystem::current_path()).string();
  std::string fullName = currentFullPathTest + "/" + "run_tests.pl";
  // Direct string comparison instead of result.compare(...) == 0 for
  // readable failure output.
  BOOST_REQUIRE_EQUAL(JPetCommonTools::extractPathFromFile(fullName), currentFullPathTest);
}
BOOST_AUTO_TEST_CASE(isDirectory)
{
  // The process's starting directory is a directory; a made-up path is not.
  const std::string startDir = boost::filesystem::initial_path().string();
  BOOST_REQUIRE(JPetCommonTools::isDirectory(startDir));
  BOOST_REQUIRE(!JPetCommonTools::isDirectory("fake/directory/baba"));
}

BOOST_AUTO_TEST_CASE(appendSlashToPathIfAbsent)
{
  // Empty input and paths already ending in '/' pass through unchanged;
  // every other path gains exactly one trailing slash.
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent(""), "");
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent("./"), "./");
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent("/home/"), "/home/");
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent("/home"), "/home/");
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent("home/bbl/be"), "home/bbl/be/");
  BOOST_REQUIRE_EQUAL(JPetCommonTools::appendSlashToPathIfAbsent("test"), "test/");
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "9f36f65bb87ff91c207dfd982be66ab571358f69", "size": 4528, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "JPetCommonTools/JPetCommonToolsTest.cpp", "max_stars_repo_name": "kamilrakoczy/j-pet-framework", "max_stars_repo_head_hexsha": "4a0761bc8996dd5076575e996003c11f4110db44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "JPetCommonTools/JPetCommonToolsTest.cpp", "max_issues_repo_name": "kamilrakoczy/j-pet-framework", "max_issues_repo_head_hexsha": "4a0761bc8996dd5076575e996003c11f4110db44", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2018-02-10T18:04:38.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-10T18:20:09.000Z", "max_forks_repo_path": "JPetCommonTools/JPetCommonToolsTest.cpp", "max_forks_repo_name": "kamilrakoczy/j-pet-framework", "max_forks_repo_head_hexsha": "4a0761bc8996dd5076575e996003c11f4110db44", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4444444444, "max_line_length": 104, "alphanum_fraction": 0.7678886926, "num_tokens": 1133}
|
\section{Introduction}
In evolving distributed simulations with complex
relational structures the computational work needs to
be evenly distributed throughout the simulation.
In many applications, this requires starting with balanced
partitions at the beginning of the simulation
as well as the continuous rebalancing of the work as the
simulation evolves. In these cases, re-running static partitioning
algorithms is too expensive to be done repetitively.
For better performance, efficient dynamic load
balancing strategies must be applied to the evolving
structure. To further motivate the methods being developed
in the current work, common
partitioning techniques applied on these relational data
structures are able to partition one aspect of the data.
In structures that include two or more partitioning
criteria that need to be satisfied, these methods do not
perform well on their own.
An efficient set of algorithms to address these challenges
uses diffusive load balancing techniques.
These algorithms perform load balancing across part
boundaries and can be applied in succession to perform
multiple levels of partitioning.
One such diffusive load balancer, ParMA \cite{SmithParma2015}, works directly
on an element-based unstructured mesh data structure PUMI \cite{ibanez2016pumi}.
ParMA utilizes mesh adjacencies to perform localized
balancing through diffusive migration from
heavy parts to lighter neighbors.
It has been shown that these operations
can be efficiently used to perform
multi-criteria load balancing in order to
improve the partition that is returned by
graph/hypergraph and geometric methods.
While ParMA has been used to improve partitions for
several simulations using mesh databases, there are applications that have
different relation based data structures that have similar
partitioning requirements. Two examples of these
structures are scale-free graphs often used in
computational social science to analyze social
phenomena \cite{pienta2013parallel,gonzalez2012powergraph}
and vertex-based unstructured meshes which are sometimes
used in computational fluid dynamic simulations
\cite{anderson1999achieving}. While the latter example can
be handled in ParMA by converting the vertex-based mesh to
PUMI's element-based mesh, there are some challenges to
appropriately account for the switch and it becomes more
difficult to accurately approximate the load of each part.
To extend ParMA's capabilities to these other applications,
we present EnGPar. It utilizes an expanded graph structure
to provide a general representation
of relation-based data. Using this graph structure, a new
implementation of ParMA's diffusive load balancing algorithm is given.
Section 2 defines notation. Section 3 gives an overview of
partitioning techniques. In sections 4
and 5 EnGPar's design and implementation details
are provided. Experiments and results are
given in section 6 and 7. Finally, conclusions
and future plans are discussed in section 8.
\section{Notation}
\begin{itemize}
\item $M^d$ a set of mesh entities of dimension $d$.
\item $M_i^d$ the $i$th mesh entity of dimension $d$.
\item $V$ a set of vertices $u_i$ which uniquely
exist on one part such that $V = \bigcup_{\forall_i}u_i$.
\item $E_i$ a set of relations $e_{ij}$ of type $i$ that represent an
edge between two vertices, $u,v\in V$ such that $E_i = \bigcup_{\forall_j}e_{ij}$.
\item $H_i$ a set of relations $h_{ij}$ of type $i$ that represent a
hyperedge between a set of vertices such that $H_i = \bigcup_{\forall_j}h_{ij}$.
\item $V_{h_{ij}}$ the set of vertices the hyperedge $h_{ij}\in H_i$ connects.
\item $P_i$ a set of pins which represent the connection from hyperedge
$h_{ij}$ to $v$ where $h_{ij} \in H_i$ and $v \in V_{h_{ij}}$.
\end{itemize}
\section{Partitioning}
\subsection{(Hyper)Graph partitioners}
Graph-based partitioning methods are a common way to perform
load balancing on relational data. These methods require
the data to be transformed into a graph structure. The idea of this
construction is such that the vertices represent a unit of work
and the relations between the work form the edges of the graph.
When this graph is partitioned the vertices are uniquely divided
between the parts and the edges that cross the part boundaries
represent communication between the connected parts. Graph
partitioners split the graph into $k$ parts
where each part has the same amount of work and the inter part
communication is minimized. Parallel multi-level techniques
applied to graphs have been shown to produce high-quality
partitions up to tens of thousands of parts quickly and efficiently
\cite{catalyurek2013umpa,karypis1999parallel,lasalle2013multi,schloegel2002parallel}.
Hypergraph methods improve graph partitioning methods, using
hyperedges to better represent more complex forms of
communication. These hyperedges allow relations
between multiple sets of work or graph vertices. In
many cases, including unstructured meshes and sparse matrices,
hypergraph partitioning has been shown to reduce the
communication costs of the final partition
\cite{catalyurek1999hypergraph,catalyurek2009repartitioning,devine2006parallel}.
While these methods produce better partitions of the data,
they are more computational and memory intensive relative
to graph-based methods.
\subsection{Diffusive partitioning}
Diffusive partitioning techniques perform improvements to existing
partitions by migrating load across part boundaries. These migrations
are either controlled globally, or locally computed on each part.
The global methods select weight to migrate in order to minimize
the total weight transferred or the maximum weight transferred
in or out of a part \cite{hu1999improved,ou1994parallel}. Local
diffusive partitioners transfer load iteratively from parts
with more work to neighboring parts with less
\cite{cybenko1989dynamic,subramanian1994analysis}. This approach
combined with heuristics for how the load is transferred can have
significantly less computational cost than global methods
\cite{Fiduccia1982,Kernighan1970}.
\subsection{The Current Contribution}
%What is new versus parma?
EnGPar implements local diffusive partitioning for general
usage on a range of applications with relational data structures.
Towards this goal, an expanded graph structure, N-graph, is used to represent relational data structures from different applications. Then, diffusive partitioning techniques used within ParMA are implemented on the N-graph.
The re-implementation of the ParMA methods has been focused
on array-based structures that can support efficient data
parallel operations on GP-GPUs and vector units in many core processors.
These changes will allow EnGPar to perform faster on next generation systems.
In addition to the data parallel implementation
of ParMA algorithms, certain portions of the algorithm have been improved to
further increase the performance of EnGPar on all machines.
\section{N-graph}
EnGPar interfaces to the different partitioning procedures through a multigraph
abstraction called the N-graph. A multigraph
\cite{BANGJENSENmultigraph, BOESCHmultigraph} is a graph that supports multiple
edges between two vertices. Towards
supporting a combination of (hyper)graph, geometric, and diffusive partitioning
methods on relation-based data, the N-graph is defined using one of two modes.
The first is a traditional multigraph with a set of vertices $V$ and $n$
different sets of edges $E_0,...,E_{n-1}$. The N-graph also supports a
multi-hypergraph mode utilizing hyperedges to better represent certain types
of relations. This mode is defined by a set of vertices $V$, $n$ sets of
hyperedges $H_0,...H_{n-1}$ and $n$ sets of pins $P_0,...,P_{n-1}$ which
connect the vertices and hyperedges.
The hyperedge mode for the
N-graph is well suited to relate vertices that share a single connection
between greater than two vertices. Take for instance the construction of
an N-graph given an
unstructured mesh. Mesh elements, $M^3$, are represented
by graph vertices and mesh vertices, $M^0$, are used for
relations between mesh elements. Depending on the mode, edges/hyperedges and
pins will be added to represent these relations. In the
traditional graph mode, an edge is created between two
graph vertices if the mesh elements they represent share a mesh
vertex. In the hypergraph mode, one hyperedge is made for
each mesh vertex. Also, a pin is created to connect a
graph vertex and graph hyperedge if the corresponding mesh
element is bounded by the mesh vertex. To examine these two modes, let
$n$ be the number of mesh elements that bound a certain mesh vertex. On average
in a three-dimensional tetrahedron mesh, $n$ is 23 \cite{beall1997general}. To
represent this relation between all $n$ graph vertices in the N-graph with
traditional edges would require $O(n^2)$ edges in the N-graph. However, when
using hyperedges one
graph edge is created to represent the mesh vertex and $O(n)$ pins to connect
the graph vertices and hyperedges. This results in a reduction in memory usage.
%[TODO].
%as well as runtime improvements for certain graph operations.
Figure \ref{fig:edgecounts} gives an example 2D mesh
where seven mesh elements bound a mesh vertex (a). The
N-graph of this mesh constructed using mesh faces
as graph vertices and vertices for faces is shown in
(b) using traditional edges and in (c) with hyperedges.
In (b) fifteen graph edges are created for the one mesh
vertex while in (c) one hyperedge is added and seven
pins connect the graph vertices to the hyperedge.
\begin{figure}[!ht]
\centering
\includegraphics[width=3.5in]{edgecounts.png}
\caption{(a) a seven triangle mesh surrounding one vertex. (b) N-graph constructed around the vertex using traditional edges to connect the graph vertices whose corresponding mesh elements share the mesh vertex. (c) The N-graph construction using a hyperedge for the mesh vertex and connecting the adjacent mesh elements with pins in the N-graph.}
\label{fig:edgecounts}
\end{figure}
The N-graph also supports having multiple edge types,
whether using traditional edges or hyperedges. This
allows representing multiple layers of connection
between the graph vertices. This is useful to represent
applications that use multidimensional data or complex
levels of communication. One example is an unstructured
mesh, which has vertices,
edges, and faces (in three dimensions) that are shared
between mesh elements. To represent these data
structures for a range of application needs, the
N-graph supports the arbitrary use of edge types to allow
different configurations for applications. Figure
\ref{fig:Mesh2Graph} depicts the mapping of a 2D unstructured mesh (a) to a
representation where mesh elements map to graph vertices and mesh vertices map
to graph hyperedges (b). In (c) a second mapping is shown where mesh edges
are also mapped to a second edge type in the graph. The labels of the entities
in (a) are carried to (b) and (c) to show which mesh entity is represented by
the corresponding graph entity.
\begin{figure}[!ht]
\centering
\includegraphics[width=3.5in]{exampleMesh2Graph.png}
\caption{(a) a 2D unstructured mesh. (b) N-graph construction with elements$\rightarrow$vertices, vertices$\rightarrow$hyperedges. (c) additional mesh edges are used for a second set of hyperedges. Mesh labeling is shared in all three to correlate mesh entities to graph entities.}
\label{fig:Mesh2Graph}
\end{figure}
\section{Diffusive Load Balancing}
%What is the algorithm? Summarize the stuff that is the same as ParMA. Focus on
%the differences and improvements.
The diffusive load balancing techniques used in both ParMA and EnGPar follow
the same basic framework. The techniques iteratively perform a series of steps
until user specified criteria are met or the algorithm can not improve
the partition quality further. The main metric used in EnGPar for partition quality is the imbalance of a set of entities. This value is defined by calculating the sum of the weights of the entities in each part. Then, the maximum across all parts is divided by the average. For example, when equal weights are used, the imbalance of vertices in the N-graph is the maximum number of vertices on a single part divided by the average number of vertices per part, across all processes.
Algorithm \ref{alg:engpar} lists a general
framework of the multi-criteria load balancing procedures in ParMA.
\begin{algorithm}
\caption{ParMA Load Balancing Framework}
\label{alg:engpar}
\small
\begin{algorithmic}[1]
\Procedure{RunStep}{$mesh$,$d$}
\State -Determine the neighboring parts and size of part boundaries.
\State -Compute the weight of the entities in dimension $d$
\State -Share the computed weight with each neighbor.
\State -Determine which neighbors can receive more weight.
\State -Calculate how much weight to send to each neighbor.
\State -Construct an ordering of vertices to traverse the part boundary.
\State -Create migration plan that reduces the imbalance of dimension $d$.
\State -Adjust the plan to maintain balance of previous dimensions.
\State -Perform Migration
\EndProcedure
\Procedure{Balance}{$mesh$,$dimensions$}
\ForAll{$d \in dimensions$}
\While{imbalance of $d$ > tolerance}
\Call{RunStep}{$mesh$,$d$}
\If{Balancing Stagnates}
\State break
\EndIf
\EndWhile
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}
The framework's \texttt{BALANCE} procedure iteratively
calls \texttt{RUNSTEP} to improve the
target imbalance. The \texttt{RUNSTEP} procedure performs six
stages to determine the diffusive load balancing of each
step. The first step on line 2 determines who are the
neighbors of each part and what is the size of the part
boundaries between them. The second computes the weight
of the target dimension $d$ and sends the weight to the
neighboring parts on lines 3 and 4. Then, the heavy
parts decide which neighbors to send weight to and how
much to send on lines 5 and 6. The fourth stage constructs
an ordering of the entities on the boundary for selecting
on line 7. Lines 8 and 9 select entities to be migrated
to the neighboring parts. Finally, a migration is performed
to send the selected entities to neighboring parts on line 10.
The input to the \texttt{BALANCE}
procedure, $dimensions$, is an ordering of the criteria
to be balanced. It is interpreted such that earlier
entries have higher priorities and thus will be balanced
first and their reduced imbalance will be maintained in following
steps. Line 8
of Algorithm \ref{alg:engpar} adjusts the migration plan to
ensure that the imbalance of completed dimensions is not
increased. An example of multi-criteria load balancing
is the ``vertex > element'' case for finite element
methods when degrees of freedom are defined by the mesh vertices.
The degrees of freedom contain the largest
portion of computational work in these simulations
and thus, make balancing the mesh vertices a top
priority. However, these simulations also require balancing
the mesh elements for efficient linear system assembly.
In this example mesh vertices would be the first criteria
followed up by mesh elements.
The work to improve the multi-criteria load
balancing in EnGPar is focused
on two aspects of this algorithm. One is the
generalization to support a larger range of applications
while the second is to improve the speed of the
more communication- and computation-intensive parts.
Each of these is discussed in more detail in the following sections.
\subsection{Generalization of Multicriteria Load Balancing}
The generalization of the framework in EnGPar to support more
applications is partially completed with the usage of the N-graph.
Since ParMA works directly on meshes, it cannot handle
scale-free graphs \cite{pienta2013parallel,gonzalez2012powergraph}
or directly work with unstructured
meshes using vertex-based partitions. Since EnGPar utilizes
the N-graph, it natively
supports other relation based data.
Beyond supporting other forms of data, many applications have
different priorities of balancing the different criteria. While
ParMA is built to perform load balancing targeting finite element
applications, EnGPar takes a more general approach
allowing users to define the ordering and tolerances of criteria.
This is highlighted in Algorithm \ref{alg:engpar} on line 11 with
the $dimensions$ argument. This argument controls the ordering in
which dimensions are balanced with earlier dimensions having higher
priority. In ParMA these inputs would be defined by the balancer
that is applied. Meaning that for ParMA to support additional
applications new balancers need to be defined for the user to use.
In EnGPar the users can use any ordering of priorities
without any new development. This is done by providing
a richer user interface with inputs to control all
necessary components of the diffusive balancing procedure.
\subsection{Graph Distance}
One of the optimizations in ParMA targets decreasing surface area across parts
by ordering the selection of elements to migrate based on their topological
distance from the center of the part. Several challenges arise in the method
due to the existence of disconnected components throughout the part. To get
an optimal ordering it is important to offset the distances of the disconnected
components such that shallower components get a higher priority. This leads to
the shallower components being migrated first.
ParMA computes the graph distance using multiple breadth-first traversals; one
traversal to find disjoint sets, another to locate the topological centers of
each set, and another to compute the distance.
EnGPar reduces the number of traversals from three to two. This
is done by locating the disconnected components during the
traversal to compute distances using a disjoint set data structure.
EnGPar's distance computation algorithm is listed in
Algorithm~\ref{alg:graphdist}. The general breadth-first
traversal is on lines 1 to 5. This performs a
traversal over the entire graph where every time a vertex
is reached the input $kernel$ is run. The algorithm begins
by calling \texttt{COMPUTEDISTANCE}. On line 51, the initial
traversal is used to find the topological center of all
disconnected components simultaneously as well as
the depth of each vertex. The first traversal is seeded
by the vertices that are on the part boundary. Then, an
additional traversal is performed on lines 55-60. This traversal is seeded
by the deepest vertices of the first traversal found
in the \texttt{SETLABELS} procedure on lines 25-39. This
traversal is repeated until every vertex has a
distance by computing the next deepest level of vertices
that does not have a distance.
When multiple vertices share the same depth,
it is possible that multiple disconnected components will
be traversed simultaneously. To differentiate the
components, a disjoint set is initialized for each
vertex in the deepest level. The disjoint set data
structure, commonly known for its use in Kruskal's
algorithm for finding minimum spanning trees \cite{disjointset}, allows fast joining
and comparison of sets. The data structure consists of a label for each vertex
and a parent pointer that initially points to itself. When two sets are unioned,
one of the sets is chosen as the parent and the other set points its parent
pointer to that set. This allows all sets to be represented by its highest
parent allowing comparison of two sets in $O(\log{n})$, in the average case,
where $n$ is the number of sets in the beginning. The
remaining usage of the disjoint sets
is in the \texttt{DISTANCE\_KERNEL} on lines 15-19 where new vertices are assigned
a label and already assigned vertices are unioned together. If there are
multiple disjoint sets after the traversal is complete,
the distances of each disjoint
set is offset such that the deepest disjoint sets have lower distances.
\begin{algorithm}
\caption{EnGPar Graph Distance Computation}\label{alg:graphdist}
\small
\begin{algorithmic}[1]
\Procedure{Traversal}{$seed$, $graph$, $kernel$}
\State Breadth-first traversal of the $graph$ vertices
\State starting with the $seed$ vertices.
\State The $kernel$ is called for each visited vertex.
\EndProcedure
\Procedure{depth\_kernel}{$u$, $G$}
\For{ $v \in e(u,v)$ }
\If{ not $visited(v)$ }
\State $depth(v) \gets depth(u)+1$
\EndIf
\EndFor
\EndProcedure
\Procedure{distance\_kernel}{$u$, $G$}
\For{ $v \in e(u,v)$ }
\If{ not \Call{find}{$v$} }
\State $label(v) \gets$ \Call{find}{$u$}
\ElsIf{ \Call{find}{$u$} != \Call{find}{$v$} }
\State \Call{union}{$u$,$v$}
\Comment{{\tiny merge them into the same set}}
\EndIf
\If{ not $visited(v)$ }
\State $distance(v) \gets distance(u)+1$
\EndIf
\EndFor
\EndProcedure
\Procedure{setLabels}{$visited$,$levels$}
\State $init \gets \emptyset$
\For{ $u \in levels$ }
\If{ not $visited(u)$ }
\State break
\EndIf
\EndFor
\State $maxdepth \gets depth(u)$
\For{ $u \in levels$ }
\If{ not $visited(u)$ and $depth(u) == maxdepth$ }
\State $init \gets init \bigcup u$
\EndIf
\EndFor
\State \textbf{return} $init$
\EndProcedure
\Procedure{maxVisitedDepth}{$visited$}
\State $depth \gets 0$
\For{ $u \in visited$ }
\If{ $visited(u)$ and $depth < depth(u)$}
\State $depth \gets depth(u)$
\EndIf
\EndFor
\State \textbf{return} $depth$
\EndProcedure
\Procedure{computeDistance}{$G$}
\State $init \gets$ vertices classified on ${d-1}$ partition model entities
\State \Call{Traversal}{$init$,$G$,$depth\_kernel$}
\State $levels \gets$ \Call{sort}{$G$}
\Comment{{\tiny $levels$ is an array of vertices in order of decreasing depth}}
\State $visited \gets array(|V|,0)$
\State $depth \gets 0$
\While{ $init \gets$ \Call{setlabels}{$visited$,$levels$} }
\Comment{{\tiny Label the vertices with the largest remaining
depth. If none remain, then break from the while loop.}}
\State foreach $u \in init$: \Call{makeSet}{$u$}
\State foreach $u \in init$: $depth(u) \gets depth$
\State \Call{Traversal}{$init$,$G$,$distance\_kernel$} \label{step:distbfs}
\State $depth \gets$ \Call{maxVisitedDepth}{$visited$}
\EndWhile
\EndProcedure
\end{algorithmic}
\end{algorithm}
%see
%\url{http://www.csl.mtu.edu/cs4321/www/Lectures/Lecture\%2019\%20-\%20Kruskal\%20Algorithm\%20and\%20Dis-joint\%20Sets.htm}
%for details of Kruskal's disjoint set procedures
\section{Experiments}
EnGPar and ParMA are compared by running a workflow
starting from a PUMI mesh and ending with a repartitioned
PUMI mesh.
For ParMA, this consists of running its load balancer
on the PUMI mesh,
but for EnGPar, this includes conversion from the PUMI mesh
to the N-graph, running the EnGPar balancer, and
migrating the PUMI mesh elements following the EnGPar partitioning assignments.
For our experiments,
the N-graph is constructed such that the mesh elements are
represented by graph vertices and the mesh vertices correspond
to graph hyperedges. The final repartition of the PUMI mesh
is done by running the PUMI migration routine used in ParMA
with the partition of the N-graph.
We compare the performance of EnGPar's load balancing
procedure to ParMA on the Mira BlueGene/Q system at
the Argonne Leadership Computing Facility. The
experiments use one processes per hardware thread \cite{haring2012ibm}.
Tests were run on a one billion element mesh
of an airplane's vertical tail structure. The original
4Ki($4*2^{10}$) part mesh was partitioned to 8Ki parts using
global ParMETIS part k-way \cite{karypis1999parallel}
which repartitions the global structure to ensure
equal load across all parts. Then, partitions in
powers of two are created from 128Ki to 512Ki parts
using local ParMETIS part k-way which splits each
part, but only balances the newly split parts instead
of the global partition. The local method is much
faster than the global method especially at large
part counts. These meshes start with an element imbalance
of 1.02 while the vertex imbalance ranges from 1.12 at 128Ki
to 1.53 at 512Ki processes. ParMA and EnGPar are run with
mesh vertices > mesh elements criteria with a target
imbalance of 1.05.
Statistics are gathered for the
imbalance of vertices and elements. Additionally, since PUMI copies mesh vertices that
exist on part boundaries we report the average number of
mesh vertices per part before and after EnGPar and ParMA
are run. Lastly, the overall time of the
balancing is provided. In the case of EnGPar, this time includes the
conversion from PUMI mesh to N-graph and the repartition of the
mesh after running EnGPar's balancer. Finer grain timing is
reported for a deeper understanding of the most expensive
operations in ParMA and EnGPar.
\section{Results}
Figure \ref{fig:imbgraphs} shows the vertex imbalance
and element imbalance respectively. The original partition,
labeled `initial' in the charts, is also shown
as well as a line representing the target imbalance. For vertices, EnGPar
is able to reduce the imbalance to the target level in both the 128Ki and
256Ki cases. In the 512Ki case EnGPar significantly
reduces the imbalance from 1.53 to 1.12, but it is not able to
reach the target imbalance like ParMA does. EnGPar is able to
maintain the element imbalance at the target level for all three process counts.
\begin{figure}[!ht]
\centering
\includegraphics[width=3in]{results/vimb_v_cores}
\includegraphics[width=3in]{results/eimb_v_cores}
\caption{Vertex(top) and Element(bottom) imbalances for the initial partitioning and the partitions from EnGPar and ParMA. Additionally a line representing the tolerance is provided.}
\label{fig:imbgraphs}
\end{figure}
Table \ref{tbl:avgvtx} details the average number of
vertices in each part of the mesh. As larger surface area
leads to more shared vertices across part boundaries in the PUMI mesh, the average number of shared vertices increases. This increase results in more computational work and communication for the application.
ParMA maintains the
average number of vertices with a slight decrease in all cases. EnGPar
increases the number of vertices by one to four percent across the
three runs. EnGPar currently does not target surface area as a criterion
and therefore will prioritize the entity imbalance over an increase in
surface area.
\begin{table}[!h]
\centering
\begin{tabular}{||c|c|c|c||}
\hline
&128Ki&256Ki&512Ki \\
\hline
Initial & 2146.404 & 1138.881 & 611.673 \\
ParMA & 2141.965 & 1137.343 & 610.959 \\
EnGPar & 2161.521 & 1155.727 & 637.354 \\
\hline
\end{tabular}
\caption{Average number of mesh vertices per part.}
\label{tbl:avgvtx}
\end{table}
Figure \ref{fig:timegraph} shows the runtime for ParMA and EnGPar.
Since EnGPar does not reach the tolerance in the 512Ki parts case, the timing
presented for ParMA is for a run where the target
imbalance is set to the imbalance that EnGPar
reaches when it finishes, 1.12 vertex and 1.05
element imbalance. In all cases, EnGPar runs faster
than ParMA. For the 128Ki and 256Ki cases, where
EnGPar successfully reaches the target imbalance,
EnGPar is around 49\% and 38\% faster than ParMA.
In the 512Ki case, EnGPar reaches the 1.12 vertex
imbalance and 1.05 element imbalance targets 33\% faster than ParMA does.
\begin{figure}[!ht]
\centering
\includegraphics[width=3in]{results/time_v_cores.eps}
\caption{Runtime of ParMA and EnGPar running a vertex>elements balancer at 128Ki, 256Ki, and 512Ki processes. ParMA is run until the mesh has been balanced to the end imbalance that EnGPar reaches.}
\label{fig:timegraph}
\end{figure}
Figures \ref{fig:gdtime}
and \ref{fig:migration} display the time spent computing the
graph distance and migrating entities respectively. We report these two
operations because they have the most computational work in ParMA.
Figure \ref{fig:gdtime} shows the improvements in EnGPar's graph
distance computation algorithm over ParMA's with an 80-90\% speedup
across each part count. Also, EnGPar continues to scale across all
three runs, while ParMA runs slower at 512Ki than 256Ki. The
improvements to the graph distance computation are
responsible for most of the speedup to the EnGPar
balancing over ParMA.
\begin{figure}[!ht]
\centering
\includegraphics[width=3in]{results/gd_v_cores.eps}
\caption{Time spent computing the graph distance for ParMA and EnGPar at 128Ki, 256Ki, and 512Ki processes.}
\label{fig:gdtime}
\end{figure}
While there is a large speedup because of the graph
distance computation, the same is not the case for migration.
As seen in Figure \ref{fig:migration}, ParMA spends
less time in migration than EnGPar for all three cases.
At 128Ki ParMA
spends 9\% less time, 16\% at 256Ki and 32\% at 512Ki.
Even though EnGPar is spending more total time
migrating entities than ParMA, the EnGPar migration
routine generally runs faster than ParMA's due to
having less data to migrate. In
Table \ref{tbl:steps} the number of iterations of the
\texttt{RUNSTEP} function in Algorithm \ref{alg:engpar} is
shown for both ParMA and EnGPar. In all three cases,
EnGPar takes a few more iterations than ParMA to reach
the target imbalance. Thus, EnGPar performs more migrations
of entities than ParMA. This results in an increase in the
runtime of the load balancer.
\begin{figure}[!ht]
\centering
\includegraphics[width=3in]{results/migrate_v_cores.eps}
\caption{Time spent migrating entities in ParMA and EnGPar at 128Ki, 256Ki, and 512Ki processes.}
\label{fig:migration}
\end{figure}
\begin{table}[!h]
\centering
\begin{tabular}{||c|c|c|c||}
\hline
&128Ki&256Ki&512Ki \\
\hline
ParMA &7&6 &10 \\
EnGPar&8 &9 &16 \\
\hline
\end{tabular}
\caption{Number of iterations of the \texttt{RunStep} procedure from Algorithm \ref{alg:engpar} for each process count for EnGPar and ParMA.}
\label{tbl:steps}
\end{table}
Figure \ref{fig:partsgraph} breaks down the three portions of the timing for
the EnGPar runs. The construction of the N-graph exhibits strong
scaling quite nicely, however repartitioning scales much slower.
This step is dominated by the PUMI migration, so the step's
scaling is driven by the scaling of the user's
migration routine.
Unlike construction and repartition, balancing does
not decrease in runtime with increasing number of
parts. The amount of time that is spent
migrating entities during balancing is shown as the bottom
portion of each bar. Migration
takes from 49\% to 51\% of the balancing step of each run.
\begin{figure}[!ht]
\centering
\includegraphics[width=3in]{results/timeparts_v_cores.eps}
\caption{A breakdown of timing for each step of the EnGPar runs. The bottom part of the bar for balancing represents the migration component of the balancer.}
\label{fig:partsgraph}
\end{figure}
\section{Closing Remarks and Future Work}
EnGPar has been shown to significantly reduce, or maintain, the
imbalance for multiple criteria simultaneously.
EnGPar is able to perform these load balancing
algorithms faster than its predecessor ParMA. However, there
are cases where EnGPar stagnates and is unable
to reach the target imbalance that ParMA is able
to achieve.
Furthermore, there is an increase in the number of mesh
vertices in the final partition which means that the
length of the part boundaries is increasing. Thus, we are
tracking down the causes of these deficiencies and
how to resolve them.
One deficiency we have tracked is due to
the surface area of parts. This is a result of one
aspect of ParMA that is currently not in EnGPar.
In ParMA, several stages of element selection are affected by the current
surface area of the part and the effect of migration
on the surface area. In EnGPar, these
checks have not been implemented and is likely a major
cause of the increase in the number of vertices in the mesh.
While EnGPar is running faster than ParMA, there
are several inefficiencies that improving upon would
further speedup EnGPar. Most of the inefficiencies
are caused by the increase in the number of iterations it
takes to finish. Sending more weight
in each step or improving the selection of entities
to send will lead to fewer iterations required and
thus faster runtimes. Beyond just the number of
iterations, many of the algorithms in EnGPar can be
performed well on GPUs. Implementing the algorithms
on GPUs will lead to further speedups in key portions
such as the graph distance computation and migration.
Towards this, initial implementations of the
breadth-first (hyper)graph traversal is being written
and tested using Kokkos \cite{edwards2013kokkos}.
The results here are all given for unstructured meshes
and compared to EnGPar's predecessor, ParMA. Now that
EnGPar has the ability to perform load balancing
operations on general structures rather than just
meshes, these techniques can be applied to other
applications that use relation based data structures
like scale-free graphs and vertex-based meshes.
\begin{acks}
This research is supported by the National Science
Foundation under Grant ACI 1533581. The support of
the U.S. Department of Energy, Office of Science,
Office of Advanced Scientific Computing Research,
under award DE-SC00066117 (FASTMath SciDAC Institute)
is also acknowledged.
An award of computer time was provided by the Innovative and Novel
Computational Impact on Theory and Experiment (INCITE) program and a separate
award of computer time by the Theta Early Science program.
This research used resources of the Argonne Leadership Computing Facility,
which is a DOE Office of Science User Facility supported under Contract
DE-AC02-06CH11357.
\end{acks}
|
{"hexsha": "db09880f5eafcf314ed33df0b24ce87fbfc40def", "size": 34016, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sc17/paper/engparSC17-body.tex", "max_stars_repo_name": "SCOREC/EnGPar-Docs", "max_stars_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sc17/paper/engparSC17-body.tex", "max_issues_repo_name": "SCOREC/EnGPar-Docs", "max_issues_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sc17/paper/engparSC17-body.tex", "max_forks_repo_name": "SCOREC/EnGPar-Docs", "max_forks_repo_head_hexsha": "e99ac24b81842e2638f34420abce7cf981efbca1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6414565826, "max_line_length": 482, "alphanum_fraction": 0.783778222, "num_tokens": 8238}
|
# load tools
import os
import random
import numpy as np
from scipy.spatial import distance_matrix as distM
import math
import abc
from jmetal.core.problem import PermutationProblem
from jmetal.core.solution import PermutationSolution
import jmetal.algorithm.singleobjective as so
from jmetal.core.operator import Mutation, Crossover, Selection
from jmetal.util.ckecking import Check
import copy
from typing import List, TypeVar
from jmetal.util.termination_criterion import StoppingByEvaluations
from jmetal.operator.selection import BinaryTournamentSelection
# *************************************************************************
# ***********************CHANGES TO jmetalpy CLASSES***********************
# *************************************************************************
# TSP problem definition (based on jmetalpy improved to import distance matrix directly)
class TSP2(PermutationProblem):
    """Symmetric TSP defined directly from a matrix of city coordinates.

    Unlike jmetalpy's stock TSP problem, the Euclidean distance matrix is
    computed from the coordinate array given to the constructor instead of
    being parsed from a TSPLIB file.
    """
    def __init__(self, coord_matrix):
        super(TSP2, self).__init__()
        self.coord_matrix = coord_matrix
        self.distance_matrix = self._compute_distance()
        self.obj_directions = [self.MINIMIZE]
        self.number_of_variables = len(coord_matrix)
        self.number_of_objectives = 1
        self.number_of_constraints = 0
    def _compute_distance(self):
        # Pairwise Euclidean distances between every pair of cities.
        return distM(self.coord_matrix, self.coord_matrix)
    def evaluate(self, solution: PermutationSolution) -> PermutationSolution:
        """Set solution.objectives[0] to the closed-tour length of the permutation."""
        tour = solution.variables
        # Start with the edge closing the tour, then add consecutive edges.
        total = self.distance_matrix[tour[0]][tour[-1]]
        for here, there in zip(tour, tour[1:]):
            total += self.distance_matrix[here][there]
        solution.objectives[0] = total
        return solution
    def create_solution(self) -> PermutationSolution:
        """Return a fresh PermutationSolution holding a random city permutation."""
        candidate = PermutationSolution(number_of_variables=self.number_of_variables,
                                        number_of_objectives=self.number_of_objectives)
        candidate.variables = random.sample(range(self.number_of_variables),
                                            k=self.number_of_variables)
        return candidate
    @property
    def number_of_cities(self):
        return self.number_of_variables
    @property
    def matrix_of_distances(self):
        return self.distance_matrix
    def get_name(self):
        return 'Symmetric TSP'
# Swap mutation (re-implemented; the jmetalpy original has an index error)
class PermutationSwapMutation(Mutation[PermutationSolution]):
    """Swap mutation for permutations with a selectable swap strategy.

    randMut == 1 draws both positions uniformly at random; randMut == 2 asks
    bestMutation2 for an improving pair based on the distance matrix D.
    (jmetalpy sampled from range(n - 1), wrongly excluding the last position;
    that off-by-one is not reproduced here.)
    """
    def __init__(self, probability: float, randMut, D=0, n=0, first=True):
        super(PermutationSwapMutation, self).__init__(probability=probability)
        self.randMut = randMut
        self.D = D
        self.n = n
        self.first = first
    def execute(self, solution: PermutationSolution) -> PermutationSolution:
        Check.that(type(solution) is PermutationSolution, "Solution type invalid")
        if random.random() <= self.probability:
            if self.randMut == 1:
                # Uniform choice over all n positions (off-by-one fixed here).
                i, j = random.sample(range(solution.number_of_variables), 2)
            elif self.randMut == 2:
                i, j, _ = bestMutation2(self.D, self.n, solution.variables, self.first)
            solution.variables[i], solution.variables[j] = \
                solution.variables[j], solution.variables[i]
        return solution
    def get_name(self):
        return 'Permutation Swap mutation'
class PMXCrossover(Crossover[PermutationSolution, PermutationSolution]):
    """Partially Matched Crossover (PMX) for permutation solutions.

    With probability `probability`, two cut points are drawn, the segments
    between them are exchanged, and duplicates outside the segment are
    repaired via the PMX mapping so both children stay valid permutations.
    """
    def __init__(self, probability: float):
        super(PMXCrossover, self).__init__(probability=probability)
    def execute(self, parents: List[PermutationSolution]) -> List[PermutationSolution]:
        # Children start as copies of the parents; untouched when no crossover fires.
        if len(parents) != 2:
            raise Exception('The number of parents is not two: {}'.format(len(parents)))
        offspring = [copy.deepcopy(parents[0]), copy.deepcopy(parents[1])]
        permutation_length = offspring[0].number_of_variables
        rand = random.random()
        if rand <= self.probability:
            # NOTE(review): randint's upper bound is inclusive, so a cut point can
            # equal permutation_length; slicing tolerates that (empty tail segment).
            cross_points = sorted([random.randint(0, permutation_length) for _ in range(2)])
            def _repeated(element, collection):
                # True when `element` occurs more than once in `collection`.
                c = 0
                for e in collection:
                    if e == element:
                        c += 1
                return c > 1
            def _swap(data_a, data_b, cross_points):
                # Exchange the [c1:c2) segments between the two chromosomes.
                c1, c2 = cross_points
                new_a = data_a[:c1] + data_b[c1:c2] + data_a[c2:]
                new_b = data_b[:c1] + data_a[c1:c2] + data_b[c2:]
                return new_a, new_b
            def _map(swapped, cross_points):
                # Repair duplicates outside the swapped segment by repeatedly
                # following the PMX mapping between the two exchanged segments.
                n = len(swapped[0])
                c1, c2 = cross_points
                s1, s2 = swapped
                map_ = s1[c1:c2], s2[c1:c2]
                for i_chromosome in range(n):
                    if not c1 < i_chromosome < c2:
                        for i_son in range(2):
                            while _repeated(swapped[i_son][i_chromosome], swapped[i_son]):
                                map_index = map_[i_son].index(swapped[i_son][i_chromosome])
                                swapped[i_son][i_chromosome] = map_[1 - i_son][map_index]
                return s1, s2
            swapped = _swap(parents[0].variables, parents[1].variables, cross_points)
            mapped = _map(swapped, cross_points)
            offspring[0].variables, offspring[1].variables = mapped
        return offspring
    def get_number_of_parents(self) -> int:
        return 2
    def get_number_of_children(self) -> int:
        return 2
    def get_name(self):
        return 'Partially Matched crossover'
S = TypeVar('S')
# this class has been modified to look for minimization
class RouletteWheelSelection(Selection[List[S], S]):
    """Fitness-proportionate (roulette wheel) selection adapted for minimization.

    With beta == 0 each solution's wheel slice is 1/objective, so smaller
    objectives are selected more often; with beta != 0 a Boltzmann-style
    weight exp(-beta * objective) is used instead.
    """
    def __init__(self, beta):
        # BUG FIX: the original called super(RouletteWheelSelection).__init__(),
        # which creates an *unbound* super object and never runs
        # Selection.__init__ on the instance.
        super(RouletteWheelSelection, self).__init__()
        self.beta = beta
    def execute(self, front: List[S]) -> S:
        """Pick one solution from `front`, favouring lower objective values."""
        if front is None:
            raise Exception('The front is null')
        elif len(front) == 0:
            raise Exception('The front is empty')
        if self.beta == 0:
            # jmetalpy's stock version sums raw objectives (maximization);
            # here the weights are inverted so smaller objectives win.
            maximum = sum([1 / solution.objectives[0] for solution in front])
        else:
            maximum = sum([math.exp(-self.beta * solution.objectives[0])
                           for solution in front])
        rand = random.uniform(0.0, maximum)
        value = 0.0
        for solution in front:
            if self.beta == 0:
                value += 1 / solution.objectives[0]
            else:
                value += math.exp(-self.beta * solution.objectives[0])
            if value > rand:
                return solution
        # Floating-point round-off may leave the wheel unconsumed; the
        # original returned None in that case, so that is preserved.
        return None
    def get_name(self) -> str:
        return 'Roulette wheel selection'
# *************************************************************************
# ***********************IMPLEMENT GENETIC ALGORITHM***********************
# *************************************************************************
def tryTSP(problem, population_size, offspring_population_size, mutation,
           crossover, selection, termination_criterion):
    """Build, run and report a jmetal GeneticAlgorithm on `problem`.

    Prints a summary of the run and returns the best solution found.
    """
    solver = so.GeneticAlgorithm(
        problem=problem,
        population_size=population_size,
        offspring_population_size=offspring_population_size,
        mutation=mutation,
        crossover=crossover,
        selection=selection,
        termination_criterion=termination_criterion)
    solver.run()
    best = solver.get_result()
    # Human-readable run summary (same format as jmetal's examples).
    print('Algorithm: {}'.format(solver.get_name()))
    print('Problem: {}'.format(problem.get_name()))
    print('Solution: {}'.format(best.variables))
    print('Fitness: {}'.format(best.objectives[0]))
    print('Computing time: {}'.format(solver.total_computing_time))
    return best
# *************************************************************************
# ******************CLOSEST NEIGHBOUR ALGORITHM****************************
# *************************************************************************
# determine the closest neighbour of a given city
def findClosest(D, M, from_city, among_ind):
    """Return (index, distance) of the city in `among_ind` closest to `from_city`.

    D is the full pairwise distance matrix and M its maximum entry; when
    `among_ind` is empty, the sentinel pair (from_city, M + 1) is returned,
    matching the original scan's behaviour.
    """
    candidates = np.asarray(among_ind, dtype=int)
    if candidates.size == 0:
        # No remaining cities: keep the original sentinel result.
        return (from_city, M + 1)
    # Vectorized replacement for the original Python loop (resolves the
    # "to be optimized with numpy" TODO). Tie-breaking is identical:
    # np.argmin returns the first occurrence of the minimum.
    dists = D[from_city, candidates]
    k = int(np.argmin(dists))
    return (int(candidates[k]), dists[k])
# find the greedy nearest-neighbour path starting from a given city
def findOptPath(D, n, M, from_city = 0):
    """Build a tour by repeatedly hopping to the nearest unvisited city.

    Returns (total length, path as index array, per-edge distances).
    The last entry of arrDist is the edge closing the tour.
    NOTE(review): assumes n >= 2 -- for n < 2 the loop never runs and `i`
    is undefined on the line after it (NameError).
    """
    city = from_city
    totDist = 0
    arrDist = np.zeros(n)
    availableCities = np.arange(n, dtype=int)
    path = np.zeros(n, dtype=int)
    path[0]=from_city
    for i in range(1, n):
        # Drop the current city from the candidate set, then greedily
        # move to its closest remaining neighbour.
        availableCities = availableCities[ availableCities != city ]
        (neighb, dist) = findClosest(D, M, city, availableCities)
        path[i] = neighb
        arrDist[i-1] = dist
        totDist += dist
        city = neighb
    # Reuses the final loop value i == n-1 to store the closing edge.
    arrDist[i] = D[from_city, city]
    totDist += D[from_city, city]
    return (totDist, path, arrDist)
# find the optimal path w.r.t. closest neighbour including choice of starting city
# if optimize==True then path is optimized with best mutation
def findGLobalOptPath(D, n, optimize = False, maxLoop=100, verbal = False, first = False):
    """Run nearest-neighbour construction from every possible start city.

    Returns (per-start tour lengths, per-start paths as an n x n matrix,
    per-start edge-length arrays). When `optimize` is True each greedy tour
    is additionally refined with swap-based local search (optimizePath).
    """
    vecDist = np.zeros(n)
    matOptPath = np.zeros((n,n), dtype=int)
    matArrDist = np.zeros((n,n))
    # M (global max distance) is the sentinel used by findClosest.
    M = np.amax(D, axis = None)
    for from_city in range(n):
        (vecDist[from_city], matOptPath[from_city,:], matArrDist[from_city,:]) = \
            findOptPath(D, n, M, from_city)
        if optimize:
            # NOTE: matArrDist is not recomputed after optimization, so its
            # row still reflects the pre-optimization greedy tour.
            (matOptPath[from_city,:], vecDist[from_city]) = \
                optimizePath(D, n, matOptPath[from_city,:], verbal=verbal, maxLoop=maxLoop, first=first)
    return(vecDist, matOptPath, matArrDist)
# find best mutation (2-city swap) to improve a path
def bestMutation2(D, n, path, first=False):
    """Search the swap neighbourhood of `path` for an improving 2-city swap.

    Builds the full n x n matrix of length deltas for swapping positions
    (i, j) of the tour, handling consecutive and non-consecutive positions
    (and the wrap-around pair (0, n-1)) separately.

    Returns (pos1, pos2, new length). When no swap improves the tour, the
    unchanged (0, 0, current length) is returned. With first=False the best
    (most negative delta) swap is chosen; with first=True a random improving
    swap is chosen (non-deterministic).
    """
    refLen = pathLen(path, D, n)
    myMin = refLen
    mut1, mut2 = 0, 0
    Lpath = np.array(path).tolist()  # convert in cases of getting a path as an array
    # Distance matrix reordered to tour order: Dpath[i, j] is the distance
    # between the i-th and j-th cities of the tour.
    Dpath = D[Lpath, :][:, Lpath]
    d0 = np.diagonal(Dpath, 1)
    d_loop = Dpath[0, n-1]
    d1 = np.append( d0, d_loop)         # edge lengths of the current tour
    slideP = np.roll(np.arange(n), 1).tolist()
    slideM = np.roll(np.arange(n), -1).tolist()
    # for non consecutive mutations (i, i+3 at least)
    loss0 = d1 + d1[slideP]
    loss1 = np.array([loss0,]*n) + np.array([loss0,]*n).transpose()
    gain1 = Dpath[slideM, :] + Dpath[slideP, :] + Dpath[:, slideM] + Dpath[:, slideP]
    mutationMat1 = gain1 - loss1
    # for consecutive mutations (i, i+1)
    loss01 = d1[slideM] + d1[slideP]
    loss2 = np.diag(loss01[0:(n-1)], 1)
    loss2[0,n-1] = loss01[n-1]
    gain21 = Dpath[slideP, :] + Dpath[:, slideM]
    gain2 = np.diag( np.diag( gain21, 1 ), 1 )
    gain2[0, n-1] = gain21[0, n-1]
    mutationMat2 = gain2 - loss2
    # Upper triangle only: swap (i, j) == swap (j, i).
    mutationMat = np.triu(mutationMat1, 2) + np.diag( np.diag( mutationMat2, 1 ), 1 )
    mutationMat[0, n-1] = Dpath[0, n-2] + Dpath[1, n-1] - Dpath[0, 1] - Dpath[n-2, n-1]
    improvedMut = mutationMat[mutationMat<0]
    if improvedMut.shape != (0,):
        if not first:
            # Greedy flavour: take the single most negative delta.
            bestPair = np.argwhere(mutationMat == np.min(mutationMat))[0]
            mut1, mut2 = int(bestPair[0]), int(bestPair[1])
            # BUG FIX: the original did mutationMat[bestPair], which
            # fancy-indexes two whole *rows* instead of one scalar, so
            # myMin came back as an array.
            myMin = mutationMat[mut1, mut2] + refLen
        else:
            # First-improvement flavour: pick a random improving swap.
            # BUG FIX: the original computed bestPair here but never
            # assigned mut1/mut2/myMin, so first=True always returned
            # the do-nothing result (0, 0, refLen).
            choose = np.random.choice(improvedMut)
            bestPair = np.argwhere(mutationMat == choose)[0]
            mut1, mut2 = int(bestPair[0]), int(bestPair[1])
            myMin = mutationMat[mut1, mut2] + refLen
    return (mut1, mut2, myMin)
# compute the length of a given path
def pathLen(path, D, n):
    """Return the total length of the closed tour `path` over distance matrix D."""
    # Closing edge first, then every consecutive edge of the tour.
    total = D[path[0], path[n-1]]
    for here, there in zip(path[:n-1], path[1:n]):
        total += D[here, there]
    return total
# implement a mutation
def mutate(path, i, j):
    """Swap the cities at positions i and j in place and return the path."""
    tmp = path[i]
    path[i] = path[j]
    path[j] = tmp
    return path
# implement best mutation optimization (swap-based local search)
def optimizePath(D, n, path, verbal = True, maxLoop = 100, first=False ):
    """Refine `path` by repeatedly applying improving 2-city swaps.

    Stops after `maxLoop` swaps or as soon as bestMutation2 reports no
    improving swap. Returns (optimized path copy, its length).
    """
    currentPath = copy.deepcopy(path)
    # Initialize so the function is well-defined even when maxLoop <= 0
    # (the original raised NameError on myMin in that case).
    myMin = pathLen(currentPath, D, n)
    count = 0
    while count < maxLoop:
        (mut1, mut2, myMin) = bestMutation2(D, n, path=currentPath, first=first)
        if verbal:
            print(myMin)
        if mut1 == mut2:
            # BUG FIX: no improving swap found. The original never cleared
            # its go_on flag and only incremented count after a swap, so it
            # kept re-evaluating the same locally-optimal path forever.
            break
        currentPath = mutate(currentPath, mut1, mut2)
        count += 1
    return (currentPath, myMin)
|
{"hexsha": "2645249976f409ad42651034587f2ed58f495b34", "size": 13008, "ext": "py", "lang": "Python", "max_stars_repo_path": "TSP_util.py", "max_stars_repo_name": "comevussor/Metaheuristic-Optimization", "max_stars_repo_head_hexsha": "548b3c587e885aa0dacb9f6469b2d2142c6449bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TSP_util.py", "max_issues_repo_name": "comevussor/Metaheuristic-Optimization", "max_issues_repo_head_hexsha": "548b3c587e885aa0dacb9f6469b2d2142c6449bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TSP_util.py", "max_forks_repo_name": "comevussor/Metaheuristic-Optimization", "max_forks_repo_head_hexsha": "548b3c587e885aa0dacb9f6469b2d2142c6449bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1417322835, "max_line_length": 127, "alphanum_fraction": 0.5925584256, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3141}
|
% !TEX spellcheck = en_US
% !TEX encoding = UTF-8
\documentclass[a4paper, 12pt]{article}
\usepackage{graphicx}
\usepackage[tuenc]{fontspec}
\usepackage{xcolor}
\usepackage[hidelinks]{hyperref}
\usepackage{csquotes}
\usepackage[british]{babel}
\usepackage[backend=biber, sorting=none, dateabbrev=false]{biblatex}
\usepackage[left=3.4cm, right=2.5cm, top=3cm, bottom=3cm]{geometry}
\usepackage{tikz}
\usepackage{pgfgantt}
\usepackage{subcaption}
\usepackage{float}
\usepackage{array}
\usepackage{enumitem}
\usepackage{multirow}
\usepackage{makecell}
\usepackage{hhline}
\usetikzlibrary{positioning}
\setmainfont{CMU Serif}
\setlength{\parskip}{\baselineskip}
\newcolumntype{P}[1]{>{\raggedright\arraybackslash}p{#1}}
% Fix bibliography parameters for overfull
\setcounter{biburlnumpenalty}{5000}
\setcounter{biburllcpenalty}{7000}
\setcounter{biburlucpenalty}{8000}
% Bibliography
\addbibresource{../bibliography/all.bib}
\addbibresource{../bibliography/medical.bib}
\addbibresource{../bibliography/neural.bib}
%%%%%%%%%%%% BEGIN OF DOCUMENT %%%%%%%%%%%%%%%%%%
\begin{document}
% Title page can be commented to remove it
\begin{titlepage}
\centering
\vspace{1.5cm}
{\huge \textbf{\textsc{Unlock the potential of medical imaging data using deep learning}} \par}
\vspace{2cm}
{\Large \textit{Joan Marcè i Igual}\par}
\vfill
Director: Dr.~Benjamin \textsc{Haibe-Kains}
\vfill
\includegraphics[width=0.2\textwidth]{images/logo_upc}\par\vspace{1cm}
\vfill
% Bottom of the page
{\LARGE Universitat Politècnica de Catalunya \par}
{\LARGE 2018 \par}
\end{titlepage}
\tableofcontents
\listoffigures
\listoftables
\pagebreak
% Just comment this lines to enable/disable some parts of the document
% Deliverable 1
\include{001_context}
% Deliverable 2
\include{002_planning}
% Deliverable 3
\include{003_budget}
\pagebreak
\section{References}
\printbibliography[heading=none]{}
\end{document}
|
{"hexsha": "fbf49c54a238476623052ae49ef42068b71670b5", "size": 1934, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "LATEX/GEP_deliverable/main.tex", "max_stars_repo_name": "jmigual/FIB-TFG", "max_stars_repo_head_hexsha": "7551a3c13a985ee7eecf7a4f38a6ee4803b05ff1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-02T15:17:51.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-02T15:17:51.000Z", "max_issues_repo_path": "LATEX/GEP_deliverable/main.tex", "max_issues_repo_name": "jmigual/FIB-TFG", "max_issues_repo_head_hexsha": "7551a3c13a985ee7eecf7a4f38a6ee4803b05ff1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LATEX/GEP_deliverable/main.tex", "max_forks_repo_name": "jmigual/FIB-TFG", "max_forks_repo_head_hexsha": "7551a3c13a985ee7eecf7a4f38a6ee4803b05ff1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-23T08:11:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-23T08:11:28.000Z", "avg_line_length": 21.9772727273, "max_line_length": 97, "alphanum_fraction": 0.7507755946, "num_tokens": 629}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# import model
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# import module to calculate model perfomance metrics
from sklearn import metrics
import pickle
import json
# Apply seaborn's default plot styling globally.
sns.set()
# Input data, model output locations and the CSV column schema.
# NOTE(review): absolute, machine-specific paths -- consider making configurable.
dataPath = "/media/joshua/martian/staffordshireUniversity/phd-thesis/datafiles/workload_prediction_data.csv"
model_file_path = '/media/joshua/martian/staffordshireUniversity/phd-thesis/models'
model_name = 'regres_finalized_model.sav'
out_path = "/media/joshua/martian/staffordshireUniversity/mlthesis/out"
workload_pred = "workload_pred.json"
colNames = ['Payload','RunningTime','ThroughputPersec']
#dataset = pd.read_csv(dataPath, delimiter="|", names=colNames, header=None)
def load_dataset(dataPath, colNames):
    """Read the pipe-delimited, headerless workload CSV into a DataFrame.

    Column labels are taken from `colNames`; the frame feeds the
    model-prediction pipeline below.
    """
    frame = pd.read_csv(dataPath, delimiter="|", names=colNames, header=None)
    return frame
# Full training dataset, loaded once at import time for the pipeline below.
dataset = load_dataset(dataPath,colNames)
def get_dataframe_head(n):
    """ Returns first n rows of the DataFrame"""
    # Operates on the module-level `dataset` loaded above.
    return dataset.head(n)
def get_dataframe_tail(n):
    """ Returns last n rows of the DataFrame"""
    # Operates on the module-level `dataset` loaded above.
    return dataset.tail(n)
def get_dataframe_shape():
    """Return the (rows, columns) tuple of the module-level dataset."""
    # BUG FIX: DataFrame.shape is a property, not a method -- calling it
    # as dataset.shape() raised "TypeError: 'tuple' object is not callable".
    return dataset.shape
def get_summary():
    """Returns the statistical metrics"""
    # describe() gives count/mean/std/min/quartiles/max per numeric column.
    return dataset.describe()
# Compute the statistical summary once and persist it as JSON.
stats_summary = get_summary()
checkType = isinstance(stats_summary, pd.core.frame.DataFrame)
# Stream stats summary to JSON formatted file
json_out = out_path + "/" + workload_pred
stats_summary.to_json(json_out)
# BUG FIX: this line was a Python 2 print *statement* ('print "..."'),
# a SyntaxError under Python 3 even though the rest of the file already
# uses print() calls. The printed text is unchanged.
print("Created file " + json_out)
#select all the rows of the first and third columns/attributes
# through put per sec
# x-axis is expected to be a 2D array and not 1D array
xAxis = dataset.iloc[:,[2]].values
# running time
yAxis = dataset.iloc[:,1].values
#print dataset.Payload
#print dataset["Payload"]
# confirmation that what is loaded is a panda data frame type
#print type(dataset)
# Splitting X and y into training and testing sets
# (fixed random_state keeps the 80/20 split reproducible)
X_train, X_test, y_train, y_test = train_test_split(xAxis, yAxis, test_size=0.2, random_state=0)
#print X_train
#print y_train
# Linear Regression Model
linreg = LinearRegression()
# fit the model to the training data (learn the coefficients)
linreg.fit(X_train, y_train)
# make predictions on the testing set
y_pred = linreg.predict(X_test)
#print X_test
#print y_pred
# returns a 1D view of X_test
xTest1D = X_test.ravel()
#intercept also known as bias B0
intercept = linreg.intercept_
print("Constant/Intercept: ", intercept)
# coefficient of x, also known as the slope of the graph.
# y = mx + c
# Denoted as B1
xCoef = linreg.coef_
print("Slope of the graph: ",xCoef)
# Side-by-side comparison of actual vs predicted running times.
df = pd.DataFrame({'Through put per sec': xTest1D, 'Actual': y_test, 'Predicted': y_pred})
#Evaluating the algorithm
# compute the RMSE of our predictions
rootMeanSquaredError = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
meanAbsoluteError = metrics.mean_absolute_error(y_test, y_pred)
meanSquaredError = metrics.mean_squared_error(y_test, y_pred)
print("MAE: {}".format(meanAbsoluteError))
print("MSE: {}".format(meanSquaredError))
print("RMSE: {}".format(rootMeanSquaredError))
def get_metrics():
    """Collect the fitted model's parameters and error metrics in one dict.

    Reads the module-level values computed by the regression pipeline above.
    """
    return {
        'intercept': intercept,
        'coefficient': xCoef[0],
        'mae': meanAbsoluteError,
        'mse': meanSquaredError,
        'rms': rootMeanSquaredError,
    }
#dataset.plot(x="Throughput/sec", y="RunningTime", style=".")
#plt.scatter(xAxis,yAxis, label='True Position', alpha=1)
# use the function regplot to make a scatterplot
#sns.regplot(x=dataset["Throughput/sec"], y=dataset["RunningTime"])
#sns.regplot(x="Throughput/sec", y="RunningTime", data=dataset)
# same as
#sns.regplot(x=xAxis, y=yAxis, marker="*")
# Overlay predicted (circles) and actual (green stars) running times
# against throughput, each with its own regression fit.
sns.regplot(x=xTest1D, y=y_pred, marker="o")
sns.regplot(x=xTest1D, y=y_test, color="green", marker="*")
#sns.lmplot(x=xTest1D, y=y_pred, hue=y_test, markers=["o", "x"])
#sns.pairplot(df, x_vars=[df.Actual], y_vars=[df.Predicted])
#plt.title('Payload vs Running Time')
plt.xlabel('Payload through put per sec')
plt.ylabel('Running Time (sec)')
#plt.show()
# Persist the fitted regression model for later reuse (pickle format).
model_path = model_file_path + "/" + model_name
pickle.dump(linreg, open(model_path, 'wb'))
|
{"hexsha": "266156740ce81bce75b148202246c897fb671516", "size": 4538, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/regression/linear_regression.py", "max_stars_repo_name": "joshluisaac/ml-project", "max_stars_repo_head_hexsha": "08b8d1b90510182fd8958509f11afbab1ef92850", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/regression/linear_regression.py", "max_issues_repo_name": "joshluisaac/ml-project", "max_issues_repo_head_hexsha": "08b8d1b90510182fd8958509f11afbab1ef92850", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/regression/linear_regression.py", "max_forks_repo_name": "joshluisaac/ml-project", "max_forks_repo_head_hexsha": "08b8d1b90510182fd8958509f11afbab1ef92850", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-28T21:55:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T21:55:26.000Z", "avg_line_length": 28.9044585987, "max_line_length": 108, "alphanum_fraction": 0.7496694579, "include": true, "reason": "import numpy", "num_tokens": 1158}
|
[STATEMENT]
lemma empty_mult1 [simp]:
"({#}, {#a#}) \<in> mult1 R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
have "{#a#} = {#} + {#a#}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {#a#} = {#} + {#a#}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{#a#} = {#} + {#a#}
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
{#a#} = {#} + {#a#}
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
have "{#} = {#} + {#}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {#} = {#} + {#}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{#} = {#} + {#}
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
{#} = {#} + {#}
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
have "\<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
{#a#} = {#} + {#a#}
{#} = {#} + {#}
\<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
{#a#} = {#} + {#a#}
{#} = {#} + {#}
\<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R
goal (1 subgoal):
1. ({#}, {#a#}) \<in> mult1 R
[PROOF STEP]
unfolding mult1_def
[PROOF STATE]
proof (prove)
using this:
{#a#} = {#} + {#a#}
{#} = {#} + {#}
\<forall>b. b \<in># {#} \<longrightarrow> (b, a) \<in> R
goal (1 subgoal):
1. ({#}, {#a#}) \<in> {(N, M). \<exists>a M0 K. M = add_mset a M0 \<and> N = M0 + K \<and> (\<forall>b. b \<in># K \<longrightarrow> (b, a) \<in> R)}
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
({#}, {#a#}) \<in> mult1 R
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1076, "file": "Well_Quasi_Orders_Multiset_Extension", "length": 14}
|
import os
from collections import Counter, defaultdict
import networkx
import requests
import json
from synonymes.mirnaID import miRNA, miRNAPART
from utils.cytoscape_grapher import CytoscapeGrapher
class DataBasePlotter:
    """Queries a miRNA/gene interaction database and renders the results as
    Cytoscape graphs."""

    @classmethod
    def fetchSimple(cls, requestDict):
        """POST `requestDict` to the interaction endpoint and return the parsed JSON."""
        serverAddress = "https://turingwww.bio.ifi.lmu.de"
        serverPort = None
        serverPath = "yancDB"

        # NOTE(review): the production endpoint above is immediately
        # overridden by this local development endpoint.
        serverAddress = "http://localhost"
        serverPort = "65500"
        serverPath = "/"

        def makeServerAddress(address, port, path):
            # Assemble "address[:port][/path/]" from the optional pieces.
            ret = address
            if port != None:
                ret += ":" + str(port)
            if path != None:
                ret += "/" + path + "/"
            return ret

        r = requests.post(makeServerAddress(serverAddress, serverPort, serverPath) + "/find_interactions",
                          data=json.dumps(requestDict))
        jsonRes = r.json()
        return jsonRes

    @classmethod
    def fetchGenes(cls, requestDict, gene2name=None, minPMIDEvCount=0, minTgtCount=0, MIRNASTRPARTS=[miRNAPART.MATURE, miRNAPART.ID], acceptEv = None, verbose=False):
        """Fetch interactions and build a networkx graph of gene/miRNA nodes.

        Edges are filtered by evidence counts (minPMIDEvCount for pmid-only
        edges, minTgtCount for targets) and optionally by the acceptEv
        predicate. Returns (graph, node degree Counter, per-edge data-source
        Counter, raw JSON response).
        """
        jsonRes = cls.fetchSimple(requestDict)
        graph = networkx.Graph()

        if verbose:
            print(len(jsonRes['rels']))

        nodeCounter = Counter()

        # Map every alias (upper-cased) back to its canonical gene name.
        allGenes2Name = {}
        if gene2name != None:
            for gene in gene2name:
                for elem in gene2name[gene]:
                    allGenes2Name[elem.upper()] = gene

        targets2sources = defaultdict(set)
        edge2datasourceCount = defaultdict(lambda: Counter())
        edge2celltypes = defaultdict(set)
        edge2celltypePMID = defaultdict(lambda: defaultdict(set))

        # First pass: tally evidence sources and cell-type context per edge.
        for rel in jsonRes['rels']:
            source = rel['lid']
            target = rel['rid']

            if gene2name != None:
                if source.upper() in allGenes2Name:
                    source = allGenes2Name[source.upper()]
                if target.upper() in allGenes2Name:
                    target = allGenes2Name[target.upper()]

            # Normalize miRNA identifiers; leave non-miRNA names untouched.
            try:
                target = miRNA(target)
                target = target.getStringFromParts(MIRNASTRPARTS , normalized=True)
            except:
                pass

            edge = (source, target)

            for ev in rel['evidences']:
                ds = ev['data_source']

                if acceptEv != None:
                    evRes = acceptEv(ev)
                else:
                    evRes = True

                if evRes:
                    edge2datasourceCount[edge][ds] += 1

                    if ds in ["pmid"]:
                        docid = ev["docid"]
                        allCellEvs = jsonRes['pmidinfo'].get("cells", {}).get(docid, None)
                        if allCellEvs != None:
                            for cellEv in allCellEvs:
                                cellInfo = (cellEv['termid'], cellEv['termname'])
                                # Drop generic/noisy term names.
                                if cellInfo[1].lower() in ['cell', 'protein', 'has', 'signaling', 'function', 'role', 'sfswt-1', 'has-15']:
                                    continue
                                # Keep only Cell Ontology (CL) terms.
                                if not cellInfo[0].startswith("CL"):
                                    continue
                                if not docid in jsonRes['pmidinfo']['disease']:
                                    continue
                                edge2celltypes[edge].add(cellInfo)
                                edge2celltypePMID[edge][cellInfo].add(docid)

            targets2sources[target].add(source)

        # Second pass: add edges that survive the evidence thresholds.
        for rel in jsonRes['rels']:
            source = rel['lid']
            target = rel['rid']

            if gene2name != None:
                if source.upper() in allGenes2Name:
                    source = allGenes2Name[source.upper()]
                if target.upper() in allGenes2Name:
                    target = allGenes2Name[target.upper()]

            if target.upper().startswith("MIR") or target.upper().startswith("LET"):
                try:
                    target = miRNA(target)
                    target = target.getStringFromParts(MIRNASTRPARTS , normalized=True)
                except:
                    pass
            elif source.upper().startswith("MIR") or source.upper().startswith("LET"):
                try:
                    source = miRNA(source)
                    source = source.getStringFromParts(MIRNASTRPARTS, normalized=True)
                except:
                    pass

            edge = (source, target)
            edgeCounts = edge2datasourceCount[edge]

            allEvCount = sum([1 for x in edgeCounts])
            otherEvCount = sum([1 for x in edgeCounts if x != "pmid"])

            if allEvCount == 0:
                if verbose:
                    print("Removing edge", edge, "for 0 count")
                continue

            # pmid-only edges must meet the minimum literature-evidence count.
            if otherEvCount == 0 and edge2datasourceCount[edge]["pmid"] < minPMIDEvCount:
                continue

            if len(targets2sources[target]) < minTgtCount:
                continue

            graph.add_node(source, color='red')
            graph.add_node(target, color='blue')
            graph.add_edge(source, target, celldata=edge2celltypes[edge], cellEvidence=edge2celltypePMID[edge])

            nodeCounter[source] += 1
            nodeCounter[target] += 1

        return graph, nodeCounter, edge2datasourceCount, jsonRes

    @classmethod
    def makePlotForGenes(cls, path, name, gene2name, add=None, cv=False):
        """Query the database for all genes in `gene2name` and render the
        interaction graph as an HTML/Cytoscape plot named `name` in `path`.

        cv=True restricts the query to cardiovascular system disease;
        otherwise it is restricted to Homo sapiens. `add` merges extra
        request fields (e.g. a cell-type restriction).
        """
        allGenes = set()
        for x in gene2name:
            allGenes.add(x)
            allGenes.add(gene2name[x])
        allGenes = list(allGenes)

        if cv:
            requestData = {
                'disease': [{'group': 'disease', 'termid': 'DOID:1287', 'name': 'cardiovascular system disease'}],
                'gene': allGenes, 'sentences': "false"}
        else:
            requestData = {
                'gene': allGenes, 'sentences': "false",
                'organism': [{'termid': "Homo sapiens"}]
            }

        if add != None:
            for x in add:
                requestData[x] = add[x]

        print(requestData)

        # BUG FIX: fetchGenes returns a 4-tuple (graph, nodeCounter,
        # edge2datasourceCount, jsonRes); the original unpacked only two
        # values, raising ValueError on every call.
        graph, nodeCounter, _, _ = cls.fetchGenes(requestData)

        # Scale node size by degree.
        newNodes = []
        for (node, nodeAttr) in graph.nodes(data=True):
            if node in nodeCounter:
                nodeAttr['size'] = 20 + nodeCounter[node]
            newNodes.append((node, nodeAttr))

        seenNodes = set()
        for (node, nodeAttr) in newNodes:
            if nodeCounter[node] > 0:
                seenNodes.add(node)
            # BUG FIX: networkx expects node attributes as keyword
            # arguments; passing the dict positionally is a TypeError on
            # networkx >= 2.0.
            graph.add_node(node, **nodeAttr)

        # Report requested genes that ended up with no interactions.
        print(set(allGenes).difference(seenNodes))

        mygraph = CytoscapeGrapher.showGraph(graph, location=path,
                                             name=name)
if __name__ == '__main__':
interactions = {
'CCL9': ['miR-30d-3p', 'miR-3473c', 'let-7g-5p'],
'CXCL5': ['miR-204-5p', 'let-7g-5p', 'miR-362-3p', 'miR-155-5p'],
'CXCL1': ['miR-194-2-3p', 'miR-128-3p', 'miR-194-5p', 'miR-199b-5p', 'miR-467g', 'miR-122-5p'],
'CXCL13': ['miR-122-5p'],
'CXCL14': ['miR-301b-3p'],
'CXCR2': ['let-7g-5p', 'let-7b-5p', 'let-7f-5p', 'let-7c-5p', 'let-7a-5p', 'let-7i-5p', 'miR-98-5p'],
'CXCL7': ['let-7g-5p'],
'CCL2': ['let-7a-5p', 'let-7b-5p', 'let-7f-5p', 'let-7c-5p', 'let-7g-5p', 'let-7i-5p', 'miR-181a-5p'],
'CXCL9': ['miR-1935'],
'CCL3': ['miR-30a-5p','miR-30b-5p','miR-30c-5p','miR-30d-5p','miR-30e-5p'],
'CCL7': ['miR-181a-5p', 'miR-322-5p', 'miR-29a-5p', 'miR-29b-1-5p'],
'CCL22': [ 'miR-34a-5p'],
'CXCL10': ['miR-503-3p', 'miR-186-5p'],
'CCR5': ['miR-186-5p', 'miR-669j', 'miR-21-5p', 'miR-146a-5p', 'miR-150-5p', 'miR-146b-5p', 'miR-669k-3p', 'miR-142-3p', 'miR-34a-5p'],
'CCL4': ['miR-27b-3p', 'miR-27a-3p', 'miR-21-3p', 'miR-467f'],
'CX3CL1': ['miR-15a-5p', 'miR-322-5p', 'miR-706', 'miR-762', 'miR-665-3p', 'miR-758-3p', 'miR-381-3p'],
'CXCR4': ['miR-381-3p', 'miR-21-3p', 'miR-467a-5p', 'miR-467h', 'miR-218-5p', 'miR-1a-3p', 'miR-181d-5p', 'miR-206-3p', 'miR-181b-5p', 'miR-9-5p', 'miR-132-3p', 'miR-25-3p', 'miR-467d-5p', 'miR-669k-3p', 'miR-146b-5p', 'miR-467b-5p', 'miR-467e-5p', 'miR-467f', 'miR-146a-5p'],
'CCR7': ['let-7g-5p', 'miR-23b-3p', 'miR-669p-5p', 'miR-23a-5p', 'let-7e-5p', 'miR-669l-5p', 'miR-15a-5p', 'miR-467e-5p', 'miR-21-5p', 'miR-16-5p', 'let-7d-5p', 'miR-669n', 'miR-98-5p', 'let-7b-5p', 'let-7a-5p', 'let-7i-5p', 'let-7c-5p', 'miR-15b-5p', 'miR-467h'],
'CXCL12': [
'miR-532-5p', 'miR-130b-3p', 'miR-222-3p', 'miR144-3p', 'miR-542-3p', 'miR-149-5p', 'miR-330-3p', 'miR-532-3p', 'miR-3470b', 'miR-125b-5p', 'miR-221-3p', 'miR-19b-3p', 'miR-301b-3p',
'miR-34b-5p', 'miR-125a-3p', 'miR-126-3p', 'miR-16-1-3p', 'miR-882', 'miR-497-5p', 'miR-26a-5p', 'miR-124-3p', 'miR-26b-5p', 'miR-5620-3p', 'mIR-19a-3p', 'miR-130a-3p', 'miR-690',
'miR-185-5p', 'miR-31-5p', 'miR-340-5p', 'miR-1843-5p', 'miR-466f-3p', 'miR-301a-3p', 'miR-101a-3p', 'miR-210-3p', 'miR-107-3p', 'miR-706', 'miR-23b-3p', 'miR-146a-5p', 'miR-467f',
'miR-322-5p', 'miR-15a-5p', 'miR-29b-1-5p', 'let-7e-5p', 'miR-23a-3p', 'miR-338-3p', 'miR-103-3p', 'miR-362-3p', 'let-7g-5p', 'miR-155-5p', 'miR-140-5p', 'miR-122-5p', 'miR-22-3p', 'miR-3470a', 'let-7d-5p'
]
}
genes = [x for x in interactions]
#genes = ['CCL2', 'CCL3']
genes2name = {'CXCL12': {'CXCL12'}, 'CXCL13': {'CXCL13'}, 'CXCL14': {'CXCL14'}, 'PPBP': {'PPBP'}, 'XCL1': {'XCL1'},
'CCL2': {'CCL2'}, 'PF4': {'PF4'}, 'CXCL10': {'CXCL10'}, 'CXCL5': {'CXCL5'}, 'CCL3': {'CCL3'},
'XCL2': {'XCL1', 'XCL2'}, 'CXCL1': { 'CXCL1'}, 'CCL13': {'CCL13'}, 'CXCL11': {'CXCL11'},
'CXCL8': {'CXCL8'}, 'CCL14': {'CCL14'}, 'CCL3': {'CCL3', 'CCL3L3'}, 'CCL5': {'CCL5'},
'CXCL2': { 'CXCL2'}, 'CCL15': {'CCL15'}, 'CXCL3': {'CXCL3'},
'CCL21': {'CCL21C', 'CCL21A', 'CCL21', 'GM10591', 'GM13304', 'GM21541', 'CCL21B'}, 'CCL17': {'CCL17'},
'CXCL6': {'CXCL6'}, 'CCL11': {'CCL11'}, 'CCL7': {'CCL7'}, 'CCL4': {'CCL4'}, 'CCL1': {'CCL1'},
'CXCL16': {'CXCL16'}, 'CCL18': {'CCL18'}, 'CCL19': {'GM2564', 'CCL19'}, 'CXCL9': {'CXCL9'},
'CCL8': {'CCL8', 'CCL12'}, 'CCL20': {'CCL20'}, 'C5': {'C5', 'HC'}, 'CCL22': {'CCL22'}, 'CCL24': {'CCL24'},
'CX3CL1': {'CX3CL1'}, 'CCL25': {'CCL25'}, 'CCL28': {'CCL28'}, 'CCL23': {'CCL23'},
'CCL26': {'CCL26'}, 'CCL27': {'CCL27A', 'CCL27', 'CCL27B', 'GM13306'}, 'CCL6': {'CCL6'}, 'CCL9':{'CCL9'}, 'CCL3': {'CCL3'}}
gene2name = {}
allGenes = set()
for x in genes2name:
allGenes.add(x)
for g in genes2name[x]:
allGenes.add(g)
gene2name[g] = x
for x in interactions:
if x not in allGenes:
gene2name[x] = x
allGenes.add(x)
print("Manual add", x)
allGenes = list(allGenes)
print(len(allGenes), allGenes)
os.makedirs("/mnt/c/Users/mjopp/Desktop/yanc_network/", exist_ok=True)
DataBasePlotter.makePlotForGenes('/mnt/c/Users/mjopp/Desktop/yanc_network/', 'all_chemokines', gene2name)
def subsetGene2Name(newgenes, mapping=None):
    """
    Restrict a gene->canonical-name mapping to the given genes.

    Args:
        newgenes: Genes (aliases or canonical names) to keep. The caller's
            list is no longer mutated (the original implementation removed
            matched entries from it in place).
        mapping: alias -> canonical-name dict to subset; defaults to the
            module-level ``gene2name``.

    Returns:
        dict mapping each matched alias to its canonical name; genes that do
        not occur in ``mapping`` at all map to themselves.
    """
    if mapping is None:
        mapping = gene2name
    # Work on a copy so the caller's list is left untouched.
    pending = list(newgenes)
    sgene2name = {}
    for x in mapping:
        if x in pending or mapping[x] in pending:
            sgene2name[x] = mapping[x]
            # Consume the matched entry, preferring the alias itself.
            if x in pending:
                pending.remove(x)
            else:
                pending.remove(mapping[x])
    # Anything left over is unknown to the mapping and maps to itself.
    for gene in pending:
        sgene2name[gene] = gene
    return sgene2name
# Sub-network 1: endothelial-cell restricted plot.
subPlot = ['CCL2', 'CXCL1', 'CXCL12']
subPlot += ['CXCR4', 'RGS16', 'HUR', 'ETS1', 'IRAK1', 'TRAF6']
subPlot += ['SOCS5', 'KLF2', 'KLF4', 'TAK1', 'SIRT1', 'THBS1', 'TGFBR1', 'SMAD2', 'JUN']
subPlot += ['KPNA4', 'BTRC', 'PPARA']
sgene2name = subsetGene2Name(subPlot)
addRestrict = {
    'cells': [{ "group": "cells", "name": "endothelial cell", "termid": "CL:0000115" }]
}
DataBasePlotter.makePlotForGenes('/mnt/c/Users/mjopp/Desktop/yanc_network/', 'chemokines_sp1', sgene2name, add=addRestrict, cv=True)
# Same gene set without the cell-type restriction.
# NOTE(review): addRestrict is rebuilt below but the call passes add=None,
# so the restriction is unused here -- confirm that is intended.
subPlot = ['CCL2', 'CXCL1', 'CXCL12']
subPlot += ['CXCR4', 'RGS16', 'HUR', 'ETS1', 'IRAK1', 'TRAF6']
subPlot += ['SOCS5', 'KLF2', 'KLF4', 'TAK1', 'SIRT1', 'THBS1', 'TGFBR1', 'SMAD2', 'JUN']
subPlot += ['KPNA4', 'BTRC', 'PPARA']
sgene2name = subsetGene2Name(subPlot)
addRestrict = {
    'cells': [{ "group": "cells", "name": "endothelial cell", "termid": "CL:0000115" }]
}
DataBasePlotter.makePlotForGenes('/mnt/c/Users/mjopp/Desktop/yanc_network/', 'chemokines_sp1_all_cv', sgene2name, add=None, cv=True)
# Sub-network 2: monocyte restricted plot.
subPlot = ['KLF2', 'CHI3L1', 'TLR4', 'TRAF6', 'IRAK1', 'BMPR2', 'AKT1', 'BCL6', 'LPL', 'CCL2']
sgene2name = subsetGene2Name(subPlot)
addRestrict = {
    'cells': [{ 'group': "cells", 'name': "monocyte", 'termid': "CL:0000576" }]
}
DataBasePlotter.makePlotForGenes('/mnt/c/Users/mjopp/Desktop/yanc_network/', 'chemokines_sp2', sgene2name, add=addRestrict, cv=True)
# Sub-network 2 without restriction (addRestrict again unused, add=None).
subPlot = ['KLF2', 'CHI3L1', 'TLR4', 'TRAF6', 'IRAK1', 'BMPR2', 'AKT1', 'BCL6', 'LPL', 'CCL2']
sgene2name = subsetGene2Name(subPlot)
addRestrict = {
    'cells': [{ 'group': "cells", 'name': "monocyte", 'termid': "CL:0000576" }]
}
DataBasePlotter.makePlotForGenes('/mnt/c/Users/mjopp/Desktop/yanc_network/', 'chemokines_sp2_all_cv', sgene2name, add=None, cv=True)
|
{"hexsha": "a0c664ac92d001c360d7ccf90b30ca30c534873d", "size": 13563, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/textdb/makeNetworkView.py", "max_stars_repo_name": "mjoppich/miRExplore", "max_stars_repo_head_hexsha": "32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/textdb/makeNetworkView.py", "max_issues_repo_name": "mjoppich/miRExplore", "max_issues_repo_head_hexsha": "32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/textdb/makeNetworkView.py", "max_forks_repo_name": "mjoppich/miRExplore", "max_forks_repo_head_hexsha": "32760d88d65e7bc23b2bfb49415efcd0a7c7c5e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7769230769, "max_line_length": 284, "alphanum_fraction": 0.5256211753, "include": true, "reason": "import networkx", "num_tokens": 4631}
|
"""
Collection of utility functions.
"""
import functools
from types import FunctionType
import numpy as np
import numba
import pandas as pd
from .functions import kww, kww_1e
from scipy.ndimage.filters import uniform_filter1d
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from .logging import logger
def five_point_stencil(xdata, ydata):
    """
    Estimate the derivative dy/dx with the five point stencil.

    Args:
        xdata: x values of the data points
        ydata: y values of the data points

    Returns:
        Tuple of the x values where the derivative could be evaluated
        (two points are dropped at each boundary) and the derivative there.

    This algorithm is only exact for values on a regular grid; for unevenly
    distributed data it is an approximation, albeit a quite good one.
    See: https://en.wikipedia.org/wiki/Five-point_stencil
    """
    numerator = -ydata[4:] + 8 * ydata[3:-1] - 8 * ydata[1:-3] + ydata[:-4]
    denominator = 3 * (xdata[4:] - xdata[:-4])
    return xdata[2:-2], numerator / denominator
def filon_fourier_transformation(time, correlation,
                                 frequencies=None, derivative='linear', imag=True,
                                 ):
    """
    Fourier-transformation for slowly varying functions. The Filon algorithm is
    described in detail in ref [Blochowicz]_, ch. 3.2.3.

    Args:
        time: List of times where the correlation function was sampled.
        correlation: Values of the correlation function.
        frequencies (opt.):
            List of frequencies where the fourier transformation will be calculated.
            If None the frequencies will be chosen based on the input times.
        derivative (opt.):
            Approximation algorithm for the derivative of the correlation function.
            Possible values are: 'linear', 'stencil' or a list of derivatives.
        imag (opt.): If the imaginary part of the integral should be calculated.

    Raises:
        NotImplementedError: If ``derivative`` is neither a supported keyword
            nor an iterable with one value per time point.

    If frequencies are not explicitly given they will be evenly placed on a log scale
    in the interval [1/tmax, 0.1/tmin] where tmin and tmax are the smallest respectively
    the biggest time (greater than 0) of the provided times. The frequencies are cut off
    at high values by one decade, since the fourier transformation deviates quite strongly
    in this regime.

    .. [Blochowicz]
      T. Blochowicz, Broadband dielectric spectroscopy in neat and binary
      molecular glass formers, Ph.D. thesis, Universität Bayreuth (2003)
    """
    if frequencies is None:
        f_min = 1 / max(time)
        f_max = 0.05**(1.2 - max(correlation)) / min(time[time > 0])
        frequencies = 2 * np.pi * np.logspace(
            np.log10(f_min), np.log10(f_max), num=60
        )
    # The original called frequencies.reshape(1, -1) without using the result
    # (a no-op); normalize to a 1d numpy array instead.
    frequencies = np.asarray(frequencies).reshape(-1,)

    # NOTE: string comparison must use '==', not 'is' (identity of interned
    # strings is an implementation detail and raises SyntaxWarning on 3.8+).
    if derivative == 'linear':
        derivative = (np.diff(correlation) / np.diff(time)).reshape(-1, 1)
    elif derivative == 'stencil':
        _, derivative = five_point_stencil(time, correlation)
        time = ((time[2:-1] * time[1:-2])**.5).reshape(-1, 1)
        derivative = derivative.reshape(-1, 1)
    elif np.iterable(derivative) and len(time) == len(derivative):
        # The original discarded the reshape result here; assign it.
        derivative = np.asarray(derivative).reshape(-1, 1)
    else:
        raise NotImplementedError(
            'Invalid approximation method {}. Possible values are "linear", '
            '"stencil" or a list of values.'.format(derivative)
        )
    time = np.asarray(time).reshape(-1, 1)

    integral = (np.cos(frequencies * time[1:]) - np.cos(frequencies * time[:-1])) / frequencies**2
    fourier = (derivative * integral).sum(axis=0)

    if imag:
        integral = 1j * (np.sin(frequencies * time[1:]) - np.sin(frequencies * time[:-1])) / frequencies**2
        fourier = fourier + (derivative * integral).sum(axis=0) + 1j * correlation[0] / frequencies

    return frequencies.reshape(-1,), fourier
def mask2indices(mask):
    """
    Return the selected indices of an array mask.
    If the mask is two-dimensional, the indices will be calculated for the second axis.

    Example:
        >>> mask2indices([True, False, True, False])
        array([0, 2])
        >>> mask2indices([[True, True, False], [True, False, True]])
        array([[0, 1], [0, 2]])
    """
    mask = np.array(mask)
    if len(mask.shape) == 1:
        # np.where returns a *tuple* of index arrays; unpack the single axis
        # so the result matches the documented ``array([0, 2])`` form.
        indices = np.where(mask)[0]
    else:
        # One index array per row; requires equal row selection counts to
        # stack into a rectangular array (as in the docstring example).
        indices = np.array([np.where(m)[0] for m in mask])
    return indices
def superpose(x1, y1, x2, y2, N=100, damping=1.0):
    """
    Join two overlapping datasets into one curve with a smooth log-scale
    crossover in the overlap region.

    Args:
        x1, y1: First dataset (covers the lower x range).
        x2, y2: Second dataset (covers the upper x range); a leading x == 0
            point is dropped.
        N: NOTE(review): currently unused -- kept for interface compatibility.
        damping: Exponent of the logarithmic weighting function.

    Returns:
        Tuple (xdata, ydata) of the combined curve.
    """
    if x2[0] == 0:
        x2 = x2[1:]
        y2 = y2[1:]

    reg1 = x1 < x2[0]
    reg2 = x2 > x1[-1]
    # Logarithmically spaced grid across the overlap region. num must be an
    # integer: the original passed a float, which modern numpy rejects.
    x_ol = np.logspace(
        np.log10(max(x1[~reg1][0], x2[~reg2][0]) + 0.001),
        np.log10(min(x1[~reg1][-1], x2[~reg2][-1]) - 0.001),
        int((sum(~reg1) + sum(~reg2)) / 2)
    )

    def w(x):
        # Weight of the first dataset: 1 at the start of the overlap, 0 at the end.
        A = x_ol.min()
        B = x_ol.max()
        return (np.log10(B / x) / np.log10(B / A))**damping

    xdata = np.concatenate((x1[reg1], x_ol, x2[reg2]))
    y1_interp = interp1d(x1[~reg1], y1[~reg1])
    y2_interp = interp1d(x2[~reg2], y2[~reg2])
    ydata = np.concatenate((
        y1[x1 < x2.min()],
        w(x_ol) * y1_interp(x_ol) + (1 - w(x_ol)) * y2_interp(x_ol),
        y2[x2 > x1.max()]
    ))
    return xdata, ydata
def runningmean(data, nav):
    """
    Compute the running mean of a 1-dimensional array.

    Args:
        data: Input data of shape (N, )
        nav: Number of points over which the data will be averaged

    Returns:
        Array of shape (N-(nav-1), )
    """
    window = np.ones(nav) / nav
    return np.convolve(data, window, mode='valid')
def moving_average(A, n=3):
    """
    Compute the running mean of an array.
    Uses the second axis if it is of higher dimensionality.

    Args:
        A: Input data of shape (N, ) or (M, N)
        n: Number of points over which the data will be averaged

    Returns:
        Array of shape (N-(n-1), ) (trimmed along the last axis for 2D input)

    Supports 2D-Arrays.
    Slower than runningmean for small n but faster for large n.
    """
    head = int(n / 2)        # points trimmed at the front
    tail = int((n - 1) / 2)  # points trimmed at the back
    smoothed = uniform_filter1d(A, n)
    # An empty tail means "slice to the end" (negative zero would be wrong).
    trim = slice(head, -tail if tail else None)
    if A.ndim > 1:
        return smoothed[:, trim]
    return smoothed[trim]
def coherent_sum(func, coord_a, coord_b):
    """
    Perform a coherent sum over two arrays :math:`A, B`.

    .. math::
      \\frac{1}{N_A N_B}\\sum_i\\sum_j f(A_i, B_j)

    For numpy arrays this is equal to::

        N, d = x.shape
        M, d = y.shape
        coherent_sum(f, x, y) == f(x.reshape(N, 1, d), x.reshape(1, M, d)).sum()

    Args:
        func: Called for every pair of items from both arrays; must return a scalar.
        coord_a, coord_b: The two arrays.
    """
    # Plain Python functions are jit-compiled so the nopython kernel below
    # can call them.
    if isinstance(func, FunctionType):
        func = numba.jit(func, nopython=True, cache=True)

    @numba.jit(nopython=True)
    def kernel(xs, ys):
        total = 0
        for a in range(len(xs)):
            for b in range(len(ys)):
                total += func(xs[a], ys[b])
        return total

    return kernel(coord_a, coord_b)
def coherent_histogram(func, coord_a, coord_b, bins, distinct=False):
    """
    Compute a coherent histogram over two arrays, equivalent to coherent_sum.
    For numpy arrays this is equal to::
        N, d = x.shape
        M, d = y.shape
        bins = np.arange(1, 5, 0.1)
        coherent_histogram(f, x, y, bins) == histogram(f(x.reshape(N, 1, d), x.reshape(1, M, d)), bins=bins)
    Args:
        func: The function is called for each two items in both arrays, this should return a scalar value.
        coord_a, coord_b: The two arrays.
        bins: The bins used for the histogram must be distributed regularly on a linear scale.
        distinct: If True, skip pairs with identical indices (i == j).
    """
    # Plain Python functions are jit-compiled so the nopython kernel can call them.
    if isinstance(func, FunctionType):
        func = numba.jit(func, nopython=True, cache=True)

    # Regular spacing is required because the bin index is computed directly
    # as (h - hmin) / dh below instead of a bin search.
    assert np.isclose(np.diff(bins).mean(), np.diff(bins)).all(), 'A regular distribution of bins is required.'
    hmin = bins[0]
    hmax = bins[-1]
    N = len(bins) - 1
    dh = (hmax - hmin) / N

    @numba.jit(nopython=True)
    def cohsum(coord_a, coord_b):
        res = np.zeros((N,))
        for i in range(len(coord_a)):
            for j in range(len(coord_b)):
                if not (distinct and i == j):
                    h = func(coord_a[i], coord_b[j])
                    # Values outside [hmin, hmax) are silently dropped.
                    if hmin <= h < hmax:
                        res[int((h - hmin) / dh)] += 1
        return res

    return cohsum(coord_a, coord_b)
def Sq_from_gr(r, gr, q, ρ):
    r"""
    Compute the static structure factor as fourier transform of the pair correlation function. [Yarnell]_

    .. math::
        S(q) - 1 = \frac{4\pi \rho}{q}\int\limits_0^\infty (g(r) - 1)\,r \sin(qr) dr

    Args:
        r: Radii of the pair correlation function
        gr: Values of the pair correlation function
        q: List of q values
        ρ: Average number density

    .. [Yarnell]
       Yarnell, J. L., Katz, M. J., Wenzel, R. G., & Koenig, S. H. (1973). Physical Review A, 7(6), 2130–2144.
       http://doi.org/10.1017/CBO9781107415324.004
    """
    rs = r.reshape(-1, 1)
    qs = q.reshape(1, -1)
    # Integrand (g(r) - 1) * r * sin(q r) on the (r, q) grid.
    integrand = ((gr - 1) * r).reshape(-1, 1) * np.sin(rs * qs)
    integral = np.trapz(x=r, y=integrand, axis=0)
    return integral * (4 * np.pi * ρ / q) + 1
def Fqt_from_Grt(data, q):
    """
    Calculate the ISF from the van Hove function for a given q value by fourier transform.

    .. math::
      F_q(t) = \\int\\limits_0^\\infty dr \\; G(r, t) \\frac{\\sin(qr)}{qr}

    Args:
        data:
            Input data can be a pandas dataframe with columns 'r', 'time' and 'G'
            or an array of shape (N, 3), of tuples (r, t, G).
        q: Value of q.

    Returns:
        If input data was a dataframe the result will be returned as one too, else two arrays
        will be returned, which will contain times and values of Fq(t) respectively.
    """
    is_dataframe = isinstance(data, pd.DataFrame)
    frame = data.copy() if is_dataframe else pd.DataFrame(data, columns=['r', 'time', 'G'])
    # sin(qr)/(qr) equals numpy's normalized sinc evaluated at qr/pi.
    frame['isf'] = frame['G'] * np.sinc(q / np.pi * frame['r'])
    isf = frame.groupby('time')['isf'].sum()
    if is_dataframe:
        return pd.DataFrame({'time': isf.index, 'isf': isf.values, 'q': q})
    return isf.index, isf.values
@numba.jit
def norm(vec):
    # Euclidean (L2) norm of a vector; jit-compiled so it can be used from
    # other numba kernels in this module.
    return (vec**2).sum()**0.5
def singledispatchmethod(func):
    """A decorator to define a generic instance method, analogue to functools.singledispatch.

    Dispatch is performed on the type of the *second* positional argument
    (the first one is ``self``).
    """
    dispatcher = functools.singledispatch(func)

    @functools.wraps(func)
    def wrapper(*args, **kw):
        implementation = dispatcher.dispatch(args[1].__class__)
        return implementation(*args, **kw)

    # Expose register() so implementations can be added for specific types.
    wrapper.register = dispatcher.register
    return wrapper
def histogram(data, bins):
    """Compute the histogram of the given data. Uses numpy.bincount function, if possible.

    Returns a tuple of (counts, bin centers), where the centers are the
    2-point running mean of the bin edges.
    """
    dbins = np.diff(bins)
    dx = dbins.mean()
    # Fast path: edges start at zero and are (numerically) evenly spaced, so
    # the bin index of a value is simply floor(value / dx) and bincount works.
    if bins.min() == 0 and dbins.std() < 1e-6:
        logger.debug("Using numpy.bincount for histogramm compuation.")
        hist = np.bincount((data // dx).astype(int), minlength=len(dbins))[:len(dbins)]
    else:
        hist = np.histogram(data, bins=bins)[0]

    return hist, runningmean(bins, 2)
def quick1etau(t, C, n=7):
    """
    Estimate the time for a correlation function that goes from 1 to 0 to decay to 1/e.

    If successful, returns tau as fine interpolation with a kww fit.
    The data is reduced to points around 1/e to remove short and long times from the kww fit!

    Args:
        t: the time values
        C: C(t), the correlation function
        n: the minimum number of points around 1/e required for the kww fit

    Returns:
        The estimated 1/e decay time; falls back to the sampled time closest
        to 1/e whenever the fit cannot be performed or fails.
    """
    # First rough estimate: the sampled time closest to 1/e. This is returned
    # if the interpolation fails!
    tau_est = t[np.argmin(np.fabs(C-np.exp(-1)))]
    # Reduce the data to points around 1/e, widening the window until at
    # least n points fall inside (or the window cannot grow any further).
    k = 0.1
    mask = (C < np.exp(-1)+k) & (C > np.exp(-1)-k)
    while np.sum(mask) < n:
        k += 0.01
        mask = (C < np.exp(-1)+k) & (C > np.exp(-1)-k)
        if k + np.exp(-1) > 1.0:
            break
    # If enough points are found, try a curve fit; otherwise (and in case of
    # failure) keep using the rough estimate.
    if np.sum(mask) >= n:
        try:
            with np.errstate(invalid='ignore'):
                fit, _ = curve_fit(kww, t[mask], C[mask], p0=[0.9, tau_est, 0.9], maxfev=100000)
            tau_est = kww_1e(*fit)
        except Exception:
            # Deliberate best-effort fallback, but a bare ``except`` would
            # also swallow KeyboardInterrupt/SystemExit -- restrict it.
            pass
    return tau_est
|
{"hexsha": "a6bfb510039cdf17e7ca6210a56a41572c026280", "size": 12338, "ext": "py", "lang": "Python", "max_stars_repo_path": "mdevaluate/utils.py", "max_stars_repo_name": "lheyer/mdevaluate", "max_stars_repo_head_hexsha": "990d9714b435d0d9cb8ff5a74533d78b0a5a1578", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mdevaluate/utils.py", "max_issues_repo_name": "lheyer/mdevaluate", "max_issues_repo_head_hexsha": "990d9714b435d0d9cb8ff5a74533d78b0a5a1578", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mdevaluate/utils.py", "max_forks_repo_name": "lheyer/mdevaluate", "max_forks_repo_head_hexsha": "990d9714b435d0d9cb8ff5a74533d78b0a5a1578", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3459459459, "max_line_length": 111, "alphanum_fraction": 0.6031771762, "include": true, "reason": "import numpy,from scipy,import numba", "num_tokens": 3489}
|
import numpy as np
from typing import List
from .genome import Genome
def one_point_crossover(father: Genome, mother: Genome) -> List[Genome]:
    """Performs a one point crossover for parents

    Arguments:
        father {Genome} -- Parent one
        mother {Genome} -- Parent two

    Returns:
        List[Genome] -- Two offsprings
    """
    # Draw a single cut position; each child takes one side from each parent.
    split = np.random.randint(0, len(father))
    first_child = list(father[:split]) + list(mother[split:])
    second_child = list(mother[:split]) + list(father[split:])
    return [
        father.create_children(np.array(first_child), mother=mother),
        father.create_children(np.array(second_child), mother=mother)
    ]
def k_point_crossover(father: Genome, mother: Genome, k=None) -> List[Genome]:
    """Performs a k point crossover for parents

    Arguments:
        father {Genome} -- Parent one
        mother {Genome} -- Parent two

    Keyword Arguments:
        k {int/None} -- how many crossover points exist (default: None = inner k)

    Returns:
        List[Genome] -- Two offsprings
    """
    try:
        if k is None:
            # Allow a default to be attached to the function object itself.
            k = k_point_crossover.k
    except AttributeError:
        pass  # If k_point_crossover does not have k set, do nothing
    finally:
        if k is None:  # If k is not set, standard value of 2 will be set
            k = 2
    # Random cut points plus the fixed end points, sorted ascending.
    # Duplicates simply produce empty segments.
    cross_points = np.random.randint(0, len(father), k)
    cross_points = np.append(cross_points, 0)
    cross_points = np.append(cross_points, len(father))
    cross_points = np.sort(cross_points)

    parents = [father, mother]
    offspring1 = []
    offspring2 = []
    for i in range(k+1):
        # Alternate the parent each segment is taken from. BUGFIX: the
        # original used ``parents[i+1 % 2]`` which, by operator precedence,
        # is ``parents[i + 1]`` and raises IndexError for every i >= 1.
        offspring1 = offspring1 + list(parents[i % 2][cross_points[i]:cross_points[i+1]])
        offspring2 = offspring2 + list(parents[(i + 1) % 2][cross_points[i]:cross_points[i+1]])
    offsprings = [
        father.create_children(np.array(offspring1), mother=mother),
        father.create_children(np.array(offspring2), mother=mother)
    ]
    return offsprings
def uniform_crossover(father: Genome, mother: Genome) -> List[Genome]:
    """Performs a uniform random crossover for parents

    Arguments:
        father {Genome} -- Parent one
        mother {Genome} -- Parent two

    Returns:
        List[Genome] -- Two offsprings
    """
    parents = (father, mother)
    length = len(parents[0])
    # For every gene draw 0/1 to decide which parent child one inherits from;
    # child two always receives the gene from the other parent.
    choice = np.random.randint(0, 2, length)
    child_one = [parents[choice[i]][i] for i in range(length)]
    child_two = [parents[1 - choice[i]][i] for i in range(length)]
    return [
        father.create_children(np.array(child_one), mother=mother),
        father.create_children(np.array(child_two), mother=mother)
    ]
|
{"hexsha": "f8b7d5f7fcb27a337b2094b82490690472be9182", "size": 2765, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/genetic_algorithm/crossover.py", "max_stars_repo_name": "ahillbs/minimum_scan_cover", "max_stars_repo_head_hexsha": "e41718e5a8e0e3039d161800da70e56bd50a1b97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/genetic_algorithm/crossover.py", "max_issues_repo_name": "ahillbs/minimum_scan_cover", "max_issues_repo_head_hexsha": "e41718e5a8e0e3039d161800da70e56bd50a1b97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/genetic_algorithm/crossover.py", "max_forks_repo_name": "ahillbs/minimum_scan_cover", "max_forks_repo_head_hexsha": "e41718e5a8e0e3039d161800da70e56bd50a1b97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0674157303, "max_line_length": 91, "alphanum_fraction": 0.6466546112, "include": true, "reason": "import numpy", "num_tokens": 701}
|
[STATEMENT]
lemma simple_path_eq_arc: "pathfinish g \<noteq> pathstart g \<Longrightarrow> (simple_path g = arc g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pathfinish g \<noteq> pathstart g \<Longrightarrow> simple_path g = arc g
[PROOF STEP]
by (simp add: arc_simple_path)
|
{"llama_tokens": 103, "file": null, "length": 1}
|
import numpy as np
from scipy import misc
from PIL import Image
import pickle
import cv2
IMAGE_SIZE = 28
def images_to_sprite(data):
    """
    Creates the sprite image

    Parameters
    ----------
    data: [batch_size, height, width, n_channel], or [batch_size, height,
        width] for grayscale input (tiled to 3 channels)

    Returns
    -------
    data: Sprite image of shape [height, width, n_channel], dtype uint8
    """
    if len(data.shape) == 3:
        # Grayscale input: replicate to 3 channels.
        data = np.tile(data[..., np.newaxis], (1, 1, 1, 3))
    data = data.astype(np.float32)
    # Normalize each image independently to [0, 1]. Renamed from min/max to
    # avoid shadowing the builtins, and guard constant images (max == 0 after
    # the shift) so the division cannot produce NaNs.
    img_min = np.min(data.reshape((data.shape[0], -1)), axis=1)
    data = (data.transpose(1, 2, 3, 0) - img_min).transpose(3, 0, 1, 2)
    img_max = np.max(data.reshape((data.shape[0], -1)), axis=1)
    img_max[img_max == 0] = 1
    data = (data.transpose(1, 2, 3, 0) / img_max).transpose(3, 0, 1, 2)
    # Pad with black images up to the next square number, then tile n x n.
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, 0),
               (0, 0)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant',
                  constant_values=0)
    data = data.reshape((n, n) + data.shape[1:]).transpose(
        (0, 2, 1, 3) + tuple(range(4, data.ndim + 1))
    )
    data = data.reshape(
        (n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    data = (data * 255).astype(np.uint8)
    return data
if __name__ == '__main__':
    # Build a sprite sheet (for embedding visualisation) from pickled face crops.
    data = []
    with open('/home/pham.hoang.anh/prj/face_detect/X_train_triplet.pkl', 'rb') as f:
        X = pickle.load(f)
    for x in X:
        # OpenCV stores images as BGR; convert to RGB before resizing.
        x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 -- this
        # script requires an old SciPy, or should migrate to PIL/cv2 resize.
        img = misc.imresize(x, (IMAGE_SIZE, IMAGE_SIZE, 3))
        data.append(img)
    img_sprite = images_to_sprite(np.array(data))
    sprite = Image.fromarray(img_sprite.astype(np.uint8))
    sprite.save("/home/pham.hoang.anh/prj/face_detect/visualize/128D-Facenet-LFW-Embedding-Visualisation/oss_data/LFW_HA_sprites.png")
|
{"hexsha": "e5565eab261b8ec19595ad5cd4f35a274567a9d5", "size": 1799, "ext": "py", "lang": "Python", "max_stars_repo_path": "sprite_image.py", "max_stars_repo_name": "hoanganhpham1006/face-detector-Visualize", "max_stars_repo_head_hexsha": "8bf4009768516f138221ed510560d9a3349544d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-16T04:49:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-16T04:49:43.000Z", "max_issues_repo_path": "sprite_image.py", "max_issues_repo_name": "leanhtuanwru/face-detector-Visualize", "max_issues_repo_head_hexsha": "d057febd6a8b65521ca38e28845c52bf3329a936", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sprite_image.py", "max_forks_repo_name": "leanhtuanwru/face-detector-Visualize", "max_forks_repo_head_hexsha": "d057febd6a8b65521ca38e28845c52bf3329a936", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.125, "max_line_length": 134, "alphanum_fraction": 0.5825458588, "include": true, "reason": "import numpy,from scipy", "num_tokens": 562}
|
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_shapes import nmn3_seq
from models_shapes import nmn3_assembler
from models_shapes.nmn3_modules import Modules
from models_shapes.nmn3_layers import fc_layer as fc, conv_layer as conv, shapes_convnet as shapes_convnet
class NMN3ModelAtt:
    """Neural Module Network with attention for the SHAPES task.

    Builds, inside a single variable scope, the full pipeline:
    CNN image features -> seq2seq module-layout generation ->
    module assembly/execution (TensorFlow Fold) -> answer scores,
    plus entropy and L2 regularization terms.
    """
    def __init__(self, image_batch, text_seq_batch, seq_length_batch,T_decoder,
        num_vocab_txt, embed_dim_txt, num_vocab_nmn,embed_dim_nmn, lstm_dim,
        num_layers, EOS_idx, encoder_dropout, decoder_dropout, decoder_sampling,
        num_choices, gt_layout_batch=None, scope='neural_module_network', reuse=None):

        with tf.variable_scope(scope, reuse=reuse):
            # STEP 1: Get Visual feature by CNN
            with tf.variable_scope('image_feature_cnn'):
                self.image_feat_grid = shapes_convnet(image_batch)

            # STEP 2: Get module layout tokens by Seq2seq RNN
            with tf.variable_scope('layout_generation'):
                att_seq2seq = nmn3_seq.AttentionSeq2Seq(text_seq_batch,
                    seq_length_batch, T_decoder, num_vocab_txt,embed_dim_txt,
                    num_vocab_nmn, embed_dim_nmn, lstm_dim, num_layers, EOS_idx,
                    encoder_dropout, decoder_dropout, decoder_sampling,
                    gt_layout_batch)
                # Set the variables in att_seq2seq
                self.att_seq2seq = att_seq2seq
                self.predicted_tokens = att_seq2seq.predicted_tokens
                self.token_probs = att_seq2seq.token_probs
                self.neg_entropy = att_seq2seq.neg_entropy
                self.word_vecs = att_seq2seq.word_vecs
                self.atts = att_seq2seq.atts

                # Log probability of each generated sequence (sum over time steps).
                self.log_seq_prob = tf.reduce_sum(tf.log(self.token_probs), axis=0)

            # STEP 3: Build Neural Module Network by assembling different modules
            with tf.variable_scope('layout_execution'):
                self.modules = Modules(self.image_feat_grid, self.word_vecs, num_choices)
                # Recursion of the Find & Transform & And modules according to the layout
                # and get output scores for choice with AndModule
                recursion_result = self.modules.do_recur()
                output_scores = self.modules.get_output_scores(recursion_result)
                # Compile and get the output scores
                self.compiler = td.Compiler.create(output_scores)
                self.scores = self.compiler.output_tensors[0]

            # Step 4: Regularization: Entropy + L2
            self.entropy_reg = tf.reduce_mean(self.neg_entropy)
            # L2 penalty over all trainable '...weights' variables in this scope.
            module_weights = [v for v in tf.trainable_variables()
                              if (scope in v.op.name and v.op.name.endswith('weights'))]
            self.l2_reg = tf.add_n([tf.nn.l2_loss(v) for v in module_weights])
|
{"hexsha": "12b5072e651e26a10e10bbcdb9c78532a470ec29", "size": 3102, "ext": "py", "lang": "Python", "max_stars_repo_path": "n2nmn-tensorlayer/models_shapes/nmn3_model.py", "max_stars_repo_name": "jiaqi-xi/Neural-Module-Networks.Tensorlayer", "max_stars_repo_head_hexsha": "3607e6717473aed51c653cf931dc7d80866b0227", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "n2nmn-tensorlayer/models_shapes/nmn3_model.py", "max_issues_repo_name": "jiaqi-xi/Neural-Module-Networks.Tensorlayer", "max_issues_repo_head_hexsha": "3607e6717473aed51c653cf931dc7d80866b0227", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "n2nmn-tensorlayer/models_shapes/nmn3_model.py", "max_forks_repo_name": "jiaqi-xi/Neural-Module-Networks.Tensorlayer", "max_forks_repo_head_hexsha": "3607e6717473aed51c653cf931dc7d80866b0227", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-12T15:45:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-31T13:07:46.000Z", "avg_line_length": 49.2380952381, "max_line_length": 106, "alphanum_fraction": 0.6624758221, "include": true, "reason": "import numpy", "num_tokens": 654}
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 15:40:44 2015
@author: poldrack
"""
import os,glob
import urllib
import numpy
def dequote_string(l):
    """Strip double quotes from l, replacing spaces inside quotes with underscores."""
    if l.find('"') < 0:
        return l
    inside = False
    out = []
    for ch in l:
        if ch == '"':
            # Quote characters toggle the state and are dropped from the output.
            inside = not inside
        elif inside and ch == ' ':
            out.append('_')
        else:
            out.append(ch)
    return ''.join(out)
def load_R_dataframe(filename):
    """
    load an R data frame from text file or url or filehandle

    Returns:
        (data, rowlabels, header): numeric 2d array, row label list, and the
        whitespace-split header tokens.
    """
    try:
        # check whether it's a urllib handle (duck-typed via its .url attribute)
        filename.url
        f=filename
    except:
        # NOTE(review): urllib.urlopen is Python 2 only -- this module appears
        # to target py2; confirm before running under Python 3.
        if filename.find('http')==0:
            f=urllib.urlopen(filename)
        else:
            f=open(filename)
    header=f.readline().strip().split()
    lines=f.readlines()
    f.close()
    data=[]
    rowlabels=[]
    for l in lines:
        # first need to replace spaces contained within quotes
        l=dequote_string(l)
        l_s=[i.replace('"','') for i in l.strip().split()]
        # First column is the row label, the rest are numeric values.
        rowlabels.append(l_s[0])
        data.append([float(i) for i in l_s[1:]])
    data=numpy.array(data)
    return data,rowlabels,header
def load_wgcna_module_assignments(filename):
    """
    load module assignment file

    Same parsing as load_R_dataframe but without a header line.

    Returns:
        (data, rowlabels): numeric 2d array and row label list.
    """
    try:
        # check whether it's a urllib handle (duck-typed via its .url attribute)
        filename.url
        f=filename
    except:
        # NOTE(review): urllib.urlopen is Python 2 only -- confirm before
        # running under Python 3.
        if filename.find('http')==0:
            f=urllib.urlopen(filename)
        else:
            f=open(filename)
    lines=f.readlines()
    f.close()
    data=[]
    rowlabels=[]
    for l in lines:
        # first need to replace spaces contained within quotes
        l=dequote_string(l)
        l_s=[i.replace('"','') for i in l.strip().split()]
        # First column is the row label, the rest are numeric values.
        rowlabels.append(l_s[0])
        data.append([float(i) for i in l_s[1:]])
    data=numpy.array(data)
    return data,rowlabels
def load_dataframe(filename,thresh=0.1):
    """Load a pairwise-statistics table, keeping rows whose last column
    (p value) is below thresh.

    Returns:
        dict mapping (col1, col2) -> [p value, t stat, correlation, int flag].
    """
    # NOTE(review): urllib.urlopen is Python 2 only -- confirm before running
    # under Python 3.
    if not filename.find('http')==0:
        f=open(filename)
    else:
        f=urllib.urlopen(filename)
    # return p value, t stat, and correlation
    header=f.readline()
    lines=f.readlines()
    f.close()
    data={}
    for l in lines:
        # first need to replace spaces contained within quotes
        l=dequote_string(l)
        l_s=[i.replace('"','') for i in l.strip().split()]
        # Best-effort parse: rows that fail numeric conversion are skipped
        # silently by design.
        try:
            if float(l_s[-1])<thresh:
                #print l_s
                data[(l_s[1],l_s[2])]=[float(l_s[-1]),float(l_s[4]),float(l_s[3]),int(l_s[-2])]
        except:
            pass
    return data
|
{"hexsha": "e54051f582fa55e88a659af8a753d77f0b35b13b", "size": 2655, "ext": "py", "lang": "Python", "max_stars_repo_path": "myconnectome/utils/load_dataframe.py", "max_stars_repo_name": "poldrack/myconnectome", "max_stars_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-04-02T16:43:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-17T20:04:26.000Z", "max_issues_repo_path": "myconnectome/utils/load_dataframe.py", "max_issues_repo_name": "poldrack/myconnectome", "max_issues_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-05-19T02:57:22.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-17T17:36:16.000Z", "max_forks_repo_path": "myconnectome/utils/load_dataframe.py", "max_forks_repo_name": "poldrack/myconnectome", "max_forks_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-05-21T17:01:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T04:28:08.000Z", "avg_line_length": 24.3577981651, "max_line_length": 91, "alphanum_fraction": 0.5551789077, "include": true, "reason": "import numpy", "num_tokens": 673}
|
import argparse
import torch
# pip install --upgrade torchvision (Run this after installing torch)
import torchvision
from torchvision.transforms import functional as F
import numpy as np
import os
import time
import torch.nn.parallel
from contextlib import suppress
from non_max_suppression import calculate_iou_on_label, get_labels_categ
from interface import develop_voice_over
from efficientdet_processing import simple_iou_thresh, transforms_coco_eval
import cv2
from effdet import create_model
from effdet.data import resolve_input_config
from timm.models.layers import set_layer_config
from PIL import Image
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
classes = ["Placeholder", "Apples", "Strawberry", "Peach", "Tomato", "Bad_Spots"]
COLORS = [(0, 0, 0), (0, 255, 0), (0, 0 , 255), (255, 255, 0), (255, 0, 0)]
''' Setting model device'''
def set_device(input_device):
    # Store the torch device for this module in the global ``device`` and
    # report it; all model/tensor placement below uses this global.
    global device
    device = torch.device(input_device)
    print("Device: {}".format(input_device))
'''Intializing Model State Dicts'''
def create_effdet():
    """Build an EfficientDet-D0 prediction bench from a local checkpoint.

    Returns:
        (bench, amp_autocast): the model in eval mode on the global ``device``
        and the autocast context (``contextlib.suppress``, i.e. a no-op here).
    """
    model_args = dict()
    model_args['num_classes'] = len(classes) - 1
    model_args['pretrained'] = True
    model_args['checkpoint'] = "device/effecientdet_d0/effecientdet_d0_brain.pth.tar"
    model_args['redundant_bias'] = None
    model_args['model'] = 'efficientdet_d0'
    model_args['soft_nms'] = None
    model_args['use_ema'] = True
    model_args['img_size'] = 512
    model_args['torchscript'] = True
    # NOTE(review): 'pretrained' was already set to True above; this second
    # assignment only matters if that default changes.
    model_args['pretrained'] = model_args['pretrained'] or not model_args['checkpoint']  # might as well try to validate something
    # create model
    with set_layer_config(scriptable=model_args['torchscript']):
        extra_args = dict(image_size=(model_args['img_size'] ,model_args['img_size']))
        bench = create_model(
            model_args['model'],
            bench_task='predict',
            num_classes=model_args['num_classes'],
            pretrained=model_args['pretrained'],
            redundant_bias=model_args['redundant_bias'],
            soft_nms=model_args['soft_nms'],
            checkpoint_path=model_args['checkpoint'],
            checkpoint_ema=model_args['use_ema'],
            **extra_args,
        )
        model_config = bench.config

    # NOTE(review): input_config is computed but never used below -- confirm
    # whether preprocessing is meant to consume it.
    input_config = resolve_input_config(model_args, model_config)

    param_count = sum([m.numel() for m in bench.parameters()])
    print('Model %s created, param count: %d' % (model_args['model'], param_count))

    bench = bench.to(device)

    # No mixed-precision autocast is enabled; suppress acts as a no-op context.
    amp_autocast = suppress

    bench.eval()
    return bench, amp_autocast
def load_torchvision_models(model_name, map_loc):
    """Build one of the two torchvision detectors, load its fine-tuned
    weights from disk and return it in eval mode.

    Args:
        model_name: "ssdlite_mobilenet" selects SSDLite; anything else falls
            through to the MobileNet Faster R-CNN branch.
        map_loc: ``map_location`` forwarded to ``torch.load``.
    """
    if model_name == "ssdlite_mobilenet":
        net = torchvision.models.detection.ssdlite320_mobilenet_v3_large(pretrained_backbone=True, num_classes = len(classes))
        state = torch.load("device/ssdlite_mobilenet/ssdlite_mobilenet_brain.pth", map_location=map_loc)
        net.load_state_dict(state)
        print("Loaded model weights for ssdlite_mobilenet turning to eval mode")
        return net.eval()
    # Default branch: MobileNet-backed Faster R-CNN with resized heads.
    net = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)
    net.roi_heads.box_predictor.cls_score.out_features = len(classes)
    net.roi_heads.box_predictor.bbox_pred.out_features = 4 * len(classes)
    state = torch.load("device/mobilenet_fasterrcnn/mobilenet_fasterrcnn_brain.pth", map_location = map_loc)
    net.load_state_dict(state)
    print("Loaded model weights for mobilenet_fasterrcnn turning to eval model")
    return net.eval()
'''Inference functions for all models'''
def infer_effdet(model, frame, amp_autocast, nms_thresh, voice_over):
#Preprocessing steps for every frame
transformed_frame, img_scale = transforms_coco_eval(frame, device, 512)
with amp_autocast():
output = model(transformed_frame)[0]
final_out = list()
for ii, pred in enumerate(output):
#Nonmax Suppression
if pred[-2] > float(nms_thresh):
final_out.append(pred)
else:
break
if len(final_out) != 0:
final_out = torch.stack(final_out)
if len(final_out) > 1:
final_out = simple_iou_thresh(final_out, 0.2)
else:
final_out = []
torch_image = F.to_tensor(frame)
written_image = draw_boxes(final_out[:, :4] * img_scale, final_out[:, -1].to(torch.uint8), torch_image, put_text= True)
cv2.imshow('Output', written_image)
if voice_over:
results = [{
"boxes" : final_out[:, :4] * img_scale,
"labels": final_out[:, -1].to(torch.uint8),
"scores": final_out[:, -2]
}]
voice_over = develop_voice_over(results, classes)
print(voice_over)
cv2.waitKey(0)
def infer_image(image, trained_model, distance_thresh, iou_thresh, voice_over):
    """Run a torchvision detector on one PIL image, post-process the
    detections per category and display them in an OpenCV window.

    Args:
        image: input PIL image.
        trained_model: torchvision detection model (moved to ``device`` here).
        distance_thresh: confidence cut-off; detections scoring below it are
            dropped.
        iou_thresh: two-element sequence ``[fruit_iou, bad_spot_iou]`` used to
            suppress overlapping boxes within each category.
        voice_over: when truthy, also print a spoken-style summary.
    """
    torch_image = F.to_tensor(image).unsqueeze(0).to(device)
    trained_model.to(device)
    trained_model.eval()
    print("Image Size: {}".format(torch_image.size()))
    start_time = time.time()
    results = trained_model(torch_image)
    end_time = time.time() - start_time
    print("Time of Inference {:0.2f}".format(end_time))
    # Keep the leading run of detections above the confidence threshold.
    # NOTE(review): the early break relies on "scores" being sorted in
    # descending order -- confirm against the torchvision model output.
    valid_box_count = 0
    for ii, score in enumerate(results[0]["scores"]):
        if score < distance_thresh:
            low_index_start = ii
            break
        else:
            valid_box_count += 1
    # All boxes passed (or there were none): keep everything.
    if valid_box_count == len(results[0]["scores"]):
        low_index_start = len(results[0]["scores"])
    for key in results[0]:
        results[0][key] = results[0][key][:low_index_start]
    # Per-category IoU thresholds; order in the list is [fruit, bad_spot].
    fruit_spot_iou_thresh, bad_spot_iou_thresh = iou_thresh
    #Update when I get more data of fruits and when running for script beware of classes.
    # Split detection indices into the "bad spot" category vs. everything else.
    bad_spot_index = [ii for ii, label in enumerate(results[0]["labels"]) if label in get_labels_categ(classes, "bad_spot")]
    fruit_index = [ii for ii, _ in enumerate(results[0]["labels"]) if ii not in bad_spot_index]
    bad_spot_results, fruit_results = dict(), dict()
    # NOTE(review): tensor[[index_list]] indexes with a nested list -- verify
    # it selects rows exactly like tensor[index_list] for these tensors.
    for key in results[0]:
        bad_spot_results[key], fruit_results[key] = results[0][key][[bad_spot_index]], results[0][key][[fruit_index]]
    assert len(bad_spot_results["boxes"]) == len(bad_spot_results["scores"]) == len(bad_spot_results["labels"])
    assert len(fruit_results["boxes"]) == len(fruit_results["scores"]) == len(fruit_results["labels"])
    len_of_bad_spots, len_of_fruit = len(bad_spot_results["boxes"]), len(fruit_results["boxes"])
    # Suppress overlapping boxes within each category (only worthwhile when
    # a category has more than one box).
    if len_of_bad_spots > 1:
        bad_spot_results = calculate_iou_on_label(bad_spot_results, len_of_bad_spots, bad_spot_iou_thresh, device)
    if len_of_fruit > 1:
        fruit_results = calculate_iou_on_label(fruit_results, len_of_fruit, fruit_spot_iou_thresh, device)
    # Re-merge the two categories; fruit entries come first in every tensor.
    for key in results[0]:
        if (key == "boxes"):
            results[0]["boxes"] = torch.cat((fruit_results["boxes"], bad_spot_results["boxes"]), axis = 0)
        else:
            results[0][key] = torch.cat((fruit_results[key], bad_spot_results[key]), dim = 0)
    # draw_boxes works on CPU tensors, so pull the image back off the GPU.
    if device == torch.device("cuda"):
        torch_image = torch_image.cpu()
    written_image = draw_boxes(results[0]["boxes"], results[0]["labels"], torch_image.squeeze(), put_text= True)
    cv2.imshow('Output', written_image)
    if voice_over:
        voice_over = develop_voice_over(results, classes)
        print(voice_over)
    cv2.waitKey(0)
def draw_boxes(boxes, labels, image, put_text = True):
    """Paint bounding boxes (and, optionally, class names) onto an image.

    Args:
        boxes: iterable of (x0, y0, x1, y1) rows, one per object.
        labels: per-box integer class ids, used for both colour and name.
        image: CHW tensor image; converted to an HWC numpy array for OpenCV.
        put_text: when True, label each box with its class name.

    Returns:
        The annotated HWC numpy image.
    """
    canvas = image.permute(1, 2, 0).numpy()
    canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
    for box, label in zip(boxes, labels):
        # Colour wraps via modulo because COLORS is shorter than classes.
        colour = COLORS[label % len(COLORS)]
        top_left = (int(box[0]), int(box[1]))
        bottom_right = (int(box[2]), int(box[3]))
        cv2.rectangle(canvas, top_left, bottom_right, colour, 2)
        if put_text:
            cv2.putText(canvas, classes[label], (int(box[0]), int(box[1]-5)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, colour, 2,
                        lineType=cv2.LINE_AA)
    return canvas
if __name__ == "__main__":
    # Command-line entry point: parse arguments, pick a detector, run inference.
    parser = argparse.ArgumentParser(description = "Infers on images and can give optional voice_over")
    parser.add_argument("image_file", type = str, help = "A file path to the image")
    parser.add_argument("--device", dest = "device", required = True, help = "name of device used")
    parser.add_argument("--model_name", dest = "model_name", required = True, help = "name of models: [efficientdet_d0, mobilenet_fasterrcnn, ssdlite_mobilenet]")
    parser.add_argument("--confidence_thresh", dest = "confidence_thresh", required = True, help = "value for confidence thresholding \
    in nms")
    parser.add_argument("--voice_over", dest = "voice_over", default = False, action='store_true', help = "choice to include user interface. Default is False")
    args = parser.parse_args()
    set_device(args.device)
    image = Image.open(args.image_file).convert("RGB")
    # Dispatch to the matching model family.
    chosen = args.model_name
    if chosen == "efficientdet_d0":
        model, amp_autocast = create_effdet()
        infer_effdet(model, image, amp_autocast, args.confidence_thresh, args.voice_over)
    elif chosen in ("mobilenet_fasterrcnn", "ssdlite_mobilenet"):
        model = load_torchvision_models(chosen, device)
        infer_image(image, model, float(args.confidence_thresh), [0.35, 0.1], args.voice_over)
    else:
        raise ValueError("model_name can only be [efficientdet_d0, mobilenet_fasterrcnn, ssdlite_mobilenet]")
|
{"hexsha": "07cac21651ed0f2056895301401fd5f2dc099c0a", "size": 9736, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference_device.py", "max_stars_repo_name": "SarthakJaingit/Visually-Impaired-Food-Device-Aid-", "max_stars_repo_head_hexsha": "592745a5ce78616fb5999b5b4f73820eeb27adaa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference_device.py", "max_issues_repo_name": "SarthakJaingit/Visually-Impaired-Food-Device-Aid-", "max_issues_repo_head_hexsha": "592745a5ce78616fb5999b5b4f73820eeb27adaa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference_device.py", "max_forks_repo_name": "SarthakJaingit/Visually-Impaired-Food-Device-Aid-", "max_forks_repo_head_hexsha": "592745a5ce78616fb5999b5b4f73820eeb27adaa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1004016064, "max_line_length": 162, "alphanum_fraction": 0.6857025472, "include": true, "reason": "import numpy", "num_tokens": 2458}
|
from abc import ABCMeta
from torch.utils.data import Dataset
import json
import numpy as np
import os
from PIL import Image
class VideoDataset(Dataset):
    """Abstract base class turning annotated videos into fixed-size clips.
    Subclasses implement ``_getClips`` (JSON parsing) and ``__getitem__``
    (loading); this class stores the shared configuration and the clip
    extraction logic.
    """
    __metaclass__ = ABCMeta
    def __init__(self, *args, **kwargs):
        """
        Args:
            json_path: Path to the directory containing the dataset's JSON file (not including the file itself)
            load_type: String indicating whether to load training or validation data ('train' or 'val')
            clip_length: Number of frames in each clip that will be input into the network
            clip_offset: Number of frames from beginning of video to start extracting clips
            clip_stride: The temporal stride between clips
            num_clips: Number of clips to extract from each video (-1 uses the entire video)
            resize_shape: The shape [h, w] of each frame after resizing
            crop_shape: The shape [h, w] of each frame after cropping
            crop_type: The method used to crop (either random or center)
            final_shape: Final shape [h, w] of each frame after all preprocessing, this is input to network
            random_offset: Randomly select a clip_length sized clip from a video
            batch_size: Number of clips per batch (must be 1 when whole videos are returned)
        """
        # JSON loading arguments
        self.json_path = kwargs['json_path']
        self.load_type = kwargs['load_type']
        # Clips processing arguments
        self.clip_length = kwargs['clip_length']
        self.clip_offset = kwargs['clip_offset']
        self.clip_stride = kwargs['clip_stride']
        self.num_clips = kwargs['num_clips']
        self.random_offset = kwargs['random_offset']
        # Frame-wise processing arguments
        self.resize_shape = kwargs['resize_shape']
        self.crop_shape = kwargs['crop_shape']
        self.crop_type = kwargs['crop_type']
        self.final_shape = kwargs['final_shape']
        #Experiment arguments
        self.batch_size = kwargs['batch_size']
        # Creates the self.samples list which will be indexed by each __getitem__ call
        self._getClips()
    def __len__(self):
        # One sample per extracted clip, not per video.
        return len(self.samples)
    def __getitem__(self, idx):
        raise NotImplementedError("Dataset must contain __getitem__ method which loads videos from memory.")
    def _getClips(self):
        """
        Loads the JSON file associated with the videos in a datasets and processes each of them into clips
        """
        raise NotImplementedError("Dataset must contain getClips method which loads and processes the dataset's JSON file.")
    def _extractClips(self, video):
        """
        Processes a single video into uniform sized clips that will be loaded by __getitem__
        Args:
            video: List containing a dictionary of annontations per frame
        Additional Parameters:
            self.clip_length: Number of frames extracted from each clip
            self.num_clips: Number of clips to extract from each video (-1 uses the entire video, 0 paritions the entire video in clip_length clips)
            self.clip_offset: Number of frames from beginning of video to start extracting clips
            self.clip_stride: Number of frames between clips when extracting them from videos
            self.random_offset: Randomly select a clip_length sized clip from a video
        Returns:
            A list of clips, each clip a list of per-frame annotation dicts.
        """
        # Optionally skip the first clip_offset frames (only when enough remain
        # for at least one clip afterwards).
        if self.clip_offset > 0:
            if len(video)-self.clip_offset >= self.clip_length:
                video = video[self.clip_offset:]
        if self.num_clips < 0:
            # num_clips < 0: produce exactly one clip spanning the whole video.
            if len(video) >= self.clip_length:
                # Uniformly sample one clip from the video
                final_video = [video[_idx] for _idx in np.linspace(0, len(video)-1, self.clip_length, dtype='int32')]
                final_video = [final_video]
            else:
                # Loop if insufficient elements
                indices = np.ceil(self.clip_length/float(len(video))) # Number of times to repeat the video to exceed one clip_length
                indices = indices.astype('int32')
                indices = np.tile(np.arange(0, len(video), 1, dtype='int32'), indices) # Repeat the video indices until it exceeds a clip_length
                indices = indices[np.linspace(0, len(indices)-1, self.clip_length, dtype='int32')] # Uniformly sample clip_length frames from the looped video
                final_video = [video[_idx] for _idx in indices]
                final_video = [final_video]
            # END IF
        elif self.num_clips == 0:
            # Divide entire video into the max number of clip_length segments
            if len(video) >= self.clip_length:
                indices = np.arange(start=0, stop=len(video)-self.clip_length+1, step=self.clip_stride)
                final_video = []
                for _idx in indices:
                    if _idx + self.clip_length <= len(video):
                        final_video.append([video[true_idx] for true_idx in range(_idx, _idx+self.clip_length)])
                # END FOR
            else:
                # Loop if insufficient elements
                indices = np.ceil(self.clip_length/float(len(video)))
                indices = indices.astype('int32')
                indices = np.tile(np.arange(0, len(video), 1, dtype='int32'), indices)
                indices = indices[:self.clip_length]
                final_video = [video[_idx] for _idx in indices]
                final_video = [final_video]
            # END IF
        else:
            # num_clips > 0, select exactly num_clips from a video
            if self.clip_length == -1:
                # This is a special case where we will return the entire video
                # Batch size must equal one or dataloader items may have varying lengths
                # and can't be stacked i.e. throws an error
                assert(self.batch_size == 1)
                return [video]
            # Minimum frame count for num_clips stride-separated clips.
            required_length = (self.num_clips-1)*(self.clip_stride)+self.clip_length
            if self.random_offset:
                # Random temporal crop of exactly required_length frames.
                if len(video) >= required_length:
                    vid_start = np.random.choice(np.arange(len(video) - required_length + 1), 1)
                    video = video[int(vid_start):]
            if len(video) >= required_length:
                # Get indices of sequential clips overlapped by a clip_stride number of frames
                indices = np.arange(0, len(video), self.clip_stride)
                # Select only the first num clips
                indices = indices.astype('int32')[:self.num_clips]
                video = np.array(video)
                final_video = [video[np.arange(_idx, _idx+self.clip_length).astype('int32')].tolist() for _idx in indices]
            else:
                # If the video is too small to get num_clips given the clip_length and clip_stride, loop it until you can
                indices = np.ceil(required_length /float(len(video)))
                indices = indices.astype('int32')
                indices = np.tile(np.arange(0, len(video), 1, dtype='int32'), indices)
                # Starting index of each clip
                clip_starts = np.arange(0, len(indices), self.clip_stride).astype('int32')[:self.num_clips]
                video = np.array(video)
                final_video = [video[indices[_idx:_idx+self.clip_length]].tolist() for _idx in clip_starts]
            # END IF
        # END IF
        return final_video
class RecognitionDataset(VideoDataset):
    """Abstract base class for action-recognition datasets: locates the
    split's JSON annotation file and slices each annotated video into clips.
    Subclasses still provide ``__getitem__``.
    """
    __metaclass__ = ABCMeta
    def __init__(self, *args, **kwargs):
        super(RecognitionDataset, self).__init__(*args, **kwargs)
        self.load_type = kwargs['load_type']
    def _getClips(self, *args, **kwargs):
        """Populate ``self.samples`` from the split's JSON annotation file.
        Expected JSON layout (one entry per video):
            [ { 'frames': [ { <frame size>, <frame path>,
                              'actions': [ { <track id>, <action class> }, ... ] },
                            ... ],
                'base_path': str },
              ... ]
        e.g. action_label = dataset[vid]['frames'][f]['actions'][a]['action_class']
        """
        self.samples = []
        # Map the requested split onto its annotation file name.
        if self.load_type == 'train':
            annot_name = 'train.json'
        elif self.load_type == 'val':
            annot_name = 'val.json'
            # If val.json doesn't exist, fall back to test.json.
            if not os.path.exists(os.path.join(self.json_path, annot_name)):
                annot_name = 'test.json'
        else:
            annot_name = 'test.json'
        with open(os.path.join(self.json_path, annot_name), 'r') as annot_file:
            annotations = json.load(annot_file)
        # Each clip (a list of per-frame annotation dicts) becomes one sample.
        for video_info in annotations:
            for clip in self._extractClips(video_info['frames']):
                self.samples.append(dict(frames=clip, base_path=video_info['base_path']))
class DetectionDataset(VideoDataset):
    """Abstract base class for detection datasets: locates the split's JSON
    annotation file and slices each annotated video into clips, keeping the
    per-video frame size.  Subclasses still provide ``__getitem__``.
    """
    __metaclass__ = ABCMeta
    def __init__(self, *args, **kwargs):
        super(DetectionDataset, self).__init__(*args, **kwargs)
        self.load_type = kwargs['load_type']
    def _getClips(self):
        """Populate ``self.samples`` from the split's JSON annotation file.
        Expected JSON layout (one entry per video):
            [ { 'frame_size': (width, height),
                'base_path': str,
                'frame': [ { 'img_path': str,
                             'objs': [ { 'trackid': int,
                                         'c': class id,
                                         'bbox': (xmin, ymin, xmax, ymax),
                                         optional 'iscrowd': int,   # MSCOCO
                                         optional 'occ': int },     # ImageNetVID
                                       ... ] },
                           ... ] },
              ... ]
        e.g. coordinates = dataset[vid]['frames'][f]['objs'][o]['bbox']
        """
        # Load all video paths into the samples array to be loaded by __getitem__
        self.samples = []
        # Map the requested split onto its annotation file name.
        if self.load_type == 'train':
            annot_name = 'train.json'
        elif self.load_type == 'val':
            annot_name = 'val.json'
            # If val.json doesn't exist, fall back to test.json.
            if not os.path.exists(os.path.join(self.json_path, annot_name)):
                annot_name = 'test.json'
        else:
            annot_name = 'test.json'
        with open(os.path.join(self.json_path, annot_name), 'r') as annot_file:
            annotations = json.load(annot_file)
        # Each clip (a list of per-frame annotation dicts) becomes one sample.
        for video_info in annotations:
            for clip in self._extractClips(video_info['frames']):
                self.samples.append(dict(frames=clip, base_path=video_info['base_path'], frame_size=video_info['frame_size']))
|
{"hexsha": "a159b251d3ebda713c6dfb0c1f22bad28d7848ef", "size": 12642, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/abstract_datasets.py", "max_stars_repo_name": "MichiganCOG/ViP", "max_stars_repo_head_hexsha": "74776f2575bd5339ba39c784bbda4f04cc859add", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 210, "max_stars_repo_stars_event_min_datetime": "2019-08-12T15:51:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:49:54.000Z", "max_issues_repo_path": "datasets/abstract_datasets.py", "max_issues_repo_name": "puhuajiang/ViP", "max_issues_repo_head_hexsha": "74776f2575bd5339ba39c784bbda4f04cc859add", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-08-12T22:05:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T09:06:44.000Z", "max_forks_repo_path": "datasets/abstract_datasets.py", "max_forks_repo_name": "puhuajiang/ViP", "max_forks_repo_head_hexsha": "74776f2575bd5339ba39c784bbda4f04cc859add", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2019-08-12T19:16:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T04:54:12.000Z", "avg_line_length": 42.8542372881, "max_line_length": 158, "alphanum_fraction": 0.589384591, "include": true, "reason": "import numpy", "num_tokens": 2606}
|
[STATEMENT]
lemma hlit_of_flit_bij: "bij_betw hlit_of_flit {l. ground\<^sub>l l} UNIV"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw hlit_of_flit {l. ground\<^sub>l l} UNIV
[PROOF STEP]
unfolding bij_betw_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj_on hlit_of_flit {l. ground\<^sub>l l} \<and> hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. inj_on hlit_of_flit {l. ground\<^sub>l l}
2. hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
show "inj_on hlit_of_flit {l. ground\<^sub>l l}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj_on hlit_of_flit {l. ground\<^sub>l l}
[PROOF STEP]
using inj_on_inverseI flit_of_hlit_hlit_of_flit
[PROOF STATE]
proof (prove)
using this:
(\<And>x. x \<in> ?A \<Longrightarrow> ?g (?f x) = x) \<Longrightarrow> inj_on ?f ?A
ground\<^sub>l ?l \<Longrightarrow> flit_of_hlit (hlit_of_flit ?l) = ?l
goal (1 subgoal):
1. inj_on hlit_of_flit {l. ground\<^sub>l l}
[PROOF STEP]
by (metis (mono_tags, lifting) mem_Collect_eq)
[PROOF STATE]
proof (state)
this:
inj_on hlit_of_flit {l. ground\<^sub>l l}
goal (1 subgoal):
1. hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
have "\<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'
[PROOF STEP]
using ground_flit_of_hlit hlit_of_flit_flit_of_hlit
[PROOF STATE]
proof (prove)
using this:
ground\<^sub>l (flit_of_hlit ?l)
hlit_of_flit (flit_of_hlit ?l) = ?l
goal (1 subgoal):
1. \<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'
goal (1 subgoal):
1. hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'
[PROOF STEP]
show "hlit_of_flit ` {l. ground\<^sub>l l} = UNIV"
[PROOF STATE]
proof (prove)
using this:
\<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'
goal (1 subgoal):
1. hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
hlit_of_flit ` {l. ground\<^sub>l l} = UNIV
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1193, "file": "Resolution_FOL_TermsAndLiterals", "length": 13}
|
!*robodoc*f* ca_hx/ca_hx_mpi
! NAME
! ca_hx_mpi
! SYNOPSIS
!$Id: ca_hx_mpi.f90 528 2018-03-26 09:02:14Z mexas $
submodule ( ca_hx ) ca_hx_mpi
! DESCRIPTION
! Submodule of module ca_hx with MPI related routines.
! To aid portability, the module works only with default integer
! kind, i.e. MPI_integer. Other MPI integer kinds might not be
! widely available, meaning that other Fortran integer kinds might
! be less portable. So make sure that space array kind is the same
! as default integer. This is likely to be the case with
! integer, parameter :: iarr = selected_int_kind( 8 )
! iarr is set in cgca_m1co.
!
! Creation/release (free) of MPI types is left as the user's
! responsibility. This is because the user might want to change
! halo depth in the same program. This is hard/impossible to keep
! completely invisible to the user.
! AUTHOR
! Anton Shterenlikht
! COPYRIGHT
! See LICENSE
! CONTAINS
! ca_mpi_halo_type_create, ca_mpi_halo_type_free, ca_mpi_hxvn1m, &
! ca_mpi_hxvn1p, ca_mpi_hxvn2m, ca_mpi_hxvn2p, ca_mpi_hxvn3m, &
! ca_mpi_hxvn3p, ca_mpi_hx_all
! USES
! USED BY
! SOURCE
!
! For reference
!
! MPI_SEND( BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR )
! MPI_RECV( BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR )
!
! MPI_ISEND( BUF, COUNT, DATATYPE, DEST, TAG, COMM, REQUEST, IERROR )
! MPI_IRECV( BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, REQUEST, IERROR )
implicit none
! Tags for sending messages in 6 directions.
integer, parameter :: TAG1L = 1, TAG1R = 2, TAG2L = 3, TAG2R = 4, &
TAG3L = 5, TAG3R = 6
integer, save :: &
rank, & ! MPI rank
!status( MPI_STATUS_SIZE ), & ! used in MPI_RECV, etc.
mpi_h1_LV, & ! MPI halo, dim 1, left virtual
mpi_h1_LR, & ! MPI halo, dim 1, left real
mpi_h1_RR, & ! MPI halo, dim 1, right real
mpi_h1_RV, & ! MPI halo, dim 1, right virtual
mpi_h2_LV, & ! MPI halo, dim 2, left virtual
mpi_h2_LR, & ! MPI halo, dim 2, left real
mpi_h2_RR, & ! MPI halo, dim 2, right real
mpi_h2_RV, & ! MPI halo, dim 2, right virtual
mpi_h3_LV, & ! MPI halo, dim 3, left virtual
mpi_h3_LR, & ! MPI halo, dim 3, left real
mpi_h3_RR, & ! MPI halo, dim 3, right real
mpi_h3_RV, & ! MPI halo, dim 3, right virtual
mpi_ca_integer,& ! MPI matching type for iarr
errcode, & ! Need to preserve ierr
errlen ! The length of the output error message
! A flag to track the state of MPI types for halos.
! Set initially to .false.
! Calling ca_mpi_halo_type_create sets it to .true.
! Calling ca_mpi_halo_type_free sets it to .false. again.
logical, save :: halo_type_created = .false.
contains
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_halo_type_create
! NAME
! ca_mpi_halo_type_create
! SYNOPSIS
module subroutine ca_mpi_halo_type_create( space )
! INPUT
integer( kind=iarr), intent( inout ), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! none
! SIDE EFFECTS
! 12 MPI halo types, module variables, are created and committed.
! Sets module variables rank, mpi_ca_integer and halo_type_created.
! DESCRIPTION
! For each direction there are 4 MPI halo data types:
! - array elements in the halo part of the array to the
!   left of the real data (LV),
! - array elements of halo thickness inside the real part of the
!   array on its left side (LR),
! - array elements of halo thickness inside the real part of the
!   array on its right side (RR),
! - array elements in the halo part of the array to the right
!   of the real data (RV).
! Refer to the diagram in ca_hx/ca_spalloc.
! All 12 types are created by the internal helper halo_subarray,
! which also does uniform error checking and reporting.
! NOTES
! Call this routine after ca_halloc.
! All images must call this routine!
! Starts are zero-based, as required by MPI_TYPE_CREATE_SUBARRAY
! in Fortran: for a subarray starting at index n, the entry in the
! array of starts is n-1.  Using only a single dimension, e.g. 1:
!
!  +------+------+----------------+------+------+
!  |  LV  |  LR  |                |  RR  |  RV  |
!  +------+------+----------------+------+------+
!  ^      ^                       ^      ^
!  |      |                       |      |
!  0    hdepth                    |   sizes(1)-hdepth
!                          sizes(1)-2*hdepth
!
!        starts for 4 halo arrays along dim 1
! USES
! USED BY
! SOURCE
integer :: sizes(3), subsizes(3)

! Set MPI rank, keep forever
call MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr )

! Set MPI matching type for iarr: mpi_ca_integer.
! Set once, keep forever.
call MPI_TYPE_CREATE_F90_INTEGER( ca_range, mpi_ca_integer, ierr )

! The sizes is just the shape of the space array, for all cases
sizes = shape( space )

! Dimension 1: halo slabs are hdepth x sub(2) x sub(3)
subsizes = (/ hdepth, sub(2), sub(3) /)
call halo_subarray( subsizes, (/ 0, hdepth, hdepth /),                 &
  mpi_h1_LV, "dim 1: left virtual (LV)" )
call halo_subarray( subsizes, (/ hdepth, hdepth, hdepth /),            &
  mpi_h1_LR, "dim 1: left real (LR)" )
call halo_subarray( subsizes, (/ sizes(1)-2*hdepth, hdepth, hdepth /), &
  mpi_h1_RR, "dim 1: right real (RR)" )
call halo_subarray( subsizes, (/ sizes(1)-hdepth, hdepth, hdepth /),   &
  mpi_h1_RV, "dim 1: right virtual (RV)" )

! Dimension 2: halo slabs are sub(1) x hdepth x sub(3)
subsizes = (/ sub(1), hdepth, sub(3) /)
call halo_subarray( subsizes, (/ hdepth, 0, hdepth /),                 &
  mpi_h2_LV, "dim 2: left virtual (LV)" )
call halo_subarray( subsizes, (/ hdepth, hdepth, hdepth /),            &
  mpi_h2_LR, "dim 2: left real (LR)" )
call halo_subarray( subsizes, (/ hdepth, sizes(2)-2*hdepth, hdepth /), &
  mpi_h2_RR, "dim 2: right real (RR)" )
call halo_subarray( subsizes, (/ hdepth, sizes(2)-hdepth, hdepth /),   &
  mpi_h2_RV, "dim 2: right virtual (RV)" )

! Dimension 3: halo slabs are sub(1) x sub(2) x hdepth
subsizes = (/ sub(1), sub(2), hdepth /)
call halo_subarray( subsizes, (/ hdepth, hdepth, 0 /),                 &
  mpi_h3_LV, "dim 3: left virtual (LV)" )
call halo_subarray( subsizes, (/ hdepth, hdepth, hdepth /),            &
  mpi_h3_LR, "dim 3: left real (LR)" )
call halo_subarray( subsizes, (/ hdepth, hdepth, sizes(3)-2*hdepth /), &
  mpi_h3_RR, "dim 3: right real (RR)" )
call halo_subarray( subsizes, (/ hdepth, hdepth, sizes(3)-hdepth /),   &
  mpi_h3_RV, "dim 3: right virtual (RV)" )

! MPI types for halos have been created.
! Set the corresponding flag to .true.
halo_type_created = .true.

contains

subroutine halo_subarray( subsz, starts, newtype, what )
! Create and commit one MPI subarray halo type over the full space
! array (host-associated sizes), with uniform error reporting.
! BUG FIX: the original per-type error messages for dims 2 and 3
! named a nonexistent routine "ca_mpi_halo_type"; all messages now
! correctly name ca_mpi_halo_type_create, and all creation failures
! report the MPI error string, not just the raw code.
integer, intent( in ) :: subsz(3), starts(3)
integer, intent( out ) :: newtype
character( len=* ), intent( in ) :: what
call MPI_TYPE_CREATE_SUBARRAY( 3, sizes, subsz, starts,                &
     MPI_ORDER_FORTRAN, mpi_ca_integer, newtype, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
  errcode = ierr
  call MPI_ERROR_STRING( errcode, errmsg, errlen, ierr )
  write (*,"(a,i0,a)") "ERROR ca_hx_mpi/ca_mpi_halo_type_create: " //  &
    "MPI_TYPE_CREATE_SUBARRAY: " // what // ": error: ",               &
    errcode, " error message: " // trim(errmsg)
  error stop
end if
call MPI_TYPE_COMMIT( newtype, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
  write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_create: " //    &
    "MPI_TYPE_COMMIT: " // what // ": ierr: ", ierr
  error stop
end if
end subroutine halo_subarray

end subroutine ca_mpi_halo_type_create
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_halo_type_free
! NAME
! ca_mpi_halo_type_free
! SYNOPSIS
module subroutine ca_mpi_halo_type_free
! INPUT
! none
! OUTPUT
! none
! SIDE EFFECTS
! 12 MPI halo types, module variables, are freed.
! DESCRIPTION
! Refer to ca_mpi_halo_type_create for details of these 12 types.
! Need to call this routine if want to re-create the halo types,
! perhaps with different halo depth, or for a different space
! array.
! NOTES
! Will give an error if data types are not committed.
! All images must call this routine!
! USES
! USED BY
! SOURCE
! Free the 4 types per dimension (LV - left virtual, LR - left real,
! RR - right real, RV - right virtual) for each of the 3 dimensions.
! Every MPI_TYPE_FREE is followed by an explicit ierr check so the
! first failing call is reported with a specific message before
! stopping, rather than relying on the default MPI error handler.
! Dimension 1
call MPI_TYPE_FREE( mpi_h1_LV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 1: left virtual (LV): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h1_LR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 1: left real (LR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h1_RR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 1: right real (RR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h1_RV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 1: right virtual (RV): ierr: ", ierr
error stop
end if
! Dimension 2
call MPI_TYPE_FREE( mpi_h2_LV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 2: left virtual (LV): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h2_LR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 2: left real (LR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h2_RR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 2: right real (RR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h2_RV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 2: right virtual (RV): ierr: ", ierr
error stop
end if
! Dimension 3
call MPI_TYPE_FREE( mpi_h3_LV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 3: left virtual (LV): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h3_LR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 3: left real (LR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h3_RR, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 3: right real (RR): ierr: ", ierr
error stop
end if
call MPI_TYPE_FREE( mpi_h3_RV, ierr )
if ( ierr .ne. MPI_SUCCESS ) then
write (*,"(a,i0)") "ERROR ca_hx_mpi/ca_mpi_halo_type_free: " // &
"MPI_TYPE_FREE: dim 3: right virtual (RV): ierr: ", ierr
error stop
end if
! MPI types have been freed.
! Reset the flag back to .false.
! Will need to re-create MPI types for halos *before* any HX.
halo_type_created = .false.
end subroutine ca_mpi_halo_type_free
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn1m
! NAME
! ca_mpi_hxvn1m
! SYNOPSIS
module subroutine ca_mpi_hxvn1m( space )
! INPUT
integer( kind=iarr), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! Use non-blocking send/receive.
! An image does 2 remote ops:
! - Send its space array real halo layer, left side (mpi_h1_LR)
! along dimension 1 into a virtual halo layer, right side
! (mpi_h1_RV) on an image which is 1 lower along codimension 1.
! Tag this message with TAG1L.
! - Receive its space array virtual halo layer, left side
! (mpi_h1_LV) along dimension 1 from a real halo layer,
! right side (mpi_h1_RR) on an image which is 1 lower along
! codimension 1. Tag this message with TAG1R.
!
! Schematic diagram, only showing what is relevant for HX along
! dimension 1:
!
! ----------> dimension 1
!
! image P / rank P+1 | image Q / rank Q+1
! |
! |
! | image Q, TAG1L, send data type mpi_h1_LR
! +--------|------------+
! | | |
! | | |
! +----------------|-+ | +-----|------------+
! | | | | | | |
! | +----------+-V-+ | +---+-|------------+
! | | | | | | | ^ |
! | | | h | | | h | |
! | | real | a | | | a | real |
! | | | l | | | l | |
! | | | o | | | o | |
! | | V| | | | | |
! | +---------|+---+ | +-^-+--------------+
! | | | | | | |
! +-------------|----+ | +-+----------------+
! | | |
! | | |
! +-----------|--------+
! | image Q, TAG1R, receive data type mpi_h1_LV
! |
! |
!
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs1m(2), stats(MPI_STATUS_SIZE, 2)
! Images at the lower boundary of codimension 1 (ci(1) == 1) have no
! left neighbour and do nothing.
if ( ci(1) .ne. 1 ) then
! Rank is image number -1.
! Receive from the left neighbour, tag = TAG1R
call MPI_IRECV( space, 1, mpi_h1_LV, nei_img_L(1)-1, TAG1R, &
MPI_COMM_WORLD, reqs1m(1), ierr )
! Send to the left neighbour, tag = TAG1L
call MPI_ISEND( space, 1, mpi_h1_LR, nei_img_L(1)-1, TAG1L, &
MPI_COMM_WORLD, reqs1m(2), ierr )
! Both non-blocking ops are posted before waiting, so the send and
! receive can overlap; WAITALL completes the exchange for this image.
call MPI_WAITALL( 2, reqs1m, stats, ierr )
end if
end subroutine ca_mpi_hxvn1m
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn1p
! NAME
! ca_mpi_hxvn1p
! SYNOPSIS
module subroutine ca_mpi_hxvn1p( space )
! INPUT
integer( kind=iarr), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! Use non-blocking send/receive.
! An image does 2 remote ops:
! - Send its space array real halo layer, right side (mpi_h1_RR)
! along dimension 1 into a virtual halo layer, left side
! (mpi_h1_LV) on an image which is 1 higher along codimension 1.
! Tag this message with TAG1R.
! - Receive its space array virtual halo layer, right side
! (mpi_h1_RV) along dimension 1 from a real halo layer,
! left side (mpi_h1_LR) on an image which is 1 higher along
! codimension 1. Tag this message with TAG1L.
!
! Schematic diagram, only showing what is relevant for HX along
! dimension 1:
!
! ----------> dimension 1
!
! image P / rank P+1 | image Q / rank Q+1
! |
! |
! image P, TAG1L, receive data type mpi_h1_RV
! +--------|------------+
! | | |
! | | |
! +----------------|-+ | +-----|------------+
! | | | | | | |
! | +----------+-V-+ | +---+-|------------+
! | | | | | | | ^ |
! | | | h | | | h | |
! | | real | a | | | a | real |
! | | | l | | | l | |
! | | | o | | | o | |
! | | V| | | | | |
! | +---------|+---+ | +-^-+--------------+
! | | | | | | |
! +-------------|----+ | +-+----------------+
! | | |
! | | |
! +-----------|--------+
! image P, TAG1R, send data type mpi_h1_RR
!
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs1p(2), stats(MPI_STATUS_SIZE, 2)
! Images at the upper boundary of codimension 1 (ci(1) == ucob(1))
! have no right neighbour and do nothing.
if ( ci(1) .ne. ucob(1) ) then
! Rank is image number -1.
! Receive from the right neighbour, tag = TAG1L
call MPI_IRECV( space, 1, mpi_h1_RV, nei_img_R(1)-1, TAG1L, &
MPI_COMM_WORLD, reqs1p(1), ierr )
! Send to the right neighbour, tag = TAG1R
call MPI_ISEND( space, 1, mpi_h1_RR, nei_img_R(1)-1, TAG1R, &
MPI_COMM_WORLD, reqs1p(2), ierr )
! WAITALL completes both non-blocking operations for this image.
call MPI_WAITALL( 2, reqs1p, stats, ierr )
end if
end subroutine ca_mpi_hxvn1p
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn2m
! NAME
! ca_mpi_hxvn2m
! SYNOPSIS
module subroutine ca_mpi_hxvn2m( space )
! INPUT
integer( kind=iarr ), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! HX along dimension 2. See ca_mpi_hxvn1m.
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs2m(2), stats(MPI_STATUS_SIZE, 2)
! Skip images at the lower boundary of codimension 2 (no left neighbour).
if ( ci(2) .ne. 1 ) then
! Rank is image number -1.
! Receive from the left neighbour, tag = TAG2R
call MPI_IRECV( space, 1, mpi_h2_LV, nei_img_L(2)-1, TAG2R, &
MPI_COMM_WORLD, reqs2m(1), ierr )
! Send to the left neighbour, tag = TAG2L
call MPI_ISEND( space, 1, mpi_h2_LR, nei_img_L(2)-1, TAG2L, &
MPI_COMM_WORLD, reqs2m(2), ierr )
! Complete both non-blocking operations before returning.
call MPI_WAITALL( 2, reqs2m, stats, ierr )
end if
end subroutine ca_mpi_hxvn2m
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn2p
! NAME
! ca_mpi_hxvn2p
! SYNOPSIS
module subroutine ca_mpi_hxvn2p( space )
! INPUT
integer( kind=iarr ), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! HX along dimension 2. See ca_mpi_hxvn1p.
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs2p(2), stats(MPI_STATUS_SIZE, 2)
! Skip images at the upper boundary of codimension 2 (no right neighbour).
if ( ci(2) .ne. ucob(2) ) then
! Rank is image number -1.
! Receive from the right neighbour, tag = TAG2L
call MPI_IRECV( space, 1, mpi_h2_RV, nei_img_R(2)-1, TAG2L, &
MPI_COMM_WORLD, reqs2p(1), ierr )
! Send to the right neighbour, tag = TAG2R
call MPI_ISEND( space, 1, mpi_h2_RR, nei_img_R(2)-1, TAG2R, &
MPI_COMM_WORLD, reqs2p(2), ierr )
! Complete both non-blocking operations before returning.
call MPI_WAITALL( 2, reqs2p, stats, ierr )
end if
end subroutine ca_mpi_hxvn2p
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn3m
! NAME
! ca_mpi_hxvn3m
! SYNOPSIS
module subroutine ca_mpi_hxvn3m( space )
! INPUT
integer( kind=iarr ), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! HX along dimension 3. See ca_mpi_hxvn1m.
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs3m(2), stats(MPI_STATUS_SIZE, 2)
! Skip images at the lower boundary of codimension 3 (no left neighbour).
if ( ci(3) .ne. 1 ) then
! Rank is image number -1.
! Receive from the left neighbour, tag = TAG3R
call MPI_IRECV( space, 1, mpi_h3_LV, nei_img_L(3)-1, TAG3R, &
MPI_COMM_WORLD, reqs3m(1), ierr )
! Send to the left neighbour, tag = TAG3L
call MPI_ISEND( space, 1, mpi_h3_LR, nei_img_L(3)-1, TAG3L, &
MPI_COMM_WORLD, reqs3m(2), ierr )
! Complete both non-blocking operations before returning.
call MPI_WAITALL( 2, reqs3m, stats, ierr )
end if
end subroutine ca_mpi_hxvn3m
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hxvn3p
! NAME
! ca_mpi_hxvn3p
! SYNOPSIS
module subroutine ca_mpi_hxvn3p( space )
! INPUT
integer( kind = iarr ), intent(inout), allocatable :: space(:,:,:)
! space - the CA array
! OUTPUT
! space is updated
! SIDE EFFECTS
! none
! DESCRIPTION
! HX along dimension 3. See ca_mpi_hxvn1p.
! USES
! USED BY
! ca_mpi_hx_all
! SOURCE
integer :: reqs3p(2), stats(MPI_STATUS_SIZE, 2)
! Skip images at the upper boundary of codimension 3 (no right neighbour).
if ( ci(3) .ne. ucob(3) ) then
! Rank is image number -1.
! Receive from the right neighbour, tag = TAG3L
call MPI_IRECV( space, 1, mpi_h3_RV, nei_img_R(3)-1, TAG3L, &
MPI_COMM_WORLD, reqs3p(1), ierr )
! Send to the right neighbour, tag = TAG3R
call MPI_ISEND( space, 1, mpi_h3_RR, nei_img_R(3)-1, TAG3R, &
MPI_COMM_WORLD, reqs3p(2), ierr )
! Complete both non-blocking operations before returning.
call MPI_WAITALL( 2, reqs3p, stats, ierr )
end if
end subroutine ca_mpi_hxvn3p
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_hx_all
! NAME
! ca_mpi_hx_all
! SYNOPSIS
module subroutine ca_mpi_hx_all( space )
! INPUT
integer( kind=iarr ), intent(inout), allocatable :: space(:,:,:)
! space - non coarray array with CA model
! OUTPUT
! space is changed
! SIDE EFFECTS
! none
! DESCRIPTION
! Do all MPI HX.
! To avoid problems I don't allow the user to call individual
! hx routines. These are private to this module.
! The user only calls this routine.
! NOTE
! ca_mpi_halo_type_create must be called prior to calling this
! routine.
! Note! This routine will only work if iarr is the *default* integer.
! This is because MPI_INTEGER is used for space, as other MPI
! integer kinds might not be implemented.
! USES
! ca_mpi_hxvn1m, ca_mpi_hxvn1p, ca_mpi_hxvn2m, ca_mpi_hxvn2p,
! ca_mpi_hxvn3m, ca_mpi_hxvn3p
! USED BY
! SOURCE
! Make sure (some) MPI halo types have been created.
if ( .not. halo_type_created ) then
write (*,"(a)") "ERROR ca_hx_mpi/ca_mpi_hx_all: Need to create " // &
"MPI types. Call ca_mpi_hx_all first!"
error stop
end if
! Exchange halos in both directions (minus then plus) along each of
! the 3 dimensions in turn. Each hxvn routine contains its own
! MPI_WAITALL, so each exchange completes before the next starts.
call ca_mpi_hxvn1m( space )
call ca_mpi_hxvn1p( space )
call ca_mpi_hxvn2m( space )
call ca_mpi_hxvn2p( space )
call ca_mpi_hxvn3m( space )
call ca_mpi_hxvn3p( space )
end subroutine ca_mpi_hx_all
!*roboend*
!*robodoc*s* ca_hx/ca_mpi_ising_energy
! NAME
! ca_mpi_ising_energy
! SYNOPSIS
module subroutine ca_mpi_ising_energy( space, iter_sub, kernel, &
energy, magnet )
! INPUT
integer( kind=iarr ), intent(inout), allocatable :: space(:,:,:)
procedure( iter_proto ) :: iter_sub
procedure( kernel_proto ) :: kernel
! space - space array before iterations start
! iter_sub - the subroutine performing a single CA iteration, e.g.
! - ca_iter_tl - triple nested loop
! - ca_iter_dc - do concurrent
! - ca_iter_omp - OpenMP
! kernel - a function to be called for every cell inside the loop
! OUTPUT
integer( kind=ilrg ), intent( out ) :: energy, magnet
! energy - Total energy of CA system
! magnet - Total magnetisation of the CA system
! SIDE EFFECTS
! module array tmp_space is updated
! DESCRIPTION
! Calculate the total energy and the total magnetisation
! of CA using Ising model. Note that I'm passing integers of kind
! ilrg to MPI_INTEGER8. This should work as long as ilrg is 8 bytes
! long. So set ilrg to selected_int_kind( 10 ).
! This routine uses MPI_ALLREDUCE with MPI_SUM.
! Magnetisation is defined as the fraction of the 1 spins.
! The only valid kernel is ca_kernel_ising_ener.
! USES
! USED BY
! SOURCE
integer( kind=ilrg ) :: img_energy, img_magnet
! Halos must be current before the kernel reads neighbour cells.
call ca_mpi_hx_all( space ) ! space updated, sync images
! tmp_space updated, local op
call iter_sub( space=space, halo=hdepth, kernel=kernel )
! Sum only the real (non-halo) part of each array: tmp_space holds the
! per-cell results written by the kernel, space holds the spins.
img_energy = sum( tmp_space( 1:sub(1), 1:sub(2), 1:sub(3) ) )
img_magnet = sum( space( 1:sub(1), 1:sub(2), 1:sub(3) ) )
! write (*,*) "img:", this_image(), "img_energy:", img_energy, "img_magnet:", img_magnet
! Global reductions: every rank receives the totals over all images.
call MPI_ALLREDUCE( img_energy, energy, 1, MPI_INTEGER8, MPI_SUM, &
MPI_COMM_WORLD, ierr)
call MPI_ALLREDUCE( img_magnet, magnet, 1, MPI_INTEGER8, MPI_SUM, &
MPI_COMM_WORLD, ierr)
end subroutine ca_mpi_ising_energy
!*roboend*
end submodule ca_hx_mpi
|
{"hexsha": "eedb7720702eb322faca3ee74a6edeb93ee56720", "size": 29766, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "ca_hx_mpi.f90", "max_stars_repo_name": "lcebaman/casup", "max_stars_repo_head_hexsha": "240f25f07d8ea713b9fbed9814d0ac56d0141f86", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ca_hx_mpi.f90", "max_issues_repo_name": "lcebaman/casup", "max_issues_repo_head_hexsha": "240f25f07d8ea713b9fbed9814d0ac56d0141f86", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ca_hx_mpi.f90", "max_forks_repo_name": "lcebaman/casup", "max_forks_repo_head_hexsha": "240f25f07d8ea713b9fbed9814d0ac56d0141f86", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6474103586, "max_line_length": 92, "alphanum_fraction": 0.6101256467, "num_tokens": 9475}
|
"""
generate_data.py
Core script for generating training/test bubble-sort data. First, generates random integer
arrays, then steps through an execution trace, computing the exact order of subroutines that
need to be called.
"""
import pickle
import numpy as np
from tasks.bubblesort.env.trace import Trace
def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
    """
    Generates bubblesort data with the given string prefix (i.e. 'train', 'test') and the
    specified number of examples. Each example is a random integer array paired with its
    execution trace (the exact order of subroutines called while sorting it).

    :param prefix: String prefix for saving the file ('train', 'test')
    :param num_examples: Number of examples to generate.
    :param debug: If True, generate a verbose trace every `debug_every` examples.
    :param maximum: Unused; retained for backward compatibility with callers.
    :param debug_every: Interval (in examples) between verbose traces when `debug` is True.
    """
    data = []
    for i in range(num_examples):
        array = np.random.randint(10, size=5)
        if debug and i % debug_every == 0:
            # Verbose trace: Trace's second positional argument enables debug output.
            traces = Trace(array, True).traces
        else:
            traces = Trace(array).traces
        data.append((array, traces))

    with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
        pickle.dump(data, f)
|
{"hexsha": "791c5584bdea83569d14ab960720f73311191e1f", "size": 1072, "ext": "py", "lang": "Python", "max_stars_repo_path": "tasks/bubblesort/env/generate_data.py", "max_stars_repo_name": "ford-core-ai/neural-programming-architectures", "max_stars_repo_head_hexsha": "66320b8ba64dc978a34b1df0c1357efd104cec27", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tasks/bubblesort/env/generate_data.py", "max_issues_repo_name": "ford-core-ai/neural-programming-architectures", "max_issues_repo_head_hexsha": "66320b8ba64dc978a34b1df0c1357efd104cec27", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-12T08:53:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-12T08:53:27.000Z", "max_forks_repo_path": "tasks/bubblesort/env/generate_data.py", "max_forks_repo_name": "ford-core-ai/neural-programming-architectures", "max_forks_repo_head_hexsha": "66320b8ba64dc978a34b1df0c1357efd104cec27", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5294117647, "max_line_length": 98, "alphanum_fraction": 0.6791044776, "include": true, "reason": "import numpy", "num_tokens": 253}
|
import io
import os.path as osp
import os
from unittest import TestCase
import shutil
import tempfile
import numpy as np
from pylinac.log_analyzer import MachineLogs, TreatmentType, \
anonymize, TrajectoryLog, Dynalog, load_log, DynalogMatchError, NotADynalogError, NotALogError
from tests_basic.utils import save_file, CloudFileMixin, get_file_from_cloud_test_repo, \
get_folder_from_cloud_test_repo, FromDemoImageTesterMixin, FromURLTesterMixin
TEST_DIR = 'mlc_logs'  # subdirectory of the cloud test repo holding machine-log fixtures
# Fetch the anonymization fixture folders from the cloud test repo (presumably
# downloaded/cached by the helper - see tests_basic.utils). '_anonbase' holds
# pristine copies; 'anonymous' is the scratch folder the anonymization tests write to.
ANONYMOUS_SOURCE_FOLDER = get_folder_from_cloud_test_repo(['mlc_logs', '_anonbase'])
ANONYMOUS_DEST_FOLDER = get_folder_from_cloud_test_repo(['mlc_logs', 'anonymous'])
class TestAnonymizeFunction(TestCase):
    """Test the anonymization method."""

    def setUp(self):
        anon_source = get_folder_from_cloud_test_repo(['mlc_logs', '_anonbase'])
        anon_dest = get_folder_from_cloud_test_repo(['mlc_logs', 'anonymous'])
        # move over files from other directory, since the filenames get overridden
        # (anonymization renames files in place, so each test needs fresh copies)
        for file in os.listdir(anon_source):
            basefile = osp.join(anon_source, file)
            destfile = osp.join(anon_dest, file)
            if not osp.isfile(destfile):
                shutil.copy(basefile, anon_dest)

    @classmethod
    def tearDownClass(cls):
        # remove files from anonymous folder
        files = os.listdir(ANONYMOUS_DEST_FOLDER)
        # 'dummy.txt' is kept - presumably a placeholder so the folder persists; confirm
        files.remove('dummy.txt')
        for file in files:
            file = osp.join(ANONYMOUS_DEST_FOLDER, file)
            os.remove(file)

    def test_anonymize_function(self):
        # shouldn't raise: single file, whole folder (copy), and non-recursive folder
        anonymize(osp.join(ANONYMOUS_DEST_FOLDER, 'A1234_patientid.dlg'))
        anonymize(ANONYMOUS_DEST_FOLDER, inplace=False)
        anonymize(ANONYMOUS_DEST_FOLDER, recursive=False)

    def test_dynalog(self):
        # test making an anonymized copy
        dlog_file = osp.join(ANONYMOUS_DEST_FOLDER, 'A1234_patientid.dlg')
        dlog = Dynalog(dlog_file)
        dlog.anonymize()
        # test doing inplace anonymization; returned filenames carry the suffix
        files = dlog.anonymize(inplace=True, suffix='inplace')
        for file in files:
            self.assertTrue('inplace' in file)

    def test_destination(self):
        tlog_file = osp.join(ANONYMOUS_DEST_FOLDER, 'PatientID_4DC Treatment_JST90_TX_20140712094246.bin')
        tlog = TrajectoryLog(tlog_file)
        tlog.anonymize(destination=ANONYMOUS_DEST_FOLDER)  # shouldn't raise

    def test_bad_name(self):
        """Test that a log with a bad name (no underscore) fails gracefully."""
        dlog_file = osp.join(ANONYMOUS_DEST_FOLDER, 'A1234patientid.dlg')
        dlog = Dynalog(dlog_file)
        with self.assertRaises(NameError):
            dlog.anonymize()

    def test_invalid(self):
        # a nonexistent path must raise NotALogError
        invalid_path = r'nonexistant/path'
        with self.assertRaises(NotALogError):
            anonymize(invalid_path)
class TestPublishPDF(TestCase):
    """Smoke tests for publishing PDF reports from the demo logs."""

    @classmethod
    def setUpClass(cls):
        cls.tlog = TrajectoryLog.from_demo()
        cls.dlog = Dynalog.from_demo()

    def test_publish_pdf(self):
        # A plain publish of each demo log should complete without raising.
        for log in (self.dlog, self.tlog):
            with tempfile.TemporaryFile() as stream:
                log.publish_pdf(stream)

    def test_publish_pdf_w_imaging_log(self):
        # Publishing an imaging-type trajectory log is expected to raise ValueError.
        log_path = get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'imaging.bin'])
        imaging_log = TrajectoryLog(log_path)
        with self.assertRaises(ValueError), tempfile.NamedTemporaryFile() as handle:
            imaging_log.publish_pdf(handle.name)

    def test_publish_pdf_w_metadata_and_notes(self):
        # Metadata dicts and notes (a plain string or a list) are both accepted.
        with tempfile.TemporaryFile() as stream:
            self.dlog.publish_pdf(stream, metadata={'unit': 'TB1'}, notes='extra string')
        with tempfile.TemporaryFile() as stream:
            self.tlog.publish_pdf(stream, notes=['stuff', 'to', 'list'])
class LogPlottingSavingMixin:
    """Test the plotting methods and plot saving methods (mixed into TestCase classes)."""

    def test_plot_axes(self):
        leaf_axis = self.log.axis_data.mlc.leaf_axes[10]
        for plot_name in ('plot_actual', 'plot_expected', 'plot_difference'):
            getattr(leaf_axis, plot_name)()  # shouldn't raise

    def test_save_axes(self):
        # save matplotlib figures
        leaf_axis = self.log.axis_data.mlc.leaf_axes[10]
        for save_name in ('save_plot_actual', 'save_plot_expected', 'save_plot_difference'):
            save_file(getattr(leaf_axis, save_name))

    def test_fluence_plotting(self):
        # Actual fluence map, then gamma map plus its derived plots.
        actual = self.log.fluence.actual
        actual.calc_map()
        actual.plot_map()
        gamma = self.log.fluence.gamma
        gamma.calc_map()
        gamma.histogram()
        gamma.plot_histogram()
        gamma.plot_passfail_map()

    def test_saving_fluence_plots(self):
        gamma = self.log.fluence.gamma
        gamma.calc_map()
        save_file(gamma.save_map)
        save_file(gamma.save_histogram)

    def test_save_summary(self):
        self.log.fluence.gamma.calc_map()
        save_file(self.log.save_summary)
class TestTrajectoryTreatmentTypes(TestCase):
    """Verify that trajectory logs are classified with the correct treatment type.

    Bug fix: the original assertions used ``assertTrue(a, b)``, which treats the
    second argument as a failure *message*, so they passed for any truthy value of
    ``treatment_type``. Replaced with real equality checks (matching the pattern in
    ``IndividualLogBase.test_treatment_type``).
    """

    def test_imaging_log(self):
        tlog = TrajectoryLog(get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'imaging.bin']))
        self.assertEqual(tlog.treatment_type, TreatmentType.IMAGING.value)

    def test_vmat_log(self):
        tlog = TrajectoryLog(get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'vmat.bin']))
        self.assertEqual(tlog.treatment_type, TreatmentType.VMAT.value)

    def test_static_imrt_log(self):
        tlog = TrajectoryLog(get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'static_imrt.bin']))
        self.assertEqual(tlog.treatment_type, TreatmentType.STATIC_IMRT.value)

    def test_dynamic_imrt_log(self):
        tlog = TrajectoryLog(get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'dynamic_imrt.bin']))
        self.assertEqual(tlog.treatment_type, TreatmentType.DYNAMIC_IMRT.value)
class TestDynalogTreatmentTypes(TestCase):
    """Verify that dynalogs are classified with the correct treatment type.

    Bug fix: ``assertTrue(a, b)`` treats ``b`` as a failure message and passes for
    any truthy ``a``; replaced with real equality checks. Also compare against the
    enum ``.value`` for consistency with the trajectory-log tests and
    ``IndividualLogBase``.
    """

    def test_vmat_log(self):
        get_folder_from_cloud_test_repo(['mlc_logs', 'dlogs'])
        dlog = Dynalog(get_file_from_cloud_test_repo(['mlc_logs', 'dlogs', 'A_vmat.dlg']))
        self.assertEqual(dlog.treatment_type, TreatmentType.VMAT.value)

    def test_static_imrt_log(self):
        get_folder_from_cloud_test_repo(['mlc_logs', 'dlogs'])
        dlog = Dynalog(get_file_from_cloud_test_repo(['mlc_logs', 'dlogs', 'A_static_imrt.dlg']))
        self.assertEqual(dlog.treatment_type, TreatmentType.STATIC_IMRT.value)

    def test_dynamic_imrt_log(self):
        pass  # need to find one
class TestLoadLog(TestCase):
    """Tests for the polymorphic ``load_log`` factory and direct constructors."""

    def test_load_trajectory_log_from_file_object(self):
        # Loading from an open binary stream should match loading from the path.
        path = get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'dynamic_imrt.bin'])
        ref_log = TrajectoryLog(path)
        with open(path, 'rb') as stream:
            stream_log = TrajectoryLog(stream)
            self.assertIsInstance(stream_log, TrajectoryLog)
            self.assertEqual(stream_log.num_beamholds, ref_log.num_beamholds)

    def test_dynalog_file(self):
        dlg_path = get_file_from_cloud_test_repo(['mlc_logs', 'dlogs', 'A_static_imrt.dlg'])
        self.assertIsInstance(load_log(dlg_path), Dynalog)

    def test_tlog_file(self):
        bin_path = get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', 'dynamic_imrt.bin'])
        self.assertIsInstance(load_log(bin_path), TrajectoryLog)

    def test_url(self):
        # A URL pointing at a log file is also accepted.
        self.assertIsInstance(load_log(r'https://s3.amazonaws.com/pylinac/Tlog.bin'), TrajectoryLog)

    def test_dir(self):
        # A directory of logs loads as a MachineLogs collection.
        log_folder = get_folder_from_cloud_test_repo(['mlc_logs', 'dlogs'])
        self.assertIsInstance(load_log(log_folder), MachineLogs)

    def test_zip(self):
        # A zip archive of mixed log types also loads as MachineLogs.
        archive = get_file_from_cloud_test_repo(['mlc_logs', 'mixed_types.zip'])
        self.assertIsInstance(load_log(archive), MachineLogs)

    def test_invalid_file(self):
        # An existing file that is not a machine log is rejected.
        not_a_log = get_file_from_cloud_test_repo(['mlc_logs', 'Demo-subbeam-0-actual-fluence.npy'])
        with self.assertRaises(NotALogError):
            load_log(not_a_log)

    def test_invalid_path(self):
        # A nonexistent path is rejected.
        with self.assertRaises(NotALogError):
            load_log(r'nonexistant/path')
class LogBase:
    """Shared fixture/teardown and common tests for the TrajectoryLog/Dynalog test classes.

    Subclasses must set ``klass`` (the log class) and ``anon_file`` (an
    anonymizable fixture filename in the anonymous folder).
    """
    klass = object

    def setUp(self):
        self.log = self.klass.from_demo()
        # move over files from other directory, since the filenames get overridden
        # (anonymization renames files in place, so each test needs fresh copies)
        for file in os.listdir(ANONYMOUS_SOURCE_FOLDER):
            basefile = osp.join(ANONYMOUS_SOURCE_FOLDER, file)
            destfile = osp.join(ANONYMOUS_DEST_FOLDER, file)
            if not osp.isfile(destfile):
                shutil.copy(basefile, ANONYMOUS_DEST_FOLDER)

    @classmethod
    def tearDownClass(cls):
        # remove files from anonymous folder
        files = os.listdir(ANONYMOUS_DEST_FOLDER)
        # 'dummy.txt' is kept - presumably a placeholder so the folder persists; confirm
        files.remove('dummy.txt')
        for file in files:
            file = osp.join(ANONYMOUS_DEST_FOLDER, file)
            os.remove(file)

    def test_run_demo(self):
        # The packaged demo should run end-to-end without raising.
        self.log.run_demo()

    def test_anonymize(self):
        # In-place anonymization returns filenames carrying the given suffix.
        log_file = osp.join(ANONYMOUS_DEST_FOLDER, self.anon_file)
        log = self.klass(log_file)
        files = log.anonymize(inplace=True, suffix='inplace')
        # self.assertIsInstance(files, list)
        for file in files:
            self.assertTrue('inplace' in file)
class TestTrajectoryLog(LogPlottingSavingMixin, LogBase, TestCase, FromDemoImageTesterMixin, FromURLTesterMixin):
    """Trajectory-log-specific tests on top of the shared LogBase/plotting fixtures."""
    klass = TrajectoryLog
    demo_load_method = 'from_demo'
    url = 'Tlog.bin'  # consumed by FromURLTesterMixin
    anon_file = 'PatientID_4DC Treatment_JST90_TX_20140712094246.bin'

    def test_not_logs(self):
        # throw an error for files that aren't logs
        test_tlog = get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', "Anonymous_4DC_Treatment_JS0_TX_20140712095629.bin"])
        # a path that does not exist on disk...
        not_a_file = test_tlog.replace(".bin", 'blahblah.bin')
        self.assertRaises(IOError, TrajectoryLog, not_a_file)
        # ...and an existing file that is not a trajectory log at all
        not_a_log = get_file_from_cloud_test_repo(['VMAT', 'DRGSdmlc-105-example.dcm'])
        self.assertRaises(IOError, TrajectoryLog, not_a_log)

    def test_save_to_csv(self):
        save_file(self.log.to_csv)

    def test_txt_file_also_loads_if_around(self):
        # has a .txt file: the companion metadata is loaded as a dict
        _ = get_folder_from_cloud_test_repo(['mlc_logs', 'mixed_types'])
        log_with_txt = get_file_from_cloud_test_repo(['mlc_logs', 'mixed_types', "Anonymous_4DC Treatment_JST90_TX_20140712094246.bin"])
        log = TrajectoryLog(log_with_txt)
        self.assertIsNotNone(log.txt)
        self.assertIsInstance(log.txt, dict)
        self.assertEqual(log.txt['Patient ID'], 'Anonymous')
        # DOESN'T have a txt file: the attribute is simply None
        _ = get_folder_from_cloud_test_repo(['mlc_logs', 'tlogs'])
        log_no_txt = get_file_from_cloud_test_repo(['mlc_logs', 'tlogs', "Anonymous_4DC_Treatment_JS0_TX_20140712095629.bin"])
        log = TrajectoryLog(log_no_txt)
        self.assertIsNone(log.txt)
class TestDynalog(LogPlottingSavingMixin, LogBase, TestCase, FromDemoImageTesterMixin):
    """Dynalog-specific tests on top of the shared LogBase/plotting fixtures."""
    klass = Dynalog
    demo_load_method = 'from_demo'
    anon_file = 'A1234_patientid.dlg'

    def test_loading_can_find_paired_file(self):
        # get all the test files
        get_folder_from_cloud_test_repo(['mlc_logs', 'dlogs'])
        # Loading either half of an A/B dynalog pair should locate its partner
        # without raising.
        a_half = get_file_from_cloud_test_repo(['mlc_logs', 'dlogs', 'Adlog1.dlg'])
        Dynalog(a_half)
        b_half = get_file_from_cloud_test_repo(['mlc_logs', 'dlogs', 'Bdlog1.dlg'])
        Dynalog(b_half)

    def test_loading_bad_names(self):
        # An A-file without its B partner (and vice versa) cannot be paired.
        orphan_a = get_file_from_cloud_test_repo(['mlc_logs', 'a_no_b_dir', 'Adlog1.dlg'])
        self.assertRaises(DynalogMatchError, Dynalog, orphan_a)
        orphan_b = get_file_from_cloud_test_repo(['mlc_logs', 'b_no_a_dir', 'Bdlog1.dlg'])
        self.assertRaises(DynalogMatchError, Dynalog, orphan_b)
        # A file whose name encodes neither A nor B is rejected outright.
        misnamed = get_file_from_cloud_test_repo(['mlc_logs', 'bad_names', 'bad_name_dlg.dlg'])
        self.assertRaises(ValueError, Dynalog, misnamed)
class IndividualLogBase(CloudFileMixin):
    """Mixin to use when testing a single machine log; must be mixed with unittest.TestCase."""
    # Expected values for the log under test; subclasses override per log file.
    num_mlc_leaves = 120
    num_snapshots = 0
    num_beamholds = 0
    num_moving_leaves = 0
    treatment_type = ''
    dir_path = ['mlc_logs']  # cloud-test-repo path; consumed by CloudFileMixin
    static_axes = []
    moving_axes = []
    leaf_move_status = {'moving': tuple(), 'static': tuple()}
    average_rms = 0
    maximum_rms = 0
    average_gamma = 0
    percent_pass_gamma = 100
    mu_delivered = 0

    @classmethod
    def setUpClass(cls):
        cls.log = load_log(cls.get_filename())
        # Skip gamma calculation for imaging-type logs.
        if cls.log.treatment_type != TreatmentType.IMAGING.value:
            cls.log.fluence.gamma.calc_map()

    def test_num_leaves(self):
        """Test the number of MLC leaves and pairs."""
        self.assertEqual(self.log.header.num_mlc_leaves, self.num_mlc_leaves)

    def test_treatment_type(self):
        """Test the treatment type."""
        self.assertEqual(self.treatment_type, self.log.treatment_type)

    def test_rms_error(self):
        """Test the average and maximum RMS errors."""
        self.assertAlmostEqual(self.log.axis_data.mlc.get_RMS_avg(), self.average_rms, delta=0.01)
        self.assertAlmostEqual(self.log.axis_data.mlc.get_RMS_max(), self.maximum_rms, delta=0.01)

    def test_fluence_gamma(self):
        """Test gamma results for fluences."""
        if self.log.treatment_type != TreatmentType.IMAGING.value:
            self.assertAlmostEqual(self.log.fluence.gamma.avg_gamma, self.average_gamma, delta=0.02)
            self.assertAlmostEqual(self.log.fluence.gamma.pass_prcnt, self.percent_pass_gamma, delta=0.1)

    def test_mu_delivered(self):
        """Test the number of MU delivered during the log."""
        self.assertAlmostEqual(self.log.axis_data.mu.actual[-1], self.mu_delivered, delta=1)

    def test_static_axes(self):
        """Test that certain axes did not move during treatment."""
        for axis_name in self.static_axes:
            axis = getattr(self.log.axis_data, axis_name)
            self.assertFalse(axis.moved)

    def test_leaf_moved_status(self):
        """Test that the given leaves either moved or did not move."""
        moving_leaves = self.leaf_move_status['moving']
        for leaf in moving_leaves:
            self.assertTrue(self.log.axis_data.mlc.leaf_moved(leaf))
        static_leaves = self.leaf_move_status['static']
        for leaf in static_leaves:
            self.assertFalse(self.log.axis_data.mlc.leaf_moved(leaf))

    def test_publish_pdf(self):
        # PDF generation should complete without raising.
        with io.BytesIO() as temp:
            self.log.publish_pdf(temp)
class IndividualTrajectoryLog(IndividualLogBase):
    """Adds trajectory-log header/subbeam checks; subclasses override the expected values."""
    version = 2.1  # or 3.0
    header = 'VOSTL'
    header_size = 1024
    sampling_interval = 20
    num_axes = 14
    axis_scale = 1
    num_subbeams = 0
    is_truncated = 0
    mlc_model = 2
    first_subbeam_data = {'gantry_angle': 0, 'collimator_angle': 0, 'jaw_x1': 0, 'jaw_x2': 0, 'jaw_y1': 0, 'jaw_y2': 0}

    def test_first_subbeam_data(self):
        """Test the first subbeam data."""
        first_subbeam = self.log.subbeams[0]
        for key, known_value in self.first_subbeam_data.items():
            axis = getattr(first_subbeam, key)
            self.assertAlmostEqual(known_value, axis.actual, delta=0.1)

    def test_subbeam_fluences_unequal_to_cumulative(self):
        # as raised in #154
        # Each subbeam's individual fluence map must differ from the cumulative
        # map of the whole log (they were once erroneously identical).
        if self.num_subbeams > 1:
            cumulative_fluence = self.log.fluence.actual.calc_map()
            subbeam_fluences = [subbeam.fluence.actual.calc_map() for subbeam in self.log.subbeams]
            if len(self.log.subbeams) > 0:
                for subbeam_fluence in subbeam_fluences:
                    self.assertFalse(np.array_equal(subbeam_fluence, cumulative_fluence))

    def test_header(self):
        """Test a few header values; depends on log type."""
        header = self.log.header
        self.assertEqual(header.version, self.version)
        self.assertEqual(header.header, self.header)
        self.assertEqual(header.header_size, self.header_size)
        self.assertEqual(header.sampling_interval, self.sampling_interval)
        self.assertEqual(header.num_axes, self.num_axes)
        self.assertEqual(header.axis_scale, self.axis_scale)
        self.assertEqual(header.num_subbeams, self.num_subbeams)
        self.assertEqual(header.is_truncated, self.is_truncated)
        self.assertEqual(header.mlc_model, self.mlc_model)

    def test_num_snapshots(self):
        """Test the number of snapshots in the log."""
        self.assertEqual(self.log.header.num_snapshots, self.num_snapshots)

    def test_num_beamholds(self):
        """Test the number of times the beam was held in the log."""
        self.assertEqual(self.log.num_beamholds, self.num_beamholds)
class TestTrajectoryLogV4(IndividualTrajectoryLog, TestCase):
    """Checks a version-4.0 trajectory log against its known header/delivery values."""
    version = 4.0
    dir_path = ['mlc_logs', 'tlogs']
    file_name = 'v4_log.bin'
    header = 'VOSTL'
    header_size = 1024
    sampling_interval = 20
    num_axes = 16
    mu_delivered = 100
    num_snapshots = 506
    axis_scale = 1
    num_subbeams = 1
    treatment_type = TreatmentType.STATIC_IMRT.value
    is_truncated = 0
    mlc_model = 2
    first_subbeam_data = {'gantry_angle': 180, 'collimator_angle': 270, 'jaw_x1': 10, 'jaw_x2': 10, 'jaw_y1': 10, 'jaw_y2': 10}
    plan_name = '4DC Treatment'

    def test_metadata(self):
        # v4 logs carry a metadata section including the plan name.
        self.assertEqual(self.log.header.metadata.plan_name, self.plan_name)
class IndividualDynalog(IndividualLogBase):
    """Shared known values and checks for individual dynalog files."""
    tolerance = 102
    clinac_scale = 1
    mu_delivered = 25000
    version = 'B'
    def test_num_snapshots(self):
        """Test the number of snapshots in the log."""
        actual = self.log.axis_data.num_snapshots
        self.assertEqual(actual, self.num_snapshots)
    def test_num_beamholds(self):
        """Test the number of times the beam was held in the log."""
        actual = self.log.num_beamholds
        self.assertEqual(actual, self.num_beamholds)
class TestDynalogDemo(IndividualDynalog, TestCase):
    """Tests of the dynalog demo."""
    treatment_type = TreatmentType.DYNAMIC_IMRT.value
    num_beamholds = 20
    num_snapshots = 99
    average_rms = 0.04
    maximum_rms = 0.07
    average_gamma = 0.47
    percent_pass_gamma = 91
    leaf_move_status = {'moving': (9, 3), 'static': (8, )}
    delete_file = False
    @classmethod
    def setUpClass(cls):
        # Load the demo log once for the whole class and pre-compute gamma.
        cls.log = Dynalog.from_demo()
        cls.log.fluence.gamma.calc_map()
    def test_fluences(self):
        """The computed actual fluence must match the stored reference array."""
        reference = np.load(get_file_from_cloud_test_repo(['mlc_logs', 'Dynalog-demo-actual-fluence.npy']))
        self.log.fluence.actual.calc_map()
        computed = self.log.fluence.actual.array
        self.assertTrue(np.array_equal(computed, reference))
class TestTrajectoryLogDemo(IndividualTrajectoryLog, TestCase):
    """Tests for the demo trajectory log."""
    num_snapshots = 5200  # excluded: 1021
    num_subbeams = 2
    num_beamholds = 19
    mlc_model = 3
    treatment_type = TreatmentType.DYNAMIC_IMRT.value
    static_axes = ['collimator']
    moving_axes = ['gantry']
    average_rms = 0.001
    maximum_rms = 0.002
    percent_pass_gamma = 100
    mu_delivered = 183
    first_subbeam_data = {'gantry_angle': 310, 'collimator_angle': 180, 'jaw_x1': 3.7, 'jaw_x2': 3.4, 'jaw_y1': 3.8,
                          'jaw_y2': 3.9}
    delete_file = False
    @classmethod
    def setUpClass(cls):
        # Load the demo log once for the whole class and pre-compute gamma.
        cls.log = TrajectoryLog.from_demo()
        cls.log.fluence.gamma.calc_map()
    def test_subbeam_fluences(self):
        """Each subbeam's actual fluence must match its stored reference array."""
        # Loop over the subbeams rather than duplicating the assertion block
        # per subbeam index (the original had a copy-pasted block for 0 and 1).
        for idx in range(self.num_subbeams):
            ref_name = 'Demo-subbeam-{}-actual-fluence.npy'.format(idx)
            reference_fluence = np.load(get_file_from_cloud_test_repo(['mlc_logs', ref_name]))
            self.log.subbeams[idx].fluence.actual.calc_map()
            demo_fluence = self.log.subbeams[idx].fluence.actual.array
            self.assertTrue(np.array_equal(demo_fluence, reference_fluence))
    def test_calc_gamma_early_fails(self):
        """Plotting a gamma map before it has been calculated must raise."""
        log = TrajectoryLog.from_demo()
        with self.assertRaises(ValueError):
            log.fluence.gamma.plot_map()
class TestMachineLogs(TestCase):
    """Tests for loading and operating on collections of machine logs."""
    @property
    def logs_dir(self):
        # Folder containing a mix of trajectory logs and dynalogs.
        return get_folder_from_cloud_test_repo(['mlc_logs', 'mixed_types'])
    def test_loading(self):
        """Logs load from a flat dir, recursively, and from a zip archive."""
        # test root level directory
        logs = MachineLogs(self.logs_dir, recursive=False)
        self.assertEqual(logs.num_logs, 3)
        # test recursive
        logs = MachineLogs(self.logs_dir)
        self.assertEqual(logs.num_logs, 3)
        # test using zip file
        zfile = get_file_from_cloud_test_repo(['mlc_logs', 'mixed_types.zip'])
        logs = MachineLogs.from_zip(zfile)
        self.assertEqual(logs.num_logs, 3)
    def test_basic_parameters(self):
        # no real test other than to make sure it works
        logs = MachineLogs(self.logs_dir)
        logs.report_basic_parameters()
    def test_num_logs(self):
        """Log counts, overall and by type, must be reported correctly."""
        logs = MachineLogs(self.logs_dir, recursive=False)
        self.assertEqual(logs.num_logs, 3)
        self.assertEqual(logs.num_tlogs, 2)
        self.assertEqual(logs.num_dlogs, 1)
    def test_empty_dir(self):
        """An empty directory yields zero logs and avg_gamma raises."""
        empty_dir = get_folder_from_cloud_test_repo(['mlc_logs', 'empty_dir'])
        logs = MachineLogs(empty_dir)
        self.assertEqual(logs.num_logs, 0)
        with self.assertRaises(ValueError):
            logs.avg_gamma()
    def test_mixed_types(self):
        """test mixed directory (tlogs & dlogs)"""
        log_dir = get_folder_from_cloud_test_repo(['mlc_logs', 'mixed_types'])
        logs = MachineLogs(log_dir)
        self.assertEqual(logs.num_logs, 3)
    def test_dlog_matches_missing(self):
        """Test that Dlogs without a match are skipped."""
        log_dir = get_folder_from_cloud_test_repo(['mlc_logs', 'some_matches_missing'])
        logs = MachineLogs(log_dir)
        self.assertEqual(logs.num_logs, 1)
    def test_append(self):
        """Logs can be appended from a dir, a file path, or a log instance."""
        # append a directory
        logs = MachineLogs(get_folder_from_cloud_test_repo(['mlc_logs', 'altdir']))
        logs.append(get_folder_from_cloud_test_repo(['mlc_logs', 'altdir']))
        self.assertEqual(logs.num_logs, 8)
        # append a file string
        single_file = get_file_from_cloud_test_repo(['mlc_logs', 'altdir', 'Anonymous_4DC Treatment_JST90_TX_20140712094246.bin'])
        logs.append(single_file)
        # append a MachineLog
        single_log = load_log(single_file)
        logs.append(single_log)
        # try to append something that's not a Log
        log = None
        with self.assertRaises(TypeError):
            logs.append(log)
    def test_avg_gamma(self):
        """Average gamma over the mixed logs should be ~0."""
        logs = MachineLogs(self.logs_dir, recursive=False)
        gamma = logs.avg_gamma()
        self.assertAlmostEqual(gamma, 0, delta=0.002)
    def test_avg_gamma_pct(self):
        """Average gamma pass percentage should be ~100."""
        logs = MachineLogs(self.logs_dir, recursive=False)
        gamma = logs.avg_gamma_pct()
        self.assertAlmostEqual(gamma, 100, delta=0.01)
    def test_writing_to_csv(self):
        """to_csv returns the list of files written; clean them up afterward."""
        logs = MachineLogs(self.logs_dir, recursive=False)
        files = logs.to_csv()
        self.assertIsInstance(files, list)
        # clean up by deleting files
        for file in files:
            os.remove(file)
    def test_writing_csv_with_no_logs(self):
        empty_dir = get_folder_from_cloud_test_repo(['mlc_logs', 'empty_dir'])
        logs = MachineLogs(empty_dir)
        logs.to_csv()  # shouldn't raise but will print a statement
    def test_anonymize(self):
        """Anonymizing out-of-place returns the list of new files; clean them up."""
        logs = MachineLogs(self.logs_dir, recursive=False)
        anon_files = logs.anonymize(inplace=False, suffix='_suffixed')
        self.assertIsInstance(anon_files, list)
        # cleanup; the walk variables are named so they don't shadow `anon_files`
        # (the original reused the name `files` for both).
        for pdir, _sdirs, fnames in os.walk(self.logs_dir):
            to_remove = [fname for fname in fnames if 'suffixed' in fname]
            for fname in to_remove:
                os.remove(osp.join(pdir, fname))
|
{"hexsha": "13b09ddd193b1f02e71c369fb1559b2d1018a055", "size": 24232, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests_basic/test_logs.py", "max_stars_repo_name": "mitcgs/pylinac", "max_stars_repo_head_hexsha": "e36a531b2db72f7d2bd0a754125c6c92ae60e8e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 100, "max_stars_repo_stars_event_min_datetime": "2015-03-05T02:22:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T19:13:01.000Z", "max_issues_repo_path": "tests_basic/test_logs.py", "max_issues_repo_name": "mitcgs/pylinac", "max_issues_repo_head_hexsha": "e36a531b2db72f7d2bd0a754125c6c92ae60e8e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 322, "max_issues_repo_issues_event_min_datetime": "2015-01-26T20:05:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T14:11:21.000Z", "max_forks_repo_path": "tests_basic/test_logs.py", "max_forks_repo_name": "mitcgs/pylinac", "max_forks_repo_head_hexsha": "e36a531b2db72f7d2bd0a754125c6c92ae60e8e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 78, "max_forks_repo_forks_event_min_datetime": "2015-04-20T19:35:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T00:06:44.000Z", "avg_line_length": 38.7712, "max_line_length": 136, "alphanum_fraction": 0.6835176626, "include": true, "reason": "import numpy", "num_tokens": 5952}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, TEAMPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
# import numpy as np
from datetime import timedelta,datetime
from frappe.utils import cint, getdate, get_datetime
class TAGMaster(Document):
    """TAG Master doctype controller: quantity deltas, QR parsing and delay status."""
    def validate(self):
        # Difference between required and SAP quantities; readiness gap needs all three.
        if self.required_quantity and self.sap_quantity:
            self.difference = int(self.required_quantity) - int(self.sap_quantity)
        if self.required_quantity and self.sap_quantity and self.readiness_qty:
            self.readiness_diff = int(self.readiness_qty) - int(self.required_quantity)
    def parts(self):
        """Extract the part-number and quantity substrings from ``self.qr``.

        The part number spans from the first 'P' up to (not including) the
        first 'V'; the quantity spans from the first 'Q' up to the *last* 'K'.
        Returns a ``(parts_no, qty)`` tuple of raw substrings.
        """
        qr_code = self.qr
        parts_no = qr_code[qr_code.find('P'):qr_code.find('V')]
        # rfind: use the last 'K' as the terminator so a 'K' inside the code
        # does not truncate the quantity. (The original computed a find('K')
        # variant first and immediately overwrote it — dead code, removed.)
        qty = qr_code[qr_code.find('Q'):qr_code.rfind('K')]
        return parts_no, qty
    # @frappe.whitelist()
    def get_delay(self):
        """Return ``(status, time_taken)`` where status is 'Delay' if the time
        since ``self.date_and_time`` exceeds the configured delay duration,
        otherwise 'On Time Sent'.
        """
        status = 'On Time Sent'
        delay = frappe.db.get_value("TAG Monitoring Management", None, "delay_duration")
        tag_master_time = datetime.strptime(self.date_and_time, '%Y-%m-%d %H:%M:%S')
        submission_time = datetime.now()
        time_taken = submission_time - tag_master_time
        frappe.errprint(time_taken)
        # delay_duration is stored in seconds; cint guards against None/str values.
        allowed_delay_duration = timedelta(seconds=cint(delay))
        if time_taken > allowed_delay_duration:
            status = 'Delay'
        return status, time_taken
|
{"hexsha": "1cafba617448a95087e91ecb08987c6976426403", "size": 1717, "ext": "py", "lang": "Python", "max_stars_repo_path": "thaisummit/thaisummit/doctype/tag_master/tag_master.py", "max_stars_repo_name": "thispl/thaisummit", "max_stars_repo_head_hexsha": "697a43068a87916dedf1e8de10249152a9fd2735", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thaisummit/thaisummit/doctype/tag_master/tag_master.py", "max_issues_repo_name": "thispl/thaisummit", "max_issues_repo_head_hexsha": "697a43068a87916dedf1e8de10249152a9fd2735", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thaisummit/thaisummit/doctype/tag_master/tag_master.py", "max_forks_repo_name": "thispl/thaisummit", "max_forks_repo_head_hexsha": "697a43068a87916dedf1e8de10249152a9fd2735", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0408163265, "max_line_length": 87, "alphanum_fraction": 0.6482236459, "include": true, "reason": "import numpy", "num_tokens": 402}
|
import numpy as np
from mne.utils import logger
class searchlight:
    """Generate indices for searchlight patches.
    Generates a sequence of tuples that can be used to index a data array.
    Depending on the spatial and temporal radius, each tuple extracts a
    searchlight patch along time, space or both.
    This function is flexible in regards to shape of the data array. The
    intepretation of the dimensions is as follows::
        4 or more dimensions
            ``(n_folds, n_items, n_series, n_samples, ...)``
        3 dimensions
            ``(n_items, n_series, n_samples)``
        2 dimensions
            ``(n_items, n_series)`` when ``spatial_radius`` is not ``None``.
            ``(n_items, n_samples)`` when ``temporal_radius`` is not ``None``.
        1 dimension
            ``(n_items,)``
    The returned tuples will match the dimensions of the data array.
    Parameters
    ----------
    shape : tuple of int
        The shape of the data array to compute the searchlight patches for, as
        obtained with the ``.shape`` attribute.
    dist : ndarray or sparse matrix, shape (n_series, n_series) | None
        The distances between all source points or sensors in meters.
        This parameter needs to be specified if a ``spatial_radius`` is set.
        Since the distance matrix can be huge, sparse matrices are also
        supported. When the distance matrix is sparse, all zero distances are
        treated as infinity. This allows you to skip far away points during
        your distance computations.
        Defaults to ``None``.
    spatial_radius : floats | None
        The spatial radius of the searchlight patch in meters. All source
        points within this radius will belong to the searchlight patch. Set to
        None to only perform the searchlight over time. When this parameter is
        set, the ``dist`` parameter must also be specified. Defaults to
        ``None``.
    temporal_radius : float | None
        The temporal radius of the searchlight patch in samples. Set to
        ``None`` to only perform the searchlight over sensors/source points.
        Defaults to ``None``.
    sel_series : ndarray, shape (n_selected_series,) | None
        When set, searchlight patches will only be generated for the subset of
        time series with the given indices. Defaults to ``None``, in which case
        patches for all series are generated.
    samples_from : int
        When set, searchlight patches will only be generated for the subset of
        time samples with indices equal or greater than the given value. Only
        used when the given data shape includes a temporal dimension.
        Defaults to 0.
    samples_to : int
        When set, searchlight patches will only be generated for the subset of
        time samples with indices up to, but not including, the given value.
        Only used when the given data shape includes a temporal dimension.
        Defaults to -1, which means there is no upper bound.
    Yields
    ------
    patch : tuple of (slice | ndarray)
        A single searchlight patch. Each element of the tuple corresponds to a
        dimension of the data array and can be used to index along this
        dimension to extract the searchlight patch.
    Attributes
    ----------
    shape
    """
    def __init__(self, shape, dist=None, spatial_radius=None,
                 temporal_radius=None, sel_series=None,
                 samples_from=0, samples_to=-1):
        # Interpret the dimensions of the data array (see docstring)
        n_dims = len(shape)
        if n_dims >= 4:
            self.series_dim = 2
            self.samples_dim = 3
        elif n_dims == 3:
            self.series_dim = 1
            self.samples_dim = 2
        elif n_dims == 2 and spatial_radius is not None:
            self.series_dim = 1
            self.samples_dim = None
        elif n_dims == 2 and temporal_radius is not None:
            self.series_dim = None
            self.samples_dim = 1
        else:
            # 1-D data (or 2-D with neither radius set): no spatial or
            # temporal dimension to search over.
            self.series_dim = None
            self.samples_dim = None
        self.dist = dist
        self.spatial_radius = spatial_radius
        self.temporal_radius = temporal_radius
        self.sel_series = sel_series
        # Boundry checking for samples_from and samples_to. Only relevant if
        # there is a temporal dimension to the data.
        if samples_from != 0 or samples_to != -1:
            if self.samples_dim is None:
                raise ValueError('Cannot select samples:'
                                 f'the provided data shape {shape} has no '
                                 'temporal dimension.')
            n_samples = shape[self.samples_dim]
            if samples_from < 0 or samples_from > n_samples:
                raise ValueError(f'`samples_from={samples_from}` is out '
                                 f'of bounds given data shape ({shape}).')
            if samples_to > n_samples:
                raise ValueError(f'`samples_to={samples_to}` is out '
                                 f'of bounds given data shape ({shape}).')
            if samples_to != -1 and samples_to < samples_from:
                raise ValueError(f'`samples_to={samples_to} is smaller '
                                 f'than `samples_from={samples_from}.')
        self.samples_from = samples_from
        self.samples_to = samples_to
        # Will we be creating spatial searchlight patches?
        if self.spatial_radius is not None:
            if self.dist is None:
                raise ValueError('A spatial radius was requested, but no '
                                 'distance information was specified '
                                 '(=dist parameter).')
            if self.series_dim is None:
                raise ValueError('Cannot create spatial searchlight patches: '
                                 f'the provided data shape ({shape}) has no '
                                 'spatial dimension.')
            if self.sel_series is None:
                # Default to generating a patch around every series.
                self.sel_series = np.arange(shape[self.series_dim])
            # Compressed Sparse Row format is optimal for our computations
            from scipy.sparse import issparse
            if issparse(self.dist):
                self.dist = self.dist.tocsr()
        # Will we be creating temporal searchlight patches?
        if temporal_radius is not None:
            if self.samples_dim is None:
                raise ValueError('Cannot create temporal searchlight patches: '
                                 f'the provided data shape ({shape}) has no '
                                 'temporal dimension.')
            n_samples = shape[self.samples_dim]
            # Compute the centers of the searchlight patches in time. Make sure
            # that adding/subtracting the temporal_radius does not produce
            # array out of bounds errors.
            samples_min = temporal_radius
            samples_max = n_samples - temporal_radius
            if samples_min > samples_max:
                raise ValueError(
                    f'Temporal radius ({temporal_radius}) too large for the '
                    f'given data shape ({shape}).')
            self.time_centers = list(range(
                np.clip(samples_from, samples_min, samples_max),
                np.clip(n_samples if samples_to == -1 else samples_to,
                        samples_min, samples_max)
            ))
        # Create a template for the patches that will be generated that is
        # compatible with the data array dimensions. By default, we select
        # everything along every dimension, taking `sel_series`, `samples_from`
        # and `samples_to` into account. This template will be filled-in inside
        # the __iter__ function.
        self.patch_template = [slice(None)] * n_dims
        if self.sel_series is not None:
            if self.series_dim is None:
                raise ValueError('Cannot select series:'
                                 f'the provided data shape {shape} has no '
                                 'spatial dimension.')
            self.patch_template[self.series_dim] = self.sel_series
        if self.samples_from != 0 or self.samples_to != -1:
            if self.samples_dim is None:
                raise ValueError('Cannot select samples:'
                                 f'the provided data shape {shape} has no '
                                 'temporal dimension.')
            self.patch_template[self.samples_dim] = slice(self.samples_from,
                                                          self.samples_to)
        # Setup the main generator function that will be providing the
        # searchlight patches.
        if (self.spatial_radius is not None
                and self.temporal_radius is not None):
            self._generator = self._iter_spatio_temporal()
        elif self.spatial_radius is not None:
            self._generator = self._iter_spatial()
        elif self.temporal_radius is not None:
            self._generator = self._iter_temporal()
        else:
            # Single searchlight patch only
            self._generator = iter([tuple(self.patch_template)])
    def __iter__(self):
        """Return the iterator; the object is its own (single-pass) iterator."""
        return self
    def __next__(self):
        """Generate searchlight patches."""
        return next(self._generator)
    def _iter_spatio_temporal(self):
        """Generate spatio-temporal searchlight patches."""
        logger.info('Creating spatio-temporal searchlight patches')
        patch = list(self.patch_template)  # Copy the template
        for series in self.sel_series:
            # Compute all spatial locations in the searchligh path.
            spat_ind = _get_in_radius(self.dist, series, self.spatial_radius)
            patch[self.series_dim] = spat_ind
            for sample in self.time_centers:
                # Temporal window is inclusive of both radius endpoints.
                temp_ind = slice(sample - self.temporal_radius,
                                 sample + self.temporal_radius + 1)
                patch[self.samples_dim] = temp_ind
                yield tuple(patch)
    def _iter_spatial(self):
        """Generate spatial searchlight patches only."""
        logger.info('Creating spatial searchlight patches')
        patch = list(self.patch_template)  # Copy the template
        for series in self.sel_series:
            spat_ind = _get_in_radius(self.dist, series, self.spatial_radius)
            patch[self.series_dim] = spat_ind
            yield tuple(patch)
    def _iter_temporal(self):
        """Generate temporal searchlight patches only."""
        logger.info('Creating temporal searchlight patches')
        patch = list(self.patch_template)  # Copy the template
        for sample in self.time_centers:
            patch[self.samples_dim] = slice(sample - self.temporal_radius,
                                            sample + self.temporal_radius + 1)
            yield tuple(patch)
    @property
    def shape(self):
        """Number of generated patches along multiple dimensions.
        This is useful for re-shaping the result obtained after consuming the
        this generator.
        Returns
        -------
        shape : tuple of int
            For a spatio-temporal searchlight:
                Two elements: the number of time-series and number of time
                samples for which patches are generated.
            For a spatial searchlight:
                One element: the number of time-series for which patches are
                generated.
            For a temporal searchlight:
                One element: the number of time-samples for which patches are
                generated.
            For no searchlight:
                Zero elements.
        """
        if (self.spatial_radius is not None
                and self.temporal_radius is not None):
            return (len(self.sel_series), len(self.time_centers))
        elif self.spatial_radius is not None:
            return (len(self.sel_series),)
        elif self.temporal_radius is not None:
            return (len(self.time_centers),)
        else:
            return tuple()
    def __len__(self):
        """Get total number of searchlight patches that will be generated."""
        total = 1
        for n in self.shape:  # Number of patches generated in each dimension
            total *= n
        return total
def _get_in_radius(dist, seed, radius):
"""Obtain indices for all points within the given radius from a seed point.
Takes care to work with sparse matrices too.
Parameters
----------
dist : ndarray or sparse matrix, shape (n_points, n_points)
The distances between all points.
seed : int
The index of the point used as a seed.
radius : float
The maximum distance that points can be to be included.
Returns
-------
ind : ndarray, shape (n_points_in_radius,)
Indices of all points in the given radius from the seed point.
"""
from scipy.sparse import issparse
if issparse(dist):
# Treat all zero distances as missing data
ind = dist[seed].nonzero()[1]
# Find indices for points within the radius
ind = ind[dist[seed].data < radius]
ind.sort()
# Be sure to add the seed point, which has distance of 0 to itself
ind = np.hstack((ind, [seed])) # Di
else:
ind = np.flatnonzero(dist[seed] < radius)
return sorted(ind)
|
{"hexsha": "be3b64dec94886b81b5f974cfe8fa458757c8bfc", "size": 13402, "ext": "py", "lang": "Python", "max_stars_repo_path": "mne_rsa/searchlight.py", "max_stars_repo_name": "Yuan-fang/mne-rsa", "max_stars_repo_head_hexsha": "c1638fa985e13cf5729eb9ef8f3caaaa3f5b0b23", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mne_rsa/searchlight.py", "max_issues_repo_name": "Yuan-fang/mne-rsa", "max_issues_repo_head_hexsha": "c1638fa985e13cf5729eb9ef8f3caaaa3f5b0b23", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mne_rsa/searchlight.py", "max_forks_repo_name": "Yuan-fang/mne-rsa", "max_forks_repo_head_hexsha": "c1638fa985e13cf5729eb9ef8f3caaaa3f5b0b23", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0932475884, "max_line_length": 79, "alphanum_fraction": 0.6034174004, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2689}
|
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import json
import requests
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import f1_score
from scipy.stats import pearsonr
# Reference epoch: all date columns are converted to integer day offsets from this date.
BASE_DATE = pd.to_datetime('1/1/2016')
def load_data(filename_train: str, filename_test: str):
    """
    Load the Agoda booking-cancellation train and test sets and build features.

    The two CSVs are concatenated (flagged by an ``is_train`` column) so that
    date conversion, row filtering and one-hot encoding are applied
    consistently to both, then split back apart at the end.

    Parameters
    ----------
    filename_train: str
        Path to the training-set CSV.
    filename_test: str
        Path to the test-set CSV.

    Returns
    -------
    Tuple of (train feature DataFrame, train label Series (1 = cancelled,
    0 = not cancelled), test feature DataFrame).
    """
    full_data_train = pd.read_csv(filename_train).drop_duplicates()
    full_data_train = full_data_train.assign(is_train=np.ones(full_data_train.values.shape[0]))
    full_data_test = pd.read_csv(filename_test).drop_duplicates()
    full_data_test = full_data_test.assign(is_train=np.zeros(full_data_test.values.shape[0]))
    full_data = pd.concat([full_data_train, full_data_test], ignore_index=True)
    for d in ["booking_datetime", "checkin_date", "checkout_date", "hotel_live_date", "cancellation_datetime"]:
        full_data[d] = pd.to_datetime(full_data[d])
    # Missing values (notably absent cancellation dates) are encoded as 0.
    full_data = full_data.fillna(0)
    # Convert every column whose name contains "date" to days-since-BASE_DATE
    # so downstream models receive numeric values.
    index_date = np.flatnonzero(np.core.defchararray.find(full_data.columns.values.astype(str), "date") != -1)
    vec_date_to_numeric = np.vectorize(date_to_numeric)
    mat = vec_date_to_numeric(full_data.values[:, index_date.astype(int)])
    for i, feature in enumerate(full_data.columns.values[index_date]):
        full_data = full_data.drop(feature, axis=1)
        full_data.insert(int(index_date[i]), feature, mat[:, i], True)
    # Derive interval features and drop clearly invalid rows.
    full_data = parse_all_dates(full_data)
    full_data = drop_unvalid_records(full_data)
    # Selected feature set; identifier/code columns (h_booking_id, hotel codes,
    # nationality/language codes, raw date columns, ...) are intentionally left out.
    features = full_data[['hotel_star_rating', 'hotel_booking', 'booking_check_in',
                          'guest_is_not_the_customer', 'check_in_check_out',
                          'no_of_adults', 'no_of_children',
                          'no_of_extra_bed', 'no_of_room',
                          'original_selling_amount', 'is_user_logged_in', 'request_airport',
                          'cancellation_policy_code', 'is_first_booking', 'request_nonesmoke',
                          'request_latecheckin', 'request_highfloor', 'request_largebed',
                          'request_twinbeds', 'request_earlycheckin', "is_train"]]
    dummies = ['accommadation_type_name', 'original_payment_method', 'charge_option',
               'original_payment_type', 'guest_nationality_country_name']
    # NOTE(review): the encoder is fit on train+test combined; for a strict
    # evaluation protocol it should be fit on the training rows only.
    ohe = OneHotEncoder(handle_unknown='ignore')
    features = pd.concat([features, pd.DataFrame(ohe.fit_transform(full_data[dummies]).toarray(), index=full_data.index, dtype=int)], axis=1)
    features = parse_cancellation_policy(features)
    features_train = features[features['is_train'] == 1]
    features_test = features[features['is_train'] == 0]
    features_test = features_test.drop("is_train", axis=1)
    features_train = features_train.drop("is_train", axis=1)
    # Binary label: any non-zero cancellation date means the booking was cancelled.
    labels = full_data[['cancellation_datetime', 'is_train']]
    labels = labels[labels['is_train'] == 1]
    # .copy() avoids mutating a view of full_data (SettingWithCopy) below.
    labels = labels['cancellation_datetime'].copy()
    labels[labels != 0] = 1
    return features_train, labels, features_test
def date_to_numeric(date):
    """Convert a timestamp to days since BASE_DATE; the 0 sentinel (missing) stays 0."""
    return 0 if date == 0 else (date - BASE_DATE).days
def parse_all_dates(all_data):
    """Derive interval features from the (already numeric) date columns.

    Adds: hotel_booking (live -> booking), booking_check_in (booking -> checkin),
    check_in_check_out (stay length) and booking_cancellation (product of the
    checkout/booking offsets from the cancellation date).
    """
    derived = {
        'hotel_booking': all_data['booking_datetime'] - all_data['hotel_live_date'],
        'booking_check_in': all_data['checkin_date'] - all_data['booking_datetime'],
        'check_in_check_out': all_data['checkout_date'] - all_data['checkin_date'],
        'booking_cancellation': ((all_data['checkout_date'] - all_data['cancellation_datetime'])
                                 * (all_data['booking_datetime'] - all_data['cancellation_datetime'])),
    }
    return all_data.assign(**derived)
def drop_unvalid_records(all_data):
    """Drop rows with negative interval/count features or >= 30 adults."""
    non_negative_cols = ['hotel_booking', 'booking_check_in', 'check_in_check_out',
                         'no_of_adults', 'no_of_children', 'no_of_extra_bed',
                         'no_of_room', 'hotel_star_rating']
    keep = pd.Series(True, index=all_data.index)
    for col in non_negative_cols:
        keep &= all_data[col].values >= 0
    keep &= all_data['no_of_adults'].values < 30
    return all_data[keep]
def parse_cancellation_policy(dataframe):
    """Expand the raw ``cancellation_policy_code`` column into 8 numeric columns.

    Each code is parsed by ``parse_one_cancellation_policy`` into an 8-element
    vector; each component becomes its own ``cpc_*`` column and the raw column
    is dropped. Returns a new DataFrame.
    """
    vec_parse_cancellation = np.vectorize(parse_one_cancellation_policy)
    mat = vec_parse_cancellation(dataframe["cancellation_policy_code"])
    # `excluded=[1]` keeps the component index `i` as a scalar while the parsed
    # vectors are broadcast.
    split_cancellation_policy = np.vectorize(cancellation_policy_index, excluded=[1])
    # d/p/n presumably mean days-before / percent-fee / nights-fee for the two
    # policy phases, with no_show_* for the trailing no-show clause — TODO confirm
    # against the dataset's policy-code description.
    dataframe = dataframe.assign(cpc_d1=split_cancellation_policy(mat, 0))
    dataframe = dataframe.assign(cpc_p1=split_cancellation_policy(mat, 1))
    dataframe = dataframe.assign(cpc_n1=split_cancellation_policy(mat, 2))
    dataframe = dataframe.assign(cpc_d2=split_cancellation_policy(mat, 3))
    dataframe = dataframe.assign(cpc_p2=split_cancellation_policy(mat, 4))
    dataframe = dataframe.assign(cpc_n2=split_cancellation_policy(mat, 5))
    dataframe = dataframe.assign(cpc_no_show_p=split_cancellation_policy(mat, 6))
    dataframe = dataframe.assign(cpc_no_show_n=split_cancellation_policy(mat, 7))
    dataframe = dataframe.drop("cancellation_policy_code", axis=1)
    return dataframe
def cancellation_policy_index(cancellation_a, i):
    """Return component ``i`` of a parsed cancellation-policy vector (vectorize helper)."""
    component = cancellation_a[i]
    return component
def parse_one_cancellation_policy(cancellation):
    """Parse one policy code into an 8-element vector.

    Phases are separated by '_'; a phase containing 'D' is "<days>D<fee>", with
    the fee given as "<pct>P" or "<nights>N"; a phase without 'D' is the
    no-show clause. "UNKNOWN" maps to all zeros.

    NOTE(review): for the second phase (i=1) the writes go to indices 1/2/3,
    overlapping the first phase's fee slots — `3*i` offsets may have been
    intended so that d2/p2/n2 land in indices 3/4/5; confirm before reuse.
    """
    if cancellation == "UNKNOWN":
        return np.zeros(8)
    cancellation_split = cancellation.split("_")
    # astype(pd.Series) yields an object-dtype array; kept as-is since the
    # np.vectorize callers rely on the resulting dtype.
    parsed = np.zeros(8).astype(pd.Series)
    for i, phase in enumerate(cancellation_split):
        if "D" in phase:
            # Days-before-checkin component of this phase.
            parsed[0 + i] = int(phase.split("D")[0])
        else:
            # No 'D' means this is the trailing no-show clause.
            if "P" in phase:
                parsed[6] = int(phase.split("P")[0])
            elif "N" in phase:
                parsed[7] = int(phase.split("N")[0])
            continue
        # Fee component of this phase: percentage ('P') or nights ('N').
        if "P" in phase:
            parsed[1 + i] = int(phase.split("D")[1].split("P")[0])
        elif "N" in phase:
            parsed[2 + i] = int(phase.split("D")[1].split("N")[0])
    return parsed.astype(pd.Series)
if __name__ == '__main__':
    np.random.seed(0)
    """
    Important Note: we pre-processed the train and the test data combine, so load_data function must get two
    filenames
    """
    # Load data
    df, cancellation_labels, test_data = load_data("../datasets/agoda_cancellation_train.csv",
                                                   "../datasets/test_set_week_4.csv")
    # BUG FIX: sklearn's train_test_split returns (X_train, X_test, y_train,
    # y_test) — the original unpacked it as (train_X, train_y, test_X, test_y)
    # and passed 0.75 positionally, where it is interpreted as another array
    # to split rather than a split ratio.
    train_X, test_X, train_y, test_y = train_test_split(
        df, pd.Series(cancellation_labels), train_size=0.75)
    # Linear regression, thresholded into a binary cancellation prediction.
    linear_regression = LinearRegression()
    linear_regression.fit(train_X, train_y)
    y_ = linear_regression.predict(test_X)
    y1_ = y_ * 1
    # Two thresholding schemes are compared: 0.5 (y1_) and 0.3 (y_).
    y1_[y1_ < 0.5] = 0
    y1_[y1_ >= 0.5] = 1
    threshold_binary = 0.3
    y_[np.where(y_ <= threshold_binary)[0]] = 0
    y_[np.where(y_ > threshold_binary)[0]] = 1
    for y in [y1_, y_]:
        loss = f1_score(test_y, y, average='macro')
        print(f"linear regression's loss is {loss}")
        accuracy = np.abs(test_y - y).mean()
        print(f"linear regression's accuracy is {accuracy}")
        recall = np.sum(test_y * y)
        print(f"linear regression's TP is {recall} out of {np.sum(test_y)}")
        fp = y - test_y
        fp[fp < 0] = 0
        fp = fp.sum()
        print(f"linear regression's FP is {fp} out of {len(test_y) - np.sum(test_y)}\n")
|
{"hexsha": "3c0dfa4f69e0a7a55d7e3c60860076998c59bac1", "size": 12773, "ext": "py", "lang": "Python", "max_stars_repo_path": "challenge/agoda_cancellation_prediction.py", "max_stars_repo_name": "ItamarLevine/IML.HUJI", "max_stars_repo_head_hexsha": "9949a10f86083435ae427ef65d3f9c9ee6f3fedf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "challenge/agoda_cancellation_prediction.py", "max_issues_repo_name": "ItamarLevine/IML.HUJI", "max_issues_repo_head_hexsha": "9949a10f86083435ae427ef65d3f9c9ee6f3fedf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "challenge/agoda_cancellation_prediction.py", "max_forks_repo_name": "ItamarLevine/IML.HUJI", "max_forks_repo_head_hexsha": "9949a10f86083435ae427ef65d3f9c9ee6f3fedf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.1119133574, "max_line_length": 185, "alphanum_fraction": 0.6605339388, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3393}
|
import pandas as pd
from scipy.sparse import hstack
from sklearn.externals import joblib
import os
# use this to change to this folder, since this might be run from anywhere in project...
from definitions import ML_PATH
# https://stackoverflow.com/questions/431684/how-do-i-change-directory-cd-in-python/13197763#13197763
# make a nice cd command that auto changes directory back when exited
class cd:
    """Context manager for temporarily changing the working directory.

    On entry the current directory is remembered and the process chdirs
    to the target; on exit (normal or exceptional) the remembered
    directory is restored.

    Usage::

        with cd("~/somewhere"):
            ...  # cwd is ~/somewhere here
        # cwd restored here, even if the body raised
    """

    def __init__(self, newPath):
        # Expand '~'/'~user' so home-relative paths work on any platform.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were before moving so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)
        # Return self per the usual context-manager protocol so callers
        # may write `with cd(p) as ctx:` (the original returned None).
        return self

    def __exit__(self, etype, value, traceback):
        # Restore unconditionally; returning None lets exceptions propagate.
        os.chdir(self.savedPath)
# Run this file to groom events_current_collection and make events_current_processed_collection, delete old events_current_processed_collection first
# Needed to get access to mappening.utils.database when running just this file since this is under mappening.ml
import sys
sys.path.insert(0,'./../..')
# TODO: use these DBs for categorizeAllCurrentEvents
from mappening.utils.database import events_fb_collection, events_eventbrite_collection
from mappening.utils.database import events_current_processed_collection
# Recognized event category labels. `categorizeEvents` trusts an event's
# pre-existing 'category' field only when it appears in this list.
LIST_OF_CATEGORIES = [u'ART', u'CAUSE', u'COMEDY_PERFORMANCE', u'DANCE', u'DRINKS', u'FILM', u'FITNESS', u'FOOD',
                      u'GAMES', u'GARDENING', u'HEALTH', u'LITERATURE', u'MEETUP', u'MUSIC', u'NETWORKING', u'PARTY',
                      u'RELIGION', u'SHOPPING', u'SPORTS', u'THEATER', u'WELLNESS']
def categorizeEvents(events, threshold=.1):
    """Attach a ranked ``categories`` list to every event dict.

    :param events: list of event dictionaries; 'name' and 'description'
        may be missing or empty.
    :param threshold: probability cutoff forwarded to ``predictCategories``.
    :returns: the same list, mutated in place; every event gains a
        'categories' key ordered from most to least likely.
    """
    # The vectorizers need text in both fields, so fill in empty
    # placeholders; they are stripped back out before returning.
    for ev in events:
        if not ev.get('name'):
            ev['name'] = ''
        if not ev.get('description'):
            ev['description'] = ''
    frame = pd.DataFrame(events)
    # The pickled model artifacts live in the ML directory, so hop there
    # (and automatically back) while loading them.
    with cd(ML_PATH):
        rf = joblib.load('categorizationModel.jl')
        nameVectorizer = joblib.load('nameVectorizer.jl')
        detailVectorizer = joblib.load('detailVectorizer.jl')
    catLists = predictCategories(nameVectorizer, detailVectorizer, rf, frame, threshold)
    for ev, predicted in zip(events, catLists):
        known = ev.get('category', None)
        if known in LIST_OF_CATEGORIES:
            # A trusted pre-existing category goes first; append the
            # model's predictions without duplicating it.
            ev['categories'] = [known] + [c for c in predicted if c != known]
        else:
            ev['categories'] = predicted
        # Undo the placeholder fields and the now-superseded singular key.
        if 'category' in ev:
            del ev['category']
        if ev['name'] == '':
            del ev['name']
        if ev['description'] == '':
            del ev['description']
    return events
def predictCategories(nameVectorizer, detailVectorizer, classifier, X, threshold=.1):
    """Predict a ranked category list for every row of ``X``.

    :param nameVectorizer: fitted TfidfVectorizer for event names
    :param detailVectorizer: fitted TfidfVectorizer for event descriptions
    :param classifier: fitted classifier exposing predict_proba/classes_
        (e.g. RandomForestClassifier)
    :param X: pandas DataFrame with 'name' and 'description' columns
    :param threshold: minimum probability for every category after the first
    :returns: list parallel to the rows of X; each element is a list of
        category names, most probable first (never empty).
    """
    name_feats = nameVectorizer.transform(X['name'])
    detail_feats = detailVectorizer.transform(X['description'])
    features = hstack([name_feats, detail_feats])
    probabilities = classifier.predict_proba(features)
    results = []
    for row in probabilities:
        # Pair every class with its probability, most likely first
        # (stable sort keeps classifier.classes_ order for ties).
        ranked = sorted(zip(classifier.classes_, row), key=lambda cp: cp[1], reverse=True)
        # Always keep the top category; keep the rest only above threshold.
        picked = [cat for rank, (cat, prob) in enumerate(ranked)
                  if rank == 0 or prob > threshold]
        results.append(picked)
    return results
def categorizeAllCurrentEvents():
    """
    :Description: Intended to read every event from the raw source
        collections and insert categorized copies (each carrying a
        'categories' list) into events_current_processed_collection.
    """
    # TODO: pull from the per-source raw DBs (events_fb_collection,
    # events_eventbrite_collection) instead of the old single
    # events_current collection; since those include historical events,
    # keep only events whose dates end after now.
    # allEvents = [e for e in events_current_collection.find()]
    # allEvents = categorizeEvents(allEvents)
    # events_current_processed_collection.insert_many(allEvents)
    print("Added to categorized event collection: events_current_processed")
# Entry point: running this module directly rebuilds the categorized
# event collection.
if __name__ == "__main__":
    categorizeAllCurrentEvents()
|
{"hexsha": "5d93fb3a369e4ab80641e2c322aee5ec5aa685bb", "size": 5553, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mappening/ml/autocategorization.py", "max_stars_repo_name": "ucladevx/Bmaps-Backend", "max_stars_repo_head_hexsha": "8dcbb4ca98d183499e03429b944ec0c7865065a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-22T15:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-13T09:52:45.000Z", "max_issues_repo_path": "src/mappening/ml/autocategorization.py", "max_issues_repo_name": "ucladevx/Mappening-Backend", "max_issues_repo_head_hexsha": "8dcbb4ca98d183499e03429b944ec0c7865065a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2017-11-03T00:55:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-09T02:07:50.000Z", "max_forks_repo_path": "src/mappening/ml/autocategorization.py", "max_forks_repo_name": "ucladevx/Mappening-Backend", "max_forks_repo_head_hexsha": "8dcbb4ca98d183499e03429b944ec0c7865065a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-15T08:51:12.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-03T04:32:09.000Z", "avg_line_length": 43.7244094488, "max_line_length": 149, "alphanum_fraction": 0.7037637313, "include": true, "reason": "from scipy", "num_tokens": 1237}
|
import pickle
import gzip
import numpy as np
from keras.datasets import mnist
from svm_classification import svm_classify
from models import create_model
from keras.optimizers import Adam
from objectives import lda_loss
if __name__ == '__main__':
    # Train a Deep LDA network on MNIST, project the data through it,
    # evaluate a linear SVM on the projected features, and pickle them.
    save_to = './new_features.gz'   # output path for the pickled features
    outdim_size = 10                # dimensionality of the learned feature space
    epoch_num = 100
    batch_size = 800
    reg_par = 1e-5                  # L2 regularization weight for the network
    margin = 1.0                    # margin used inside the LDA loss
    n_components = 9                # number of components in the LDA loss
    C = 1e-1                        # SVM regularization parameter

    # Load MNIST and flatten each 28x28 image into a feature vector.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = np.reshape(x_train, (len(x_train), -1))
    x_test = np.reshape(x_test, (len(x_test), -1))

    # Building, training, and producing the new features by Deep LDA
    model = create_model(x_train.shape[-1], reg_par, outdim_size)
    model_optimizer = Adam()
    model.compile(loss=lda_loss(n_components, margin), optimizer=model_optimizer)
    model.summary()
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epoch_num, shuffle=True, validation_data=(x_test, y_test), verbose=2)

    x_train_new = model.predict(x_train)
    x_test_new = model.predict(x_test)

    # Training and testing of SVM with linear kernel on the new features
    [train_acc, test_acc] = svm_classify(x_train_new, y_train, x_test_new, y_test, C=C)
    print("Accuracy on train data is:", train_acc * 100.0)
    print("Accuracy on test data is:", test_acc*100.0)

    # Saving new features in a gzip pickled file specified by save_to.
    # Use a context manager so the file is closed even if pickling fails
    # (the original called close() manually, leaking on error).
    print('Saving new features ...')
    with gzip.open(save_to, 'wb') as f:
        pickle.dump([(x_train_new, y_train), (x_test_new, y_test)], f)
|
{"hexsha": "369bd2e99b39d26784057ad84d429baff08da6e4", "size": 1584, "ext": "py", "lang": "Python", "max_stars_repo_path": "Groups/Group_ID_20/MvLDAN/DeepLDA.py", "max_stars_repo_name": "sonaldangi12/DataScience", "max_stars_repo_head_hexsha": "3d7cd529a96f37c2ef179ee408e2c6d8744d746a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Groups/Group_ID_20/MvLDAN/DeepLDA.py", "max_issues_repo_name": "sonaldangi12/DataScience", "max_issues_repo_head_hexsha": "3d7cd529a96f37c2ef179ee408e2c6d8744d746a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Groups/Group_ID_20/MvLDAN/DeepLDA.py", "max_forks_repo_name": "sonaldangi12/DataScience", "max_forks_repo_head_hexsha": "3d7cd529a96f37c2ef179ee408e2c6d8744d746a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3333333333, "max_line_length": 131, "alphanum_fraction": 0.702020202, "include": true, "reason": "import numpy", "num_tokens": 428}
|
!* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
!* See https://llvm.org/LICENSE.txt for license information.
!* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
! OpenMP Parallel Region
! parallel private subroutine call
! Driver for the OpenMP parallel-region test: sp1 fills result(:) in
! parallel and check() (supplied by the test harness) compares it with
! expect(:).
! NOTE(review): the expect data matches x(i) = iam*100 + j written by
! threads iam = 1..4 striding by 4 -- it assumes a team of 4 threads;
! confirm against the harness configuration.
      program p
      parameter(n=10)
      integer result(n)
      integer expect(n)
      data expect/101,201,301,401,102,202,302,402,103,203/
      call sp1(result,n)
!print *,result
      call check(result,expect,n)
      end
! Open a parallel region in which every thread computes its own 1-based
! id (iam) and the team size (np), then writes its strided share of x
! through subdomain.
      subroutine sp1(x,n)
      integer n
      integer x(n)
      integer omp_get_thread_num
      integer omp_get_num_threads
! iam and np are per-thread (private clause); ipoints is declared
! private but not used in this region.
!$omp parallel private(iam,np,ipoints)
      iam = omp_get_thread_num()+1
      np = omp_get_num_threads()
      call subdomain(x,iam,n,np)
!$omp end parallel
      end
! Fill this thread's share of x: elements iam, iam+np, iam+2*np, ...
! receive iam*100 + j, where j counts the thread's own iterations from 1.
! Threads touch disjoint elements, so no synchronization is needed.
      subroutine subdomain(x,iam,n,np)
      integer n
      integer x(n),iam,np
      integer i,j
      j = 0
      do i = iam,n,np
      j = j + 1
      x(i) = iam*100 + j
      enddo
      end
|
{"hexsha": "616ba054221f625a7873e8df4d296dc02cc9760f", "size": 867, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "test/mp_correct/src/par11.f", "max_stars_repo_name": "abrahamtovarmob/flang", "max_stars_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 716, "max_stars_repo_stars_event_min_datetime": "2017-05-17T17:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:20:58.000Z", "max_issues_repo_path": "test/mp_correct/src/par11.f", "max_issues_repo_name": "abrahamtovarmob/flang", "max_issues_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 794, "max_issues_repo_issues_event_min_datetime": "2017-05-18T19:27:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:22:11.000Z", "max_forks_repo_path": "test/mp_correct/src/par11.f", "max_forks_repo_name": "abrahamtovarmob/flang", "max_forks_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 157, "max_forks_repo_forks_event_min_datetime": "2017-05-17T18:50:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:06:45.000Z", "avg_line_length": 21.675, "max_line_length": 80, "alphanum_fraction": 0.7174163783, "num_tokens": 267}
|
export proper_divisors, is_abundant

"""
    is_abundant(n)

Return `true` when the sum of the proper divisors of `n` exceeds `n`
itself, i.e. `n` is an abundant number.
"""
is_abundant(n) = sum(proper_divisors(n)) > n
|
{"hexsha": "4ca7f5ed5e217d6eb522d75cb5dc21a27d79647b", "size": 98, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/abundant-numbers.jl", "max_stars_repo_name": "JuliaTagBot/ProjectEulerUtil.jl-1", "max_stars_repo_head_hexsha": "efbf29a9e1297cb95f81028d2c5d6dfa255d985e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-28T16:48:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T21:25:03.000Z", "max_issues_repo_path": "src/abundant-numbers.jl", "max_issues_repo_name": "JuliaTagBot/ProjectEulerUtil.jl-1", "max_issues_repo_head_hexsha": "efbf29a9e1297cb95f81028d2c5d6dfa255d985e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-19T12:54:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T04:14:44.000Z", "max_forks_repo_path": "src/abundant-numbers.jl", "max_forks_repo_name": "JuliaTagBot/ProjectEulerUtil.jl-1", "max_forks_repo_head_hexsha": "efbf29a9e1297cb95f81028d2c5d6dfa255d985e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-05-19T16:22:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T00:44:55.000Z", "avg_line_length": 12.25, "max_line_length": 35, "alphanum_fraction": 0.7346938776, "num_tokens": 32}
|
import numpy as np
import torch
from cogdl.datasets import build_dataset_from_name
from cogdl.utils import get_degrees
class Test_Data(object):
    """Sanity tests for cogdl graph-data sampling utilities on Cora."""

    def setup_class(self):
        # Load the Cora dataset once and cache its basic statistics.
        self.dataset = build_dataset_from_name("cora")
        self.data = self.dataset[0]
        self.num_nodes = self.data.num_nodes
        self.num_edges = self.data.num_edges
        self.num_features = self.data.num_features
        print("Call Setup")

    def test_subgraph_sampling(self):
        # Induce a subgraph on up to 100 distinct random nodes.
        chosen = np.unique(np.random.randint(0, self.num_nodes, (100,)))
        sub = self.data.subgraph(chosen)
        # One feature row per chosen node; feature width unchanged.
        assert sub.x.shape[0] == len(set(chosen))
        assert sub.x.shape[1] == self.data.x.shape[1]

    def test_edge_subgraph_sampling(self):
        # Sample 200 edge ids (duplicates allowed) and build the subgraph.
        picked = np.random.randint(0, self.num_edges, (200,))
        sub = self.data.edge_subgraph(picked, require_idx=False)
        src, dst = sub.edge_index
        # Expect one (src, dst) pair per sampled edge id.
        assert src.shape[0] == dst.shape[0]
        assert src.shape[0] == len(picked)

    def test_adj_sampling(self):
        seeds = np.arange(0, 10)
        pairs = torch.stack(self.data.edge_index).t().cpu().numpy()
        pairs = [tuple(p) for p in pairs]
        print(np.array(pairs).shape)
        # size=5 caps the sampled neighbourhood; -1 presumably means
        # "take all neighbours" -- TODO confirm against sample_adj docs.
        for fanout in [5, -1]:
            node_idx, sampled_edges = self.data.sample_adj(seeds, fanout)
            node_idx = node_idx.cpu().numpy()
            # Every seed must appear among the returned nodes.
            assert (set(node_idx) & set(seeds)) == set(seeds)

    def test_to_csr(self):
        # Switch the adjacency storage to CSR, then check its invariants.
        self.data._adj._to_csr()
        assert self.data.is_symmetric() is True
        degs = self.data.degrees()
        src, dst = self.data.edge_index
        # Degrees via the CSR path must match the edge-list computation.
        assert (degs == get_degrees(src, dst)).all()
|
{"hexsha": "78d5ca0a24e03ec86ed09b600d2ae1f506fc47df", "size": 1869, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/datasets/test_data.py", "max_stars_repo_name": "cenyk1230/cogdl", "max_stars_repo_head_hexsha": "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1072, "max_stars_repo_stars_event_min_datetime": "2019-08-02T05:46:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:51:53.000Z", "max_issues_repo_path": "tests/datasets/test_data.py", "max_issues_repo_name": "cenyk1230/cogdl", "max_issues_repo_head_hexsha": "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 96, "max_issues_repo_issues_event_min_datetime": "2019-08-05T17:27:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T08:36:57.000Z", "max_forks_repo_path": "tests/datasets/test_data.py", "max_forks_repo_name": "cenyk1230/cogdl", "max_forks_repo_head_hexsha": "fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 299, "max_forks_repo_forks_event_min_datetime": "2019-08-08T07:33:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T09:30:07.000Z", "avg_line_length": 38.1428571429, "max_line_length": 84, "alphanum_fraction": 0.6565008026, "include": true, "reason": "import numpy", "num_tokens": 442}
|
import numpy as np
class PSCMRecovery:
    """Structure recovery for a linear P-SCM x = A x + B e.

    The mixing matrix is ``w = (I - A)^{-1} B``, mapping exogenous
    components to observed variables. Given ``w`` (or ``a``/``b`` from
    which it is computed) the class recovers a causal order, the
    possible-parent sets, and finally the adjacency matrix ``A``.
    """

    def __init__(self, w=None, a=None, b=None, alpha=1e-10):
        """
        :param w: mixing matrix; if None it is computed from ``a`` and ``b``
        :param a: true adjacency matrix (for checking satisfiability)
        :param b: true exogenous connection matrix (for checking satisfiability)
        :param alpha: magnitude threshold below which entries count as zero
        """
        self.a_true = a  # True adjacency matrix (for checking satisfiability)
        self.b = b  # True exogenous connection matrix (for checking satisfiability)
        if w is None:
            # w = (I - A)^{-1} B, obtained by solving (I - A) w = B.
            self.w = np.linalg.solve(np.eye(a.shape[0]) - a, b)
        else:
            self.w = w
        self.p = self.w.shape[0]  # number of observed variables
        self.m = self.w.shape[1]  # number of exogenous components
        self.alpha = alpha  # Threshold for pruning after recovery
        self.order = None  # recovered causal order (permutation of range(p))
        self.rev_order = None  # inverse of `order`, to undo the permutation
        self.unique = None  # cached result of check_uniqueness()
        self.pp = []  # Possible parent set per variable
        self.comp = []  # Exogenous component set per variable
        self.list_pp = []  # retained for backward compatibility; unused
        self.A = np.eye(self.p)  # Working adjacency/mixing matrix

    def permute(self):
        # Recover the correct causal order: sort variables by how many
        # exogenous components reach them (ancestors are reached by fewer).
        self.w[abs(self.w) < self.alpha] = 0
        nonzero_row = np.count_nonzero(self.w, axis=1)
        self.order = np.argsort(nonzero_row)
        self.rev_order = np.arange(len(self.order))[np.argsort(self.order)]
        self.w = self.w[self.order]
        if self.b is not None:
            # Keep the ground-truth matrices aligned with the permuted w.
            self.b = self.b[self.order]
            self.a_true = self.a_true[np.ix_(self.order, self.order)]

    def find_pp(self):
        # Find the possible parent set for each observed variable: i is a
        # possible ancestor of k when i's component set is contained in k's.
        for k in range(self.p):
            self.comp.append(set(np.nonzero(self.w[k])[0]))
            temp_pp = {k}
            candidate_pp = set(range(k))
            for i in reversed(range(k)):
                if i in candidate_pp and self.comp[i] <= self.comp[k]:
                    # Absorb i's whole possible-parent set transitively.
                    temp_pp = temp_pp | self.pp[i]
                    candidate_pp = candidate_pp - self.pp[i]
            self.pp.append(temp_pp)

    def _find_unique_component(self, k, list_pp, check=False):
        # Find components of k attributable to exactly ONE candidate parent
        # in list_pp, following the iteration procedure. With check=True the
        # search runs on the ground-truth b instead of the working w.
        index = []  # the single contributing parent per unique component
        uni = []    # the corresponding unique components (parallel to index)
        for j in self.comp[k]:
            if check:
                temp_w = np.where(abs(self.b[list_pp, j]) > self.alpha)[0]
            else:
                temp_w = np.where(abs(self.w[list_pp, j]) > self.alpha)[0]
            if len(temp_w) == 1:
                i = list_pp[temp_w[0]]
                if i not in index:
                    index.append(i)
                    uni.append(j)
                else:
                    # Uniqueness check also inspects further components
                    # attributable to an already-claimed parent.
                    if check:
                        uni.append(j)
        return index, uni

    def _find_local_structure(self, k, list_pp):
        # Recover the local structure of the linear P-SCM at variable k.
        index, uni = self._find_unique_component(k, list_pp)
        if index:
            while index:
                i = index.pop()
                j = uni.pop()
                # Component j is contributed only by parent i, so the edge
                # weight is the ratio of their mixing entries; then peel
                # i's contribution off w[k].
                self.A[k, i] = self.w[k, j] / self.w[i, j]
                self.w[k] = self.w[k] - self.A[k, i] * self.w[i]
                list_pp.remove(i)
            self._find_local_structure(k, list_pp)
        else:
            index_nz = np.where(np.any(abs(self.w[list_pp]) > self.alpha, axis=0))[0]
            # BUGFIX: the original tested `index_nz.any()`, which is False
            # when the only remaining nonzero column index is 0; test for
            # emptiness instead.
            if index_nz.size:
                # No uniquely-attributable component remains: solve for all
                # remaining possible parents jointly via least squares.
                # BUGFIX: the original indexed `self.list_pp[k]`, but
                # self.list_pp is never populated, so this branch always
                # raised IndexError; the local `list_pp` holds the intended
                # row set (it is also the assignment target below).
                solve = self.w[np.ix_(list_pp, index_nz)].T
                self.A[k, list_pp] = np.linalg.lstsq(solve, self.w[k, index_nz])[0]
                self.w[k, index_nz] = 0

    def find_structure(self):
        """Recover the structure of the linear P-SCM.

        :returns: tuple ``(A, w)`` in the ORIGINAL variable order, where
            ``A`` is the recovered adjacency matrix and ``w`` the residual
            mixing matrix.
        """
        if self.unique is None:
            # check_uniqueness() already permuted and built pp; avoid redoing.
            self.permute()
            self.find_pp()
        for k in range(self.p):
            list_pp = list(self.pp[k])
            list_pp.remove(k)
            self._find_local_structure(k, list_pp)
            self.w[k, abs(self.w[k]) < self.alpha] = 0
        # Convert the accumulated mixing coefficients into the adjacency
        # matrix: A_hat = I - (working A)^{-1}.
        self.A = np.eye(self.p) - np.linalg.inv(self.A)
        self.A[abs(self.A) < self.alpha] = 0
        return self.A[np.ix_(self.rev_order, self.rev_order)], self.w[self.rev_order]

    def check_uniqueness(self):
        """Check whether the given linear P-SCM is uniquely identifiable.

        :returns: ``True`` when every variable's local structure is
            uniquely identifiable, else ``False`` (also cached in
            ``self.unique``).
        """
        self.permute()
        self.find_pp()
        self.unique = True
        for k in range(self.p):
            list_pp = list(self.pp[k])
            list_pp.remove(k)
            if self.a_true is not None:
                # Every true parent must be among the possible parents,
                # otherwise recovery cannot find it.
                parent = set(np.where(abs(self.a_true[k]) > self.alpha)[0])
                if parent - set(list_pp):
                    self.unique = False
                    break
            self._check_local_uniqueness(k, list_pp)
            if not self.unique:
                break
        return self.unique

    def _check_local_uniqueness(self, k, list_pp, marriage_condition=False):
        # Check if variable k in the given linear P-SCM is uniquely
        # identifiable; sets self.unique = False on failure.
        index, uni = self._find_unique_component(k, list_pp, check=True)
        if index:
            list_pp = list(set(list_pp) - set(index))
            # k must have no direct exogenous contribution on components
            # used to identify its parents.
            if any(abs(self.b[k, uni]) > self.alpha):
                self.unique = False
                return
            self._check_local_uniqueness(k, list_pp)
        else:
            n_nz = len(list_pp)
            index_nz = np.where(np.any(abs(self.b[list_pp]) > self.alpha, axis=0))[0]
            if marriage_condition and min(n_nz, len(index_nz)) > 0:
                # Check marriage condition by matrix rank.
                cost_matrix = self.b[np.ix_(list_pp, index_nz)]
                check_mc = (np.linalg.matrix_rank(cost_matrix) < n_nz)
            else:
                # Compare the number of remaining parents with the number
                # of components they collectively touch.
                check_mc = (len(index_nz) < n_nz)
            if check_mc or any(abs(self.b[k, index_nz]) > self.alpha):
                self.unique = False
|
{"hexsha": "4e7611616e71306db8323bd6ffa8a64cb2ea214d", "size": 5679, "ext": "py", "lang": "Python", "max_stars_repo_path": "PSCM.py", "max_stars_repo_name": "Yuqin-Yang/propagation-scm", "max_stars_repo_head_hexsha": "7374866e35fa462d59aa8f7431706f5f003990b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PSCM.py", "max_issues_repo_name": "Yuqin-Yang/propagation-scm", "max_issues_repo_head_hexsha": "7374866e35fa462d59aa8f7431706f5f003990b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PSCM.py", "max_forks_repo_name": "Yuqin-Yang/propagation-scm", "max_forks_repo_head_hexsha": "7374866e35fa462d59aa8f7431706f5f003990b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7169811321, "max_line_length": 98, "alphanum_fraction": 0.539707695, "include": true, "reason": "import numpy", "num_tokens": 1337}
|
#!/usr/bin/env python
u"""
histograms.py
by Yara Mohajerani (Last Update 11/2018)
Forked from CNNvsSobelHistogram.py by Michael Wood
find path of least resistance through an image and quantify errors
Update History
11/2018 - Forked from CNNvsSobelHistogram.py
Add option for manual comparison
make sure all the fronts are in the same order
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from PIL import Image
import getopt
import copy
from shapely.geometry import LineString, shape
#############################################################################################
# All of the functions are run here
#-- main function to get user input and make training data
def main():
#-- Read the system arguments listed after the program
long_options = ['subdir=','method=','step=','indir=','interval=','buffer=','manual']
optlist,arglist = getopt.getopt(sys.argv[1:],'=D:M:S:I:V:B:m:',long_options)
subdir= 'all_data2_test'
method = ''
step = 50
n_interval = 1000
buffer_size=500
indir = ''
set_manual = False
for opt, arg in optlist:
if opt in ('-D','--subdir'):
subdir = arg
elif opt in ('-M','--method'):
method = arg
elif opt in ('-S','--step'):
step = np.int(arg)
elif opt in ('-V','--interval'):
n_interval = np.int(arg)
elif opt in ('-B','--buffer'):
buffer_size = np.int(arg)
elif opt in ('-I','--indir'):
indir = os.path.expanduser(arg)
elif opt in ('-m','--manual'):
set_manual = True
#-- directory setup
#- current directory
current_dir = os.path.dirname(os.path.realpath(__file__))
headDirectory = os.path.join(current_dir,'..','FrontLearning_data')
glaciersFolder=os.path.join(headDirectory,'Glaciers')
results_dir = os.path.join(headDirectory,'Results', subdir)
#-- if user input not given, set label folder
#-- else if input directory is given, then set the method based on that
if indir == '':
indir = os.path.join(results_dir,method,method)
else:
method = os.path.basename(indir)
if method=='':
sys.exit("Please do not put '/' at the end of indir.")
print('input directory ONLY for NN output:%s'%indir)
print('METHOD:%s'%method)
#-- make histohtam filder if it doesn't exist
histFolder = os.path.join(results_dir,'Histograms')
if (not os.path.isdir(histFolder)):
os.mkdir(histFolder)
outputFolder= os.path.join(histFolder,method+'_'+str(step)+'_%isegs'%n_interval+'_%ibuffer'%buffer_size)
#-- make output folders
if (not os.path.isdir(outputFolder)):
os.mkdir(outputFolder)
if set_manual:
datasets = ['NN','Sobel','Manual']
else:
datasets = ['NN','Sobel']
print(datasets)
pixelFolder = {}
frontFolder = {}
pixelFolder['NN'] = os.path.join(results_dir,method,method+' Pixel CSVs '+str(step))
pixelFolder['Sobel'] = os.path.join(results_dir,'Sobel/Sobel Pixel CSVs '+str(step))
if 'Manual' in datasets:
pixelFolder['Manual'] = os.path.join(results_dir,'output_handrawn/output_handrawn Pixel CSVs '+str(step))
frontFolder['NN'] = os.path.join(results_dir,method,method+' Geo CSVs '+str(step))
frontFolder['Sobel'] = os.path.join(results_dir,'Sobel/Sobel Geo CSVs '+str(step))
if 'Manual' in datasets:
frontFolder['Manual'] = os.path.join(results_dir,'output_handrawn/output_handrawn Geo CSVs '+str(step))
def seriesToNPoints(series,N):
#find the total length of the series
totalDistance=0
for s in range(len(series[:,0])-1):
totalDistance+=((series[s,0]-series[s+1,0])**2+(series[s,1]-series[s+1,1])**2)**0.5
intervalDistance=totalDistance/(N-1)
#make the list of points
newSeries=series[0,:]
currentS = 0
currentPoint1=series[currentS,:]
currentPoint2=series[currentS+1,:]
for p in range(N-2):
distanceAccrued = 0
while distanceAccrued<intervalDistance:
currentLineDistance=((currentPoint1[0]-currentPoint2[0])**2+(currentPoint1[1]-currentPoint2[1])**2)**0.5
if currentLineDistance<intervalDistance-distanceAccrued:
distanceAccrued+=currentLineDistance
currentS+=1
currentPoint1 = series[currentS, :]
currentPoint2 = series[currentS + 1, :]
else:
distance=intervalDistance-distanceAccrued
newX=currentPoint1[0]+(distance/currentLineDistance)*(currentPoint2[0]-currentPoint1[0])
newY = currentPoint1[1] + (distance / currentLineDistance) * (currentPoint2[1] - currentPoint1[1])
distanceAccrued=intervalDistance+1
newSeries=np.vstack([newSeries,np.array([newX,newY])])
currentPoint1=np.array([newX,newY])
newSeries = np.vstack([newSeries, series[-1,:]])
return(newSeries)
def frontComparisonErrors(front1,front2):
errors=[]
for ff in range(len(front1)):
dist=((front1[ff,0]-front2[ff,0])**2+(front1[ff,1]-front2[ff,1])**2)**0.5
errors.append(dist)
return(errors)
def rmsError(error):
return(np.sqrt(np.mean(np.square(error))))
def generateLabelList(labelFolder):
labelList=[]
for fil in os.listdir(labelFolder):
# if fil[-6:] == 'B8.png' or fil[-6:] == 'B2.png':
# labelList.append(fil[:-4])
if fil.endswith('_nothreshold.png'):
labelList.append(fil.replace('_nothreshold.png',''))
return(labelList)
# get glacier names
def getGlacierList(labelList):
f=open(os.path.join(glaciersFolder,'Scene_Glacier_Dictionary.csv'),'r')
lines=f.read()
f.close()
lines=lines.split('\n')
glacierList = []
for sceneID in labelList:
for line in lines:
line=line.split(',')
if line[0]==sceneID:
glacierList.append(line[1])
return(glacierList)
#code to get the list of fronts and their images
def getFrontList(glacierList,labelList):
frontsList = []
for ind,label in enumerate(labelList):
glacier = glacierList[ind]
f=open(os.path.join(glaciersFolder, glacier, '%s Image Data.csv'%glacier),'r')
lines=f.read()
f.close()
lines=lines.split('\n')
for line in lines:
line=line.split(',')
if line[1][:-4] == label:
frontsList.append(line[0])
return(frontsList)
def fjordBoundaryIndices(glacier):
boundary1file=os.path.join(glaciersFolder,glacier,'Fjord Boundaries',glacier+' Boundary 1 V2.csv')
boundary1=np.genfromtxt(boundary1file,delimiter=',')
boundary2file = os.path.join(glaciersFolder,glacier,'Fjord Boundaries',glacier + ' Boundary 2 V2.csv')
boundary2 = np.genfromtxt(boundary2file, delimiter=',')
boundary1=seriesToNPoints(boundary1,1000)
boundary2 = seriesToNPoints(boundary2, 1000)
return(boundary1,boundary2)
labelList=generateLabelList(indir)
glacierList=getGlacierList(labelList)
frontList=getFrontList(glacierList,labelList)
allerrors = {}
allerrors['NN']=[]
allerrors['Sobel']=[]
allerrors['Manual']=[]
N=1
N=len(labelList)
for ll in range(N):
glacier = glacierList[ll]
label=labelList[ll]
trueFrontFile=frontList[ll]
print(label)
############################################################################
# This section to get the front images
trueImageFolder=os.path.join(headDirectory,'Glaciers',glacier,'Small Images')
trueImage = Image.open(os.path.join(trueImageFolder,label+'_Subset.png')).transpose(Image.FLIP_LEFT_RIGHT).convert("L")
frontImageFolder = {}
frontImageFolder['NN'] = indir
frontImageFolder['Sobel'] = os.path.join(results_dir,'Sobel/Sobel')
if 'Manual' in datasets:
frontImageFolder['Manual'] = os.path.join(os.path.dirname(indir),'output_handrawn')
frontImage = {}
pixels = {}
for d,tl in zip(datasets,['_nothreshold','','_nothreshold']):
frontImage[d] = Image.open(os.path.join(frontImageFolder[d],label \
+ '%s.png'%tl)).transpose(Image.FLIP_LEFT_RIGHT).convert("L")
############################################################################
# This section to get the front pixels
# get the front
pixelsFile = glacier + ' ' + label + ' Pixels.csv'
pixels[d] = np.genfromtxt(os.path.join(pixelFolder[d],pixelsFile), delimiter=',')
pixels[d] = seriesToNPoints(pixels[d], n_interval)
############################################################################
# Get the fjord boundaries for the current glacier
bounds = {}
bounds[1], bounds[2] = fjordBoundaryIndices(glacier)
buff = {}
for i in [1,2]:
# Form buffer around boundary
lineStringSet=bounds[i]
line=LineString(lineStringSet)
buff[i] = line.buffer(buffer_size)
############################################################################
# This section to get the front data
#get the true front
trueFrontFolder = os.path.join(glaciersFolder,glacier,'Front Locations','3413')
trueFront=np.genfromtxt(trueFrontFolder+'/'+trueFrontFile,delimiter=',')
#-- make sure all fronts go in the same direction
#-- if the x axis is not in increasng order, reverse
if trueFront[0,0] > trueFront[-1,0] and glacier!='Helheim':
print('flipped true front.')
trueFront = trueFront[::-1,:]
trueFront=seriesToNPoints(trueFront,n_interval)
#-- get rid of poitns too close to the edges
l1 = LineString(trueFront)
int1 = l1.difference(buff[1])
int2 = int1.difference(buff[2])
try:
trueFront = np.array(shape(int2).coords)
except:
lengths = [len(np.array(shape(int2)[i].coords)) for i in range(len(shape(int2)))]
max_ind = np.argmax(lengths)
trueFront = np.array(shape(int2)[max_ind].coords)
#-- testing
print(lengths)
print(lengths[max_ind])
#-- rebreak into n_interval segments
trueFront=seriesToNPoints(trueFront,n_interval)
front = {}
errors = {}
for d in datasets:
#get the front
frontFile=glacier+' '+label+' Profile.csv'
temp_front=np.genfromtxt(os.path.join(frontFolder[d],frontFile),delimiter=',')
#-- make sure all fronts go in the same direction
#-- if the x axis is not in increasng order, reverse
#if temp_front[0,0] > temp_front[-1,0]:
# print('flipped %s'%d)
# temp_front = temp_front[::-1,:]
front[d]=seriesToNPoints(temp_front,n_interval)
        #-- get rid of points too close to the edges
l1 = LineString(front[d])
int1 = l1.difference(buff[1])
int2 = int1.difference(buff[2])
try:
front[d] = np.array(shape(int2).coords)
except:
lengths = [len(np.array(shape(int2)[i].coords)) for i in range(len(shape(int2)))]
max_ind = np.argmax(lengths)
front[d] = np.array(shape(int2)[max_ind].coords)
#-- testing
print(lengths)
print(lengths[max_ind])
#-- rebreak into n_interval segments
front[d]=seriesToNPoints(front[d],n_interval)
errors[d]=frontComparisonErrors(trueFront,front[d])
for error in errors[d]:
allerrors[d].append(error)
#-- plot fronts for debugging purposes -- double checking.
# plt.plot(trueFront[:,0],trueFront[:,1],label='True')
# plt.plot(front['NN'][:,0],front['NN'][:,1,],label='NN')
# plt.legend()
# plt.show()
frontXmin = np.min(np.concatenate(([np.min(trueFront[:, 0])], [np.min(front[d][:,0]) for d in datasets])))
frontXmax = np.max(np.concatenate(([np.max(trueFront[:, 0])], [np.max(front[d][:, 0]) for d in datasets])))
frontYmin = np.min(np.concatenate(([np.min(trueFront[:, 1])], [np.min(front[d][:, 1]) for d in datasets])))
frontYmax = np.max(np.concatenate(([np.max(trueFront[:, 1])], [np.max(front[d][:, 1]) for d in datasets])))
fig=plt.figure(figsize=(10,8))
n_panels = len(front)+1
plt.subplot(2,n_panels,1)
plt.imshow(trueImage, cmap='gray')
plt.gca().set_xlim([0, 200])
plt.gca().set_ylim([300,0])
plt.gca().axes.get_xaxis().set_ticks([])
plt.gca().axes.get_yaxis().set_ticks([])
plt.title('Original Image',fontsize=12)
p = 2
for d in datasets:
plt.subplot(2, n_panels, p)
plt.imshow(frontImage[d], cmap='gray')
plt.plot(pixels[d][:, 0], pixels[d][:, 1], 'y-',linewidth=3)
plt.gca().set_xlim([0, 200])
plt.gca().set_ylim([300, 0])
plt.gca().axes.get_xaxis().set_ticks([])
plt.gca().axes.get_yaxis().set_ticks([])
plt.title('%s Solution'%d,fontsize=12)
p += 1
plt.subplot(2,n_panels,p)
plt.title('Geocoded Solutions',fontsize=12)
plt.ylabel('Northing (km)',fontsize=12)
plt.xlabel('Easting (km)',fontsize=12)
plt.plot(trueFront[:,0]/1000,trueFront[:,1]/1000,'k-',label='True')
for d,c in zip(datasets,['b-','g-','r-']):
plt.plot(front[d][:,0]/1000,front[d][:,1]/1000,c,label=d)
plt.gca().set_xlim([frontXmin/1000,frontXmax/1000])
plt.gca().set_ylim([frontYmin/1000, frontYmax/1000])
plt.gca().set_xticks([frontXmin/1000,frontXmax/1000])
plt.gca().set_yticks([frontYmin / 1000, frontYmax / 1000])
plt.legend(loc=0)
p += 1
p_temp = copy.copy(p)
x = {}
y = {}
for d,c in zip(datasets,['b','g','r']):
plt.subplot(2,n_panels,p)
plt.title('%s Errors Histogram'%d,fontsize=12)
bins=range(0,5000,100)
y[d], x[d], _ =plt.hist(errors[d],alpha=0.5,color=c,bins=bins,label='NN')
#plt.xlabel('RMS Error = '+'{0:.2f}'.format(rmsError(errors[d]))+' m',fontsize=12)
plt.xlabel('Mean Diff. = '+'{0:.2f}'.format(np.mean(np.abs(errors[d])))+' m',fontsize=12)
p += 1
#-- set histogram bounds
for d in datasets:
plt.subplot(2,n_panels,p_temp)
plt.gca().set_ylim([0,np.max([y[d] for d in datasets])])
plt.gca().set_xlim([0, np.max([x[d] for d in datasets])])
p_temp += 1
plt.savefig(os.path.join(outputFolder, label + '.png'),bbox_inches='tight')
plt.close(fig)
fig=plt.figure(figsize=(11,4))
x = {}
y = {}
for i,d,c,lbl in zip(range(len(datasets)),datasets,['b','g','r'],['e','f','g']):
plt.subplot(1,len(datasets),i+1)
plt.title(r"$\bf{%s)}$"%lbl + " %s Error Histogram"%d,fontsize=12)
bins=range(0,5000,100)
y[d], x[d], _ =plt.hist(allerrors[d],alpha=0.5,color=c,bins=bins,label=d)
#plt.xlabel('RMS Error = '+'{0:.2f}'.format(rmsError(allerrors[d]))+' m',fontsize=12)
plt.xlabel('Mean Difference = '+'{0:.2f}'.format(np.mean(np.abs(allerrors[d])))+' m',fontsize=12)
if i==0:
plt.ylabel('Count (100 m bins)',fontsize=12)
for i in range(len(datasets)):
plt.subplot(1,len(datasets),i+1)
plt.gca().set_ylim([0,np.max([y[d] for d in datasets])])
plt.gca().set_xlim([0,np.max([x[d] for d in datasets])])
plt.savefig(os.path.join(results_dir,\
'Figure_4_'+'_'.join(method.split())+'_'+str(step)+'_%isegs'%n_interval+'_%ibuffer'%buffer_size+'.pdf'),bbox_inches='tight')
plt.close()
# Run the full front-comparison / histogram pipeline when invoked as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "717195cdc4c8356ce779ba76f8e33a5fc166d609", "size": 16516, "ext": "py", "lang": "Python", "max_stars_repo_path": "histograms.py", "max_stars_repo_name": "yaramohajerani/FrontLearning", "max_stars_repo_head_hexsha": "70f0e4c2991ff5ba585e20fbc6aa9e7b82ca312c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-07-20T02:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T19:56:05.000Z", "max_issues_repo_path": "histograms.py", "max_issues_repo_name": "yaramohajerani/FrontLearning", "max_issues_repo_head_hexsha": "70f0e4c2991ff5ba585e20fbc6aa9e7b82ca312c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "histograms.py", "max_forks_repo_name": "yaramohajerani/FrontLearning", "max_forks_repo_head_hexsha": "70f0e4c2991ff5ba585e20fbc6aa9e7b82ca312c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-04-06T10:09:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-13T10:04:02.000Z", "avg_line_length": 39.99031477, "max_line_length": 132, "alphanum_fraction": 0.5698716396, "include": true, "reason": "import numpy", "num_tokens": 4142}
|
function [ center, eccent, parity ] = tree_arc_center ( nnode, inode, jnode )
%*****************************************************************************80
%
%% TREE_ARC_CENTER computes the center, eccentricity, and parity of a tree.
%
% Discussion:
%
%    A tree is an undirected graph of N nodes, which uses N-1 edges,
%    and is connected.
%
%    A graph with N-1 edges is not guaranteed to be a tree, and so this
%    routine must first check that condition before proceeding.
%
%    The edge distance between two nodes I and J is the minimum number of
%    edges that must be traversed in a path from I and J.
%
%    The eccentricity of a node I is the maximum edge distance between
%    node I and the other nodes J in the graph.
%
%    The radius of a graph is the minimum eccentricity over all nodes
%    in the graph.
%
%    The diameter of a graph is the maximum eccentricity over all nodes
%    in the graph.
%
%    The center of a graph is the set of nodes whose eccentricity is
%    equal to the radius, that is, the set of nodes of minimum eccentricity.
%
%    For a tree, the center is either a single node, or a pair of
%    neighbor nodes.
%
%    The parity of the tree is 1 if the center is a single node, or 2 if
%    the center is 2 nodes.
%
%    The center of a tree can be found by removing all "leaves", that is,
%    nodes of degree 1.  This step is repeated until only 1 or 2 nodes
%    are left.
%
%    Thanks to Alexander Sax for pointing out that a previous version of the
%    code was failing when the tree had an odd parity, that is, a single
%    center node, 15 April 2013.
%
% Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
% Modified:
%
%    28 June 2013
%
% Author:
%
%    John Burkardt
%
% Parameters:
%
%    Input, integer NNODE, the number of nodes.
%
%    Input, integer INODE(NNODE-1), JNODE(NNODE-1), the edges of
%    the tree.  Edge I connects nodes INODE(I) and JNODE(I).
%
%    Output, integer CENTER(2).  CENTER(1) is the index of the
%    first node in the center.  CENTER(2) is 0 if there is only one node
%    in the center, or else the index of the second node.
%
%    Output, integer ECCENT, the eccentricity of the nodes in
%    the center, and the radius of the tree.
%
%    Output, integer PARITY, the parity of the tree, which is
%    normally 1 or 2.
%
  eccent = 0;
  center(1) = 0;
  center(2) = 0;
  parity = 0;

  if ( nnode <= 0 )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'TREE_ARC_CENTER - Fatal error!\n' );
    fprintf ( 1, '  NNODE <= 0.\n' );
    error ( 'TREE_ARC_CENTER - Fatal error!' );
  elseif ( nnode == 1 )
    eccent = 0;
    center(1) = 1;
    center(2) = 0;
    parity = 1;
    return
  elseif ( nnode == 2 )
    eccent = 1;
    center(1) = 1;
    center(2) = 2;
    parity = 2;
    return
  end
%
%  Is this graph really a tree?
%
  nedge = nnode - 1;
  result = graph_arc_is_tree ( nedge, inode, jnode, nnode );

  if ( result == 0 )
    fprintf ( 1, '\n' );
    fprintf ( 1, 'TREE_ARC_CENTER - Fatal error!\n' );
    fprintf ( 1, '  This graph is NOT a tree.\n' );
    error ( 'TREE_ARC_CENTER - Fatal error!' );
  end
%
%  Compute the degrees.
%
  degree = graph_arc_degree ( nnode, nedge, inode, jnode );
%
%  Defoliate the tree.
%
%  Each pass of the outer loop removes every current leaf; the number of
%  passes performed is the eccentricity of the surviving center node(s),
%  which equals the radius of the tree.
%
  nnode2 = nnode;

  while ( 1 )

    eccent = eccent + 1;
%
%  Find and mark the leaves.
%
    nleaf = 0;
    for i = 1 : nnode
      if ( degree(i) == 1 )
        nleaf = nleaf + 1;
        list(nleaf) = i;
      end
    end
%
%  Delete the leaves.
%
    for ileaf = 1 : nleaf

      i = list(ileaf);

      iedge = 0;
      j = 0;
%
%  Find the single remaining edge incident to leaf I.  Mark it deleted
%  by negating its endpoint entries (the signs are restored at the end).
%
      while ( 1 )

        iedge = iedge + 1;

        if ( nedge < iedge )
          fprintf ( 1, '\n' );
          fprintf ( 1, 'TREE_ARC_CENTER - Fatal error!\n' );
          fprintf ( 1, '  Data or algorithm failure.\n' );
          error ( 'TREE_ARC_CENTER - Fatal error!' );
        end

        if ( inode(iedge) == i )
          j = jnode(iedge);
          inode(iedge) = - inode(iedge);
          jnode(iedge) = - jnode(iedge);
        elseif ( jnode(iedge) == i )
          j = inode(iedge);
          inode(iedge) = - inode(iedge);
          jnode(iedge) = - jnode(iedge);
        end

        if ( j ~= 0 )
          break
        end

      end

      degree(i) = -1;
      nnode2 = nnode2 - 1;
      degree(j) = degree(j) - 1;
%
%  If the other node has degree 0, we must have just finished
%  stripping all leaves from the tree, leaving a single node.
%  Don't kill it here.  It is our odd center.
%
%     if ( degree(j) == 0 )
%       nnode2 = nnode2 - 1;
%     end

    end
%
%  Find the remaining nodes.
%
    nnode2 = 0;
    for i = 1 : nnode
      if ( 0 <= degree(i) )
        nnode2 = nnode2 + 1;
        list(nnode2) = i;
      end
    end
%
%  If at least 3, more pruning is required.
%
    if ( nnode2 < 3 )
      break
    end

  end
%
%  If only one or two nodes left, we are done.
%
  parity = nnode2;

  center(1:nnode2) = list(1:nnode2);
%
%  Restore the edge arrays; the negative signs were only deletion marks.
%  (MATLAB passes arrays by value, so this only tidies the local copies.)
%
  inode(1:nedge) = abs ( inode(1:nedge) );
  jnode(1:nedge) = abs ( jnode(1:nedge) );

  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/treepack/tree_arc_center.m"}
|
using Base: Float64
"""
File with definitions of functions for structural analysis of aircraft configurations using beam elements
"""
# NOTE(review): non-const global.  Presumably a "very large" force value used
# as an effective infinity / load cap elsewhere in the file -- confirm against
# its usages before marking it `const`.
Fmax = 1e15
"""
Get elasticity matrix for a single beam
"""
beam_get_K(
L::Fg,
EA::Fg,
GJ::Fg,
EIy::Fg,
EIz::Fg
) where {Fg <: Real} = - Fg[
(EA / L) 0.0 0.0 0.0 0.0 0.0 (- EA / L) 0.0 0.0 0.0 0.0 0.0;
0.0 (12.0 * EIz / L ^ 3) 0.0 0.0 0.0 (6.0 * EIz / L ^ 2) 0.0 (- 12.0 * EIz / L ^ 3) 0.0 0.0 0.0 (6.0 * EIz / L ^ 2);
0.0 0.0 (12.0 * EIy / L ^ 3) 0.0 (- 6.0 * EIy / L ^ 2) 0.0 0.0 0.0 (- 12.0 * EIy / L ^ 3) 0.0 (- 6.0 * EIy / L ^ 2) 0.0;
0.0 0.0 0.0 (GJ / L) 0.0 0.0 0.0 0.0 0.0 (- GJ / L) 0.0 0.0;
0.0 0.0 (- 6.0 * EIy / L ^ 2) 0.0 (4.0 * EIy / L) 0.0 0.0 0.0 (6.0 * EIy / L ^ 2) 0.0 (2.0 * EIy / L) 0.0;
0.0 (6.0 * EIz / L ^ 2) 0.0 0.0 0.0 (4.0 * EIy / L) 0.0 (- 6.0 * EIz / L ^ 2) 0.0 0.0 0.0 (2.0 * EIz / L);
(- EA / L) 0.0 0.0 0.0 0.0 0.0 (EA / L) 0.0 0.0 0.0 0.0 0.0;
0.0 (- 12.0 * EIz / L ^ 3) 0.0 0.0 0.0 (- 6.0 * EIz / L ^ 2) 0.0 (12.0 * EIz / L ^ 3) 0.0 0.0 0.0 (- 6.0 * EIz / L ^ 2);
0.0 0.0 (- 12.0 * EIy / L ^ 3) 0.0 (6.0 * EIy / L ^ 2) 0.0 0.0 0.0 (12.0 * EIy / L ^ 3) 0.0 (6.0 * EIy / L ^ 2) 0.0;
0.0 0.0 0.0 ( - GJ / L) 0.0 0.0 0.0 0.0 0.0 (GJ / L) 0.0 0.0;
0.0 0.0 (- 6.0 * EIy / L ^ 2) 0.0 (2.0 * EIy / L) 0.0 0.0 0.0 (6.0 * EIy / L ^ 2) 0.0 (4.0 * EIy / L) 0.0;
0.0 (6.0 * EIz / L ^ 2) 0.0 0.0 0.0 (2.0 * EIz / L) 0.0 (- 6.0 * EIz / L ^ 2) 0.0 0.0 0.0 (4.0 * EIz / L)
]
|
{"hexsha": "f5add9eb402e0884fc4f092b152bc35fe6378b62", "size": 1444, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/StructuralAnalysis.jl", "max_stars_repo_name": "Equipe-AeroDesign-ITA/WingBiology", "max_stars_repo_head_hexsha": "9d5d0cb5beaf564cd7fa51a2c02677b32609b7f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-02T14:22:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T17:56:39.000Z", "max_issues_repo_path": "src/StructuralAnalysis.jl", "max_issues_repo_name": "Equipe-AeroDesign-ITA/WingBiology", "max_issues_repo_head_hexsha": "9d5d0cb5beaf564cd7fa51a2c02677b32609b7f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-31T22:10:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-06T05:11:50.000Z", "max_forks_repo_path": "src/StructuralAnalysis.jl", "max_forks_repo_name": "Equipe-AeroDesign-ITA/WingBiology", "max_forks_repo_head_hexsha": "9d5d0cb5beaf564cd7fa51a2c02677b32609b7f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5806451613, "max_line_length": 121, "alphanum_fraction": 0.4494459834, "num_tokens": 965}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.