arxiv_id stringlengths 0 16 | text stringlengths 10 1.65M |
|---|---|
# -*- coding: utf-8 -*-
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
# Path to the training-set directory (cat/dog images).
train_file = '/media/jxnu/Files/dog_vs_cat/train'
def get_file(file_path):
    """Collect cat/dog image paths under *file_path* with shuffled labels.

    Any file whose name contains 'cat' is labelled 0 (cat); everything
    else is labelled 1 (dog).

    :param file_path: directory containing the training images
    :return: (list of image path strings, list of int labels), shuffled
             in unison
    """
    cats, label_cats = [], []
    dogs, label_dogs = [], []
    for name in os.listdir(file_path):
        if 'cat' in name:
            cats.append(file_path + '/' + name)
            label_cats.append(0)   # cat -> label 0
        else:
            dogs.append(file_path + '/' + name)
            label_dogs.append(1)   # dog -> label 1
    image_list = cats + dogs
    label_list = label_cats + label_dogs
    # Shuffle images and labels in unison.  The original stacked both into
    # one numpy array, which silently converted the int labels to strings
    # and required re-casting; permuting indices avoids that round-trip.
    perm = np.random.permutation(len(image_list))
    image_list = [image_list[i] for i in perm]
    label_list = [int(label_list[i]) for i in perm]
    return image_list, label_list
def get_batch(image, label, image_w, image_h, batch_size, capacity):
    """Build a TF1 queue-based input pipeline yielding image/label batches.

    :param image: list of image file paths (strings)
    :param label: list of int labels aligned with `image`
    :param image_w: target width after centre crop/pad
    :param image_h: target height after centre crop/pad
    :param batch_size: samples per batch
    :param capacity: max elements in the prefetch queue
    :return: (image_batch [batch, h, w, 3] float32, label_batch [batch] int32)

    NOTE(review): uses the deprecated TF1 queue API; a session must call
    tf.train.start_queue_runners before consuming these tensors.
    """
    # Convert Python lists to tensors of the right dtypes.
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # Queue yielding one (path, label) pair at a time.
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_content = tf.read_file(input_queue[0])
    # Decode to a 3-channel (colour) image.
    image = tf.image.decode_jpeg(image_content, channels=3)
    # Centre crop or pad to the requested size.
    # NOTE(review): this API takes (target_height, target_width); image_w is
    # passed first, which only works because the caller uses w == h == 227 —
    # confirm before using non-square sizes.
    image = tf.image.resize_image_with_crop_or_pad(image, image_w,
                                                   image_h)
    # image = tf.image.per_image_standardization(image)  # optional per-image standardization
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)
    image_batch = tf.cast(image_batch, tf.float32)
    label_batch = tf.reshape(label_batch, [batch_size])
    return image_batch, label_batch
# Smoke test: verify that get_batch() yields batches from the training set.
batch_size = 1
capacity = 256      # prefetch queue capacity
image_h = 227
image_w = 227       # 227x227 (AlexNet-style input size)
image_list, label_list = get_file(train_file)
image_batch, label_batch = get_batch(image_list, label_list,
                                     image_w=image_w, image_h=image_h,
                                     batch_size=batch_size,
                                     capacity=capacity)
# Commented-out TF1 session code that displays three batches:
# with tf.Session() as sess:
#     t = 0
#     coord = tf.train.Coordinator()
#     # Coordinator lets the queue threads cooperate and stop together.
#     threads = tf.train.start_queue_runners(coord=coord)
#     # Start the queue-runner threads feeding the pipeline.
#     try:
#         while not coord.should_stop() and t < 3:
#             img, label = sess.run([image_batch, label_batch])
#             convert = tf.cast(img, tf.float32)
#             sess.run(convert)
#             for i in np.arange(batch_size):
#                 print('label : %d' % label[i])
#                 plt.imshow(img[i, :, :, :])
#                 plt.show()
#             t += 1
#     except tf.errors.OutOfRangeError:
#         # Catch the end-of-queue error so the program exits cleanly.
#         print "done"   # NOTE: Python-2 print statement in the original
#     finally:
#         coord.request_stop()   # ask all threads to stop
#         coord.join(threads)    # wait for the threads to terminate
from deepnote.modules import Metric, Note
import numpy as np
from scipy.stats import entropy
import itertools
from .repr import MusicRepr
from .scale import Scale
def pitch_histogram_entropy(seq : MusicRepr, window : int = 1, pitch_class: bool = False, return_probs=True):
    """Entropy (in bits) of the pitch histogram over a sliding window of bars.

    seq : input sequence
    window : number of bars as window
    pitch_class : fold the pitch axis onto the 12 pitch classes
    return_probs : also return the per-window histograms (columns)
    """
    bars = seq.get_bars()
    n_bars = len(bars)
    if window > n_bars:
        window = n_bars
        print(f'[Warning] Window size set to {window}.')
    ents, probs = [], []
    for start in range(n_bars - window + 1):
        roll = MusicRepr.concatenate(bars[start:start + window]).to_pianoroll(
            separate_tracks=False, binarize=True, add_tempo_chord=False)
        acts = roll.sum(axis=1)
        if pitch_class:
            # Fold the MIDI pitch axis onto the 12 pitch classes.
            acts = np.array([acts[c::12].sum() for c in range(12)])
        total = acts.sum()
        dist = acts / total if total > 0 else acts
        ents.append(entropy(dist) / np.log(2))  # nats -> bits
        probs.append(dist)
    if return_probs:
        return np.array(ents), np.array(probs).T
    return np.array(ents)
def polyphony(seq : MusicRepr):
    """Average number of simultaneous pitches over steps that have any note."""
    onsets = seq.to_pianoroll(separate_tracks=False, binarize=True, add_tempo_chord=False).sum(axis=0)
    active_steps = np.sum(onsets > 0)
    return onsets.sum() / active_steps
def polyphony_rate(seq : MusicRepr):
    """Fraction of time steps in which at least one pitch is sounding."""
    onsets = seq.to_pianoroll(separate_tracks=False, binarize=True, add_tempo_chord=False).sum(axis=0)
    return np.count_nonzero(onsets > 0) / onsets.shape[0]
def pitch_in_scale_rate(seq : MusicRepr):
    """Per-chord fraction of sounding pitches that fall inside the chord's scale.

    Splits the piece at chord changes, scores each segment's pianoroll against
    the chord that was active via Scale.pitch_in_scale, and normalises by the
    number of sounding pitches in the segment.

    :return: dict chord -> score (0. when the chord had no pitches)
    """
    scale = Scale()
    pianoroll = seq.to_pianoroll(separate_tracks=False, binarize=True, add_tempo_chord=False)
    scores = {}
    prev_idx = 0
    prev_chord = None
    for idx, e in enumerate(seq.events):
        if isinstance(e, Metric) and e.chord is not None:
            if prev_chord is None:
                prev_chord = e.chord
            elif e.chord != prev_chord:
                # Chord changed: score the segment [prev_idx, idx) under the
                # chord that was active during it.
                if prev_chord not in scores:
                    scores[prev_chord] = {'score': 0, 'n_pitches' : 0}
                scores[prev_chord]['score'] += scale.pitch_in_scale(pianoroll[:, prev_idx:idx], chord=prev_chord)
                scores[prev_chord]['n_pitches'] += np.sum(pianoroll[:, prev_idx:idx] > 0)
                prev_chord = e.chord
            # NOTE(review): prev_idx advances at every chord-carrying Metric
            # event (indentation reconstructed from a whitespace-stripped dump
            # — confirm), and the final segment after the last chord change is
            # never scored.
            prev_idx = idx
    # Normalise each chord's accumulated score by its pitch count.
    for chord in scores:
        scores[chord] = scores[chord]['score'] / scores[chord]['n_pitches'] if scores[chord]['n_pitches'] > 0 else 0.
    return scores
def empty_beat_rate(seq: MusicRepr):
    """Fraction of time steps with no note onset at all."""
    onsets = seq.to_pianoroll(separate_tracks=False, binarize=True, add_tempo_chord=False).sum(axis=0)
    silent_steps = np.sum(onsets == 0)
    return silent_steps / onsets.shape[0]
def grooving_pattern_similarity(seq : MusicRepr):
    """Mean pairwise similarity of the onset (groove) patterns of all bars."""
    def onset_pattern(bar):
        roll = bar.to_pianoroll(separate_tracks=False, binarize=True, add_tempo_chord=False)
        return roll.sum(axis=0) > 0

    bars = seq.get_bars()
    sims = []
    for bar_a, bar_b in itertools.combinations(bars, 2):
        pat_a = onset_pattern(bar_a)
        pat_b = onset_pattern(bar_b)
        # Similarity = 1 - normalised Hamming distance of the onset patterns.
        sims.append(1 - np.logical_xor(pat_a, pat_b).sum() / pat_a.shape[0])
    return np.mean(sims)
def chord_progression_irregularity(seq : MusicRepr, ngram=3, ret_unique_ngrams=False):
    """Fraction of distinct chord n-grams among all n-grams of the progression.

    :param seq: input sequence
    :param ngram: n-gram length over the chord sequence
    :param ret_unique_ngrams: if True also return the set of unique n-grams
    :return: ratio in [0, 1] (0.0 when fewer than `ngram` chords exist),
             optionally followed by the set of unique n-grams
    """
    chords = [e.chord for e in seq.events if isinstance(e, Metric) and e.chord is not None]
    # Bug fix: a sequence of L chords has L - ngram + 1 n-grams; the original
    # used L - ngram, which dropped the final n-gram and raised
    # ZeroDivisionError when L == ngram.
    num_ngrams = max(len(chords) - ngram + 1, 0)
    unique_set = set()
    for i in range(num_ngrams):
        unique_set.add('|'.join(chords[i:i + ngram]))
    res = len(unique_set) / num_ngrams if num_ngrams > 0 else 0.0
    if ret_unique_ngrams:
        return res, unique_set
    return res
def structuredness_indicator(seq : MusicRepr):
    """Placeholder metric — not implemented yet; always raises."""
    raise NotImplementedError
# Copyright 2020 NXP Semiconductors
# Copyright 2020 Marco Franchi
#
# This file was copied from NXP Semiconductors PyeIQ project respecting its
# rights. All the modified parts below are according to NXP Semiconductors PyeIQ
# project`s LICENSE terms.
#
# Reference: https://source.codeaurora.org/external/imxsupport/pyeiq/
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from contextlib import contextmanager
from datetime import timedelta
from time import monotonic
from tflite_runtime.interpreter import Interpreter
class InferenceTimer:
    """Measures the wall-clock duration of a code region via a context manager."""

    def __init__(self):
        # Human-readable duration string (set by convert()).
        self.time = 0
        # Raw elapsed seconds as a float (set by convert()).
        self.int_time = 0

    @contextmanager
    def timeit(self, message: str = None):
        """Time the enclosed block and print '<message>: <duration>'."""
        started = monotonic()
        try:
            yield
        finally:
            stopped = monotonic()
            self.convert(stopped - started)
            print("{0}: {1}".format(message, self.time))

    def convert(self, elapsed):
        """Store *elapsed* seconds both raw and as a timedelta string."""
        self.int_time = elapsed
        self.time = str(timedelta(seconds=elapsed))
class TFLiteInterpreter:
    """Thin convenience wrapper around a tflite_runtime Interpreter."""

    def __init__(self, model=None):
        self.interpreter = None
        self.input_details = None
        self.output_details = None
        self.inference_time = None
        self.time_to_print = []
        if model is None:
            return
        self.interpreter = Interpreter(model)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()

    def dtype(self):
        """dtype expected by the model's (single) input tensor."""
        return self.input_details[0]['dtype']

    def height(self):
        """Input tensor height (dim 1 of the input shape)."""
        return self.input_details[0]['shape'][1]

    def width(self):
        """Input tensor width (dim 2 of the input shape)."""
        return self.input_details[0]['shape'][2]

    def get_tensor(self, index, squeeze=False):
        """Fetch output tensor *index*, optionally squeezing singleton dims."""
        tensor = self.interpreter.get_tensor(self.output_details[index]['index'])
        if squeeze:
            tensor = np.squeeze(tensor)
        return tensor

    def set_tensor(self, image):
        """Copy *image* into the model's (single) input tensor."""
        self.interpreter.set_tensor(self.input_details[0]['index'], image)

    def get_time_average(self):
        """List of raw per-run inference durations collected so far (seconds)."""
        return self.time_to_print

    def run_inference(self):
        """Invoke the interpreter once, timing and recording the run."""
        timer = InferenceTimer()
        with timer.timeit("Inference time"):
            self.interpreter.invoke()
        self.inference_time = timer.time
        self.time_to_print.append(timer.int_time)
"""
Author: Peratham Wiriyathammabhum
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
# Machine epsilon for float64 (kept available for numerical guards below).
eps = np.finfo(float).eps
"""
Importance sampling is a framework. It enables estimation of an r.v. X from a black box target distribution f
using a known proposal distribution g (importance distribution) representing an r.v. Y.
Well, we cannot sample from f but we can evaluate using f.
That is, we sample Y from g to estimate probabilities of X in f.
We multiply some function h(x) with the probability value for each sample using the importance weight f/g.
See: http://www.acme.byu.edu/wp-content/uploads/2016/12/Vol1B-MonteCarlo2-2017.pdf
https://machinelearning1.wordpress.com/2017/10/22/importance-sampling-a-tutorial/
"""
def estimate_p_gt_3_for_gaussian(nsamples=2000):
    """
    Importance-sampling estimate of P(X > 3) for X ~ N(0, 1).

    The answer should approach 0.0013499 for sufficiently large samples.
    See: http://www.acme.byu.edu/wp-content/uploads/2016/12/Vol1B-MonteCarlo2-2017.pdf
    """
    target_pdf = stats.norm().pdf                  # f: target N(0, 1)
    proposal_pdf = stats.norm(loc=4, scale=1).pdf  # g: proposal N(4, 1)
    draws = np.random.normal(4, scale=1, size=nsamples)  # samples from g
    indicator = draws > 3                          # h(x) = 1{x > 3}
    weights = target_pdf(draws) / proposal_pdf(draws)    # importance weights f/g
    return np.sum(indicator * weights) / nsamples
def main(opts):
    """Run the importance-sampling demo and print the estimation error.

    :param opts: dict with key 'nsamples' (number of proposal samples)
    """
    nsamples = opts['nsamples']
    est = estimate_p_gt_3_for_gaussian(nsamples=nsamples)
    print('[Info] estimate P(X > 3)=0.0013499 for normal distribution: {:.7f}'.format(est))
    # Message typo fixed: 'difffer' -> 'differs'.
    print('[Info] estimation differs by: {:.7f}'.format(np.absolute(est - 0.0013499)))
    return
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='run importance sampling.')
    parser.add_argument('--num_samples', dest='nsamples',
                        help='number of samples',
                        default=2000, type=int)
    args = parser.parse_args()
    # main() expects a plain dict of options.
    opts = vars(args)
    main(opts)
# !/cs/usr/liorf/PycharmProjects/proj_scwgbs/venv/bin python
import argparse
import collections
import copy
import glob
import os
import re
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
sys.path.append(os.path.dirname(os.getcwd()))
sys.path.append(os.getcwd())
from commons import data_tools, files_tools
# Glob/format patterns for the per-chromosome CpG-ratio files and outputs.
CPG_FORMAT_FILE_FORMAT = "all_cpg_ratios_*_chr%d.dummy.pkl.zip"
CPG_FORMAT_FILE_RE = re.compile(".+(CRC\d+)_(chr\d+).dummy.pkl.zip")
MET_AVG_FILE_FORMAT = "nc_average_methylation_chr%d.dummy.pkl.zip"
BEDGRAPH_OUTPUT_FILE_FORMAT = "nc_methylated_coverage_chr%d_threshold_%d.bedgraph"
BEDGRAPH_FILE_FORMAT = "nc_methylated_coverage_chr*_threshold_*.bedgraph"
BEDGRAPH_FILE_FORMAT_RE = re.compile(".+nc_methylated_coverage_chr(\d+)_threshold_\d+.bedgraph")
CSV_FILE = "average_methylation_of_nc_%s.csv"
# Patient cohorts (earlier subsets kept for reference).
# PATIENTS = ['CRC01', 'CRC13', 'CRC11']
# PATIENTS = ['CRC01', 'CRC13', 'CRC04', 'CRC10', 'CRC11']
PATIENTS = ['CRC01', 'CRC13', 'CRC11', "CRC02", "CRC04", "CRC09", "CRC10", "CRC12", "CRC14", "CRC15"]
# Minimum (methylated patients / covering patients) ratio for an index to count.
MET_THRESHOLD = 0.5
def parse_input():
    """Parse the command-line flags; exactly one analysis mode is expected."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--cpg_format_folder',
                            help='Path to folder of parsed scWGBS',
                            required=True)
    arg_parser.add_argument('--output_folder',
                            help='Path of the output folder',
                            required=False)
    arg_parser.add_argument('--nc_avg',
                            help='Create a csv file with counts of how many times each methylation average appears',
                            required=False)
    arg_parser.add_argument('--methylation_diff', help='', required=False)
    arg_parser.add_argument('--nc_coverage_bedgraph',
                            help='Counts how many of the patients cover each CpG and output to a bedgraph',
                            required=False)
    arg_parser.add_argument('--nc_coverage_inds',
                            help='Creates a pickle file with a dictionary of the methylated indices per chromosome',
                            required=False)
    arg_parser.add_argument('--methylation_coverage',
                            help='Receives the above bedgraph and creates a csv of how many CpGs are methylated for how many patients',
                            required=False)
    arg_parser.add_argument('--methylation_average',
                            help='Creates a DF per chromosome t=with the avergae methylation of each patient',
                            required=False)
    return arg_parser.parse_args()
def average_nc_methylation(cpg_format_file):
    """
    Creates a list of all the average values of the normal cells per patient.
    :param cpg_format_file: File with the CpG data (pickled DataFrame,
        one row per cell, 'NC'-prefixed index entries are normal cells)
    :return: array of per-CpG averages over the normal cells, NaNs dropped
    """
    frame = pd.read_pickle(cpg_format_file)
    nc_mask = [cell_id.startswith('NC') for cell_id in frame.index]
    nc_frame = frame.loc[nc_mask, :]
    col_means = np.mean(nc_frame, axis=0)
    return col_means.dropna().values
def avg_main(args, output):
    """
    Create a csv file with counts of how many times each methylation average appears
    :param args: The program arguments
    :param output: The output folder
    """
    pattern = os.path.join(args.cpg_format_folder, CPG_FORMAT_FILE_FORMAT)
    chromosome_files = glob.glob(pattern)
    patient = os.path.split(args.cpg_format_folder)[-1]
    counter = collections.Counter()
    for chromosome_file in tqdm(chromosome_files):
        counter.update(average_nc_methylation(chromosome_file))
    data_tools.counter_to_csv(counter, os.path.join(output, CSV_FILE % patient))
def create_nc_coverage_bedgraph(patients_list, chr, output, bedgraph=False, dump_indices=False):
    """
    Creates coverage stats of the normal cells for one chromosome.
    If dump_indices, returns the methylated indices - CpGs where at least
    MET_THRESHOLD of the covering patients had an NC average >= threshold;
    if bedgraph, writes the per-CpG methylated-patient count to a bedgraph.
    :param patients_list: All the paths of CpG files per chromosome
    :param chr: The current chromosome
    :param output: The output folder
    :param bedgraph: Save to bedgraph?
    :param dump_indices: Return a list of the indices?
    :return: Only if dump_indices
    """
    all_met = []
    all_nan = []
    threshold = 0.6  # NC-average needed for one patient to call a CpG methylated
    path = os.path.join(output, BEDGRAPH_OUTPUT_FILE_FORMAT % (chr, threshold * 10))
    not_nan = None
    for patient in patients_list:
        df = pd.read_pickle(patient)
        normal_cell_ids = [cell_id for cell_id in df.index if cell_id.startswith('NC')]
        normal_df = df.loc[normal_cell_ids, :]
        average = np.mean(normal_df, axis=0)
        met = average >= threshold
        nan = ~np.isnan(average)
        # not_nan accumulates CpG positions covered by *every* patient.
        # NOTE(review): `&=` on a pandas Index acts as set intersection here
        # (deprecated in newer pandas; Index.intersection is the explicit form).
        if not_nan is None:
            not_nan = average.index[np.where(average.notnull())[0]]
        else:
            not_nan &= average.index[np.where(average.notnull())[0]]
        all_met.append(met)
        all_nan.append(nan)
    if len(all_met) == 0:
        return None
    # One column per patient, 1 where that patient's NC average was methylated.
    all_met_df = pd.concat(all_met, axis=1).astype(int)
    met_coverage = np.sum(all_met_df, axis=1)  # methylated-patient count per CpG
    if bedgraph:
        met_coverage_not_nan = met_coverage.loc[not_nan]
        index = pd.Series(met_coverage_not_nan.index).astype(int)
        chromosome = pd.DataFrame(np.full((index.shape[0],), chr))
        # bedgraph columns: chrom, start, end (= start + 1), value.
        bed = pd.concat([chromosome, index, index + 1, met_coverage_not_nan.reset_index(drop=True)], axis=1)
        bed.to_csv(path, sep='\t', header=False, index=False)
    if dump_indices:
        all_nan_df = pd.concat(all_nan, axis=1).astype(int)
        nan_coverage = np.sum(all_nan_df, axis=1)  # covering-patient count per CpG
        met_nan = pd.concat([met_coverage, nan_coverage], axis=1, names=['met', 'nan'])
        # Fraction of covering patients that called the CpG methylated.
        met_nan_ratio = met_coverage / nan_coverage
        methylated_indices = nan_coverage.index[met_nan_ratio >= MET_THRESHOLD]
        return list(methylated_indices)
def methylation_diff(patients_list):
    """
    Finds the percentage per chromosome of the number of CpGs that were methylated (average of over 0.6) out of the
    total amount of covered CpGs.
    :param patients_list: paths of the per-patient CpG files for one chromosome
    :return: 100 * |intersection of methylated CpGs| / |union of methylated CpGs|,
             both restricted to CpGs covered by every patient; None for no input
    """
    all_met_ind = []
    not_nan = None
    for patient in patients_list:
        df = pd.read_pickle(patient)
        normal_cell_ids = [cell_id for cell_id in df.index if cell_id.startswith('NC')]
        normal_df = df.loc[normal_cell_ids, :]
        average = np.mean(normal_df, axis=0)
        # CpG positions this patient's normal cells call methylated.
        met_ind = average.index[np.where(average >= 0.6)[0]]
        # Intersect coverage across patients (Index `&` = set intersection).
        if not_nan is None:
            not_nan = average.index[np.where(average.notnull())[0]]
        else:
            not_nan &= average.index[np.where(average.notnull())[0]]
        all_met_ind.append(met_ind)
    if len(all_met_ind) == 0:
        return None
    # Index `&`/`|` act as set intersection/union of the methylated positions.
    met_and = copy.deepcopy(all_met_ind[0])
    met_or = copy.deepcopy(all_met_ind[0])
    for i in range(1, len(all_met_ind)):
        met_ind = copy.deepcopy(all_met_ind[i])
        met_and &= met_ind
        met_or |= met_ind
    return len(met_and & not_nan) / len(met_or & not_nan) * 100
def diff_main(args, output):
    """
    Write the per-chromosome methylation-agreement percentage to a report file.
    :param args: The program arguments
    :param output: The output folder
    """
    all_cpg_format_file_paths = create_chr_paths_dict(args)
    # 'with' guarantees the report file is closed even if a chromosome fails
    # (the original opened the file and never called close()).
    with open(os.path.join(output, "avg_methylation_60.out"), "w") as f:
        for chr in tqdm(all_cpg_format_file_paths):
            v = methylation_diff(all_cpg_format_file_paths[chr])
            f.write("chr%s:%s\n" % (chr, v))
            print(v)
def nc_coverage_main(args, output):
    """
    Per chromosome: write the NC coverage bedgraph and/or collect the
    methylated indices, then dump the indices dict as a compressed pickle.
    :param args: The program arguments
    :param output: The output folder
    """
    all_cpg_format_file_paths = create_chr_paths_dict(args)
    methylation_indices_dict = {}
    for chr in tqdm(all_cpg_format_file_paths):
        indices_list = create_nc_coverage_bedgraph(all_cpg_format_file_paths[chr], chr, output,
                                                   bedgraph=args.nc_coverage_bedgraph,
                                                   dump_indices=args.nc_coverage_inds)
        methylation_indices_dict[chr] = indices_list
    path = os.path.join(output, "methylated_indices_threshold_%d.pickle.zlib" % (MET_THRESHOLD * 100))
    files_tools.save_as_compressed_pickle(path, methylation_indices_dict)
def create_chr_paths_dict(args):
    """Map each chromosome number (1..22) to every matching CpG file path
    across all patients in PATIENTS."""
    paths_by_chr = {chr: [] for chr in range(1, 23)}
    for patient in PATIENTS:
        for chr in range(1, 23):
            pattern = os.path.join(args.cpg_format_folder, patient, CPG_FORMAT_FILE_FORMAT % chr)
            paths_by_chr[chr] += glob.glob(pattern)
    return paths_by_chr
def met_coverage_main(args, output):
    """
    Counts how many of the patients cover each CpG and output to a csv.
    :param args: The program arguments
    :param output: The output folder
    """
    cpg_format_file_path = os.path.join(args.cpg_format_folder, BEDGRAPH_FILE_FORMAT)
    all_methylation_coverage_paths = glob.glob(cpg_format_file_path)
    # 'with' ensures the csv is closed even if a bedgraph fails to parse
    # (the original relied on a manual close() that an exception would skip).
    with open(os.path.join(output, "methylation_coverage_60.csv"), "w") as f:
        f.write("chr, 1, 2, 3, 4, 5\n")
        for path in all_methylation_coverage_paths:
            chr = BEDGRAPH_FILE_FORMAT_RE.findall(path)[0]
            df = pd.read_csv(path, sep='\t')
            agree_percent = []
            # CpGs methylated in at least one patient (denominator).
            num_of_met = len(np.where(df.iloc[:, -1] > 0)[0])
            for i in range(5):  # num of patients
                num_over = len(np.where(df.iloc[:, -1] > i)[0])
                agree_percent.append(num_over / num_of_met * 100)
            f.write("chr%s," % chr)
            f.write(",".join(str(perc) for perc in agree_percent))
            f.write('\n')
def create_patient_series(patients_list):
    """
    Receives a list of all the paths of CpG files per chromosome, creates an
    average of all the normal cells and returns a DF of patient/genome location.
    """
    averages = {}
    for path in patients_list:
        # Patient name is the first regex capture group of the file name.
        patient_name = CPG_FORMAT_FILE_RE.findall(path)[0][0]
        frame = pd.read_pickle(path)
        nc_mask = [cell_id.startswith('NC') for cell_id in frame.index]
        averages[patient_name] = np.mean(frame.loc[nc_mask, :], axis=0)
    return pd.DataFrame(averages)
def avg_df_main(args, output):
    """
    Creates a DF per chromosome with the average methylation of each patient.
    :param args: The program arguments
    :param output: The output folder
    """
    paths_by_chr = create_chr_paths_dict(args)
    for chr in tqdm(paths_by_chr):
        target = os.path.join(output, MET_AVG_FILE_FORMAT % chr)
        create_patient_series(paths_by_chr[chr]).to_pickle(target)
def main():
    """Dispatch to exactly one analysis mode based on the CLI flags."""
    args = parse_input()
    # Default the output folder to the script's own directory.
    output = args.output_folder or os.path.dirname(sys.argv[0])
    if args.nc_avg:
        avg_main(args, output)
    elif args.methylation_diff:
        diff_main(args, output)
    elif args.methylation_coverage:
        met_coverage_main(args, output)
    elif args.nc_coverage_inds or args.nc_coverage_bedgraph:
        nc_coverage_main(args, output)
    elif args.methylation_average:
        avg_df_main(args, output)
if __name__ == '__main__':
    # Script entry point.
    main()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
plt.style.use('ggplot')
import arch
from arch.unitroot import ADF
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
import os
import datetime as dt
#
# dff_df_R001 = df.diff()
# adf = ADF(df_R001.dropna())
# print(adf.summary().as_text())
# adf.lags=1
# plot_acf(df_R001, lags=25, alpha=0.5)#自相关系数ACF图
# plot_pacf(df_R001, lags=25, alpha=0.5)#偏相关系数PACF图
#
# adf.lags = 4
#
# reg_res = adf.regression
# print(reg_res.summary().as_text())
#
# type(reg_res)
class TSAnalysis(object):
    """Helper plots and stationarity tests for a univariate time series."""

    def plot_trend(self, df_ts, size):
        """Plot the series with its rolling and exponentially weighted means.

        :param df_ts: time series (pandas Series/DataFrame)
        :param size: window span used for both rolling statistics
        """
        ax = plt.subplot()
        # Simple moving average over `size` observations.
        rol_mean = df_ts.rolling(window=size).mean()
        # Exponentially weighted moving average with the same span.
        rol_weighted_mean = df_ts.ewm(span=size).mean()
        df_ts.plot(color='blue', label='Original', ax=ax)
        rol_mean.plot(color='red', label='Rolling Mean', ax=ax)
        rol_weighted_mean.plot(color='black', label='Weighted Rolling Mean', ax=ax)
        plt.legend(loc='best')
        plt.title('Rolling Mean')
        plt.show()

    def plot_ts(self, df_ts):
        """Plot the raw series."""
        ax = plt.subplot()
        df_ts.plot(color='blue', ax=ax)
        plt.show()

    def ADF_test(self, df_ts, lags=None):
        """Run an Augmented Dickey-Fuller test and print its summary.

        :param df_ts: time series; NaNs are dropped if ADF rejects them
        :param lags: optional lag order to force on the test
        :return: the arch.unitroot.ADF object

        Bug fix: the original compared `lags == 'None'` (the *string*), so
        the no-lags branch could never trigger for the default argument and
        `adf.lags = None` was always assigned. Both branches also duplicated
        the same try/except; collapsed into one.
        """
        try:
            adf = ADF(df_ts)
        except Exception:
            # ADF rejects series containing NaNs; retry without them.
            adf = ADF(df_ts.dropna())
        if lags is not None:
            adf.lags = lags
        print(adf.summary().as_text())
        return adf

    def plot_acf_pacf(self, df_ts, lags=31):
        """Plot the ACF (top) and PACF (bottom) of the series.

        Bug fix: the original hard-coded lags=31 in both subplot calls,
        silently ignoring this method's `lags` parameter.
        """
        f = plt.figure(facecolor='white', figsize=(12, 8))
        ax1 = f.add_subplot(211)
        plot_acf(df_ts, lags=lags, ax=ax1)
        ax2 = f.add_subplot(212)
        plot_pacf(df_ts, lags=lags, ax=ax2)
        plt.show()
if __name__ == '__main__':
    print(os.getcwd())
    # Hard-coded local data file; adjust the path before running elsewhere.
    RU = pd.read_excel('/home/nealzc1991/PycharmProjects/Py4Invst/Fundamental/RU.xls')
    RU.dropna(inplace=True)
    df = pd.DataFrame(columns=['Date', 'Close'])
    df.loc[:, 'Date'] = RU.loc[:, 'Date']
    df.loc[:, 'Close'] = RU.loc[:, 'Close']
    df.set_index('Date', inplace=True)
    # One-year slice (unused below; kept as in the original).
    df_RU = df.loc[dt.datetime(2015, 7, 31):dt.datetime(2016, 7, 29), :]
    a = TSAnalysis()
    # Work on log-differenced prices (log returns) to approach stationarity.
    adf = a.ADF_test(df.apply(np.log).diff(1).dropna())
    a.plot_acf_pacf(df.apply(np.log).diff(1).dropna())
    df_diff_1 = df.apply(np.log).diff(1).dropna()
    # NOTE(review): statsmodels.tsa.arima_model.ARMA was removed in modern
    # statsmodels; migrate to statsmodels.tsa.arima.model.ARIMA.
    from statsmodels.tsa.arima_model import ARMA
    model = ARMA(df_diff_1, order=(1, 1))
    result_arma = model.fit(disp=-1, method='css')
    predict_ts = result_arma.predict()
    diff_shift_ts = df_diff_1.shift(1).dropna()
    # Undo the first-order differencing of the predicted series.
    diff_recover_1 = predict_ts + diff_shift_ts.loc[:,'Close']
## Leetcode problem 210: Course schedule II.
#https://leetcode.com/problems/course-schedule-ii/
#based on topological sorting.
import numpy as np
import algorith.clr_book.ch22_elemtary_graph.graph as gr
from typing import List
class Solution():
    """Course Schedule II (LeetCode 210) solved via DFS topological sort."""

    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
        """Return a valid course order, or [] if the prerequisites are cyclic."""
        self.lst = []
        # Build the course graph from the edge list (project-local Graph class).
        g=gr.Graph().init_from_edge_list(numCourses,prerequisites)
        self.dfs_topolo_sort(g)
        return self.lst

    def dfs_topolo_sort(self,g):
        """DFS every undiscovered (white) vertex; empty the result on a cycle."""
        for v in g.vertices:
            if v.color=="white":
                try:
                    self.dfs_visit(g,v)
                # NOTE(review): bare except — intended to catch the cycle
                # Exception from dfs_visit, but would also swallow any
                # unrelated error (e.g. AttributeError).
                except:
                    self.lst=[]
                    break

    def dfs_visit(self,g,u):
        """Recursive DFS: grey = on stack (grey neighbour => cycle), black = done.

        Keys are appended post-order, so each course's prerequisites are
        appended before it — assuming this Graph orients edges from a course
        to its prerequisite (consistent with the expected outputs in the
        __main__ tests below; TODO confirm against Graph.init_from_edge_list).
        """
        u.color="grey"
        for v in g.adj[u]:
            if v.color=="grey":
                raise Exception("cycle detected!")
            elif v.color=="white":
                self.dfs_visit(g,v)
        u.color="black"
        self.lst.append(u.key)
if __name__ == "__main__":
    # Smoke tests; any of the noted outputs is a valid topological order.
    numCourses = 4; prerequisites = [[1,0],[2,0],[3,1],[3,2]]; output_1= [0,2,1,3]; output_2= [0,1,2,3]
    lst = Solution().findOrder(numCourses,prerequisites)
    print(lst)
    numCourses = 2; prerequisites = [[1,0]]; output= [0,1]
    lst = Solution().findOrder(numCourses,prerequisites)
    print(lst)
    numCourses = 2; prerequisites = [[0,1]]; output= [1,0]
    lst = Solution().findOrder(numCourses,prerequisites)
    print(lst)
    # Cyclic prerequisites -> no valid order, expect [].
    numCourses = 2; prerequisites = [[0,1],[1,0]]; output= []
    lst = Solution().findOrder(numCourses,prerequisites)
    print(lst)
# written by LazyGuyWithRSI
import pyautogui
import ctypes
import time
from PIL import ImageGrab
import numpy as np
import cv2 as cv
import win32api
import keyboard
from configparser import ConfigParser
# TODO HOTKEY not implemented yet
# change HOTKEY to whatever key you want (ex. 'a', 'f2') even modifiers (ex. 'ctrl+d')
HOTKEY = 'space' #default: 'space'
# change LOOT_COLOR to whatever color the loot you want to pick up is (R, G, B)
LOOT_COLOR = (255, 0, 239) #default: (255, 0, 239)
# Allowed +/- per-channel deviation when matching LOOT_COLOR on screen.
COLOR_DEVIATION = 10
# Fraction of the screen (width, height) that is searched for loot.
searchScaleWidth = 0.8
searchScaleHeight = 0.8
# Upward shift of the search box, as a fraction of screen height.
searchOffset = 0.02
# Placeholder geometry; recomputed from the real resolution in init().
offsetX = 1050
offsetY = 540
searchWidth = 1000
searchHeight = 850
# State for the (unfinished) hold-to-loot hotkey handling.
keyDown = False
keyState = 0
def init(): # global gross, but I am writing this at 11pm so I'll fix it later
    """Compute the screen-centred search region and load config.ini overrides."""
    global offsetX, offsetY, searchWidth, searchHeight, searchScaleWidth, searchScaleHeight, HOTKEY, LOOT_COLOR
    import ast  # local import: only needed for the loot_color override
    res = ctypes.windll.user32.GetSystemMetrics(0), ctypes.windll.user32.GetSystemMetrics(1)
    searchWidth = res[0] * searchScaleWidth
    searchHeight = res[1] * searchScaleHeight
    # Centre the search box, nudged upwards by searchOffset of screen height.
    offsetX = (res[0] / 2) - (searchWidth / 2)
    offsetY = (res[1] / 2) - (searchHeight / 2) - (res[1] * searchOffset)
    # load values from config.ini if there are any
    parser = ConfigParser()
    parser.read("config.ini")
    if (parser.has_option('config', 'hotkey')):
        HOTKEY = parser.get('config', 'hotkey')
        print("loading HOTKEY=" + str(HOTKEY) + " from config.ini")
    if (parser.has_option('config', 'loot_color')):
        # Security fix: parse the "(R, G, B)" tuple with ast.literal_eval
        # instead of eval(), so config.ini cannot execute arbitrary code.
        LOOT_COLOR = ast.literal_eval(parser.get('config', 'loot_color'))
        print("loading LOOT_COLOR=" + str(LOOT_COLOR)+ " from config.ini")
    print("-- init finished --\n\n")
def leftClick(sleep=0.005):
    """Synthesize a left mouse click via the Win32 mouse_event API.

    :param sleep: delay in seconds after the down event and after the up event
    """
    ctypes.windll.user32.mouse_event(2, 0, 0, 0,0) # left down (MOUSEEVENTF_LEFTDOWN)
    time.sleep(sleep)
    ctypes.windll.user32.mouse_event(4, 0, 0, 0,0) # left up (MOUSEEVENTF_LEFTUP)
    time.sleep(sleep)
def extrapolate(xVals, yVals, lagCompensation = 1.0):
    """Displacement between the first two samples, scaled by lagCompensation.

    Returns (0, 0) when fewer than two samples are available in either list.
    """
    if len(xVals) >= 2 and len(yVals) >= 2:
        dx = (xVals[1] - xVals[0]) * lagCompensation
        dy = (yVals[1] - yVals[0]) * lagCompensation
        return dx, dy
    return (0, 0)
def grabLoot():
    """Take two quick screenshots, detect the loot colour, extrapolate its
    motion, then click the predicted position and restore the cursor.

    Relies on module globals offsetX/offsetY, searchWidth/searchHeight,
    LOOT_COLOR and COLOR_DEVIATION. Windows-only (ctypes.windll).
    """
    cXList = []
    cYList = []
    # Sample the loot position twice so its velocity can be extrapolated.
    for i in range(2):
        ### detect the loot ###
        img = np.array(ImageGrab.grab(bbox=(offsetX, offsetY, offsetX + searchWidth, offsetY + searchHeight)))
        scale_percent = 25 # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        # resize image (4x downscale to speed up the colour search)
        frame = cv.resize(img, dim, interpolation = cv.INTER_AREA)
        frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)
        # Colour window of +/- COLOR_DEVIATION around LOOT_COLOR; channels are
        # reversed because LOOT_COLOR is RGB while OpenCV frames are BGR.
        lowerBound = (
            max(0, min(255,LOOT_COLOR[2] - COLOR_DEVIATION)),
            max(0, min(255,LOOT_COLOR[1] - COLOR_DEVIATION)),
            max(0, min(255,LOOT_COLOR[0] - COLOR_DEVIATION)))
        upperBound = (
            max(0, min(255,LOOT_COLOR[2] + COLOR_DEVIATION)),
            max(0, min(255,LOOT_COLOR[1] + COLOR_DEVIATION)),
            max(0, min(255,LOOT_COLOR[0] + COLOR_DEVIATION)))
        frameGray = cv.inRange(frame, lowerBound, upperBound)
        #cv.imshow("pre thresh", frameGray)
        ret, thresh = cv.threshold(frameGray, 100, 255, 0)
        #thresh = cv.erode(thresh, None, iterations=1)
        thresh = cv.dilate(thresh, None, iterations=1)
        #cv.imshow("test", thresh)
        contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        #cv.drawContours(frame, contours, -1, (0,255,0), 1)
        foundThingToClick = False
        for contour in contours:
            # Ignore specks below 100 px^2 in the downscaled frame.
            if cv.contourArea(contour) < 100:
                continue
            approx = cv.approxPolyDP(contour, 0.01*cv.arcLength(contour, True), True)
            if len(approx) >= 4 and len(approx) <= 50:
                cv.drawContours(frame, [contour], 0, (0, 50, 200), 2)
                foundThingToClick = True
                # find center of the loot (contour centroid via image moments)
                M = cv.moments(contour)
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                #scale cX and cY by image scale (back to full-resolution coords)
                cX *= 100/scale_percent
                cY *= 100/scale_percent
                cXList.append(cX)
                cYList.append(cY)
                break
    ### click on the loot! ###
    # NOTE(review): indentation reconstructed from a whitespace-stripped dump —
    # the click is assumed to happen once, after both samples are collected.
    if foundThingToClick:
        last = pyautogui.position()
        # Lead the target slightly (1.1x the sampled displacement).
        vector = extrapolate(cXList, cYList, 1.1)
        predictedPoint = cXList[len(cXList) - 1] + vector[0], cYList[len(cYList) - 1] + vector[1]
        ctypes.windll.user32.SetCursorPos(int(offsetX + predictedPoint[0]), int(offsetY + predictedPoint[1]))
        time.sleep(0.005)
        leftClick()
        # Put the cursor back where the user had it.
        ctypes.windll.user32.SetCursorPos(int(last[0]), int(last[1]))
        print("nabbin' a loot")
    #cv.imshow("frame", frame)
    #cv.waitKey(10)
# Banner, one-time setup, then idle while the keyboard hook fires grabLoot().
print(
    "\nPath of Automation:\n" +
    " _ _ _ _ ____ _ _ _ \n" +
    " / \ _ _| |_ ___ | | ___ ___ | |_ / ___| (_) ___| | _____ _ __ \n" +
    " / _ \| | | | __/ _ \ _____| | / _ \ / _ \| __| | | | | |/ __| |/ / _ \ '__|\n" +
    " / ___ \ |_| | || (_) |_____| |__| (_) | (_) | |_ | |___| | | (__| < __/ | \n" +
    " /_/ \_\__,_|\__\___/ |_____\___/ \___/ \__| \____|_|_|\___|_|\_\___|_| \n\n" +
    "by LazyGuyWithRSI\n\n"
)
init()
print("Ready to grab some loot!\n")
# grabLoot runs on the keyboard library's hook thread each time HOTKEY fires.
keyboard.add_hotkey(HOTKEY, grabLoot)
while True:
    time.sleep(0.03) #keep it alive
import multiprocessing
import re
from pyomyo import Myo, emg_mode
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import bone
import serial_utils as s
# Use device manager to find the Arduino's serial port.
COM_PORT = "COM9"
RESET_SCALE = True
LEGACY_DECODE = False # If false, will use alpha encodings
# Queue carrying EMG samples from the Myo worker process to the plot loop.
q = multiprocessing.Queue()
# Plot Setup
fig = plt.figure("Grip plots from Myo")
ax = fig.add_subplot(111, projection='3d')
plt.subplots_adjust(left=0.25, bottom=0.25)
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_zlabel('Z [m]')
# ------------ Myo Setup ---------------
def emg_worker(q):
    """Worker process: stream preprocessed EMG from the Myo into *q* forever."""
    m = Myo(mode=emg_mode.PREPROCESSED)
    m.connect()
    def add_to_queue(emg, movement):
        # Called by the Myo driver for every EMG frame.
        q.put(emg)
    m.add_emg_handler(add_to_queue)
    def print_battery(bat):
        print("Battery level:", bat)
    m.add_battery_handler(print_battery)
    # Green logo and bar LEDs
    m.set_leds([0, 128, 0], [0, 128, 0])
    # Vibrate to know we connected okay
    m.vibrate(1)
    while True:
        m.run()
    print("Worker Stopped")  # unreachable: the loop runs until the process dies
def animate(i):
    """FuncAnimation callback: drain the queue, lerp the hand pose, redraw."""
    fingers = [0,0,0,0,0]
    # Drain the queue, keeping only the newest EMG sample.
    while not(q.empty()):
        fingers = list(q.get())
    # Turn finger values into Lerp Vals
    # NOTE(review): scaling assumes channel 0 values span roughly [0, 1000] —
    # confirm against the Myo's PREPROCESSED output range.
    val = fingers[0] / 1000
    print("Finger val", val)
    # Plot
    ax.clear()
    ax.set_xlabel('X [mm]')
    ax.set_ylabel('Y [mm]')
    ax.set_zlabel('Z [mm]')
    pose = bone.lerp_pose(val)
    points = bone.build_hand(pose, True)
    # Plot the Points
    bone.plot_steam_hand(points, "Lerped Pose", ax)
if __name__ == "__main__":
    # Daemon worker process streams EMG so the GUI loop stays responsive.
    p = multiprocessing.Process(target=emg_worker, args=(q,), daemon=True)
    p.start()
    # blit=False: the 3D axes are fully cleared and redrawn every frame.
    anim = animation.FuncAnimation(fig, animate, blit=False, interval=1)
    try:
        plt.show()
    except KeyboardInterrupt:
        quit()
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.insert(0, '../py')
from graviti import *
from numpy.linalg import norm
import numpy as np
import os
import os.path
from os import path
import sys
import glob
import h5py
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.use('Agg')
import plotly.graph_objects as go
from plotly.graph_objs import *
import plotly.express as px
import hdbscan
import pandas as pd
import umap
import networkx as nx
from scipy import sparse, linalg
import pickle
from sklearn.preprocessing import normalize, scale
from sklearn.decomposition import PCA
from scipy.sparse import find
from numpy.linalg import norm
import timeit
import multiprocessing
from joblib import Parallel, delayed
from datetime import datetime
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.graph_objs import *
import plotly.express as px
import plotly
import warnings
warnings.filterwarnings('ignore')
from sklearn.neighbors import KDTree
from sklearn.neighbors import NearestNeighbors
# In[33]:
# Collect one COVD-feature pickle per TCGA sample.
samples = glob.glob('/media/garner1/hdd2/TCGA_polygons/*/*/*.freq10.covdNN50.features.pkl')
num_cores = multiprocessing.cpu_count() # numb of cores
# The barycenters array contain the list of covd-barycenters, one per sample
# (load_barycenters comes from the star-import of graviti).
barycenter_list = Parallel(n_jobs=num_cores)(
    delayed(load_barycenters)(sample) for sample in tqdm(samples)
)
# Feature width taken from the first sample's 'descriptor' column.
barycenters = np.zeros((len(samples),pd.read_pickle(samples[0])['descriptor'].iloc[0].shape[0]))
row = 0
for b in barycenter_list:
    barycenters[row,:] = b
    row += 1
# NOTE(review): all-zero rows are dropped here, but cancer_type/sample_id below
# are built from the full sample list — lengths can diverge if any barycenter
# stayed zero; confirm before relying on the DataFrame alignment.
barycenters = barycenters[~np.all(barycenters == 0, axis=1)]
cancer_type = []
sample_id = []
for sample in samples:
    # Path layout: /media/garner1/hdd2/TCGA_polygons/<cancer_type>/<...>/<file>
    cancer_type.append( sample.split('/')[5] )
    sample_id.append( os.path.basename(sample).split('.')[0] )
print(len(cancer_type),set(cancer_type))
# In[42]:
# 3-D UMAP embedding of the barycenters, saved via scattered3d_tcga (graviti).
reducer = umap.UMAP(n_components=3)
embedding = reducer.fit_transform(barycenters)
# Generate Data
x = embedding[:,0]
y = embedding[:,1]
z = embedding[:,2]
df = pd.DataFrame(dict(x=x, y=y, z=z, label=cancer_type, sample=sample_id))
filename = 'umap.s'+str(df.shape[0])
scattered3d_tcga(df,filename)
# groups = df.groupby('label')
# # Plot
# fig, ax = plt.subplots(figsize=(10,10))
# ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# for name, group in groups:
#     ax.plot(group.x, group.y, marker='o', linestyle='', ms=6, label=name, alpha=0.5)
# ax.legend()
# plt.title('UMAP projection of the TCGA dataset', fontsize=12)
# plt.savefig('tcga.umap.s'+str(df.shape[0])+'.png')
# # In[37]:
# Same scatter plot using the first three principal components instead.
pca = PCA(n_components=3)
principalComponents = pca.fit_transform(barycenters)
x = principalComponents[:,0]
y = principalComponents[:,1]
z = principalComponents[:,2]
df = pd.DataFrame(dict(x=x, y=y, z=z, label=cancer_type, sample=sample_id))
filename = 'pca.s'+str(df.shape[0])
scattered3d_tcga(df,filename)
# groups = df.groupby('label')
# # Plot
# fig, ax = plt.subplots(figsize=(10, 10))
# ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
# for name, group in groups:
#     ax.plot(group.x, group.y, marker='o', linestyle='', ms=6, label=name, alpha=0.5)
# ax.legend()
# plt.title('PCA projection of the TCGA dataset', fontsize=12)
# plt.savefig('tcga.pca.s'+str(df.shape[0])+'.png')
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Rays are stand-ins for lightrays heading from the camera through the scene.
.. moduleauthor:: Adrian Köring
"""
import numpy as np
from padvinder.util import normalize
from padvinder.util import check_finite
class Ray(object):
    """
    A half-line through the scene: an origin point plus a unit direction.

    Parameters
    ----------
    position : numpy.ndarray_like
        Three-dimensional starting point of the ray.
    direction : numpy.ndarray_like
        Heading of the ray, with the same number of dimensions as
        ``position``. It is stored normalized to unit length, so it must
        not initially have length zero.

    Raises
    ------
    ValueError
        If any input component is NaN or infinite.
    ZeroDivisionError
        If the direction vector has length zero.
    """

    def __init__(self, position=(0, 0, 0), direction=(1, 0, 0)):
        # Reject NaN/Inf inputs up front, then store float64 copies.
        check_finite(position, direction)
        self._position = np.array(position).astype(np.float64)
        self._direction = normalize(direction).astype(np.float64)

    @property
    def position(self):
        """The ray's starting point."""
        return self._position

    @property
    def direction(self):
        """The ray's unit-length heading."""
        return self._direction

    def point(self, distance):
        """
        Return the point lying ``distance`` units along the ray.

        Parameters
        ----------
        distance : float
            Units to travel along the ray from its origin.

        Returns
        -------
        numpy.ndarray_like
            ``position + distance * direction``.
        """
        return self._position + distance * self._direction

    def __repr__(self):
        # Numpy arrays format themselves inside the message.
        return "Ray({0}, {1})".format(self._position, self._direction)
#import matplotlib
#matplotlib.use('Agg')
from matplotlib.animation import FuncAnimation, writers
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
import numpy as np
# Parent joint index for each non-root joint of the skeleton (H3.6M layout).
skeleton_parents = [0,1,2,0,4,5,0,7,8,9,8,11,12,8,14,15] #h36m
#skeleton_parents = [0,0,1,2,0,0,5,6,7,8,0,0,11,12,13,14]
# NOTE: `global` at module level is a no-op; these names are assigned by
# visualize()/visualize_all() and read by animate().
global keypoints_3d,scat3d, lines3d
global im, images
def draw_lines(sk):
    """Build per-bone [child, parent] coordinate pairs for a skeleton.

    `sk` is a (joints, 2-or-3) keypoint array; joint 0 is the root. Returns
    (xlines, ylines, zlines), where zlines is None for 2-D input.
    """
    def bone_pairs(axis):
        # Pair each non-root joint with its parent along one coordinate axis.
        children = sk[1:, axis]
        parents = sk[skeleton_parents, axis]
        return [[c, p] for c, p in zip(children, parents)]

    if sk.shape[-1] == 3:
        return bone_pairs(0), bone_pairs(1), bone_pairs(2)
    return bone_pairs(0), bone_pairs(1), None
def animate(i):
    """FuncAnimation callback: update the 3-D scatter, the 2-D frame image and
    the skeleton line artists to frame `i`; returns the artists for blitting."""
    global images
    global keypoints_3d, scat3d, lines3d
    # Move the scatter points to this frame's keypoints.
    scat3d._offsets3d = (keypoints_3d[i][:,0], keypoints_3d[i][:,1], keypoints_3d[i][:,2])
    image = cv2.cvtColor(images[i], cv2.COLOR_BGR2RGB)
    im.set_array(image)
    xlines,ylines,zlines = draw_lines(keypoints_3d[i])
    # NOTE(review): the loop variable shadows the frame index `i` (harmless here
    # since `i` is not used afterwards). Also, draw_lines returns zlines=None
    # for 2-D input, which would make this zip raise — 3-D keypoints assumed.
    for i, (x,y,z) in enumerate(zip(xlines, ylines, zlines)):
        lines3d[i].set_data(x, y)
        lines3d[i].set_3d_properties(z)
    # for i in range(NUMBER_OF_2D_KEYPOINTS):
    #     ax.text(results[0][i,0],results[0][i,1],results[0][i,2], '%s' % (str(i)), size=11, zorder=1, color='k')
    artists = [scat3d, im]
    artists.extend(lines3d)
    return artists
def visualize(results, output):
    """Animate 3-D keypoints (`results`, shape [T, J, 3]) and save to `output`
    via the ffmpeg writer. Populates the module globals used by animate()."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.view_init(elev=15., azim=70)
    # Fixed world-space box around the subject.
    radius=1.7
    ax.set_xlim3d([-radius/2, radius/2])
    ax.set_zlim3d([0, radius])
    ax.set_ylim3d([-radius/2, radius/2])
    global keypoints_3d, scat3d, lines3d
    keypoints_3d = results.copy()
    scat3d = ax.scatter(*keypoints_3d[0].T) #sk[:,0],sk[:,1],sk[:,2])
    lines3d = []
    xlines,ylines,zlines = draw_lines(keypoints_3d[0])
    for x,y,z in zip(xlines,ylines,zlines):
        lines3d.extend(ax.plot(x,y,z,color='red'))
    # NOTE(review): no frames= is given here (unlike visualize_all), so the
    # default infinite frame sequence is used — saving may not terminate and
    # animate() also reads the `im`/`images` globals this function never sets;
    # confirm this code path is actually used.
    anim = FuncAnimation(fig, animate, interval=30, blit=True)
    Writer = writers['ffmpeg']
    writer = Writer(metadata={}, bitrate=3000)
    anim.save(output, writer=writer)
# input all frames + kps
# writes the results in a file specified by filename
def visualize_all(image_2d, results, filename):
    """Write a side-by-side animation to `filename`: the 2-D video frame on the
    left and the 3-D skeleton on the right.

    Args:
        image_2d: sequence of BGR frames, one per pose frame.
        results: per-frame 3-D keypoints, shape [T, J, 3].
        filename: output video path (ffmpeg writer).
    """
    global im, images
    fig = plt.figure()
    # Left panel: raw video frame (BGR -> RGB for matplotlib).
    ax_img = fig.add_subplot(1,2,1)
    image = cv2.cvtColor(image_2d[0], cv2.COLOR_BGR2RGB)
    im = ax_img.imshow(image)
    images = image_2d
    # Right panel: 3-D skeleton axes with fixed limits and hidden tick labels.
    ax = fig.add_subplot(1,2,2, projection='3d')
    ax.view_init(elev=15., azim=70)
    radius=1.7
    ax.set_xlim3d([-radius/2, radius/2])
    ax.set_zlim3d([0, radius])
    ax.set_ylim3d([-radius/2, radius/2])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_zticklabels([])
    # (A duplicated cvtColor/imshow/images block was removed here: it re-created
    # a second, redundant image artist on the left axes.)
    global keypoints_3d, scat3d, lines3d
    keypoints_3d = results.copy()
    scat3d = ax.scatter(*keypoints_3d[0].T) #sk[:,0],sk[:,1],sk[:,2])
    lines3d = []
    xlines,ylines,zlines = draw_lines(keypoints_3d[0])
    for x,y,z in zip(xlines,ylines,zlines):
        lines3d.extend(ax.plot(x,y,z,color='red'))
    anim = FuncAnimation(fig, animate,frames=len(images), interval=30, blit=True)
    Writer = writers['ffmpeg']
    writer = Writer(metadata={}, bitrate=3000)
    anim.save(filename, writer=writer)
    plt.ioff()
class Visualizer():
    """Live side-by-side renderer: 2-D frame (left) and 3-D skeleton (right),
    returning each rendered canvas as a BGR image."""

    def __init__(self, frame_width, frame_height):
        # Figure size in inches; width doubled to fit the two panels.
        DPI = 96
        self.width = frame_width // DPI * 2
        self.height = frame_height // DPI
        self.fig = plt.figure(figsize=(self.width, self.height))
        plt.ion()
        self.ax_img = self.fig.add_subplot(1, 2, 1)
        self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
        self.ax.view_init(elev=15., azim=70)
        radius = 1.7
        self.ax.set_xlim3d([-radius / 2, radius / 2])
        self.ax.set_zlim3d([0, radius])
        self.ax.set_ylim3d([-radius / 2, radius / 2])
        self.ax.set_xticklabels([])
        self.ax.set_yticklabels([])
        self.ax.set_zticklabels([])
        # Artists are created lazily on the first draw() call.
        self.lines3d = []
        self.im = None

    # input : one frame + kp3d
    # return one image
    def draw(self, frame, kps3d):
        """Render one BGR video frame plus its 3-D keypoints; return the full
        figure canvas as a BGR uint8 image."""
        if self.im is None:
            # First call: create the image artist and the skeleton lines.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.im = self.ax_img.imshow(frame)
            xlines, ylines, zlines = draw_lines(kps3d)
            for x, y, z in zip(xlines, ylines, zlines):
                self.lines3d.extend(self.ax.plot(x, y, z, color='red'))
        else:
            # Later calls: update the existing artists in place (faster).
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.im.set_data(frame)
            xlines, ylines, zlines = draw_lines(kps3d)
            for i, (x, y, z) in enumerate(zip(xlines, ylines, zlines)):
                self.lines3d[i].set_data(x, y)
                self.lines3d[i].set_3d_properties(z)
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
        # np.fromstring(..., sep='') on binary data is deprecated/removed in
        # modern NumPy; frombuffer is the supported equivalent.
        img = np.frombuffer(self.fig.canvas.tostring_rgb(), dtype=np.uint8)
        img = img.reshape(self.fig.canvas.get_width_height()[::-1] + (3,))
        # img is rgb, convert to opencv's default bgr
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return img
"""
Reward normalization schemes.
"""
from math import sqrt
import numpy as np
class RewardNormalizer:
    """
    Rescale rollout rewards by a running estimate of their
    root-mean-square magnitude.
    """

    def __init__(self, update_rate=0.05, discount=0.0, scale=1.0, epsilon=1e-5):
        """
        Create a reward normalizer.

        Arguments:
          update_rate: speed (0 through 1) at which the normalizing
            coefficient tracks new data. None means a running
            average over all rewards ever seen.
          discount: discount factor. 0 normalizes the raw rewards;
            a larger value normalizes geometric reward sums.
          scale: multiplier applied after normalization.
          epsilon: guards against division by zero.
        """
        self._average = OnlineAverage(rate=update_rate)
        self._discount = discount
        self._scale = scale
        self._epsilon = epsilon

    def update(self, rollouts):
        """
        Fold the rollouts into the running statistics, then return
        normalized copies of them.
        """
        self._average.update([adv ** 2 for adv in self._advantages(rollouts)])
        return [self._normalized_rollout(r) for r in rollouts]

    def _normalized_rollout(self, rollout):
        """Return a copy of `rollout` with rewards rescaled."""
        factor = self._scale / (self._epsilon + sqrt(self._average.value))
        out = rollout.copy()
        out.rewards = [factor * r for r in out.rewards]
        return out

    def _advantages(self, rollouts):
        # Without discounting, the raw rewards are the tracked quantity.
        if self._discount == 0.0:
            return [rew for r in rollouts for rew in r.rewards]
        result = []
        for rollout in rollouts:
            # Bootstrap from the value head when the rollout was truncated.
            last_outs = rollout.model_outs[-1]
            if rollout.trunc_end and 'values' in last_outs:
                acc = last_outs['values'][0]
            else:
                acc = 0
            # Walk the rewards backwards, accumulating the discounted sum.
            for reward in rollout.rewards[::-1]:
                acc = acc * self._discount + reward
                result.append(acc)
        return result
class OnlineAverage:
    """
    An incrementally-updated mean.

    With ``rate=None`` this is an exact (unbiased) running average of
    every value seen so far, even when values arrive in batches.
    With a fixed rate it is a moving average biased toward recent
    batches, except that the very first batch seeds the average
    directly.
    """

    def __init__(self, rate=None):
        """
        Create a new OnlineAverage.

        Args:
          rate: moving-average update rate, applied as
            `rate * (new - old)` where `new` is the mean of an
            incoming batch. None selects a dynamic rate that makes
            the result an exact running average over all samples.
        """
        self.rate = rate
        self._current = 0
        self._num_samples = 0

    @property
    def value(self):
        """The current average value."""
        return self._current

    def update(self, values):
        """
        Fold a batch of numerics into the average.

        Returns:
          The new online average.
        """
        batch = len(values)
        if self.rate is None:
            # Exact running average: weight by relative batch size.
            step = batch / (batch + self._num_samples)
        elif self._num_samples == 0:
            # First batch with a fixed rate: seed the average directly.
            step = 1
        else:
            step = self.rate
        self._current += step * (np.mean(values) - self._current)
        self._num_samples += batch
        return self._current
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from natsort import natsorted
from glob import glob
import numpy as np
import cv2
from PIL import Image
import paddle
from .base_predictor import BasePredictor
from ppgan.models.generators import MPRNet
from ppgan.utils.download import get_path_from_url
from ppgan.utils.visual import make_grid, tensor2img, save_image
from ppgan.datasets.mpr_dataset import to_tensor
from paddle.vision.transforms import Pad
from tqdm import tqdm
# Pretrained weight URL and MPRNet width hyper-parameters for each task.
model_cfgs = {
    'Deblurring': {
        'model_urls':
        'https://paddlegan.bj.bcebos.com/models/MPR_Deblurring.pdparams',
        'n_feat': 96,
        'scale_unetfeats': 48,
        'scale_orsnetfeats': 32,
    },
    'Denoising': {
        'model_urls':
        'https://paddlegan.bj.bcebos.com/models/MPR_Denoising.pdparams',
        'n_feat': 80,
        'scale_unetfeats': 48,
        'scale_orsnetfeats': 32,
    },
    'Deraining': {
        'model_urls':
        'https://paddlegan.bj.bcebos.com/models/MPR_Deraining.pdparams',
        'n_feat': 40,
        'scale_unetfeats': 20,
        'scale_orsnetfeats': 16,
    }
}
class MPRPredictor(BasePredictor):
    """Image-restoration predictor wrapping MPRNet for the tasks in model_cfgs
    (Deblurring / Denoising / Deraining)."""

    def __init__(self,
                 output_path='output_dir',
                 weight_path=None,
                 seed=None,
                 task=None):
        """Load (or download) MPRNet weights for `task` and put the model in
        eval mode; optionally seed paddle/random/numpy for reproducibility."""
        self.output_path = output_path
        self.task = task
        # Inputs larger than this are downscaled before inference.
        self.max_size = 640
        # Spatial dims are padded to a multiple of this before the network.
        self.img_multiple_of = 8
        if weight_path is None:
            if task in model_cfgs.keys():
                # Fetch the pretrained weights for the requested task.
                weight_path = get_path_from_url(model_cfgs[task]['model_urls'])
                checkpoint = paddle.load(weight_path)
            else:
                raise ValueError(
                    'Predictor need a weight path or a pretrained model type')
        else:
            checkpoint = paddle.load(weight_path)
        self.generator = MPRNet(
            n_feat=model_cfgs[task]['n_feat'],
            scale_unetfeats=model_cfgs[task]['scale_unetfeats'],
            scale_orsnetfeats=model_cfgs[task]['scale_orsnetfeats'])
        self.generator.set_state_dict(checkpoint)
        self.generator.eval()
        if seed is not None:
            paddle.seed(seed)
            random.seed(seed)
            np.random.seed(seed)

    def get_images(self, images_path):
        """Return naturally-sorted jpg/png paths under a directory, or
        `[images_path]` when it is a single file."""
        if os.path.isdir(images_path):
            return natsorted(
                glob(os.path.join(images_path, '*.jpg')) +
                glob(os.path.join(images_path, '*.JPG')) +
                glob(os.path.join(images_path, '*.png')) +
                glob(os.path.join(images_path, '*.PNG')))
        else:
            return [images_path]

    def read_image(self, image_file):
        """Open an image as RGB, downscaling so its longer side <= max_size
        (aspect ratio preserved)."""
        img = Image.open(image_file).convert('RGB')
        max_length = max(img.width, img.height)
        if max_length > self.max_size:
            ratio = max_length / self.max_size
            dw = int(img.width / ratio)
            dh = int(img.height / ratio)
            img = img.resize((dw, dh))
        return img

    def run(self, images_path=None):
        """Restore every image under `images_path`; copies the original and
        writes `<name>_restoration.<ext>` into `<output_path>/<task>/`."""
        os.makedirs(self.output_path, exist_ok=True)
        task_path = os.path.join(self.output_path, self.task)
        os.makedirs(task_path, exist_ok=True)
        image_files = self.get_images(images_path)
        for image_file in tqdm(image_files):
            img = self.read_image(image_file)
            image_name = os.path.basename(image_file)
            img.save(os.path.join(task_path, image_name))
            tmps = image_name.split('.')
            assert len(
                tmps) == 2, f'Invalid image name: {image_name}, too much "."'
            restoration_save_path = os.path.join(
                task_path, f'{tmps[0]}_restoration.{tmps[1]}')
            input_ = to_tensor(img)
            # Pad the input if not_multiple_of 8
            h, w = input_.shape[1], input_.shape[2]
            H, W = ((h + self.img_multiple_of) //
                    self.img_multiple_of) * self.img_multiple_of, (
                        (w + self.img_multiple_of) //
                        self.img_multiple_of) * self.img_multiple_of
            # padh/padw are zero when the size is already a multiple of 8.
            padh = H - h if h % self.img_multiple_of != 0 else 0
            padw = W - w if w % self.img_multiple_of != 0 else 0
            input_ = paddle.to_tensor(input_)
            transform = Pad((0, 0, padw, padh), padding_mode='reflect')
            input_ = transform(input_)
            # Add the batch dimension.
            input_ = paddle.to_tensor(np.expand_dims(input_.numpy(), 0))
            with paddle.no_grad():
                restored = self.generator(input_)
            # First element of the multi-stage MPRNet output is used —
            # presumably the final stage; confirm against the model definition.
            restored = restored[0]
            restored = paddle.clip(restored, 0, 1)
            # Unpad the output
            restored = restored[:, :, :h, :w]
            # NCHW float [0,1] -> HWC uint8, then RGB -> BGR for cv2.imwrite.
            restored = restored.numpy()
            restored = restored.transpose(0, 2, 3, 1)
            restored = restored[0]
            restored = restored * 255
            restored = restored.astype(np.uint8)
            cv2.imwrite(restoration_save_path,
                        cv2.cvtColor(restored, cv2.COLOR_RGB2BGR))
        print('Done, output path is:', task_path)
# Copyright 2019 DIVERSIS Software. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
def get_scope_variable(scope, var, shape=None, initializer=None):
    """Fetch-or-create trainable variable `var` inside `scope` (TF1, reuse-safe).

    `initializer` is called with `shape` to produce the initial value.
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        return tf.get_variable(var, initializer=initializer(shape), trainable=True)
def ConstructInferenceNetwork(InputSize,batchSize,layerOutDimSize,kernelSize,strideSize,poolKernelSize,poolSize,IMAGE_SIZE):
    """Build the TF1 inference graph: input/label/dropout placeholders, optional
    per-image distortion, a stack of conv/FC layers, and an accuracy op.

    Returns:
        (logits tensor, accuracy op, dict of the five input placeholders).
    """
    # Generate Placeholders
    inputFramePlaceHolder = tf.placeholder(tf.float32, shape=[batchSize, InputSize[0], InputSize[1], InputSize[2]],name='inputFramePlaceHolder')
    labelPlaceHolder = tf.placeholder(tf.float32, shape=[batchSize, layerOutDimSize[-1]],name='labelPlaceHolder')
    dropoutInputPlaceHolder = tf.placeholder(tf.float32, shape=[],name='dropoutInputPlaceHolder')
    # One keep-probability per layer for the post-layer dropout.
    dropoutPoolPlaceHolder = tf.placeholder(tf.float32, shape=[len(layerOutDimSize)],name='dropoutPoolPlaceHolder')
    inputDistortionPlaceholder = tf.placeholder(tf.bool, shape=[],name='inputDistortionPlaceholder')
    # Construct distorted input if desired
    # (crop/flip/brightness pipeline applied per image, then re-batched)
    inputFramePlaceHolderResized = [tf.reshape(distorted_inputs(inputFramePlaceHolder[i,:,:,:],IMAGE_SIZE,inputDistortionPlaceholder),[1,IMAGE_SIZE,IMAGE_SIZE,InputSize[2]]) for i in range(inputFramePlaceHolder.shape[0])]
    inputFramePlaceHolderResized = tf.concat(inputFramePlaceHolderResized,axis=0)
    # Construct inference network structure
    layerSize = len(layerOutDimSize)
    # Apply input dropout
    inputFrame = tf.nn.dropout(inputFramePlaceHolderResized,dropoutInputPlaceHolder,name="InputDropout")
    latentOut = tf.multiply(inputFrame,1.0)
    for lIndx in range(layerSize):
        print("**************** Layer-%d ****************"%lIndx)
        print("Input Tensor: ")
        print(latentOut)
        inputDim = latentOut.get_shape().as_list()
        outputDim = layerOutDimSize[lIndx]
        # if kernel is convolution
        if len(kernelSize[lIndx]) > 1:
            shapeW = [kernelSize[lIndx][0],kernelSize[lIndx][1],inputDim[-1],outputDim]
        else:
            # kernel is FC
            if len(inputDim) == 4:
                # Flattened conv output feeds the first FC layer.
                shapeW = [inputDim[1]*inputDim[2]*inputDim[3],outputDim]
            else:
                shapeW = [inputDim[-1],outputDim]
        weight = get_scope_variable('Layer-%d'%lIndx, 'Weight', shape=shapeW,initializer=tf.contrib.layers.xavier_initializer())
        bias = get_scope_variable('Layer-%d'%lIndx, 'Bias', shape=[outputDim],initializer=tf.zeros_initializer())
        print("Weight: ")
        print(weight)
        print("Bias: ")
        print(bias)
        # Construct layer
        # The last layer skips dropout/ReLU so raw logits come out.
        lastLayer = (lIndx == (layerSize-1))
        latentOut = ConstructLayer(latentOut,weight,bias,strideSize[lIndx],'Layer-%d-OP'%lIndx,dropoutPoolPlaceHolder[lIndx],poolKernelSize[lIndx],poolSize[lIndx],lastLayer)
        print("Output Tensor: ")
        print(latentOut)
        print("******************************************")
    # Compute prediction metric
    softMaxOut = tf.nn.softmax(logits=latentOut,name="softMaxOut")
    correct_prediction = tf.equal(tf.argmax(softMaxOut,1,name="Argmax_softMaxOut"), tf.argmax(labelPlaceHolder,1,name="Argmax_Label"),name="CorrectPrediction")
    # Compute accuracy metric
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32,name="Cast_Accuracy"),name="Accuracy")
    placeHolders = {'inputFramePlaceHolder':inputFramePlaceHolder, 'labelPlaceHolder':labelPlaceHolder, 'dropoutInputPlaceHolder':dropoutInputPlaceHolder, 'dropoutPoolPlaceHolder':dropoutPoolPlaceHolder, 'inputDistortionPlaceholder':inputDistortionPlaceholder}
    return latentOut,accuracy,placeHolders
def ConstructLayer(layerInput,weight,bias,strideSize,nameScope,dropoutPoolPlaceHolder,poolKernelSize,poolSize,lastLayer):
    """Apply one layer: conv2d (4-D weight) or FC (2-D weight), optional max-pool,
    bias add, then dropout + ReLU on all but the last layer.

    Note: the bias is added after pooling; for max-pooling this is equivalent to
    adding it before, since adding a constant commutes with max.
    """
    # A 4-element weight shape means a convolution kernel, 2 means FC.
    convSize = weight.get_shape().as_list()
    with tf.name_scope(nameScope):
        if len(convSize) == 4:
            outOp = tf.nn.conv2d(layerInput, weight, strides=[1,strideSize,strideSize,1], padding='SAME',name="ConvOP")
        else:
            # Flatten spatial dims before the matmul.
            layerInputFC = tf.reshape(layerInput,(layerInput.shape[0],-1))
            outOp = tf.matmul(layerInputFC, weight,name="MatMul")
        if poolSize > 1:
            outOp = tf.nn.max_pool(outOp, ksize=[1, poolKernelSize[0], poolKernelSize[1], 1], strides=[1, poolSize, poolSize, 1],padding='SAME', name='pool')
        layerOutput = tf.add(outOp,bias,name="BiasAdd")
        if lastLayer == False:
            # TF1 dropout: second arg is the keep probability.
            layerOutput = tf.nn.dropout(layerOutput,dropoutPoolPlaceHolder)
            layerOutput = tf.nn.relu(layerOutput)
    return layerOutput
def ConstructOptimizer(output,labelPlaceHolder,momentum,weightDecay=None):
    """Attach momentum-SGD training ops: softmax cross-entropy plus optional
    L2 weight decay over all trainable variables.

    Returns:
        (train_step op, total loss, l2 loss, learning-rate placeholder).
    """
    learningRatePlaceHolder = tf.placeholder(tf.float32, shape=[],name='learningRatePlaceHolder')
    # Compute l2Loss
    if weightDecay is not None:
        l2LossList = [tf.nn.l2_loss(var) for var in tf.trainable_variables()]
        l2Loss = tf.multiply(tf.add_n(l2LossList),weightDecay)
    else:
        l2Loss = tf.zeros(shape=[])
    # Compute cross entropy loss
    crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labelPlaceHolder, logits=output),name="CrossEntropy")
    # Compute totLoss used for training and accuracy metric
    totLoss = tf.add_n([crossEntropy,l2Loss])
    # Generate optimizer operation
    with tf.variable_scope('Momentum-0', reuse=tf.AUTO_REUSE):
        train_step = tf.train.MomentumOptimizer(learning_rate=learningRatePlaceHolder, momentum=momentum).minimize(totLoss)
    return train_step,totLoss,l2Loss,learningRatePlaceHolder
def GetTestAccuracy(sess,accuracyOp,data,labels,testBatchSize,frameBufferForTest,inputFramePlaceHolder,inputDistortionPlaceholder,labelPlaceHolder,dropoutInputPlaceHolder,dropoutPoolPlaceHolder):
    """Average the accuracy op over the test set in fixed-size batches.

    `data` is laid out with samples in the LAST axis ([H, W, C, N]) and
    `labels` as [num_classes, N]; a trailing remainder smaller than
    testBatchSize is dropped. Distortion is disabled and all dropout
    keep-probabilities forced to 1 during evaluation.
    """
    dataLen = data.shape[3]
    iternum = int(dataLen / testBatchSize)
    batchLabels = np.zeros((testBatchSize,labels.shape[0]))
    accuracy = 0
    for i in range(iternum):
        # Copy this batch's frames/labels into the preallocated buffers.
        for j in range(testBatchSize):
            frameBufferForTest[j,:,:,:] = data[:,:,:,i*testBatchSize+j]
            batchLabels[j,:] = labels[:,i*testBatchSize+j]
        # Determine feed_dict for testing accuracy
        feed_latent = {inputFramePlaceHolder:frameBufferForTest, inputDistortionPlaceholder:False, labelPlaceHolder:batchLabels, dropoutInputPlaceHolder:1.0, dropoutPoolPlaceHolder:np.ones((dropoutPoolPlaceHolder.shape[0]))}
        classiferAccuracyVal = sess.run(accuracyOp,feed_dict = feed_latent)
        # Running mean over the batches.
        accuracy += classiferAccuracyVal / iternum
    return accuracy
# We changed the files "distorted_inputs" in "https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py"
# to generate "distorted_inputs" used in this file.
def distorted_inputs(image,imSize,inputDistortionPlaceholder):
    """Graph-mode train/eval preprocessing switch.

    Builds both the randomly-distorted (training) pipeline and the plain
    center-crop (evaluation) pipeline for `image`, and selects between them
    at session run time based on the boolean tensor
    `inputDistortionPlaceholder`.

    Args:
        image: a [H, W, C] image tensor.
        imSize: target square side length.
        inputDistortionPlaceholder: scalar tf.bool tensor; True selects the
            training distortions.

    Returns:
        A [imSize, imSize, C] standardized float image tensor.
    """
    with tf.name_scope('data_augmentation'):
        height = imSize
        width = imSize

        def _distorted():
            # Image processing for training the network. Note the many random
            # distortions applied to the image.
            # Randomly crop a [height, width] section of the image.
            distorted_image = tf.random_crop(image, [height, width, 3])
            # Randomly flip the image horizontally.
            distorted_image = tf.image.random_flip_left_right(distorted_image)
            # Because these operations are not commutative, consider randomizing
            # the order their operation.
            # NOTE: since per_image_standardization zeros the mean and makes
            # the stddev unit, this likely has no effect see tensorflow#1458.
            distorted_image = tf.image.random_brightness(distorted_image,max_delta=63)
            distorted_image = tf.image.random_contrast(distorted_image,lower=0.2, upper=1.8)
            # Subtract off the mean and divide by the variance of the pixels.
            float_image = tf.image.per_image_standardization(distorted_image)
            # Set the shapes of tensors.
            float_image.set_shape([height, width, image.shape[-1]])
            return float_image

        # BUG FIX: the original tested `if inputDistortionPlaceholder == True:`
        # in Python. In TF 1.x, `==` on a Tensor is identity-based (not an
        # elementwise op), so the comparison was always False and the
        # distortion branch never ran. tf.cond selects the branch from the
        # placeholder's value at run time instead.
        float_image = tf.cond(inputDistortionPlaceholder,
                              _distorted,
                              lambda: inputs_test(image, imSize))
    # Generate a batch of images and labels by building up a queues of examples.
    return float_image
# We changed the files "inputs" in "https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py"
# to generate "inputs_test" used in this file.
def inputs_test(image,imSize):
    """Evaluation-time preprocessing: center crop/pad to a square of side
    `imSize`, then standardize to zero mean and unit variance."""
    side = imSize
    # Crop (or pad) the central [side, side] window of the image.
    cropped = tf.image.resize_image_with_crop_or_pad(image, side, side)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(cropped)
    # Pin the static shape so downstream reshapes can rely on it.
    float_image.set_shape([side, side, image.shape[-1]])
    return float_image
import xspec
import numpy as n
import sys
# Parameter grids: column density (units of 1e22 cm^-2) and redshift.
nh_vals = 10**n.arange(-2,4,0.05)
z_vals = 10**n.arange(-3,0.68,0.025)
# Defaults for running a single case by hand.
nh_val = 1000.# nh_vals[0]
redshift = 2. # z_vals[0]
def get_fraction_obs(nh_val, redshift, kev_min_erosita = 0.5, kev_max_erosita = 2.0):
    """Fraction of intrinsic X-ray flux that reaches the observer.

    Builds an XSPEC torus + redshifted power-law model at the given column
    density and redshift, and returns (absorbed flux in the observed-frame
    eROSITA band) / (near-unabsorbed flux in the corresponding rest-frame band).
    """
    print(nh_val, redshift)
    # Rest-frame band corresponding to the observed eROSITA band.
    kev_min_erosita_RF = kev_min_erosita*(1+redshift)
    kev_max_erosita_RF = kev_max_erosita*(1+redshift)
    m1 = xspec.Model("atable{torus1006.fits} + zpowerlw")
    m1.torus.nH = nh_val
    m1.torus.Theta_tor = 45.
    m1.torus.Theta_inc = 87.
    m1.torus.z = redshift
    m1.torus.norm = 1.
    m1.zpowerlw.PhoIndex=2.
    m1.zpowerlw.Redshift=redshift
    m1.zpowerlw.norm=0.01
    # observed frame attenuated flux
    xspec.AllModels.calcFlux(str(kev_min_erosita)+" "+str(kev_max_erosita))
    flux_obs = m1.flux[0]
    # rest frame intrinsic flux
    # (column density dropped to the grid minimum, i.e. effectively unabsorbed)
    m1.torus.nH = 0.01
    xspec.AllModels.calcFlux(str(kev_min_erosita_RF)+" "+str(kev_max_erosita_RF))
    flux_intrinsic = m1.flux[0]
    fraction_observed = flux_obs / flux_intrinsic
    return fraction_observed
# Evaluate the observed fraction over the full (redshift, nH) grid.
frac = n.array([n.array([get_fraction_obs(nh_val, redshift) for nh_val in nh_vals]) for redshift in z_vals ])
z_all = n.array([n.array([ redshift for nh_val in nh_vals]) for redshift in z_vals ])
nh_all = n.array([n.array([ nh_val for nh_val in nh_vals]) for redshift in z_vals ])
# Flatten to three columns; nH is written as log10(cm^-2) (grid is in 1e22 units, hence +22).
n.savetxt("fraction_observed.txt", n.transpose([n.hstack((z_all)), 22 + n.log10(n.hstack((nh_all))), n.hstack((frac))]), header='z log_nh fraction_observed')
#Write by Chiru Ge, contact: gechiru@126.com
# -*- coding: utf-8 -*-
## use GPU
import os
import tensorflow as tf
# Pin to GPU 0 and let TF grow its memory footprint on demand
# instead of reserving all GPU memory up front.
os.environ['CUDA_VISIBLE_DEVICES']='0'
config=tf.ConfigProto()
config.gpu_options.allow_growth= True
sess=tf.Session(config=config)
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam
from sklearn import metrics, preprocessing
from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ssrn_SS_Houston_3FF_F1
import h5py
from keras.models import load_model
from keras.utils.vis_utils import plot_model
def sampling1(trainlabels, testlabels):
    """Collect flattened pixel indices of labelled samples, grouped by class.

    Labels are 1-based; 0 means unlabelled and is skipped. Returns
    (train_indices, test_indices) ordered class by class.
    """
    train_indices = []
    test_indices = []
    # Number of classes = highest label present in the training map.
    num_classes = np.max(trainlabels[:])
    for cls in range(num_classes):
        train_indices += [pos for pos, lab in enumerate(trainlabels.ravel().tolist()) if lab == cls + 1]
    for cls in range(num_classes):
        test_indices += [pos for pos, lab in enumerate(testlabels.ravel().tolist()) if lab == cls + 1]
    return train_indices, test_indices
def sampling(proptionVal, groundTruth):
    """Randomly split labelled pixel indices into train/test sets per class.

    `proptionVal` is the test fraction within each class; labels are 1-based
    (0 = unlabelled). Both returned index lists are shuffled.
    """
    per_class_train = {}
    per_class_test = {}
    num_classes = max(groundTruth)
    for cls in range(num_classes):
        members = [pos for pos, lab in enumerate(groundTruth.ravel().tolist()) if lab == cls + 1]
        np.random.shuffle(members)
        # Last n_test shuffled members go to the test split.
        n_test = int(proptionVal * len(members))
        per_class_train[cls] = members[:-n_test]
        per_class_test[cls] = members[-n_test:]
    train_indices = []
    test_indices = []
    for cls in range(num_classes):
        train_indices += per_class_train[cls]
        test_indices += per_class_test[cls]
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)
    return train_indices, test_indices
def indexToAssignment(index_, Row, Col, pad_length):
    """Map flat pixel indices to [row, col] coordinates in the padded image.

    `Col` is the unpadded column count; `pad_length` shifts both coordinates
    into the zero-padded frame. (`Row` is unused but kept for the callers.)
    Returns {position_in_input: [row, col]}.
    """
    return {
        counter: [value // Col + pad_length, value % Col + pad_length]
        for counter, value in enumerate(index_)
    }
def assignmentToIndex(assign_0, assign_1, Row, Col):
    """Flatten (row, col) back to a row-major pixel index.

    (`Row` is unused but kept for signature compatibility.)
    """
    return assign_0 * Col + assign_1
def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
    """Cut a (2*ex_len+1) x (2*ex_len+1) spatial patch centred at
    (pos_row, pos_col); extra trailing axes (e.g. bands) are kept whole."""
    rows = range(pos_row - ex_len, pos_row + ex_len + 1)
    cols = range(pos_col - ex_len, pos_col + ex_len + 1)
    return matrix[rows, :][:, cols]
def classification_map(map, groundTruth, dpi, savePath):
    """Save `map` as a borderless, axis-free image scaled to the ground-truth
    raster (2 px per cell at the given dpi); returns 0.

    NOTE: the parameter `map` shadows the builtin of the same name.
    """
    fig = plt.figure(frameon=False)
    fig.set_size_inches(groundTruth.shape[1]*2.0/dpi, groundTruth.shape[0]*2.0/dpi)
    # Axes spanning the whole figure, with every decoration turned off.
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    fig.add_axes(ax)
    ax.imshow(map, aspect='equal')
    fig.savefig(savePath, dpi = dpi)
    return 0
def res4_model_ss():
    """Build and compile the 3-D residual fusion network (reads the module-level
    img_rows/img_cols/nb_classes settings)."""
    # NOTE(review): `img_channels` is not defined at module level in this file
    # (only img_channels_HSI / img_channels_HSIEPLBP / img_channels_LiDAREPLBP
    # are) — calling this function as-is would raise NameError; confirm.
    model_res4 = ssrn_SS_Houston_3FF_F1.ResnetBuilder.build_resnet_2_2((1, img_rows, img_cols, img_channels), nb_classes)
    RMS = RMSprop(lr=0.0003)
    # Let's train the model using RMSprop
    model_res4.compile(loss='categorical_crossentropy', optimizer=RMS, metrics=['accuracy'])
    return model_res4
######### Load data HSI ########
mat_LiDAR = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI.mat')
data_Houston_HSI = mat_LiDAR['HSI']
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, mat_LiDAR
######### Load data HSI_EPLBP ########
# v7.3 .mat file read through h5py; axes come back reversed relative to
# MATLAB's layout, hence the transpose.
file=h5py.File('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/HSI_EPLBP.mat','r')
file.keys()
data = file['HSI_EPLBP'][:]
data_Houston_HSIEPLBP=data.transpose(2,1,0);
file.close()
# NOTE(review): the ground-truth file is re-loaded identically three times in
# this section; the repeats are redundant.
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, data
######### Load data LiDAR_EPLBP ########
mat_LiDAR = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/LiDAR_DSM_EPLBP.mat')
data_Houston_LiDAREPLBP = mat_LiDAR['LiDAR_DSM_EPLBP']
mat_data = sio.loadmat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/datasets/Houston/Houston_gt.mat')
trainlabels = mat_data['trainlabels']
testlabels = mat_data['testlabels']
del mat_data, mat_LiDAR
######### Training parameter setting ##########
batch_size = 16 #sample number of each batch
nb_classes = 15 #class number
nb_epoch = 200 #epoch
img_rows, img_cols = 7, 7
patience = 200
PATCH_LENGTH = 3 #Patch_size
TEST_SIZE = 12197
TRAIN_SIZE = 2832
TOTAL_SIZE = TRAIN_SIZE+TEST_SIZE
img_channels_HSI = 144
img_channels_HSIEPLBP = 815
img_channels_LiDAREPLBP = 134
CATEGORY = 15
# Total number of pixels in the scene (every pixel is classified below).
ALL_SIZE = data_Houston_HSI.shape[0] * data_Houston_HSI.shape[1]
######### Data normalization ########
# For each modality: flatten to (pixels, bands), z-score per band, reshape
# back, then pad the borders (zeroPadding_3D) so edge pixels get full patches.
data = data_Houston_HSI.reshape(np.prod(data_Houston_HSI.shape[:2]),np.prod(data_Houston_HSI.shape[2:]))# 3D to 2D
data = preprocessing.scale(data) #normalization
whole_data_HSI = data.reshape(data_Houston_HSI.shape[0], data_Houston_HSI.shape[1],data_Houston_HSI.shape[2])
padded_data_HSI = zeroPadding.zeroPadding_3D(whole_data_HSI, PATCH_LENGTH)
del data,data_Houston_HSI
data = data_Houston_HSIEPLBP.reshape(np.prod(data_Houston_HSIEPLBP.shape[:2]),np.prod(data_Houston_HSIEPLBP.shape[2:]))# reshape 3-D cube to 2-D
data = preprocessing.scale(data) #normalization
whole_data_HSIEPLBP = data.reshape(data_Houston_HSIEPLBP.shape[0], data_Houston_HSIEPLBP.shape[1],data_Houston_HSIEPLBP.shape[2])
padded_data_HSIEPLBP = zeroPadding.zeroPadding_3D(whole_data_HSIEPLBP, PATCH_LENGTH)
del data,data_Houston_HSIEPLBP
data = data_Houston_LiDAREPLBP.reshape(np.prod(data_Houston_LiDAREPLBP.shape[:2]),np.prod(data_Houston_LiDAREPLBP.shape[2:]))# reshape 3-D cube to 2-D
data = preprocessing.scale(data) #normalization
whole_data_LiDAREPLBP = data.reshape(data_Houston_LiDAREPLBP.shape[0], data_Houston_LiDAREPLBP.shape[1],data_Houston_LiDAREPLBP.shape[2])
padded_data_LiDAREPLBP = zeroPadding.zeroPadding_3D(whole_data_LiDAREPLBP, PATCH_LENGTH)
del data,data_Houston_LiDAREPLBP
############ Full image mapping and model reading ############
best_weights_RES_path_ss4 = ('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/models/Houston/3DFF/Houston_3FF_7-7_2-2_24_0.0003.hdf5')
model=load_model(best_weights_RES_path_ss4)
##Grouping the testing samples
# Split all ALL_SIZE pixel indices into n equal chunks plus one remainder
# chunk, so the full scene can be predicted without exhausting memory.
n=60
group=ALL_SIZE//n
group_last=ALL_SIZE%n
Group=[]
for i in range(n+1):
    if i==0:
        Group.append(range(group))
    elif i!= n and i > 0:
        Group.append(range(group*i,group*(i+1)))
    elif i==n:
        Group.append(range(group*i,group*i+group_last))
# GROUP[i] is simply the length of Group[i].
GROUP=[]
for i in range(n+1):
    if i!= n:
        GROUP.append(group)
    elif i==n:
        GROUP.append(group_last)
##Predict each set of test samples. imagemap is the final map.
imagemap=[]
imageprob=[]
for i in range(len(Group)):
    print(i)
    # Patch buffers for the three modalities of this chunk.
    all_data_HSI = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_HSI))
    all_data_HSIEPLBP = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_HSIEPLBP))
    all_data_LiDAREPLBP = np.zeros((GROUP[i], 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels_LiDAREPLBP))
    # Flat pixel index -> (row, col) coordinates in the padded cubes.
    all_assign = indexToAssignment(Group[i], whole_data_HSI.shape[0], whole_data_HSI.shape[1], PATCH_LENGTH)
    for j in range(len(all_assign)):
        all_data_HSI[j] = selectNeighboringPatch(padded_data_HSI, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
        all_data_HSIEPLBP[j] = selectNeighboringPatch(padded_data_HSIEPLBP, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
        all_data_LiDAREPLBP[j] = selectNeighboringPatch(padded_data_LiDAREPLBP, all_assign[j][0], all_assign[j][1], PATCH_LENGTH)
    # The network takes the three patch stacks with a trailing singleton axis.
    prob_image = model.predict(
        [all_data_HSI.reshape(all_data_HSI.shape[0], all_data_HSI.shape[1], all_data_HSI.shape[2], all_data_HSI.shape[3], 1),
         all_data_HSIEPLBP.reshape(all_data_HSIEPLBP.shape[0], all_data_HSIEPLBP.shape[1], all_data_HSIEPLBP.shape[2], all_data_HSIEPLBP.shape[3], 1),
         all_data_LiDAREPLBP.reshape(all_data_LiDAREPLBP.shape[0], all_data_LiDAREPLBP.shape[1], all_data_LiDAREPLBP.shape[2], all_data_LiDAREPLBP.shape[3], 1)])
    pred_image=prob_image.argmax(axis=1)
    imageprob=imageprob+[prob_image]
    imagemap=imagemap+[pred_image]
# Save per-chunk probabilities and class maps for MATLAB post-processing.
adict={}
adict['imageprob']=imageprob
adict['imagemap']=imagemap
sio.savemat('/home/amax/Documents/GCR/RNPRF-RNDFF-RNPMF/records/Houston/map/3FF_map.mat',adict)
import re
import os
import time
import itertools as it
import numpy as np
# logger
def logger(verbose=False):
    """Create a print-like function; messages are emitted only when *verbose*."""
    def log(*arg):
        if not verbose:
            return
        print(*arg)
    return log
# time utils
def tic():
    """Start a timer: return the current wall-clock time in seconds."""
    now = time.time()
    return now
def toc(start, msg=None):
    """Print the elapsed whole seconds since *start* (a tic() timestamp)."""
    elapsed = time.time() - start
    whole_seconds = elapsed // 1
    print("Done en {}s".format(whole_seconds), msg)
# lecture
# Read the boarding passes and translate the binary-space-partition letters
# into bit characters: rows F/B -> 0/1, columns L/R -> 0/1 (10 bits total).
with open("input.txt", "r") as f:
    data = [i.rstrip().replace("B","1").replace("L","0").replace("F","0").replace("R","1") for i in f.readlines()]
def calcul_score(x):
    """Seat ID from a (row, column) pair: row * 8 + column."""
    row, col = x[0], x[1]
    return row * 8 + col
def score_ticket(a):
    """Inverse of calcul_score: split a seat ID into its (row, column) pair."""
    return divmod(a, 8)
def nombre_ticket(i):
    """Reconstruct the boarding-pass strings for seat ID *i*.

    Returns (row_part, col_part): 7 row letters (F=0, B=1) and 3 column
    letters (L=0, R=1), matching the encoding used to parse ``data``.

    BUG FIX: the original used ``bin(i)`` without zero-padding, so any seat
    ID below 512 produced a short row string (and the '0b' prefix leaked into
    the slicing); it also mapped rows to B/L and columns to F/R, the opposite
    of the F/B-row, L/R-column encoding of the input, so reconstructed
    tickets could never match the raw passes.
    """
    bits = format(i, "010b")  # 10 bits, zero-padded: 7 row bits + 3 column bits
    ticket = bits[:7].replace("1", "B").replace("0", "F")
    tock = bits[7:].replace("1", "R").replace("0", "L")
    return (ticket, tock)
# Part 1: decode every pass into (row, column), score it, report the extremes.
prescores = [(int(i[:-3], 2),int(i[-3:], 2)) for i in data]
scores = list(map(calcul_score, prescores))
max_t = max(scores)
min_t = min(scores)
print("max is", max_t)
print("min is", min_t)
# Re-read the raw passes for comparison against reconstructed tickets.
with open("input.txt", "r") as f:
    base = [i.rstrip("\n") for i in f.readlines()]
# NOTE(review): this loop's body is a bare `...` placeholder — it has no
# effect and can be removed once the reconstruction approach is settled.
for i in range(min_t,max_t):
    step = nombre_ticket(i)
    if not step[0]+step[1] in base:
        ...
# Part 2: our seat is the ID missing from the list whose two neighbours exist.
# NOTE(review): range(min_t, max_t) excludes max_t itself — fine here, since
# the maximum seat is known to be occupied.
for i in range(min_t,max_t):
    if not i in scores:
        if i - 1 in scores:
            if i + 1 in scores:
                print("ticket", nombre_ticket(i))
                print("score", i)
#!/usr/bin/env python
# CNN-keras.py: 1-D convolutional network trainer (Python 2 / legacy keras API).
import argparse
import shutil
import keras
from keras.models import Sequential
import numpy as np
np.random.seed(1234)  # fixed seed so runs are reproducible
import cPickle as pickle  # Python-2 pickle implementation
import h5py
from keras.layers import Conv1D, MaxPool1D,Dense, Activation, Dropout, GaussianDropout, ActivityRegularization, Flatten
from keras.optimizers import *
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras import regularizers
from keras.activations import softmax, relu, elu
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.utils import to_categorical
import os, sys, shutil
import glob
from math import exp, log
#import tensorflow as tf
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from keras.models import load_model
from clr_callback import *
from sklearn import preprocessing
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())  # list visible CPU/GPU devices at startup
class bbhparams:
    """Plain record holding the thirteen parameters of one simulated
    binary-black-hole signal; every constructor argument is stored verbatim
    as an attribute of the same name."""

    def __init__(self, mc, M, eta, m1, m2, ra, dec, iota, psi, idx, fmin, snr, SNR):
        (self.mc, self.M, self.eta,
         self.m1, self.m2,
         self.ra, self.dec, self.iota, self.psi,
         self.idx, self.fmin, self.snr, self.SNR) = (
            mc, M, eta, m1, m2, ra, dec, iota, psi, idx, fmin, snr, SNR)
def parser():
    """
    Parse command line arguments.

    :return: argparse.Namespace holding data, optimizer, network-architecture
        and general output options
    """
    #TODO: complete help sections
    parser = argparse.ArgumentParser(prog='CNN-keras.py', description='Convolutional Neural Network in keras with tensorflow')
    # arguments for data
    parser.add_argument('-Nts', '--Ntimeseries', type=int, default=10000,
                        help='number of time series for training')
    #parser.add_argument('-ds', '--set-seed', type=str,
    #                    help='seed number for each training/validaiton/testing set')
    parser.add_argument('-Ntot', '--Ntotal', type=int, default=10,
                        help='number of available datasets with the same name as specified dataset')
    parser.add_argument('-Nval', '--Nvalidation', type=int, default=10000,
                        help='')
    parser.add_argument('-d', '--dataset', type=str,
                        help='your dataset')
    parser.add_argument('-bs', '--batch_size', type=int, default=20,
                        help='size of batches used for training/validation')
    parser.add_argument('-nw', '--noise_weight', type=float, default=1.0,
                        help='')
    parser.add_argument('-sw', '--sig_weight', type=float, default=1.0,
                        help='')
    # arguments for optimizer
    parser.add_argument('-opt', '--optimizer', type=str, default='SGD',
                        help='')
    parser.add_argument('-lr', '--lr', type=float, default=0.01,
                        help='learning rate')
    parser.add_argument('-mlr', '--max_learning_rate', type=float, default=0.01,
                        help='max learning rate for cyclical learning rates')
    parser.add_argument('-NE', '--n_epochs', type=int, default=20,
                        help='number of epochs to train for')
    parser.add_argument('-dy', '--decay', type=float ,default=0.0,
                        help='help')
    parser.add_argument('-ss', '--stepsize', type=float, default=500,
                        help='help')
    parser.add_argument('-mn', '--momentum', type=float, default=0.9,
                        help='momentum for updates where applicable')
    # NOTE(review): type=bool on argparse parses any non-empty string as True;
    # --nesterov False would still yield True. Confirm intended usage.
    parser.add_argument('--nesterov', type=bool, default=True,
                        help='')
    parser.add_argument('--rho', type=float, default=0.9,
                        help='')
    parser.add_argument('--epsilon', type=float, default=1e-08,
                        help='')
    parser.add_argument('--beta_1', type=float, default=0.9,
                        help='')
    parser.add_argument('--beta_2', type=float, default=0.999,
                        help='')
    parser.add_argument('-pt', '--patience', type=int, default=10,
                        help='')
    parser.add_argument('-lpt', '--LRpatience', type=int, default=5,
                        help='')
    #arguments for network
    parser.add_argument('-f', '--features', type=str, default="1,1,1,1,0,4" ,
                        help='order and types of layers to use, see RunCNN_bbh.sh for types')
    parser.add_argument('-nf', '--nfilters', type=str, default="16,32,64,128,32,2",
                        help='number of kernels/neurons per layer')
    parser.add_argument('-fs', '--filter_size', type=str, default="1-1-32,1-1-16,1-1-8,1-1-4,0-0-0,0-0-0" ,
                        help='size of convolutional layers')
    parser.add_argument('-fst', '--filter_stride', type=str, default="1-1-1,1-1-1,1-1-1,1-1-1",
                        help='stride for max-pooling layers')
    parser.add_argument('-fpd', '--filter_pad', type=str, default="0-0-0,0-0-0,0-0-0,0-0-0",
                        help='padding for convolutional layers')
    parser.add_argument('-dl', '--dilation', type=str, default="1-1-1,1-1-1,1-1-4,1-1-4,1-1-1",
                        help='dilation for convolutional layers, set to 1 for normal convolution')
    parser.add_argument('-p', '--pooling', type=str, default="1,1,1,1",
                        help='')
    parser.add_argument('-ps', '--pool_size', type=str, default="1-1-8,1-1-6,1-1-4,1-1-2",
                        help='size of max-pooling layers after convolutional layers')
    parser.add_argument('-pst', '--pool_stride', type=str, default="1-1-4,1-1-4,1-1-4,0-0-0,0-0-0",
                        help='stride for max-pooling layers')
    parser.add_argument('-ppd', '--pool_pad', type=str, default="0-0-0,0-0-0,0-0-0",
                        help='')
    parser.add_argument('-dp', '--dropout', type=str, default="0.0,0.0,0.0,0.0,0.1,0.0",
                        help='dropout for the fully connected layers')
    parser.add_argument('-fn', '--activation_functions', type=str, default='elu,elu,elu,elu,elu,softmax',
                        help='activation functions for layers')
    # general arguments
    parser.add_argument('-od', '--outdir', type=str, default='./history',
                        help='')
    parser.add_argument('--notes', type=str,
                        help='')
    return parser.parse_args()
class network_args:
    """Converts the comma-separated CLI option strings into per-layer numpy
    arrays consumed by network()."""
    def __init__(self, args):
        # Layer-type code per layer: 1=conv(+pool), 0=flatten+dense, 2=dense, 4=output.
        self.features = np.array(args.features.split(','))
        self.num_classes = 1
        # Per-class sample weights {0: noise, 1: signal} (currently unused by fit).
        self.class_weight = {0:args.noise_weight, 1:args.sig_weight}
        self.Nfilters = np.array(args.nfilters.split(",")).astype('int')
        # NOTE(review): the CLI defaults for these options are "a-b-c" triples
        # (e.g. "1-1-32"), which astype('int') cannot parse and will raise
        # ValueError on — the code appears to expect one plain integer per
        # layer; confirm against RunCNN_bbh.sh.
        self.kernel_size = np.array(args.filter_size.split(",")).astype('int')
        self.stride = np.array(args.filter_stride.split(",")).astype('int')
        self.dilation = np.array(args.dilation.split(",")).astype('int')
        self.activation = np.array(args.activation_functions.split(','))
        self.dropout = np.array(args.dropout.split(",")).astype('float')
        # NOTE(review): astype('bool') on strings tests string truthiness, so
        # the flag "0" also becomes True — verify intended pooling switches.
        self.pooling = np.array(args.pooling.split(',')).astype('bool')
        self.pool_size = np.array(args.pool_size.split(",")).astype('int')
        self.pool_stride = np.array(args.pool_stride.split(",")).astype('int')
def choose_optimizer(args):
    """
    Map the --optimizer CLI name to a configured keras optimizer instance.

    :param args: parsed argparse namespace providing lr, momentum, decay,
        nesterov, rho, epsilon, beta_1, beta_2
    :return: a keras optimizer
    :raises ValueError: for an unrecognised optimizer name (previously the
        function fell through and returned None, failing later in compile)
    """
    lr = args.lr
    if args.optimizer == 'SGD':
        return SGD(lr=lr, momentum=args.momentum, decay=args.decay, nesterov=args.nesterov)
    if args.optimizer == 'RMSprop':
        return RMSprop(lr=lr, rho=args.rho, epsilon=args.epsilon, decay=args.decay)
    if args.optimizer == 'Adagrad':
        return Adagrad(lr=lr, epsilon=args.epsilon, decay=args.decay)
    if args.optimizer == 'Adadelta':
        return Adadelta(lr=lr, rho=args.rho, epsilon=args.epsilon, decay=args.decay)
    # BUG FIX: 'Adam' previously constructed Adamax and 'Adamax' constructed
    # Adam — the two classes were swapped.
    if args.optimizer == 'Adam':
        return Adam(lr=lr, beta_1=args.beta_1, beta_2=args.beta_2, epsilon=args.epsilon, decay=args.decay)
    if args.optimizer == 'Adamax':
        return Adamax(lr=lr, beta_1=args.beta_1, beta_2=args.beta_2, epsilon=args.epsilon, decay=args.decay)
    if args.optimizer == 'Nadam':
        return Nadam(lr=lr, beta_1=args.beta_1, beta_2=args.beta_2, epsilon=args.epsilon, schedule_decay=args.decay)
    raise ValueError('Unknown optimizer: {0}'.format(args.optimizer))
def network(args, netargs, shape, outdir, data, targets):
    """Build, compile, train and evaluate the CNN.

    :param args: parsed CLI namespace (optimizer / training settings)
    :param netargs: network_args instance describing the layers
    :param shape: input width (unused here; input_shape is hard-coded to 512)
    :param outdir: run directory where best_weights.hdf5 is checkpointed
    :param data: training samples, already shuffled
    :param targets: binary labels aligned with data
    :return: (trained model, fit history, predictions on the final fifth)
    """
    model = Sequential()
    optimizer = choose_optimizer(args)
    #TODO: add support for advanced activation functions
    count = 0
    for i, op in enumerate(netargs.features):
        if int(op) == 1:
            count+=1
            #print(count)
            # standard convolutional layer with max pooling
            # NOTE(review): input_shape is passed on every conv layer; keras
            # only honours it on the first layer, so this is harmless.
            model.add(Conv1D(
                netargs.Nfilters[i],
                input_shape=(512, 1),
                kernel_size=netargs.kernel_size[i],
                strides= netargs.stride[i],
                padding= 'valid',
                dilation_rate=netargs.dilation[i],
                use_bias=True,
                kernel_initializer=initializers.glorot_normal(),
                bias_initializer=initializers.glorot_normal(),
                kernel_regularizer=None,
                bias_regularizer=None,
                activity_regularizer=None,
                kernel_constraint=None,
                bias_constraint=None,
            ))
            # Advanced activations are separate layers, not Activation() names.
            if netargs.activation[i] == 'leakyrelu':
                model.add(LeakyReLU(alpha=0.01))#netargs.activation[i]))
            elif netargs.activation[i] == 'prelu':
                model.add(PReLU())
            else:
                model.add(Activation(netargs.activation[i]))
            model.add(BatchNormalization(
                axis=1
            ))
            model.add(GaussianDropout(netargs.dropout[i]))
            if netargs.pooling[i]:
                print(netargs.pool_size[i])
                model.add(MaxPool1D(
                    pool_size=netargs.pool_size[i],
                    strides=None,
                    #strides=netargs.pool_stride[i],
                    padding='valid',
                ))
        elif int(op) == 0:
            # standard fully conected layer (flattens the conv output first)
            model.add(Flatten())
            model.add(Dense(
                netargs.Nfilters[i]
                #kernel_regularizer=regularizers.l1(0.01)
            ))
            if netargs.activation[i] == 'leakyrelu':
                model.add(LeakyReLU(alpha=0.01))#netargs.activation[i]))
            elif netargs.activation[i] == 'prelu':
                model.add(PReLU())
            else:
                model.add(Activation(netargs.activation[i]))
            model.add(GaussianDropout(netargs.dropout[i]))
        elif int(op) == 2:
            # standard fully conected layer (no flatten)
            #model.add(Flatten())
            model.add(Dense(
                netargs.Nfilters[i]
                #kernel_regularizer=regularizers.l1(0.01)
            ))
            if netargs.activation[i] == 'leakyrelu':
                model.add(LeakyReLU(alpha=0.01))#netargs.activation[i]))
            elif netargs.activation[i] == 'prelu':
                model.add(PReLU())
            else:
                model.add(Activation(netargs.activation[i]))
            model.add(GaussianDropout(netargs.dropout[i]))
        elif int(op) == 4:
            # output layer with num_classes units
            model.add(Dense(
                netargs.num_classes
            ))
            if netargs.activation[i] == 'leakyrelu':
                model.add(LeakyReLU(alpha=0.01))#netargs.activation[i]))
            elif netargs.activation[i] == 'prelu':
                model.add(PReLU())
            else:
                model.add(Activation(netargs.activation[i]))
    print('Compiling model...')
    model.compile(
        loss="binary_crossentropy",
        optimizer=optimizer,
        metrics=["accuracy", "binary_crossentropy"]
    )
    model.summary()
    #TODO: add options to enable/disable certain callbacks
    clr = CyclicLR(base_lr=args.lr, max_lr=args.max_learning_rate, step_size=args.stepsize)
    earlyStopping = EarlyStopping(monitor='val_acc', patience=args.patience, verbose=0, mode='auto')
    redLR = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=args.LRpatience, verbose=0, mode='auto',
                              epsilon=0.0001, cooldown=0, min_lr=0)
    # NOTE(review): period=0 on ModelCheckpoint — confirm the installed keras
    # treats this as "save every epoch" rather than an error.
    modelCheck = ModelCheckpoint('{0}/best_weights.hdf5'.format(outdir), monitor='val_acc', verbose=0, save_best_only=True,save_weights_only=True, mode='auto', period=0)
    print('Fitting model...')
    # Enable the cyclical-LR callback only when a real LR range was requested.
    if args.lr != args.max_learning_rate:
        hist = model.fit(data, targets,
                         epochs=args.n_epochs,
                         batch_size=args.batch_size,
                         #class_weight=netargs.class_weight,
                         validation_split=0.10,
                         shuffle=True,
                         verbose=1,
                         callbacks=[clr, earlyStopping, redLR, modelCheck])
    else:
        hist = model.fit(data, targets,
                         epochs=args.n_epochs,
                         batch_size=args.batch_size,
                         #class_weight=netargs.class_weight,
                         validation_split=0.10,
                         shuffle=True,
                         verbose=1,
                         callbacks=[earlyStopping, modelCheck])
    print('Evaluating model...')
    model.load_weights('{0}/best_weights.hdf5'.format(outdir))
    # Evaluate on the final fifth of the (already shuffled) data.
    # NOTE(review): Python-2 integer division — under Python 3 `len(data)/5`
    # is a float and these slices would raise TypeError.
    p = model.predict(data[len(data) - len(data) /5:])
    t = targets[len(data) - len(data) / 5:]
    #eval_results = model.evaluate(p, t,
    #                              sample_weight=None,
    #                              batch_size=args.batch_size, verbose=1)
    #preds = model.predict(p)
    # Per-class accuracy at threshold 0.5 (class 0 counted below, class 1 above).
    gr, gt = (p[t==0] < 0.5).sum(), (t==0).sum()
    sr, st = (p[t==1] > 0.5).sum(), (t==1).sum()
    # NOTE(review): Python-2 print statements; `i` here is the layer index
    # leaked from the construction loop above, not a meaningful run id.
    print i, "Glitch Accuracy: %1.3f" % (float(gr) / float(gt))
    print i, "Signal Accuracy: %1.3f" % (float(sr) / float(st))
    return model, hist, p
def main(args):
    """Load the dataset, shuffle it, train the network and save all run artefacts.

    :param args: parsed CLI namespace from parser()
    """
    # convert args to correct format for network
    netargs = network_args(args)
    # read the samples in: 'noise' and 'signal' groups of equal interest
    f = h5py.File(args.dataset, 'r')
    glitch = f['noise'][:]
    signal = f['signal'][:]
    data = np.concatenate([glitch, signal])
    targets = np.zeros(len(data))
    # Label the first half of the concatenation with class 1.
    # (// keeps the index an int under both Python 2 and 3; behaviour is
    # unchanged on Python 2 where / on ints already truncated.)
    # NOTE(review): the first half holds the 'noise' rows — confirm that
    # class 1 is intended to mean noise here.
    targets[:(len(targets)//2)] = 1
    #Randomize sample positions
    idx = np.arange(0, len(data), 1)
    np.random.shuffle(idx)
    data = data[idx]
    targets = targets[idx]
    # Create a fresh runN directory under the output dir.
    if not os.path.exists('{0}'.format(args.outdir)):
        os.makedirs('{0}'.format(args.outdir))
    Nrun = 0
    while os.path.exists('{0}/run{1}'.format(args.outdir,Nrun)):
        Nrun += 1
    os.makedirs('{0}/run{1}'.format(args.outdir, Nrun))
    width = 512
    shape = width
    out = '{0}/run{1}'.format(args.outdir, Nrun)
    # train and test network
    model, hist, preds = network(args, netargs, shape, out,
                                 data, targets)
    # Persist the exact arguments used for this run.
    with open('{0}/run{1}/args.pkl'.format(args.outdir, Nrun), "wb") as wfp:
        pickle.dump(args, wfp)
    for m in model.metrics_names:
        print('Test {0}:'.format(m))
    #shutil.copy('./runCNN.sh', '{0}/SNR{1}/run{2}'.format(args.outdir, args.SNR,Nrun))
    model.save('{0}/run{1}/nn_model.hdf5'.format(args.outdir,Nrun))
    # BUG FIX: the original saved `y_test`, a name never defined in this
    # function (NameError at runtime); save the actual target array instead.
    np.save('{0}/run{1}/targets.npy'.format(args.outdir,Nrun), targets)
    np.save('{0}/run{1}/preds.npy'.format(args.outdir,Nrun), preds)
    np.save('{0}/run{1}/history.npy'.format(args.outdir,Nrun), hist.history)
    #np.save('{0}/run{1}/test_results.npy'.format(args.outdir,Nrun),eval_results)
    print('Results saved at: {0}/run{1}'.format(args.outdir,Nrun))
# Script entry point: parse CLI arguments and launch training.
if __name__ == '__main__':
    args = parser()
    main(args)
import os
import gzip
import urllib.request
import numpy as np
import time
import zipfile
import io
from scipy.io.wavfile import read as wav_read
from tqdm import tqdm
class warblr:
    """Binary audio classification: presence or absence of a bird.

    `Warblr <http://machine-listening.eecs.qmul.ac.uk/bird-audio-detection-challenge/#downloads>`_
    comes from a UK bird-sound crowdsourcing research spinout called Warblr.
    The initiative provides 10,000 ten-second smartphone recordings from
    around the UK (roughly 44 hours of audio, published under a Creative
    Commons licence), covering a wide range of locations and environments
    including weather noise, traffic noise, human speech and even human bird
    imitations — directly representative of mobile crowdsourced data.
    """

    def download(path):
        """Fetch the wav archive and metadata CSV into ``<path>warblr/`` if absent."""
        print("Loading warblr")
        t = time.time()

        target_dir = path + "warblr"
        if not os.path.isdir(target_dir):
            print("\tCreating Directory")
            os.mkdir(target_dir)

        wav_zip = path + "warblr/warblrb10k_public_wav.zip"
        if not os.path.exists(wav_zip):
            urllib.request.urlretrieve(
                "https://archive.org/download/warblrb10k_public/warblrb10k_public_wav.zip",
                wav_zip,
            )

        meta_csv = path + "warblr/warblrb10k_public_metadata.csv"
        if not os.path.exists(meta_csv):
            urllib.request.urlretrieve(
                "https://ndownloader.figshare.com/files/6035817", meta_csv
            )

    def load(path=None):
        """Download the data if needed and return ``(wavs, labels)``:
        a list of float32 arrays (one per clip, leading singleton axis) and
        the matching int32 label vector."""
        if path is None:
            path = os.environ["DATASET_PATH"]
        warblr.download(path)

        print("Loading warblr")
        t = time.time()

        # Metadata rows hold (file id, label).
        labels = np.loadtxt(
            path + "warblr/warblrb10k_public_metadata.csv",
            delimiter=",",
            skiprows=1,
            dtype="str",
        )

        archive = zipfile.ZipFile(path + "warblr/warblrb10k_public_wav.zip")
        N = labels.shape[0]
        wavs = []
        for i, row in tqdm(enumerate(labels), ascii=True):
            payload = io.BytesIO(archive.read("wav/" + row[0] + ".wav"))
            # wav_read returns (sample_rate, samples); keep only the samples.
            wavs.append(np.expand_dims(wav_read(payload)[1].astype("float32"), 0))
        labels = labels[:, 1].astype("int32")
        print("Dataset warblr loaded in", "{0:.2f}".format(time.time() - t), "s.")
        return wavs, labels
from typing import Any, Dict, Tuple, Union
import gym
import numpy as np
import torch
import torch.optim as opt
from torch.autograd import Variable
from ....environments import VecEnv
from ...common import RolloutBuffer, get_env_properties, get_model, safe_mean
from ..base import OnPolicyAgent
class VPG(OnPolicyAgent):
    """
    Vanilla Policy Gradient algorithm

    Paper https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf

    :param network_type: The deep neural network layer types ['mlp']
    :param env: The environment to learn from
    :param batch_size: minibatch size drawn from the rollout buffer per update
    :param gamma: discount factor
    :param epochs: the optimizer's number of epochs
    :param lr_policy: policy network learning rate
    :param layers: sizes of the hidden layers of the policy network
    :param rollout_size: Rollout Buffer Size
    :type network_type: str
    :type env: Gym environment(s)
    :type batch_size: int
    :type gamma: float
    :type epochs: int
    :type lr_policy: float
    :type layers: tuple
    :type rollout_size: int
    """

    def __init__(
        self,
        network_type: str,
        env: Union[gym.Env, VecEnv],
        batch_size: int = 256,
        gamma: float = 0.99,
        epochs: int = 1000,
        lr_policy: float = 0.01,
        layers: Tuple = (32, 32),
        rollout_size: int = 2048,
        **kwargs
    ):
        super(VPG, self).__init__(
            network_type,
            env,
            batch_size,
            layers,
            gamma,
            lr_policy,
            None,  # VPG has no value network, hence no critic learning rate
            epochs,
            rollout_size,
            **kwargs
        )

        self.empty_logs()
        self.create_model()

    def create_model(self):
        """
        Initialize the actor network and its optimizer, plus the rollout buffer
        """
        state_dim, action_dim, discrete, action_lim = get_env_properties(self.env)

        # Instantiate networks and optimizers
        self.actor = get_model("p", self.network_type)(
            state_dim, action_dim, self.layers, "V", discrete, action_lim=action_lim
        ).to(self.device)

        # load paramaters if already trained
        if self.load_model is not None:
            self.load(self)
            self.actor.load_state_dict(self.checkpoint["policy_weights"])
            for key, item in self.checkpoint.items():
                if key not in ["policy_weights", "value_weights", "save_model"]:
                    setattr(self, key, item)
            print("Loaded pretrained model")

        self.optimizer_policy = opt.Adam(self.actor.parameters(), lr=self.lr_policy)
        self.rollout = RolloutBuffer(
            self.rollout_size,
            self.env.observation_space,
            self.env.action_space,
            n_envs=self.env.n_envs,
        )

    def select_action(
        self, state: np.ndarray, deterministic: bool = False
    ) -> np.ndarray:
        """
        Select action for the given state

        :param state: State for which action has to be sampled
        :param deterministic: Whether the action is deterministic or not
        :type state: int, float, ...
        :type deterministic: bool
        :returns: The action
        :rtype: int, float, ...
        """
        state = Variable(torch.as_tensor(state).float().to(self.device))

        # create distribution based on policy_fn output
        # BUG FIX: `deterministic` was previously ignored (hard-coded False),
        # making deterministic evaluation impossible.
        a, c = self.actor.get_action(state, deterministic=deterministic)

        return a, c.log_prob(a), None

    def get_value_log_probs(self, state, action):
        """Return the log-probability of `action` under the current policy at `state`."""
        _, dist = self.actor.get_action(state, deterministic=False)
        return dist.log_prob(action)

    def get_traj_loss(self, value, done):
        """
        Calculates the loss for the trajectory
        """
        self.rollout.compute_returns_and_advantage(value.detach().cpu().numpy(), done)

    def update_policy(self) -> None:
        """One pass of REINFORCE-style updates over the rollout buffer."""
        for rollout in self.rollout.get(self.batch_size):
            actions = rollout.actions

            if isinstance(self.env.action_space, gym.spaces.Discrete):
                actions = actions.long().flatten()

            log_prob = self.get_value_log_probs(rollout.observations, actions)

            # Policy-gradient objective: maximise return-weighted log-probs.
            policy_loss = rollout.returns * log_prob
            policy_loss = -torch.mean(policy_loss)
            self.logs["policy_loss"].append(policy_loss.item())

            loss = policy_loss

            self.optimizer_policy.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
            self.optimizer_policy.step()

    def collect_rollouts(self, state):
        """Fill the rollout buffer by stepping the environment rollout_size times."""
        for i in range(self.rollout_size):
            action, old_log_probs, _ = self.select_action(state)

            next_state, reward, dones, _ = self.env.step(action.numpy())
            self.epoch_reward += reward

            if self.render:
                self.env.render()

            self.rollout.add(
                state,
                action.reshape(self.env.n_envs, 1),
                reward,
                dones,
                torch.Tensor([0] * self.env.n_envs),  # no critic values in VPG
                old_log_probs.detach(),
            )

            state = next_state

            self.collect_rewards(dones)

        return torch.Tensor([0] * self.env.n_envs), dones

    def get_hyperparams(self) -> Dict[str, Any]:
        """
        :returns: Hyperparameters and policy weights for checkpointing
        :rtype: dict
        """
        hyperparams = {
            "network_type": self.network_type,
            "batch_size": self.batch_size,
            "gamma": self.gamma,
            "lr_policy": self.lr_policy,
            "rollout_size": self.rollout_size,
            # BUG FIX: previously read `self.ac`, which is never assigned in
            # this class (the policy network is `self.actor`) — AttributeError.
            "weights": self.actor.state_dict(),
        }

        return hyperparams

    def get_logging_params(self) -> Dict[str, Any]:
        """
        :returns: Logging parameters for monitoring training
        :rtype: dict
        """
        logs = {
            "policy_loss": safe_mean(self.logs["policy_loss"]),
            "mean_reward": safe_mean(self.rewards),
        }

        self.empty_logs()
        return logs

    def empty_logs(self):
        """
        Empties logs
        """
        self.logs = {}
        self.logs["policy_loss"] = []
        self.logs["policy_entropy"] = []
        self.rewards = []
# BSD 3-Clause License.
#
# Copyright (c) 2019-2021 Robert A. Milton. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A function that can be used to make predictions on a test data csv file using either a GP or a ROM or both.
Some terminology on the directories:
Base_Path: the base path is where the initial data is found.
Store: inside the base path there maybe multiple stores depending on how the data has been treated using "store_and_fold".
Split: the next folder will usually be the splits if the data has more than 1 output.
Fold: Each fold will then be found.
Models: The models can be the GP (e.g. "RBF") or a ROM (e.g. "ROM.optimized"). Each rotation of the ROM will be in here too (e.g. "ROM.0").
After that, files are collected together back down these directories.
"""
import time
import numpy as np
import pandas as pd
from shutil import rmtree, copytree
from pathlib import Path
from random import shuffle
from itertools import chain
from romcomma.data import Store, Fold, Frame
import romcomma.model as model
from romcomma.typing_ import NP, Union, Tuple, Sequence, List
def make_predictions(input_source: str, store_name: str, is_split: bool = True, is_standardized: bool = False, shuffle_before_folding: bool = True):
    """Standardize a test-data csv and assign its rows across the store's K folds.

    :param input_source: csv filename (two header rows, one index column) in BASE_PATH
    :param store_name: name of the Store directory under BASE_PATH
    :param is_split: NOTE(review): currently unused
    :param is_standardized: skip standardization if the csv is already standardized
    :param shuffle_before_folding: shuffle row order before fold assignment

    NOTE(review): work in progress — the fold-writing step is still a verbatim
    reference copy of Fold's private helpers (the trailing triple-quoted
    string) and the function currently returns None.
    """
    # Ensure the functions can get to the main directories.
    # NOTE(review): BASE_PATH is a module-level name defined only under
    # __main__ below, so importing this module and calling the function
    # directly raises NameError; also `Store(BASE_PATH / store_name)`
    # bypasses the local `base_path` alias — presumably intentional but
    # worth unifying.
    base_path = Path(BASE_PATH)
    store = Store(BASE_PATH / store_name)
    # Read the input test data file which has two rows of headers and 1 index column.
    # This file should be located in the BASE_PATH with the initial training data.
    input_data = pd.read_csv(base_path / input_source, header=[0, 1], index_col=0)
    # has the data already been standardized?
    if is_standardized is True:
        stand_inputs = input_data.values[:, :]
        stand_inputs_df = pd.DataFrame(stand_inputs)
    else:
        # Read the csv storing the standardised information in STORE
        mean_std = pd.read_csv(store.standard_csv, header=[0, 1], index_col=0)
        # Remove the standard values for the outputs.
        mean_std = mean_std.drop(columns=['Output', 'Output']).values
        # Standardise the input data: row 0 holds means, row 1 holds stds.
        dataset = input_data.values
        mean = mean_std[0].astype(float)
        std = mean_std[1].astype(float)
        stand_inputs= (dataset - mean) / std
        stand_inputs = stand_inputs[:, :]
        stand_inputs_df = pd.DataFrame(stand_inputs)
    K = store.K
    N = len(stand_inputs_df.index)
    # assert 1 <= K <= N, "K={K:d} does not lie between 1 and N=len(stand_inputs_df.index)={N:d} inclusive".format(K=K, N=N)
    indices = list(range(N))
    # looks like I need to follow the inner functions Fold.indicators and Fold.fold_from_indices.
    if shuffle_before_folding:
        shuffle(indices)
    # Indicator sequence: int(N/K) blocks of the values 0..K-1 plus a
    # remainder block 0..(N%K - 1), each block internally shuffled, so every
    # row gets a fold number with near-even fold sizes.
    K_blocks = [list(range(K)) for i in range(int(N / K))]
    K_blocks.append(list(range(N % K)))
    for K_range in K_blocks:
        shuffle(K_range)
    indicators = list(chain(*K_blocks))
    stand_inputs_df['indicators'] = indicators
    # I've added indicators as a column to the df, now can i copy each indicator into it's corresponding fold repository?
    for k in range(K):
        train = [index for index, indicator in zip(indices, indicators) if k != indicator]
        test = [index for index, indicator in zip(indices, indicators) if k == indicator]
        assert len(train) > 0
    """
    indicators = _indicators()
    for k in range(K):
        _fold_from_indices(_k=k,
                           train=[index for index, indicator in zip(indices, indicators) if k != indicator],
                           test=[index for index, indicator in zip(indices, indicators) if k == indicator])
    def _fold_from_indices(_k: int, train: List[int], test: List[int]):
        assert len(train) > 0
        meta = {**Fold.DEFAULT_META, **{'parent_dir': str(parent.folder), 'k': _k,
                                        'K': parent.K}}
        fold = Store.from_df(parent.fold_dir(_k), parent.data.df.iloc[train], meta)
        fold.standardize(standard)
        fold.__class__ = cls
        if len(test) < 1:
            if replace_empty_test_with_data_:
                fold._test = fold.create_standardized_frame(fold.test_csv, parent.data.df.iloc[train])
            else:
                fold._test = Frame(fold.test_csv,
                                   DataFrame(data=NaN, index=[-1], columns=parent.data.df.columns))
        else:
            fold._test = fold.create_standardized_frame(fold.test_csv, parent.data.df.iloc[test])"""
    return
# Script entry point: configure the (Windows network share) data paths and
# run predictions against the 10-fold store.
if __name__ == '__main__':
    BASE_PATH = Path("Z:\\comma_group1\\Rom\\dat\\AaronsTraining\\Veysel-Copy")
    STORE_NAME_1 = "10_Folds"
    STORE_NAME_2 = "1_Fold"
    make_predictions(input_source="Input_Data.csv", store_name=STORE_NAME_1, is_split=False)
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import random

MAXLIFE = 120  # ceiling for the piecewise-linear RUL target (see kink_RUL)
SCALE = 1      # multiplier applied to feature matrices before saving
RESCALE = 1
true_rul = []  # populated by compute_rul_of_one_file with each test engine's final RUL
test_engine_id = 0      # set globally while loading single-file test data
training_engine_id = 0  # set globally while loading training data
def kink_RUL(cycle_list, max_cycle, maxlife=None):
    '''
    Piecewise-linear RUL target ("kink"): constant at *maxlife* up to the knee
    point, then a straight descent reaching zero at *max_cycle*.

        ^
        |
maxlife |-----------
        |            \\
        |             \\
        |              \\
        |----------------------->  cycle
                   knee = max_cycle - maxlife

    :param cycle_list: one engine's cycle values (only its length is used)
    :param max_cycle: cycle at which the engine fails (RUL hits 0 there)
    :param maxlife: RUL ceiling; defaults to the module-level MAXLIFE, which
        keeps the original two-argument call signature backward compatible
        (generalized so the ceiling is no longer hard-coded)
    :return: list of RUL values, one per entry of cycle_list
    '''
    if maxlife is None:
        maxlife = MAXLIFE  # preserve original behaviour for existing callers
    knee_point = max_cycle - maxlife
    # Constant decrement per cycle past the knee; numerically this equals 1.0
    # (maxlife / maxlife), kept as a division to mirror the original arithmetic.
    step = maxlife / (max_cycle - knee_point)
    kink = []
    for i in range(len(cycle_list)):
        if i < knee_point:
            kink.append(maxlife)
        else:
            kink.append(kink[i - 1] - step)
    return kink
def compute_rul_of_one_id(FD00X_of_one_id, max_cycle_rul=None):
    '''
    Compute the RUL (remaining useful life) series for one engine's rows.

    :param FD00X_of_one_id: dataframe slice for a single engine_id, with a
        'cycle' column
    :param max_cycle_rul: the engine's true remaining life after its last
        recorded cycle (test data); None means the engine runs to failure
    :return: list of RUL values aligned with the input rows
    '''
    cycle_list = FD00X_of_one_id['cycle'].tolist()
    last_cycle = max(cycle_list)
    max_cycle = last_cycle if max_cycle_rul is None else last_cycle + max_cycle_rul
    return kink_RUL(cycle_list, max_cycle)
def compute_rul_of_one_file(FD00X, id='engine_id', RUL_FD00X=None):
    '''
    Concatenate the per-engine RUL series for a whole sub-dataset.

    :param FD00X: full dataframe of one sub-dataset (e.g. train_FD001)
    :param id: name of the engine-id column
    :param RUL_FD00X: optional dataframe of true final RULs (test data); when
        given, each engine's final RUL is also appended to the module-level
        true_rul accumulator
    :return: flat list of RUL values covering every row of FD00X
    '''
    rul = []
    if RUL_FD00X is None:
        # Training data: every engine is run to failure.
        for _id in set(FD00X[id]):
            rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id]))
    else:
        # Test data: shift each trajectory by the engine's known final RUL.
        for _id in set(FD00X[id]):
            final_rul = int(RUL_FD00X.iloc[_id - 1])
            true_rul.append(final_rul)
            rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id], final_rul))
    return rul
def get_CMAPSSData(save=False, save_training_data=True, save_testing_data=True, files=[1, 2, 3, 4, 5],
                   min_max_norm=False):
    '''
    :param save: switch to load the already preprocessed data or begin preprocessing of raw data
    :param save_training_data: same functionality as 'save' but for training data only
    :param save_testing_data: same functionality as 'save' but for testing data only
    :param files: to indicate which sub dataset needed to be loaded for operations
    :param min_max_norm: switch to enable min-max normalization
    :return: (train_values, test_values, train_df, test_df); also saves the
        preprocessed training and testing data as numpy/csv files
    '''
    # NOTE(review): mutable default `files=[...]` is shared between calls —
    # safe only because it is never mutated here.
    if save == False:
        # Fast path: reload previously preprocessed arrays/frames from disk.
        return np.load("normalized_train_data.npy"), np.load("normalized_test_data.npy"), pd.read_csv(
            'normalized_train_data.csv', index_col=[0]), pd.read_csv('normalized_test_data.csv', index_col=[0])

    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']

    if save_training_data:  ### Training ###
        train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
        train_FD002 = pd.read_table("./CMAPSSData/train_FD002.txt", header=None, delim_whitespace=True)
        train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
        train_FD004 = pd.read_table("./CMAPSSData/train_FD004.txt", header=None, delim_whitespace=True)
        train_FD001.columns = column_name
        train_FD002.columns = column_name
        train_FD003.columns = column_name
        train_FD004.columns = column_name

        previous_len = 0
        frames = []
        # NOTE(review): eval(data_file) resolves the local DataFrame by its
        # variable name; fragile but intentional here.
        for data_file in ['train_FD00' + str(i) for i in files]:  # load subdataset by subdataset
            #### standard normalization ####
            mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
            std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
            std.replace(0, 1, inplace=True)  # guard against zero-variance sensors
            # print("std", std)
            ################################
            if min_max_norm:
                scaler = MinMaxScaler()
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
                    eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
            else:
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
                    list(eval(data_file)))] - mean) / std
            # Attach the piecewise-linear RUL target column.
            eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file))
            current_len = len(eval(data_file))
            # print(eval(data_file).index)
            # Reindex so the concatenated frame has a contiguous index.
            eval(data_file).index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
            # print(eval(data_file).index)
            frames.append(eval(data_file))
            print(data_file)
        train = pd.concat(frames)
        global training_engine_id
        training_engine_id = train['engine_id']
        # NOTE(review): positional axis argument drop(col, 1) — removed in
        # pandas 2.0; use axis=1 when upgrading.
        train = train.drop('engine_id', 1)
        train = train.drop('cycle', 1)
        # if files[0] == 1 or files[0] == 3:
        #     train = train.drop('setting3', 1)
        #     train = train.drop('s18', 1)
        #     train = train.drop('s19', 1)
        train_values = train.values * SCALE
        np.save('normalized_train_data.npy', train_values)
        train.to_csv('normalized_train_data.csv')
        ###########
    else:
        train = pd.read_csv('normalized_train_data.csv', index_col=[0])
        train_values = train.values

    if save_testing_data:  ### testing ###
        test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
        test_FD002 = pd.read_table("./CMAPSSData/test_FD002.txt", header=None, delim_whitespace=True)
        test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
        test_FD004 = pd.read_table("./CMAPSSData/test_FD004.txt", header=None, delim_whitespace=True)
        test_FD001.columns = column_name
        test_FD002.columns = column_name
        test_FD003.columns = column_name
        test_FD004.columns = column_name

        # load RUL data (true remaining life of each test engine at cutoff)
        RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
        RUL_FD002 = pd.read_table("./CMAPSSData/RUL_FD002.txt", header=None, delim_whitespace=True)
        RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
        RUL_FD004 = pd.read_table("./CMAPSSData/RUL_FD004.txt", header=None, delim_whitespace=True)
        RUL_FD001.columns = ['RUL']
        RUL_FD002.columns = ['RUL']
        RUL_FD003.columns = ['RUL']
        RUL_FD004.columns = ['RUL']

        previous_len = 0
        frames = []
        for (data_file, rul_file) in [('test_FD00' + str(i), 'RUL_FD00' + str(i)) for i in files]:
            mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
            std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
            std.replace(0, 1, inplace=True)
            if min_max_norm:
                scaler = MinMaxScaler()
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
                    eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
            else:
                eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
                    list(eval(data_file)))] - mean) / std
            eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file), RUL_FD00X=eval(rul_file))
            current_len = len(eval(data_file))
            eval(data_file).index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
            frames.append(eval(data_file))
            print(data_file)
            # Remember the engine ids only when a single sub-dataset is loaded.
            if len(files) == 1:
                global test_engine_id
                test_engine_id = eval(data_file)['engine_id']
        test = pd.concat(frames)
        test = test.drop('engine_id', 1)
        test = test.drop('cycle', 1)
        # if files[0] == 1 or files[0] == 3:
        #     test = test.drop('setting3', 1)
        #     test = test.drop('s18', 1)
        #     test = test.drop('s19', 1)
        test_values = test.values * SCALE
        np.save('normalized_test_data.npy', test_values)
        test.to_csv('normalized_test_data.csv')
        ###########
    else:
        test = pd.read_csv('normalized_test_data.csv', index_col=[0])
        test_values = test.values

    return train_values, test_values, train, test
def get_PHM08Data(save=False):
    """
    Load the PHM 2008 challenge dataset.

    :param save: if False, return previously processed arrays from disk;
                 if True, re-process the raw text files, save the results
                 to ./PHM08/processed_data/ and return them.
    :return: (training_data, testing_data, phm_testing_data) numpy arrays;
             the first two are an 80/20 engine-wise split of the training
             file, the last is the normalized final-test file.
    """
    if not save:
        # Fast path: reuse arrays produced by an earlier call with save=True.
        return np.load("./PHM08/processed_data/phm_training_data.npy"), np.load("./PHM08/processed_data/phm_testing_data.npy"), np.load(
            "./PHM08/processed_data/phm_original_testing_data.npy")
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    phm_training_data = pd.read_table("./PHM08/train.txt", header=None, delim_whitespace=True)
    phm_training_data.columns = column_name
    phm_testing_data = pd.read_table("./PHM08/final_test.txt", header=None, delim_whitespace=True)
    phm_testing_data.columns = column_name
    print("phm training")
    # z-score normalize every column except engine_id and cycle.
    mean = phm_training_data.iloc[:, 2:len(list(phm_training_data))].mean()
    std = phm_training_data.iloc[:, 2:len(list(phm_training_data))].std()
    phm_training_data.iloc[:, 2:len(list(phm_training_data))] = (phm_training_data.iloc[:, 2:len(
        list(phm_training_data))] - mean) / std
    phm_training_data['RUL'] = compute_rul_of_one_file(phm_training_data)
    print("phm testing")
    mean = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].mean()
    std = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].std()
    phm_testing_data.iloc[:, 2:len(list(phm_testing_data))] = (phm_testing_data.iloc[:, 2:len(
        list(phm_testing_data))] - mean) / std
    # The true RUL of the challenge test set is unknown; use 0 as placeholder.
    phm_testing_data['RUL'] = 0
    train_engine_id = phm_training_data['engine_id']
    # FIX: pandas >= 2.0 removed the positional `axis` argument of drop();
    # drop(label, 1) raises TypeError there. Use the keyword form.
    phm_training_data = phm_training_data.drop(columns='engine_id')
    phm_training_data = phm_training_data.drop(columns='cycle')
    global test_engine_id
    test_engine_id = phm_testing_data['engine_id']
    phm_testing_data = phm_testing_data.drop(columns='engine_id')
    phm_testing_data = phm_testing_data.drop(columns='cycle')
    phm_training_data = phm_training_data.values
    phm_testing_data = phm_testing_data.values
    # Random 80/20 split of the training engines into train/validation sets.
    engine_ids = train_engine_id.unique()
    train_test_split = np.random.rand(len(engine_ids)) < 0.80
    train_engine_ids = engine_ids[train_test_split]
    test_engine_ids = engine_ids[~train_test_split]
    # Concatenate trajectories engine by engine so rows stay grouped per engine.
    training_data = phm_training_data[train_engine_id[train_engine_id == train_engine_ids[0]].index]
    for id in train_engine_ids[1:]:
        tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
        training_data = np.concatenate((training_data, tmp))
    testing_data = phm_training_data[train_engine_id[train_engine_id == test_engine_ids[0]].index]
    for id in test_engine_ids[1:]:
        tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
        testing_data = np.concatenate((testing_data, tmp))
    print(phm_training_data.shape, phm_testing_data.shape, training_data.shape, testing_data.shape)
    np.save("./PHM08/processed_data/phm_training_data.npy", training_data)
    np.savetxt("./PHM08/processed_data/phm_training_data.txt", training_data, delimiter=" ")
    np.save("./PHM08/processed_data/phm_testing_data.npy", testing_data)
    np.savetxt("./PHM08/processed_data/phm_testing_data.txt", testing_data, delimiter=" ")
    np.save("./PHM08/processed_data/phm_original_testing_data.npy", phm_testing_data)
    np.savetxt("./PHM08/processed_data/phm_original_testing_data.csv", phm_testing_data, delimiter=",")
    return training_data, testing_data, phm_testing_data
def data_augmentation(files=1, low=[10, 40, 90, 170], high=[35, 85, 160, 250], plot=False, combine=False):
    '''
    This helper function only augments the training data to look like testing data.
    Training data always runs to a failure, but testing data mostly stops before a
    failure. Therefore the training data is augmented to contain scenarios without
    failure: each engine trajectory is truncated at a random cycle and the remaining
    cycles become its RUL label.

    :param files: select which sub CMAPSS dataset, or "phm" for the PHM08 set
    :param low: lower bounds for the random selection of the cutoff engine cycle
                (one augmentation pass per entry)
    :param high: upper bounds for the random selection of the cutoff engine cycle
    :param plot: switch to plot the augmented data
    :param combine: if True, use the combined FD001+FD003 training data
    :return: None; writes normalized_train_data.npy / normalized_train_data.csv
    '''
    # NOTE(review): `low`/`high` are mutable default arguments; they are never
    # mutated here, but the def-time binding is fragile — confirm before reuse.
    # NOTE(review): relies on module-level names `random`, `SCALE`, `RESCALE`,
    # `compute_rul_of_one_file` and `combine_FD001_and_FD003` defined elsewhere
    # in this file.
    DEBUG = False
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    ### Loading original data ###
    if files == "phm":
        train_FD00x = pd.read_table("./PHM08/processed_data/phm_training_data.txt", header=None, delim_whitespace=True)
        # Drop the trailing RUL column stored with the processed PHM data.
        train_FD00x.drop(train_FD00x.columns[len(train_FD00x.columns) - 1], axis=1, inplace=True)
        train_FD00x.columns = column_name
    else:
        if combine:
            train_FD00x,_,_ = combine_FD001_and_FD003()
        else:
            file_path = "./CMAPSSData/train_FD00" + str(files) + ".txt"
            train_FD00x = pd.read_table(file_path, header=None, delim_whitespace=True)
            train_FD00x.columns = column_name
            print(file_path.split("/")[-1])
    ### Standard (z-score) normalization of all setting/sensor columns ###
    mean = train_FD00x.iloc[:, 2:len(list(train_FD00x))].mean()
    std = train_FD00x.iloc[:, 2:len(list(train_FD00x))].std()
    # Constant columns have std 0; replace with 1 to avoid division by zero.
    std.replace(0, 1, inplace=True)
    train_FD00x.iloc[:, 2:len(list(train_FD00x))] = (train_FD00x.iloc[:, 2:len(list(train_FD00x))] - mean) / std
    final_train_FD = train_FD00x.copy()
    previous_len = 0
    frames = []
    for i in range(len(high)):
        # One augmentation pass per (low[i], high[i]) cutoff range, each working
        # on a fresh copy of the normalized original data.
        train_FD = train_FD00x.copy()
        train_engine_id = train_FD['engine_id']
        engine_ids = train_engine_id.unique()
        total_ids = len(engine_ids)
        train_rul = []
        print("*************", final_train_FD.shape, total_ids, low[i], high[i], "*****************")
        for id in range(1, total_ids + 1):
            train_engine_id = train_FD['engine_id']
            indexes = train_engine_id[train_engine_id == id].index  ### filter indexes related to id
            traj_data = train_FD.loc[indexes]  ### filter trajectory data
            cutoff_cycle = random.randint(low[i], high[i])  ### randomly selecting the cutoff point of the engine cycle
            # Clamp the cutoff to the trajectory length.
            if cutoff_cycle > max(traj_data['cycle']):
                cutoff_cycle = max(traj_data['cycle'])
            train_rul.append(max(traj_data['cycle']) - cutoff_cycle)  ### collecting remaining cycles
            cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle].index  ### cutoff cycle index
            if DEBUG:
                print("traj_shape: ", traj_data.shape, "current_engine_id:", id, "cutoff_cycle:", cutoff_cycle,
                      "cutoff_index", cutoff_cycle_index, "engine_fist_index", indexes[0], "engine_last_index",
                      indexes[-1])
            ### removing rows after cutoff cycle index ###
            if cutoff_cycle_index[0] != indexes[-1]:
                drop_range = list(range(cutoff_cycle_index[0] + 1, indexes[-1] + 1))
                train_FD.drop(train_FD.index[drop_range], inplace=True)
        train_FD.reset_index(drop=True, inplace=True)
        ### calculating the RUL for augmented data
        train_rul = pd.DataFrame.from_dict({'RUL': train_rul})
        train_FD['RUL'] = compute_rul_of_one_file(train_FD, RUL_FD00X=train_rul)
        ### changing the engine_id for augmented data so ids stay unique
        train_engine_id = train_FD['engine_id']
        for id in range(1, total_ids + 1):
            indexes = train_engine_id[train_engine_id == id].index
            train_FD.loc[indexes, 'engine_id'] = id + total_ids * (i + 1)
        if i == 0:  # should only execute at the first iteration
            # Label and re-index the untruncated original data once.
            final_train_FD['RUL'] = compute_rul_of_one_file(final_train_FD)
            current_len = len(final_train_FD)
            final_train_FD.index = range(previous_len, previous_len + current_len)
            previous_len = previous_len + current_len
        ### Re-indexing the augmented data
        train_FD['RUL'].index = range(previous_len, previous_len + len(train_FD))
        previous_len = previous_len + len(train_FD)
        final_train_FD = pd.concat(
            [final_train_FD, train_FD])  # concatanete the newly augmented data with previous data
    frames.append(final_train_FD)
    train = pd.concat(frames)
    train.reset_index(drop=True, inplace=True)
    train_engine_id = train['engine_id']
    # print(train_engine_id)
    engine_ids = train_engine_id.unique()
    # print(engine_ids[1:])
    # Shuffle the engine order so original and augmented trajectories mix.
    np.random.shuffle(engine_ids)
    # print(engine_ids)
    training_data = train.loc[train_engine_id[train_engine_id == engine_ids[0]].index]
    training_data.reset_index(drop=True, inplace=True)
    previous_len = len(training_data)
    # Rebuild the frame engine by engine in the shuffled order, keeping a
    # contiguous global index.
    for id in engine_ids[1:]:
        traj_data = train.loc[train_engine_id[train_engine_id == id].index]
        current_len = len(traj_data)
        traj_data.index = range(previous_len, previous_len + current_len)
        previous_len = previous_len + current_len
        training_data = pd.concat([training_data, traj_data])
    global training_engine_id
    training_engine_id = training_data['engine_id']
    # NOTE(review): drop(label, 1) with a positional axis was removed in
    # pandas 2.0 — switch to drop(columns=...) when upgrading pandas.
    training_data = training_data.drop('engine_id', 1)
    training_data = training_data.drop('cycle', 1)
    # if files == 1 or files == 3:
    #     training_data = training_data.drop('setting3', 1)
    #     training_data = training_data.drop('s18', 1)
    #     training_data = training_data.drop('s19', 1)
    training_data_values = training_data.values * SCALE
    np.save('normalized_train_data.npy', training_data_values)
    training_data.to_csv('normalized_train_data.csv')
    train = training_data_values
    # Last column is the RUL label; everything before it is the feature matrix.
    x_train = train[:, :train.shape[1] - 1]
    y_train = train[:, train.shape[1] - 1] * RESCALE
    print("training in augmentation", x_train.shape, y_train.shape)
    if plot:
        plt.plot(y_train, label="train")
        plt.figure()
        plt.plot(x_train)
        plt.title("train")
        # plt.figure()
        # plt.plot(y_train)
        # plt.title("test")
        plt.show()
def analyse_Data(dataset, files=None, plot=True, min_max=False):
    '''
    Generate pre-processed data according to the given dataset

    :param dataset: choose between "phm" for PHM 2008 dataset or "cmapss" for CMAPSS data set with file number
    :param files: Only for CMAPSS dataset to select sub dataset
    :param plot: switch to plot the RUL labels of the processed data
    :param min_max: switch to allow min-max normalization
    :return: None; the selected loader writes its processed files to disk
    '''
    if dataset == "phm":
        training_data, testing_data, phm_testing_data = get_PHM08Data(save=True)
        # Split each array into features (all but last column) and RUL label.
        x_phmtrain = training_data[:, :training_data.shape[1] - 1]
        y_phmtrain = training_data[:, training_data.shape[1] - 1]
        x_phmtest = testing_data[:, :testing_data.shape[1] - 1]
        y_phmtest = testing_data[:, testing_data.shape[1] - 1]
        print("phmtrain", x_phmtrain.shape, y_phmtrain.shape)
        # FIX: this line previously printed the *train* shapes again
        # (x_phmtrain/y_phmtrain) under the "phmtest" label.
        print("phmtest", x_phmtest.shape, y_phmtest.shape)
        print("phmtest", phm_testing_data.shape)
        if plot:
            plt.figure()
            plt.plot(y_phmtrain, label="phmtrain_y")
            plt.figure()
            plt.plot(y_phmtest, label="phmtest_y")
            plt.show()
    elif dataset == "cmapss":
        training_data, testing_data, training_pd, testing_pd = get_CMAPSSData(save=True, files=files,
                                                                              min_max_norm=min_max)
        x_train = training_data[:, :training_data.shape[1] - 1]
        y_train = training_data[:, training_data.shape[1] - 1]
        print("training", x_train.shape, y_train.shape)
        x_test = testing_data[:, :testing_data.shape[1] - 1]
        y_test = testing_data[:, testing_data.shape[1] - 1]
        print("testing", x_test.shape, y_test.shape)
        if plot:
            plt.plot(y_train, label="train")
            plt.figure()
            plt.plot(y_test, label="test")
            plt.figure()
            plt.plot(x_train)
            plt.title("train: FD00" + str(files[0]))
            plt.figure()
            plt.plot(y_train)
            plt.title("train: FD00" + str(files[0]))
            plt.show()
def combine_FD001_and_FD003():
    """
    Combine the CMAPSS FD001 and FD003 sub-datasets into single train,
    test and RUL DataFrames.

    FD003 engine ids are offset by the maximum id found in FD001 so every
    trajectory keeps a unique engine_id, and FD003 rows are re-indexed to
    follow the FD001 rows.

    :return: (train, test, RUL) combined DataFrames.
    """
    column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
    # --- training files ---
    train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
    train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
    train_FD001.columns = column_name
    train_FD003.columns = column_name
    FD001_max_engine_id = max(train_FD001['engine_id'])
    train_FD003['engine_id'] = train_FD003['engine_id'] + FD001_max_engine_id
    train_FD003.index = range(len(train_FD001), len(train_FD001) + len(train_FD003))
    train_FD001_FD003 = pd.concat([train_FD001, train_FD003])
    # --- testing files ---
    test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
    test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
    test_FD001.columns = column_name
    test_FD003.columns = column_name
    FD001_max_engine_id = max(test_FD001['engine_id'])
    test_FD003['engine_id'] = test_FD003['engine_id'] + FD001_max_engine_id
    test_FD003.index = range(len(test_FD001), len(test_FD001) + len(test_FD003))
    test_FD001_FD003 = pd.concat([test_FD001, test_FD003])
    # --- RUL files ---
    RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
    RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
    RUL_FD001.columns = ['RUL']
    RUL_FD003.columns = ['RUL']
    RUL_FD003.index = range(len(RUL_FD001), len(RUL_FD001) + len(RUL_FD003))
    # FIX: this previously concatenated the *test* frames again
    # (pd.concat([test_FD001, test_FD003])), so the returned RUL data was
    # the test sensor data, not the RUL labels.
    RUL_FD001_FD003 = pd.concat([RUL_FD001, RUL_FD003])
    return train_FD001_FD003, test_FD001_FD003, RUL_FD001_FD003
from typing import Tuple, List, Any, Optional
from .connection import Connection
from .drone_connection import DroneConnection
from .window_manager import LocationWindowManager, TimeWindowManager
from .waypoint_manager import WaypointManager
from .event_manager import EventManager
from common.protocol import Protocol
from common.headings import *
from enum import Enum, auto
import numpy as np
import datetime
import time
"""
메인 파일
전체 시스템 합친 부분
"""
class Events(Enum):
    """Event types dispatched through the System's EventManager."""
    MissionStart = auto()       # mission-start command arrived from the server
    MissionFinished = auto()    # all waypoints visited (or mission aborted)
    TakeOff = auto()            # request the drone to take off
    Landing = auto()            # request the drone to land
    WaypointReached = auto()    # current GPS position is within the active waypoint
    WaypointsReceived = auto()  # encoded waypoint list arrived from the server
    LWinParamReceived = auto()  # location-window parameter arrived
    TWinParamReceived = auto()  # time-window parameter arrived
    WMngParamReceived = auto()  # waypoint-manager parameter arrived
    GPSReceived = auto()        # GPS fix arrived from the drone
    EmergencyLanding = auto()   # a safety window was violated
    LocationCheckTime = auto()  # periodic location-window check is due
class System:
    """Top-level mission controller.

    Wires the ground-station connection, the drone connection, the
    location/time window managers and the waypoint manager together
    through an event bus, and runs the receive-and-process loop at a
    fixed cycle rate.
    """

    def __init__(
        self,
        connection: Connection,
        drone_connection: DroneConnection,
        location_manager: LocationWindowManager,
        time_manager: TimeWindowManager,
        waypoint_manager: WaypointManager,
        desired_cps: Optional[float] = 20,
    ) -> None:
        self.connection = connection
        self.drone_connection = drone_connection
        self.location_manager = location_manager
        self.time_manager = time_manager
        self.waypoint_manager = waypoint_manager
        self.event_manager = EventManager()
        self.protocol = Protocol()
        # How many cycles of receiving & processing per second.
        self.desired_cps = desired_cps
        self.desired_time_per_cycle = 1 / self.desired_cps
        self.current_position: Optional[np.ndarray] = None
        self.direction_vector: Optional[np.ndarray] = None
        self.mission_started = False
        events = [
            Events.MissionFinished,
            Events.MissionStart,
            Events.TakeOff,
            Events.Landing,
            Events.WaypointReached,
            Events.WaypointsReceived,
            Events.LWinParamReceived,
            Events.TWinParamReceived,
            Events.WMngParamReceived,
            Events.GPSReceived,
            Events.EmergencyLanding,
            Events.LocationCheckTime,
        ]
        for e in events:
            self.event_manager.register(e)
        self.setup()
        self.running = True

    def setup(self):
        """
        Register the event callback handlers.
        """
        self.event_manager.subscribe(
            Events.MissionStart, self.on_mission_start)
        self.event_manager.subscribe(
            Events.MissionFinished, self.on_mission_finished)
        self.event_manager.subscribe(Events.TakeOff, self.on_takeoff)
        self.event_manager.subscribe(Events.Landing, self.on_landing)
        self.event_manager.subscribe(
            Events.LWinParamReceived, self.on_l_win_param_received)
        self.event_manager.subscribe(
            Events.TWinParamReceived, self.on_t_win_param_received)
        self.event_manager.subscribe(
            Events.WMngParamReceived, self.on_w_mng_param_received)
        self.event_manager.subscribe(Events.GPSReceived, self.on_gps_received)
        self.event_manager.subscribe(
            Events.WaypointReached, self.on_waypoint_reached)
        self.event_manager.subscribe(
            Events.WaypointsReceived, self.on_waypoints_received)
        self.event_manager.subscribe(
            Events.LocationCheckTime, self.on_location_check_time)
        # FIX: EmergencyLanding was published (on time/location window
        # violations in on_waypoint_reached / on_location_check_time) but
        # never subscribed, so on_emergency_landing was never invoked and
        # the drone never received the emergency-landing command.
        self.event_manager.subscribe(
            Events.EmergencyLanding, self.on_emergency_landing)

    # --- Event publishing ---

    def run(self):
        """
        Main loop: poll both connections, decode incoming data with the
        protocol and dispatch it to event handlers, publish time-driven
        events, and sleep out the remainder of each cycle.
        """
        last_check = None
        try:
            while self.running:
                start = datetime.datetime.now()

                # Data from the ground station.
                encoded = self.connection.get()
                if encoded:
                    data_list = self.protocol.decode(encoded)
                    self.receive_from_connection(data_list)

                # Data from the drone.
                encoded = self.drone_connection.get()
                if encoded:
                    data_list = self.protocol.decode(encoded)
                    self.receive_from_drone(data_list)

                if self.mission_started:
                    # Trigger the periodic location-window check.
                    if last_check:
                        elapsed = (datetime.datetime.now() - last_check).total_seconds()
                        if elapsed >= self.location_manager.check_period:
                            self.event_manager.publish(Events.LocationCheckTime)
                            last_check = datetime.datetime.now()
                    else:
                        last_check = datetime.datetime.now()
                    if self.waypoint_manager.mission_finished():
                        self.event_manager.publish(Events.MissionFinished)
                else:
                    self.send_running_state()

                # Sleep out whatever is left of this cycle's time budget.
                elapsed = (datetime.datetime.now() - start).total_seconds()
                remaining = self.desired_time_per_cycle - elapsed
                if remaining < 0:
                    print("Warning: System is running slower than desired cps")
                else:
                    time.sleep(remaining)
            self.connection.clean()
        except KeyboardInterrupt:
            self.connection.clean()
            raise

    def receive_from_connection(self, data_list: List[Tuple[str, Any]]):
        """Map each (name, value) pair from the server to its event."""
        for name, value in data_list:
            #print(f"RP Received {name} from server")
            if name in L_WIN_PARAM_NAMES:
                self.event_manager.publish(
                    Events.LWinParamReceived, name, value)
            if name in T_WIN_PARAM_NAMES:
                self.event_manager.publish(
                    Events.TWinParamReceived, name, value)
            if name in WAYPOINT_PARAM_NAMES:
                self.event_manager.publish(
                    Events.WMngParamReceived, name, value)
            if name == WAYPOINTS:
                self.event_manager.publish(Events.WaypointsReceived, value)
            if name == MISSION_START:
                self.event_manager.publish(Events.MissionStart)
                self.event_manager.publish(Events.TakeOff)

    def receive_from_drone(self, data_list: List[Tuple[str, Any]]):
        """Map each (name, value) pair from the drone to its event."""
        for name, value in data_list:
            #print(f"RP Received {name} from drone")
            if name == GPS_POSITION:
                self.event_manager.publish(Events.GPSReceived, value)

    # --- Event handling ---

    def on_l_win_param_received(self, name: str, value: Any):
        print(f"Setting Location Window Parameter: {name} - {value}")
        setattr(self.location_manager, name, value)

    def on_t_win_param_received(self, name: str, value: Any):
        print(f"Setting Time Window Parameter: {name} - {value}")
        setattr(self.time_manager, name, value)

    def on_w_mng_param_received(self, name: str, value: Any):
        print(f"Setting Waypoint Parameter: {name} - {value}")
        setattr(self.waypoint_manager, name, value)

    def on_waypoints_received(self, encoded_waypoints: bytes):
        # decode string -> numpy array
        waypoints = self.protocol.decode_waypoints(encoded_waypoints)
        self.waypoint_manager.set_mission(waypoints)

    def on_gps_received(self, encoded_gps_position: bytes):
        gps_position = self.protocol.decode_point(encoded_gps_position)
        self.current_position = gps_position
        #print(f"Current gps: {self.current_position}")
        if not self.mission_started:
            return
        if self.waypoint_manager.waypoint_reached(self.current_position):
            self.event_manager.publish(Events.WaypointReached, self.current_position)

    def on_waypoint_reached(self, gps: np.ndarray):
        print(f"Waypoint {self.waypoint_manager.current_waypoint()} reached")
        last_wp = self.waypoint_manager.last_waypoint()
        # Time-window violation: report state 2 and request emergency landing.
        if not self.time_manager.in_range(gps, last_wp):
            data = self.protocol.encode(2, RUNNING_STATE)
            self.drone_connection.send(data)
            self.event_manager.publish(Events.EmergencyLanding)
        self.time_manager.update_check_time()
        self.location_manager.record_time()
        self.waypoint_manager.to_next_waypoint()
        if self.waypoint_manager.current_waypoint_index == len(self.waypoint_manager.waypoints):
            self.event_manager.publish(Events.MissionFinished)
            return
        self.send_waypoint()

    def on_location_check_time(self):
        if self.current_position is None:
            print("No current location given, passing location check")
            return
        self.direction_vector = self.waypoint_manager.waypoint2vector(
            self.waypoint_manager.current_waypoint(), self.current_position)
        # Location-window violation: report state 1 and request emergency landing.
        if not self.location_manager.in_range(
            self.current_position,
            self.waypoint_manager.last_waypoint(),
            self.direction_vector,
            self.waypoint_manager.current_waypoint()
        ):
            data = self.protocol.encode(1, RUNNING_STATE)
            self.drone_connection.send(data)
            self.event_manager.publish(Events.EmergencyLanding)

    def on_mission_start(self):
        print("Starting Mission")
        self.mission_started = True
        self.waypoint_manager.start_mission()
        self.location_manager.record_time()
        self.send_waypoint()
        data = self.protocol.encode(self.time_manager.desired_velocity, DESIRED_VELOCITY)
        self.drone_connection.send(data)

    def on_mission_finished(self):
        print("Mission Finished")
        self.mission_started = False
        self.on_landing()
        self.stop()

    def on_emergency_landing(self):
        print("Emergency landing")
        data = self.protocol.encode(1, EMERGENCY_LANDING)
        self.drone_connection.send(data)
        self.stop()

    def send_running_state(self):
        data = self.protocol.encode(0, RUNNING_STATE)
        self.drone_connection.send(data)

    def stop(self):
        """Stop the main loop and release both connections."""
        self.running = False
        self.drone_connection.clean()
        self.connection.clean()

    def send_waypoint(self):
        waypoint = self.waypoint_manager.current_waypoint()
        print(f"Sending waypoint: {waypoint}")
        encoded = self.protocol.encode_point(waypoint)
        data = self.protocol.encode(encoded, NEXT_WAYPOINT)
        self.drone_connection.send(data)

    def on_takeoff(self):
        print("Sending takeoff to drone")
        data = self.protocol.encode(1, TAKEOFF)
        self.drone_connection.send(data)

    def on_landing(self):
        print("Sending landing to drone")
        data = self.protocol.encode(1, LAND)
        self.drone_connection.send(data)
import itertools
from tempfile import NamedTemporaryFile
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from bokeh.plotting import figure, output_file
def get_validation_plot(true_value, prediction):
    """Build a bokeh scatter plot of predictions against true values.

    A dashed y = x reference line is drawn; points on it are perfect
    predictions. Both axes share the same range so the line is diagonal.
    """
    # Route bokeh output to a throwaway file so nothing persists.
    output_file(NamedTemporaryFile().name)
    lo = min(min(true_value), min(prediction))
    hi = max(max(true_value), max(prediction))
    axis_range = [lo, hi]
    plot = figure(width=800, height=800,
                  x_range=axis_range, y_range=axis_range)
    # Same label styling on both axes.
    for axis, label in ((plot.xaxis, "True value"), (plot.yaxis, "Prediction")):
        axis.axis_label = label
        axis.axis_label_text_font_size = '14pt'
        axis.major_label_text_font_size = '12pt'
    plot.circle(true_value, prediction)
    # Perfect-prediction reference line.
    plot.line(axis_range, axis_range, line_dash='dashed', color='gray')
    return plot
def get_confusion_matrix_plot(confusion_matrix,
                              target_names,
                              title='Confusion matrix',
                              cmap=None,
                              normalize=True):
    """
    given a sklearn confusion matrix (cm), make a nice plot

    Arguments
    ---------
    confusion_matrix: confusion matrix from sklearn.metrics.confusion_matrix

    target_names: given classification classes such as [0, 1, 2]
                  the class names, for example: ['high', 'medium', 'low']

    title:        the text to display at the top of the matrix

    cmap:         the gradient of the values displayed from matplotlib.pyplot.cm
                  see http://matplotlib.org/examples/color/colormaps_reference.html
                  plt.get_cmap('jet') or plt.cm.Blues

    normalize:    If False, plot the raw numbers
                  If True, plot the proportions (cells divided by the grand
                  total, so all cells sum to 1)

    Usage
    -----
    plot_confusion_matrix(cm = cm,  # confusion matrix created by
                                    # sklearn.metrics.confusion_matrix
                          normalize = True,  # show proportions
                          target_names = y_labels_vals,  # list of names of the classes
                          title = best_estimator_name)  # title of graph

    Citiation
    ---------
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
    """
    if normalize:
        confusion_matrix = confusion_matrix.astype('float') / confusion_matrix.sum()
    accuracy = np.trace(confusion_matrix) / np.sum(confusion_matrix).astype('float')
    misclass = 1 - accuracy
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    plt.figure(figsize=(8, 8))
    if normalize:
        # Proportions lie in [0, 1]; pin the color scale to that range.
        plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
    else:
        # FIX: raw counts were previously drawn with vmin=0, vmax=1, which
        # clipped every count > 1 to the same saturated color. Let
        # matplotlib autoscale the color range for raw numbers.
        plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)
    matplotlib.rcParams.update({'font.size': 20})
    # Use a luminance threshold to pick readable text color per cell.
    thresh = confusion_matrix.max() / 1.5 if normalize else confusion_matrix.max() / 2
    for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if confusion_matrix[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if confusion_matrix[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label', fontsize=16)
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass), fontsize=16)
    return plt
# Copyright 2020 Virginia Polytechnic Institute and State University.
""" OpenFOAM I/O.
These functions are accesible from ``dafi.random_field.foam``.
"""
# standard library imports
import numpy as np
import os
import shutil
import re
import tempfile
import subprocess
# Number of independent components stored for each OpenFOAM field type
# (scalar, vector, symmetric tensor, full tensor).
NDIM = {'scalar': 1,
        'vector': 3,
        'symmTensor': 6,
        'tensor': 9}
# get mesh properties by running OpenFOAM shell utilities
def get_number_cells(foam_case='.'):
    """ Get the number of cells in an OpenFOAM case.

    Requires OpenFOAM to be sourced, since it calls the ``checkMesh``
    utility.

    Parameters
    ----------
    foam_case : str
        Name (path) of OF case directory.

    Returns
    -------
    ncells : int
        Number of cells.
    """
    command = f"checkMesh -case {foam_case} -time '0' | grep ' cells:'"
    raw = subprocess.check_output(command, shell=True)
    # The grepped line looks like "    cells:  12345"; keep the part
    # after the colon.
    ncells = raw.decode("utf-8").replace('\n', '').split(':')[1].strip()
    return int(ncells)
def _check0(foam_case):
""" Create OpenFOAM 0 directory if it does not exist.
Copies the 0.orig folder if the 0 folder does not exist.
Parameters
----------
foam_case : str
Name (path) of OF case directory.
Returns
-------
dirCreated : bool
Whether the 0 directory was created. If it exists already
returns *'False'*.
"""
dst = os.path.join(foam_case, '0')
dirCreated = False
if not os.path.isdir(dst):
src = os.path.join(foam_case, '0.orig')
shutil.copytree(src, dst)
dirCreated = True
return dirCreated
def _checkMesh(foam_case):
""" Create OpenFOAM mesh if it does not exist.
Requires OpenFOAM to be sourced. Calls ``blockMesh`` utility.
Parameters
----------
foam_case : str
Name (path) of OF case directory.
Returns
-------
meshCreated : bool
Whether the mesh was created. If mesh existed already returns
*`False`*.
"""
meshdir = os.path.join(foam_case, 'constant', 'polyMesh')
meshCreated = False
if not os.path.isdir(meshdir):
bash_command = "blockMesh -case " + foam_case
subprocess.call(bash_command, shell=True)
meshCreated = True
return meshCreated
def get_cell_centres(foam_case='.', group='internalField', keep_file=False):
    """ Get the coordinates of cell centers in an OpenFOAM case.

    Requires OpenFOAM to be sourced.

    Parameters
    ----------
    foam_case : str
        Name (path) of OF case directory.
    group : str
        Name of the group to read: *'internalField'* or name of a
        specific boundary.
    keep_file : bool
        Whether to keep the file (C) generated by the OpenFOAM
        post-processing utility.

    Returns
    -------
    coords : ndarray
        Cell center coordinates (x, y, z).
        *dtype=float*, *ndim=2*, *shape=(ncells, 3)*
    """
    timedir = '0'
    # Create the '0' directory and the mesh if missing; remember whether
    # they were created here so they can be removed again afterwards.
    del0 = _check0(foam_case)
    delMesh = _checkMesh(foam_case)
    bash_command = "postProcess -func writeCellCentres " + \
        "-case " + foam_case + f" -time '{timedir}' " + "> /dev/null"
    subprocess.call(bash_command, shell=True)
    # Remove the per-component files; only the combined 'C' file is used.
    os.remove(os.path.join(foam_case, timedir, 'Cx'))
    os.remove(os.path.join(foam_case, timedir, 'Cy'))
    os.remove(os.path.join(foam_case, timedir, 'Cz'))
    file = os.path.join(foam_case, timedir, 'C')
    coords = read_cell_centres(file, group=group)
    if not keep_file:
        os.remove(file)
    # Undo any directories this call created.
    if del0:
        shutil.rmtree(os.path.join(foam_case, timedir))
    if delMesh:
        shutil.rmtree(os.path.join(foam_case, 'constant', 'polyMesh'))
    return coords
def get_cell_volumes(foam_case='.', keep_file=False):
    """ Get the volume of each cell in an OpenFOAM case.

    Requires OpenFOAM to be sourced.

    Parameters
    ----------
    foam_case : str
        Name (path) of OF case directory.
    keep_file : bool
        Whether to keep the file (V) generated by the OpenFOAM
        post-processing utility.

    Returns
    -------
    vol : ndarray
        Cell volumes.
        *dtype=float*, *ndim=1*, *shape=(ncells)*
    """
    timedir = '0'
    # Create the '0' directory and the mesh if missing; remember whether
    # they were created here so they can be removed again afterwards.
    del0 = _check0(foam_case)
    delMesh = _checkMesh(foam_case)
    bash_command = "postProcess -func writeCellVolumes " + \
        "-case " + foam_case + f" -time '{timedir}' " + "> /dev/null"
    subprocess.call(bash_command, shell=True)
    file = os.path.join(foam_case, timedir, 'V')
    vol = read_cell_volumes(file)
    if not keep_file:
        os.remove(file)
    # Undo any directories this call created.
    if del0:
        shutil.rmtree(os.path.join(foam_case, timedir))
    if delMesh:
        shutil.rmtree(os.path.join(foam_case, 'constant', 'polyMesh'))
    return vol
def get_neighbors(foam_case='.'):
    """ Get the cell-to-cell connectivity of an OpenFOAM mesh.

    Reads the mesh ``owner`` and ``neighbour`` files and builds, for
    each cell, the list of cells that share an internal face with it.

    Parameters
    ----------
    foam_case : str
        Name (path) of OF case directory.

    Returns
    -------
    connectivity : dict
        Maps each cell id (0-based int) to the list of ids of its
        neighbouring cells.
    """
    # create mesh if needed
    timedir = '0'
    del0 = _check0(foam_case)
    delMesh = _checkMesh(foam_case)
    ncells = get_number_cells(foam_case)
    # read mesh files
    # NOTE(review): group=' ' makes read_field split the file at the first
    # space, i.e. effectively read from near the start of the file.
    mesh_dir = os.path.join(foam_case, 'constant', 'polyMesh')
    owner = read_scalar_field(os.path.join(mesh_dir, 'owner'), ' ')
    neighbour = read_scalar_field(os.path.join(mesh_dir, 'neighbour'), ' ')
    # keep internal faces only (the owner list also covers boundary faces,
    # which have no neighbour entry)
    nintfaces = len(neighbour)
    owner = owner[:nintfaces]
    # Each internal face links its owner and neighbour cells in both
    # directions.
    connectivity = {cellid: [] for cellid in range(ncells)}
    for iowner, ineighbour in zip(owner, neighbour):
        connectivity[int(iowner)].append(int(ineighbour))
        connectivity[int(ineighbour)].append(int(iowner))
    # Undo any directories this call created.
    if del0:
        shutil.rmtree(os.path.join(foam_case, timedir))
    if delMesh:
        shutil.rmtree(os.path.join(foam_case, 'constant', 'polyMesh'))
    return connectivity
# read fields
def read_field(file, ndim, group='internalField'):
    """ Read the field values from an OpenFOAM field file.

    Can read either the internal field or a specified boundary.

    Parameters
    ----------
    file : str
        Name of OpenFOAM field file.
    ndim : int
        Field dimension (e.g. 1 for scalar field).
    group : str
        Name of the group to read: *'internalField'* or name of specific
        boundary.

    Returns
    -------
    data : ndarray
        Field values on specified group.
        *dtype=float*, *ndim=2*, *shape=(ncells, ndim)*
    """
    with open(file, 'r') as handle:
        text = handle.read()
    # Only search the portion of the file after the requested group.
    text = text.partition(group)[2]
    # Regex for one (optionally signed / fractional / exponential) float.
    integer_part = r"([+-]?[\d]+)"
    fraction_part = r"([\.][\d]*)"
    exponent_part = r"([Ee][+-]?[\d]+)"
    floatn = f"{integer_part}{{1}}{fraction_part}?{exponent_part}?"
    if ndim == 1:
        # Scalars: one float per line.
        body = f"({floatn}\\n)+"
    else:
        # Vectors/tensors: ndim space-separated floats in parentheses per line.
        body = r'(\(' + f"({floatn}" + r"(\ ))" + \
            f"{{{ndim-1}}}{floatn}" + r"\)\n)+"
    # The value list is wrapped in "(\n ... )".
    match = re.compile(r'\(\n' + body + r'\)').search(text)
    raw = match.group().replace('(', '').replace(')', '').replace('\n', ' ')
    values = np.fromstring(raw.strip(), dtype=float, sep=' ')
    return values.reshape([-1, ndim]) if ndim > 1 else values
def read_scalar_field(file, group='internalField'):
    """ Read an OpenFOAM scalar field file.

    Convenience wrapper around :py:meth:`read_field` with ``ndim=1``.
    See :py:meth:`read_field` for more information.
    """
    return read_field(file, NDIM['scalar'], group=group)
def read_vector_field(file, group='internalField'):
    """ Read an OpenFOAM vector field file.

    Convenience wrapper around :py:meth:`read_field` with ``ndim=3``.
    See :py:meth:`read_field` for more information.
    """
    return read_field(file, NDIM['vector'], group=group)
def read_symmTensor_field(file, group='internalField'):
    """ Read an OpenFOAM symmTensor field file.

    Convenience wrapper around :py:meth:`read_field` with ``ndim=6``.
    See :py:meth:`read_field` for more information.
    """
    return read_field(file, NDIM['symmTensor'], group=group)
def read_tensor_field(file, group='internalField'):
    """ Read an OpenFOAM tensor field file.

    Convenience wrapper around :py:meth:`read_field` with ``ndim=9``.
    See :py:meth:`read_field` for more information.
    """
    return read_field(file, NDIM['tensor'], group=group)
def read_cell_centres(file='C', group='internalField'):
    """ Read an OpenFOAM mesh coordinate file.

    The 'C' file is produced by the ``writeCellCentres`` post-processing
    utility. See :py:meth:`read_field` for more information.
    """
    return read_vector_field(file, group=group)
def read_cell_volumes(file='V'):
    """ Read an OpenFOAM mesh volume file.

    The 'V' file is produced by the ``writeCellVolumes`` post-processing
    utility. See :py:meth:`read_field` for more information.
    """
    return read_scalar_field(file, group='internalField')
# read entire field file
def _read_logo(content):
""" Read info from logo in file header. """
def _read_logo(pat):
pattern = pat + r":\s+\S+"
data_str = re.compile(pattern).search(content).group()
return data_str.split(':')[1].strip()
info = {}
for pat in ['Version', 'Website']:
info[pat] = _read_logo(pat)
return info
def _read_header_info(content):
""" Read info from info section in file header. """
def _read_header(pat):
pattern = pat + r"\s+\S+;"
data_str = re.compile(pattern).search(content).group()
return data_str.split(pat)[1][:-1].strip()
info = {}
foam_class = _read_header('class').split('Field')[0].split('vol')[1]
info['foam_class'] = foam_class[0].lower() + foam_class[1:]
info['name'] = _read_header('object')
try:
info['location'] = _read_header('location')
except AttributeError:
info['location'] = None
return info
def read_field_file(file):
    """ Read a complete OpenFOAM field file.
    This includes header information not just the field values.
    The output can be directly used to write the file again, e.g.
    .. code-block:: python
        >>> content = read_field_file(file)
        >>> write_field_file(**content).
    Parameters
    ----------
    file : str
        Name (path) of OpenFOAM field file.
    Returns
    -------
    info : dictionary
        The contents of the file organized with the same structure as
        the inputs to the :py:meth:`write_field_file` method. See
        :py:meth:`write_field_file` for more information.
    """
    with open(file, 'r') as f:
        content = f.read()
    info = {}
    info['file'] = file
    # read logo (OpenFOAM version and website from the header banner)
    logo_info = _read_logo(content)
    info['foam_version'] = logo_info['Version']
    info['website'] = logo_info['Website']
    # read header (FoamFile dictionary: class, object, location)
    header_info = _read_header_info(content)
    info['foam_class'] = header_info['foam_class']
    info['name'] = header_info['name']
    info['location'] = header_info['location']
    # dimension: grab the whole "dimensions ..." line; [:-1] drops the ';'
    pattern = r"dimensions\s+.+"
    data_str = re.compile(pattern).search(content).group()
    info['dimensions'] = data_str.split('dimensions')[1][:-1].strip()
    # internalField: uniform/nonuniform
    internal = {}
    pattern = r'internalField\s+\S+\s+.+'
    data_str = re.compile(pattern).search(content).group()
    if data_str.split()[1] == 'uniform':
        internal['uniform'] = True
        # drop trailing ';', strip parentheses, parse numbers
        tmp = data_str.split('uniform')[1].strip()[:-1]
        tmp = tmp.replace('(', '').replace(')', '').split()
        internal['value'] = np.array([float(i) for i in tmp])
    else:
        # nonuniform: delegate to the list reader for this field class
        internal['uniform'] = False
        internal['value'] = read_field(file, NDIM[info['foam_class']])
    info['internal_field'] = internal
    # boundaries: type and value(optional)
    # value can be uniform/nonuniform scalar/(multi)
    boundaries = []
    # everything after 'boundaryField', with the opening '{' stripped
    bcontent = content.split('boundaryField')[1].strip()[1:].strip()
    # one match per "name { ... }" block
    # NOTE(review): the character class assumes boundary blocks contain no
    # nested dictionaries or characters outside [\w\s\n();.<>-+] — confirm
    # for files with e.g. codedFixedValue boundaries.
    pattern = r'\w+' + r'[\s\n]*' + r'\{' + r'[\w\s\n\(\);\.\<\>\-+]+' + r'\}'
    boundaries_raw = re.compile(pattern).findall(bcontent)
    for bc in boundaries_raw:
        ibc = {}
        # name: the text before the opening '{'
        pattern = r'[\w\s\n]+' + r'\{'
        name = re.compile(pattern).search(bc).group()
        name = name.replace('{', '').strip()
        ibc['name'] = name
        # type  (local variable shadows the `type` builtin — kept as-is)
        pattern = r'type\s+\w+;'
        type = re.compile(pattern).search(bc).group()
        type = type.split('type')[1].replace(';', '').strip()
        ibc['type'] = type
        # value (optional entry)
        if 'value' in bc:
            value = {}
            v = bc.split('value')[1]
            if v.split()[0] == 'uniform':
                value['uniform'] = True
                v = v.split('uniform')[1]
                # strip closing brace, ';' and parentheses, then parse
                tmp = v.replace('}', '').replace(';', '').strip()
                tmp = tmp.replace('(', '').replace(')', '').split()
                if len(tmp) == 1:
                    # scalar uniform value
                    value['data'] = float(tmp[0])
                else:
                    # vector/tensor uniform value
                    value['data'] = np.array([float(i) for i in tmp])
            else:
                value['uniform'] = False
                value['data'] = read_field(
                    file, NDIM[info['foam_class']], group=ibc['name'])
        else:
            value = None
        ibc['value'] = value
        boundaries.append(ibc)
    info['boundaries'] = boundaries
    return info
def read_header(file):
    """ Read the information in an OpenFOAM file header.

    Parameters
    ----------
    file : str
        Name (path) of OpenFOAM file.

    Returns
    -------
    info : dictionary
        The information in the file header.
    """
    with open(file, 'r') as f:
        content = f.read()
    logo_info = _read_logo(content)
    header_info = _read_header_info(content)
    return {
        'file': file,
        'foam_version': logo_info['Version'],
        'website': logo_info['Website'],
        'foam_class': header_info['foam_class'],
        'name': header_info['name'],
        'location': header_info['location'],
    }
def read_controlDict():
    """ Read an OpenFOAM controlDict file.

    Not implemented yet.
    """
    # TODO: read header logo, header info, and dictionary content.
    raise NotImplementedError()
# write fields
def foam_sep():
    """ Write a separation comment line.

    Used by :py:meth:`write_field_file`.

    Returns
    -------
    sep : str
        Separation comment string
    """
    stars = '* ' * 37
    return '\n// ' + stars + '//'
def foam_header_logo(foam_version, website):
    """ Write the logo part of the OpenFOAM file header.

    Used by :py:meth:`write_field_file`.

    Parameters
    ----------
    foam_version : str
        OpenFOAM version to write in the header.
    website : str
        OpenFOAM website to write in the header.

    Returns
    -------
    header : str
        OpenFOAM file header logo.
    """
    def header_line(str1, str2):
        return f'\n| {str1:<26}| {str2:<48}|'

    start = '/*' + '-'*32 + '*- C++ -*' + '-'*34 + '*\\'
    end = '\n\\*' + '-'*75 + '*/'
    logo = ['=========',
            r'\\ / F ield',
            r' \\ / O peration',
            r' \\ / A nd',
            r' \\/ M anipulation',
            ]
    info = ['',
            'OpenFOAM: The Open Source CFD Toolbox',
            f'Version: {foam_version}',
            f'Website: {website}',
            '',
            ]
    # assemble: banner start, one formatted row per logo/info pair, banner end
    rows = [header_line(left, right) for left, right in zip(logo, info)]
    return start + ''.join(rows) + end
def foam_header_info(name, foamclass, location=None, isfield=True):
    """ Write the info part of the OpenFOAM file header.

    Used by :py:meth:`write_field_file`.

    Parameters
    ----------
    name : str
        Field name (e.g. 'p').
    foamclass : str
        OpenFOAM class (e.g. 'scalar').
    location : str
        File location (optional).
    isfield : bool
        If True, wrap ``foamclass`` as 'vol<Class>Field'.

    Returns
    -------
    header : str
        OpenFOAM file header info.
    """
    def header_line(str1, str2):
        return f'\n    {str1:<12}{str2};'

    VERSION = '2.0'
    FORMAT = 'ascii'
    if isfield:
        # e.g. 'scalar' -> 'volScalarField'
        foamclass = 'vol' + foamclass[0].capitalize() + foamclass[1:] + 'Field'
    parts = ['FoamFile\n{',
             header_line('version', VERSION),
             header_line('format', FORMAT),
             header_line('class', foamclass)]
    if location is not None:
        parts.append(header_line('location', f'"{location}"'))
    parts.append(header_line('object', name))
    parts.append('\n}')
    return ''.join(parts)
def write_controlDict(content, foam_version, website, ofcase=None):
    """ Write an OpenFOAM controlDict file.

    Parameters
    ----------
    content : dict
        Content of cotrolDict.
    foam_version : str
        OpenFOAM version to write in the header.
    website : str
        OpenFOAM website to write in the header.
    ofcase : str
        OpenFOAM directory. File will be written to
        <ofcase>/system/controlDict. If *None* file will be written in
        current directory.

    Returns
    -------
    file_loc : str
        Location (absolute path) of file written.
    file_content : str
        Content written to file.
    """
    # assemble the file content
    parts = [foam_header_logo(foam_version, website) + '\n',
             foam_header_info('controlDict', 'dictionary', 'system', False),
             '\n' + foam_sep() + '\n']
    for key, val in content.items():
        parts.append(f'\n{key:<16} {val};')
    parts.append('\n' + foam_sep())
    file_str = ''.join(parts)
    # resolve destination and write
    if ofcase is None:
        file = 'controlDict'
    else:
        file = os.path.join(ofcase, 'system', 'controlDict')
    with open(file, 'w') as f:
        f.write(file_str)
    return os.path.abspath(file), file_str
def write_field_file(name, foam_class, dimensions, internal_field,
                     boundaries, foam_version, website, location=None,
                     file=None):
    """ Write an OpenFOAM field file.
    Parameters
    ----------
    name : str
        Field name (e.g. 'p').
    foam_class : str
        OpenFOAM class (e.g. 'scalar').
    dimensions : str or list
        Field dimensions in SI units using OpenFOAM convention
        (e.g. '[0 2 -2 0 0 0 0]').
        Alternatively can be a list of 7 or 3 integers. If three (MLT)
        zeros will be appended at the end, i.e. [M L T 0 0 0 0].
    internal_field : dictionary
        Dictionary containing internal field information. See note below
        for more information.
    boundaries : list
        List containing one dictionary per boundary. Each dictionary
        contains the required information on that boundary. See note
        below for more information.
    foam_version : str
        OpenFOAM version to write in the header.
    website : str
        OpenFOAM website to write in the header.
    location : str
        File location (optional).
    file : str
        File name (path) where to write field. If *'None'* will write in
        current directory using the field name as the file name.
    Returns
    -------
    file_loc : str
        Location (absolute path) of file written.
    file_content : str
        Content written to file.
    Note
    ----
    **internal_field**
        The ``internal_field`` dictionary must have the following
        entries:
        * **uniform** - *bool*
            Whether the internal field has uniform or nonuniform
            value.
        * **value** - *float* or *ndarray*
            The uniform or nonuniform values of the internal field.
    **boundaries**
        Each boundary dictionary in the ``boundaries`` list must have
        the following entries:
        * **name** - *str*
            Boundary name.
        * **type** - *str*
            Boundary type.
        * **value** - *dict* (optional)
            Dictionary with same entries as the *'internal_field'*
            dictionary.
    """
    def _foam_field(uniform, value, foamclass=None):
        # Format a field value entry: 'uniform ...' or
        # 'nonuniform List<class> N ( ... )'.
        def _list_to_foamstr(inlist):
            outstr = ''
            for l in inlist:
                outstr += f'{l} '
            return outstr.strip()

        def _foam_nonuniform(data):
            field = f'{len(data)}\n('
            if data.ndim == 1:
                for d in data:
                    field += f'\n{d}'
            elif data.ndim == 2:
                # one '(...)' row per vector/tensor entry
                for d in data:
                    field += f'\n({_list_to_foamstr(d)})'
            else:
                raise ValueError('"data" cannot have more than 2 dimensions.')
            field += '\n)'
            return field

        if uniform:
            # list type: write as '(v0 v1 ...)'
            if isinstance(value, (list, np.ndarray)):
                if isinstance(value, np.ndarray):
                    value = np.squeeze(value)
                    if value.ndim != 1:
                        err_msg = 'Uniform data should have one dimension.'
                        raise ValueError(err_msg)
                value = f'({_list_to_foamstr(value)})'
            field = f'uniform {value}'
        else:
            if foamclass is None:
                raise ValueError('foamclass required for nonuniform data.')
            value = np.squeeze(value)
            field = f'nonuniform List<{foamclass}>'
            field += '\n' + _foam_nonuniform(value)
        return field

    def _foam_dimensions(dimensions):
        # Accept a preformatted string or a list of 3 (MLT) / 7 integers.
        if isinstance(dimensions, list):
            if len(dimensions) == 3:
                # BUG FIX: the original did
                #     dimensions = dimensions.append([0, 0, 0, 0])
                # `list.append` returns None (and would have nested the
                # list), so formatting crashed. Concatenate to pad the
                # remaining four dimensions with zeros instead.
                dimensions = dimensions + [0, 0, 0, 0]
            elif len(dimensions) != 7:
                raise ValueError('Dimensions must be length 3 or 7.')
            dim_str = ''  # renamed from `str`: do not shadow the builtin
            for idim in dimensions:
                dim_str += f'{idim} '
            dimensions = f'[{dim_str.strip()}]'
        return dimensions
    # create string
    file_str = foam_header_logo(foam_version, website)
    file_str += '\n' + foam_header_info(name, foam_class, location)
    file_str += '\n' + foam_sep()
    file_str += '\n'*2 + f'dimensions {_foam_dimensions(dimensions)};'
    file_str += '\n'*2 + 'internalField '
    file_str += _foam_field(
        internal_field['uniform'], internal_field['value'], foam_class) + ';'
    file_str += '\n\nboundaryField\n{'
    for bc in boundaries:
        file_str += '\n' + ' '*4 + bc["name"] + '\n' + ' '*4 + '{'
        file_str += '\n' + ' '*8 + 'type' + ' '*12 + bc["type"] + ';'
        write_value = False
        if 'value' in bc:
            if bc['value'] is not None:
                write_value = True
        if write_value:
            data = _foam_field(
                bc['value']['uniform'], bc['value']['data'], foam_class)
            file_str += '\n' + ' '*8 + 'value' + ' '*11 + data + ';'
        file_str += '\n' + ' '*4 + '}'
    file_str += '\n}\n' + foam_sep()
    # write to file
    if file is None:
        file = name
    with open(file, 'w') as f:
        f.write(file_str)
    return os.path.abspath(file), file_str
def write(version, fieldname, internal_field, boundaries, location=None,
          file=None):
    """ Write an OpenFOAM field file for one of the pre-specified fields.
    The implemented fields are: 'p', 'k', 'epsilon', 'omega', 'nut',
    'Cx', 'Cy', 'Cz', 'V', 'U', 'C', 'Tau', 'grad(U)'.
    This calls :py:meth:`write_field_file` but requires less information.
    Parameters
    ----------
    version : str
        OpenFOAM version. Must be length 1 (e.g. '7') or 4 (e.g. '1912').
    fieldname : str
        One of the pre-defined fields.
    internal_field : dictionary
        See :py:meth:`write_field_file`.
    boundaries : list
        See :py:meth:`write_field_file`.
    location : str
        See :py:meth:`write_field_file`.
    file : str
        See :py:meth:`write_field_file`.
    Raises
    ------
    ValueError
        If ``fieldname`` is not one of the pre-defined fields. (The
        original fell off the end of the if/elif chain and died with an
        UnboundLocalError instead.)
    """
    def field_info(fieldname):
        def get_foam_class(fieldname):
            scalarlist = ['p', 'k', 'epsilon', 'omega', 'nut', 'Cx', 'Cy',
                          'Cz', 'V']
            if fieldname in scalarlist:
                foam_class = 'scalar'
            elif fieldname in ['U', 'C']:
                foam_class = 'vector'
            elif fieldname == 'Tau':
                foam_class = 'symmTensor'
            elif fieldname == 'grad(U)':
                foam_class = 'tensor'
            else:
                # explicit failure for unsupported field names
                raise ValueError(f"Unknown field name: '{fieldname}'")
            return foam_class
        def get_dimensions(fieldname):
            # 1 - Mass (kg)
            # 2 - Length (m)
            # 3 - Time (s)
            # 4 - Temperature (K)
            # 5 - Quantity (mol)
            # 6 - Current (A)
            # 7 - Luminous intensity (cd)
            if fieldname == 'U':
                dimensions = '[ 0 1 -1 0 0 0 0]'
            elif fieldname in ['p', 'k', 'Tau']:
                dimensions = '[0 2 -2 0 0 0 0]'
            elif fieldname == 'phi':
                dimensions = '[0 3 -1 0 0 0 0]'
            elif fieldname == 'epsilon':
                dimensions = '[0 2 -3 0 0 0 0]'
            elif fieldname in ['omega', 'grad(U)']:
                dimensions = '[0 0 -1 0 0 0 0]'
            elif fieldname == 'nut':
                dimensions = '[0 2 -1 0 0 0 0]'
            elif fieldname in ['C', 'Cx', 'Cy', 'Cz']:
                dimensions = '[0 1 0 0 0 0 0]'
            elif fieldname == 'V':
                dimensions = '[0 3 0 0 0 0 0]'
            else:
                # explicit failure for unsupported field names
                raise ValueError(
                    f"No dimensions defined for field: '{fieldname}'")
            return dimensions
        field = {'name': fieldname,
                 'class': get_foam_class(fieldname),
                 'dimensions': get_dimensions(fieldname)}
        return field
    def version_info(version):
        # string ('7' or '1912'): '.org' releases use a single digit,
        # '.com' releases use a four-digit 'vYYMM' version.
        website = 'www.openfoam.'
        if len(version) == 1:
            version += '.x'
            website += 'org'
        elif len(version) == 4:
            version = 'v' + version
            website += 'com'
        foam = {'version': version,
                'website': website}
        return foam
    foam = version_info(version)
    field = field_info(fieldname)
    file_path, file_str = write_field_file(
        foam_version=foam['version'],
        website=foam['website'],
        name=field['name'],
        foam_class=field['class'],
        dimensions=field['dimensions'],
        internal_field=internal_field,
        boundaries=boundaries,
        location=location,
        file=file)
    return file_path, file_str
def write_p(version, internal, boundaries, location=None, file=None):
    """ Write a pressure field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'p', internal, boundaries,
                 location=location, file=file)
def write_U(version, internal, boundaries, location=None, file=None):
    """ Write a velocity field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'U', internal, boundaries,
                 location=location, file=file)
def write_Tau(version, internal, boundaries, location=None, file=None):
    """ Write a Reynolds stress field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'Tau', internal, boundaries,
                 location=location, file=file)
def write_nut(version, internal, boundaries, location=None, file=None):
    """ Write an eddy viscosity field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'nut', internal, boundaries,
                 location=location, file=file)
def write_k(version, internal, boundaries, location=None, file=None):
    """ Write a turbulent kinetic energy (TKE) field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'k', internal, boundaries,
                 location=location, file=file)
def write_epsilon(version, internal, boundaries, location=None, file=None):
    """ Write a TKE dissipation rate field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'epsilon', internal, boundaries,
                 location=location, file=file)
def write_omega(version, internal, boundaries, location=None, file=None):
    """ Write a TKE specific dissipation rate field file.

    See :func:`~dafi.random_field.foam_utilities.write` for more
    information.
    """
    return write(version, 'omega', internal, boundaries,
                 location=location, file=file)
"""Collection of region proposal related utils
The codes were largely taken from the original py-faster-rcnn
(https://github.com/rbgirshick/py-faster-rcnn), and translated
into TensorFlow. Especially, each part was from the following:
1. _whctrs, _mkanchors, _ratio_enum, _scale_enum, get_anchors
- ${py-faster-rcnn}/lib/rpn/generate_anchors.py
2. inv_boxes, inv_boxes_np
- ${py-faster-rcnn}/lib/fast_rcnn/bbox_transform.py
3. get_shifts, filter_boxes
- ${py-faster-rcnn}/lib/rpn/proposal_layer.py
4. nms, nms_np
- ${py-faster-rcnn}/lib/nms/py_cpu_nms.py
5. get_boxes
- ${py-faster-rcnn}/lib/fast_rcnn/test.py
"""
from __future__ import division
import numpy as np
import tensorflow as tf
try:
    # installation guide:
    # $ git clone git@github.com:deepsense-io/roi-pooling.git
    # $ cd roi-pooling
    # $ vi roi_pooling/Makefile
    # (edit according to https://github.com/tensorflow/tensorflow/
    # issues/13607#issuecomment-335530430)
    # $ python setup.py install
    from roi_pooling.roi_pooling_ops import roi_pooling
except ImportError:
    # FIX: was a bare `except:`, which also swallowed SystemExit/
    # KeyboardInterrupt and any unrelated error raised while importing.
    # Fallback stub keeps the name defined but fails loudly when used.
    def roi_pooling(x, rois, w, h):
        raise AssertionError('`roi_pooling` requires deepsense-ai\'s package.')
# Py2/Py3 compatibility: on Python 3 `xrange` does not exist, so alias
# it to `range`; the rest of the module uses `xrange` throughout.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + (w - 1) / 2
y_ctr = anchor[1] + (h - 1) / 2
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
    """
    Given a vector of widths (ws) and heights (hs) around a center
    (x_ctr, y_ctr), output a set of anchors (windows).
    """
    half_w = (ws - 1) / 2
    half_h = (hs - 1) / 2
    corners = [x_ctr - half_w,
               y_ctr - half_h,
               x_ctr + half_w,
               y_ctr + half_h]
    return tf.stack(corners, axis=-1)
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # keep area (roughly) constant while varying the aspect ratio
    ws = tf.round(tf.sqrt(area / ratios))
    hs = tf.round(ws * ratios)
    return _mkanchors(ws, hs, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate a set of anchors for each scale wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
def get_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2**np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.
    """
    base_anchor = tf.constant(
        [0, 0, base_size - 1, base_size - 1], dtype=tf.float32)
    # first vary the aspect ratio, then scale each ratio anchor
    per_ratio = _ratio_enum(base_anchor, ratios)
    scaled = [_scale_enum(per_ratio[i, :], scales)
              for i in xrange(per_ratio.shape[0])]
    return tf.concat(scaled, axis=0)
def get_shifts(width, height, feat_stride):
    """Return one (x, y, x, y) shift per feature-map cell, in image pixels."""
    xs = tf.range(width) * feat_stride
    ys = tf.range(height) * feat_stride
    grid_x, grid_y = tf.meshgrid(xs, ys)
    flat_x = tf.reshape(grid_x, (-1,))
    flat_y = tf.reshape(grid_y, (-1,))
    shifts = tf.transpose(tf.stack([flat_x, flat_y, flat_x, flat_y], axis=0))
    return tf.cast(shifts, dtype=tf.float32)
def inv_boxes(boxes, deltas, height, width):
    """Apply bbox regression deltas to anchors and clip to the image."""
    # anchor widths/heights and centers (inclusive pixel coordinates)
    aw = boxes[:, 2] - boxes[:, 0] + 1.0
    ah = boxes[:, 3] - boxes[:, 1] + 1.0
    ax = boxes[:, 0] + 0.5 * aw
    ay = boxes[:, 1] + 0.5 * ah
    # predicted centers and sizes
    cx = deltas[:, :, 0] * aw + ax
    cy = deltas[:, :, 1] * ah + ay
    cw = tf.exp(deltas[:, :, 2]) * aw
    ch = tf.exp(deltas[:, :, 3]) * ah

    def _clip(v, bound):
        # clamp to [0, bound]
        return tf.maximum(tf.minimum(v, bound), 0)

    x1 = _clip(cx - 0.5 * cw, width - 1)
    y1 = _clip(cy - 0.5 * ch, height - 1)
    x2 = _clip(cx + 0.5 * cw, width - 1)
    y2 = _clip(cy + 0.5 * ch, height - 1)
    return tf.stack([x1, y1, x2, y2], axis=-1)
def inv_boxes_np(boxes, deltas, im_shape):
    """NumPy twin of `inv_boxes`: apply deltas per class and clip to image."""
    widths = (boxes[:, 2] - boxes[:, 0] + 1)[:, np.newaxis]
    heights = (boxes[:, 3] - boxes[:, 1] + 1)[:, np.newaxis]
    ctr_x = boxes[:, 0][:, np.newaxis] + 0.5 * widths
    ctr_y = boxes[:, 1][:, np.newaxis] + 0.5 * heights
    # deltas are laid out as (dx, dy, dw, dh) repeated per class
    pred_x = deltas[:, 0::4] * widths + ctr_x
    pred_y = deltas[:, 1::4] * heights + ctr_y
    pred_w = np.exp(deltas[:, 2::4]) * widths
    pred_h = np.exp(deltas[:, 3::4]) * heights
    max_x = im_shape[1] - 1
    max_y = im_shape[0] - 1
    x1 = np.clip(pred_x - 0.5 * pred_w, 0, max_x)
    y1 = np.clip(pred_y - 0.5 * pred_h, 0, max_y)
    x2 = np.clip(pred_x + 0.5 * pred_w, 0, max_x)
    y2 = np.clip(pred_y + 0.5 * pred_h, 0, max_y)
    return np.stack([x1, y1, x2, y2], axis=-1)
def filter_boxes(boxes, min_size):
    """Remove all boxes with any side smaller than min_size."""
    widths = boxes[0, :, 2] - boxes[0, :, 0] + 1
    heights = boxes[0, :, 3] - boxes[0, :, 1] + 1
    big_enough = (widths >= min_size) & (heights >= min_size)
    # indices of the surviving boxes
    return tf.where(big_enough)[:, 0]
def nms(proposals, scores, thresh):
    """ Non-maximum suppression as a TensorFlow `while_loop`.

    NOTE(review): this implementation walks proposals in index order and
    keeps the next index whose IoU with all previously-kept boxes is
    below `thresh` — it appears to assume `proposals` are already sorted
    by descending score (py-faster-rcnn sorts before NMS); confirm at the
    call site.
    """
    x1 = proposals[:, 0]
    y1 = proposals[:, 1]
    x2 = proposals[:, 2]
    y2 = proposals[:, 3]
    # inclusive pixel boxes, hence the +1
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    num = tf.range(tf.shape(scores)[0])
    def body(i, keep, screen):
        # intersection of box i with every box
        xx1 = tf.maximum(x1[i], x1)
        yy1 = tf.maximum(y1[i], y1)
        xx2 = tf.minimum(x2[i], x2)
        yy2 = tf.minimum(y2[i], y2)
        w = tf.maximum(0.0, xx2 - xx1 + 1)
        h = tf.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas - inter)
        # survivors: low overlap with box i, positioned at/after i, and
        # not already suppressed in a previous iteration
        bools = (ovr <= thresh) & (num >= i) & (screen)
        # next index to keep = first survivor; if none remain, jump past
        # the end so `condition` terminates the loop
        i = tf.cond(tf.count_nonzero(bools) > 0,
                    lambda: tf.cast(tf.where(bools)[0, 0], tf.int32),
                    lambda: tf.shape(scores)[0])
        return [i, tf.concat([keep, tf.stack([i])], axis=0), bools]
    def condition(i, keep, screen):
        return i < tf.shape(scores)[0]
    i = tf.constant(0)
    i, keep, screen = tf.while_loop(
        condition, body, [i, tf.stack([i]), num >= 0],
        shape_invariants=[tf.TensorShape([]),
                          tf.TensorShape([None, ]),
                          tf.TensorShape([None, ])],
        back_prop=False)
    # drop the final sentinel index appended on the terminating iteration
    return keep[:-1]
def nms_np(dets, thresh):
    """Pure Python NMS baseline."""
    x1, y1, x2, y2, scores = (dets[:, k] for k in range(5))
    # inclusive pixel boxes, hence the +1
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # intersection of the best box with every remaining box
        xx1 = np.maximum(x1[best], x1[rest])
        yy1 = np.maximum(y1[best], y1[rest])
        xx2 = np.minimum(x2[best], x2[rest])
        yy2 = np.minimum(y2[best], y2[rest])
        inter = (np.maximum(0.0, xx2 - xx1 + 1)
                 * np.maximum(0.0, yy2 - yy1 + 1))
        iou = inter / (areas[best] + areas[rest] - inter)
        # survivors: sufficiently low overlap with the kept box
        order = rest[np.where(iou <= thresh)[0]]
    return keep
def get_boxes(outs, im_shape, max_per_image=100, thresh=0.05, nmsth=0.3):
    """Turn raw network output into per-class NMS-filtered detections."""
    # columns: [class scores | per-class box deltas | roi coords (last 4)]
    num_classes = (outs.shape[1] - 4) // 5 - 1
    scores, box_deltas, rois = np.split(outs, [num_classes + 1, -4], axis=1)
    pred_boxes = inv_boxes_np(rois, box_deltas, im_shape)
    objs = []
    total_boxes = 0
    # class 0 is background; start from 1
    for cls in xrange(1, num_classes + 1):
        picked = np.where(scores[:, cls] > thresh)[0]
        cls_scores = scores[picked, cls]
        cls_boxes = pred_boxes[picked, cls]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))
        dets = dets[nms_np(dets, nmsth), :]
        objs.append(dets)
        total_boxes += dets.shape[0]
    if max_per_image > 0 and total_boxes > max_per_image:
        # keep only the strongest max_per_image detections over all classes
        all_scores = np.hstack([obj[:, -1] for obj in objs])
        cutoff = np.sort(all_scores)[-max_per_image]
        objs = [obj[np.where(obj[:, -1] >= cutoff)[0], :] for obj in objs]
    return objs
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import umap
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model, load_model
from sklearn import metrics, mixture
import pickle
from . import linear_assignment as la
class AutoEncoder:
    """AutoEncoder: standard feed forward autoencoder
    Parameters:
    -----------
    input_dim: int
        The number of dimensions of your input
    latent_dim: int
        The number of dimensions which you wish to represent the data as.
    architecture: list
        The structure of the hidden architecture of the networks. for example,
        the n2d default is [500, 500, 2000],
        which means the encoder has the structure of:
        [input_dim, 500, 500, 2000, latent_dim], and the decoder has the structure of:
        [latent_dim, 2000, 500, 500, input dim]
    act: string
        The activation function. Defaults to 'relu'
    x_lambda: callable
        Transform applied to the input before it is fed to the network
        during training; the reconstruction target stays the raw input.
        Defaults to the identity.
    """
    def __init__(self, input_dim, latent_dim, architecture = [500, 500, 2000], act='relu', x_lambda = lambda x: x):
        # Full layer-size sequence: [input_dim, *hidden, latent_dim].
        # NOTE: the mutable default `architecture` is never mutated (the
        # concatenation below builds a new list), so sharing it is safe.
        shape = [input_dim] + architecture + [latent_dim]
        self.x_lambda = x_lambda
        self.dims = shape
        self.act = act
        self.x = Input(shape=(self.dims[0],), name='input')
        self.h = self.x
        n_stacks = len(self.dims) - 1
        # encoder hidden layers (all but the final latent layer)
        for i in range(n_stacks - 1):
            self.h = Dense(
                self.dims[i + 1], activation=self.act, name='encoder_%d' % i)(self.h)
        # `self.encoder` temporarily holds the latent-layer output *tensor*;
        # it is replaced by the encoder *Model* a few lines below.
        self.encoder = Dense(
            self.dims[-1], name='encoder_%d' % (n_stacks - 1))(self.h)
        # decoder: mirror of the encoder back to the input dimension
        self.decoded = Dense(
            self.dims[-2], name='decoder', activation=self.act)(self.encoder)
        for i in range(n_stacks - 2, 0, -1):
            self.decoded = Dense(
                self.dims[i], activation=self.act, name='decoder_%d' % i)(self.decoded)
        self.decoded = Dense(self.dims[0], name='decoder_0')(self.decoded)
        self.Model = Model(inputs=self.x, outputs=self.decoded)
        self.encoder = Model(inputs=self.x, outputs=self.encoder)
    def fit(self, x, batch_size, epochs,
            loss, optimizer, weights,
            verbose, weight_id, patience):
        """fit: train the autoencoder.
        Parameters:
        -------------
        x: array-like
            the data you wish to fit
        batch_size: int
            the batch size
        epochs: int
            number of epochs you wish to run.
        loss: string or function
            loss function. Defaults to mse
        optimizer: string or function
            optimizer. defaults to adam
        weights: string
            if weights is used, the path to the pretrained nn weights.
        verbose: int
            how verbose you wish the autoencoder to be while training.
        weight_id: string
            where you wish to save the weights
        patience: int
            if not None, the early stopping criterion
        """
        if weights is None:  # if there are no weights to load for the encoder, make encoder
            self.Model.compile(
                loss=loss, optimizer=optimizer
            )
            if weight_id is not None:  # if we are going to save the weights somewhere
                if patience is not None:  # if we are going to do early stopping
                    callbacks = [EarlyStopping(monitor='loss', patience=patience),
                                 ModelCheckpoint(filepath=weight_id,
                                                 monitor='loss',
                                                 save_best_only=True)]
                else:
                    callbacks = [ModelCheckpoint(filepath=weight_id,
                                                 monitor='loss',
                                                 save_best_only=True)]
                # fit the model with the callbacks
                self.Model.fit(
                    self.x_lambda(x), x,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=callbacks, verbose=verbose
                )
                # NOTE(review): this overwrites the best-only checkpoint
                # written by ModelCheckpoint with the final-epoch weights —
                # confirm this is intended.
                self.Model.save_weights(weight_id)
            else:  # if we are not saving weights
                if patience is not None:
                    callbacks = [EarlyStopping(monitor='loss', patience=patience)]
                    self.Model.fit(
                        self.x_lambda(x), x,
                        batch_size=batch_size,
                        epochs=epochs,
                        callbacks=callbacks, verbose=verbose
                    )
                else:
                    self.Model.fit(
                        self.x_lambda(x), x,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose
                    )
        else:  # otherwise load weights
            self.Model.load_weights(weights)
class UmapGMM:
    """
    UmapGMM: UMAP embedding followed by Gaussian-mixture clustering.

    Parameters:
    ------------
    n_clusters: int
        the number of clusters
    umap_dim: int
        number of dimensions to find with umap. Defaults to 2
    umap_neighbors: int
        Number of nearest neighbors to use for UMAP. Defaults to 10,
        20 is also a reasonable choice
    umap_min_distance: float
        minimum distance for UMAP. Smaller means tighter clusters,
        defaults to 0
    umap_metric: string or function
        Distance metric for UMAP. defaults to euclidean distance
    random_state: int
        random state
    """
    def __init__(self, n_clusters,
                 umap_dim=2,
                 umap_neighbors=10,
                 umap_min_distance=float(0),
                 umap_metric='euclidean',
                 random_state=0
                 ):
        self.n_clusters = n_clusters
        self.manifold_in_embedding = umap.UMAP(
            random_state=random_state,
            metric=umap_metric,
            n_components=umap_dim,
            n_neighbors=umap_neighbors,
            min_dist=umap_min_distance
        )
        self.cluster_manifold = mixture.GaussianMixture(
            covariance_type='full',
            n_components=n_clusters, random_state=random_state
        )
        # embedding of the most recently fitted data
        self.hle = None

    def _membership(self, hl):
        # Project new data into the learned manifold, then score it
        # against the fitted mixture.
        embedded = self.manifold_in_embedding.transform(hl)
        return self.cluster_manifold.predict_proba(embedded)

    def fit(self, hl):
        """Learn the manifold and fit the mixture on the embedding."""
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        self.cluster_manifold.fit(self.hle)

    def predict(self, hl):
        """Hard cluster assignments for new data."""
        return np.asarray(self._membership(hl).argmax(1))

    def predict_proba(self, hl):
        """Soft cluster memberships for new data."""
        return np.asarray(self._membership(hl))

    def fit_predict(self, hl):
        """Fit on `hl` and return hard assignments for the same data."""
        self.fit(hl)
        probs = self.cluster_manifold.predict_proba(self.hle)
        return np.asarray(probs.argmax(1))
def best_cluster_fit(y_true, y_pred):
    """Match predicted cluster ids to true labels via linear assignment.

    Returns the relabeled predictions, the assignment pairs, and the
    contingency matrix.
    """
    y_true = y_true.astype(np.int64)
    D = max(y_pred.max(), y_true.max()) + 1
    # contingency matrix: w[pred, true] = co-occurrence count
    w = np.zeros((D, D), dtype=np.int64)
    for pred, true in zip(y_pred, y_true):
        w[pred, true] += 1
    ind = la.linear_assignment(w.max() - w)
    # remap each prediction through the optimal assignment
    mapping = {pair[0]: pair[1] for pair in ind}
    best_fit = [mapping[p] for p in y_pred if p in mapping]
    return best_fit, ind, w
def cluster_acc(y_true, y_pred):
    """Clustering accuracy under the best cluster-to-label assignment."""
    _, ind, w = best_cluster_fit(y_true, y_pred)
    matched = sum(w[i, j] for i, j in ind)
    return matched * 1.0 / y_pred.size
def plot(x, y, plot_id, names=None, n_clusters=10):
    """Scatter-plot the first 5000 2-D embedded points, colored by label."""
    viz_df = pd.DataFrame(data=x[:5000])
    viz_df['Label'] = y[:5000]
    if names is not None:
        # map numeric labels to human-readable names
        viz_df['Label'] = viz_df['Label'].map(names)
    plt.subplots(figsize=(8, 5))
    palette = sns.color_palette("hls", n_colors=n_clusters)
    hue_order = sorted(viz_df['Label'].unique())
    sns.scatterplot(x=0, y=1, hue='Label', legend='full',
                    hue_order=hue_order, palette=palette,
                    alpha=.5,
                    data=viz_df)
    legend = plt.legend(bbox_to_anchor=(-.1, 1.00, 1.1, .5), loc="lower left",
                        markerfirst=True, mode="expand", borderaxespad=0,
                        ncol=n_clusters + 1, handletextpad=0.01, )
    # hide the legend title entry
    legend.texts[0].set_text("")
    plt.ylabel("")
    plt.xlabel("")
    plt.tight_layout()
    plt.title(plot_id, pad=40)
class n2d:
    """
    n2d: autoencoder embedding followed by manifold learning + clustering.
    Parameters:
    ------------
    autoencoder: initialized class, such as AutoEncoder
        the autoencoder used to embed the data. Must expose an `encoder`
        model (with a `predict` method) and a `fit` method with the
        keyword arguments used in `n2d.fit` below.
    manifold_learner: initialized class, such as UmapGMM
        the manifold learner and clustering algorithm. Class should have
        at least fit and predict methods (and expose `cluster_manifold`,
        `manifold_in_embedding` and `hle` attributes as UmapGMM does).
        Needs to be initialized
    """
    def __init__(self,autoencoder, manifold_learner):
        self.autoencoder = autoencoder
        self.manifold_learner = manifold_learner
        # convenience aliases into the two components
        self.encoder = self.autoencoder.encoder
        self.clusterer = self.manifold_learner.cluster_manifold
        self.manifolder = self.manifold_learner.manifold_in_embedding
        # populated by predict / predict_proba / fit_predict
        self.preds = None
        self.probs = None
        self.hle = None
    def fit(self, x, batch_size=256, epochs=1000,
            loss='mse', optimizer='adam', weights=None,
            verbose=1, weight_id=None,
            patience=None):
        """fit: train the autoencoder.
        Parameters:
        -----------------
        x: array-like
            the input data
        batch_size: int
            the batch size
        epochs: int
            number of epochs you wish to run.
        loss: string or function
            loss function. Defaults to mse
        optimizer: string or function
            optimizer. defaults to adam
        weights: string
            if weights is used, the path to the pretrained nn weights.
        verbose: int
            how verbose you wish the autoencoder to be while training.
        weight_id: string
            where you wish to save the weights
        patience: int or None
            if patience is None, do nothing special, otherwise patience is the
            early stopping criteria
        """
        self.autoencoder.fit(x=x,
                             batch_size=batch_size,
                             epochs=epochs,
                             loss=loss,
                             optimizer=optimizer, weights=weights,
                             verbose=verbose, weight_id=weight_id, patience=patience)
        # embed the data, then fit the manifold learner on the embedding
        hl = self.encoder.predict(x)
        self.manifold_learner.fit(hl)
    def predict(self, x):
        """Hard cluster assignments for `x` (requires a fitted model)."""
        hl = self.encoder.predict(x)
        self.preds = self.manifold_learner.predict(hl)
        self.hle = self.manifold_learner.hle
        return(self.preds)
    def predict_proba(self, x):
        """Soft cluster memberships for `x` (requires a fitted model)."""
        hl = self.encoder.predict(x)
        self.probs = self.manifold_learner.predict_proba(hl)
        self.hle = self.manifold_learner.hle
        return(self.probs)
    def fit_predict(self, x, batch_size=256, epochs=1000,
                    loss='mse', optimizer='adam', weights=None,
                    verbose=1, weight_id=None,
                    patience=None):
        """Train the autoencoder, fit the manifold learner, and return
        hard cluster assignments for `x`. Same parameters as `fit`."""
        self.autoencoder.fit(x=x,
                             batch_size=batch_size,
                             epochs=epochs,
                             loss=loss,
                             optimizer=optimizer, weights=weights,
                             verbose=verbose, weight_id=weight_id, patience=patience)
        hl = self.encoder.predict(x)
        self.preds = self.manifold_learner.fit_predict(hl)
        self.hle = self.manifold_learner.hle
        return(self.preds)
    def assess(self, y):
        """Score the stored predictions (`self.preds`) against labels `y`.
        Call `predict` or `fit_predict` first.
        Returns (accuracy, NMI, ARI), each rounded to 5 decimals."""
        y = np.asarray(y)
        acc = np.round(cluster_acc(y, self.preds), 5)
        nmi = np.round(metrics.normalized_mutual_info_score(y, self.preds), 5)
        ari = np.round(metrics.adjusted_rand_score(y, self.preds), 5)
        return(acc, nmi, ari)
    def visualize(self, y, names, n_clusters=10):
        """
        visualize: visualize the embedding and clusters
        Uses the stored embedding/predictions, so call `predict` or
        `fit_predict` first.
        Parameters:
        -----------
        y: true clusters/labels, if they exist. Numeric
        names: the names of the clusters, if they exist
        n_clusters: number of clusters.
        """
        y = np.asarray(y)
        y_pred = np.asarray(self.preds)
        hle = self.hle
        # one plot colored by the true labels, one by the (relabeled)
        # predicted clusters
        plot(hle, y, 'n2d', names, n_clusters=n_clusters)
        y_pred_viz, _, _ = best_cluster_fit(y, y_pred)
        plot(hle, y_pred_viz, 'n2d-predicted', names, n_clusters=n_clusters)
def save_n2d(obj, encoder_id, manifold_id):
    '''
    save_n2d: save n2d objects
    --------------------------
    description: Saves the encoder to an h5 file and the manifold learner/clusterer
    to a pickle.
    parameters:
    - obj: the fitted n2d object
    - encoder_id: what to save the encoder as
    - manifold_id: what to save the manifold learner as
    '''
    obj.encoder.save(encoder_id)
    # Use a context manager so the pickle file handle is flushed and
    # closed deterministically (the original left it open).
    with open(manifold_id, 'wb') as fh:
        pickle.dump(obj.manifold_learner, fh)
def load_n2d(encoder_id, manifold_id):  # loaded models can only predict currently
    '''
    load_n2d: load n2d objects
    --------------------------
    description: loads fitted n2d objects from files. Note you CANNOT train
    these objects further, the only method which will perform correctly is `.predict`
    parameters:
    - encoder_id: where the encoder is stored
    - manifold_id: where the manifold learner/clusterer is stored
    '''
    # Close the pickle file handle deterministically (the original leaked it).
    with open(manifold_id, 'rb') as fh:
        man = pickle.load(fh)
    out = n2d(10, man)
    # compile=False: the encoder is only used for inference here, so no
    # loss/optimizer state is needed.
    out.encoder = load_model(encoder_id, compile=False)
    return out
import numpy as np

import condition
import output
import solver

if __name__ == '__main__':
    # Grid allocation: md x nd nodes (interior m x n plus ghost cells).
    md = 202
    nd = 202
    u = np.zeros((md, nd))        # x-velocity
    v = np.zeros((md, nd))        # y-velocity
    p = np.zeros((md, nd))        # pressure
    u_old = np.zeros((md, nd))    # previous-step x-velocity
    v_old = np.zeros((md, nd))    # previous-step y-velocity
    xp = np.zeros(md)             # node x-coordinates
    yp = np.zeros(nd)             # node y-coordinates
    # setup conditions
    nue, density, length, height, time, inlet_velocity, outlet_pressure\
        = condition.physical_c()
    dx, dy, dt, m, n, istep_max\
        = condition.grid_c(xp, yp, nue, length, height, time, inlet_velocity)
    output.grid(xp, yp, m, n, dt)
    istep = 0
    time = istep * dt
    condition.initial_c(p, u, v, inlet_velocity, outlet_pressure, m, n)
    condition.boundary_c(p, u, v, inlet_velocity, outlet_pressure, m, n,
                         xp, yp, height, length)
    # print initial conditions on commandline
    # output.solution(p, u, v, m, n)
    ''' MAC algorithm start '''
    for istep in range(1, istep_max + 1):
        time = istep * dt
        print(f"--- time_steps = {istep}, -- time = {time}")
        # Snapshot the velocity field before this step's solves.
        # Vectorized slice copy replaces the original Python-level
        # (m+2)x(n+2) double loop, which was O(m*n) interpreter work.
        u_old[:m + 2, :n + 2] = u[:m + 2, :n + 2]
        v_old[:m + 2, :n + 2] = v[:m + 2, :n + 2]
        solver.solve_p(p, u, v, u_old, v_old, nue, density, dx, dy, dt, m, n,
                       xp, yp, height, length)
        solver.solve_u(p, u, density, dx, dt, m, n)
        solver.solve_v(p, v, density, dy, dt, m, n)
        condition.boundary_c(p, u, v, inlet_velocity, outlet_pressure, m, n,
                             xp, yp, height, length)
        # output.solution(p, u, v, m, n)
        output.divergent(p, u, v, dx, dy, m, n, dt)
    ''' MAC algorithm end '''
    # print conditions (recall)
    # condition.physical_c()
    # condition.grid_c(xp, yp, nue, length, height, time, inlet_velocity)
    # print solutions
    # output.solution(p, u, v, m, n)
    output.solution_post(p, u, v, m, n, dt)
    output.paraview(p, xp, yp, m, n, u, v, dt)
# -*- coding: utf-8 -*-
"""Discrete wavelet transform."""
import math
import numpy as np
import pandas as pd
from sktime.datatypes import convert
from sktime.transformations.base import BaseTransformer
__author__ = "Vincent Nicholson"
class DWTTransformer(BaseTransformer):
    """Discrete Wavelet Transform Transformer.

    Performs the Haar wavelet transformation on a time series.

    Parameters
    ----------
    num_levels : int, number of levels to perform the Haar wavelet
        transformation.
    """

    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": False,  # is this an instance-wise transform?
        "X_inner_mtype": "nested_univ",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "None",  # which mtypes do _fit/_predict support for y?
        "fit_is_empty": True,
    }

    def __init__(self, num_levels=3):
        self.num_levels = num_levels
        super(DWTTransformer, self).__init__()

    def _transform(self, X, y=None):
        """Transform X and return a transformed version.

        private _transform containing core logic, called from transform

        Parameters
        ----------
        X : nested pandas DataFrame of shape [n_instances, n_features]
            each cell of X must contain pandas.Series
            Data to fit transform to
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation

        Returns
        -------
        Xt : nested pandas DataFrame of shape [n_instances, n_features]
            each cell of Xt contains pandas.Series
            transformed version of X
        """
        self._check_parameters()

        Xt = pd.DataFrame()
        for col in X.columns:
            # Convert one column of the nested frame to a flat 2d array
            # (one row per instance).
            arr = convert(
                pd.DataFrame(X[col]),
                from_type="nested_univ",
                to_type="numpyflat",
                as_scitype="Panel",
            )
            transformed = np.asarray(self._extract_wavelet_coefficients(arr))
            # Re-wrap each instance's coefficient vector as a Series.
            Xt[col] = [pd.Series(inst) for inst in transformed]
        return Xt

    def _extract_wavelet_coefficients(self, data):
        """Extract wavelet coefficients of a 2d array of time series.

        The coefficients correspond to the wavelet coefficients
        from levels 1 to num_levels followed by the approximation
        coefficients of the highest level.
        """
        num_levels = self.num_levels
        res = []

        for x in data:
            if num_levels == 0:
                res.append(x)
            else:
                coeffs = []
                current = x
                approx = None
                for _ in range(num_levels):
                    approx = self._get_approx_coefficients(current)
                    wav_coeffs = self._get_wavelet_coefficients(current)
                    current = approx
                    # Build the output back-to-front, then reverse once at
                    # the end, so the final order is: level-1 .. level-L
                    # detail coefficients, then level-L approximations.
                    wav_coeffs.reverse()
                    coeffs.extend(wav_coeffs)
                approx.reverse()
                coeffs.extend(approx)
                coeffs.reverse()
                res.append(coeffs)

        return res

    def _check_parameters(self):
        """Check the values of parameters passed to DWT.

        Throws
        ------
        ValueError or TypeError if a parameters input is invalid.
        """
        if isinstance(self.num_levels, int):
            if self.num_levels <= -1:
                # Fixed message: the original concatenation produced
                # "the valueof at least 0" (missing space).
                raise ValueError("num_levels must have the value of at least 0")
        else:
            # Fixed message: the original produced "Found'int' instead."
            # (missing space after "Found").
            raise TypeError(
                "num_levels must be an 'int'. Found '"
                + type(self.num_levels).__name__
                + "' instead."
            )

    def _get_approx_coefficients(self, arr):
        """Get the approximate coefficients at a given level."""
        # A length-1 series cannot be paired; pass it through unchanged.
        if len(arr) == 1:
            return [arr[0]]
        return [
            (arr[2 * i] + arr[2 * i + 1]) / math.sqrt(2)
            for i in range(len(arr) // 2)
        ]

    def _get_wavelet_coefficients(self, arr):
        """Get the wavelet coefficients at a given level."""
        # if length is 1, just return the list back
        if len(arr) == 1:
            return [arr[0]]
        return [
            (arr[2 * i] - arr[2 * i + 1]) / math.sqrt(2)
            for i in range(len(arr) // 2)
        ]
# This function is copied from https://github.com/Rubikplayer/flame-fitting
'''
Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved.
This software is provided for research purposes only.
By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license
More information about SMPL is available here http://smpl.is.tue.mpg.
For comments or questions, please email us at: smpl@tuebingen.mpg.de
About this file:
================
This module defines the mapping of joint-angles to pose-blendshapes.
Modules included:
- posemap:
computes the joint-to-pose blend shape mapping given a mapping type as input
'''
import chumpy as ch
import numpy as np
import cv2
class Rodrigues(ch.Ch):
    # Differentiable axis-angle -> rotation-matrix node for chumpy:
    # wraps cv2.Rodrigues so the conversion participates in autodiff.
    dterms = 'rt'  # 'rt' (axis-angle vector) is the differentiable term

    def compute_r(self):
        # Forward value: cv2.Rodrigues returns (rotation_matrix, jacobian);
        # take the 3x3 rotation matrix.
        return cv2.Rodrigues(self.rt.r)[0]

    def compute_dr_wrt(self, wrt):
        # Derivative of the rotation matrix w.r.t. the axis-angle params.
        # The .T transposes OpenCV's jacobian layout — NOTE(review):
        # presumably to match chumpy's expected orientation; verify.
        if wrt is self.rt:
            return cv2.Rodrigues(self.rt.r)[1].T
def lrotmin(p):
    """Map a pose vector to pose-blendshape features: the flattened
    (R(theta_j) - I) residual of every joint except the global rotation.

    Accepts either a plain numpy pose vector (numeric path) or a chumpy
    expression (symbolic, differentiable path).
    """
    if isinstance(p, np.ndarray):
        # Numeric path: drop the first 3 entries (global rotation), then
        # stack each joint's rotation-matrix residual.
        joints = p.ravel()[3:].reshape((-1, 3))
        residuals = [
            (cv2.Rodrigues(np.array(axis_angle))[0] - np.eye(3)).ravel()
            for axis_angle in joints
        ]
        return np.concatenate(residuals).ravel()
    # Symbolic path: coerce to (n_joints, 3), skip the global rotation row.
    if p.ndim != 2 or p.shape[1] != 3:
        p = p.reshape((-1, 3))
    p = p[1:]
    return ch.concatenate(
        [(Rodrigues(axis_angle) - ch.eye(3)).ravel() for axis_angle in p]
    ).ravel()
def posemap(s):
    """Return the joint-angle -> pose-blendshape mapping named `s`.

    Only 'lrotmin' is supported; any other name raises.
    """
    if s != 'lrotmin':
        raise Exception('Unknown posemapping: %s' % (str(s),))
    return lrotmin
# -*- coding: utf-8 -*-
"""
@author: A. Popova
"""
import numpy as np
#old_settings = np.seterr(all='ignore')
def fact(x):
    """Factorial of int(x), accumulated as a float.

    Returns the integer 1 for x < 1 (empty product), matching the
    original behavior.
    """
    total = 1
    for k in range(1, int(x) + 1):
        total *= float(k)
    return total
# Load the submatrix: each row holds m interleaved (real, imag) pairs.
data_ = np.genfromtxt(r'in_out/Submatrix.dat')
m = len(data_)          # matrix dimension (number of modes)
Nu = int(10*m)          # number of phase sectors sampled on [0, 2*pi)
dnu = 2*np.pi/Nu        # sector width
# Rebuild the complex m x m matrix M from the interleaved columns.
M = np.zeros((m, m), dtype=np.complex128)
real_part = []
imaginary_part = []
for i in range(m):
    for k in range(0,2*m,2):
        real_part.append(data_[i,k])
for i in range(m):
    for k in range(1,2*m+1,2):
        imaginary_part.append(data_[i,k])
for i in range(m**2):
    M[i//m,i%m] = real_part[i] + 1j* imaginary_part[i]
# Import Minors (precomputed determinant minors per phase sector,
# stored as interleaved real/imag columns per file).
data_minors = np.genfromtxt(r'in_out/Minors0-1.dat')
data_minors2 = np.genfromtxt(r'in_out/Minors2.dat')
data_minors3 = np.genfromtxt(r'in_out/Minors3.dat')
data_minors4 = np.genfromtxt(r'in_out/Minors4.dat')
# Number of order-2/3/4 minors: binomial coefficients C(m, k).
p2 = round(fact(m)/(fact(m-2)*2))
p3 = round(fact(m)/(fact(m - 3)*fact(3)))
p4 = round(fact(m)/(fact(m - 4)*fact(4)))
# Per-sector minor values: order 0 through 4.
Z_v_0 = np.zeros((Nu),dtype=np.complex128)
Z_v_1 = np.zeros((m, Nu),dtype=np.complex128)
Z_v_2 = np.zeros((p2, Nu),dtype=np.complex128)
Z_v_3 = np.zeros((p3, Nu),dtype=np.complex128)
Z_v_4 = np.zeros((p4, Nu),dtype=np.complex128)
# Unpack interleaved (real, imag) columns into the complex arrays.
for j in range(Nu):
    Z_v_0[j] = data_minors[j,1:2] + 1j*data_minors[j,2:3]
for j in range(Nu):
    for n in range(0,2*m,2):
        Z_v_1[n//2,j] = data_minors[j,int(3+n)] + 1j*data_minors[j,int(4+n)]
for j in range(Nu):
    for n in range(0,2*p2,2):
        Z_v_2[n//2,j] = data_minors2[j,int(1+n)] + 1j*data_minors2[j,int(2+n)]
for j in range(Nu):
    for n in range(0,2*p3,2):
        Z_v_3[n//2,j] = data_minors3[j,int(1+(n))] + 1j*data_minors3[j,int(2+(n))]
for j in range(Nu):
    for n in range(0,2*p4,2):
        Z_v_4[n//2,j] = data_minors4[j,int(1+(n))] + 1j*data_minors4[j,int(2+(n))]
# Discrete Fourier transform over sectors, normalized by Nu.
Z_v_0f = np.fft.fft(Z_v_0)/Nu
Z_v_1f = np.fft.fft(Z_v_1)/Nu
Z_v_2f = np.fft.fft(Z_v_2)/Nu
Z_v_3f = np.fft.fft(Z_v_3)/Nu
Z_v_4f = np.fft.fft(Z_v_4)/Nu
# a range of computed sectors
Nuk = Nu
# Output arrays: mean, variance and central moments 3-5 per sector.
# (m5_ is allocated but never filled below.)
mean_ = np.zeros(Nuk)
disp_ = np.zeros(Nuk)
m3_ = np.zeros(Nuk)
m4_ = np.zeros(Nuk)
m5_ = np.zeros(Nuk)
def moment_formula(n, *args):
    """Central moment of order n (2, 3 or 4) from factorial-moment sums.

    Parameters
    ----------
    n : int
        Moment order; anything other than 2/3/4 returns 0.
    *args : lists of factorial-moment sums; only the last one is used
        (kept as *args for backward compatibility with existing callers).
        moments[0] is the mean, moments[1..] are the higher-order sums.

    Returns
    -------
    The central moment value, or 0 when n is unsupported or no args given.

    Note: the original n == 3 branch read the module globals `mean_`/`nu`
    instead of moments[0]; every call site passes mean_[nu] as moments[0],
    so using moments[0] is behavior-identical while removing the hidden
    dependency on the enclosing loop variable.
    """
    m = 0
    if not args:
        return m
    # Original looped `for x in args: moments = x`, i.e. kept the last arg.
    moments = args[-1]
    if n == 2:
        m = moments[0] + 2*moments[1] - moments[0]**2
    if n == 3:
        m = (moments[0] + 6*moments[1] + 6*moments[2]
             - 3*moments[0]*(moments[0] + 2*moments[1]) + 2*moments[0]**3)
    if n == 4:
        m_2 = moments[0] + 2*moments[1]
        m_3 = moments[0] + 6*moments[1] + 6*moments[2]
        m_4 = moments[0] + 14*moments[1] + 36*moments[2] + 24*moments[3]
        m = m_4 - 4*m_3*moments[0] - 3*m_2**2 + 12*m_2*moments[0]**2 - 6*moments[0]**4
    return m
# Per-sector accumulators for the pair/triple/quadruple correlation sums.
# (n_ijklp_v is allocated but never filled in this section.)
n_ij_v = np.zeros(Nuk)
n_ijk_v = np.zeros(Nuk)
n_ijkl_v = np.zeros(Nuk)
n_ijklp_v = np.zeros(Nuk)
# Enumerate strictly-increasing index pairs/triples/quadruples of the m
# modes; list position matches the column order of the minors files.
ind_2 = []
ind_3 = []
ind_4 = []
for i in range(m):
    for j in range(i+1, m):
        ind_2.append([i,j])
for i in range(m):
    for j in range(i+1, m):
        for k in range(j+1, m):
            ind_3.append([i,j,k])
for i in range(m):
    for j in range(i+1, m):
        for k in range(j+1, m):
            for l in range(k+1, m):
                ind_4.append([i,j,k,l])
# First moment (mean) per sector from the single-index minors.
for z in range(Nuk):
    for j in range(m):
        mean_[z] += 1 - (Z_v_1f[j,z]/Z_v_0f[z]).real
# Pair sums and the variance per sector; i_ walks the pair minors in the
# same order in which ind_2 was built.
for nu in range(Nuk):
    i_ = 0
    for i in range(m):
        for j in range(i+1, m):
            n_ij_v[nu] += 1 - (( Z_v_1f[j,nu] + Z_v_1f[i,nu] - Z_v_2f[i_,nu])/Z_v_0f[nu]).real
            i_ += 1
    disp_[nu] = moment_formula(2, [mean_[nu], n_ij_v[nu]])
# Triple sums (inclusion-exclusion over singles/pairs/triples) and the
# third moment per sector.
for nu in range(Nuk):
    i_= 0
    for i in range(m):
        for j in range(i+1, m):
            for k in range(j+1, m):
                z1 = ind_2.index([i,j])
                z2 = ind_2.index([i,k])
                z3 = ind_2.index([j,k])
                n_ijk_v[nu] += 1 - ((Z_v_1f[i,nu] + Z_v_1f[j,nu] + Z_v_1f[k,nu] - Z_v_2f[z1,nu] - Z_v_2f[z2,nu] - Z_v_2f[z3,nu] + Z_v_3f[i_,nu])/Z_v_0f[nu]).real
                i_ += 1
    m3_[nu] = moment_formula(3, [mean_[nu], n_ij_v[nu], n_ijk_v[nu]])
# Quadruple sums and the fourth moment per sector.
# NOTE(review): the repeated ind_2.index(...) / ind_3.index(...) lookups
# are linear scans; a dict keyed by the index tuple would make these
# loops much faster. Left unchanged here.
for nu in range(Nuk):
    i_= 0
    for i in range(m):
        for j in range(i+1, m):
            for k in range(j+1, m):
                for l in range(k+1, m):
                    z1 = ind_2.index([i,j])
                    z2 = ind_2.index([i,k])
                    z3 = ind_2.index([i,l])
                    z4 = ind_2.index([j,k])
                    z5 = ind_2.index([k,l])
                    z6 = ind_2.index([j,l])
                    h1 = ind_3.index([i,j,k])
                    h2 = ind_3.index([j,k,l])
                    h3 = ind_3.index([i,k,l])
                    h4 = ind_3.index([i,j,l])
                    n_ijkl_v[nu] += 1 - ((Z_v_1f[i,nu] + Z_v_1f[j,nu] + Z_v_1f[k,nu] + Z_v_1f[l,nu] - Z_v_2f[z1,nu] - Z_v_2f[z2,nu] - Z_v_2f[z3,nu] - Z_v_2f[z4,nu] - Z_v_2f[z5,nu] - Z_v_2f[z6,nu] + Z_v_3f[h1,nu] + Z_v_3f[h2,nu] + Z_v_3f[h3,nu] + Z_v_3f[h4,nu] - Z_v_4f[i_,nu])/Z_v_0f[nu]).real
                    i_ += 1
    m4_[nu] = moment_formula(4, [mean_[nu], n_ij_v[nu], n_ijk_v[nu], n_ijkl_v[nu]])
# Export Moments: one tab-separated row per sector.
with open( r"in_out/Moments.dat", 'w') as ouf:
    for nu in range(Nuk):
        ouf.write( str(mean_[nu].real) + '\t' + str(disp_[nu].real) +'\t' + str(m3_[nu].real) +'\t' + str(m4_[nu].real) +'\t' )
        # NOTE(review): nu < (Nu+1) holds for every nu in range(Nuk), so a
        # newline is written after every row (the condition is dead code).
        if nu < (Nu+1):
            ouf.write('\n')
# -*- coding: utf-8 -*-
"""
Interfaz gráfica para el movimiento armónico de un edificio, de forma similar
a un terremoto.
@author: Anthony Gutiérrez
"""
import numpy as np
import tkinter as tk
from matplotlib.animation import FuncAnimation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
from scipy.constants import g as gravedad
from tkinter.messagebox import showerror
# Initialize the main window
window = tk.Tk()
window.title("Movimiento de un edificio")
window.state("zoomed")
# Initialize the input-data frame (left side of the window)
frame = tk.Frame(window)
frame.pack(side=tk.LEFT, padx=10)
# Default values for the input data
base = 0.75     # half-base of the building (b = B/2), meters
altura = 5.71   # half-height of the building (h = H/2), meters
masa = 164200.0  # mass, kg
radio = 5.76     # radius, meters
amplitud = 0.06  # amplitude of the harmonic ground displacement, meters
amort = 0.5      # damping coefficient of the displacement (gamma)
periodo = 0.5    # period of the displacement, seconds
# Helper to declare one labeled input row
def generar_dato(frame, row, text, default):
    """Add one labeled entry row to `frame` and return its DoubleVar.

    The variable starts at `default` and is bound to the entry widget,
    so reading it later yields the user's current input.
    """
    var = tk.DoubleVar(value=default)
    etiqueta = tk.Label(frame, text=text)
    etiqueta.grid(row=row, column=0, padx=5, pady=5)
    campo = tk.Entry(frame, textvariable=var, justify="right")
    campo.grid(row=row, column=1, padx=5, pady=5)
    return var
# Declare the input fields (each returns the bound DoubleVar)
base_var = generar_dato(frame, 0, "Semi-base (m):", base)
altura_var = generar_dato(frame, 1, "Semi-altura (m):", altura)
masa_var = generar_dato(frame, 2, "Masa (kg):", masa)
radio_var = generar_dato(frame, 3, "Radio (m):", radio)
amplitud_var = generar_dato(frame, 4, "Amplitud (m):", amplitud)
amort_var = generar_dato(frame, 5, "Coef. amortiguamiento:", amort)
periodo_var = generar_dato(frame, 6, "Periodo (s):", periodo)
# Main figure
fig = Figure(figsize=(5, 2))
# Subplot: animated building (a rectangle patch that gets moved/rotated)
edificio_ax = fig.add_subplot(211, xlim=(-12, 12), ylim=(0, 12))
edificio = Rectangle((-base, 0), 2*base, 2*altura)
edificio_ax.add_patch(edificio)
edificio_ax.grid(True)
# Subplot: ratio between the rocking angle and the critical angle alfa
angulos_ax = fig.add_subplot(223, xlim=(0, 20), ylim=(-1, 1))
angulos_ln, = angulos_ax.plot([], [], '-b')     # full curve
angulos_mov, = angulos_ax.plot([], [], 'ro')    # moving marker
angulos_ax.grid(True)
# Subplot: ground displacement of the building
desplazamiento_ax = fig.add_subplot(224, xlim=(0, 20), ylim=(-0.1, 0.1))
desplazamiento_ln, = desplazamiento_ax.plot([], [], '-b')   # full curve
desplazamiento_mov, = desplazamiento_ax.plot([], [], 'ro')  # moving marker
desplazamiento_ax.grid(True)
# Embed the figure in the window (right side)
canvas = FigureCanvasTkAgg(fig, master=window)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.RIGHT, fill=tk.BOTH, expand=1)
# Helper to compute the building's new position
def calcular_posicion(tiempo):
    """Damped-harmonic ground displacement at time `tiempo`.

    Reads the global amplitude, damping coefficient and period set by
    the UI; requires the damping to be sub-critical (amort < omega).
    """
    global amplitud, amort, periodo
    # Intermediate quantities
    omega = 2 * np.pi / periodo
    assert amort < omega
    omega_d = np.sqrt(omega**2 - amort**2)
    # x(t) = A * exp(-gamma t) * cos(omega_d t)
    envolvente = amplitud * np.exp(-amort * tiempo)
    return envolvente * np.cos(omega_d * tiempo)
# Función auxiliar para calcular la aceleración del edificio
def calcular_aceleracion(tiempo):
    """Second derivative (acceleration) of the damped-harmonic ground
    displacement at time `tiempo`.

    Reads the global amplitude, damping coefficient and period set by
    the UI; requires sub-critical damping (amort < omega).
    """
    global amplitud, amort, periodo
    # Intermediate quantities
    omega = 2 * np.pi / periodo
    assert amort < omega
    omega_d = np.sqrt(omega**2 - amort**2)
    # x''(t) = A e^{-gamma t} [ (gamma^2 - omega_d^2) cos(omega_d t)
    #                           + 2 gamma omega_d sin(omega_d t) ]
    termino_cos = (amort**2 - omega_d**2) * np.cos(omega_d * tiempo)
    termino_sen = (2 * amort * omega_d) * np.sin(omega_d * tiempo)
    return amplitud * np.exp(-amort * tiempo) * (termino_cos + termino_sen)
def calcular_angulo(tiempo, angulo, velang):
    """
    Advance the rocking angle of the building by one time step `paso`,
    given the current angle and angular velocity, under the damped
    harmonic ground motion.

    Semi-analytic update built from sinh/cosh of the rocking frequency;
    the statement order and exact term grouping are numerically
    significant, so the body is left untouched.
    """
    global amplitud, amort, periodo, radio, paso, alfa
    # Intermediate quantities: rocking frequency of the block and the
    # hyperbolic propagators over one step.
    frec = np.sqrt((3*gravedad) / (4*radio))
    frecdelta = frec * paso
    senh = np.sinh(frecdelta)
    cosh = np.cosh(frecdelta)
    # Ground acceleration at the start and end of the step (the update
    # interpolates the forcing across the interval).
    acel_ahora = calcular_aceleracion(tiempo)
    acel_despues = calcular_aceleracion(tiempo + paso)
    # Apply the formula: homogeneous propagation of (angulo, velang) plus
    # forcing terms; parte5 restores toward upright depending on the
    # current tilt direction (sign of angulo).
    parte1 = (senh/frec) * velang
    parte2 = cosh * angulo
    parte3 = ((1 - senh/frecdelta) / gravedad) * acel_despues
    parte4 = ((senh/frecdelta - cosh) / gravedad) * acel_ahora
    parte5 = alfa * (1 - cosh) * np.sign(angulo)
    return parte1 + parte2 + parte3 + parte4 + parte5
def calcular_velang(tiempo, angulo, velang):
    """
    Advance the angular velocity of the rocking building by one time
    step `paso`, given the current angle and angular velocity.

    Companion of `calcular_angulo` (the derivative of the same
    semi-analytic update); term grouping is numerically significant, so
    the body is left untouched.
    """
    global amplitud, amort, periodo, radio, paso, alfa
    # Intermediate quantities (same propagators as calcular_angulo).
    frec = np.sqrt((3*gravedad) / (4*radio))
    frecdelta = frec * paso
    senh = np.sinh(frecdelta)
    cosh = np.cosh(frecdelta)
    acel_ahora = calcular_aceleracion(tiempo)
    acel_despues = calcular_aceleracion(tiempo + paso)
    # Apply the formula: propagated state plus forcing terms; parte5 is
    # the restoring contribution with sign set by the current tilt.
    parte1 = cosh * velang
    parte2 = frec * senh * angulo
    parte3 = ((1 - cosh) / (gravedad * paso)) * acel_despues
    parte4 = ((cosh - frecdelta*senh - 1) / (gravedad * paso)) * acel_ahora
    parte5 = -alfa * frec * senh * np.sign(angulo)
    return parte1 + parte2 + parte3 + parte4 + parte5
def start():
    """
    Read the user inputs, precompute the whole simulation (ground
    displacement and rocking angle over 20 s) and (re)start the
    animation of the building's motion.
    """
    global base, altura, masa, radio, amplitud, amort, periodo, alfa
    global tiempos, posiciones, angulos, cocientes, anim, paso
    try:
        # Stop the previous animation, if any
        if anim:
            anim.event_source.stop()
        # Read the main input values from the UI variables
        base = base_var.get()
        altura = altura_var.get()
        masa = masa_var.get()
        radio = radio_var.get()
        amplitud = amplitud_var.get()
        amort = amort_var.get()
        periodo = periodo_var.get()
        # Validate the inputs; the AssertionError (like any other error
        # here) is caught below and reported via the error dialog.
        assert (base > 0 and altura > 0 and masa > 0 and radio > 0 and
                amplitud > 0 and amort > 0 and periodo > 0)
        # Initialize the rocking state
        alfa = np.arctan(base/altura)  # critical tipping angle
        angulo = 0
        velang = 0
        # Precompute the motion over 20 s at 1001 samples
        tiempos, paso = np.linspace(0, 20, 1001, retstep=True)
        posiciones = []
        angulos = []
        cocientes = []
        for tiempo in tiempos:
            # Mirror time around t=10 so the forcing amplitude grows
            # toward the middle of the run and decays afterwards.
            tiempo_real = 10 - tiempo if tiempo <= 10 else tiempo - 10
            # New ground displacement
            posicion = calcular_posicion(tiempo_real)
            posiciones.append(posicion)
            # Advance angle and angular velocity simultaneously (both
            # updates must see the *previous* state, hence the tuple).
            angulo, velang = (calcular_angulo(tiempo_real, angulo, velang),
                              calcular_velang(tiempo_real, angulo, velang))
            angulos.append(angulo)
            cocientes.append(angulo / alfa)
        # Draw the full curves on the static plots
        desplazamiento_ln.set_data(tiempos, posiciones)
        angulos_ln.set_data(tiempos, cocientes)
        canvas.draw()
        # Start the animation (1000 frames, 20 ms apart)
        anim = FuncAnimation(fig, update, 1000, interval=20, blit=True)
    except Exception as ex:
        print(ex)
        showerror("Error", "No se pudo generar la simulación. Verifique que "
                  "los datos ingresados sean correctos y coherentes.")
def update(index):
    """
    Animation callback: place and tilt the building, and move the
    marker dots, for precomputed frame `index`.
    """
    global tiempos, posiciones, angulos, cocientes
    # Look up the precomputed state for this frame
    tiempo = tiempos[index]
    posicion = posiciones[index]
    angulo = angulos[index]
    cociente = cocientes[index]
    # Translate the building horizontally
    edificio.set_x(posicion - base)
    # Rotate the building about its base corner: the left corner for
    # non-negative angles, the right corner otherwise. The pivot is
    # transformed to display coordinates before building the rotation.
    trans_original = edificio_ax.transData
    punto_rotacion = (-base, 0) if angulo >= 0 else (base, 0)
    coords = trans_original.transform(punto_rotacion)
    trans_rotar = Affine2D().rotate_around(coords[0], coords[1], angulo)
    edificio.set_transform(trans_original + trans_rotar)
    # Move the marker dots along the two curves
    angulos_mov.set_data(tiempo, cociente)
    desplazamiento_mov.set_data(tiempo, posicion)
    return edificio, desplazamiento_mov, angulos_mov
# Start button: re-reads the inputs and restarts the simulation
button = tk.Button(frame, text="Iniciar", command=start)
button.grid(row=7, columnspan=2, padx=5, pady=5)
# Run once with the default values (anim must exist before start()
# checks whether to stop a previous animation)
anim = None
start()
# Enter the Tk event loop
window.mainloop()
import os
import numpy as np
import torch
from tqdm import tqdm
from zs3.dataloaders import make_data_loader
from zs3.modeling.deeplab import DeepLab
from zs3.modeling.sync_batchnorm.replicate import patch_replication_callback
from zs3.dataloaders.datasets import DATASETS_DIRS
from zs3.utils.calculate_weights import calculate_weigths_labels
from zs3.utils.loss import SegmentationLosses
from zs3.utils.lr_scheduler import LR_Scheduler
from zs3.utils.metrics import Evaluator, Evaluator_seen_unseen
from zs3.utils.saver import Saver
from zs3.utils.summaries import TensorboardSummary
from zs3.parsing import get_parser
from zs3.exp_data import CLASSES_NAMES
class Trainer:
    """Builds the DeepLab model, data loaders, optimizer, criterion and
    evaluators for zero-shot segmentation, and runs validation epochs."""

    def __init__(self, args):
        """Set up saver, tensorboard, dataloaders, model, optimizer,
        criterion, evaluators and LR scheduler; optionally resume from a
        checkpoint (args.resume)."""
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        kwargs = {"num_workers": args.workers, "pin_memory": True}
        (self.train_loader, self.val_loader, _, self.nclass,) = make_data_loader(
            args, **kwargs
        )

        model = DeepLab(
            num_classes=self.nclass,
            output_stride=args.out_stride,
            sync_bn=args.sync_bn,
            freeze_bn=args.freeze_bn,
            imagenet_pretrained_path=args.imagenet_pretrained_path,
        )

        # Backbone parameters train at the base LR, head parameters at 10x.
        train_params = [
            {"params": model.get_1x_lr_params(), "lr": args.lr},
            {"params": model.get_10x_lr_params(), "lr": args.lr * 10},
        ]

        # Define Optimizer
        optimizer = torch.optim.SGD(
            train_params,
            momentum=args.momentum,
            weight_decay=args.weight_decay,
            nesterov=args.nesterov,
        )

        # Define Criterion
        # whether to use class balanced weights
        if args.use_balanced_weights:
            # BUGFIX: the original expression
            #   DATASETS_DIRS[args.dataset] / args.dataset + "_classes_weights.npy"
            # parsed as (Path / str) + str, which raises TypeError because
            # pathlib.Path does not implement `+`. Join the full filename.
            classes_weights_path = DATASETS_DIRS[args.dataset] / (
                args.dataset + "_classes_weights.npy"
            )
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(
                    args.dataset, self.train_loader, self.nclass
                )
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None
        self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(
            mode=args.loss_type
        )
        self.model, self.optimizer = model, optimizer

        # Define Evaluator
        self.evaluator = Evaluator(
            self.nclass, args.seen_classes_idx_metric, args.unseen_classes_idx_metric
        )
        self.evaluator_seen_unseen = Evaluator_seen_unseen(
            self.nclass, args.unseen_classes_idx_metric
        )
        # Define lr scheduler
        self.scheduler = LR_Scheduler(
            args.lr_scheduler, args.lr, args.epochs, len(self.train_loader)
        )

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError(f"=> no checkpoint found at '{args.resume}'")
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            if args.random_last_layer:
                # Re-initialize the classification head (e.g. when the
                # class set differs from the checkpoint's).
                checkpoint["state_dict"]["decoder.pred_conv.weight"] = torch.rand(
                    (
                        self.nclass,
                        checkpoint["state_dict"]["decoder.pred_conv.weight"].shape[1],
                        checkpoint["state_dict"]["decoder.pred_conv.weight"].shape[2],
                        checkpoint["state_dict"]["decoder.pred_conv.weight"].shape[3],
                    )
                )
                checkpoint["state_dict"]["decoder.pred_conv.bias"] = torch.rand(
                    self.nclass
                )
            if args.cuda:
                self.model.module.load_state_dict(checkpoint["state_dict"])
            else:
                self.model.load_state_dict(checkpoint["state_dict"])
            if not args.ft:
                if not args.nonlinear_last_layer:
                    self.optimizer.load_state_dict(checkpoint["optimizer"])
            self.best_pred = checkpoint["best_pred"]
            print(f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0

    def validation(self, epoch, args):
        """Run one validation pass over the val loader, accumulate the
        evaluator, log losses/metrics to tensorboard and print overall,
        seen-class and unseen-class scores."""
        class_names = CLASSES_NAMES[:21]
        self.model.eval()
        self.evaluator.reset()
        all_target = []
        all_pred = []
        all_pred_unseen = []
        tbar = tqdm(self.val_loader, desc="\r")
        test_loss = 0.0
        for i, sample in enumerate(tbar):
            image, target = sample["image"], sample["label"]
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                if args.nonlinear_last_layer:
                    output = self.model(image, image.size()[2:])
                else:
                    output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description("Test loss: %.3f" % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            pred_unseen = pred.copy()
            target = target.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            # "Unseen" prediction: mask out every seen-class logit so the
            # argmax can only pick an unseen class.
            pred_unseen[:, args.seen_classes_idx_metric] = float("-inf")
            pred_unseen = np.argmax(pred_unseen, axis=1)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)
            all_target.append(target)
            all_pred.append(pred)
            all_pred_unseen.append(pred_unseen)

        # Fast test during the training
        Acc, Acc_seen, Acc_unseen = self.evaluator.Pixel_Accuracy()
        (
            Acc_class,
            Acc_class_by_class,
            Acc_class_seen,
            Acc_class_unseen,
        ) = self.evaluator.Pixel_Accuracy_Class()
        (
            mIoU,
            mIoU_by_class,
            mIoU_seen,
            mIoU_unseen,
        ) = self.evaluator.Mean_Intersection_over_Union()
        (
            FWIoU,
            FWIoU_seen,
            FWIoU_unseen,
        ) = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar("val_overall/total_loss_epoch", test_loss, epoch)
        self.writer.add_scalar("val_overall/mIoU", mIoU, epoch)
        self.writer.add_scalar("val_overall/Acc", Acc, epoch)
        self.writer.add_scalar("val_overall/Acc_class", Acc_class, epoch)
        self.writer.add_scalar("val_overall/fwIoU", FWIoU, epoch)

        self.writer.add_scalar("val_seen/mIoU", mIoU_seen, epoch)
        self.writer.add_scalar("val_seen/Acc", Acc_seen, epoch)
        self.writer.add_scalar("val_seen/Acc_class", Acc_class_seen, epoch)
        self.writer.add_scalar("val_seen/fwIoU", FWIoU_seen, epoch)

        self.writer.add_scalar("val_unseen/mIoU", mIoU_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc", Acc_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc_class", Acc_class_unseen, epoch)
        self.writer.add_scalar("val_unseen/fwIoU", FWIoU_unseen, epoch)

        print("Validation:")
        print(
            "[Epoch: %d, numImages: %5d]"
            % (epoch, i * self.args.batch_size + image.data.shape[0])
        )
        print(f"Loss: {test_loss:.3f}")
        print(f"Overall: Acc:{Acc}, Acc_class:{Acc_class}, mIoU:{mIoU}, fwIoU: {FWIoU}")
        print(
            "Seen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
                Acc_seen, Acc_class_seen, mIoU_seen, FWIoU_seen
            )
        )
        print(
            "Unseen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
                Acc_unseen, Acc_class_unseen, mIoU_unseen, FWIoU_unseen
            )
        )

        # Per-class logging (class_names is trimmed to the 21 VOC classes).
        for class_name, acc_value, mIoU_value in zip(
            class_names, Acc_class_by_class, mIoU_by_class
        ):
            self.writer.add_scalar("Acc_by_class/" + class_name, acc_value, epoch)
            self.writer.add_scalar("mIoU_by_class/" + class_name, mIoU_value, epoch)
            print(class_name, "- acc:", acc_value, " mIoU:", mIoU_value)
def main():
    """Build the argument parser (on top of the shared zs3 parser),
    resolve defaults, and run a single validation pass with Trainer."""
    parser = get_parser()
    parser.add_argument(
        "--out-stride", type=int, default=16, help="network output stride (default: 8)"
    )

    # PASCAL VOC
    parser.add_argument(
        "--dataset",
        type=str,
        default="pascal",
        choices=["pascal", "coco", "cityscapes"],
        help="dataset name (default: pascal)",
    )

    parser.add_argument(
        "--use-sbd",
        action="store_true",
        default=True,
        help="whether to use SBD dataset (default: True)",
    )
    parser.add_argument("--base-size", type=int, default=513, help="base image size")
    parser.add_argument("--crop-size", type=int, default=513, help="crop image size")
    parser.add_argument(
        "--loss-type",
        type=str,
        default="ce",
        choices=["ce", "focal"],
        help="loss func type (default: ce)",
    )
    # training hyper params

    # PASCAL VOC
    parser.add_argument(
        "--epochs",
        type=int,
        default=300,
        metavar="N",
        help="number of epochs to train (default: auto)",
    )

    # PASCAL VOC
    parser.add_argument(
        "--batch-size",
        type=int,
        default=8,
        metavar="N",
        help="input batch size for training (default: auto)",
    )

    # cuda, seed and logging

    # checking point
    parser.add_argument(
        "--imagenet_pretrained_path",
        type=str,
        default="checkpoint/resnet_backbone_pretrained_imagenet_wo_pascalvoc.pth.tar",
    )

    # checking point
    parser.add_argument(
        "--resume",
        type=str,
        default="checkpoint/deeplab_pascal_voc_02_unseen_GMMN_final.pth.tar",
        help="put the path to resuming file if needed",
    )
    parser.add_argument("--checkname", type=str, default="pascal_eval")

    # evaluation option
    parser.add_argument(
        "--eval-interval", type=int, default=5, help="evaluation interval (default: 1)"
    )

    ### FOR IMAGE SELECTION IN ORDER TO TAKE OFF IMAGE WITH UNSEEN CLASSES FOR TRAINING AND VALIDATION
    # keep empty
    parser.add_argument("--unseen_classes_idx", type=int, default=[])

    ### FOR METRIC COMPUTATION IN ORDER TO GET PERFORMANCES FOR TWO SETS
    # Seen/unseen split over the 21 VOC classes; alternative splits kept
    # below, commented out, for 4/6/8/10 unseen classes.
    seen_classes_idx_metric = np.arange(21)

    # 2 unseen
    unseen_classes_idx_metric = [10, 14]

    # 4 unseen
    # unseen_classes_idx_metric = [10, 14, 1, 18]

    # 6 unseen
    # unseen_classes_idx_metric = [10, 14, 1, 18, 8, 20]

    # 8 unseen
    # unseen_classes_idx_metric = [10, 14, 1, 18, 8, 20, 19, 5]

    # 10 unseen
    # unseen_classes_idx_metric = [10, 14, 1, 18, 8, 20, 19, 5, 9, 16]

    # Seen = all classes minus the unseen ones.
    seen_classes_idx_metric = np.delete(
        seen_classes_idx_metric, unseen_classes_idx_metric
    ).tolist()
    parser.add_argument(
        "--seen_classes_idx_metric", type=int, default=seen_classes_idx_metric
    )
    parser.add_argument(
        "--unseen_classes_idx_metric", type=int, default=unseen_classes_idx_metric
    )

    parser.add_argument(
        "--nonlinear_last_layer", type=bool, default=False, help="non linear prediction"
    )
    parser.add_argument(
        "--random_last_layer", type=bool, default=False, help="randomly init last layer"
    )

    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        try:
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(",")]
        except ValueError:
            raise ValueError(
                "Argument --gpu_ids must be a comma-separated list of integers only"
            )

    args.sync_bn = args.cuda and len(args.gpu_ids) > 1

    # default settings for epochs, batch_size and lr
    # NOTE(review): with the defaults above (epochs=300, batch_size=8)
    # these None-fallbacks only trigger when a caller passes None explicitly.
    if args.epochs is None:
        epoches = {
            "coco": 30,
            "cityscapes": 200,
            "pascal": 50,
        }
        args.epochs = epoches[args.dataset.lower()]

    if args.batch_size is None:
        args.batch_size = 4 * len(args.gpu_ids)

    if args.test_batch_size is None:
        args.test_batch_size = args.batch_size

    if args.lr is None:
        lrs = {
            "coco": 0.1,
            "cityscapes": 0.01,
            "pascal": 0.007,
        }
        # Scale the base LR linearly with the effective batch size.
        args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size

    if args.checkname is None:
        args.checkname = "deeplab-resnet"
    print(args)
    torch.manual_seed(args.seed)
    trainer = Trainer(args)
    print("Starting Epoch:", trainer.args.start_epoch)
    print("Total Epoches:", trainer.args.epochs)
    # Evaluation-only entry point: a single validation pass at epoch 0.
    trainer.validation(0, args)
#%% Imports
import pandas as pd
import numpy as np
#import neuprint as npr
from neuprint import Client, fetch_traced_adjacencies, fetch_adjacencies
from neuprint import fetch_synapse_connections
from neuprint import fetch_synapse_connections, NeuronCriteria as NC, SynapseCriteria as SC
from neuprint.utils import connection_table_to_matrix,merge_neuron_properties
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import seaborn as sns;
from scipy.spatial.distance import euclidean
import os
# Working directory for inputs/outputs
root = "C:\\Users\\knity\\Documents\\Nitya\\School\\RESEARCH\\GEMSEC_Projects\\NeuroRGM"
os.chdir(root)
#%% API Connection
# NOTE(review): hard-coded personal neuPrint API token committed to
# source — should be read from an environment variable and this token
# revoked/rotated.
TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6Im5pdHlha0B1dy5lZHUiLCJsZXZlbCI6Im5vYXV0aCIsImltYWdlLXVybCI6Imh0dHBzOi8vbGg2Lmdvb2dsZXVzZXJjb250ZW50LmNvbS8tcV9pZTRrcTBCQU0vQUFBQUFBQUFBQUkvQUFBQUFBQUFBQUEvQUFLV0pKTXo1dFBQQnY1ME0xOWRmcEg4TmNaNXlrNEVtZy9waG90by5qcGc_c3o9NTA_c3o9NTAiLCJleHAiOjE3NjY1MzYxNzB9.gNTqVgZNHTfIoGkfwlFidgzOkjWxwAJqNyY3cSpaw0M"
c = Client('neuprint.janelia.org', 'hemibrain:v1.0.1', TOKEN)
#%% Adjacency
#neurons_df, roi_conn_df = npr.queries.fetch_adjacencies(None, None, 200, 'Neuron', None)
# Fetch the connectivity table between these body IDs (hemibrain v1.0.1).
sources = [329566174, 425790257, 424379864, 329599710]
targets = [425790257, 424379864, 329566174, 329599710, 420274150]
neuron_df, connection_df = fetch_adjacencies(sources, targets)
#results = npr.fetch_simple_connections(upstream_bodyId = 1224941044)
#%% Create plot for one neuron
def plot_neuron(s, x=None, y=None, z=None):
    """Scatter-plot a skeleton dataframe `s` (columns x, y, z, radius) in 3-D.

    `x`, `y`, `z` are accepted for backward compatibility with existing
    callers but are unused (formerly mutable-list defaults, a Python
    anti-pattern). Returns the created 3-D axes.
    """
    X, Y, Z = np.array(s['x']), np.array(s['y']), np.array(s['z'])
    rad = np.array(s['radius'])
    fig = plt.figure(figsize=(20, 15))
    # BUGFIX: fig.gca(projection='3d') was deprecated in matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to get a 3-D axes.
    ax = fig.add_subplot(projection='3d')
    # Marker size/linewidth scaled by node radius so thick branches stand out.
    ax.scatter(X, Y, Z, s=rad, c=rad, cmap=plt.cm.seismic,
               linewidths=rad * 0.02, alpha=0.5)
    return ax
# plt.savefig(f'./Plots/skeleton_neuron{bodyId}_withRad.png')
#s = c.fetch_skeleton('917669951', format='pandas')
#plot_neuron(s, np.array(s['x']), np.array(s['y']), np.array(s['z'])
#%% Distance between 2 neurons - get min Dists
### try pairwise distances
bodyId1 = '917669951'
bodyId2 = '917674256'
s1 = c.fetch_skeleton(bodyId1, format='pandas')
s2 = c.fetch_skeleton(bodyId2, format='pandas')
# For every node of skeleton 1, find the nearest node of skeleton 2.
# BUGFIX/PERF: DataFrame.append was removed in pandas 2.0, and the double
# Python loop was O(n*m) calls to euclidean(); cdist computes the whole
# distance matrix in C. Result columns are unchanged: p1, p2, dist.
pts1 = s1[['x', 'y', 'z']].to_numpy()
pts2 = s2[['x', 'y', 'z']].to_numpy()
dist_matrix = cdist(pts1, pts2)           # shape (len(s1), len(s2))
nearest = dist_matrix.argmin(axis=1)      # index of closest s2 node per s1 node
minDists = pd.DataFrame({
    'p1': [list(p) for p in pts1],
    'p2': [list(p) for p in pts2[nearest]],
    'dist': dist_matrix[np.arange(len(pts1)), nearest],
})
#%% Distance between 2 neurons - plot
# Unpack the nearest-point pairs into separate coordinate lists for plotting.
p1_X = [item[0] for item in minDists['p1']]
p1_Y = [item[1] for item in minDists['p1']]
p1_Z = [item[2] for item in minDists['p1']]
p2_X = [item[0] for item in minDists['p2']]
p2_Y = [item[1] for item in minDists['p2']]
p2_Z = [item[2] for item in minDists['p2']]
# Raw skeleton coordinates and radii for both neurons.
X1, Y1, Z1 = np.array(s1['x']), np.array(s1['y']), np.array(s1['z'])
rad1 = np.array(s1['radius'])
X2, Y2, Z2 = np.array(s2['x']), np.array(s2['y']), np.array(s2['z'])
rad2 = np.array(s2['radius'])
fig = plt.figure(figsize=(20,15))
# NOTE(review): fig.gca(projection='3d') was removed in matplotlib 3.6;
# fig.add_subplot(projection='3d') is the supported replacement.
ax = fig.gca(projection='3d')
# Both skeletons colored/sized by node radius, nearest points overlaid small.
ax.scatter(X1, Y1, Z1, s = rad1, c = rad1, cmap = plt.cm.seismic, linewidths = rad1*0.02, alpha = 0.5)
ax.scatter(X2, Y2, Z2, s = rad2, c = rad2, cmap = plt.cm.seismic, linewidths = rad2*0.02, alpha = 0.5)
ax.scatter(p1_X, p1_Y, p1_Z, s = 2, cmap = 'green')
#%% Plot distances - histogram
# Distribution of nearest-neighbour distances from neuron 1 to neuron 2.
plt.hist(minDists['dist'], bins = 20,
                            color = '#0504aa', alpha=0.7, rwidth = 0.9)
plt.grid(axis='y', alpha=0.75)
plt.xlabel('Minimum distances')
plt.ylabel('Frequency')
plt.title('Minimum euclidean distances from neuron 1 to neuron 2')
# Saved under ./Plots relative to `root` (chdir'd above).
plt.savefig(f'./Plots/minDists_neuron{bodyId1}_neuron{bodyId2}.png')
#%% Adjacency
# Earlier matrix-based workflow, kept for reference:
# neurons_df, roi_conn_df = fetch_traced_adjacencies('exported-connections')
# conn_df = merge_neuron_properties(neurons_df, roi_conn_df, ['type', 'instance'])
# conn_mat = connection_table_to_matrix(conn_df, 'bodyId', sort_by='type')
# Fetch synapse-level connections for all traced, uncropped neurons.
# NOTE(review): source criteria only — target criteria is None, so this can
# be a very large query.
neuron_criteria = NC(status='Traced', cropped=False)
eb_syn_criteria = SC(primary_only=False)
eb_conns = fetch_synapse_connections(neuron_criteria, None, eb_syn_criteria)
# plt.matshow(conn_mat)
# plt.title('Adjacencies - traced neurons')
# plt.savefig(f'./Plots/adj_traced.png')
import gym
import numpy as np
import pytest
from push_ups import spaces
@pytest.fixture
def env():
    """Provide a fresh CartPole-v1 gym environment for each test."""
    return gym.make("CartPole-v1")
def test_equal_spaces(env):
    """A space compares equal to itself and unequal to another kind."""
    box = spaces.BoxSpace(env.observation_space)
    discrete = spaces.DiscreteSpace(env.action_space)
    assert box == box
    assert box != discrete
def test_BoxSpace_repr(env):
    """repr of a wrapped Box mirrors gym's formatting."""
    assert repr(spaces.BoxSpace(env.observation_space)) == "Box(4,)"
def test_DiscreteSpace_repr(env):
    """repr of a wrapped Discrete mirrors gym's formatting."""
    assert repr(spaces.DiscreteSpace(env.action_space)) == "Discrete(2)"
def test_BoxSpace_init(env):
    """Wrapping CartPole's observation space keeps shape and high bounds."""
    box = spaces.BoxSpace(env.observation_space)
    expected_high = np.array(
        [4.8000002e00, 3.4028235e38, 4.1887903e-01, 3.4028235e38], dtype=np.float32
    )
    assert box.shape == (4,)
    np.testing.assert_array_almost_equal(box.high, expected_high)
def test_BoxSpace_discrete_space(env):
    """A continuous Box space contributes no discrete dimensions."""
    box = spaces.BoxSpace(env.observation_space)
    np.testing.assert_array_equal(box.discrete_space(), np.array([]))
def test_BoxSpace_continuous_space(env):
    """continuous_space stacks the low and high bound rows."""
    box = spaces.BoxSpace(env.observation_space)
    expected_bounds = np.array(
        [
            [-4.8000002e00, -3.4028235e38, -4.1887903e-01, -3.4028235e38],
            [4.8000002e00, 3.4028235e38, 4.1887903e-01, 3.4028235e38],
        ],
        dtype=np.float32,
    )
    np.testing.assert_array_almost_equal(box.continuous_space(), expected_bounds)
def test_DiscreteSpace_init(env):
    """CartPole exposes exactly two discrete actions."""
    assert spaces.DiscreteSpace(env.action_space).n == 2
def test_DiscreteSpace_super_init(env):
    """Wrapping a continuous Box as a DiscreteSpace must be rejected."""
    with pytest.raises(AssertionError):
        spaces.DiscreteSpace(env.observation_space)
import os
import numpy as np
import pandas as pd
import xarray as xr
import datetime
import time
import cftime
import warnings
import requests
import shutil
def set_bnds_as_coords(ds):
    """Promote every *bnds*/*bounds* data variable of `ds` to a coordinate."""
    bounds_vars = [name for name in ds.data_vars
                   if 'bnds' in name or 'bounds' in name]
    return ds.set_coords(bounds_vars)
def set_bnds_as_coords_drop_height(ds):
    """Like set_bnds_as_coords, but also drop a 'height' coordinate if present."""
    ds = set_bnds_as_coords(ds)
    return ds.drop('height') if 'height' in ds.coords else ds
def convert2gregorian(ds):
    """Force a month-start gregorian time axis (shifted 15 days) onto `ds`.

    Also promotes bounds variables to coordinates via set_bnds_as_coords.
    NOTE(review): assumes the string form of the first time value begins with
    a 4-digit year and that the data is monthly — confirm per dataset.
    """
    ds = set_bnds_as_coords(ds)
    # Start year taken from the first timestamp's text representation.
    start = str(ds.time.values[0])[:4]
    # Rebuild the whole axis: same length, month-start, mid-month stamps.
    ds['time'] = xr.cftime_range(start=start, periods=ds.time.shape[0], freq='MS', calendar='gregorian').shift(15,'D')
    return ds
def get_ncfiles(zarr, df, skip_sites, skip_string='serendipity'):
    """Download the netcdf files needed for one zarr store (or abort the attempt).

    Parameters
    ----------
    zarr : str
        Zarr store path; institution/variable are parsed from it.
    df : pandas.DataFrame
        Catalog with columns zstore, file_name, size, HTTPServer_url.
    skip_sites : list
        Accepted for backward compatibility; unused since the site-skipping
        (curl) logic was disabled.
    skip_string : str
        'from 1950' skips native-grid files for decades before 1950.

    Returns
    -------
    (gfiles, trouble, codes, okay) : local file paths, accumulated error
    text, special-treatment codes, and overall success flag.

    BUGFIX: the original commented-out curl section opened with ''' but was
    "closed" with a markdown ``` fence, leaving an unterminated string, and
    the size-mismatch retry referenced `command`, which was only defined
    inside that dead section (NameError). The dead section is removed and
    the retry re-downloads via requests.
    """
    okay = True
    gfiles = []
    trouble = ''
    check_size = True
    files = df[df.zstore == zarr].file_name.unique()
    tmp = 'nctemp'
    os.makedirs(tmp, exist_ok=True)
    institution_id = zarr.split('/')[-8]
    v_short = zarr.split(institution_id + '/')[1][:-1]
    codes = read_codes(v_short)
    if 'noUse' in codes:
        return [], 'noUse in codes', codes, okay
    if 'noSizeCheck' in codes:
        check_size = False

    def _download(url, save_file):
        # Stream straight to disk; short timeout so dead servers fail fast.
        r = requests.get(url, timeout=3.01, stream=True)
        with open(save_file, 'wb') as f:
            shutil.copyfileobj(r.raw, f)

    # File-name tags for decades 1800-1949 on the native grid.
    pre1950_tags = ('gn_18', 'gn_190', 'gn_191', 'gn_192', 'gn_193', 'gn_194')
    for file in files:
        if skip_string == 'from 1950' and any(tag in file for tag in pre1950_tags):
            continue
        if not okay:
            break  # one failed download aborts the whole store
        save_file = tmp + '/' + file
        expected_size = df[df.file_name == file]['size'].values[0]
        if os.path.isfile(save_file) and abs(os.path.getsize(save_file) - expected_size) <= 1000:
            gfiles += [save_file]  # already present and (nearly) the right size
            continue
        url = df[df.file_name == file].HTTPServer_url.values[0]
        try:
            _download(url, save_file)
        except requests.RequestException:
            # Was a bare `except:`; keep the best-effort behaviour but only
            # swallow network errors.
            trouble += '\tServer not responding for: ' + url
            okay = False
            gfiles = []
            continue
        if check_size:
            if os.path.getsize(save_file) != expected_size:
                # One retry on size mismatch (the original intended this).
                try:
                    _download(url, save_file)
                except requests.RequestException:
                    pass
            actual_size = os.path.getsize(save_file)
            if abs(actual_size - expected_size) > 200:
                trouble += '\nnetcdf download not complete for: ' + url + ' expected/actual size: ' + str(expected_size) + '/' + str(actual_size)
                okay = False
                if actual_size == 0:
                    os.remove(save_file)  # drop the empty stub
        if okay:
            gfiles += [save_file]
    return gfiles, trouble, codes, okay
def concatenate(zarr, gfiles, codes):
    """Open the downloaded netcdf files as one dataset and apply fix-up codes.

    Parameters
    ----------
    zarr : str -- store path (table_id parsed from it for time-grid checks)
    gfiles : list of str -- local netcdf files, in time order
    codes : list of str -- special-treatment reason codes from read_codes()

    Returns ('success'|'failure', dataset-or-None, debug string).

    BUGFIX: the original used a bare `except:` around open_mfdataset and then
    fell through to use `df7`, raising NameError; now returns failure early.
    """
    dstr = ''
    if len(codes) > 0:
        dstr += f'special treatment needed: {codes}'
        for code in codes:
            dstr += '\ncodes = ' + code
    # guess chunk size by looking at first file: (not always a good choice - e.g. cfc11)
    nc_size = os.path.getsize(gfiles[0])
    ds = xr.open_dataset(gfiles[0])
    svar = ds.variable_id
    nt = ds[svar].shape[0]
    chunksize_optimal = 5e7
    chunksize = max(int(nt * chunksize_optimal / nc_size), 1)

    preprocess = set_bnds_as_coords
    join = 'exact'
    for code in codes:
        if 'deptht' in code:
            # Rename the non-standard 'deptht' dim/vars to 'olevel' in place.
            fix_string = '/usr/bin/ncrename -d .deptht\,olevel -v .deptht\,olevel -d .deptht_bounds\,olevel_bounds -v .deptht_bounds\,olevel_bounds '
            for gfile in gfiles:
                dstr += f'fixing deptht trouble in gfile:{gfile}'
                os.system(f'{fix_string} {gfile}')
        if 'remove_files' in code:
            # Drop known-bad NorESM2-LM files ending in 1230.nc.
            os.system('/bin/rm nctemp/*NorESM2-LM*1230.nc')
            gfiles = [file for file in gfiles if ('1231.nc' in file)]
        if 'fix_time' in code:
            preprocess = convert2gregorian
        if 'override' in code:
            join = 'override'
    # Hoisted out of the loop: the original tested `'drop_height' in codes`
    # inside it, so drop_height always took precedence; preserved exactly.
    if 'drop_height' in codes:
        preprocess = set_bnds_as_coords_drop_height

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            if 'time' in ds.coords:
                df7 = xr.open_mfdataset(gfiles, preprocess=preprocess, data_vars='minimal', chunks={'time': chunksize},
                                        use_cftime=True, join=join, combine='nested', concat_dim='time')
            else:  # fixed in time, no time grid
                df7 = xr.open_mfdataset(gfiles, preprocess=set_bnds_as_coords, combine='by_coords', join=join, data_vars='minimal')
        except Exception:
            dstr += '\nerror in open_mfdataset'
            return 'failure', None, dstr

    for code in codes:
        if 'drop_tb' in code:  # to_zarr cannot do chunking with time_bounds/time_bnds which is cftime (an object, not float)
            timeb = [var for var in df7.coords if 'time_bnds' in var or 'time_bounds' in var][0]
            df7 = df7.drop(timeb)
        if 'time_' in code:
            # Code of the form 'time_Y1-Y2' restricts the time range.
            [y1, y2] = code.split('_')[-1].split('-')
            df7 = df7.sel(time=slice(str(y1) + '-01-01', str(y2) + '-12-31'))
        if '360_day' in code:
            # Re-encode the time axis as a 360_day calendar starting at the
            # year embedded in the first file name.
            year = gfiles[0].split('-')[-2][-6:-2]
            df7['time'] = cftime.num2date(np.arange(df7.time.shape[0]), units='months since ' + year + '-01-16', calendar='360_day')
        if 'noleap' in code:
            year = gfiles[0].split('-')[-2][-6:-2]
            df7['time'] = xr.cftime_range(start=year, periods=df7.time.shape[0], freq='MS', calendar='noleap').shift(15, 'D')
        if 'missing' in code:
            del df7[svar].encoding['missing_value']

    # check time grid to make sure there are no gaps in concatenated data
    # (open_mfdataset checks for mis-ordering)
    if 'time' in ds.coords:
        table_id = zarr.split('/')[-3]
        year = sorted(list(set(df7.time.dt.year.values)))
        print(np.diff(year).sum(), len(year))
        if '3hr' in table_id:
            if not (np.diff(year).sum() == len(year) - 1) | (np.diff(year).sum() == len(year) - 2):
                dstr += '\ntrouble with 3hr time grid'
                return 'failure', df7, dstr
        elif 'dec' in table_id:
            if not (np.diff(year).sum() / 10 == len(year)) | (np.diff(year).sum() / 10 == len(year) - 1):
                dstr += '\ntrouble with dec time grid'
                return 'failure', df7, dstr
        else:
            if not np.diff(year).sum() == len(year) - 1:
                dstr += '\ntrouble with grid'
                return 'failure', df7, dstr

    # Concatenate the provenance tracking_ids from all member files.
    dsl = xr.open_dataset(gfiles[0])
    tracking_id = dsl.tracking_id
    for file in gfiles[1:]:
        dsl = xr.open_dataset(file)
        tracking_id = tracking_id + '\n' + dsl.tracking_id
    df7.attrs['tracking_id'] = tracking_id

    date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
    df7.attrs['status'] = date + ';created; by gcs.cmip6.ldeo@gmail.com'

    if 'time' in df7.coords:
        nt = len(df7.time.values)
        chunksize = min(chunksize, int(nt / 2))
        df7 = df7.chunk(chunks={'time': chunksize})  # yes, do it again
    return 'success', df7, dstr
def read_codes(zarr):
    """Return special-treatment reason codes matching a dataset path.

    `zarr` is 'source_id/experiment_id/member_id/table_id/variable_id/grid_label'.
    A row of csv/exceptions.csv matches when every field equals the dataset's
    value or the wildcard 'all'.
    """
    dex = pd.read_csv('csv/exceptions.csv', skipinitialspace=True)
    source_id, experiment_id, member_id, table_id, variable_id, grid_label = zarr.split('/')
    wanted = {'source_id': source_id, 'experiment_id': experiment_id,
              'member_id': member_id, 'table_id': table_id,
              'variable_id': variable_id, 'grid_label': grid_label}
    codes = []
    for row in dex.values:
        entry = dict(zip(dex.keys(), row))
        if all(entry[key] in (value, 'all') for key, value in wanted.items()):
            codes += [entry['reason_code']]
            print('special treatment needed:', entry['reason_code'])
    return codes
import torch
import numpy as np
import logging
import os
import torch.nn.functional as F
# Reuse the application-wide "anti-spoofing" logger configured in main.
logger = logging.getLogger("anti-spoofing")
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch of the two-input anti-spoofing model.

    Batches yield (_, X1, X2, target); loss is binary cross-entropy on the
    model's output against the (N, 1) float target. Progress is logged every
    args.log_interval batches.
    """
    model.train()
    total_samples = len(train_loader.dataset)
    for batch_idx, (_, X1, X2, target) in enumerate(train_loader):
        X1 = X1.to(device)
        X2 = X2.to(device)
        target = target.to(device).view(-1, 1).float()
        optimizer.zero_grad()
        prediction = model(X1, X2)
        loss = F.binary_cross_entropy(prediction, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(X1), total_samples,
                100. * batch_idx / len(train_loader), loss.item()))
def snapshot(dir_path, run_name, is_best, state):
    """Persist `state` to '<dir_path>/<run_name>-model_best.pth' when best.

    No-op when is_best is falsy.
    """
    best_path = os.path.join(dir_path, run_name + '-model_best.pth')
    if not is_best:
        return
    torch.save(state, best_path)
    logger.info("Snapshot saved to {}\n".format(best_path))
import numpy as np
import math
import pandas as pd
from random import shuffle
from matplotlib import pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as pause
from mpl_toolkits.mplot3d import Axes3D
from time import sleep
import matplotlib.animation as animation
import sys
# DEFAULT PARAMETERS
# Convergence tolerance (1e-16) on the change in log-likelihood per iteration.
threshold = 0.0000000000000001
def plotPoints(plotOutFunction, xyPoints, fy=[]):
    """Scatter the labelled 2-D points and (optionally) the decision boundary.

    plotOutFunction: 1 to also draw the boundary derived from the global
    thetaVec; anything else plots only the data points.
    xyPoints: rows of [x1, x2, 1.0, y] with y in {0, 1}.
    fy: predicted probabilities (currently only used by commented-out code).
    NOTE(review): reads the module-level `thetaVec` global rather than taking
    it as a parameter; `fy=[]` is a mutable default (harmless here, unused).
    """
    x1 = []
    x2 = []
    y = []
    # Split columns: features x1, x2 and the label (last column of each row).
    for i in range(len(xyPoints)):
        x1.append(xyPoints[i][0])
        x2.append(xyPoints[i][1])
        y.append(xyPoints[i][len(xyPoints[0])-1])
    plt.title("XY points and predicted function")
    # isFirst* flags ensure each legend label is added only once.
    isFirst1 = True
    isFirst0 = True
    for i in range(len(x1)):
        if y[i] == 1:
            if isFirst1:
                plt.plot(x1[i],x2[i],'b+',label='Actual output value 1',markersize=12)
                isFirst1 = False
            else:
                plt.plot(x1[i],x2[i],'b+',markersize=12)
        else:
            if isFirst0:
                plt.plot(x1[i],x2[i],'bo',label='Actual output value 0',markersize=12)
                isFirst0 = False
            else:
                plt.plot(x1[i],x2[i],'bo',markersize=12)
    if (plotOutFunction == 1):
        isFirst0 = True
        isFirst1 = True
        #for i in range(len(x1)):
        #    if fy[i] >= 0.5:
        #        if isFirst1:
        #            plt.plot(x1[i],x2[i],'r>',label='Predicted output value 1')
        #            isFirst1 = False
        #        else:
        #            plt.plot(x1[i],x2[i],'r>')
        #    else:
        #        if isFirst0:
        #            plt.plot(x1[i],x2[i],'rs',label='Predicted output value 0')
        #            isFirst0 = False
        #        else:
        #            plt.plot(x1[i],x2[i],'rs')
        # Decision boundary: theta0*x1 + theta1*x2 + theta2 = 0 solved for x2
        # (thetaVec is [theta_x1, theta_x2, theta_intercept]).
        bndry = []
        for i in range(len(x1)):
            bndry.append((-1 * thetaVec[2] - thetaVec[0] * x1[i]) / thetaVec[1])
        plt.plot(x1, bndry, '-')
    plt.xlabel("Input Data X1")
    plt.ylabel("Input Data X2")
    plt.legend()
    plt.show()
def readPoints(fileX, fileY):
    """Read a feature file and a label file into training rows.

    fileX: one "x1,x2" pair per line; fileY: one label per line.
    Returns (xyPoints, noOfPoints) where each row of xyPoints is
    [x1, x2, 1.0, y] — the constant 1.0 multiplies the intercept theta.

    Improvements: `with` blocks so the file handles are always closed
    (originally left open), and the count is derived from the rows.
    """
    with open(fileX, "r") as f1, open(fileY, "r") as f2:
        xLines = f1.read().splitlines()
        yLines = f2.read().splitlines()
    xyPoints = []
    for xLine, yLine in zip(xLines, yLines):
        xParts = xLine.split(",")
        xyPoints.append([float(xParts[0]), float(xParts[1]), 1.0, float(yLine)])
    return xyPoints, len(xyPoints)
def normalize(xyPoints, mean, var):
    """Standardize each feature column of xyPoints in place.

    The last two columns (the constant 1.0 intercept term and the label) are
    left untouched. Each feature column's mean and std are written into the
    caller-supplied `mean` and `var` lists at the matching index.
    Returns (xyPoints, mean, var).
    """
    n_features = len(xyPoints[0]) - 2  # skip bias column and label column
    for col in range(n_features):
        column = [row[col] for row in xyPoints]
        mean[col] = np.mean(column)
        var[col] = np.std(column)
        for row in xyPoints:
            row[col] = (row[col] - mean[col]) / var[col]
    return xyPoints, mean, var
def getHTheta(thetaVec, xyPoints, row):
    """Logistic hypothesis sigmoid(theta . x) for one sample row."""
    # zip truncates at len(thetaVec), so the trailing label column is ignored.
    z = sum(theta * xi for theta, xi in zip(thetaVec, xyPoints[row]))
    return 1 / (1 + math.exp(-z))
def getPredictedFn(xyPoints, thetaVec):
    """Return the hypothesis value h_theta(x) for every sample.

    Improvement: the original also built a label list `y` that was never
    returned or used — removed.
    """
    return [getHTheta(thetaVec, xyPoints, k) for k in range(len(xyPoints))]
def unormalize(xyPoints, mean, var):
    """Undo normalize(): restore original feature scales in place.

    Only the feature columns are touched (bias and label columns skipped).
    """
    n_features = len(xyPoints[0]) - 2
    for col in range(n_features):
        for row in xyPoints:
            row[col] = (row[col] * var[col]) + mean[col]
    return xyPoints
def calculateError(xyPoints, thetaVec):
    """Average log-likelihood of the data under the logistic model.

    J = (1/n) * sum_i [ y_i*log(h_i) + (1-y_i)*log(1-h_i) ].
    """
    total = 0.0
    label_col = len(xyPoints[0]) - 1
    for i in range(len(xyPoints)):
        h = getHTheta(thetaVec, xyPoints, i)
        y_i = xyPoints[i][label_col]
        total += y_i * math.log(h) + (1 - y_i) * math.log(1 - h)
    return total / float(len(xyPoints))
def calcDerivateOfError(thetaVec, xyPoints):
    """Gradient (column vector) of the average log-likelihood w.r.t. theta.

    grad_k = (1/n) * sum_i (y_i - h_i) * x_ik.

    PERF: the original recomputed getHTheta for every (k, i) pair — one
    sigmoid per theta component per sample. The hypothesis depends only on
    the sample, so it is now computed once per sample; results identical.
    """
    n = float(len(xyPoints))
    label_col = len(xyPoints[0]) - 1
    grad = np.zeros((len(thetaVec), 1))
    for i in range(len(xyPoints)):
        residual = xyPoints[i][label_col] - getHTheta(thetaVec, xyPoints, i)
        for k in range(len(thetaVec)):
            grad[k][0] += residual * xyPoints[i][k]
    grad /= n
    return grad
def calcInvHessianError(thetaVec, xyPoints):
    """Inverse Hessian of the log-likelihood at thetaVec.

    H[l][k] = (1/n) * sum_i h_i*(1-h_i) * x_ik * x_il; the matrix is built
    exploiting symmetry (NaN marks not-yet-computed entries) and then
    inverted with np.linalg.inv.
    """
    hessianJoMat = np.empty((len(thetaVec), len(thetaVec),))
    hessianJoMat[:] = np.nan  # NaN = "this symmetric pair not computed yet"
    # Per-sample hypothesis cache so sigmoid is evaluated once per sample.
    # NOTE(review): 0.0 is used as the "not computed" sentinel; sigmoid never
    # returns exactly 0.0 in practice, but this is fragile — confirm.
    htheta = [0] * (len(xyPoints))
    for l in range(len(thetaVec)):
        for k in range(len(thetaVec)):
            # Skip entries already filled via the symmetric assignment below.
            if math.isnan(hessianJoMat[l][k]) == False:
                continue
            hessianJoMat[l][k] = 0
            hessianJoMat[k][l] = 0
            for i in range(len(xyPoints)):
                hthetaVar = 0.0
                if htheta[i] == 0.0:
                    hthetaVar = getHTheta(thetaVec, xyPoints, i)
                    htheta[i] = hthetaVar
                else:
                    hthetaVar = htheta[i]
                hessianJoMat[l][k] = hessianJoMat[l][k] + hthetaVar * (1 - hthetaVar) * xyPoints[i][k] * xyPoints[i][l]
            hessianJoMat[l][k] = hessianJoMat[l][k] / float(len(xyPoints))
            hessianJoMat[k][l] = hessianJoMat[l][k]  # mirror to lower/upper triangle
    hessianJoMat = np.linalg.inv(hessianJoMat)
    return hessianJoMat
def updateTheta(xyPoints, thetaVec):
    """One Newton-Raphson step: theta <- theta + H^-1 * grad (in place).

    Mutates and returns thetaVec.
    """
    theta_col = np.array(thetaVec, dtype=float).reshape(-1, 1)
    grad = calcDerivateOfError(thetaVec, xyPoints)
    inv_hessian = calcInvHessianError(thetaVec, xyPoints)
    theta_col = theta_col + inv_hessian @ grad
    for i in range(len(thetaVec)):
        thetaVec[i] = theta_col[i][0]
    return thetaVec
fileX = sys.argv[1]  # path to the feature file ("x1,x2" per line)
fileY = sys.argv[2]  # path to the label file (one 0/1 per line)
# LOCAL VARIABLES
xyPoints,noOfPoints = readPoints(fileX,fileY)
thetaVec = [0] * (len(xyPoints[0]) - 1) # Last col is y so we will subtract that col in thetaVec and last theta is O0 which we will add a column in theta Vec
JPvs = 0   # previous iteration's log-likelihood, for the convergence test
noOfIterations = 0
# GLOBAL VARIABLE
mean = [0] * len(xyPoints[0])
var = [0] * len(xyPoints[0])
xyPoints, mean, var = normalize(xyPoints, mean, var)
# Newton-Raphson loop: iterate until the log-likelihood change < threshold.
while 1:
    thetaVec = updateTheta(xyPoints, thetaVec)
    JCur = calculateError(xyPoints, thetaVec)
    fy = getPredictedFn(xyPoints, thetaVec)
    #plotPoints(1,xyPoints, fy)
    if abs(JCur - JPvs) < threshold:
        break # Converged
    JPvs = JCur
    noOfIterations = noOfIterations + 1
fy = getPredictedFn(xyPoints, thetaVec)
plotPoints(1,xyPoints,fy)
# Restore original feature scales for reporting.
xyPoints = unormalize(xyPoints, mean, var)
print ("Theta Vector = ", thetaVec)
print ("Number of Iterations = ", noOfIterations)
import numpy as np
# import _proj as proj_lib
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
# SCS cone identifiers (single-letter keys used in SCS cone dictionaries).
ZERO = "f"       # zero cone (free/dual: all of R^n)
POS = "l"        # nonnegative orthant
SOC = "q"        # second-order (Lorentz) cone
PSD = "s"        # positive semidefinite cone (SCS packed vectorization)
EXP = "ep"       # exponential cone
EXP_DUAL = "ed"  # dual exponential cone
POWER = "p"      # power cone
# The ordering of CONES matches SCS.
CONES = [ZERO, POS, SOC, PSD, EXP, EXP_DUAL, POWER]
def parse_cone_dict(cone_dict):
    """Parse an SCS-style cone dictionary into ordered (cone, size) pairs."""
    pairs = []
    for name in CONES:  # preserve SCS's canonical cone ordering
        if name in cone_dict:
            pairs.append((name, cone_dict[name]))
    return pairs
def as_block_diag_linear_operator(matrices):
    """Block-diagonal LinearOperator from sparse matrices / linear operators."""
    ops = [op if isinstance(op, splinalg.LinearOperator)
           else splinalg.aslinearoperator(op)
           for op in matrices]
    row_sizes = [op.shape[0] for op in ops]
    col_sizes = [op.shape[1] for op in ops]
    total_rows, total_cols = sum(row_sizes), sum(col_sizes)
    # Prefix sums give each block's slice boundaries in the stacked vectors.
    row_bounds = np.append(0, np.cumsum(row_sizes))
    col_bounds = np.append(0, np.cumsum(col_sizes))

    def matvec(x):
        out = np.zeros(total_rows)
        for i, op in enumerate(ops):
            seg = x[col_bounds[i]:col_bounds[i + 1]].ravel()
            out[row_bounds[i]:row_bounds[i + 1]] = op.matvec(seg)
        return out

    def rmatvec(y):
        out = np.zeros(total_cols)
        for i, op in enumerate(ops):
            seg = y[row_bounds[i]:row_bounds[i + 1]].ravel()
            out[col_bounds[i]:col_bounds[i + 1]] = op.rmatvec(seg)
        return out

    return splinalg.LinearOperator((total_rows, total_cols),
                                   matvec=matvec, rmatvec=rmatvec)
def transpose_linear_operator(op):
    """Transpose of a LinearOperator: swap shape and matvec/rmatvec roles.

    BUGFIX: the original passed `reversed(op.shape)` — a reversed iterator,
    not a shape tuple — to LinearOperator; `op.shape[::-1]` is a real tuple.
    """
    return splinalg.LinearOperator(op.shape[::-1], matvec=op.rmatvec,
                                   rmatvec=op.matvec)
def vec_psd_dim(dim):
    """Length of the SCS packed vectorization of a dim x dim symmetric matrix."""
    return dim * (dim + 1) // 2
def psd_dim(x):
    """Side length of the symmetric matrix whose SCS vectorization is x."""
    packed_len = x.size  # equals dim*(dim+1)/2, so dim = floor(sqrt(2*len))
    return int(np.sqrt(2 * packed_len))
def in_exp(x):
    """Membership test for the (primal) exponential cone."""
    # Boundary ray: x <= 0, y == 0, z >= 0.
    if x[0] <= 0 and np.isclose(x[1], 0) and x[2] >= 0:
        return True
    # Interior: y > 0 and y * exp(x / y) <= z.
    return x[1] > 0 and x[1] * np.exp(x[0] / x[1]) <= x[2]
def in_exp_dual(x):
    """Membership test for the dual exponential cone."""
    # TODO(sbarratt): need to make the numerics safe here, maybe using logs
    # Boundary ray: u == 0, v >= 0, w >= 0.
    if np.isclose(x[0], 0) and x[1] >= 0 and x[2] >= 0:
        return True
    # Interior: u < 0 and -u * exp(v / u) <= e * w.
    return x[0] < 0 and -x[0] * np.exp(x[1] / x[0]) <= np.e * x[2]
def unvec_symm(x, dim):
    """Returns a dim-by-dim symmetric matrix corresponding to `x`.

    `x` is a vector of length dim*(dim + 1)/2, corresponding to a symmetric
    matrix; the correspondence is as in SCS:
    vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
    """
    out = np.zeros((dim, dim))
    # triu_indices is row-major over the upper triangle; swapping row/col
    # scatters x into the lower triangle in SCS's column-major order.
    cols, rows = np.triu_indices(dim)
    out[rows, cols] = x
    # Symmetrize and undo the sqrt(2) off-diagonal scaling in one pass ...
    out = (out + out.T) / np.sqrt(2)
    # ... then correct the diagonal (it was doubled, not scaled).
    diag = np.diag_indices(dim)
    out[diag] = np.diagonal(out) * np.sqrt(2) / 2
    return out
def vec_symm(X):
    """Returns a vectorized representation of a symmetric matrix `X`.

    Vectorization (including scaling) as per SCS:
    vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
    """
    scaled = X.copy() * np.sqrt(2)  # scale off-diagonals; X is untouched
    d = np.diag_indices(scaled.shape[0])
    scaled[d] = np.diagonal(scaled) / np.sqrt(2)  # diagonal stays unscaled
    cols, rows = np.triu_indices(scaled.shape[0])
    return scaled[rows, cols]  # lower triangle, column-major order
def _proj(x, cone, dual=False):
    """Returns the projection of x onto a cone or its dual cone."""
    if cone == ZERO:
        # Dual of the zero cone is all of R^n: identity projection.
        return x if dual else np.zeros(x.shape)
    elif cone == POS:
        # Nonnegative orthant is self-dual: clip negatives.
        return np.maximum(x, 0)
    elif cone == SOC:
        # print("Second Order Cone: x = {}".format(x))
        # Standard SOC projection: x = (t, z).
        t = x[0]
        z = x[1:]
        norm_z = np.linalg.norm(z, 2)
        if norm_z <= t or np.isclose(norm_z, t, atol=1e-8):
            # Already inside (or on) the cone.
            return x
        elif norm_z <= -t:
            # Inside the polar cone: projection is the origin.
            return np.zeros(x.shape)
        else:
            # Boundary case: scale onto the cone surface.
            return 0.5 * (1 + t / norm_z) * np.append(norm_z, z)
    elif cone == PSD:
        # Eigendecompose and clip negative eigenvalues (SCS packed format).
        dim = psd_dim(x)
        X = unvec_symm(x, dim)
        lambd, Q = np.linalg.eig(X)
        return vec_symm(Q @ sparse.diags(np.maximum(lambd, 0)) @ Q.T)
    elif cone == EXP:
        raise NotImplementedError("exp cone is not implemented here yet {}".format(EXP))
        # NOTE(review): everything below is unreachable — kept from the
        # disabled proj_lib-based implementation (import commented out above).
        num_cones = int(x.size / 3)
        out = np.zeros(x.size)
        offset = 0
        for _ in range(num_cones):
            x_i = x[offset:offset + 3]
            r, s, t, _ = proj_lib.proj_exp_cone(
                float(x_i[0]), float(x_i[1]), float(x_i[2]))
            out[offset:offset + 3] = np.array([r, s, t])
            offset += 3
        # via Moreau
        return x - out if dual else out
    else:
        raise NotImplementedError(f"{cone} not implemented")
def _dproj(x, cone, dual=False):
    """Returns the derivative of projecting onto a cone (or its dual cone) at x.

    The derivative is represented as either a sparse matrix or linear operator.
    """
    shape = (x.size, x.size)
    if cone == ZERO:
        # Derivative of identity (dual) vs. the zero map (primal).
        return sparse.eye(*shape) if dual else sparse.csc_matrix(shape)
    elif cone == POS:
        # Elementwise clip: derivative is 1 where x > 0, 0 where x < 0.
        return sparse.diags(.5 * (np.sign(x) + 1), format="csc")
    elif cone == SOC:
        t = x[0]
        z = x[1:]
        norm_z = np.linalg.norm(z, 2)
        if norm_z <= t:
            # Interior of the cone: projection is identity.
            return sparse.eye(*shape)
        elif norm_z <= -t:
            # Interior of the polar cone: projection is constant zero.
            return sparse.csc_matrix(shape)
        else:
            # Boundary case: closed-form Jacobian of the SOC projection.
            z = z.reshape(z.size)
            unit_z = z / norm_z
            scale_factor = 1.0 / (2 * norm_z)
            t_plus_norm_z = t + norm_z
            def matvec(y):
                t_in = y[0]
                z_in = y[1:]
                first = norm_z * t_in + np.dot(z, z_in)
                rest = z * t_in + t_plus_norm_z * z_in - \
                    t * unit_z * np.dot(unit_z, z_in)
                return scale_factor * np.append(first, rest)
            # derivative is symmetric
            return splinalg.LinearOperator(shape, matvec=matvec,
                                           rmatvec=matvec)
    elif cone == PSD:
        dim = psd_dim(x)
        X = unvec_symm(x, dim)
        lambd, Q = np.linalg.eig(X)
        if np.all(lambd >= 0):
            # Already PSD: projection is locally the identity.
            matvec = lambda y: y
            return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
        # Sort eigenvalues, eigenvectors in ascending order, so that
        # we can obtain the index k such that lambd[k-1] < 0 < lambd[k]
        idx = lambd.argsort()
        lambd = lambd[idx]
        Q = Q[:, idx]
        k = np.searchsorted(lambd, 0)
        # B encodes the directional-derivative weights of eigenvalue clipping.
        B = np.zeros((dim, dim))
        pos_gt_k = np.outer(np.maximum(lambd, 0)[k:], np.ones(k))
        neg_lt_k = np.outer(np.ones(dim - k), np.minimum(lambd, 0)[:k])
        B[k:, :k] = pos_gt_k / (neg_lt_k + pos_gt_k)
        B[:k, k:] = B[k:, :k].T
        B[k:, k:] = 1
        matvec = lambda y: vec_symm(
            Q @ (B * (Q.T @ unvec_symm(y, dim) @ Q)) @ Q.T)
        return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
    elif cone == EXP:
        raise NotImplementedError("EXP cone is not implemented here yet {}".format(EXP))
        # NOTE(review): everything below is unreachable — kept from the
        # disabled proj_lib-based implementation. Note also the lambda over
        # x_i in the loop would late-bind if this were re-enabled.
        num_cones = int(x.size / 3)
        ops = []
        offset = 0
        for _ in range(num_cones):
            x_i = x[offset:offset + 3]
            offset += 3
            if in_exp(x_i):
                ops.append(splinalg.aslinearoperator(sparse.eye(3)))
            elif in_exp_dual(-x_i):
                ops.append(splinalg.aslinearoperator(
                    sparse.csc_matrix((3, 3))))
            elif x_i[0] < 0 and x_i[1] and not np.isclose(x_i[2], 0):
                matvec = lambda y: np.array([
                    y[0], 0, y[2] * 0.5 * (1 + np.sign(x_i[2]))])
                ops.append(splinalg.LinearOperator((3, 3), matvec=matvec,
                                                   rmatvec=matvec))
            else:
                # TODO(akshayka): Cache projection if this is a bottleneck
                # TODO(akshayka): y_st is sometimes zero ...
                x_st, y_st, _, mu = proj_lib.proj_exp_cone(x_i[0], x_i[1],
                                                           x_i[2])
                if np.equal(y_st, 0):
                    y_st = np.abs(x_st)
                exp_x_y = np.exp(x_st / y_st)
                mu_exp_x_y = mu * exp_x_y
                x_mu_exp_x_y = x_st * mu_exp_x_y
                M = np.zeros((4, 4))
                M[:, 0] = np.array([
                    1 + mu_exp_x_y / y_st, -x_mu_exp_x_y / (y_st ** 2),
                    0,
                    exp_x_y])
                M[:, 1] = np.array([
                    -x_mu_exp_x_y / (y_st ** 2),
                    1 + x_st * x_mu_exp_x_y / (y_st ** 3),
                    0, exp_x_y - x_st * exp_x_y / y_st])
                M[:, 2] = np.array([0, 0, 1, -1])
                M[:, 3] = np.array([
                    exp_x_y, exp_x_y - x_st * exp_x_y / y_st, -1, 0])
                ops.append(splinalg.aslinearoperator(np.linalg.inv(M)[:3, :3]))
        D = as_block_diag_linear_operator(ops)
        if dual:
            # Moreau: dPi_K*(x) = I - dPi_K(-x) (applied via v - D v here).
            return splinalg.LinearOperator((x.size, x.size),
                                           matvec=lambda v: v - D.matvec(v),
                                           rmatvec=lambda v: v - D.rmatvec(v))
        else:
            return D
    else:
        raise NotImplementedError(f"{cone} not implemented")
def pi(x, cones, dual=False):
    """Projects x onto product of cones (or their duals)
    Args:
      x: NumPy array (with PSD data formatted in SCS convention)
      cones: list of (cone name, size)
      dual: whether to project onto the dual cone
    Returns:
      NumPy array that is the projection of `x` onto the (dual) cones
    """
    projection = np.zeros(x.shape)
    offset = 0
    for cone, sz in cones:
        # ===============================
        # print(cone, sz) # only uncomment for debug
        # A cone entry may carry one size or a list of sizes (e.g. many SOCs).
        sz = sz if isinstance(sz, (tuple, list)) else (sz,)
        if sum(sz) == 0:
            continue
        for dim in sz:
            if cone == PSD:
                # Declared size is the matrix side; packed length differs.
                dim = vec_psd_dim(dim)
            elif cone == EXP:
                raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
                # NOTE(review): unreachable — each exp cone occupies 3 entries.
                dim *= 3
            # ===============================
            # print("offset:", offset)
            # ===============================
            projection[offset:offset + dim] = _proj(
                x[offset:offset + dim], cone, dual=dual)
            offset += dim
    # ===============================
    # debug for deep analysis
    # ===============================
    # print("cone type: {:s}, offset: {:d} ".format(cone, offset))
    return projection
def dpi(x, cones, dual=False):
    """Derivative of projection onto product of cones (or their duals), at x
    Args:
      x: NumPy array
      cones: list of (cone name, size)
      dual: whether to project onto the dual cone
    Returns:
      An abstract linear map representing the derivative, with methods
      `matvec` and `rmatvec`
    """
    dprojections = []
    offset = 0
    for cone, sz in cones:
        # Same slicing logic as pi(): one derivative block per cone instance.
        sz = sz if isinstance(sz, (tuple, list)) else (sz,)
        if sum(sz) == 0:
            continue
        for dim in sz:
            if cone == PSD:
                dim = vec_psd_dim(dim)
            elif cone == EXP:
                raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
                # NOTE(review): unreachable — each exp cone occupies 3 entries.
                dim *= 3
            dprojections.append(
                _dproj(x[offset:offset + dim], cone, dual=dual))
            offset += dim
    return as_block_diag_linear_operator(dprojections)
'''Trains a simple convnet on the MNIST dataset.
based on a keras example by fchollet
Find a way to improve the test accuracy to almost 99%!
FYI, the number of layers and what they do is fine.
But their parameters and other hyperparameters could use some work.
'''
import numpy as np
np.random.seed(1337) # for reproducibility
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import os
def save_model(model, save_dir, model_name):
    """Persist a trained Keras model to save_dir/model_name, creating the dir."""
    os.makedirs(save_dir, exist_ok=True)
    model_path = os.path.join(save_dir, model_name)
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)
def load_and_featurize_data():
    """Load MNIST and prepare (X_train, X_test, Y_train, Y_test) for the CNN.

    Reads the module-level globals img_rows, img_cols and nb_classes set in
    the __main__ block. Images become float32 (N, 28, 28, 1) scaled to [0, 1];
    labels become one-hot matrices.
    """
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # reshape input into format Conv2D layer likes
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    # don't change conversion or normalization
    X_train = X_train.astype('float32')  # data was uint8 [0-255]
    X_test = X_test.astype('float32')  # data was uint8 [0-255]
    X_train /= 255  # normalizing (scaling from 0 to 1)
    X_test /= 255  # normalizing (scaling from 0 to 1)
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices (don't change)
    Y_train = to_categorical(y_train, nb_classes)  # cool
    Y_test = to_categorical(y_test, nb_classes)
    # in Ipython you should compare Y_test to y_test
    return X_train, X_test, Y_train, Y_test
def define_model(nb_filters, kernel_size, input_shape, pool_size):
    """Build and compile the two-conv-layer MNIST classifier.

    Architecture: Conv-ReLU, Conv-ReLU, MaxPool, Dropout, Flatten,
    Dense(32)-ReLU, Dropout, Dense(nb_classes)-softmax.
    Reads the module-level global nb_classes for the output layer size.
    """
    model = Sequential()  # model is a linear stack of layers (don't change)
    # note: the convolutional layers and dense layers require an activation function
    # see https://keras.io/activations/
    # and https://en.wikipedia.org/wiki/Activation_function
    # options: 'linear', 'sigmoid', 'tanh', 'relu', 'softplus', 'softsign'
    model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]),
                     padding='valid',
                     input_shape=input_shape))  # first conv. layer KEEP
    model.add(Activation('relu'))  # Activation specification necessary for Conv2D and Dense layers
    model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), padding='valid'))  # 2nd conv. layer KEEP
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))  # decreases size, helps prevent overfitting
    model.add(Dropout(0.25))  # zeros out some fraction of inputs, helps prevent overfitting
    model.add(Flatten())  # necessary to flatten before going into conventional dense layer KEEP
    print('Model flattened out to ', model.output_shape)
    # now start a typical neural network
    model.add(Dense(32))  # (only) 32 neurons in this layer, really? KEEP
    model.add(Activation('relu'))
    model.add(Dropout(0.5))  # zeros out some fraction of inputs, helps prevent overfitting
    model.add(Dense(nb_classes))  # 10 final nodes (one for each class) KEEP
    model.add(Activation('softmax'))  # softmax at end to pick between classes 0-9 KEEP
    # many optimizers available, see https://keras.io/optimizers/#usage-of-optimizers
    # suggest you KEEP loss at 'categorical_crossentropy' for this multiclass problem,
    # and KEEP metrics at 'accuracy'
    # suggest limiting optimizers to one of these: 'adam', 'adadelta', 'sgd'
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # important inputs to the model: don't changes the ones marked KEEP
    nb_classes = 10  # number of output possibilites: [0 - 9] KEEP
    img_rows, img_cols = 28, 28  # the size of the MNIST images KEEP
    input_shape = (img_rows, img_cols, 1)  # 1 channel image input (grayscale) KEEP
    batch_size = 100  # number of training samples used at a time to update the weights
    nb_epoch = 8  # number of passes through the entire train dataset before weights "final"
    nb_filters = 15  # number of convolutional filters to use
    pool_size = (4, 4)  # pooling decreases image size, reduces computation, adds translational invariance
    kernel_size = (5, 5)  # convolutional kernel size, slides over image to learn features
    X_train, X_test, Y_train, Y_test = load_and_featurize_data()
    model = define_model(nb_filters, kernel_size, input_shape, pool_size)
    # during fit process watch train and test error simultaneously
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])  # this is the one we care about
    # NOTE(review): interactive prompt blocks non-interactive runs (e.g. CI).
    saveme = input('Save the model? (y/n): ')
    if saveme=='y':
        save_model(model, 'saved_models/', f'cnnmodel_ACC-{round(score[1],4)}.h5')
# Experiment log: hyperparameter settings tried and validation accuracies.
'''
val_accuracy =  0.7941  0.8077
batch_size =    10      10      100
nb_epoch =      8       8       3
nb_filters =    24      30      10
pool_size =     (2, 2)  (2,2)   same
kernel_size =   (2, 2)  (2,2)   same
activations     tanh    tanh    relu
'''
class A:
    """Toy class exposing three evaluators of the polynomial a + a**2.

    The original f1/f2/f3 were three byte-identical copies of the same
    body (b=1, c=2, a**b + a**c); they now delegate to one shared helper
    so a future change cannot drift between them.
    """

    def __init__(self):
        pass

    def _poly(self, a):
        """Shared computation behind f1/f2/f3: a**1 + a**2."""
        return a + a ** 2

    def f1(self, a):
        """Return a + a**2."""
        return self._poly(a)

    def f2(self, a):
        """Return a + a**2 (identical to f1)."""
        return self._poly(a)

    def f3(self, a):
        """Return a + a**2 (identical to f1)."""
        return self._poly(a)
# Lists — demo of basic list mutation methods
# 0 1 2 3 4 5
noten = [
    1, 1, 3,
    4, 2, 1
]
noten.append(6)
noten.append(1)
print(noten)
# pop() with no argument removes from the end
noten.pop()
noten.pop()
print(noten)
noten.insert(0, 12)
print(noten)
# pop(2) removes the element at index 2
noten.pop(2)
print(noten)
# List Comprehensions — loop-built list vs comprehension equivalents
my_list = []  # NOTE(review): never used afterwards
my_list2 = []
for i in range(10):
    my_list2.append(i)
print(my_list2)
# squares of even numbers below 100
my_list2_comp = [i**2 for i in range(100) if i % 2 == 0]
# even numbers below 100
my_list2_comp2 = [i for i in range(100) if i % 2 == 0]
print(my_list2_comp)
print("\n")
print(my_list2_comp2)
# Numpy — reshaping a flat array into a 2x2 matrix
import numpy as np
m = np.array([1, 0, 0, 1])
print(m.shape)
print(m)
m = np.reshape(m, (2, 2))
print(m.shape)
print(m)
# NOTE(review): the next line calls f(my_var), but `f` is never defined in
# this file — it raises NameError; confirm what function was intended.
my_var = 2
f(my_var) | |
import torch
from torch.utils.data import Dataset
import numpy as np
from .grid_generator import SampleSpec
# Default board configuration: 4 object pairs drawn from 4 classes.
DEFAULT_PAIRS = 4
DEFAULT_CLASSES = 4
# Module-level spec shared by GridGenerator below; replace it via
# set_sample_spec() *before* any samples are generated.
sample_spec = SampleSpec(num_pairs=DEFAULT_PAIRS, num_classes=DEFAULT_CLASSES, im_dim=76, min_cell=15, max_cell=18)
def set_sample_spec(num_pairs, num_classes, reset_every=None, im_dim=76):
    """Replace the module-level ``sample_spec`` with a new configuration.

    Must be called before the spec is first used: once generators have been
    created (``sample_spec.generators`` is non-None) redefinition is refused,
    because existing datasets would silently change distribution.
    """
    global sample_spec
    assert sample_spec.generators is None, 'attempting to redefine spec after it has been used'
    sample_spec = SampleSpec(num_pairs=num_pairs, num_classes=num_classes, im_dim=im_dim,
                             min_cell=15, max_cell=18, reset_every=reset_every)
class ToTensor(object):
    """Convert a numpy image into a float torch tensor with a leading channel axis."""

    def __init__(self, numpy_base_type=np.float32):
        # dtype the incoming array is cast to before tensor conversion
        self.numpy_base_type = numpy_base_type

    def __call__(self, img):
        """Cast ``img`` to the configured dtype, prepend a channel dim, wrap as tensor."""
        arr = img.astype(self.numpy_base_type)
        arr = arr[np.newaxis, ...]
        return torch.from_numpy(arr)
class GridGenerator(Dataset):
    """Dataset that synthesizes grid samples on demand via the module-level
    ``sample_spec``; behaves as both an (infinite) iterator and a Dataset."""

    def __init__(self, batch_size, batches_per_epoch,
                 transform=None,
                 target_transform=None):
        self.batch_size = batch_size
        self.batches_per_epoch = batches_per_epoch
        self.current_batch = 0  # running count of samples handed out
        self.transform = transform
        self.target_transform = target_transform
        # XXX: lots of other code does len(dataset) to get size,
        # so accommodate that with a simple 0 list
        self.dataset = [0] * (batches_per_epoch * batch_size)

    def __len__(self):
        return self.batch_size * self.batches_per_epoch

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 iteration protocol delegates to next()
        return self.next()

    def __getitem__(self, index):
        # index is ignored: every access yields a freshly generated sample
        return self.next()

    @staticmethod
    def _apply_all(transforms, value):
        """Run ``value`` through every non-None entry of ``transforms``."""
        if transforms is not None:
            for t in transforms:
                if t is not None:
                    value = t(value)
        return value

    def next(self):
        """Generate one (img, target) pair and apply the configured transforms."""
        self.current_batch += 1
        img, target = sample_spec.generate(1)
        img, target = np.squeeze(img), np.squeeze(target)
        img = self._apply_all(self.transform, img)
        target = self._apply_all(self.target_transform, target)
        return img, target
class GridDataLoader(object):
    """Bundles a train DataLoader of on-the-fly generated samples with a
    fixed, deterministically seeded test DataLoader.

    NOTE(review): the caller-supplied ``transform``/``target_transform``,
    ``path``, ``train_sampler``/``test_sampler`` and ``**kwargs`` are never
    used — ``kwargs`` is overwritten below and the train dataset hard-codes
    ``[ToTensor()]`` — confirm this is intended.
    """
    def __init__(self, path=None, batch_size=128, train_sampler=None, test_sampler=None,
                 transform=None, target_transform=None, use_cuda=1,
                 batches_per_epoch=500, test_frac=0.2, **kwargs):
        self.batch_size = batch_size
        self.batches_per_epoch = batches_per_epoch
        # number of outputs exposed to downstream model-building code
        self.output_size = 2
        # build the torch train dataloader
        # (caller **kwargs are discarded here and replaced by loader options)
        kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
        train_dataset = GridGenerator(batch_size, batches_per_epoch, transform=[ToTensor()])
        self.train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=batch_size,
            drop_last=True,
            shuffle=True,
            **kwargs)
        # tabulate samples counts
        total_samples = batches_per_epoch * batch_size
        num_test_batches = int(test_frac * total_samples) // batch_size
        num_test_samples = num_test_batches * batch_size
        # generate all test samples independently and store away
        print("starting full test set [%d samples] generation [this might take a while]..." % num_test_samples)
        # Temporarily pin the numpy RNG so every run sees the identical test
        # set, then restore the caller's random state afterwards.
        old_state = np.random.get_state()
        np.random.seed(123456) # always make the same test set
        test_imgs, test_labels, stats = sample_spec.blocking_generate_with_stats(num_test_samples)
        print("generator retry rate = {}".format(stats['num_retries']/float(stats['num_generated'])))
        np.random.set_state(old_state)
        #test_dataset = torch.utils.data.TensorDataset(torch.from_numpy(test_imgs - 0.5), torch.from_numpy(test_labels))
        test_dataset = torch.utils.data.TensorDataset(torch.from_numpy(test_imgs),
                                                      torch.from_numpy(test_labels))
        print("test samples successfully generated...")
        # generate test dataloader using above samples
        self.test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=batch_size,
            drop_last=True,
            shuffle=False, # don't shuffle here to keep consistency
            **kwargs)
        # grab a test sample to set the size in the loader
        test_img, _ = train_dataset.__next__()
        self.img_shp = list(test_img.size())
print("derived image shape = ", self.img_shp) | |
#!/usr/bin/env python3
import sys
import mpmath as mp
import psr_common
# Working precision: 250 significant digits so every constant below is exact
# to the 18 digits printed into the header.
# NOTE(review): `mp.dps = 250` sets an attribute on the mpmath module object;
# the effective context precision is the `mp.mp.dps` assignment that follows —
# the first line is presumably redundant, confirm before removing.
mp.dps=250
mp.mp.dps = 250
# Require exactly one argument: the base name of the generated header file.
if len(sys.argv) != 2:
    print("Usage: generate_constants.py outbase")
    quit(1)
outbase = sys.argv[1]
# Map: NAME -> (value, description, reference-or-None)
constants = {}
# All constants to generate
# variable base value description reference
constants["PI"] = (mp.pi, "The famous pi", "mmm pie" )
constants["TWO_PI"] = (mp.mpf('2.0')*mp.pi, "2*pi", None )
constants["PI_SQUARED"] = (mp.pi*mp.pi, "pi^2", None )
constants["SQRT_PI"] = (mp.sqrt(mp.pi), "sqrt(pi)", None )
constants["ONE_OVER_PI"] = (mp.mpf('1.0')/mp.pi, "1/pi", None )
constants["ONE_OVER_SQRT_PI"] = (mp.mpf('1.0')/mp.sqrt(mp.pi), "1/sqrt(pi)", None )
constants["SPEED_OF_LIGHT_SI"] = (mp.mpf('299792458'), "Speed of light in m/s", "NIST CODATA 2014" )
constants["BOHR_RADIUS_SI"] = (mp.mpf('0.52917721067e-10'), "Bohr radius (AU of length) in m", "NIST CODATA 2014" )
constants["BOHR_RADIUS_ANGSTROMS"] = (mp.mpf('0.52917721067'), "Bohr radius (AU of length) in A", "NIST CODATA 2014" )
constants["ELECTRON_MASS_SI"] = (mp.mpf('9.10938356e-31'), "Mass of electron in kg", "NIST CODATA 2014" )
constants["AVOGADROS_CONSTANT"] = (mp.mpf('6.022140857e23'), "Avogadro's constant (mol^-1)", "NIST CODATA 2014" )
constants["BOLTZMANN_CONSTANT_SI"] = (mp.mpf('1.38064852e-23'), "Boltzmann's constant in J/K", "NIST CODATA 2014" )
constants["BOLTZMANN_CONSTANT_EV_K"] = (mp.mpf('8.6173303e-5'), "Boltzmann's constant in eV/K", "NIST CODATA 2014" )
constants["JOULE_TO_EV"] = (mp.mpf('6.241509126e18'), "Joule -> eV relationship (eV)", "NIST CODATA 2014" )
constants["JOULE_TO_HARTREE"] = (mp.mpf('2.293712317e17'), "Joule -> Hartree relationship (E_H)", "NIST CODATA 2014" )
constants["HARTREE_TO_JOULE"] = (mp.mpf('4.359744650e-18'), "Hartree -> Joule relationship (J)", "NIST CODATA 2014" )
constants["ANGSTROM_SI"] = (mp.mpf('1e-10'), "Angstrom (in m)", None )
constants["PLANCK_H_SI"] = (mp.mpf('6.626070040e-34'), "Planck's constant in J*s", "NIST CODATA 2014" )
constants["PLANCK_HBAR_SI"] = (mp.mpf('1.054571800e-34'), "h/(2pi)", "NIST CODATA 2014" )
constants["AU_TIME_SI"] = (mp.mpf('2.418884326509e-17'), "Atomic unit of time (in s)", "NIST CODATA_2014" )
constants["AU_VELOCITY_SI"] = (mp.mpf('2.18769126277e6'), "Atomic unit of velocity (in m/s)", "NIST CODATA_2014" )
constants["SPEED_OF_LIGHT_AU"] = (mp.fdiv(constants['SPEED_OF_LIGHT_SI'][0], constants['AU_VELOCITY_SI'][0]), "Speed of light in atomic units", "Derived from NIST CODATA 2014" )
# Some aliases
constants["ATOMIC_UNIT_MASS"] = constants['ELECTRON_MASS_SI']
constants["ATOMIC_UNIT_LENGTH"] = constants['BOHR_RADIUS_SI']
constants["ATOMIC_UNIT_ENERGY"] = constants['JOULE_TO_HARTREE']
constants["ATOMIC_UNIT_TIME"] = constants['AU_TIME_SI']
constants["ATOMIC_UNIT_VELOCITY"] = constants['AU_VELOCITY_SI']
# keys in alphabetical order
keys = sorted(constants.keys())
# Emit a plain-C header (no companion source file): one Doxygen /*! ... */
# comment and one #define per constant.
with psr_common.HeaderSourceFiles(outbase, "Various constants and conversion factors",
                                  [], # no namespaces
                                  createsource = False,
                                  plainc = True) as src:
    for c in keys:
        v = constants[c]
        src.fh.write("/*! \\brief {}\n".format(v[1]))
        src.fh.write(" *\n")
        if v[2]:
            src.fh.write(" * {}\n".format(v[2]))
        src.fh.write(" */\n")
        # setting min_fixed > max_fixed results in always using floating-point format
        #src.fh.write("#define {}_F {}f\n".format(c, mp.nstr(v[0], 10, strip_zeros=True, min_fixed=1, max_fixed=0))) # float (single precision)
        src.fh.write("#define {} {}\n".format(c, mp.nstr(v[0], 18, strip_zeros=True, min_fixed=1, max_fixed=0))) # double precision
src.fh.write("\n\n") | |
"""
Configuration file for pytest, containing global ("session-level") fixtures.
"""
import pytest
from astropy.utils.data import download_file
import vip_hci as vip
@pytest.fixture(scope="session")
def example_dataset():
    """
    Download example FITS cube from github + prepare HCIDataset object.

    Returns
    -------
    dataset : HCIDataset
        Cropped beta Pic NACO cube with parallactic angles and a
        normalized PSF attached (``dataset.psf`` is the normalized one).

    Notes
    -----
    Astropy's ``download_file`` uses caching, so the file is downloaded at most
    once per test run. Session scope: all tests share the same object, so
    tests must not mutate it.
    """
    print("downloading data...")
    # URL pins the tutorial repo to a fixed commit so the test data can never
    # silently change underneath the test suite.
    url_prefix = ("https://github.com/carlgogo/vip-tutorial/raw/"
                  "ced844dc807d3a21620fe017db116d62fbeaaa4a")
    f1 = download_file("{}/naco_betapic.fits".format(url_prefix), cache=True)
    f2 = download_file("{}/naco_psf.fits".format(url_prefix), cache=True)
    # load fits: HDU 0 is the image cube, HDU 1 the parallactic angles
    cube = vip.fits.open_fits(f1, 0)
    angles = vip.fits.open_fits(f1, 1).flatten()  # shape (61,1) -> (61,)
    psf = vip.fits.open_fits(f2)
    # create dataset object
    dataset = vip.HCIDataset(cube, angles=angles, psf=psf, px_scale=0.01225)
    # crop to speed the tests up
    dataset.crop_frames(size=100, force=True)
    dataset.normalize_psf(size=38, force_odd=False)
    # overwrite PSF for easy access
    dataset.psf = dataset.psfn
return dataset | |
"""
Copyright (c) 2017 - Philip Paquette
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# -*- coding: utf-8 -*-
# Modified from https://github.com/Newmu/dcgan_code/blob/master/lib/ops.py
# MIT License
from math import floor
import theano
import theano.tensor as T
import theano.tensor.signal.pool as T_pool
from .rng import t_rng
# ------------------------
# Activation Layers
# ------------------------
def linear(x):
    """Identity activation: return the input unchanged."""
    return x
def relu(x):
    """Rectified linear unit, max(x, 0), expressed via abs() so it also
    works on symbolic tensors."""
    return 0.5 * (x + abs(x))
def clipped_relu(x, max=256.):
    """ReLU capped at `max` (note: the parameter shadows the builtin max)."""
    return T.clip((x + abs(x)) / 2., 0., max)
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU: x for x > 0, alpha*x otherwise, in branch-free form."""
    return 0.5 * ((1 + alpha) * x + (1 - alpha) * abs(x))

# conventional short name
lrelu = leaky_relu
def prelu(x, alpha):
    """Parametric ReLU: leaky ReLU whose slope `alpha` is a learned tensor.

    For 4-D (batch, channel, h, w) inputs, `alpha` is a per-channel vector
    broadcast across batch and spatial axes via dimshuffle.
    """
    # Fixed: Theano variables expose their rank as `.ndim`; the original
    # tested `x.dim`, which does not exist and raised AttributeError for
    # every 4-D input.
    alpha = alpha.dimshuffle('x', 0, 'x', 'x') if x.ndim == 4 else alpha
    return leaky_relu(x, alpha)
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + exp(-x))."""
    return T.nnet.sigmoid(x)
def fast_sigmoid(x):
    """Approximate sigmoid that trades accuracy for speed (ultra_fast_sigmoid)."""
    return T.nnet.ultra_fast_sigmoid(x)
def hard_sigmoid(x):
    """Piecewise-linear approximation of the sigmoid."""
    return T.nnet.hard_sigmoid(x)
def tanh(x):
    """Hyperbolic tangent activation."""
    return T.tanh(x)
def hard_tanh(x):
    """Hard tanh: clip x into [-1, 1]."""
    # NOTE(review): relies on T.clip accepting min=/max= keyword names —
    # confirm against the installed Theano version.
    return T.clip(x, min=-1., max=1.)
def softplus(x):
    """Softplus activation, log(1 + exp(x))."""
    return T.nnet.softplus(x)
def softmax(x):
    """Row-wise softmax over the last axis."""
    return T.nnet.softmax(x)
def elu(x, alpha=1.):
    """Exponential linear unit with saturation parameter `alpha`."""
    return T.nnet.elu(x, alpha)
def maxout(x, nb_pool):
    """Maxout activation: elementwise max over `nb_pool` interleaved feature groups.

    Supports 2-D (batch, features) and 4-D (batch, channels, h, w) inputs;
    the feature/channel axis size must be a multiple of nb_pool.
    """
    if x.ndim == 2:
        x = T.max([x[:, n::nb_pool] for n in range(nb_pool)], axis=0)
    elif x.ndim == 4:
        x = T.max([x[:, n::nb_pool, :, :] for n in range(nb_pool)], axis=0)
    else:
        raise NotImplementedError
    return x
# ------------------------
# Cost Layers
# ------------------------
def CategoricalCrossEntropy(y_pred, y_true):
    """Mean categorical cross-entropy; y_pred holds class probabilities."""
    return T.nnet.categorical_crossentropy(y_pred, y_true).mean()
def BinaryCrossEntropy(y_pred, y_true):
    """Mean binary cross-entropy between predicted probabilities and targets."""
    return T.nnet.binary_crossentropy(y_pred, y_true).mean()
def MeanSquaredError(y_pred, y_true):
    """Mean of squared residuals."""
    return T.sqr(y_pred - y_true).mean()
def MeanAbsoluteError(y_pred, y_true):
    """Mean of absolute residuals."""
    return T.abs_(y_pred - y_true).mean()
def SquaredHinge(y_pred, y_true):
    """Mean squared hinge loss; y_true is expected in {-1, +1}."""
    return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean()
def SquaredError(y_pred, y_true):
    """Sum (not mean) of squared residuals."""
    return T.sqr(y_pred - y_true).sum()
def Hinge(y_pred, y_true):
    """Mean hinge loss; y_true is expected in {-1, +1}."""
    return T.maximum(1. - y_true * y_pred, 0.).mean()
# Short lowercase/uppercase aliases for the cost functions above
cce = CCE = CategoricalCrossEntropy
bce = BCE = BinaryCrossEntropy
mse = MSE = MeanSquaredError
mae = MAE = MeanAbsoluteError
# ------------------------
# Regular Layers
# ------------------------
def l2normalize(x, axis=1, e=1e-8, keepdims=True):
    """Scale x to (approximately) unit L2 norm along `axis`; e avoids div-by-zero."""
    return x/l2norm(x, axis=axis, e=e, keepdims=keepdims)
def l2norm(x, axis=1, e=1e-8, keepdims=True):
    """L2 norm along `axis`; e is added under the sqrt for numerical safety."""
    return T.sqrt(T.sum(T.sqr(x), axis=axis, keepdims=keepdims) + e)
def cosine(x, y):
    """Pairwise cosine similarity matrix between the rows of x and the rows of y."""
    d = T.dot(x, y.T)
    # divide each row/column by the corresponding vector norms
    d /= l2norm(x).dimshuffle(0, 'x')
    d /= l2norm(y).dimshuffle('x', 0)
    return d
def euclidean(x, y, e=1e-8):
    """Pairwise Euclidean distance matrix between the rows of x and the rows of y.

    Uses the expansion ||a-b||^2 = ||a||^2 - 2*a.b + ||b||^2; `e` keeps the
    squared norms strictly positive (note sqr(sqrt(v + e)) is just v + e).
    """
    xx = T.sqr(T.sqrt((x*x).sum(axis=1) + e))
    yy = T.sqr(T.sqrt((y*y).sum(axis=1) + e))
    dist = T.dot(x, y.T) # sqrt(x^2 - 2x*y + y^2)
    dist *= -2
    dist += xx.dimshuffle(0, 'x')
    dist += yy.dimshuffle('x', 0)
    dist = T.sqrt(dist)
    return dist
def concat(tensor_list, axis=0):
    """Thin wrapper over T.concatenate joining `tensor_list` along `axis`."""
    return T.concatenate(tensor_list, axis)
def pool(X, ws, ignore_border=None, stride=None, pad=(0,0), mode='max'):
    """ Generic pooling layer
    X - input (N-D theano tensor of input images) - Input images. Max pooling will be done over the 2 last dimensions.
    ws (tuple of length 2) - Factor by which to downscale (vertical ws, horizontal ws). (2,2) will halve the image in each dimension.
    ignore_border (bool (default None, will print a warning and set to False)) - When True, (5,5) input with ws=(2,2) will generate a (2,2) output. (3,3) otherwise.
    stride (tuple of two ints) - Stride size, which is the number of shifts over rows/cols to get the next pool region. If stride is None, it is considered equal to ws (no overlap on pooling regions).
    pad (tuple of two ints) - (pad_h, pad_w), pad zeros to extend beyond four borders of the images, pad_h is the size of the top and bottom margins, and pad_w is the size of the left and right margins.
    mode ({'max', 'sum', 'average_inc_pad', 'average_exc_pad'}) - Operation executed on each window. max and sum always exclude the padding in the computation. average gives you the choice to include or exclude it.
    """
    # Only 2-D pooling windows are supported; pooling acts on the last 2 dims.
    if X.ndim >= 2 and len(ws) == 2:
        return T_pool.pool_2d(input=X, ws=ws, ignore_border=ignore_border, stride=stride, pad=pad, mode=mode)
    else:
        raise NotImplementedError
def max_pool_1d(X, ws=2, ignore_border=True, stride=None, pad=0):
    """ Max pooling layer in 1 dimension - see pool() for details """
    # Add a dummy trailing axis so the generic 2-D pooling can be reused,
    # then strip that axis off again at the end.
    input = X.dimshuffle(0, 1, 2, 'x')
    ws = (ws, 1)
    stride = ws if stride is None else (stride, 1)
    pad = (pad, 0)
    pooled = pool(input, ws=ws, ignore_border=ignore_border, stride=stride, pad=pad, mode='max')
    return pooled[:, :, :, 0]
def max_pool(X, ws=(2,2), ignore_border=True, stride=None, pad=(0,0)):
    """ Max pooling layer - see pool() for details """
    return pool(X, ws=ws, ignore_border=ignore_border, stride=stride, pad=pad, mode='max')
def avg_pool(X, ws=(2,2), ignore_border=True, stride=None, pad=(0,0)):
    """ Average pooling layer (padding included in the average) - see pool() """
    return pool(X, ws=ws, ignore_border=ignore_border, stride=stride, pad=pad, mode='average_inc_pad')
def unpool(X, us):
    """ Unpooling layer
    X - input (N-D theano tensor of input images)
    us (tuple of length >= 1) - Factor by which to upscale (vertical ws, horizontal ws). (2,2) will double the image in each dimension. - Factors are applied to last dimensions of X
    """
    x_dims = X.ndim
    output = X
    # walk the factors from the last axis backwards, repeating elements
    # along each axis whose factor is > 1
    for i, factor in enumerate(us[::-1]):
        if factor > 1:
            output = T.repeat(output, factor, x_dims - i - 1)
    return output
# Luke Perforated Upsample
# Source: http://www.brml.org/uploads/tx_sibibtex/281.pdf
# Code from: https://gist.github.com/kastnerkyle/f3f67424adda343fef40
def perforated_upsample(X, factor):
    """Upsample a 4-D (N, C, H, W) tensor by `factor`, placing each input
    value in the top-left cell of its factor x factor output block (zeros
    elsewhere). Implemented as a matmul with a symbolic 0/1 scatter matrix.
    """
    output_shape = [X.shape[1], X.shape[2] * factor, X.shape[3] * factor]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor
    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    # Fixed: use integer (floor) division for the row-block offset. This code
    # predates true-division semantics; `rows / stride` on int tensors yields
    # a float quotient, which after the int32 cast scatters ones into the
    # wrong columns for every row index not divisible by `stride`.
    cols = T.cast(rows * factor + (rows // stride) * factor * offset, 'int32')
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)
    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0], output_shape[1], output_shape[2]))
    return upsamp
def repeat_upsample(X, factor):
    """Nearest-neighbour upsampling: repeat each pixel `factor` times in H and W."""
    return unpool(X, us=(factor, factor))
def bilinear_upsample(X, factor):
    """Bilinear-interpolated upsampling of a 4-D (N, C, H, W) tensor by `factor`."""
    return theano.tensor.nnet.abstract_conv.bilinear_upsampling(X, factor, batch_size=X.shape[0], num_input_channels=X.shape[1])
def batchnorm(X, g=None, b=None, u=None, s=None, a=1., e=1e-8):
    """
    batchnorm with support for not using scale and shift parameters
    as well as inference values (u and s) and partial batchnorm (via a)
    will detect and use convolutional or fully connected version

    g, b - learned scale (gamma) and shift (beta), both or neither
    u, s - precomputed inference mean and variance, both or neither
    a    - blend factor between identity stats (mean 0 / var 1) and the
           batch stats; a=1 is full batchnorm
    e    - numerical-stability epsilon under the sqrt
    """
    if X.ndim == 4:
        # convolutional case: normalize per channel over (batch, h, w)
        if u is not None and s is not None:
            _u = u.dimshuffle('x', 0, 'x', 'x')
            _s = s.dimshuffle('x', 0, 'x', 'x')
        else:
            _u = T.mean(X, axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            _s = T.mean(T.sqr(X - _u), axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
        if a != 1:
            # partial batchnorm: interpolate towards mean 0 / variance 1
            _u = (1. - a) * 0. + a * _u
            _s = (1. - a) * 1. + a * _s
        X = (X - _u) / T.sqrt(_s + e)
        if g is not None and b is not None:
            X = X * g.dimshuffle('x', 0, 'x', 'x') + b.dimshuffle('x', 0, 'x', 'x')
    elif X.ndim == 2:
        # fully connected case: normalize per feature over the batch axis
        if u is None and s is None:
            u = T.mean(X, axis=0)
            s = T.mean(T.sqr(X - u), axis=0)
        if a != 1:
            u = (1. - a) * 0. + a * u
            s = (1. - a) * 1. + a * s
        X = (X - u) / T.sqrt(s + e)
        if g is not None and b is not None:
            X = X * g + b
    else:
        raise NotImplementedError
    return X
def dropout(X, p=0.):
    """Inverted dropout: zero units with probability p and rescale survivors
    by 1/(1-p), so expected activations are unchanged; no-op when p <= 0."""
    if p > 0:
        retain_prob = 1 - p
        X *= t_rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
        X /= retain_prob
    return X
def conv_1d(X, w, b=None, border_mode='valid', subsample=(1,), filter_flip=True):
    """1-D convolution implemented by appending a dummy width axis and
    delegating to T.nnet.conv2d; see conv() for parameter semantics."""
    # normalize border_mode: a 1-tuple or int pads only the temporal axis
    if isinstance(border_mode, tuple):
        (border_mode,) = border_mode
    if isinstance(border_mode, int):
        border_mode = (border_mode, 0)
    input = X.dimshuffle(0, 1, 2, 'x')
    filter = w.dimshuffle(0, 1, 2, 'x')
    conved = T.nnet.conv2d(input, filter, subsample=(subsample[0], 1), border_mode=border_mode, filter_flip=filter_flip)
    # drop the dummy width axis again
    conved = conved[:, :, :, 0]
    if b is not None:
        conved += b.dimshuffle('x', 0, 'x')
    return conved
def conv(X, w, b=None, border_mode='half', subsample=(1, 1), filter_flip=True):
    """ Generic convolution layer
    X - input (symbolic 4D tensor) - Mini-batch of feature map stacks, of shape (batch size, input channels, input rows, input columns). See the optional parameter input_shape.
    w - filters (symbolic 4D tensor) - Set of filters used in CNN layer of shape (output channels, input channels, filter rows, filter columns). See the optional parameter filter_shape.
    b - optional per-output-channel bias vector, broadcast over batch and space
    border_mode 'valid', 'full', 'half', int, (int1, int2)
    subsample (tuple of len 2) - Factor by which to subsample the output. Also called strides elsewhere.
    filter_flip (bool) - If True, will flip the filter rows and columns before sliding them over the input. This operation is normally referred to as a convolution, and this is the default. If False, the filters are not flipped and the operation is referred to as a cross-correlation
    """
    output =\
        T.nnet.conv2d(
            input=X,
            filters=w,
            border_mode=border_mode,
            subsample=subsample,
            filter_flip=filter_flip)
    if b is not None:
        output += b.dimshuffle('x', 0, 'x', 'x')
    return output
def deconv(X, X_shape, w, b=None, border_mode='half', subsample=(1, 1), filter_flip=True, a=None, target_size=None):
    """ Transposed convolution (deconvolution) layer
    X - input (symbolic 4D tensor) - This is the input to the transposed convolution
    w - filters (symbolic 4D tensor) - Set of filters used in CNN layer of shape (nb of channels of X, nb of channels of output, filter rows, filter columns). The first 2 parameters are inversed compared to a normal convolution. (Usually (output channels, input channels))
    border_mode 'valid', 'full', 'half', int, (int1, int2)
    subsample (tuple of len 2) - Factor by which to subsample the output. Also called strides elsewhere.
    filter_flip (bool) - If True, will flip the filter rows and columns before sliding them over the input. This operation is normally referred to as a convolution, and this is the default. If False, the filters are not flipped and the operation is referred to as a cross-correlation
    a (tuple of len 2) - Additional padding to add to the transposed convolution to get a specific output size
    input_shape (tuple of len 4) - The size of the variable X
    target_size (tuple of len 2 or 4) - indicates the shape you want to get as a result of the transposed convolution (e.g. (64,64) or (128,3,64,64)).
    """
    # Calculating size of o_prime (output after transposed convolution)
    # NOTE: w.get_value() requires w to be a shared variable with a
    # concrete numpy value (the filter shape must be known at graph build time).
    w_shape = w.get_value().shape
    x_chan, i1, i2 = X_shape[1], X_shape[2], X_shape[3]
    c2, c1, k1, k2 = w_shape[0], w_shape[1], w_shape[2], w_shape[3] # We are going from c2 (nb of channels of X) to c1 (nb of channels of result) ...
    s1, s2 = subsample[0], subsample[1]
    assert c2 == x_chan
    # translate border_mode into explicit (p1, p2) padding amounts
    if border_mode == 'half':
        p1, p2 = floor(k1 / 2.), floor(k2 / 2.)
    elif border_mode == 'full':
        p1, p2 = k1 - 1, k2 - 1
    elif border_mode == 'valid':
        p1, p2 = 0, 0
    elif isinstance(border_mode, tuple):
        p1, p2 = border_mode[0], border_mode[1]
    elif isinstance(border_mode, int):
        p1, p2 = border_mode, border_mode
    else:
        raise NotImplementedError
    # 'a' represents additional padding on top and right edge
    # adjust to modify the shape of the result of the transposed convolution
    if a is None:
        if target_size is not None:
            # choose (a1, a2) so the output matches the requested spatial size
            orig_i1, orig_i2 = target_size[-2], target_size[-1]
            a1 = (orig_i1 + 2 * p1 - k1) % s1
            a2 = (orig_i2 + 2 * p2 - k2) % s2
        else:
            a1, a2 = s1 - 1, s2 - 1
    else:
        a1, a2 = a[0], a[1]
    # standard transposed-convolution output-size formula
    o_prime1 = int(s1 * (i1 - 1) + a1 + k1 - 2 * p1)
    o_prime2 = int(s2 * (i2 - 1) + a2 + k2 - 2 * p2)
    # Transposed Convolution, expressed as the gradient of conv2d w.r.t. its input
    output =\
        T.nnet.abstract_conv.conv2d_grad_wrt_inputs(
            output_grad=X,
            filters=w,
            input_shape=(None, c1, o_prime1, o_prime2),
            border_mode=(p1, p2),
            subsample=(s1, s2),
            filter_flip=filter_flip)
    if b is not None:
        output += b.dimshuffle('x', 0, 'x', 'x')
return output | |
import random
from typing import Callable, Dict, List
import albumentations as alb
import numpy as np
import torch
from torch.utils.data import Dataset
from virtex.data.tokenizers import SentencePieceBPETokenizer
from virtex.data import transforms as T
from .arch_captions import ArchCaptionsDatasetRaw
class ArchCaptioningDatasetExtended(Dataset):
    r"""
    A dataset which provides image-caption (forward and backward) pairs from
    a ARCH Captions annotation file. This is used for pretraining tasks which
    use captions - bicaptioning, forward captioning and token classification.

    Each item is a *bag* of images sharing one caption.

    Args:
        data_root: Path to dataset directory containing images and annotations.
        source: Name of ARCH source to read. One of ``{"pubmed", "books", "both"}``.
            "both" option results in a concatenation of the datasets from "pubmed" and "books"
        split: Name of ARCH split to read. One of ``{"train", "val", "all"}``.
        tokenizer: Tokenizer which maps word tokens to their integer IDs.
        image_transform: List of image transformations, from either
            `albumentations <https://albumentations.readthedocs.io/en/latest/>`_
            or :mod:`virtex.data.transforms`.
        tensor_flip_transform: Optional transform applied jointly to the whole
            stacked image tensor and the caption (e.g. a horizontal flip that
            must be consistent across the bag).
        max_caption_length: Maximum number of tokens to keep in caption tokens.
            Extra tokens will be trimmed from the right end of the token list.
    """
    def __init__(
        self,
        data_root: str,
        split: str,
        tokenizer: SentencePieceBPETokenizer,
        source: str = "both",
        image_transform: Callable = T.ARCH_DEFAULT_IMAGE_TRANSFORM,
        tensor_flip_transform: Callable = None,
        max_caption_length: int = 30,
    ):
        self._dset = ArchCaptionsDatasetRaw(data_root=data_root, source=source,
                                            split=split)
        self.image_transform = image_transform
        self.tensor_flip_transform = tensor_flip_transform
        # caption pipeline: normalize -> tokenize -> truncate to max length
        self.caption_transform = alb.Compose(
            [
                T.NormalizeCaption(),
                T.TokenizeCaption(tokenizer),
                T.TruncateCaptionTokens(max_caption_length),
            ]
        )
        # NOTE(review): padding uses the "<unk>" token id — confirm the vocab
        # has no dedicated pad token before relying on this.
        self.padding_idx = tokenizer.token_to_id("<unk>")
    def __len__(self):
        # number of bags (annotation entries) in the underlying raw dataset
        return len(self._dset)
    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Return one bag: stacked image tensors plus forward ("caption") and
        reversed ("noitpac") caption token sequences."""
        # keys: {"image_ids", "images", "caption"}
        instance = self._dset[idx]
        image_ids, images, caption = (
            instance["image_ids"],
            instance["images"],
            instance["caption"],
        )
        # List[int] -> np.array of shape (len(image_ids), )
        image_ids = np.array(image_ids)
        # (len(image_ids), ) -> (len(image_ids), 1)
        image_ids = image_ids.reshape((image_ids.shape[0], 1))
        # Transform images, no flips at this stage not to create multiple versions of the caption!
        # Before flipping all images need to be resized to the same size to put them into a tensor.
        # Caption won't be tokenized/processed here.
        # Albumentations transforms require named arguments - can't avoid it.
        images = [self.image_transform(image=image)["image"] for image in
                  images]
        # Convert each image from HWC to CHW format and convert to tensors:
        # PyTorch Transforms expect to receive tensors in (B, C, H, W) shape
        # [(Channel, Height, Width), ..., ] Bag Size times
        images = [np.transpose(image, (2, 0, 1)) for image in images]
        images = [torch.tensor(image, dtype=torch.float) for image in images]
        # stack all the images into a tensor: (bag_size=batch_size, Channel, Height, Width)
        images = torch.stack(images, dim=0)
        if self.tensor_flip_transform is not None:
            # perform tensor transforms on images in the tensor and the
            # corresponding caption, e.g. random horizontal flips
            # Reason: single version of the caption should appear => random flip
            # should be performed on all images in a bag
            images_caption = self.tensor_flip_transform(image=images, caption=caption)
            images, caption = images_caption["image"], images_caption["caption"]
        # caption tokens (normalization/tokenization happens here)
        caption_tokens = self.caption_transform(caption=caption)["caption"]
        return {
            "image_ids": torch.tensor(image_ids, dtype=torch.long), #(bag_size,1)
            "images": images,
            "caption_tokens": torch.tensor(caption_tokens, dtype=torch.long),
            # "noitpac" = "caption" reversed: tokens for the backward head
            "noitpac_tokens": torch.tensor(caption_tokens,
                                           dtype=torch.long).flip(0),
            "caption_lengths": torch.tensor(len(caption_tokens),
                                            dtype=torch.long),
        }
    def collate_fn(
        self, data: List[Dict[str, torch.Tensor]]
    ) -> Dict[str, torch.Tensor]:
        """Pad variable-length caption token sequences with padding_idx and
        stack the per-bag tensors into a batch."""
        # Pad `caption_tokens` and `masked_labels` up to this length.
        caption_tokens = torch.nn.utils.rnn.pad_sequence(
            [d["caption_tokens"] for d in data],
            batch_first=True,
            padding_value=self.padding_idx,
        )
        noitpac_tokens = torch.nn.utils.rnn.pad_sequence(
            [d["noitpac_tokens"] for d in data],
            batch_first=True,
            padding_value=self.padding_idx,
        )
        return {
            "image_id": torch.stack([d["image_ids"] for d in data], dim=0),
            "image": torch.stack([d["images"] for d in data], dim=0),
            "caption_tokens": caption_tokens,
            "noitpac_tokens": noitpac_tokens,
            "caption_lengths": torch.stack(
                [d["caption_lengths"] for d in data]),
} | |
# -*- coding: utf-8 -*-
"""Ramdom_Search_Classes.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bJw4Q1F3TYv8okhNqNTtw5CdCFH7-vrh
"""
import numpy as np
import tensorflow as tf
import keras
from keras import models
from keras import layers
from keras import optimizers
from keras.applications.resnet50 import ResNet50
from keras.layers import Flatten, Dense, Dropout, UpSampling2D
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Activation, Convolution2D, BatchNormalization, GlobalAveragePooling2D, Dropout
from keras.models import Sequential
from keras.initializers import RandomNormal
from keras.datasets import cifar10
import matplotlib.pyplot as plt
from keras.utils.np_utils import to_categorical
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
import keras.backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import Callback,ModelCheckpoint
from keras.datasets import cifar10
import numpy as np
import matplotlib.pyplot as plt
import random as rand
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
import sklearn
from sklearn.model_selection import ShuffleSplit
def get_data(skew_ratio, num_of_class):
    """Load CIFAR-10 and artificially under-sample `num_of_class` random classes.

    Args:
        skew_ratio: divisor applied to the 5000 per-class training samples;
            each skewed class keeps int(5000 / skew_ratio) samples.
        num_of_class: how many distinct classes (of the 10) to skew.

    Returns:
        (x_train, y_train, x_test, y_test, class_index) where y_train is 1-D
        (np.delete without axis flattens it), y_test keeps its loaded shape,
        and class_index lists the class ids that were under-sampled.
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # (Removed: the original called y_train.reshape(...) / y_test.reshape(...)
    # without assigning the results -- pure no-ops, since ndarray.reshape
    # returns a new view rather than modifying in place.)
    skew_amount = int(5000 / skew_ratio)
    # pick the classes to skew, without replacement
    class_index = np.random.choice(10, num_of_class, replace=False)
    for j in class_index:
        indices = np.where(y_train == j)[0]
        np.random.shuffle(indices)
        # keep the first skew_amount samples of class j, drop the rest
        delete_indices = indices[skew_amount:]
        y_train = np.delete(y_train, list(delete_indices))
        x_train = np.delete(x_train, list(delete_indices), axis=0)
    return x_train, y_train, x_test, y_test, class_index
# ONLY WHEN REQUIRED
# Drive folder where the skewed dataset / checkpoints for this run live.
path = "/content/drive/My Drive/CIFAR10/Imbalance/Class_Fraction_Constant/3/50"
# Skew 3 randomly chosen classes down to 1/5 of their samples (1000 each).
x_train, y_train, x_test, y_test, class_index = get_data(5, 3)
# np.savez(path + "/data.npz",name1 = x_train, name2 = y_train, name3 = x_test, name4 = y_test)
num_classes = 10
# keep integer-label copies for the class-frequency analysis further below
y_test_1 = y_test
y_train_1 = y_train
num_classes = 10
# one-hot encode labels for categorical cross-entropy training
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
def get_f1(y_true, y_pred): #taken from old keras source code
    """Batch-wise F1 metric built from Keras backend ops.

    Rounds clipped predictions/labels to {0,1} before counting, and guards
    each division with K.epsilon(). Note the value is averaged per batch,
    so the epoch-level number only approximates the true F1.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2*(precision*recall)/(precision+recall+K.epsilon())
    return f1_val
def nn_model():
    """Build and compile the All-CNN-style CIFAR-10 classifier.

    Architecture: 3x3 conv blocks (two with stride 2 acting as learned
    downsampling), then 1x1 convs down to 10 channels, global average
    pooling and a softmax over the 10 classes. Every conv is followed by
    BatchNorm + ReLU; dropout grows from 0.25 to 0.5 with depth.

    Returns:
        A compiled ``Sequential`` model (Adam optimizer, categorical
        cross-entropy loss, custom ``get_f1`` metric).
    """
    classifier = Sequential()
    # NOTE: despite the variable name, this is a *normal* (Gaussian) initializer.
    random_uniform = RandomNormal(mean=0.0, stddev=0.01, seed=None)
    # classifier.add(Dropout(0.25,input_shape = (32,32,3)))
    classifier.add(Convolution2D(96,(3,3), input_shape = (32,32,3), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros" ))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.25))
    classifier.add(Convolution2D(96,(3,3), padding = 'same', strides = 1,kernel_initializer =random_uniform))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.25))
    # stride-2 conv replaces a pooling layer (All-CNN style downsampling)
    classifier.add(Convolution2D(96,(3,3), padding = 'same', strides = 2,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.25))
    classifier.add(Convolution2D(192,(3,3), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.50))
    classifier.add(Convolution2D(192,(3,3), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.50))
    # second stride-2 downsampling stage
    classifier.add(Convolution2D(192,(3,3), padding = 'same', strides = 2,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.50))
    classifier.add(Convolution2D(192,(3,3), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(Dropout(0.50))
    classifier.add(Convolution2D(192,(1,1), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    # project to 10 channels (one per class) before global average pooling
    classifier.add(Convolution2D(10,(1,1), padding = 'same', strides = 1,kernel_initializer =random_uniform, bias_initializer = "zeros"))
    classifier.add(BatchNormalization())
    classifier.add(Activation('relu'))
    classifier.add(GlobalAveragePooling2D())
    classifier.add(Activation("softmax"))
    # (Removed: a ModelCheckpoint and callbacks_list were constructed here but
    # never returned or attached to training -- dead code. Pass your own
    # callbacks to model.fit() instead.)
    adam = optimizers.Adam(learning_rate=0.001,decay = 1e-4, beta_1=0.9, beta_2=0.999, amsgrad=False)
    classifier.compile(optimizer = adam , loss= "categorical_crossentropy", metrics = [get_f1])
    return classifier
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import roc_curve, auc
def f1(Y_test, y_pred):
    """Compute overall classification accuracy from one-hot labels.

    NOTE: despite its name this returns plain accuracy (trace of the
    confusion matrix over its total), not an F1 score; it is used as a
    custom scorer further below.

    BUG FIX: the original line read ``def f1(Y_test, y_pred):d`` — the
    stray ``d`` after the colon made the whole module a SyntaxError.

    Args:
        Y_test: one-hot encoded ground-truth labels, shape (n_samples, n_classes).
        y_pred: predicted class indices, shape (n_samples,).

    Returns:
        float: fraction of correctly classified samples.
    """
    y_test = np.argmax(Y_test, axis=1)
    # confusion_matrix is expected from sklearn.metrics, imported elsewhere
    # in this file.
    cm = confusion_matrix(y_test, y_pred)
    accuracy = np.trace(cm) / float(np.sum(cm))
    print("\nValidation_Accuracy = ", accuracy, "\n")
    return accuracy
# Wrap the Keras model builder so sklearn's search utilities can drive it.
estimator = KerasClassifier(build_fn=nn_model, epochs=40, batch_size=128, verbose=1)
# Tunable Parameters: candidate per-class weights for the imbalance search.
weights = np.linspace(0.05, 0.5, num=20)
# Index (or indices) of the rarest class in the training labels.
index = np.where(np.bincount(y_train_1) == np.bincount(y_train_1).min())
# One candidate weight vector (length 10, one entry per class) per weight value.
class_weights = [[x] * 10 for x in weights]
for i in range(20):
    for j in np.nditer(index):
        # The minority class gets the complementary weight.
        class_weights[i][j] = 1 - weights[i]
param_grid_1 = dict(class_weight=class_weights)
# Fitting the model
scorer_1 = sklearn.metrics.make_scorer(f1, greater_is_better=True)
scorer = sklearn.metrics.make_scorer(recall_score)
cv = ShuffleSplit(n_splits=1, test_size=0.1, random_state=0)
random_grid = RandomizedSearchCV(estimator=estimator, param_distributions=param_grid_1, verbose=2, scoring=scorer_1, return_train_score=True, cv=cv)
grid_result = random_grid.fit(x_train, y_train)
# Notebook-style result inspection.
# BUG FIX: the attribute is `cv_results_` (plural); `cv_result_` raised
# AttributeError and aborted the remaining lines.
random_grid.cv_results_
dir(random_grid)
random_grid.best_params_
random_grid.best_score_
random_grid.best_params_
random_grid.error_score
random_grid.return_train_score
random_grid.get_params()  # was a bare method reference (no-op); call it to fetch params
import unittest
import numpy as np
from photonai_graph.GraphConstruction.graph_constructor_threshold import GraphConstructorThreshold
class ThresholdTests(unittest.TestCase):
    """Tests for GraphConstructorThreshold: output channel layout, one-hot
    node features, and the prep_mtrx options (abs / fisher / zscore).

    Fixture layout: ``X4d`` stacks an all-ones 20x20x20x1 adjacency (which a
    0.5 threshold leaves unchanged) with a random feature channel, so each
    output channel can be compared exactly against its source array.
    """

    def setUp(self):
        # Adjacency of ones: survives threshold=0.5 unchanged.
        self.X4d_adjacency = np.ones((20, 20, 20, 1))
        # Random per-node features in a second channel.
        self.X4d_features = np.random.rand(20, 20, 20, 1)
        self.X4d = np.concatenate((self.X4d_adjacency, self.X4d_features), axis=3)
        # Small matrix exercised by the prep_mtrx option tests below.
        self.test_mtrx = np.array(([0.5, 0, 0, 0],
                                   [0, 1, 0, 0],
                                   [0, 0, 1, 0],
                                   [0, 0, 0, 0.5]))
        self.y = np.ones(20)

    def test_wrong_input_shape(self):
        # A 3d input (no channel axis) must be rejected at transform time.
        g_constr = GraphConstructorThreshold(threshold=.5)
        input_mtrx = np.ones((15, 20, 20))
        g_constr.fit(input_mtrx, np.arange(15))
        with self.assertRaises(ValueError):
            g_constr.transform(input_mtrx)

    def test_strange_one_hot_value(self):
        # one_hot_nodes only accepts valid flag values, not arbitrary floats.
        with self.assertRaises(ValueError):
            GraphConstructorThreshold(one_hot_nodes=27.3)

    def test_threshold_4d(self):
        # ensure that individual transform style with a 4d matrix returns the right shape
        g_constr = GraphConstructorThreshold(threshold=0.5)
        g_constr.fit(self.X4d, self.y)
        trans = g_constr.transform(self.X4d)
        self.assertEqual(trans.shape, (20, 20, 20, 3))
        # first channel should be thresholded but unchanged (all ones survive 0.5)
        self.assertTrue(np.array_equal(trans[..., 0, np.newaxis], self.X4d_adjacency))
        # second channel should contain the original connectivity
        self.assertTrue(np.array_equal(trans[..., 1, np.newaxis], self.X4d_adjacency))
        # last channel should contain the random features
        self.assertTrue(np.array_equal(trans[..., 2, np.newaxis], self.X4d_features))

    def test_threshold_4d_discard_connectivity(self):
        # as above, but the duplicated original-connectivity channel is dropped
        g_constr = GraphConstructorThreshold(threshold=0.5,
                                             discard_original_connectivity=True)
        g_constr.fit(self.X4d, self.y)
        trans = g_constr.transform(self.X4d)
        self.assertEqual(trans.shape, (20, 20, 20, 2))
        # first channel should be thresholded but unchanged
        self.assertTrue(np.array_equal(trans[..., 0, np.newaxis], self.X4d_adjacency))
        # last channel should contain the random features
        self.assertTrue(np.array_equal(trans[..., 1, np.newaxis], self.X4d_features))

    def test_threshold_shape_4d_onehot(self):
        # ensure that an individual transform with a 4d matrix returns the right shape
        # when using one hot encoded node features
        g_constr = GraphConstructorThreshold(threshold=0.5, one_hot_nodes=1)
        g_constr.fit(self.X4d, self.y)
        trans = g_constr.transform(self.X4d)
        self.assertEqual(trans.shape, (20, 20, 20, 4))
        # the first channel still contains the (thresholded but unchanged) values
        self.assertTrue(np.array_equal(trans[..., 0, np.newaxis], self.X4d_adjacency))
        # the second channel contains the one hot encoding; we know it exactly
        # because every subject has the same 20 nodes (identity per subject)
        self.assertTrue(np.array_equal(trans[..., 1, np.newaxis],
                                       np.repeat(np.eye(20)[np.newaxis, ...], 20, axis=0)[..., np.newaxis]))
        # the third channel contains again the original values
        self.assertTrue(np.array_equal(trans[..., 2, np.newaxis], self.X4d_adjacency))
        # the last channel contains the features
        self.assertTrue(np.array_equal(trans[..., 3, np.newaxis], self.X4d_features))

    def test_threshold_individual_shape_4d_onehot_discard_connectivity(self):
        # one-hot features plus discarding the duplicated connectivity channel
        g_constr = GraphConstructorThreshold(threshold=0.5,
                                             one_hot_nodes=1,
                                             discard_original_connectivity=True)
        g_constr.fit(self.X4d, self.y)
        trans = g_constr.transform(self.X4d)
        self.assertEqual(trans.shape, (20, 20, 20, 3))
        # the first channel still contains the (thresholded but unchanged) values
        self.assertTrue(np.array_equal(trans[..., 0, np.newaxis], self.X4d_adjacency))
        # the second channel contains the one hot encoding (known by construction)
        self.assertTrue(np.array_equal(trans[..., 1, np.newaxis],
                                       np.repeat(np.eye(20)[np.newaxis, ...], 20, axis=0)[..., np.newaxis]))
        # the last channel contains the features
        self.assertTrue(np.array_equal(trans[..., 2, np.newaxis], self.X4d_features))

    def test_prep_matrix(self):
        # use_abs with threshold 0 turns -I back into I
        g_constr = GraphConstructorThreshold(threshold=.0, use_abs=True)
        input_matrix = np.eye(4)
        output_matrix = g_constr.prep_mtrx(input_matrix * -1)
        self.assertTrue(np.array_equal(input_matrix, output_matrix))

    def test_use_abs(self):
        g_constr = GraphConstructorThreshold(threshold=0.5, use_abs=True)
        input_matrix = np.eye(4) * -1
        output_matrix = g_constr.prep_mtrx(input_matrix)
        self.assertTrue(np.array_equal(np.eye(4), output_matrix))

    def test_use_abs_fisher(self):
        # |−1| fisher-transformed should land on +inf on the diagonal
        # (presumably arctanh(1) — pinned via isposinf)
        g_constr = GraphConstructorThreshold(threshold=0.5, fisher_transform=1, use_abs_fisher=1)
        input_matrix = np.eye(4) * -1
        input_matrix = input_matrix[np.newaxis, :, :, np.newaxis]
        output_matrix = g_constr.prep_mtrx(input_matrix)
        ids = np.diag(output_matrix[0, ..., 0])
        self.assertTrue(np.array_equal(np.isposinf(ids), [True, True, True, True]))

    def test_use_zscore(self):
        # expected counts below pin the library's current z-scoring behavior
        g_constr = GraphConstructorThreshold(threshold=0.5, zscore=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 0)), 4)

    def test_use_abs_zscore(self):
        # absolute z-scores: all 16 entries become non-negative
        g_constr = GraphConstructorThreshold(threshold=0.5, zscore=1, use_abs_zscore=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 0)), 16)

    def test_use_abs_and_fisher(self):
        g_constr = GraphConstructorThreshold(threshold=0.5, use_abs=1, fisher_transform=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 1)), 2)

    def test_use_abs_and_fisher_and_abs_fisher(self):
        g_constr = GraphConstructorThreshold(threshold=0.5, use_abs=1, fisher_transform=1, use_abs_fisher=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 1)), 2)

    def test_use_abs_and_fisher_and_zsore(self):
        g_constr = GraphConstructorThreshold(threshold=0.5, use_abs=1, fisher_transform=1, zscore=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 1)), 2)

    def test_fisher_and_zscore_and_abszscore(self):
        g_constr = GraphConstructorThreshold(threshold=0.5, fisher_transform=1, zscore=1, use_abs_zscore=1)
        output_matrix = g_constr.prep_mtrx(self.test_mtrx[np.newaxis, :, :, np.newaxis])
        self.assertEqual((np.sum(np.array(output_matrix) >= 1)), 2)
#!/usr/bin/python3
import argparse
import os
import time
from functools import partial
import numpy as np
import oneflow as flow
from oneflow import nn
from modeling import BertForPreTraining
from utils.ofrecord_data_utils import OfRecordDataLoader
def save_model(module: nn.Module, checkpoint_path: str, epoch: int, acc: float):
    """Persist the module's parameters under a directory whose name encodes
    the epoch and validation accuracy."""
    target = os.path.join(checkpoint_path, "epoch_%d_val_acc_%f" % (epoch, acc))
    flow.save(module.state_dict(), target)
def train(epoch, iter_per_epoch, graph, print_interval):
    """Run one training epoch on the given graph.

    Logs a running-average loss every ``print_interval`` iterations and a
    final loss/accuracy summary at the end of the epoch.
    """
    total_loss = 0
    total_correct = 0
    total_element = 0
    for step in range(iter_per_epoch):
        start_t = time.time()
        next_sent_output, next_sent_labels, loss = graph()
        # .numpy() blocks until the graph execution has finished (sync point).
        loss = loss.numpy().item()
        end_t = time.time()
        # Next-sentence-prediction accuracy for this batch.
        predictions = next_sent_output.argmax(dim=-1)
        hits = predictions.eq(next_sent_labels.squeeze(1)).sum().numpy().item()
        total_loss += loss
        total_correct += hits
        total_element += next_sent_labels.nelement()
        if (step + 1) % print_interval == 0:
            avg_loss = total_loss / (step + 1)
            print(
                "Epoch {}, train iter {}, loss {:.3f}, iter time: {:.3f}s".format(
                    epoch, step + 1, avg_loss, end_t - start_t
                )
            )
    print(
        "Epoch {}, train iter {}, loss {:.3f}, total accuracy {:.2f}".format(
            epoch, step + 1, total_loss / (step + 1), total_correct * 100.0 / total_element
        )
    )
def validation(
    epoch: int, iter_per_epoch: int, graph: nn.Graph, print_interval: int
) -> float:
    """Run one evaluation epoch; return next-sentence accuracy in [0, 1]."""
    total_correct = 0
    total_element = 0
    for step in range(iter_per_epoch):
        start_t = time.time()
        scores, labels = graph()
        # Pull results to host; this also synchronizes graph execution.
        scores = scores.numpy()
        labels = labels.numpy()
        end_t = time.time()
        # Next-sentence-prediction accuracy for this batch.
        total_correct += (scores.argmax(axis=-1) == labels.squeeze(1)).sum()
        total_element += labels.size
        if (step + 1) % print_interval == 0:
            print(
                "Epoch {}, val iter {}, val time: {:.3f}s".format(
                    epoch, step + 1, end_t - start_t
                )
            )
    print(
        "Epoch {}, val iter {}, total accuracy {:.2f}".format(
            epoch, step + 1, total_correct * 100.0 / total_element
        )
    )
    return total_correct / total_element
def main():
    """Pre-train BERT with OneFlow static graphs (nn.Graph).

    Parses CLI arguments, builds OFRecord data loaders, the
    BertForPreTraining model, Adam + cosine-decay LR schedule, wraps the
    train/eval loops in nn.Graph, then alternates one training and one
    validation pass per epoch, saving a checkpoint after every epoch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ofrecord_path",
        type=str,
        default="wiki_ofrecord_seq_len_128_example",
        help="Path to ofrecord dataset",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=32, help="Training batch size"
    )
    parser.add_argument(
        "--val_batch_size", type=int, default=32, help="Validation batch size"
    )
    parser.add_argument(
        "--hidden_size", type=int, default=768, help="Hidden size of transformer model",
    )
    parser.add_argument(
        "--num_hidden_layers", type=int, default=12, help="Number of layers"
    )
    parser.add_argument(
        "-a",
        "--num_attention_heads",
        type=int,
        default=12,
        help="Number of attention heads",
    )
    parser.add_argument(
        "--intermediate_size",
        type=int,
        default=3072,
        help="intermediate size of bert encoder",
    )
    parser.add_argument("--max_position_embeddings", type=int, default=512)
    parser.add_argument(
        "-s", "--seq_length", type=int, default=128, help="Maximum sequence len"
    )
    parser.add_argument(
        "--vocab_size", type=int, default=30522, help="Total number of vocab"
    )
    parser.add_argument("--type_vocab_size", type=int, default=2)
    parser.add_argument("--attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--hidden_dropout_prob", type=float, default=0.1)
    parser.add_argument("--hidden_size_per_head", type=int, default=64)
    parser.add_argument("--max_predictions_per_seq", type=int, default=20)
    parser.add_argument("-e", "--epochs", type=int, default=10, help="Number of epochs")
    # NOTE(review): argparse `type=bool` treats ANY non-empty string as True,
    # so `--with-cuda false` still enables CUDA. Kept as-is for CLI
    # compatibility; consider a store_true/store_false pair instead.
    parser.add_argument(
        "--with-cuda",
        type=bool,
        default=True,
        help="Training with CUDA: true, or false",
    )
    parser.add_argument(
        "--cuda_devices", type=int, nargs="+", default=None, help="CUDA device ids"
    )
    parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate of adam")
    parser.add_argument(
        "--adam_weight_decay", type=float, default=0.01, help="Weight_decay of adam"
    )
    parser.add_argument(
        "--adam_beta1", type=float, default=0.9, help="Adam first beta value"
    )
    # FIX: help text previously said "first beta value" here as well.
    parser.add_argument(
        "--adam_beta2", type=float, default=0.999, help="Adam second beta value"
    )
    parser.add_argument(
        "--print_interval", type=int, default=10, help="Interval of printing"
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="checkpoints",
        help="Path to model saving",
    )
    args = parser.parse_args()

    if args.with_cuda:
        device = flow.device("cuda")
    else:
        device = flow.device("cpu")
    print("Device is: ", device)

    print("Creating Dataloader")
    train_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="train",
        dataset_size=1024,
        batch_size=args.train_batch_size,
        data_part_num=1,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
    )
    test_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="test",
        dataset_size=1024,
        batch_size=args.val_batch_size,
        data_part_num=1,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
    )

    print("Building BERT Model")
    bert_model = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        args.hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        args.intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
    )
    bert_model.to(device)

    optimizer = flow.optim.Adam(
        bert_model.parameters(), lr=args.lr, betas=(args.adam_beta1, args.adam_beta2),
    )
    # Cosine decay scheduled over the whole training run.
    steps = args.epochs * len(train_data_loader)
    cosine_annealing_lr = flow.optim.lr_scheduler.CosineDecayLR(
        optimizer, decay_steps=steps
    )

    ns_criterion = nn.CrossEntropyLoss(reduction="mean")
    mlm_criterion = nn.CrossEntropyLoss(reduction="none")

    def get_masked_lm_loss(
        logit_blob,
        masked_lm_positions,
        masked_lm_labels,
        label_weights,
        max_prediction_per_seq,
    ):
        """Masked-LM loss averaged over real (non-padding) predictions.

        `label_weights` is 1.0 for every real prediction and 0.0 for
        padding (sequences too short for the maximum number of
        predictions), so the weighted mean ignores padded positions.
        """
        # Gather the vocab logits at the masked positions only.
        logit_blob = flow.gather(
            logit_blob,
            index=masked_lm_positions.unsqueeze(2).repeat(1, 1, args.vocab_size),
            dim=1,
        )
        logit_blob = flow.reshape(logit_blob, [-1, args.vocab_size])
        label_id_blob = flow.reshape(masked_lm_labels, [-1])
        pre_example_loss = mlm_criterion(logit_blob, label_id_blob)
        pre_example_loss = flow.reshape(pre_example_loss, [-1, max_prediction_per_seq])
        # (An unused per-batch label-weight average computed here was removed.)
        numerator = flow.sum(pre_example_loss * label_weights)
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss

    class BertGraph(nn.Graph):
        """Static training graph: forward, combined loss, backward, optimizer step."""

        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self.ns_criterion = ns_criterion
            self.masked_lm_criterion = partial(
                get_masked_lm_loss, max_prediction_per_seq=args.max_predictions_per_seq
            )
            self.add_optimizer(optimizer, lr_sch=cosine_annealing_lr)
            self._train_data_loader = train_data_loader

        def build(self):
            (
                input_ids,
                next_sentence_labels,
                input_mask,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._train_data_loader()
            input_ids = input_ids.to(device=device)
            input_mask = input_mask.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sentence_labels = next_sentence_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device=device)
            masked_lm_weights = masked_lm_weights.to(device=device)

            # 1. forward the next_sentence_prediction and masked_lm model
            prediction_scores, seq_relationship_scores = self.bert(
                input_ids, segment_ids, input_mask
            )

            # 2-1. loss of is_next classification result
            next_sentence_loss = self.ns_criterion(
                seq_relationship_scores.view(-1, 2), next_sentence_labels.view(-1)
            )
            # 2-2. masked-LM loss over the masked positions
            masked_lm_loss = self.masked_lm_criterion(
                prediction_scores, masked_lm_positions, masked_lm_ids, masked_lm_weights
            )

            total_loss = next_sentence_loss + masked_lm_loss
            total_loss.backward()
            return seq_relationship_scores, next_sentence_labels, total_loss

    bert_graph = BertGraph()

    class BertEvalGraph(nn.Graph):
        """Static evaluation graph: forward only, no gradients."""

        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self._test_data_loader = test_data_loader

        def build(self):
            (
                input_ids,
                next_sent_labels,
                input_masks,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._test_data_loader()
            input_ids = input_ids.to(device=device)
            input_masks = input_masks.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sent_labels = next_sent_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device)

            with flow.no_grad():
                # 1. forward the next_sentence_prediction and masked_lm model
                # BUG FIX: argument order now matches the training graph
                # (input_ids, segment_ids, attention mask); previously segment
                # ids and mask were swapped at eval time, which corrupted the
                # reported validation accuracy. Assumes the training-graph
                # order is the correct one — confirm against BertForPreTraining.
                _, seq_relationship_scores = self.bert(
                    input_ids, segment_ids, input_masks
                )

            return seq_relationship_scores, next_sent_labels

    bert_eval_graph = BertEvalGraph()

    for epoch in range(args.epochs):
        # Train
        bert_model.train()
        train(epoch, len(train_data_loader), bert_graph, args.print_interval)

        # Eval
        bert_model.eval()
        val_acc = validation(
            epoch, len(test_data_loader), bert_eval_graph, args.print_interval * 10
        )

        print("Saving model ...")
        save_model(bert_model, args.checkpoint_path, epoch, val_acc)
# Script entry point: run training only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
"""
Calculates zonal-mean eddy rms of meridional wind on a given model
level for aquaplanet model data
"""
import numpy as np
import xarray as xr
from ds21grl.misc import get_dim_exp,get_eddy,daysinmonths,get_season_daily
from ds21grl.read_aqua import read_xt_ml_daily
from ds21grl.config import data_name,dir_raw_aqua,dir_processed
# INPUT -----------------------------------------------------------
# Subset of experiment names to process (elements 1..9 of the global list).
data_name_local = data_name[1:10]
season_name = ['ANNUAL','NDJFM']   # seasons to evaluate
var = 'V'                          # meridional wind
level = 850                        # model level (presumably hPa -- confirm against read_xt_ml_daily)
ilat = 70                          # latitude selector (70N per output filename)
write2file = 0                     # set to 1 to write the netcdf output
# -----------------------------------------------------------------
for exp in data_name_local:
    for season in season_name:
        print('dataset: ' + exp,',season: ' + season,',var: ' + var)

        # get dimensions
        dim = get_dim_exp(exp)

        # define paths
        dir_in = dir_raw_aqua + exp + '/'
        dir_out = dir_processed + exp + '/'

        # read data; assumed shaped (year, day-of-year, lon) given the
        # indexing below -- TODO confirm with read_xt_ml_daily
        data = read_xt_ml_daily(var,level,ilat,dir_in,dim)

        # get eddies (deviations along the last, presumably zonal, axis)
        data = get_eddy(data,ax=-1)

        # get transients by removing stationary waves (per calendar month)
        data_trans = np.zeros((data.shape))
        for m in dim.months:
            # day-of-year indices belonging to calendar month m
            if m == 1:
                index = np.arange(0,daysinmonths(m))
            else:
                temp = np.arange(1,m)
                index = np.arange(np.sum(daysinmonths(temp)),np.sum(daysinmonths(temp)) + daysinmonths(m))
            # monthly stationary wave: mean over years, then mean over the
            # month's days (sum over days divided by day count)
            data_stw = np.sum(data[:,index,:].mean(axis=0),axis=0)/daysinmonths(m)
            for yr in range(0,dim.years.size):
                for i in range(0,index.size):
                    data_trans[yr,index[i],:] = data[yr,index[i],:] - data_stw

        # extract season
        data = get_season_daily(data,season,ax=1)
        data_trans = get_season_daily(data_trans,season,ax=1)

        # calc zonal-mean rms: sqrt of the time-mean square, then zonal mean,
        # leaving one value per year
        rms = np.mean(np.sqrt(np.mean(data**2,axis=1)),axis=-1)
        rms_trans = np.mean(np.sqrt(np.mean(data_trans**2,axis=1)),axis=-1)

        # write to file
        if write2file == 1:
            output = xr.Dataset(data_vars={'rms_eddy': (('year'), rms.astype(np.float32)),
                                           'rms_trans': (('year'), rms_trans.astype(np.float32))},
                                coords={'year': dim.years})
            output.rms_eddy.attrs['units'] = 'm/s'
            output.rms_trans.attrs['units'] = 'm/s'
            filename = 'zm_rms_' + var + str(level) + '_ml_' + str(ilat) + 'N_' + season + '_' + dim.timestamp + '.nc'
            output.to_netcdf(dir_out + filename)
from sympy import Mul, Add, Rational, Float, Integer, Pow, Function
from .util import ScalarSymbol, FunctionSymbol
import keras
import tensorflow as tf
import numpy as np
class MetaLayer(object):
    """Plain data record describing one layer of a generated code skeleton."""

    def __init__(self, type_key=None, name=None, input_key=None, output_key=None, options=None):
        self.type_key = type_key      # dispatch key, e.g. 'AddLayer', 'DivLayer', ...
        self.name = name              # unique layer name used in generated code
        self.input_key = input_key    # key(s) naming the layer's input(s)
        self.output_key = output_key  # key under which this layer's output is stored
        self.options = options        # extra options (e.g. derivative kernel info)

    @staticmethod
    def from_layer(layer):
        """Copy-construct a MetaLayer from any object exposing the same attributes.

        BUG FIX: the constructed instance was previously discarded and the
        method returned None.
        """
        return MetaLayer(type_key=layer.type_key,
                         name=layer.name,
                         input_key=layer.input_key,
                         output_key=layer.output_key,
                         options=layer.options)
class Layer(object):
    """Build a layer from the recursive exploration of the sub-expression.

    Each instance takes a monotonically increasing id from a class-level
    counter (used to make names/output keys unique), and accumulates a
    skeleton (list of MetaLayer) plus the output keys of its inputs.
    """
    _id = 0             # shared instance counter; see _update_id
    _name = None        # optional display-name override for subclasses
    _output_key = None  # optional output-key prefix override for subclasses

    def __init__(self, expr):
        self.expr = expr
        self.number = self._id   # id snapshot at construction time
        self._update_id()
        self._skeleton = []      # MetaLayer entries accumulated in build order
        self._input_key = []     # output keys of this layer's inputs
        self._options = None     # subclass-specific extras (see _extract_options)

    def _agregate_sub_expressions(self):
        # Recursively build every sympy argument and splice its output key
        # and skeleton into this layer.
        for arg in self.expr.args:
            output_key, sub_skeleton = LayerFactory(arg)()
            self.add_input_key(output_key)
            self._add_to_skeleton(sub_skeleton)

    def _extract_options(self):
        # Hook for subclasses that need options (e.g. DerivativeLayer).
        pass

    @property
    def as_MetaLayer(self):
        """Snapshot this layer's metadata as a MetaLayer record."""
        meta_layer = MetaLayer(
            type_key=self.type_key,
            name=self.name,
            input_key=self.input_key,
            output_key=self.output_key,
            options=self.options
        )
        return meta_layer

    def add_input_key(self, input_key):
        """Append a single key, or extend with a list of keys."""
        if isinstance(input_key, list):
            self._input_key += input_key
        else:
            self._input_key.append(input_key)

    @property
    def type_key(self):
        # The class name doubles as the dispatch key for code generation.
        return type(self).__name__

    @property
    def input_key(self):
        return self._input_key

    @property
    def options(self):
        return self._options

    def __call__(self):
        """Construct the skeleton of a symbolic expression.

        1) Feed self._skeleton and self._input_key from sub-expressions
        2) Extract options when needed (Derivative, ...)
        3) Append the present layer to the skeleton
        """
        # 1) Feed from sub-expressions: modifies self._skeleton & self._input_key
        self._agregate_sub_expressions()
        # 2) Extract options
        self._extract_options()
        # 3) Add layer to skeleton
        self._add_to_skeleton(self.as_MetaLayer)
        return self.output_key, self._skeleton

    @classmethod
    def _update_id(cls):
        # NOTE(review): `cls._id += 1` creates/updates the counter on the
        # subclass the instance belongs to, so counters diverge per subclass
        # after the first instantiation -- confirm this is intended.
        cls._id += 1

    def _add_to_skeleton(self, skeleton):
        # Accepts None (terminal layers), a single MetaLayer, or a list.
        if skeleton is not None:
            if isinstance(skeleton, list):
                self._skeleton += skeleton
            else:
                self._skeleton.append(skeleton)

    @property
    def name(self):
        # "<name>_<current class counter>" -- note this reads the *current*
        # class-level _id, not self.number recorded at construction.
        if self._name is None:
            name = type(self).__name__
        else:
            name = self._name
        return f"{name}_{self._id}"

    @property
    def output_key(self):
        if self._output_key is None:
            return self.name
        else:
            return f"{self._output_key}_{self._id}"
class AddLayer(Layer):
    """Addition of vector-valued sub-expressions."""

    _output_key = 'add'
class MulLayer(Layer):
    """Multiplication of vector-valued sub-expressions."""

    _output_key = 'mul'
class ScalarAddLayer(Layer):
    """Addition of a scalar constant to a vector-valued sub-expression."""

    _output_key = 'sc_add'

    def __init__(self, scalar, expr):
        super().__init__(expr)
        # Resolve the scalar part to its output-key string immediately.
        self.scalar = ScalarLayer(scalar)()[0]

    def __call__(self):
        """Build the skeleton: sub-expression first, then this layer with
        the scalar key prepended to its inputs."""
        sub_key, sub_skeleton = LayerFactory(self.expr)()
        self.add_input_key(sub_key)
        self._add_to_skeleton(sub_skeleton)
        layer = self.as_MetaLayer
        layer.input_key = [self.scalar] + self.input_key
        self._add_to_skeleton(layer)
        return self.output_key, self._skeleton
class ScalarMulLayer(Layer):
    """Multiplication of a vector-valued sub-expression by a scalar constant."""

    _output_key = 'sc_mul'

    def __init__(self, scalar, expr):
        super().__init__(expr)
        # Resolve the scalar part to its output-key string immediately.
        self.scalar = ScalarLayer(scalar)()[0]

    def __call__(self):
        """Build the skeleton: sub-expression first, then this layer with
        the scalar key prepended to its inputs."""
        sub_key, sub_skeleton = LayerFactory(self.expr)()
        self.add_input_key(sub_key)
        self._add_to_skeleton(sub_skeleton)
        layer = self.as_MetaLayer
        layer.input_key = [self.scalar] + self.input_key
        self._add_to_skeleton(layer)
        return self.output_key, self._skeleton
class DivLayer(Layer):
    """Point-wise inverse (1/x) of a sub-expression."""

    _output_key = 'div'

    def __call__(self):
        """Build the skeleton for the inverted sub-expression."""
        sub_key, sub_skeleton = LayerFactory(self.expr)()
        self._add_to_skeleton(sub_skeleton)
        # input_key is set directly on the meta layer as a single key (not a
        # list), matching what KerasCodingRule expects for DivLayer.
        layer = self.as_MetaLayer
        layer.input_key = sub_key
        self._add_to_skeleton(layer)
        return self.output_key, self._skeleton
class PowLayer(Layer):
    """Power layer for x**a where 'a' is an Integer.

    A negative exponent is handled by first inverting x (via DivLayer)
    and keeping the positive exponent.
    """

    _output_key = 'pow'

    def __init__(self, scalar, expr):
        if not isinstance(scalar, Integer):
            raise ValueError(f"exponent {scalar} should be an Integer")
        self.inverse = bool(scalar < 0)
        if self.inverse:
            scalar = -scalar
        self.scalar = str(scalar)
        super().__init__(expr)

    def __call__(self):
        """Build the skeleton: (possibly inverted) sub-expression first,
        then this layer with the exponent prepended to its inputs."""
        if self.inverse:
            sub_key, sub_skeleton = DivLayer(self.expr)()
        else:
            sub_key, sub_skeleton = LayerFactory(self.expr)()
        self.add_input_key(sub_key)
        self._add_to_skeleton(sub_skeleton)
        layer = self.as_MetaLayer
        layer.input_key = [self.scalar] + self.input_key
        self._add_to_skeleton(layer)
        return self.output_key, self._skeleton
class ScalarPowLayer(Layer):
    """Power layer for x**a where 'a' is not an integer."""

    _output_key = 'spow'

    def __init__(self, scalar, expr):
        super().__init__(expr)
        # Normalize the exponent to a plain-float string for code generation.
        self.scalar = str(float(Float(scalar)))

    def __call__(self):
        """Build the skeleton: sub-expression first, then this layer with
        the exponent prepended to its inputs."""
        sub_key, sub_skeleton = LayerFactory(self.expr)()
        self.add_input_key(sub_key)
        self._add_to_skeleton(sub_skeleton)
        layer = self.as_MetaLayer
        layer.input_key = [self.scalar] + self.input_key
        self._add_to_skeleton(layer)
        return self.output_key, self._skeleton
class DerivativeLayer(Layer):
    """Placeholder for derivative layers. Intended responsibilities:

    - get the derivative order (partial order)
    - compute the stencil from the sympy diff operator
    - define the kernel

    Currently unimplemented: _extract_options raises, so building a
    skeleton containing a derivative fails loudly rather than silently.
    """
    _output_key = 'deriv'

    def _extract_options(self):
        # Options (kernel key, stencil size) must be derived from the sympy
        # Derivative expression; not implemented yet.
        raise NotImplementedError
def is_scalar(expr):
    """Return True when *expr* is built purely from scalar atoms.

    Scalars are numeric literals (sympy or Python) and ScalarSymbols; a
    composite Function/Add/Mul/Pow is scalar when all of its arguments are.
    """
    if isinstance(expr, (Integer, Float, Rational, ScalarSymbol, int, float)):
        return True
    if isinstance(expr, (Function, Add, Mul, Pow)):
        return all(is_scalar(arg) for arg in expr.args)
    return False
class ScalarLayer(Layer):
    """Terminal layer for a purely scalar sub-expression.

    Contributes no skeleton entry: the scalar is encoded directly into the
    output-key string consumed by the Scalar*Layer code generation.
    """
    def __call__(self):
        if not is_scalar(self.expr):
            raise ValueError(f"{self.expr} is not a scalar")
        expr = self.expr
        # Replace Rationals by Float
        expr = expr.subs({scalar: Float(scalar) for scalar in expr.atoms(Rational)})
        # Replace Integer by Float
        expr = expr.subs({scalar: Float(scalar) for scalar in expr.atoms(Integer)})
        output_key = str(expr)
        # Shorten sympy's long Float repr to the plain Python float string,
        # so generated code reads e.g. '0.5' rather than '0.500000000000000'.
        for scalar in expr.atoms(Float):
            output_key = output_key.replace(str(scalar), str(float(scalar)))
        skeleton = None
        return output_key, skeleton
class FunctionLayer(Layer):
    """Terminal layer for a field/function symbol: the symbol's own string
    form is its output key, and it contributes nothing to the skeleton."""

    def __call__(self):
        return str(self.expr), None
class LayerFactory(object):
    """Dispatch a sympy expression to the Layer subclass that builds it.

    Note: __new__ returns a *Layer* instance (not a LayerFactory); callers
    then invoke it to obtain (output_key, skeleton).
    """
    def __new__(cls, expr):
        if isinstance(expr, Add):
            # Decompose the addition into a scalar part and a vector part.
            # 1) check for decomposition (scalar + vector)
            scalar = 0
            other = 0
            for arg in expr.args:
                if is_scalar(arg):
                    scalar += arg
                else:
                    other += arg
            # 2) Apply 'Add' for the vector part and 'ScalarAdd' when a
            #    scalar summand is present. Three cases:
            #    i.   scalar + scalar -> should not appear if expressions
            #         only contain Floats
            #    ii.  scalar + vector
            #    iii. vector + vector
            if scalar == 0:
                # iii. No addition with a scalar
                return AddLayer(expr)
            else:
                # ii. Addition with a scalar
                return ScalarAddLayer(scalar, other)
        elif isinstance(expr, Mul):
            # Decompose the multiplication into a scalar factor (neutral
            # element 1) and the remaining vector factors; same three cases
            # as for Add above.
            # 1) check for decomposition (scalar * vector)
            scalar = 1
            other = []
            for arg in expr.args:
                if is_scalar(arg):
                    scalar *= arg
                else:
                    other.append(arg)
            other = Mul(*other)
            # 2) Apply 'Mul' for the vector part, 'ScalarMul' otherwise.
            if scalar == 1:
                # iii. No multiplication by a scalar
                return MulLayer(expr)
            else:
                # ii. Multiplication by a scalar
                return ScalarMulLayer(scalar, other)
        elif isinstance(expr, Pow):
            sub_expr, exponent = expr.args
            if isinstance(exponent, Integer):
                if exponent == Integer(-1):
                    # x**-1 is a plain inversion
                    return DivLayer(sub_expr)
                else:
                    return PowLayer(exponent, sub_expr)
            elif isinstance(exponent, (Float, Rational)):
                return ScalarPowLayer(exponent, sub_expr)
            elif isinstance(exponent, ScalarSymbol) and exponent.is_integer:
                # NOTE(review): PowLayer.__init__ rejects non-Integer
                # exponents, so a symbolic integer exponent likely raises
                # ValueError here -- confirm intended behavior.
                return PowLayer(exponent, sub_expr)
            elif isinstance(exponent, ScalarSymbol) and exponent.is_real:
                return ScalarPowLayer(exponent, sub_expr)
            else:
                raise ValueError(f"{exponent} is not Integer/Float/Rational/Symbol -- integer or real --")
        elif isinstance(expr, FunctionSymbol):
            return FunctionLayer(expr)
        elif isinstance(expr, (Float, Integer, Rational, ScalarSymbol)):
            return ScalarLayer(expr)
        else:
            raise NotImplementedError
class CodeSkeleton(object):
    """Drive LayerFactory over an expression and keep the resulting skeleton."""

    def __init__(self, expr):
        self._expr = expr
        self._output_key, self._skeleton = LayerFactory(expr)()

    @property
    def skeleton(self):
        """List of MetaLayer entries in evaluation order."""
        return self._skeleton

    @property
    def output_key(self):
        """Key under which the final expression's output is stored."""
        return self._output_key

    @property
    def expr(self):
        """The original sympy expression."""
        return self._expr
class KerasCodingRule(object):
    """Translate a skeleton (list of MetaLayer records) into Keras source lines."""

    # MetaLayer.type_key -> keras callable named in the generated source.
    translate_to_keras = {
        'AddLayer': 'keras.layers.add',
        'MulLayer': 'keras.layers.multiply',
        'PowLayer': 'keras.layers.multiply',
        'DivLayer': 'keras.layers.Lambda',
        'ScalarPowLayer': 'keras.layers.Lambda',
        'ScalarAddLayer': 'keras.layers.Lambda',
        'ScalarMulLayer': 'keras.layers.Lambda',
        'DerivativeLayer': 'keras.layers.Conv{{dimension}}D',
    }

    def __init__(self, skeleton):
        self.skeleton = skeleton
        self._code = None  # generated lines, computed lazily and cached

    @property
    def code(self):
        """Return (and cache) one generated source line per skeleton entry."""
        if self._code is None:
            lines = []
            for entry in self.skeleton:
                output_key = entry.output_key
                input_key = entry.input_key
                name = entry.name
                type_key = entry.type_key
                function = self.translate_to_keras[type_key]
                if type_key in ('AddLayer', 'MulLayer'):
                    # n-ary merge layers take a list of tensors.
                    input_args = '[' + ",".join(input_key) + ']'
                    line = f"{output_key} = {function}({input_args},name='{name}')"
                elif type_key == 'PowLayer':
                    # Convert "x**a" into an 'a'-fold product "x*x*...*x".
                    exponent, x = input_key
                    exponent = int(exponent)
                    x_comma = x + ','
                    input_args = '[' + exponent * x_comma + ']'
                    line = f"{output_key} = {function}({input_args} ,name='{name}')"
                elif type_key == 'ScalarPowLayer':
                    exponent, x = input_key
                    line = f"{output_key} = {function}(lambda x: x**{exponent},name='{name}')({x})"
                elif type_key == 'DivLayer':
                    line = f"{output_key} = {function}(lambda x: 1/x,name='{name}')({input_key})"
                elif type_key == 'ScalarAddLayer':
                    scalar, other = input_key
                    line = f"{output_key} = {function}(lambda x: x+{scalar},name='{name}')({other})"
                elif type_key == 'ScalarMulLayer':
                    scalar, other = input_key
                    line = f"{output_key} = {function}(lambda x: {scalar}*x,name='{name}')({other})"
                elif type_key == 'DerivativeLayer':
                    kernel_keyvar = entry.options['kernel_keyvar']
                    kernel_size = entry.options['size']
                    line = f"{output_key} = DerivativeFactory({kernel_size},kernel={kernel_keyvar},name='{name}')({input_key})"
                else:
                    raise NotImplementedError
                lines.append(line)
            self._code = lines
        return self._code
class PeriodicFactory(object):
    """Dispatch to the dimension-appropriate periodic-padding layer."""
    def __new__(cls, kernel_size, **kwargs):
        # The kernel's length decides which concrete layer to build.
        builders = {1: Periodic1D, 2: Periodic2D}
        builder = builders.get(len(kernel_size))
        if builder is None:
            raise NotImplementedError('Periodic boundary for dimension higher than 2 is not implemented')
        return builder(kernel_size, **kwargs)
class Periodic(keras.layers.Layer):
    """Base layer that periodizes a grid to match an odd kernel size.

    Subclasses implement ``call``/``compute_output_shape`` for their
    dimensionality; this base validates the kernel and derives how many
    columns to wrap on each side.
    """
    def __init__(self, kernel_size, **kwargs):
        self.kernel_size = kernel_size
        # Only odd kernels have a well-defined half-width to wrap.
        if not (np.array(kernel_size) % 2 == 1).all():
            raise ValueError(f"kernel_size {kernel_size} is not **odd** integer")
        # Half-width per axis: number of columns added on each side.
        self.add_columns = np.array(kernel_size, dtype=int) // 2
        super().__init__(**kwargs)
    def call(self, x):
        # Dimension-specific; provided by Periodic1D / Periodic2D.
        raise NotImplementedError
    def compute_output_shape(self, input_shape):
        # Dimension-specific; provided by Periodic1D / Periodic2D.
        raise NotImplementedError
    def get_config(self):
        # NOTE(review): base-layer config is deliberately not merged here,
        # mirroring the original implementation.
        return {
            'add_columns': self.add_columns,
            'kernel_size': self.kernel_size,
        }
class Periodic1D(Periodic):
    """1-D periodic padding: wrap half-kernel columns from each end."""
    def call(self, x):
        """Wrap ``add_columns[0]`` columns around axis 1.

        x is laid out as [batch, x, channels].
        """
        pad = self.add_columns[0]
        head = x[:, :pad, :]
        tail = x[:, -pad:, :]
        return tf.concat([tail, x, head], axis=1)
    def compute_output_shape(self, input_shape):
        shape = list(input_shape)
        shape[1] = shape[1] + 2 * self.add_columns[0]
        return tuple(shape)
class Periodic2D(Periodic):
    """2-D periodic padding: wrap the grid along both spatial axes."""
    def call(self, x):
        """Wrap half-kernel columns around axes 1 and 2.

        x is laid out as [batch, x, y, channels].
        """
        pad_x = self.add_columns[0]
        pad_y = self.add_columns[1]
        wrapped = tf.concat([x[:, -pad_x:, :, :], x, x[:, :pad_x, :, :]], axis=1)
        wrapped = tf.concat(
            [wrapped[:, :, -pad_y:, :], wrapped, wrapped[:, :, :pad_y, :]], axis=2)
        return wrapped
    def compute_output_shape(self, input_shape):
        shape = list(input_shape)
        shape[1] = shape[1] + 2 * self.add_columns[0]
        shape[2] = shape[2] + 2 * self.add_columns[1]
        return tuple(shape)
class CropFactory(object):
    """Dispatch to the dimension-appropriate cropping layer."""
    def __new__(cls, kernel_size, **kwargs):
        # The kernel's length decides which concrete layer to build.
        builders = {1: Crop1D, 2: Crop2D}
        builder = builders.get(len(kernel_size))
        if builder is None:
            raise NotImplementedError('Crop for dimension higher than 2 is not implemented')
        return builder(kernel_size, **kwargs)
class Crop(keras.layers.Layer):
    """Base layer that trims periodic padding back off a grid.

    Counterpart of :class:`Periodic`: validates the odd kernel and
    derives how many columns to cut from each side.
    """
    def __init__(self, kernel_size, **kwargs):
        self.kernel_size = kernel_size
        # Only odd kernels have a well-defined half-width to remove.
        if not (np.array(kernel_size) % 2 == 1).all():
            raise ValueError(f"kernel_size {kernel_size} is not **odd** integer")
        # Half-width per axis: number of columns removed from each side.
        self.suppress_columns = np.array(kernel_size, dtype=int) // 2
        super().__init__(**kwargs)
    def get_config(self):
        # NOTE(review): base-layer config is deliberately not merged here,
        # mirroring the original implementation.
        return {
            'suppress_columns': self.suppress_columns,
            'kernel_size': self.kernel_size,
        }
class Crop2D(Crop):
    """Extract the center of a 2-D domain (inverse of Periodic2D)."""
    def call(self, x):
        """Drop the wrapped half-kernel columns from both spatial axes."""
        cut_x = self.suppress_columns[0]
        cut_y = self.suppress_columns[1]
        # Trim the padded y-boundaries first, then the x-boundaries.
        trimmed = x[:, :, cut_y:-cut_y, :]
        return trimmed[:, cut_x:-cut_x, :, :]
    def compute_output_shape(self, input_shape):
        shape = list(input_shape)
        shape[1] = shape[1] - 2 * self.suppress_columns[0]
        shape[2] = shape[2] - 2 * self.suppress_columns[1]
        return tuple(shape)
class Crop1D(Crop):
    """Extract the center of a 1-D domain (inverse of Periodic1D)."""
    def call(self, x):
        """Drop the wrapped half-kernel columns from both ends of axis 1."""
        return x[:, self.suppress_columns[0]:-self.suppress_columns[0], :]
    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        # BUG FIX: index the per-axis half-width (was
        # `2 * self.suppress_columns`, which subtracted the whole ndarray
        # and turned output_shape[1] into an array). Now consistent with
        # Crop2D and Periodic1D.
        output_shape[1] -= 2 * self.suppress_columns[0]
        return tuple(output_shape)
class DerivativeFactory(object):
    """Build a derivative operator as a convolution layer (1-D or 2-D)."""
    def __new__(cls, kernel_size, kernel=None, name=None, periodic=True, nfilter=1):
        """
        Create Derivative layer
        :param kernel_size: per-axis convolution kernel size (odd integers)
        :param kernel:
            * when is None:
                the kernel can be deduced from learning
            * when provided:
                the kernel should corresponds to finite difference stencil of the derivative
        :param name: layer name passed to Keras
        :param periodic: wrap the domain before differentiating, crop after
        :param nfilter: number of convolution filters
        :return: a Keras layer (or, when periodic, a callable applying
            pad -> derivative -> crop)
        """
        if periodic:
            # Compose pad -> non-periodic derivative -> crop.
            # NOTE(review): the inner call uses the default nfilter, so a
            # custom nfilter is ignored on the periodic path — confirm intended.
            def periodic_derivative(conv):
                padded = PeriodicFactory(kernel_size)(conv)
                derived = DerivativeFactory(kernel_size, kernel, name, periodic=False)(padded)
                return CropFactory(kernel_size)(derived)
            return periodic_derivative
        options = {
            'padding': 'same',
            'activation': 'linear',
            'use_bias': False,
            'name': name,
        }
        if kernel is None:
            print('Introduce Option in Conv Net')
        else:
            # Frozen weights: the kernel encodes the finite-difference stencil.
            options['weights'] = [kernel]
            options['trainable'] = False
        dimension = len(kernel_size)
        if dimension == 1:
            return keras.layers.Conv1D(nfilter, kernel_size, **options)
        elif dimension == 2:
            return keras.layers.Conv2D(nfilter, kernel_size, **options)
from colorsys import rgb_to_hls
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
import json
#TODO: rename file
#TODO: look at turning this into a module instead of a class
#TODO: figure out gamma correction to get relative luminance for colormap y-axis
#TODO: [POTENTIALLY] Look up where the divisions between colors are to a human,
# divide up the colormap into boxes for each of these divisions,
# then use densities of each box to determine what colors you
# need the most of, then possibly use aggregated search terms
# that correspond to said boxes to scrape the best data
# THIS SHOULD NOT BE PURSUED UNTIL PIC-MATCHING ALGORITHM IS BETTER
class ImageAnalyzer(object):
def __init__(self, imgdir):
self.imgdir = imgdir
self.coordlist = []
self.colorlist = []
self.last_loaded_name = ''
self.last_loaded_type = ''
if self.imgdir.endswith('/'):
self.imgdir = self.imgdir[:-1]
def plot_colormap(self, subject_name = None, pt_size = 10):
if self.last_loaded_type == 'image':
save_folder = 'Images'
elif self.last_loaded_type == 'rgbindex':
save_folder = 'ImageSets'
elif self.last_loaded_type == 'query':
save_folder = 'Queries'
else:
save_folder = ''
if 0 in (len(self.coordlist), len(self.colorlist)):
print "Data is unable to be plotted"
return
if subject_name is None:
subject_name = self.last_loaded_name
coordlist = self.coordlist[:]
colorlist = self.colorlist[:]
coordlist = self.unzip(coordlist)
plt.scatter(coordlist[0], coordlist[1], c = colorlist, s = pt_size)
plt.xlabel('Hue')
plt.ylabel('Lightness')
plt.savefig('Colormaps/%s/%s_colormap.png' % (save_folder, subject_name))
plt.show()
def unzip(self, zipped): #look for better way to do this
lists = []
for i in range(len(zipped[0])):
orig = [x[i] for x in zipped]
lists.append(orig)
return lists
def load_rgbindex(self, fp):
self.coordlist = []
self.colorlist = []
self.last_loaded_name = fp.split('/')[-1].split('.')[0]
self.last_loaded_type = 'rgbindex'
with open(fp, 'r') as f:
rgbindex = json.load(f)
for key in rgbindex:
rgbtup = tuple([x/255.0 for x in rgbindex[key]])
pttup = rgb_to_hls(rgbtup[0], rgbtup[1], rgbtup[2])[:2]
self.colorlist.append(rgbtup)
self.coordlist.append(pttup)
def load_image(self, name, sq_size = None):
self.coordlist = []
self.colorlist = []
fp = self.imgdir + '/' + name
self.last_loaded_name = name
self.last_loaded_type = 'image'
img = Image.open(fp)
img = img.convert('RGB')
if sq_size is None or sq_size <= 1:
sq_size = int(sqrt(img.size[0]*img.size[1]) / 45) # calculate sq_size for higher end of ~2000 points of data
img = img.resize((img.size[0]/sq_size,img.size[1]/sq_size), Image.BOX)
pixels = np.array(img)
for x in range(len(pixels)):
for y in range(len(pixels[x])):
rgbtup = tuple([i/255.0 for i in pixels[x,y]])
pttup = rgb_to_hls(rgbtup[0], rgbtup[1], rgbtup[2])[:2]
self.colorlist.append(rgbtup)
self.coordlist.append(pttup)
def load_query_map(self, query):
self.coordlist = []
self.colorlist = []
queryfp = self.imgdir + '/Indexes/query_index.json'
rgbfp = self.imgdir + '/Indexes/rgb_index.json'
with open(queryfp, 'r') as f:
queryindex = json.load(f)
try:
rgbimgs = queryindex[query]
self.last_loaded_name = query
self.last_loaded_type = 'query'
except IndexError:
print "query unavailable"
return
with open(rgbfp, 'r') as f:
rgbindex = json.load(f)
for img in rgbimgs:
rgbtup = tuple([x/255.0 for x in rgbindex[img]])
pttup = rgb_to_hls(rgbtup[0], rgbtup[1], rgbtup[2])[:2]
self.colorlist.append(rgbtup)
self.coordlist.append(pttup)
def switchimgdir(self, imgdir):
self.imgdir = imgdir
if self.imgdir.endswith('/'):
self.imgdir = self.imgdir[:-1] | |
import h5py
import numpy as np
import pandas as pd
import transforms3d
import random
import math
def augment_cloud(Ps, args, return_augmentation_params=False):
    """Apply one random similarity transform (scale / rotation / mirror /
    translation) plus optional per-point jitter to a list of point clouds.

    Only the first three columns (XYZ) of each cloud are transformed, and
    the same random transform is shared by every cloud in ``Ps``. The XYZ
    columns of the input arrays are modified in place.

    Returns the transformed clouds, plus a dict with the inverse matrix
    and the translation when ``return_augmentation_params`` is True.
    """
    transform = transforms3d.zooms.zfdir2mat(1)  # 3x3 identity to start
    # Random isotropic scale drawn from [1/s, s].
    if args['pc_augm_scale'] > 1:
        factor = random.uniform(1/args['pc_augm_scale'], args['pc_augm_scale'])
        transform = np.dot(transforms3d.zooms.zfdir2mat(factor), transform)
    # Random rotation about the y (upright) axis, scaled down from +-pi.
    if args['pc_augm_rot']:
        rot_scale = args['pc_rot_scale']  # degrees, expected in [0, 180]
        if rot_scale > 0:
            angle = random.uniform(-math.pi, math.pi) * rot_scale / 180.0
            # y=upright assumption (verified for the MVP shapes).
            transform = np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], angle), transform)
    # Random mirroring across the x and z planes (never y).
    if args['pc_augm_mirror_prob'] > 0:
        if random.random() < args['pc_augm_mirror_prob']/2:
            transform = np.dot(transforms3d.zooms.zfdir2mat(-1, [1, 0, 0]), transform)
        if random.random() < args['pc_augm_mirror_prob']/2:
            transform = np.dot(transforms3d.zooms.zfdir2mat(-1, [0, 0, 1]), transform)
    # Shared random translation; magnitude grows with the scale range.
    translation_sigma = args.get('translation_magnitude', 0)
    translation_sigma = max(args['pc_augm_scale'], 1) * translation_sigma
    if translation_sigma > 0:
        noise = np.random.normal(scale=translation_sigma, size=(1, 3))
        noise = noise.astype(Ps[0].dtype)
    result = []
    for cloud in Ps:
        cloud[:, :3] = np.dot(cloud[:, :3], transform.T)  # in-place on the input
        if translation_sigma > 0:
            cloud[:, :3] = cloud[:, :3] + noise
        if args['pc_augm_jitter']:
            # cf. pointnet provider.py (sigma/clip values from upstream)
            sigma, clip = 0.01, 0.05
            cloud = cloud + np.clip(sigma * np.random.randn(*cloud.shape), -clip, clip).astype(np.float32)
        result.append(cloud)
    if not return_augmentation_params:
        return result
    augmentation_params = {
        'M_inv': np.linalg.inv(transform.T).astype(Ps[0].dtype),
        'translation': (noise if translation_sigma > 0
                        else np.zeros((1, 3)).astype(Ps[0].dtype)),
    }
    return result, augmentation_params
if __name__ == '__main__':
    # Ad-hoc smoke test: augment two random 2048x6 clouds, then drop
    # into pdb to inspect the result interactively.
    import pdb
    args = {'pc_augm_scale': 0, 'pc_augm_rot': False, 'pc_rot_scale': 30.0,
            'pc_augm_mirror_prob': 0.5, 'pc_augm_jitter': False}
    num_points, num_channels = 2048, 6
    num_of_clouds = 2
    pc = [np.random.rand(num_points, num_channels) - 0.5 for _ in range(num_of_clouds)]
    result = augment_cloud(pc, args)
    pdb.set_trace()
import gym
import numpy as np
import random
from collections import deque
from tensorflow.keras import models, layers, optimizers
import matplotlib.pyplot as plt
class DQN:
    """Deep Q-Network agent: replay buffer, online net and target net."""
    def __init__(self, env):
        self.env = env
        # replay buffer
        self.buffer = deque(maxlen=10000)
        self.discount = 1
        self.epsilon = 1
        self.epsilon_min = 0.005
        self.epsilon_decay = 0.995
        self.update_target = 10      # episodes between target-net syncs
        self.batch_size = 64
        self.train_start = 1000      # min stored transitions before training
        self.state_size = self.env.observation_space.shape[0]  # 2
        self.action_size = self.env.action_space.n  # 3
        self.lr = 0.001
        self.model = self.create_model()
        self.target_model = self.create_model()
    def create_model(self):
        """Build the two-hidden-layer MLP Q-value approximator."""
        net = models.Sequential()
        net.add(layers.Dense(16, input_dim=self.state_size, activation='relu'))
        net.add(layers.Dense(16, activation='relu'))
        net.add(layers.Dense(self.action_size, activation='linear'))
        # NOTE(review): `lr=` is the legacy Keras optimizer kwarg; kept for
        # the TF version this was written against (newer: learning_rate=).
        net.compile(optimizer=optimizers.Adam(lr=self.lr), loss='mean_squared_error')
        return net
    def act(self, state):
        """Epsilon-greedy action choice; epsilon decays on every call."""
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        if random.random() <= self.epsilon:
            return self.env.action_space.sample()  # explore
        return np.argmax(self.model.predict(state)[0])  # exploit
    def train(self):
        """Fit the online net on one random minibatch from the buffer."""
        if len(self.buffer) < self.train_start:
            return
        minibatch = random.sample(self.buffer, self.batch_size)
        states = np.zeros((self.batch_size, self.state_size))
        targets = np.zeros((self.batch_size, self.action_size))
        for i, (state, action, reward, next_state, done) in enumerate(minibatch):
            # Only the taken action's Q-value contributes to the loss;
            # the rest keep the network's own predictions.
            q_values = self.model.predict(state)[0]
            if done:
                q_values[action] = reward
            else:
                q_values[action] = reward + self.discount * np.max(self.target_model.predict(next_state)[0])
            states[i] = state
            targets[i] = q_values
        self.model.fit(states, targets, batch_size=self.batch_size, verbose=0)
    def store(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.buffer.append([state, action, reward, next_state, done])
    def target_train(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())
if __name__ == "__main__":
env = gym.make("MountainCar-v0")
episode = 1000
dqn = DQN(env=env)
r = []
counter = 0
for epi in range(episode):
cur_state = env.reset().reshape(1, dqn.state_size)
score = 0
done = False
while not done:
# env.render()
action = dqn.act(cur_state)
next_state, reward, done, info = env.step(action)
next_state = next_state.reshape(1, 2)
# add to replay buffer
dqn.store(cur_state, action, reward, next_state, done)
# train network
counter += 1
if counter % 256 == 0:
dqn.train()
score += reward
cur_state = next_state
if epi % dqn.update_target == 0:
# update target network
dqn.target_train()
print("Episode: {} Reward_sum: {}".format(epi, score))
r.append(score)
plt.plot(range(episode), r)
plt.xlabel("Episodes")
plt.ylabel("Sum of rewards")
plt.show() | |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import numpy as np
from colormap import rgb2hex
class Ibcs():
    """IBCS-style bar chart builder.

    Lays out three vertically stacked panels sharing one x axis —
    percentage delta (top), absolute delta (middle) and the actual
    PY/CY values (bottom) — plus message and title areas above them.
    """
    def ibcs_grid(self, fig, major_ticks1, major_ticks2, perc_ticks,
                  m_gr, t_gr, l_gr, offset, **kwargs):
        """Creates a Gridspec for subplots
        Arguments:
            fig -- Matplotlib figure
            major_ticks1 -- y-axis ticks for the major (values) plot
            major_ticks2 -- y-axis ticks for the delta plot
            perc_ticks -- y-axis ticks for the percentage delta plot
            m_gr -- gridspec size (rows) for the message area
            t_gr -- gridspec size (rows) for the title area
            l_gr -- space padding top
            offset -- space padding bottom
        Returns:
            tuple -- (ax0, ax1, ax2, msg, tit): percentage axis, delta
            axis, values axis, message area, title area
        """
        # Define Gridspec and Sub-Grids; total row count is driven by the
        # tick counts so each panel's height tracks its value range.
        col = 1
        row = m_gr+t_gr + l_gr+(len(perc_ticks) + len(major_ticks1) + len(major_ticks2))+offset
        gs = gridspec.GridSpec(ncols=col, nrows=row)
        # Upper sub-grid: message + title rows.
        s0col = 1
        s0row = m_gr+t_gr
        loc_0 = (m_gr+t_gr)
        gs0 = gridspec.GridSpecFromSubplotSpec(nrows=s0row, ncols=s0col, subplot_spec=gs[:loc_0])
        # Lower sub-grid: the three chart panels.
        # NOTE(review): gs1 has s1row rows but the panel slices below run
        # up to l_gr + s1row — only consistent when l_gr == 0; confirm.
        s1col = 1
        s1row = (len(perc_ticks) + len(major_ticks1) + len(major_ticks2))
        gs1 = gridspec.GridSpecFromSubplotSpec(nrows=s1row, ncols=s1col, subplot_spec=gs[loc_0:])
        msg = fig.add_subplot(gs0[:m_gr, 0])
        tit = fig.add_subplot(gs0[m_gr:, 0])
        ax0 = fig.add_subplot(gs1[l_gr:(l_gr+len(perc_ticks)), 0])
        ax1 = fig.add_subplot(gs1[(l_gr+len(perc_ticks)):(l_gr+(len(perc_ticks) + len(major_ticks2))), 0], sharex=ax0)
        ax2 = fig.add_subplot(gs1[(l_gr+(len(perc_ticks) + len(major_ticks2))):(l_gr+(len(perc_ticks) + len(major_ticks1) + len(major_ticks2))), 0], sharex=ax0)
        return ax0, ax1, ax2, msg, tit
    def remove_borders(self, x, dates, ax0, ax1, ax2, msg, tit):
        """Remove frames, ticks and tick labels from all panels (IBCS
        charts carry their numbers as annotations, not axis labels).
        Arguments:
            x -- x positions of the bars
            dates -- x tick labels for the bottom (values) panel
            ax0, ax1, ax2 -- percentage, delta and values axes
            msg, tit -- message and title areas
        """
        ax1.set_xticks(x);
        ax2.set_xticks(x);
        # Remove Frame
        ax0.set_frame_on(False)
        ax2.set_frame_on(False)
        ax1.set_frame_on(False)
        msg.set_frame_on(False)
        # Title keeps only its top spine (bottom/left/right hidden).
        tit.spines['bottom'].set_visible(False)
        tit.spines['right'].set_visible(False)
        tit.spines['left'].set_visible(False)
        # Remove Ticks; only the bottom panel shows the date labels.
        ax2.set_xticks(x)
        ax2.set_xticklabels(dates);
        ax0.tick_params(axis='both', which='both', length=0)
        ax1.tick_params(axis='both', which='both', length=0)
        ax2.tick_params(axis='both', which='both', length=0)
        msg.tick_params(axis='both', which='both', length=0)
        tit.tick_params(axis='both', which='both', length=0)
        plt.setp(ax0.get_xticklabels(), visible=False)
        plt.setp(ax1.get_xticklabels(), visible=False)
        plt.setp(msg.get_xticklabels(), visible=False)
        plt.setp(tit.get_xticklabels(), visible=False)
        plt.setp(ax0.get_yticklabels(), visible=False)
        plt.setp(ax1.get_yticklabels(), visible=False)
        plt.setp(ax2.get_yticklabels(), visible=False)
        plt.setp(msg.get_yticklabels(), visible=False)
        plt.setp(tit.get_yticklabels(), visible=False)
    def ibcs_barchart(self, data, dates, **kwargs):
        """Draw the full IBCS chart for two data rows.

        Arguments:
            data -- 2 x n array-like; data[0] is the baseline (PY) and
                data[1] the current (CY) series
            dates -- n x-axis labels
            kwargs -- required keys: cap_perc, m_gr, t_gr, l_gr, offset,
                axis_pad, label_space, label_normalize
        """
        x = np.arange(data.shape[1])
        delta = data[1] - data[0]
        delta_perc = delta / data[0]
        idx_pos = (delta > 0)  # mask of bars that grew vs the baseline
        # Tick arrays only drive panel heights / y-limits (labels hidden).
        major_ticks1 = np.arange(data.max(), data.min(), -5)
        major_ticks2 = np.arange(delta.max(), delta.min(), -5)
        # Percentage ticks capped at kwargs['cap_perc'], in 0.1 steps.
        perc_ticks = np.arange(np.ceil(np.nanmin([delta_perc.max(), kwargs['cap_perc']])*10)/10, np.floor(delta_perc.min()*10)/10, -0.1)
        fig = plt.figure()
        m_gr = kwargs['m_gr']
        t_gr = kwargs['t_gr']
        l_gr = kwargs['l_gr']
        offset = kwargs['offset']
        axis_pad = kwargs['axis_pad']
        ax0, ax1, ax2, msg, tit = self.ibcs_grid(fig,
                                                 major_ticks1,
                                                 major_ticks2,
                                                 perc_ticks,
                                                 m_gr,
                                                 t_gr,
                                                 l_gr,
                                                 offset)
        # Remove Plot Borders
        ax1.set_ylim(major_ticks2[-1]-axis_pad, major_ticks2[0]+axis_pad)
        ax2.set_ylim(major_ticks1[-1]-axis_pad, major_ticks1[0]+axis_pad)
        self.remove_borders(x, dates, ax0, ax1, ax2, msg, tit)
        msg.text(0, 1, "Message", size=10);
        tit.text(0, 0, "SomeG GmbH\n"+r"$\bf{Revenue}$"+" in mEUR\nPY, CY 2009..2018", size=10);
        # Plots: percentage panel (thin bars + square markers) ...
        ax0.plot(x, np.zeros(len(x)), color=rgb2hex(0, 0, 0), linewidth=2)
        ax0.scatter(x, delta_perc, marker='s', color=rgb2hex(0, 0, 0))
        ax0.bar(x[idx_pos], delta_perc[idx_pos], color=rgb2hex(140, 180, 0), width=0.05)
        ax0.bar(x[~idx_pos], delta_perc[~idx_pos], color=rgb2hex(250, 0, 0), width=0.05)
        # ... delta panel (green = growth, red = decline) ...
        ax1.plot(x, np.zeros(len(x)), color='k', linewidth=2)
        ax1.bar(x[idx_pos], delta[idx_pos], color=rgb2hex(140, 180, 0), width=0.5)
        ax1.bar(x[~idx_pos], delta[~idx_pos], color=rgb2hex(250, 0, 0), width=0.5)
        # ... values panel (PY outlined white, CY solid grey, overlapped).
        ax2.plot(x, np.zeros(len(x))+1, color='k', linewidth=1)
        ax2.bar(x - 0.5/9, data[0], color=rgb2hex(255, 255, 255), width=0.5, edgecolor=['k'])
        ax2.bar(x + 0, data[1], color=rgb2hex(64, 64, 64), width=0.5)
        # Annotate the percentage and delta bars.
        for ix in x:
            if ix in x[idx_pos]:
                va = 'bottom'
                space = kwargs['label_space']
            else:
                va = 'top'
                space = -kwargs['label_space']
            label = '{0:+}'.format(int(delta_perc[ix]*100))
            ax0.annotate(
                label,                       # Use `label` as label
                (x[ix], delta_perc[ix]),     # Place label at end of the bar
                xytext=(0, space*3),         # Vertically shift label by `space`
                textcoords="offset points",  # Interpret `xytext` as offset in points
                ha='center',                 # Horizontally center label
                va=va)                       # Align above growth, below decline
            label = '{0:+}'.format(int(round(delta[ix]/kwargs['label_normalize'], 0)))
            ax1.annotate(
                label,                       # Use `label` as label
                (x[ix], delta[ix]),          # Place label at end of the bar
                xytext=(0, space),           # Vertically shift label by `space`
                textcoords="offset points",  # Interpret `xytext` as offset in points
                ha='center',                 # Horizontally center label
                va=va)                       # Align above growth, below decline
        # Panel captions on the left edge.
        ax0.text(-0.5, 0, '\u0394BU %')
        ax1.text(-0.5, 0, '\u0394BU')
        # Annotate the CY value bars.
        for ix in range(len(x)):
            if data[1][ix] > 0:
                va = 'bottom'
                space = kwargs['label_space']
            else:
                va = 'top'
                space = -kwargs['label_space']
            label = str(int(round(data[1][ix]/kwargs['label_normalize'], 1)))
            ax2.annotate(
                label,                       # Use `label` as label
                (x[ix], data[1][ix]),        # Place label at end of the bar
                xytext=(0, space),           # Vertically shift label by `space`
                textcoords="offset points",  # Interpret `xytext` as offset in points
                ha='center',                 # Horizontally center label
                va=va)                       # Align above positive, below negative
        return plt.show()
__all__ = [
'TableToTimeGrid',
'ReverseImageDataAxii',
'TranslateGridOrigin',
]
__displayname__ = 'Transform'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .. import _helpers, interface
from ..base import FilterBase
###############################################################################
class TableToTimeGrid(FilterBase):
"""A filter to convert a static (no time variance) table to a time varying
grid. This effectively reashapes a table full of data arrays as a 4D array
that is placed onto the CellData of a ``vtkImageData`` object.
"""
__displayname__ = 'Table To Time Grid'
__category__ = 'filter'
def __init__(
self,
extent=(10, 10, 10, 1),
order='C',
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
dims=(0, 1, 2, 3),
dt=1.0,
points=False,
**kwargs
):
FilterBase.__init__(
self,
nInputPorts=1,
nOutputPorts=1,
inputType='vtkTable',
outputType='vtkImageData',
**kwargs
)
if len(extent) != 4:
raise _helpers.PVGeoError('`extent` must be of length 4.')
self.__extent = list(extent)
self.__dims = list(
dims
) # these are indexes for the filter to use on the reshape.
# NOTE: self.__dims[0] is the x axis index, etc., self.__dims[3] is the time axis
self.__spacing = list(spacing) # image data spacing
self.__origin = list(origin) # image data origin
self.__order = list(order) # unpacking order: 'C' or 'F'
self.__data = None # this is where we hold the data so entire filter does
# not execute on every time step. Data will be a disctionary of 4D arrays
# each 4D array will be in (nx, ny, nz, nt) shape
self.__needToRun = True
self.__timesteps = None
self.__dt = dt
# Optional parameter to switch between cell and point data
self.__usePointData = points
self.__needToUpdateOutput = True
def _set_data(self, table):
"""Internal helper to restructure the inpt table arrays"""
self.__data = dict()
dims = np.array([d for d in self.__dims])
sd = dims.argsort()
df = interface.table_to_data_frame(table)
keys = df.keys().tolist()
for k in keys:
# perfrom the reshape properly. using the user given extent
arr = np.reshape(df[k].values, self.__extent, order=self.__order)
# Now order correctly for the image data spatial reference
# this uses the user specified dimension definitions
for i in range(4):
arr = np.moveaxis(arr, sd[i], dims[i])
# Now add to disctionary
self.__data[k] = arr
self.__needToRun = False
return
def _build_image_data(self, img):
"""Internal helper to consturct the output"""
if self.__needToUpdateOutput:
# Clean out the output data object
img.DeepCopy(vtk.vtkImageData())
self.__needToUpdateOutput = False
ext = self.__extent
dims = self.__dims
nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
if not self.__usePointData:
nx += 1
ny += 1
nz += 1
sx, sy, sz = self.__spacing[0], self.__spacing[1], self.__spacing[2]
ox, oy, oz = self.__origin[0], self.__origin[1], self.__origin[2]
img.SetDimensions(nx, ny, nz)
img.SetSpacing(sx, sy, sz)
img.SetOrigin(ox, oy, oz)
return img
def _update_time_steps(self):
"""For internal use only: appropriately sets the timesteps."""
nt = self.__extent[self.__dims[3]]
if nt > 1:
self.__timesteps = _helpers.update_time_steps(self, nt, self.__dt)
return 1
#### Algorithm Methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
table = self.GetInputData(inInfo, 0, 0)
img = self.GetOutputData(outInfo, 0)
self._build_image_data(img)
# Perfrom task
if self.__needToRun:
self._set_data(table)
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
for k, arr in self.__data.items():
# NOTE: Keep order='F' because of the way the grid is already reshaped
# the 3D array has XYZ structure so VTK requires F ordering
narr = interface.convert_array(arr[:, :, :, i].flatten(order='F'), name=k)
if self.__usePointData:
img.GetPointData().AddArray(narr)
else:
img.GetCellData().AddArray(narr)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Used by pipeline to set whole output extent."""
# Setup the ImageData
ext = self.__extent
dims = self.__dims
nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
if self.__usePointData:
ext = [0, nx - 1, 0, ny - 1, 0, nz - 1]
else:
ext = [0, nx, 0, ny, 0, nz]
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
# Now set the number of timesteps:
self._update_time_steps()
return 1
#### Setters / Getters ####
def Modified(self, run_again=True):
"""Call modified if the filter needs to run again"""
if run_again:
self.__needToRun = run_again
self.__needToUpdateOutput = True
FilterBase.Modified(self)
def modified(self, run_again=True):
"""Call modified if the filter needs to run again"""
return self.Modified(run_again=run_again)
def set_extent(self, nx, ny, nz, nt):
"""Set the extent of the output grid"""
if self.__extent != [nx, ny, nz, nt]:
self.__extent = [nx, ny, nz, nt]
self.Modified()
def set_dimensions(self, x, y, z, t):
"""Set the dimensions of the output grid"""
if self.__dims != [x, y, z, t]:
self.__dims = [x, y, z, t]
self.Modified()
def set_spacing(self, dx, dy, dz):
"""Set the spacing for the points along each axial direction"""
if self.__spacing != [dx, dy, dz]:
self.__spacing = [dx, dy, dz]
self.Modified()
def set_origin(self, x0, y0, z0):
"""Set the origin of the output `vtkImageData`"""
if self.__origin != [x0, y0, z0]:
self.__origin = [x0, y0, z0]
self.Modified()
def set_order(self, order):
"""Set the reshape order (`'C'` or `'F'`)"""
if self.__order != order:
self.__order = order
self.Modified(run_again=True)
def get_time_step_values(self):
"""Use this in ParaView decorator to register timesteps on the pipeline."""
return self.__timesteps.tolist() if self.__timesteps is not None else None
def set_time_delta(self, dt):
"""An advanced property to set the time step in seconds."""
if dt != self.__dt:
self.__dt = dt
self.Modified()
def set_use_points(self, flag):
"""Set whether or not to place the data on the nodes/cells of the grid.
True places data on nodes, false places data at cell centers (CellData).
In ParaView, switching can be a bit buggy: be sure to turn the visibility
of this data object OFF on the pipeline when changing between nodes/cells.
"""
if self.__usePointData != flag:
self.__usePointData = flag
self.Modified(run_again=True)
###############################################################################
class ReverseImageDataAxii(FilterBase):
"""This filter will flip ``vtkImageData`` on any of the three cartesian axii.
A checkbox is provided for each axis on which you may desire to flip the data.
"""
__displayname__ = 'Reverse Image Data Axii'
__category__ = 'filter'
def __init__(self, axes=(True, True, True)):
FilterBase.__init__(
self,
nInputPorts=1,
inputType='vtkImageData',
nOutputPorts=1,
outputType='vtkImageData',
)
self.__axes = list(axes[::-1]) # Z Y X (FORTRAN)
def _reverse_grid_axes(self, idi, ido):
"""Internal helper to reverse data along specified axii"""
# Copy over input to output to be flipped around
# Deep copy keeps us from messing with the input data
ox, oy, oz = idi.GetOrigin()
ido.SetOrigin(ox, oy, oz)
sx, sy, sz = idi.GetSpacing()
ido.SetSpacing(sx, sy, sz)
ext = idi.GetExtent()
nx, ny, nz = ext[1] + 1, ext[3] + 1, ext[5] + 1
ido.SetDimensions(nx, ny, nz)
widi = dsa.WrapDataObject(idi)
# Iterate over all array in the PointData
for j in range(idi.GetPointData().GetNumberOfArrays()):
# Go through each axis and rotate if needed
arr = widi.PointData[j]
arr = np.reshape(arr, (nz, ny, nx))
for i in range(3):
if self.__axes[i]:
arr = np.flip(arr, axis=i)
# Now add that data array to the output
data = interface.convert_array(
arr.flatten(), name=idi.GetPointData().GetArrayName(j)
)
ido.GetPointData().AddArray(data)
# Iterate over all array in the CellData
for j in range(idi.GetCellData().GetNumberOfArrays()):
# Go through each axis and rotate if needed
arr = widi.CellData[j]
arr = np.reshape(arr, (nz - 1, ny - 1, nx - 1))
for i in range(3):
if self.__axes[i]:
arr = np.flip(arr, axis=i)
# Now add that data array to the output
data = interface.convert_array(
arr.flatten(), name=idi.GetCellData().GetArrayName(j)
)
ido.GetCellData().AddArray(data)
return ido
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output."""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perfrom task
self._reverse_grid_axes(pdi, pdo)
return 1
#### Seters and Geters ####
def set_flip_x(self, flag):
"""Set the filter to flip th input data along the X-axis"""
if self.__axes[2] != flag:
self.__axes[2] = flag
self.Modified()
def set_flip_y(self, flag):
"""Set the filter to flip th input data along the Y-axis"""
if self.__axes[1] != flag:
self.__axes[1] = flag
self.Modified()
def set_flip_z(self, flag):
"""Set the filter to flip th input data along the Z-axis"""
if self.__axes[0] != flag:
self.__axes[0] = flag
self.Modified()
###############################################################################
# ---- Translate Grid Origin ----#
class TranslateGridOrigin(FilterBase):
"""This filter will translate the origin of `vtkImageData` to any specified
Corner of the data set assuming it is currently in the South West Bottom
Corner (will not work if Corner was moved prior).
"""
__displayname__ = 'Translate Grid Origin'
__category__ = 'filter'
def __init__(self, corner=1):
FilterBase.__init__(
self,
nInputPorts=1,
inputType='vtkImageData',
nOutputPorts=1,
outputType='vtkImageData',
)
self.__corner = corner
def _translate(self, pdi, pdo):
"""Internal helper to translate the inputs origin"""
if pdo is None:
pdo = vtk.vtkImageData()
[nx, ny, nz] = pdi.GetDimensions()
[sx, sy, sz] = pdi.GetSpacing()
[ox, oy, oz] = pdi.GetOrigin()
pdo.DeepCopy(pdi)
xx, yy, zz = 0.0, 0.0, 0.0
if self.__corner == 1:
# South East Bottom
xx = ox - (nx - 1) * sx
yy = oy
zz = oz
elif self.__corner == 2:
# North West Bottom
xx = ox
yy = oy - (ny - 1) * sy
zz = oz
elif self.__corner == 3:
# North East Bottom
xx = ox - (nx - 1) * sx
yy = oy - (ny - 1) * sy
zz = oz
elif self.__corner == 4:
# South West Top
xx = ox
yy = oy
zz = oz - (nz - 1) * sz
elif self.__corner == 5:
# South East Top
xx = ox - (nx - 1) * sx
yy = oy
zz = oz - (nz - 1) * sz
elif self.__corner == 6:
# North West Top
xx = ox
yy = oy - (ny - 1) * sy
zz = oz - (nz - 1) * sz
elif self.__corner == 7:
# North East Top
xx = ox - (nx - 1) * sx
yy = oy - (ny - 1) * sy
zz = oz - (nz - 1) * sz
pdo.SetOrigin(xx, yy, zz)
return pdo
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output."""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perfrom task
self._translate(pdi, pdo)
return 1
#### Seters and Geters ####
def set_corner(self, corner):
"""Set the corner to use
Args:
corner (int) : corner location; see note.
Note:
* 1: South East Bottom
* 2: North West Bottom
* 3: North East Bottom
* 4: South West Top
* 5: South East Top
* 6: North West Top
* 7: North East Top
"""
if self.__corner != corner:
self.__corner = corner
self.Modified()
############################################################################### | |
from . import datafetcher
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from floodsystem.stationdata import build_station_list
from floodsystem.station import MonitoringStation
import numpy as np
from floodsystem.analysis import polyfit
import matplotlib
def plot_water_levels(station, dates, levels):
    """Plot a station's water-level history together with horizontal lines
    marking its typical low (red) and high (green) range."""
    low = station.typical_range[0]
    high = station.typical_range[1]
    # Level history plus the two typical-range reference lines
    plt.plot(dates, levels)
    plt.axhline(y=low, color='r', linestyle='-')
    plt.axhline(y=high, color='g', linestyle='-')
    # Axis labels, rotated date ticks, and a per-station title
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)
    # tight_layout keeps the rotated date labels from being clipped
    plt.tight_layout()
    plt.show()
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot a station's water-level history, a degree-``p`` polynomial fit,
    and horizontal lines for the typical low/high range."""
    # Dates become float day numbers so they can feed the polynomial
    day_nums = matplotlib.dates.date2num(dates)
    # Sample the fitted curve at 50 evenly spaced points over the interval
    sample_pts = np.linspace(day_nums[0], day_nums[-1], 50)
    low = station.typical_range[0]
    high = station.typical_range[1]
    plt.plot(dates, levels)
    # polyfit returns a tuple whose first element is the fitted polynomial;
    # the fit is parameterized in days offset from the first date
    best_fit = polyfit(dates, levels, p)[0]
    plt.plot(sample_pts, best_fit(sample_pts - day_nums[0]))
    # Typical-range reference lines
    plt.axhline(y=low, color='r', linestyle='-')
    plt.axhline(y=high, color='g', linestyle='-')
    # Axis labels, rotated date ticks, and a per-station title
    plt.xlabel('date')
    plt.ylabel('water level (m)')
    plt.xticks(rotation=45)
    plt.title(station.name)
    # tight_layout keeps the rotated date labels from being clipped
    plt.tight_layout()
    plt.show()
# Numpy exercises - 03
# ********************
# Create and display a zero-filled vector of length 10.
import numpy as np

arr = np.zeros(10)
print('arr=', arr)
import requests
from bs4 import BeautifulSoup
from calendar import monthrange
from datetime import datetime
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from utils import getLogger
from ArticleParser import ArticleParser
logger = getLogger("ScrapeDaily")
class ScrapeDaily:
    """Scrapes article metadata for a given tag and date from Medium's archive.

    Args:
        tag (str): Name of the tag.
        date (str): Date of the articles to be scraped.

    Attributes:
        scrape_class (str): Value for class of div which contains the metadata.
        url (str): URL where the collection of articles for the date lives.
        data_dict (dict): Placeholder for the metadata we are about to scrape.
    """

    def __init__(self, tag, date):
        self.tag = tag
        self.date = self._getValidDate(date)
        self.scrape_class = "streamItem streamItem--postPreview js-streamItem"
        self.url = f"https://medium.com/tag/{self.tag}/archive/{self.date}"
        self.data_dict = {"link": list(), "total_claps": list(), "total_responses": list(), "text": list()}

    def _getValidDate(self, date):
        # Currently a pass-through; hook for future date validation.
        return date

    def _get_total_claps(self, card):
        """Get total claps text for the article, or False if not present."""
        try:
            claps = card.find("button",{"class":"button button--chromeless u-baseColor--buttonNormal js-multirecommendCountButton u-disablePointerEvents"}).text
            return claps
        except Exception:  # .find() returned None (no claps element)
            return False

    def _get_total_responses(self, card):
        """Get total responses text for the article, or False if not present."""
        try:
            responses = card.find("a", {"class":"button button--chromeless u-baseColor--buttonNormal"}).text
            return responses
        except Exception:  # .find() returned None (no responses element)
            return False

    def _get_link(self, card):
        """Get link of the article, or False if not present."""
        try:
            link = card.find("a", {"class":'link link--darken' })["data-action-value"]
            return link
        except Exception:  # element or attribute missing
            return False

    def _cntComment(self, card):
        """Return True if the current card is a comment rather than an article."""
        return bool(card.findAll("div", {"class":"u-textDarker u-noWrapWithEllipsis"}))

    def _parse_link(self, link):
        """Parse the link, removing the query string (source=archive)."""
        try:
            return link.split("?")[0]
        except Exception:  # link may be False/non-string; return unchanged
            return link

    def _parse_claps(self, claps):
        """Convert a claps string (e.g. '1.2K', '3,400') into an integer.

        Returns the input unchanged if it cannot be parsed.
        """
        try:
            if "," in claps:
                claps = claps.replace(",", "")
            if "K" in claps:
                return int(float(claps.split("K")[0]) * 1000)
            if "k" in claps:
                return int(float(claps.split("k")[0]) * 1000)
            return int(claps)
        except Exception:
            return claps

    def _parse_responses(self, responses):
        """Parse number of responses ('5 responses') into an integer."""
        try:
            if responses == 0:
                return responses
            return self._parse_claps(responses.split(" ")[0])
        except Exception:
            return responses

    def _extract(self):
        """Extract each article's metadata from the archive url."""
        logger.info(f"Extracting {self.tag} content of date: {self.date}")
        page = requests.get(self.url)
        soup = BeautifulSoup(page.content, 'html.parser')
        for card in soup.findAll("div", {"class": self.scrape_class}):
            if self._cntComment(card):
                continue  # skip comment cards; keep only real articles
            link = self._get_link(card)
            if not link:
                continue
            total_claps = self._get_total_claps(card)
            if not total_claps:
                total_claps = 1  # add-one smoothing for missing counts
            else:
                total_claps = self._parse_claps(total_claps) + 1
            total_responses = self._get_total_responses(card)
            if not total_responses:
                total_responses = 1
            else:
                # Bug fix: previously parsed total_claps here instead of
                # the responses string, inflating the response counts.
                total_responses = self._parse_responses(total_responses) + 1
            try:
                parsed_article = ArticleParser(link)
                self.data_dict["total_responses"].append(total_responses)
                self.data_dict["link"].append(self._parse_link(link))
                self.data_dict["total_claps"].append(total_claps)
                self.data_dict["text"].append(parsed_article.fetchArticleText())
            except Exception as e:
                logger.error(f'Failed to download {link} article: {e}')
        logger.info(f"successfully Extracted {self.tag}")

    def _scoreFeed(self, clap_percent=0.7, response_percent=0.3):
        """Create a popularity metric combining scaled claps and responses.

        Returns a DataFrame with min-max-scaled counts and the weighted
        'ClapRespScore' column.
        """
        data_dict_df = pd.DataFrame.from_dict(self.data_dict)
        data_dict_df['total_claps_scaled'] = np.nan
        data_dict_df['total_responses_scaled'] = np.nan
        # NOTE(review): 'ClapRespScores' (plural) is never filled in -- the
        # score below goes to 'ClapRespScore'. Kept to preserve the output
        # columns; likely a historical typo.
        data_dict_df['ClapRespScores'] = np.nan
        scaler = MinMaxScaler()
        data_dict_df[["total_claps_scaled", "total_responses_scaled"]] = scaler.fit_transform(
            data_dict_df[["total_claps", "total_responses"]]
        )
        data_dict_df["ClapRespScore"] = (
            (data_dict_df["total_claps_scaled"] * clap_percent)
            + (data_dict_df["total_responses_scaled"] * response_percent)
        )
        return data_dict_df

    def _filter(self, df):
        """Keep only articles whose claps exceed the 25th percentile."""
        clap_25 = np.quantile(df.total_claps.values.tolist(), 0.25)
        # .copy() avoids pandas' SettingWithCopyWarning when dropping below
        quant_25 = df[df['total_claps'] > clap_25].copy()
        quant_25.drop(['total_claps', 'total_responses'], axis=1, inplace=True)
        return quant_25

    def getFeed(self):
        """Perform all operations in order and return the filtered feed."""
        self._extract()
        self.data_dict = self._scoreFeed()
        self.data_dict = self._filter(self.data_dict)
        return self.data_dict
# Copyright 2021 Lawrence Livermore National Security, LLC
"""
This script is used by cmec-driver to run the ASoP-Coherence metrics.
It is based on the workflow in asop_coherence_example.py and
can be called with the aruments listed below. If no configuration file
with module settings is provided, the settings will be estimated.
Arguments:
* model_dir:
directory containing model data
* obs_dir:
directory containing obs data
* wk_dir:
output directory
* config:
JSON config file (optional)
Author: Ana Ordonez
"""
import argparse
import csv
import glob
import numpy as np
import os
import shutil
import json
import iris
import matplotlib
from matplotlib import cm
from cf_units import Unit
import asop_coherence as asop
# Names of the subdirectories (created under the working directory) that
# hold the figures and metrics produced by this script.
figure_dir_name = "asop_figures"
metrics_dir_name = "asop_metrics"
def get_dictionary(filename):
    """
    The Coherence package relies on a dataset dictionary. This version
    returns hard-coded defaults for two standard observation datasets and
    otherwise builds the dictionary from the file's metadata, coordinates,
    and attributes.
    Arguments:
    * filename: The (base) name of the input file.
    Returns:
    * asop_dict: A dictionary containing required and optional parameters for the ASoP Coherence package.
    """
    known_obs = (
        'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc',
        'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc',
    )
    if any(obs_name in filename for obs_name in known_obs):
        # Defaults for the standard observational data
        return {
            'infile': filename,
            'name': '',
            'dt': 10800,
            'dx': 27,
            'dy': 27,
            'constraint': 'precipitation',
            'scale_factor': 8.0,
            'legend_name': '',
            'region': [-10, 10, 60, 90],
            'box_size': 1680,
            'color': 'red',
            'region_size': 7,
            'lag_length': 6,
            'grid_type': 'native',
            'time_type': '3hr',
            'grid_desc': 'native',
            'time_desc': '3-hourly',
            'autocorr_length': 60 * 60 * 24,
        }
    return build_asop_dict(filename)
def build_asop_dict(filename):
    """Auto-populate the asop_dict using the input data file. These are
    default settings only.
    Arguments:
    * filename: File name of the dataset to describe.
    Returns:
    * asop_dict: dictionary of settings inferred from the file's metadata.
    """
    cube = iris.load_cube(filename)
    # Look for name keys. Default: first 15 characters of filename.
    name = filename.split('/')[-1][0:15]
    for key in ['source_id', 'source_label', 'short_name', 'name', 'long_name']:
        if key in cube.attributes:
            name = cube.attributes[key]
            break
    if 'variant_label' in cube.attributes:
        name += ('_' + cube.attributes['variant_label'])
    constraint = cube.standard_name
    # Time step from the first two time points.
    t1 = cube.coords('time')[0][0].cell(0).point
    t2 = cube.coords('time')[0][1].cell(0).point
    # Try assuming datetime objects, otherwise plain numbers.
    try:
        dt = int((t2 - t1).total_seconds())
    except AttributeError:  # assume units of days
        dt = int((t2 - t1) * 60 * 60 * 24)
        print('Warning: Time units not found. Units assumed to be "days"')
    # Estimate average grid spacing in km
    dims = [cube.dim_coords[n].standard_name for n in range(0, len(cube.dim_coords))]
    dim_name_lat = None
    dim_name_lon = None
    for dim in dims:
        if 'lat' in dim.lower():
            dim_name_lat = dim
        elif 'lon' in dim.lower():
            dim_name_lon = dim
    if (dim_name_lat is None) or (dim_name_lon is None):
        raise RuntimeError(filename+': latitude or longitude dimension not found.\n'
                           + 'Valid latitude names contain "lat" and '
                           + 'valid longitude names contain "lon"')
    deltas = {}
    for dvar, coord in zip(['dy', 'dx'], [dim_name_lat, dim_name_lon]):
        if cube.coords(coord)[0].is_monotonic():
            coord_list = cube.coords(coord)[0].points
            # Estimating at the equator for now (1 degree ~ 110 km)
            deltas[dvar] = np.absolute(np.mean(np.diff(coord_list)) * 110)
    # Get time descriptions (small tolerances absorb rounding in dt)
    if (dt > 86399) and (dt < 86401):
        # This is a day, with some room for error
        time_type = 'day'
        time_desc = 'daily'
    elif dt >= 86401:
        days = round(dt / (60 * 60 * 24))
        time_type = str(days) + 'day'
        time_desc = str(days) + '-day'
    elif (dt > 10799) and (dt < 10801):
        time_type = '3hr'
        time_desc = '3-hourly'
    elif (dt > 3599) and (dt < 3601):
        time_type = '1hr'
        time_desc = 'hourly'
    elif dt <= 3599:
        minutes = round(dt / 60)
        time_type = str(minutes) + 'min'
        time_desc = str(minutes) + '-min'
    elif dt <= 86399:
        # catch other hour lengths
        # Bug fix: dt/60*60 evaluates to dt; parentheses are required to
        # convert seconds to hours.
        hours = round(dt / (60 * 60))
        time_type = str(hours) + 'hour'
        time_desc = str(hours) + '-hourly'
    # Set scale factor, starting with some common units
    pr_units = cube.units
    scale_factor = 1
    if pr_units == Unit('mm'):
        scale_factor = round(86400 / dt)
    elif pr_units == Unit('kg m-2 s-1'):
        scale_factor = 86400
    else:
        try:
            # Find conversion factor between 1 in the dataset's units
            # and a "benchmark" unit
            bm = Unit('kg m-2 day-1')
            scale_factor = pr_units.convert(1, bm)
        except ValueError:
            try:
                bm = Unit('mm day-1')
                scale_factor = pr_units.convert(1, bm)
            except ValueError:
                # Bug fix: str() is required -- concatenating the integer
                # default to the message raised TypeError.
                print("Warning: Could not determine scale factor. Using default of " + str(scale_factor))
    asop_dict = {}
    asop_dict['infile'] = filename
    asop_dict['name'] = name
    asop_dict['dt'] = dt
    asop_dict['dx'] = deltas['dx']
    asop_dict['dy'] = deltas['dy']
    asop_dict['constraint'] = constraint
    asop_dict['scale_factor'] = scale_factor
    asop_dict['legend_name'] = ''
    asop_dict['region'] = ''
    asop_dict['box_size'] = 7 * deltas['dx']
    asop_dict['color'] = ''
    asop_dict['region_size'] = 7
    asop_dict['lag_length'] = 6
    asop_dict['grid_type'] = ''
    asop_dict['time_type'] = time_type
    asop_dict['grid_desc'] = ''
    asop_dict['time_desc'] = time_desc
    asop_dict['autocorr_length'] = 8 * dt
    return asop_dict
def update_asop_dict(asop_dict, region, coords, color, all_settings):
    """Adjust data dictionary quantities based on region and unique settings.
    Args:
    * asop_dict: Dictionary of settings for dataset
    * region: Name of region
    * coords: List of region boundary coordinates
    * color: Code or string designating a color
    * all_settings: Dictionary of data from CMEC settings file
    Returns:
    * asop_dict: the updated dictionary (also modified in place).
    """
    # Set unique color
    asop_dict['color'] = color
    # Apply any general user settings
    asop_dict['grid_desc'] = all_settings.get('grid', 'native')
    asop_dict['grid_type'] = all_settings.get('grid', 'native')
    asop_dict['region_name'] = region
    asop_dict['region_desc'] = region.replace('_', ' ')
    asop_dict['region'] = coords
    # Shrink dx by cos(latitude) of the region's mean latitude
    mean_lat = np.mean(coords[0:2])
    asop_dict['dx'] = asop_dict['dx'] * np.cos(np.radians(mean_lat))
    all_settings.pop('infile', '')  # key not allowed
    for key in asop_dict:
        if key in all_settings:
            asop_dict[key] = all_settings[key]
    # Apply any specific file settings.
    # Bug fix: this previously read the module-level global 'settings'
    # instead of the 'all_settings' argument, so the function only worked
    # when run from this script's __main__ block.
    infile = os.path.basename(asop_dict['infile'])
    file_settings = all_settings.get(infile, {})
    file_settings.pop('infile', '')  # key not allowed
    file_settings.pop('region', '')
    if file_settings:
        for key in file_settings:
            asop_dict[key] = file_settings[key]
    if 'legend_name' not in file_settings:
        asop_dict['legend_name'] = asop_dict['name'].replace('_', ' ')
    print('---> Final data dictionary:')
    print(json.dumps(asop_dict, sort_keys=True, indent=2))
    return asop_dict
def initialize_descriptive_json(json_filename, wk_dir, model_dir, obs_dir):
    """Create the output.json metadata file for the CMEC metrics package.
    Arguments:
    * json_filename: Name of the metadata file (recommended: output.json)
    * wk_dir: Path to output directory (parent of descriptive json)
    * model_dir: Path to model input directory to record in descriptive json
    * obs_dir: Path to obs input directory to record in descriptive json
    """
    output = {
        'provenance': {
            'environment': get_env(),
            'modeldata': model_dir,
            'obsdata': obs_dir,
            'log': wk_dir + '/asop_coherence.log.txt',
        },
        'data': {},
        'metrics': {},
        'plots': {},
        'index': 'index.html',
        'html': 'index.html',
    }
    with open(json_filename, 'w') as output_json:
        json.dump(output, output_json, indent=2)
    return
def initialize_metrics_json(metrics_filename):
    """
    Create the metrics json file with the CMEC results structure.
    Arguments:
    * metrics_filename:
        Name of the metrics file to initialize.
    """
    # Statistic descriptions shown in the dashboard
    statistics = {
        'p(upper|upper)': "Probability of upper quartile precipitation followed by upper quartile precipitation",
        'p(lower|lower)': "Probability of lower quartile precipitation followed by lower quartile precipitation",
        'p(upper|lower)': "Probability of upper quartile precipitation followed by lower quartile precipitation",
        'p(lower|upper)': "Probability of lower quartile precipitation followed by upper quartile precipitation",
        'combined': 'Metric of coherence (combined probabilities)',
    }
    dimensions = {
        'dataset': {},
        'region': {},
        'metric': {
            'Temporal intermittency': {},
            'Spatial intermittency': {}},
        'statistic': statistics,
    }
    metrics = {
        'SCHEMA': {
            'name': 'CMEC',
            'version': 'v1',
            'package': 'ASoP'},
        'DIMENSIONS': {
            'json_structure': ['dataset', 'region', 'metric', 'statistic'],
            'dimensions': dimensions},
        'RESULTS': {},
        'REFERENCE': 'Klingaman et al. (2017, GMD, doi:10.5194/gmd-10-57-2017)',
    }
    with open(metrics_filename, 'w') as fname:
        json.dump(metrics, fname, indent=2)
    return
def load_and_transpose_csv(metrics_csv):
    """Load a csv of per-dataset rows into ``{column: {dataset: value}}``.

    The first column is assumed to hold the dataset name; remaining header
    fields become the outer keys. Values stay as strings.
    Args:
    * metrics_csv:
        Path to csv file to load and transpose
    """
    # Rewritten to avoid the fragile dict.fromkeys(fields, {}) pattern,
    # which shares a single mutable dict across keys, and the dead
    # 'models' accumulator the original built but never used.
    with open(metrics_csv, 'r') as f:
        reader = csv.reader(f)
        fields = next(reader)[1:]  # drop the leading "Dataset" column
        out_dict = {field: {} for field in fields}
        for row in reader:
            model_name = row[0]
            for field, item in zip(fields, row[1:]):
                out_dict[field][model_name] = item
    return out_dict
def merge_csvs(metrics_csv_dims, metrics_csv_int, region, wk_dir):
    """Combine the data from the dimensions and metrics csv files
    into one summary file.
    Args:
    * metrics_csv_dims:
        Name of csv file with dimension data
    * metrics_csv_int:
        Name of csv file with intermittency metrics
    * region:
        Region name
    * wk_dir:
        Path to output directory
    """
    merged = load_and_transpose_csv(metrics_csv_dims)
    merged.update(load_and_transpose_csv(metrics_csv_int).copy())
    fname = os.path.join(wk_dir, region.replace(' ', '_') + '_summary.csv')
    fields = list(merged)
    # Header row: "Metric" followed by the dataset names
    header = ['Metric'] + list(merged[fields[0]])
    with open(fname, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=header)
        writer.writeheader()
        for field in fields:
            row = {'Metric': field}
            row.update(merged.get(field, {}))
            writer.writerow(row)
    base = os.path.basename(fname)
    # NOTE(review): split('.')[1] yields the file extension ('csv') as the
    # longname; index [0] was probably intended -- confirm before changing.
    data_desc = {
        base: {
            'longname': base.split('.')[1].replace('_', ' '),
            'description': 'Parameters and metrics for ' + region + ' region'}}
    # add metadata to output.json
    asop.update_output_json('metrics', data_desc, wk_dir)
def metrics_to_json(metrics_csv_int, region, coords, metrics_filename):
    """Move intermittency metrics from CSV files to CMEC formatted JSON.
    Args:
    * metrics_csv_int:
        Name of intermittency metrics csv
    * region:
        Name of region
    * coords:
        List of region boundary coordinates
    * metrics_filename:
        Name of metrics JSON to output
    """
    data = {}
    with open(metrics_csv_int, 'r') as f:
        reader = csv.reader(f)
        fields = next(reader)
        # Columns 1-5 are temporal metrics, 6+ are spatial; the slice on
        # each field name strips its "Time "/"Sp " style prefix.
        temporal_fields = fields[1:6]
        spatial_fields = fields[6:]
        for row in reader:
            temporal = {field[5:]: float(val)
                        for field, val in zip(temporal_fields, row[1:6])}
            spatial = {field[3:]: float(val)
                       for field, val in zip(spatial_fields, row[6:])}
            data[row[0]] = {"Temporal intermittency": temporal,
                            "Spatial intermittency": spatial}
    with open(metrics_filename, 'r') as fname:
        metrics = json.load(fname)
    # Add region to dimensions information
    metrics['DIMENSIONS']['dimensions']['region'].update({region: coords})
    # Update model statistics
    for model in data:
        if model not in metrics['RESULTS']:
            metrics['RESULTS'][model] = {}
            metrics['DIMENSIONS']['dimensions']['dataset'].update({model: {}})
        metrics['RESULTS'][model][region] = data[model]
    # Write new metrics to the same file
    with open(metrics_filename, 'w') as fname:
        json.dump(metrics, fname, indent=2)
def delete_intermediate_csvs(wk_dir):
    """Delete intermediate per-region csv files from the output directory.
    Args:
    * wk_dir:
        CMEC output directory path
    """
    delete_keys = ("int_metrics", "region_dims")
    for f in os.listdir(wk_dir):
        if any(key in f for key in delete_keys):
            # Bug fix: join with wk_dir -- os.remove(f) on the bare
            # listdir() name resolved against the current working
            # directory, so files inside wk_dir were never deleted.
            os.remove(os.path.join(wk_dir, f))
def write_index_html(wk_dir,region_dict,metrics_filename,ext="png"):
"""Create an html page that links users to the metrics json and
plots created by ASoP-Coherence. Results must be located in the
output directory "wk_dir".
Arguments:
* wk_dir:
Output directory
* region_dict:
Dictionary of region names
* metrics_filename:
Path to metrics JSON file
"""
# Make lists of the metrics and figure files to display
metrics_dir = os.path.join(wk_dir,metrics_dir_name)
metric_list = sorted([
f for f in os.listdir(metrics_dir) if f.endswith('_summary.csv')])
plot_list=[]
fig_list=sorted([f for f in os.listdir(wk_dir+'/'+figure_dir_name)])
for keyword in ['lag','correlations','twodpdf']:
plot_list.append([f for f in fig_list if (keyword in f)]) # sort datasets
subtitle_list=['Autocorrelation','2D Histograms','Correlation maps']
# Start working on html text. Each line is appened to a list that
# is then written to file.
html_file=['<html>\n',
'<body>','<head><title>ASoP-Coherence</title></head>\n',
'<br><h1>ASoP-Coherence results</h1>\n','<h2>Contents</h2>\n',
'<dl>\n','<dt><a href="#Metrics">Metrics</a></dt>\n',
'<dt><a href="#Figures">Figures</a></dt>\n',
'<dd><a href="#Autocorrelation">Autocorrelation</a></dd>\n',
'<dd><a href="#2D-Histograms">2D Histograms</a></dd>\n',
'<dd><a href="#Correlation-maps">Correlation Maps</a></dd>\n',
'</dl>\n''<section id="Metrics">\n','<br><h2>Metrics</h2>\n']
html_file.append('<h3>Intermittency Metrics</h3>\n')
# Display metrics JSON in dashboard option
metrics_json = os.path.basename(metrics_filename)
metrics_relocated = os.path.join(metrics_dir_name,metrics_json)
tmp='<p><a href="'+metrics_relocated+'" target="_blank">'+metrics_json+'</a></p>\n'
html_file.append(tmp)
# Link CSV tables for download
html_file.append('<h3>Tables</h3>\n')
for metric_file in metric_list:
metric_path = os.path.join(metrics_dir_name,metric_file)
html_file.append('<p><a href="{0}">{1}</a></p>\n'.format(metric_path,metric_file))
html_file.append('<br>\n')
html_file.append('</section>\n')
# Add figures
html_file.append('<section id="Figures">\n')
html_file.append('<h2>Figures</h2>\n')
for title,category in zip(subtitle_list,plot_list):
html_file.append('<section id='+title.replace(' ','-')+'>\n')
html_file.append('<h3>{0}</h3>\n'.format(title))
# Adjust figure width for autocorrelation
fwidth = "647"
if title=="Autocorrelation":
fwidth="450"
for region in region_dict:
html_file.append('<h4>{0}</h4>\n'.format(region.replace('_',' ')))
region_fig = [f for f in category if (region.replace(" ","_") in f)]
for fig in region_fig:
tmp = '<p><a href="{0}" target="_blank" alt={0}>' + \
'<img src="{0}" width={1} alt="{0}"></a></p>\n'
html_file.append(
tmp.format(os.path.join(figure_dir_name,fig),fwidth))
html_file.append('</section>\n')
html_file.append('</section>\n')
html_file.append('</body>\n</html>\n')
filename=wk_dir+'/index.html'
with open(filename,'w') as html_page:
html_page.writelines(html_file)
def get_env():
    """
    Return a mapping of dependency names to their installed versions.
    """
    from platform import python_version
    return {
        'iris': iris.__version__,
        'matplotlib': matplotlib.__version__,
        'numpy': np.__version__,
        'python': python_version(),
    }
if __name__ == '__main__':
    # Command-line driver: runs the full ASoP-Coherence workflow over every
    # file found in the model (and optionally obs) directories, once per
    # configured region.
    parser = argparse.ArgumentParser(description='ASoP Coherence parser')
    parser.add_argument('model_dir', help='model directory')
    parser.add_argument('obs_dir', help='observations directory')
    parser.add_argument('wk_dir', help='output directory')
    parser.add_argument('--config', help='configuration file', default=None)
    args = parser.parse_args()
    model_dir=args.model_dir
    obs_dir=args.obs_dir
    wk_dir=args.wk_dir
    config=args.config
    # Load user settings from cmec config
    # NOTE(review): --config defaults to None, so omitting it makes
    # open(None) raise TypeError -- confirm whether a default path or an
    # explicit error message is intended.
    with open(config) as config_file:
        settings = json.load(config_file)['ASoP/Coherence']
    ext = '.'+settings.get('figure_type','png').replace(".","")
    if 'regions' in settings:
        if isinstance(settings['regions'],dict):
            region_dict = settings['regions']
        else:
            print('Error: Please provide region dictionary')
    else:
        region_dict = {'default': [-10,10,60,90]}
    # Use all files in the obs and model directories
    dataset_list = []
    # obs_dir is compared against the literal string 'None' (as passed on
    # the command line), not the None object.
    if obs_dir !='None':
        obs_file_list = glob.glob(os.path.join(obs_dir,'*'))
        dataset_list.extend(obs_file_list)
    mod_file_list = glob.glob(os.path.join(model_dir,'*'))
    dataset_list.extend(mod_file_list)
    n_datasets = len(dataset_list)
    # Create metrics and figure folders
    fig_dir = os.path.join(wk_dir,figure_dir_name)
    met_dir = os.path.join(wk_dir,metrics_dir_name)
    os.mkdir(fig_dir)
    os.mkdir(met_dir)
    json_filename=os.path.join(wk_dir,'output.json')
    initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir)
    # NOTE(review): met_dir already includes wk_dir; joining wk_dir again
    # duplicates the prefix when wk_dir is a relative path -- confirm.
    metrics_filename = os.path.join(wk_dir,met_dir,'intermittency_metrics.json')
    initialize_metrics_json(metrics_filename)
    # Allocate memory for multi-model fields
    max_box_distance,max_timesteps,max_boxes = asop.parameters()
    all_distance_correlations = np.zeros((n_datasets,max_box_distance))
    all_distance_ranges = np.zeros((n_datasets,3,max_box_distance))
    all_distance_max = np.zeros((n_datasets),dtype=np.int64)
    all_time_correlations = np.zeros((n_datasets,max_timesteps))
    all_time_max = np.zeros((n_datasets),dtype=np.int64)
    all_dt = np.zeros((n_datasets),dtype=np.int64)
    all_legend_names = []
    # Initialize colors: one color per dataset sampled from 'Paired'
    cmap = matplotlib.cm.get_cmap('Paired')
    all_colors = [cmap(n) for n in list(np.linspace(0,1,num=n_datasets))]
    name_cache = []
    for region in region_dict:
        # Per-region intermediate csv files (removed at the end of the run)
        metrics_csv_int=met_dir+'/int_metrics_'+region.replace(' ','_')+'.csv'
        metrics_csv_dims=met_dir+'/region_dims_'+region.replace(' ','_')+'.csv'
        for i,dataset in enumerate(dataset_list):
            print('--> '+region)
            print('--> '+str(dataset))
            asop_dict = get_dictionary(dataset)
            print('----> dt: ', asop_dict['dt'])
            # Update region, colors, other specific settings
            asop_dict = update_asop_dict(asop_dict,region,region_dict[region],all_colors[i],settings)
            # Read precipitation data.
            precip = asop.read_precip(asop_dict)
            # Define edges of bins for 1D and 2D histograms
            # Note that on plots, the first and last edges will be
            # replaced by < and > signs, respectively.
            bins=[0,1,2,4,6,9,12,16,20,25,30,40,60,90,130,180,2e20]
            bins=np.asarray(bins)
            # Compute 1D and 2D histograms
            oned_hist, twod_hist = asop.compute_histogram(precip,bins)
            # Plot 1D and 2D histograms (e.g., Fig. 2a in Klingaman et al. 2017).
            asop.plot_histogram(oned_hist,twod_hist,asop_dict,bins,wk_dir=fig_dir,ext=ext)
            # Compute correlations as a function of native gridpoints, by dividing
            # analysis region into sub-regions (boxes of length region_size). Also
            # computes lag correlations to a maximum lag of lag_length.
            corr_map,lag_vs_distance,autocorr,npts_map,npts = asop.compute_equalgrid_corr(precip,asop_dict,metrics_csv=metrics_csv_dims,wk_dir=wk_dir)
            # Plot correlations as a function of native gridpoints and time lag
            # (e.g., Figs. 2c and 2e in Klingaman et al. 2017).
            asop.plot_equalgrid_corr(corr_map,lag_vs_distance,autocorr,npts,asop_dict,wk_dir=fig_dir,ext=ext)
            # Compute correlations as a function of physical distance, by dividing
            # analysis region into sub-regions (boxes of length box_size).
            all_distance_correlations[i,:],all_distance_ranges[i,:,:],all_distance_max[i] = asop.compute_equalarea_corr(precip,asop_dict)
            # Compute lagged autocorrelations over all points
            all_time_correlations[i,:],all_time_max[i] = asop.compute_autocorr(precip,asop_dict)
            # Compute spatial and temporal coherence metrics, based on quartiles (4 divisions)
            space_inter, time_inter = asop.compute_spacetime_summary(precip,4,metrics_csv=metrics_csv_int,short_name=asop_dict['name'],wk_dir=wk_dir)
            # Save dataset timestep information
            all_dt[i] = asop_dict['dt']
            # Save color and legend information
            #all_colors.append(asop_dict['color'])
            all_legend_names.append(asop_dict['legend_name'])
        # Add the intermittency metrics for this region to JSON
        metrics_to_json(metrics_csv_int, region, region_dict[region], metrics_filename)
        # Plot correlations as a function of physical distance for all datasets
        asop.plot_equalarea_corr(all_distance_correlations,all_distance_ranges,all_distance_max,colors=all_colors,legend_names=all_legend_names,set_desc=region,wk_dir=fig_dir,ext=ext)
        # Plot correlations as a function of physical time for all datasets
        asop.plot_autocorr(all_time_correlations,all_time_max,dt=all_dt,colors=all_colors,legend_names=all_legend_names,set_desc=region,wk_dir=fig_dir,ext=ext)
        # Load and process csv files for this region to make main csv
        merge_csvs(metrics_csv_dims,metrics_csv_int,region,met_dir)
    # move figures and metrics to new folders and generate html pages
    delete_intermediate_csvs(wk_dir)
    write_index_html(wk_dir,region_dict,metrics_filename,ext=ext)
# Copyright (C) 2019 Klaus Spanderen
#
# This file is part of QuantLib, a free-software/open-source library
# for financial quantitative analysts and developers - http://quantlib.org/
#
# QuantLib is free software: you can redistribute it and/or modify it under the
# terms of the QuantLib license. You should have received a copy of the
# license along with this program; if not, please email
# <quantlib-dev@lists.sf.net>. The license is also available online at
# <http://quantlib.org/license.shtml>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the license for more details.
import math
import QuantLib as ql
import matplotlib.pyplot as plt
import numpy as np

# Plot the leverage function of a Heston stochastic-local-volatility model
# calibrated by Monte-Carlo against a flat local-volatility surface.
todaysDate = ql.Date(15, ql.May, 2019)
exerciseDate = todaysDate + ql.Period(4, ql.Years)
ql.Settings.instance().evaluationDate = todaysDate
settlementDate = todaysDate + ql.Period(2, ql.Days)

dc = ql.Actual365Fixed()
riskFreeRate = ql.YieldTermStructureHandle(ql.FlatForward(settlementDate, 0.05, dc))
dividendYield = ql.YieldTermStructureHandle(ql.FlatForward(settlementDate, 0.025, dc))
spot = 100
underlying = ql.QuoteHandle(ql.SimpleQuote(spot))
vol = 0.30
# Flat Black vol turned into a (trivial) local-vol surface
localVol = ql.LocalVolSurface(
    ql.BlackVolTermStructureHandle(ql.BlackConstantVol(settlementDate, ql.TARGET(), vol, dc)),
    riskFreeRate,
    dividendYield,
    underlying,
)
# Heston parameters: v0=0.09, kappa=1.0, theta=0.06, sigma=0.4, rho=-0.75
hestonProcess = ql.HestonProcess(riskFreeRate, dividendYield, underlying, 0.09, 1.0, 0.06, 0.4, -0.75)
hestonModel = ql.HestonModel(hestonProcess)
# Monte-Carlo SLV calibration; 91 time steps to the exercise date
leverageFct = ql.HestonSLVMCModel(
    localVol, hestonModel, ql.MTBrownianGeneratorFactory(1234), exerciseDate, 91
).leverageFunction()
# uncomment this if you want to use PDE calibration instead of Monte-Carlo
# fdmParams = ql.HestonSLVFokkerPlanckFdmParams(
#     201,
#     101,
#     200,
#     30,
#     2.0,
#     0,
#     2,
#     0.01,
#     1e-4,
#     10000,
#     1e-5,
#     1e-5,
#     0.0000025,
#     1.0,
#     0.1,
#     0.9,
#     1e-4,
#     ql.FdmHestonGreensFct.Gaussian,
#     ql.FdmSquareRootFwdOp.Log,
#     ql.FdmSchemeDesc.Hundsdorfer(),
# )
# leverageFct = ql.HestonSLVFDMModel(localVol, hestonModel, exerciseDate, fdmParams).leverageFunction()

# Sample the leverage function on a (time, log-spot) grid; the spot range
# widens with sqrt(t) to follow the diffusion of the underlying.
tSteps = 40
uSteps = 30
tv = np.linspace(0.1, dc.yearFraction(settlementDate, exerciseDate), tSteps)
t = np.empty(tSteps * uSteps)
s = np.empty(tSteps * uSteps)
z = np.empty(tSteps * uSteps)
for i in range(0, tSteps):
    scale = min(4, math.exp(3 * math.sqrt(tv[i]) * vol))
    sv = np.linspace(spot / scale, spot * scale, uSteps)
    for j in range(0, uSteps):
        idx = i * uSteps + j
        t[idx] = tv[i]
        s[idx] = math.log(sv[j])
        z[idx] = leverageFct.localVol(t[idx], sv[j])

fig = plt.figure()
ax = plt.axes(projection="3d")
surf = ax.plot_trisurf(s, t, z, cmap=plt.cm.viridis, linewidth=0, antialiased=False, edgecolor="none")
ax.view_init(30, -120)
ax.set_xlabel("ln(S)")
ax.set_ylabel("Time")
# Bug fix: raw string avoids the invalid "\e" escape sequence warning
# (DeprecationWarning, SyntaxWarning in newer Pythons); rendered text is
# unchanged.
ax.text2D(0.225, 0.985, r"Leverage Function with $\eta=1.0$", transform=ax.transAxes)
fig.colorbar(surf, shrink=0.75, aspect=14)
plt.show()
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 15:31:20 2017
@author: Sadhna Kathuria
"""
## calculate pi using monte carlo simulation
import numpy as np
import matplotlib.pyplot as plt
# Monte Carlo estimate of pi: the fraction of uniform points in the unit
# square falling inside the quarter circle tends to pi/4.
nums = 1000      # samples per trial
n_trials = 100   # number of trials (was `iter`, which shadowed the builtin)
pi_val_list = []
for _ in range(n_trials):
    # Vectorized draw replaces the original per-point Python loop
    x = np.random.uniform(0, 1, nums)
    y = np.random.uniform(0, 1, nums)
    # sqrt is unnecessary: sqrt(x^2 + y^2) < 1  <=>  x^2 + y^2 < 1
    inside = np.count_nonzero(x * x + y * y < 1.0)
    pi_val_list.append(4.0 * inside / nums)
pi_avg = sum(pi_val_list) / n_trials
ind = range(1, n_trials + 1)
# Plot the per-trial pi estimate against the trial index.
fig = plt.plot(ind,pi_val_list)
#return(pi_avg,fig)
from flask import Flask,render_template,request,redirect,url_for
import easygui
import sqlite3 as sql
import csv
import random
import math
import numpy as np
from pysqlcipher import dbapi2 as sqlcipher
from sklearn import tree
app=Flask(__name__)
@app.route("/")
def index():
#if request.method== 'POST':
return render_template('index.html')
@app.route('/doctor', methods=['GET', 'POST'])
def doctor():
    """Serve the doctor registration page."""
    return render_template('doctor.html')
@app.route('/receptionist', methods=['GET', 'POST'])
def receptionist():
    """Serve the receptionist registration page."""
    return render_template('receptionist.html')
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Redirect /home back to the landing page."""
    return redirect(url_for('index'))
@app.route('/adddoc', methods=['POST', 'GET'])
def adddoc():
    """Register a new doctor from the submitted form.

    On POST, inserts the form fields into the encrypted ``doctor`` table and
    re-renders the doctor page. On any other method, shows the failure
    message and still returns the doctor page (the old code returned None,
    which made Flask raise a 500).
    """
    if request.method == 'POST':
        dname = request.form['name']
        dusername = request.form['username']
        demail = request.form['email']
        dpassword = request.form['password']
        dphone = request.form['phone']
        ddob = request.form['birth']
        daddress = request.form['address']
        dspecialization = request.form['special']
        db = sqlcipher.connect('encrypted.db')
        db.executescript('pragma key="my password"; pragma kdf_iter=64000;')
        db.execute("INSERT INTO doctor (name,username,email_id,password,phone_number,dob,address,specialisation) VALUES (?,?,?,?,?,?,?,?)",(dname,dusername,demail,dpassword,dphone,ddob,daddress,dspecialization))
        # Bug fix: without commit() the insert was rolled back when the
        # connection was garbage-collected, so nothing was persisted.
        db.commit()
        db.close()
        easygui.msgbox("Successfully Registered ",title="Registration")
        return render_template("doctor.html")
    easygui.msgbox("Registeration failed ",title="Registration")
    return render_template("doctor.html")
@app.route('/addrec',methods = ['POST', 'GET'])
def addrec():
    """Register a new receptionist from the submitted form.

    On POST, inserts the form fields into the encrypted `receptionists`
    table and re-renders the receptionist page; otherwise reports failure.
    """
    if request.method == 'POST':
        dname = request.form['name']
        dusername = request.form['username']
        demail = request.form['email']
        dpassword = request.form['password']  # NOTE(review): stored in plain text -- consider hashing
        dphone = request.form['phone']
        ddob = request.form['birth']
        daddress = request.form['address']
        dlang = request.form['language']
        db = sqlcipher.connect('encrypted.db')
        # NOTE(review): unlike the other handlers this omits `pragma
        # kdf_iter=64000` -- confirm the intended KDF settings match.
        db.executescript('pragma key="my password";')
        db.execute("INSERT INTO receptionists (name,username,email_id,password,phone_number,dob,address,language) VALUES (?,?,?,?,?,?,?,?)",(dname,dusername,demail,dpassword,dphone,ddob,daddress,dlang))
        db.commit()  # bug fix: without commit the insert was never persisted
        db.close()
        easygui.msgbox("Successfully Registered ",title="Registration")
        return render_template("receptionist.html")
    else:
        easygui.msgbox("Registeration failed ",title="Registration")
        # bug fix: a Flask view must return a response; this branch returned None
        return render_template("receptionist.html")
@app.route('/addpat',methods = ['POST', 'GET'])
def addpat():
    """Register a new patient from the submitted form.

    On POST, inserts the form fields into the encrypted `patient` table and
    re-renders the nurse landing page; otherwise reports failure.
    """
    if request.method == 'POST':
        did = request.form['pid']
        dusername = request.form['did']
        dname = request.form['pname']
        demail = request.form['pemail']
        dphone = request.form['phone']
        daddress = request.form['paddress']
        ddob = request.form['birth']
        dblood = request.form['bg']
        db = sqlcipher.connect('encrypted.db')
        db.executescript('pragma key="my password"; pragma kdf_iter=64000;')
        db.execute("INSERT INTO patient (pid,username,name,email,contact,address,dob,bloodgrp) VALUES (?,?,?,?,?,?,?,?)",(did,dusername,dname,demail,dphone,daddress,ddob,dblood))
        db.commit()  # bug fix: without commit the insert was never persisted
        db.close()
        easygui.msgbox("Successfully Registered ",title="Registration")
        return render_template("nurse-land.html")
    else:
        easygui.msgbox("Registeration failed ",title="Registration")
        # bug fix: a Flask view must return a response; this branch returned None
        return render_template("nurse-land.html")
@app.route('/doclog',methods = ['POST', 'GET'])
def doclog():
    """Authenticate a doctor; on success render the doctor dashboard."""
    if request.method=='POST':
        dusername = request.form['username']
        dpassword = request.form['password']
        db = sqlcipher.connect('encrypted.db')
        db.executescript('pragma key="my password"; pragma kdf_iter=64000;')
        # SECURITY fix: credentials were previously concatenated into the SQL
        # string (SQL injection); bind them as parameters instead.
        data = db.execute("SELECT * from doctor where username=? and password=?;", (dusername, dpassword)).fetchone()
        db.close()
        if data is None:
            easygui.msgbox("Retry ",title="Login")
            return render_template("doctor.html")
        else:
            easygui.msgbox("Successfully Logged in.... ",title="Login")
            return render_template("doctor-land.html")
    # bug fix: a plain GET previously fell through and returned None
    return render_template("doctor.html")
@app.route('/reclog',methods = ['POST', 'GET'])
def reclog():
    """Authenticate a receptionist; on success render the nurse landing page."""
    if request.method=='POST':
        dusername = request.form['username']
        dpassword = request.form['password']
        db = sqlcipher.connect('encrypted.db')
        db.executescript('pragma key="my password"; pragma kdf_iter=64000;')
        # SECURITY fix: credentials were previously concatenated into the SQL
        # string (SQL injection); bind them as parameters instead.
        data = db.execute("SELECT * from receptionists where username=? and password=?;", (dusername, dpassword)).fetchone()
        db.close()
        if data is None:
            easygui.msgbox("Retry ",title="Login")
            return render_template("receptionist.html")
        else:
            easygui.msgbox("Successfully Logged in.... ",title="Login")
            return render_template("nurse-land.html")
    # bug fix: a plain GET previously fell through and returned None
    return render_template("receptionist.html")
@app.route('/reclogout',methods=['POST','GET'])
def reclogout():
    """Logout endpoint: return to the receptionist page."""
    return render_template("receptionist.html")
@app.route('/rechome',methods=['POST','GET'])
def rechome():
    """Render the nurse landing page."""
    return render_template("nurse-land.html")
@app.route('/newpat', methods=['GET', 'POST'])
def newpat():
    """Render the new-patient registration form."""
    #if request.method== 'POST':
    return render_template('new-patient.html')
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
    """Render the feedback page."""
    #if request.method== 'POST':
    return render_template('feedback.html')
def loadCsv(filename):
    """Load a CSV file as a list of rows of floats.

    :param filename: path to a CSV file containing only numeric fields
    :returns: list of lists of floats, one inner list per CSV row
    """
    # Fix: open in text mode (csv.reader on Python 3 rejects binary handles)
    # and use a with-block so the file handle is always closed (the original
    # leaked the handle returned by open()).
    with open(filename, "r") as handle:
        return [[float(field) for field in row] for row in csv.reader(handle)]
def loadCsv1(filename1):
    """Load a CSV file as a list of rows of floats (duplicate of loadCsv).

    :param filename1: path to a CSV file containing only numeric fields
    :returns: list of lists of floats, one inner list per CSV row
    """
    # Fix: text mode + with-block, as in loadCsv (the original opened the
    # file in "rb" mode and never closed it).
    with open(filename1, "r") as handle:
        return [[float(field) for field in row] for row in csv.reader(handle)]
@app.route('/pred',methods=['GET', 'POST'])
def pred():
    """Predict heart-disease risk for a patient and store the health record.

    Trains a decision tree on features.csv/label.csv, predicts from the 13
    submitted symptom fields and, when the patient id exists, stores the
    symptoms plus prediction in pat_health.
    """
    if request.method == 'POST':
        features = loadCsv('features.csv')
        label = loadCsv1('label.csv')
        y = np.ravel(np.array(label))  # fit expects a 1-D target vector
        pid = request.form['pat-id']
        # The 13 form fields, in the exact column order of features.csv.
        field_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg',
                       'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal']
        symps = [request.form[name] for name in field_names]
        clf = tree.DecisionTreeClassifier()
        clf = clf.fit(features, y)
        # Fix: predict() takes a 2-D array of samples, and the inputs must be
        # numeric (form values arrive as strings).
        a = clf.predict([[float(v) for v in symps]])
        result = a[0]
        db = sqlcipher.connect('encrypted.db')
        db.executescript('pragma key="my password"; pragma kdf_iter=64000;')
        # SECURITY fix: pid was previously concatenated into the SQL string
        # (SQL injection); bind it as a parameter instead.
        data = db.execute("SELECT * from patient where pid=?;", (pid,)).fetchone()
        if data is None:
            # NOTE(review): this branch means the patient was NOT found, yet
            # the message says "already exists" -- confirm intended wording.
            easygui.msgbox("Patient already exists...",title="Prediction")
            db.close()
            return render_template("doctor-land.html")
        else:
            db.execute("INSERT INTO pat_health (pid,symp0,symp1,symp2,symp3,symp4,symp5,symp6,symp7,symp8,symp9,symp10,symp11,symp12,result) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);", tuple([pid] + symps + [result]))
            db.commit()  # bug fix: the insert was never committed
            db.close()
            return render_template("doctor-land.html",result=result)
if __name__=="__main__":
    # Run the development server with debugging disabled.
    app.run(debug=False)
import numpy as np
import matplotlib.pyplot as plt
# Draw N standard-normal samples and overlay the theoretical pdf on their
# density histogram.
N = 10000
normal_values = np.random.normal(size=N)
'''
normal_values = np.random.beta(9,0.5, size=N)
'''
# Fix: the bin count must be an int (np.sqrt returns a float, which newer
# matplotlib rejects), and the `normed` kwarg was removed in matplotlib 3.1
# -- `density=True` is the replacement for a probability-density histogram.
dummy, bins, dummy = plt.hist(normal_values, int(np.sqrt(N)), density=True,
                              lw=1)
sigma = 1
mu = 0
# Standard normal pdf evaluated at the bin edges.
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins -
         mu)**2 / (2 * sigma**2) ),lw=2)
plt.show()
__author__ = 'sibirrer'
from lenstronomy.LensModel.Profiles.cored_density import CoredDensity
import numpy as np
import numpy.testing as npt
import pytest
class TestCoredDensity(object):
    """
    Tests for the CoredDensity lens profile methods.
    """

    def setup(self):
        self.model = CoredDensity()

    def test_function(self):
        # finite-difference check of the potential gradient vs derivatives()
        radii = np.linspace(start=0.01, stop=2, num=10)
        sigma0 = 0.2
        core_radius = 5
        step = 0.0001
        potential = self.model.function(radii, 0, sigma0, core_radius)
        potential_shift = self.model.function(radii + step, 0, sigma0, core_radius)
        gradient_numeric = (potential_shift - potential) / step
        gradient_exact, _ = self.model.derivatives(radii, 0, sigma0, core_radius)
        npt.assert_almost_equal(gradient_numeric, gradient_exact, decimal=3)

    def test_derivatives(self):
        pass

    def test_dalpha_dr(self):
        # finite-difference check of d(alpha)/dr vs the analytic expression
        xs = np.array([1, 3, 4])
        ys = np.array([2, 1, 1])
        radii = np.sqrt(xs ** 2 + ys ** 2)
        sigma0 = 0.1
        core_radius = 7.
        analytic = self.model.d_alpha_dr(radii, sigma0, core_radius)
        step = 0.00001
        alpha_lo = self.model.alpha_r(radii, sigma0, core_radius)
        alpha_hi = self.model.alpha_r(radii + step, sigma0, core_radius)
        numeric = (alpha_hi - alpha_lo) / step
        npt.assert_almost_equal(analytic, numeric)

    def test_hessian(self):
        xs = np.linspace(start=0.01, stop=100, num=100)
        ys = 0
        radii = np.sqrt(xs ** 2 + ys ** 2)
        sigma0 = 0.1
        core_radius = 7
        f_xx, f_xy, f_yx, f_yy = self.model.hessian(xs, ys, sigma0, core_radius)
        # convergence from the Hessian trace must match the direct kappa
        trace_kappa = 1. / 2 * (f_xx + f_yy)
        direct_kappa = self.model.kappa_r(radii, sigma0, core_radius)
        npt.assert_almost_equal(trace_kappa, direct_kappa, decimal=5)
        # the Hessian must be symmetric
        npt.assert_almost_equal(f_xy, f_yx, decimal=8)

    def test_mass_3d(self):
        xs = np.array([1, 3, 4])
        ys = np.array([2, 1, 1])
        radii = np.sqrt(xs ** 2 + ys ** 2)
        sigma0 = 0.1
        core_radius = 7
        # both 3-D mass entry points must agree
        mass = self.model.mass_3d(radii, sigma0, core_radius)
        mass_lens = self.model.mass_3d_lens(radii, sigma0, core_radius)
        npt.assert_almost_equal(mass, mass_lens, decimal=8)
if __name__ == '__main__':
    # allow running this test module directly
    pytest.main()
import datetime as dt
import numpy as np
import pandas as pd
import sqlite3
from epl.query import create_and_query, create_conn
def result_calculator(match_results, res_type):
    """
    Normalise a set of match results to one team's perspective.

    match_results: dataframe of match results including home and away team
        columns and score lines
    res_type: 'H' or 'A' -- the perspective to compute the results from
    Returns the renamed dataframe with Points and W/L/D outcomes, or an
    error string when inputs are invalid.
    """
    # validate that every required column is present
    req_cols = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR']
    missing = [c for c in req_cols if c not in match_results.columns]
    if missing:
        return 'Missing column: {}, need following cols: {}'.format(missing[0], req_cols)
    # per-perspective column renames, points mapping and W/L/D mapping
    perspectives = {
        'H': ({'HomeTeam': 'Team', 'AwayTeam': 'Opp', 'FTHG': 'GF', 'FTAG': 'GA'},
              {'H': 3, 'A': 0, 'D': 1},
              {'H': 'W', 'A': 'L', 'D': 'D'}),
        'A': ({'AwayTeam': 'Team', 'HomeTeam': 'Opp', 'FTHG': 'GA', 'FTAG': 'GF'},
              {'A': 3, 'H': 0, 'D': 1},
              {'A': 'W', 'H': 'L', 'D': 'D'}),
    }
    if res_type not in perspectives:
        return 'res_type must either be H or A, not: {}'.format(res_type)
    renames, points_map, outcome_map = perspectives[res_type]
    normalised = match_results.rename(columns=renames)
    normalised['Points'] = normalised['FTR'].map(points_map)
    normalised['FTR'] = normalised['FTR'].map(outcome_map)
    return normalised
def table_calculator(match_results, res_type):
    """Build a one-sided (home or away) league table from match results.

    match_results: dataframe of matches (see result_calculator for columns)
    res_type: 'H' or 'A' perspective
    Returns a dataframe indexed by Team with MP, W, L, D, GF, GA, GD, Points.
    """
    # work out from perspective we care about
    df_match = result_calculator(match_results, res_type)
    # agg by team and result
    df_match = df_match.groupby(['Team', 'FTR']).agg(
        {'Opp': 'count', 'GF': 'sum', 'GA': 'sum', 'Points': 'sum'}).reset_index()
    df_match = df_match.rename(columns={'Opp': 'MP'})
    # pivot by W/L/D
    df_res = pd.pivot_table(data=df_match[[
        'Team', 'FTR', 'MP']], index='Team', columns='FTR', values='MP').fillna(0)
    # if there are no results for a given type, then create col with zero to complete table
    for res in ['W', 'D', 'L']:
        if res not in df_res.columns:
            df_res[res] = 0
    # NOTE(review): this .sum() also aggregates the string FTR column; newer
    # pandas versions may warn or raise on that -- confirm the pinned version.
    df_res_goals = df_match.groupby('Team').sum()
    df_res_goals['GD'] = df_res_goals['GF'] - df_res_goals['GA']
    df_res = pd.merge(left=df_res_goals, right=df_res, how='left',
                      on='Team').sort_values('Points', ascending=False)
    df_res['Loc'] = res_type
    # the final column selection drops the Loc marker again
    df_res = df_res[['MP', 'W', 'L', 'D', 'GF', 'GA', 'GD', 'Points']]
    return df_res
def full_table_calculator(match_results):
    """Combine home and away tables into one league table.

    Teams are ranked by Points, then GD, then GF, and a 1-based LeagPos
    column is appended as the last column.
    """
    combined = pd.concat([table_calculator(match_results, 'H'),
                          table_calculator(match_results, 'A')])
    # sum the home and away rows per team, then rank
    table = combined.groupby('Team').sum().sort_values(
        ['Points', 'GD', 'GF'], ascending=False)
    # turn the row rank into a LeagPos column
    table = table.reset_index().reset_index().rename(
        columns={'index': 'LeagPos'}).set_index('Team')
    table['LeagPos'] = table['LeagPos'] + 1
    # move LeagPos to the end
    ordered_cols = [c for c in table.columns if c != 'LeagPos'] + ['LeagPos']
    return table[ordered_cols]
def league_table_asof(div, season, asof_date, conn=None):
    """League table for `div`/`season` using only matches up to `asof_date`.

    NOTE(review): the `not isinstance(..., str)` guards below reject only
    non-string inputs and never actually check membership of the queried
    values -- a membership check looks intended; confirm before changing.
    """
    if not conn:
        conn = create_conn()
    # variable checking and error messages
    if not isinstance(div, str):
        try:
            elig_divs = pd.read_sql(
                """SELECT DISTINCT Div from matches""", conn)
            elig_divs = elig_divs['Div'].values
        except:
            return 'Cannot connect to db'
        conn.close()
        return "Div: {} not in db, must be from: {}".format(div, ", ".join(elig_divs))
    if not isinstance(season, str):
        try:
            elig_seasons = pd.read_sql(
                """SELECT DISTINCT Season from matches""", conn)
            elig_seasons = elig_seasons['Season'].values
        except:
            return 'Cannot connect to db'
        conn.close()
        return "Season: {} not in db, must be from: {}".format(season, ", ".join(elig_seasons))
    if not asof_date:
        # default to ten years in the future so every played match counts
        asof_date = dt.date.today() + dt.timedelta(days=365*10)
        asof_date = pd.to_datetime(asof_date)
    else:
        if not isinstance(asof_date, pd.Timestamp):
            try:
                asof_date = pd.to_datetime(asof_date)
            except:
                return "Failed to convert asof_date: {} to datetime using pd.to_datetime".format(asof_date)
    # query required data from db
    table_cols = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'FTR'] + ['Date']
    df_raw = create_and_query('matches', cols=table_cols, wc={
        'Div': ['=', div], 'Season': ['=', season]})
    df_raw['Date'] = pd.to_datetime(df_raw['Date'])
    # keep only matches played on or before the cut-off date
    df = df_raw[df_raw.Date <= asof_date]
    df_res = full_table_calculator(df)
    return df_res
def find_matches_by_score(score, is_ht=False, div=None, home_team=None, away_team=None, leading_team=None, losing_team=None):
    """Find matches whose (half- or full-time) score matches `score`.

    score: tuple (goals_a, goals_b); both orientations are matched
    is_ht: match the half-time score instead of full-time
    div/home_team/away_team: optional SQL-side filters
    leading_team/losing_team: optionally require a team to be winning/losing
    """
    # form the where statement in the sql query as sql 10x-50x faster at filtering than pandas
    wc = {}
    if div:
        wc['Div'] = ['=', div]
    if home_team:
        wc['HomeTeam'] = ['=', home_team]
    if away_team:
        wc['AwayTeam'] = ['=', away_team]
    if len(wc) == 0:
        wc = None
    # get cols we care about
    cols = ['Div', 'Date', 'Season', 'HomeTeam',
            'AwayTeam', 'FTHG', 'FTAG', 'HTHG', 'HTAG']
    # query
    df = create_and_query('matches', cols, wc).dropna()
    home_goals = 'HTHG' if is_ht else 'FTHG'
    away_goals = 'HTAG' if is_ht else 'FTAG'
    # create tuple for score and select where matches (either orientation)
    df['Score'] = list(zip(df[home_goals], df[away_goals]))
    df = df[(df['Score'] == score) | (df['Score'] == score[::-1])]
    # if leading / trailing team specified then apply that filter
    # don't know how to do this in sql yet so easier in pandas for now post sql query
    if leading_team:
        if score[0] == score[1]:
            df = df[(df['HomeTeam'] == leading_team) |
                    (df['AwayTeam'] == leading_team)]
        else:
            df = df[((df['HomeTeam'] == leading_team) & (df[home_goals] > df[away_goals])) | (
                (df['AwayTeam'] == leading_team) & (df[home_goals] < df[away_goals]))]
    if losing_team:
        if score[0] == score[1]:
            df = df[(df['HomeTeam'] == losing_team) |
                    (df['AwayTeam'] == losing_team)]
        else:
            # Bug fix: the away-side comparison previously tested
            # `leading_team`, so away losing teams were never matched.
            df = df[((df['HomeTeam'] == losing_team) & (df[home_goals] < df[away_goals])) | (
                (df['AwayTeam'] == losing_team) & (df[home_goals] > df[away_goals]))]
    return df
if __name__ == "__main__":
    # manual smoke-test hook, kept commented out
    # x = league_table_asof('E0', '2019/2020', None, conn=None)
    # print(x)
    pass
#!/usr/bin/env python
# native Python imports
import os.path
import sys
import numpy as np
# third-party imports
import cyvcf as vcf
# geminicassandra modules
import version
from ped import load_ped_file
import gene_table
import infotag
from database_cassandra import insert, batch_insert, create_tables
import annotations
import func_impact
import severe_impact
import popgen
import structural_variants as svs
from geminicassandra.gemini_constants import HET, HOM_ALT, HOM_REF, UNKNOWN
from compression import pack_blob
from geminicassandra.config import read_gemini_config
from cassandra.cluster import Cluster
from blist import blist
from itertools import repeat
from geminicassandra.ped import get_ped_fields
import time
from string import strip
from random import randint
from geminicassandra.table_schemes import get_column_names
from cassandra.query import BatchStatement, BatchType
import Queue
from cassandra.concurrent import execute_concurrent_with_args
import cassandra
from multiprocessing import cpu_count
from time import sleep
from sys import stderr
class GeminiLoader(object):
"""
Object for creating and populating a geminicassandra
database and auxillary data files.
"""
    def __init__(self, args):
        """Initialise the loader from parsed CLI `args`: open the VCF reader,
        record Cassandra connection settings and derive the per-sample
        genotype column layout."""
        self.args = args
        # create a reader for the VCF file
        self.vcf_reader = self._get_vcf_reader()
        self.buffer_size = args.buffer_size
        self.queue_length = args.max_queue
        self._get_anno_version()
        # contact points arrive as a comma-separated string
        self.contact_points = map(strip, args.contact_points.split(','))
        self.keyspace = args.keyspace
        self.replication_factor = args.replication
        self.typed_gt_column_names = []
        self.gt_column_names = []
        self.node_n = args.node_num
        if not self.args.no_genotypes:
            # one genotype column per (gt field, sample) pair
            self.samples = self.vcf_reader.samples
            self.gt_column_names, self.typed_gt_column_names = self._get_typed_gt_column_names()
            print "# samples = %s" % len(self.samples)
            # the first NUM_BUILT_IN ped fields are standard; the remainder
            # become extra sample columns
            NUM_BUILT_IN = 6
            self.extra_sample_columns = get_ped_fields(args.ped_file)[NUM_BUILT_IN:]
        if self.args.anno_type == "VEP":
            self._effect_fields = self._get_vep_csq(self.vcf_reader)
        else:
            self._effect_fields = []
    def single_core_stuff(self):
        """Run the one-time, single-process loading steps: VCF header,
        resources, version, sample rows and the gene tables."""
        self.store_vcf_header()
        self.store_resources()
        self.store_version()
        if not self.args.no_genotypes and not self.args.no_load_genotypes:
            # load the sample info from the VCF file.
            self._prepare_samples()
        # initialize genotype counts for each sample
        if not self.args.skip_gene_tables:
            self._get_gene_detailed()
            self._get_gene_summary()
    def store_vcf_header(self):
        """Store the raw VCF header.
        """
        # single row holding the verbatim header text of the input VCF
        insert(self.session, 'vcf_header', get_column_names('vcf_header'), [self.vcf_reader.raw_header])
    def store_resources(self):
        """Create table of annotation resources used in this geminicassandra database.
        """
        # rows come straight from annotations.get_resources(args)
        batch_insert(self.session, 'resources', get_column_names('resources'), annotations.get_resources( self.args ))
    def store_version(self):
        """Create table documenting which geminicassandra version was used for this db.
        """
        # single row holding version.__version__
        insert(self.session, 'version', get_column_names('version'), [version.__version__])
def _get_typed_gt_column_names(self):
gt_cols = [('gts', 'text'),
('gt_types', 'int'),
('gt_phases', 'int'),
('gt_depths', 'int'),
('gt_ref_depths', 'int'),
('gt_alt_depths', 'int'),
('gt_quals', 'float'),
('gt_copy_numbers', 'float')]
column_names = concat(map(lambda x: map(lambda y: x[0] + '_' + y, self.samples), gt_cols))
typed_column_names = concat(map(lambda x: map(lambda y: x[0] + '_' + y + ' ' + x[1], self.samples), gt_cols))
return (column_names, typed_column_names)
def _get_vid(self):
if hasattr(self.args, 'offset'):
v_id = int(self.args.offset)
else:
v_id = 1
return v_id
    def prepare_insert_queries(self):
        """Prepare all CQL INSERT statements used during loading.

        On machines with many cores, node-number based sleeps stagger
        statement preparation across worker processes.
        """
        basic_query = 'INSERT INTO %s ( %s ) VALUES ( %s )'
        if cpu_count() > 8:
            nap = 10 * (self.node_n % 11)
            sleep(nap)
        start_time = time.time()
        # One prepared statement per destination table; the "?" placeholder
        # list always matches the column list length.
        self.insert_variants_query = self.session.prepare(basic_query % \
            ('variants', ','.join(get_column_names('variants') + self.gt_column_names), ','.join(list(repeat("?",len(get_column_names('variants') + self.gt_column_names))))))
        self.insert_variants_samples_gt_types_query = self.session.prepare(basic_query % \
            ('variants_by_samples_gt_types', "variant_id, sample_name, gt_types", ','.join(list(repeat("?",3)))))
        self.insert_samples_variants_gt_types_query = self.session.prepare(basic_query % \
            ('samples_by_variants_gt_type', "variant_id, sample_name, gt_type", ','.join(list(repeat("?",3)))))
        self.insert_variants_samples_gt_depths_query = self.session.prepare(basic_query % \
            ('variants_by_samples_gt_depths', "variant_id, sample_name, gt_depths", ','.join(list(repeat("?",3)))))
        self.insert_variants_samples_gts_query = self.session.prepare(basic_query % \
            ('variants_by_samples_gts', "variant_id, sample_name, gts", ','.join(list(repeat("?",3)))))
        self.insert_variant_impacts_query = self.session.prepare(basic_query % \
            ('variant_impacts', ','.join(get_column_names('variant_impacts')), ','.join(list(repeat("?", len(get_column_names('variant_impacts')))))))
        self.insert_variant_stcr_query = self.session.prepare(basic_query % \
            ('variants_by_sub_type_call_rate', ','.join(get_column_names('variants_by_sub_type_call_rate')), ','.join(list(repeat("?", 3)))))
        self.insert_variant_gene_query = self.session.prepare(basic_query % \
            ('variants_by_gene', 'variant_id, gene', ','.join(list(repeat("?", 2)))))
        self.insert_variant_chrom_start_query = self.session.prepare(basic_query % \
            ('variants_by_chrom_start', 'variant_id, chrom, start', ','.join(list(repeat("?", 3)))))
        end_time = time.time()
        if cpu_count() > 8:
            nap = 10 * (11 - (self.node_n % 11))
            sleep(nap)
        print "Proc %s: preparing statements took %.2f s." % (os.getpid(), (end_time - start_time))
    def populate_from_vcf(self):
        """Stream variants from the VCF into Cassandra.

        Core-variant rows are buffered and flushed every `buffer_size`
        variants; per-sample genotype rows are written asynchronously per
        variant. Timing goes to loading_logs/<pid>.csv, failures to
        loading_logs/<pid>.err. Returns the number of variants processed.
        """
        self.v_id = self._get_vid()
        self.var_buffer = blist([])
        self.var_impacts_buffer = blist([])
        self.var_subtypes_buffer = blist([])
        self.var_gene_buffer = blist([])
        self.var_chrom_start_buffer = blist([])
        self.prepare_insert_queries()
        # rows that exhausted their retries end up in these leftover buffers
        self.leftover_types = blist([])
        self.leftover_depths = blist([])
        self.leftover_gts = blist([])
        buffer_count = 0
        self.skipped = 0
        self.counter = 0
        start_time = time.time()
        interval_start = time.time()
        variants_gts_timer = 0
        log_file = open("loading_logs/%s.csv" % str(os.getpid()), "w")
        self.time_out_log = open("loading_logs/%s.err" % str(os.getpid()), "w")
        vars_inserted = 0
        for var in self.vcf_reader:
            # honor --passonly: skip records with a FILTER value set
            if self.args.passonly and (var.FILTER is not None and var.FILTER != "."):
                self.skipped += 1
                continue
            (variant, variant_impacts, sample_info, extra_fields) = self._prepare_variation(var)  # @UnusedVariable
            # add the core variant info to the variant buffer
            self.var_buffer.append(variant)
            # NOTE(review): indices 11/12 feed variants_by_sub_type_call_rate
            # and 55 the gene column -- positions must stay in sync with
            # get_column_names('variants'); confirm on schema changes.
            self.var_subtypes_buffer.append([self.v_id, variant[11], variant[12]])
            if variant[55] != None:
                self.var_gene_buffer.append([self.v_id, variant[55]])
            self.var_chrom_start_buffer.append([self.v_id, variant[1], variant[2]])
            var_sample_gt_types_buffer = blist([])
            var_sample_gt_depths_buffer = blist([])
            var_sample_gt_buffer = blist([])
            for sample in sample_info:
                # only samples with a called genotype type are written
                if sample[1] != None:
                    var_sample_gt_depths_buffer.append([self.v_id, sample[0], sample[2]])
                    var_sample_gt_types_buffer.append([self.v_id, sample[0], sample[1]])
                    var_sample_gt_buffer.append([self.v_id, sample[0], sample[3]])
            stime = time.time()
            self.prepared_batch_insert(var_sample_gt_types_buffer, var_sample_gt_depths_buffer, var_sample_gt_buffer, 25)
            variants_gts_timer += (time.time() - stime)
            # add each of the impact for this variant (1 per gene/transcript)
            for var_impact in variant_impacts:
                self.var_impacts_buffer.append(var_impact)
            buffer_count += 1
            # buffer full - start to insert into DB
            if buffer_count >= self.buffer_size:
                startt = time.time()
                self.execute_concurrent_with_retry(self.insert_variants_query, self.var_buffer)
                self.execute_concurrent_with_retry(self.insert_variant_impacts_query, self.var_impacts_buffer)
                self.execute_concurrent_with_retry(self.insert_variant_stcr_query, self.var_subtypes_buffer)
                self.execute_concurrent_with_retry(self.insert_variant_gene_query, self.var_gene_buffer)
                self.execute_concurrent_with_retry(self.insert_variant_chrom_start_query, self.var_chrom_start_buffer)
                endt = time.time()
                # binary.genotypes.append(var_buffer)
                # reset for the next batch
                self.var_buffer = blist([])
                self.var_subtypes_buffer = blist([])
                self.var_impacts_buffer = blist([])
                self.var_gene_buffer = blist([])
                self.var_chrom_start_buffer = blist([])
                vars_inserted += self.buffer_size
                log_file.write("%s;%.2f;%.2f;%.2f\n" % (self.buffer_size, endt - interval_start, endt - startt, variants_gts_timer))
                log_file.flush()
                buffer_count = 0
                interval_start = time.time()
                variants_gts_timer = 0
            self.v_id += 1
            self.counter += 1
        # final load to the database
        self.v_id -= 1
        startt = time.time()
        self.execute_concurrent_with_retry(self.insert_variants_query, self.var_buffer)
        self.execute_concurrent_with_retry(self.insert_variant_impacts_query, self.var_impacts_buffer)
        self.execute_concurrent_with_retry(self.insert_variant_stcr_query, self.var_subtypes_buffer)
        self.execute_concurrent_with_retry(self.insert_variant_gene_query, self.var_gene_buffer)
        self.execute_concurrent_with_retry(self.insert_variant_chrom_start_query, self.var_chrom_start_buffer)
        #self.prepared_batch_insert(self.leftover_types, self.leftover_depths, self.leftover_gts)
        end_time = time.time()
        vars_inserted += self.buffer_size
        log_file.write("%d;%.2f;%.2f;%.2f\n" % (len(self.var_buffer), end_time - interval_start, end_time - startt, variants_gts_timer))
        self.time_out_log.close()
        elapsed_time = end_time - start_time
        sys.stderr.write("pid " + str(os.getpid()) + ": " +
                         str(self.counter) + " variants processed in %s s.\n" % elapsed_time)
        log_file.write(str(self.counter) + " variants processed in %s s.\n" % elapsed_time)
        log_file.write("%d leftovers\n" % len(self.leftover_types))
        log_file.close()
        if self.args.passonly:
            sys.stderr.write("pid " + str(os.getpid()) + ": " +
                             str(self.skipped) + " skipped due to having the "
                             "FILTER field set.\n")
        return self.counter
def prepared_batch_insert(self, types_buf, depth_buf, gt_buffer, queue_length=40):
"""
Populate the given table with the given values
"""
retry_threshold = 8
futures = Queue.Queue(maxsize=queue_length+1)
retries = {}
i = 0
while i < len(types_buf):
if i >= queue_length:
(old_i, old_future) = futures.get_nowait()
try:
old_future.result()
del retries[old_i]
except cassandra.WriteTimeout, cassandra.OperationTimedOut:
if retries[old_i] < retry_threshold:
if retries[old_i] == 0:
self.write_to_timeoutlog("1::%s;%s;%s;%s;%s\n" % \
(types_buf[old_i][0], types_buf[old_i][1], types_buf[old_i][2], depth_buf[old_i][2], gt_buffer[old_i][2]))
future = self.execute_var_gts_batch(types_buf[old_i], depth_buf[old_i], gt_buffer[old_i])
futures.put_nowait((old_i, future))
retries[old_i] += 1
continue
else:
self.leftover_types.append(types_buf[old_i])
self.leftover_depths.append(depth_buf[old_i])
self.leftover_gts.append(gt_buffer[old_i])
self.write_to_timeoutlog("2::%s;%s;%s;%s;%s\n" % \
(types_buf[old_i][0], types_buf[old_i][1], types_buf[old_i][2], depth_buf[old_i][2], gt_buffer[old_i][2]))
except cassandra.InvalidRequest:
if retries[old_i] < retry_threshold:
if retries[old_i] == 0:
self.write_to_timeoutlog("3::%s;%s;%s;%s;%s\n" % \
(types_buf[old_i][0], types_buf[old_i][1], types_buf[old_i][2], depth_buf[old_i][2], len(gt_buffer[old_i][2])))
future = self.execute_var_gts_batch(types_buf[old_i], depth_buf[old_i], gt_buffer[old_i])
futures.put_nowait((old_i, future))
retries[old_i] += 1
continue
else:
self.leftover_types.append(types_buf[old_i])
self.leftover_depths.append(depth_buf[old_i])
self.leftover_gts.append(gt_buffer[old_i])
self.write_to_timeoutlog("4::%s;%s;%s;%s;%s\n" % \
(types_buf[old_i][0], types_buf[old_i][1], types_buf[old_i][2], depth_buf[old_i][2], len(gt_buffer[old_i][2])))
future = self.execute_var_gts_batch(types_buf[i], depth_buf[i], gt_buffer[i])
futures.put_nowait((i, future))
retries[i] = 0
i += 1
    def execute_var_gts_batch(self, type_info, depth, gt):
        """Asynchronously write one (variant, sample) row to the four
        per-sample lookup tables as a single unlogged batch.

        Returns the driver future for the batch execution.
        """
        batch = BatchStatement(batch_type=BatchType.UNLOGGED)
        # the same (variant_id, sample, gt_type) triple feeds both gt_types tables
        batch.add(self.insert_samples_variants_gt_types_query, type_info)
        batch.add(self.insert_variants_samples_gt_types_query, type_info)
        batch.add(self.insert_variants_samples_gt_depths_query, depth)
        batch.add(self.insert_variants_samples_gts_query, gt)
        return self.session.execute_async(batch)
def execute_concurrent_with_retry(self, insert_query, contents, retry=0):
try:
execute_concurrent_with_args(self.session, insert_query, contents)
except cassandra.WriteTimeout, cassandra.OperationTimedOut:
self.write_to_timeoutlog("5::%d\n" % contents[0][0])
self.execute_concurrent_with_retry(insert_query, contents, retry)
except cassandra.InvalidRequest as e:
if retry < 8:
self.write_to_timeoutlog("6::%d\n" % contents[0][0])
self.execute_concurrent_with_retry(insert_query, contents, retry+1)
else:
self.write_to_timeoutlog("63::Give up - too many invalid dink\n")
self.write_to_timeoutlog("63::%s" % str(e))
    def write_to_timeoutlog(self, message):
        """Append `message` to the timeout log, flushing immediately so the
        entry survives a crash."""
        self.time_out_log.write(message)
        self.time_out_log.flush()
def _update_extra_headers(self, headers, cur_fields):
"""Update header information for extra fields.
"""
for field, val in cur_fields.items():
headers[field] = self._get_field_type(val, headers.get(field, "integer"))
return headers
def _get_field_type(self, val, cur_type):
start_checking = False
for name, check_fn in [("integer", int), ("float", float), ("text", str)]:
if name == cur_type:
start_checking = True
if start_checking:
try:
check_fn(val)
break
except:
continue
return name
    def disconnect(self):
        """
        Create the db table indices and close up
        db connection

        NOTE(review): only the session shutdown remains -- the indexing and
        commit steps mentioned above were apparently dropped; confirm the
        docstring/TODOs are still accurate.
        """
        # index our tables for speed
        # commit data and close up
        self.session.shutdown()
def _get_vcf_reader(self):
# the VCF is a proper file
if self.args.vcf != "-":
if self.args.vcf.endswith(".gz"):
return vcf.VCFReader(open(self.args.vcf), 'rb', compressed=True)
else:
return vcf.VCFReader(open(self.args.vcf), 'rb')
# the VCF is being passed in via STDIN
else:
return vcf.VCFReader(sys.stdin, 'rb')
    def _get_anno_version(self):
        """
        Extract the snpEff or VEP version used
        to annotate the VCF
        """
        # default to unknown version
        self.args.version = None
        if self.args.anno_type == "snpEff":
            try:
                version_string = self.vcf_reader.metadata['SnpEffVersion']
            except KeyError:
                error = ("\nWARNING: VCF is not annotated with snpEff, check documentation at:\n"\
                "http://geminicassandra.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-snpeff\n")
                sys.exit(error)
            # e.g., "SnpEff 3.0a (build 2012-07-08), by Pablo Cingolani"
            # or "3.3c (build XXXX), by Pablo Cingolani"
            version_string = version_string.replace('"', '')  # No quotes
            toks = version_string.split()
            if "SnpEff" in toks[0]:
                self.args.raw_version = toks[1]  # SnpEff *version*, etc
            else:
                self.args.raw_version = toks[0]  # *version*, etc
            # e.g., 3.0a -> 3
            self.args.maj_version = int(self.args.raw_version.split('.')[0])
        elif self.args.anno_type == "VEP":
            # VEP version detection is not implemented; args.version stays None
            pass
def _get_vep_csq(self, reader):
"""
Test whether the VCF header meets expectations for
proper execution of VEP for use with Gemini.
"""
required = ["Consequence"]
expected = "Consequence|Codons|Amino_acids|Gene|SYMBOL|Feature|EXON|PolyPhen|SIFT|Protein_position|BIOTYPE".upper() # @UnusedVariable
if 'CSQ' in reader.infos:
parts = str(reader.infos["CSQ"].desc).split("Format: ")[-1].split("|")
all_found = True
for check in required:
if check not in parts:
all_found = False
break
if all_found:
return parts
# Did not find expected fields
error = "\nERROR: Check geminicassandra docs for the recommended VCF annotation with VEP"\
"\nhttp://geminicassandra.readthedocs.org/en/latest/content/functional_annotation.html#stepwise-installation-and-usage-of-vep"
sys.exit(error)
    def setup_db(self):
        """
        Create keyspace named 'gemini_keyspace' and all tables. (IF NOT EXISTS)
        """
        self.cluster = Cluster(self.contact_points)
        self.session = self.cluster.connect()
        # keyspace name and replication factor come from the CLI args
        query = "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : %d}" % (self.keyspace, self.replication_factor)
        self.session.execute(query)
        self.session.set_keyspace(self.keyspace)
        # create the geminicassandra database tables for the new DB
        create_tables(self.session, self.typed_gt_column_names, self.extra_sample_columns)
    def connect_to_db(self):
        """Connect to an already-created keyspace (no schema creation)."""
        self.cluster = Cluster(self.contact_points)
        self.session = self.cluster.connect(self.keyspace)
def _prepare_variation(self, var):
"""private method to collect metrics for a single variant (var) in a VCF file.
Extracts variant information, variant impacts and extra fields for annotation.
"""
extra_fields = {}
# these metrics require that genotypes are present in the file
call_rate = None
hwe_p_value = None
pi_hat = None
inbreeding_coeff = None
hom_ref = het = hom_alt = unknown = None
# only compute certain metrics if genotypes are available
if not self.args.no_genotypes and not self.args.no_load_genotypes:
hom_ref = var.num_hom_ref
hom_alt = var.num_hom_alt
het = var.num_het
unknown = var.num_unknown
try:
call_rate = var.call_rate
except ValueError: #TODO: catch error instead of bogus value
call_rate = -43.0
aaf = var.aaf
hwe_p_value, inbreeding_coeff = \
popgen.get_hwe_likelihood(hom_ref, het, hom_alt, aaf)
pi_hat = var.nucl_diversity
else:
aaf = infotag.extract_aaf(var)
############################################################
# collect annotations from geminicassandra's custom annotation files
# but only if the size of the variant is <= 50kb
############################################################
if var.end - var.POS < 50000:
pfam_domain = annotations.get_pfamA_domains(var)
cyto_band = annotations.get_cyto_info(var)
rs_ids = annotations.get_dbsnp_info(var)
clinvar_info = annotations.get_clinvar_info(var)
in_dbsnp = 0 if rs_ids is None else 1
rmsk_hits = annotations.get_rmsk_info(var)
in_cpg = annotations.get_cpg_island_info(var)
in_segdup = annotations.get_segdup_info(var)
is_conserved = annotations.get_conservation_info(var)
esp = annotations.get_esp_info(var)
thousandG = annotations.get_1000G_info(var)
recomb_rate = annotations.get_recomb_info(var)
gms = annotations.get_gms(var)
grc = annotations.get_grc(var)
in_cse = annotations.get_cse(var)
encode_tfbs = annotations.get_encode_tfbs(var)
encode_dnaseI = annotations.get_encode_dnase_clusters(var)
encode_cons_seg = annotations.get_encode_consensus_segs(var)
gerp_el = annotations.get_gerp_elements(var)
vista_enhancers = annotations.get_vista_enhancers(var)
cosmic_ids = annotations.get_cosmic_info(var)
fitcons = annotations.get_fitcons(var)
Exac = annotations.get_exac_info(var)
#load CADD scores by default
if self.args.skip_cadd is False:
(cadd_raw, cadd_scaled) = annotations.get_cadd_scores(var)
else:
(cadd_raw, cadd_scaled) = (None, None)
# load the GERP score for this variant by default.
gerp_bp = None
if self.args.skip_gerp_bp is False:
gerp_bp = annotations.get_gerp_bp(var)
# the variant is too big to annotate
else:
pfam_domain = None
cyto_band = None
rs_ids = None
clinvar_info = annotations.ClinVarInfo()
in_dbsnp = None
rmsk_hits = None
in_cpg = None
in_segdup = None
is_conserved = None
esp = annotations.ESPInfo(None, None, None, None, None)
thousandG = annotations.ThousandGInfo(None, None, None, None, None, None, None)
Exac = annotations.ExacInfo(None, None, None, None, None, None, None, None, None, None)
recomb_rate = None
gms = annotations.GmsTechs(None, None, None)
grc = None
in_cse = None
encode_tfbs = None
encode_dnaseI = annotations.ENCODEDnaseIClusters(None, None)
encode_cons_seg = annotations.ENCODESegInfo(None, None, None, None, None, None)
gerp_el = None
vista_enhancers = None
cosmic_ids = None
fitcons = None
cadd_raw = None
cadd_scaled = None
gerp_bp = None
# impact is a list of impacts for this variant
impacts = None
severe_impacts = None
# impact terms initialized to None for handling unannotated vcf's
# anno_id in variants is for the trans. with the most severe impact term
gene = transcript = exon = codon_change = aa_change = aa_length = \
biotype = consequence = consequence_so = effect_severity = None
is_coding = is_exonic = is_lof = None
polyphen_pred = polyphen_score = sift_pred = sift_score = anno_id = None
if self.args.anno_type is not None:
impacts = func_impact.interpret_impact(self.args, var, self._effect_fields)
severe_impacts = \
severe_impact.interpret_severe_impact(self.args, var, self._effect_fields)
if severe_impacts:
extra_fields.update(severe_impacts.extra_fields)
gene = severe_impacts.gene
transcript = severe_impacts.transcript
exon = severe_impacts.exon
codon_change = severe_impacts.codon_change
aa_change = severe_impacts.aa_change
aa_length = severe_impacts.aa_length
biotype = severe_impacts.biotype
consequence = severe_impacts.consequence
effect_severity = severe_impacts.effect_severity
polyphen_pred = severe_impacts.polyphen_pred
polyphen_score = severe_impacts.polyphen_score
sift_pred = severe_impacts.sift_pred
sift_score = severe_impacts.sift_score
anno_id = severe_impacts.anno_id
is_exonic = severe_impacts.is_exonic
is_coding = severe_impacts.is_coding
is_lof = severe_impacts.is_lof
consequence_so = severe_impacts.so
# construct the var_filter string
var_filter = None
if var.FILTER is not None and var.FILTER != ".":
if isinstance(var.FILTER, list):
var_filter = ";".join(var.FILTER)
else:
var_filter = var.FILTER
#TODO: sensible value
vcf_id = None
if var.ID is not None and var.ID != ".":
vcf_id = var.ID
# build up numpy arrays for the genotype information.
sample_info = blist([])
if not self.args.no_genotypes and not self.args.no_load_genotypes:
gt_bases = var.gt_bases # 'A/G', './.'
gt_types = var.gt_types # -1, 0, 1, 2
gt_phases = var.gt_phases # T F F
gt_depths = var.gt_depths # 10 37 0
gt_ref_depths = var.gt_ref_depths # 2 21 0 -1
gt_alt_depths = var.gt_alt_depths # 8 16 0 -1
gt_quals = var.gt_quals # 10.78 22 99 -1
gt_copy_numbers = var.gt_copy_numbers # 1.0 2.0 2.1 -1
gt_columns = concat([gt_bases, gt_types, gt_phases, gt_depths, gt_ref_depths, gt_alt_depths, gt_quals, gt_copy_numbers])
for entry in var.samples:
sample_info.append((entry.sample, entry.gt_type, entry.gt_depth, entry.gt_bases))
# tally the genotypes
self._update_sample_gt_counts(np.array(var.gt_types, np.int8))
else:
gt_columns= []
if self.args.skip_info_string is False:
info = var.INFO
else:
info = None
# were functional impacts predicted by SnpEFF or VEP?
# if so, build up a row for each of the impacts / transcript
variant_impacts = []
if impacts is not None:
for idx, impact in enumerate(impacts):
var_impact = [self.v_id, (idx + 1), impact.gene,
impact.transcript, impact.is_exonic,
impact.is_coding, impact.is_lof,
impact.exon, impact.codon_change,
impact.aa_change, impact.aa_length,
impact.biotype, impact.consequence,
impact.so, impact.effect_severity,
impact.polyphen_pred, impact.polyphen_score,
impact.sift_pred, impact.sift_score]
variant_impacts.append(var_impact)
# extract structural variants
sv = svs.StructuralVariant(var)
ci_left = sv.get_ci_left()
ci_right = sv.get_ci_right()
# construct the core variant record.
# 1 row per variant to VARIANTS table
if extra_fields:
extra_fields.update({"chrom": var.CHROM, "start": var.start, "end": var.end})
chrom = var.CHROM if var.CHROM.startswith("chr") else "chr" + var.CHROM
variant = [self.v_id, chrom, var.start, var.end,
vcf_id, anno_id, var.REF, ','.join(var.ALT),
var.QUAL, var_filter, var.var_type,
var.var_subtype,
call_rate, in_dbsnp,
rs_ids,
ci_left[0],
ci_left[1],
ci_right[0],
ci_right[1],
sv.get_length(),
sv.is_precise(),
sv.get_sv_tool(),
sv.get_evidence_type(),
sv.get_event_id(),
sv.get_mate_id(),
sv.get_strand(),
clinvar_info.clinvar_in_omim,
clinvar_info.clinvar_sig,
clinvar_info.clinvar_disease_name,
clinvar_info.clinvar_dbsource,
clinvar_info.clinvar_dbsource_id,
clinvar_info.clinvar_origin,
clinvar_info.clinvar_dsdb,
clinvar_info.clinvar_dsdbid,
clinvar_info.clinvar_disease_acc,
clinvar_info.clinvar_in_locus_spec_db,
clinvar_info.clinvar_on_diag_assay,
clinvar_info.clinvar_causal_allele,
pfam_domain, cyto_band, rmsk_hits, in_cpg,
in_segdup, is_conserved, gerp_bp, parse_float(gerp_el),
hom_ref, het, hom_alt, unknown,
aaf, hwe_p_value, inbreeding_coeff, pi_hat,
recomb_rate, gene, transcript, is_exonic,
is_coding, is_lof, exon, codon_change, aa_change,
aa_length, biotype, consequence, consequence_so, effect_severity,
polyphen_pred, polyphen_score, sift_pred, sift_score,
infotag.get_ancestral_allele(var), infotag.get_rms_bq(var),
infotag.get_cigar(var),
infotag.get_depth(var), infotag.get_strand_bias(var),
infotag.get_rms_map_qual(var), infotag.get_homopol_run(var),
infotag.get_map_qual_zero(var),
infotag.get_num_of_alleles(var),
infotag.get_frac_dels(var),
infotag.get_haplotype_score(var),
infotag.get_quality_by_depth(var),
infotag.get_allele_count(var), infotag.get_allele_bal(var),
infotag.in_hm2(var), infotag.in_hm3(var),
infotag.is_somatic(var),
infotag.get_somatic_score(var),
esp.found, esp.aaf_EA,
esp.aaf_AA, esp.aaf_ALL,
esp.exome_chip, thousandG.found,
parse_float(thousandG.aaf_AMR), parse_float(thousandG.aaf_EAS),
parse_float(thousandG.aaf_SAS), parse_float(thousandG.aaf_AFR),
parse_float(thousandG.aaf_EUR), parse_float(thousandG.aaf_ALL), grc,
parse_float(gms.illumina), parse_float(gms.solid),
parse_float(gms.iontorrent), in_cse,
encode_tfbs,
parse_int(encode_dnaseI.cell_count),
encode_dnaseI.cell_list,
encode_cons_seg.gm12878,
encode_cons_seg.h1hesc,
encode_cons_seg.helas3,
encode_cons_seg.hepg2,
encode_cons_seg.huvec,
encode_cons_seg.k562,
vista_enhancers,
cosmic_ids,
pack_blob(info),
cadd_raw,
cadd_scaled,
fitcons,
Exac.found,
parse_float(Exac.aaf_ALL),
Exac.adj_aaf_ALL,
Exac.aaf_AFR, Exac.aaf_AMR,
Exac.aaf_EAS, Exac.aaf_FIN,
Exac.aaf_NFE, Exac.aaf_OTH,
Exac.aaf_SAS] + gt_columns
return variant, variant_impacts, sample_info, extra_fields
def _prepare_samples(self):
    """
    private method to load sample information

    Builds the sample-name -> 1-based id map, merges in pedigree fields
    from the PED file (when supplied), and bulk-inserts one row per VCF
    sample into the ``samples`` tables.
    """
    if not self.args.no_genotypes:
        self.sample_to_id = {}
        for idx, sample in enumerate(self.samples):
            # sample ids are 1-based
            self.sample_to_id[sample] = idx + 1
    self.ped_hash = {}
    if self.args.ped_file is not None:
        self.ped_hash = load_ped_file(self.args.ped_file)
    samples_buffer = blist([])
    buffer_counter = 0
    for sample in self.samples:
        sample_list = []
        i = self.sample_to_id[sample]
        if sample in self.ped_hash:
            fields = self.ped_hash[sample]
            sample_list = [i] + fields
        elif len(self.ped_hash) > 0:
            # a PED file was supplied but this VCF sample is missing from it
            sys.exit("EXITING: sample %s found in the VCF but "
                     "not in the PED file.\n" % (sample))
        else:
            # if there is no ped file given, just fill in the name and
            # sample_id and set random value for sex & phenotype
            sample_list = [i, '0', sample, '0', '0', str(randint(1, 2)), str(randint(1, 2))]
        samples_buffer.append(sample_list)
        buffer_counter += 1
        if buffer_counter >= self.buffer_size:
            # NOTE(review): this mid-loop flush only writes the 'samples'
            # table, so rows flushed here never reach samples_by_phenotype /
            # samples_by_sex below -- confirm this is intended.
            batch_insert(self.session, 'samples', get_column_names('samples') + self.extra_sample_columns, samples_buffer)
            buffer_counter = 0
            samples_buffer = blist([])
    column_names = get_column_names('samples') + self.extra_sample_columns
    batch_insert(self.session, 'samples', column_names, samples_buffer)
    batch_insert(self.session, 'samples_by_phenotype', column_names, samples_buffer)
    batch_insert(self.session, 'samples_by_sex', column_names, samples_buffer)
    insert(self.session, 'row_counts', ['table_name', 'n_rows'], ['samples', len(self.samples)])
def _get_gene_detailed(self):
    """
    Populate the ``gene_detailed`` table from the bundled
    ``detailed_gene_table_v75`` annotation file.

    Rows are buffered and bulk-inserted every ``buffer_size / 2`` lines.
    """
    # unique identifier for each entry
    i = 0
    gene_buffer = blist([])
    buffer_count = 0
    config = read_gemini_config(args=self.args)
    path_dirname = config["annotation_dir"]
    file_handle = os.path.join(path_dirname, 'detailed_gene_table_v75')
    # use a context manager so the annotation file is always closed
    # (the previous version leaked the file handle)
    with open(file_handle, 'r') as annot_file:
        for line in annot_file:
            field = line.strip().split("\t")
            # skip the header line
            if field[0].startswith("Chromosome"):
                continue
            i += 1
            table = gene_table.gene_detailed(field)
            detailed_list = [i, table.chrom, table.gene, table.is_hgnc,
                             table.ensembl_gene_id, table.ensembl_trans_id,
                             table.biotype, table.trans_status, table.ccds_id,
                             table.hgnc_id, table.entrez, table.cds_length,
                             table.protein_length,
                             table.transcript_start, table.transcript_end,
                             table.strand, table.synonym, table.rvis,
                             table.mam_phenotype]
            gene_buffer.append(detailed_list)
            buffer_count += 1
            # TODO: buffer size same as for variants?
            if buffer_count >= self.buffer_size / 2:
                batch_insert(self.session, 'gene_detailed', get_column_names('gene_detailed'), gene_buffer)
                buffer_count = 0
                gene_buffer = blist([])
    # flush any remaining rows
    batch_insert(self.session, 'gene_detailed', get_column_names('gene_detailed'), gene_buffer)
def _get_gene_summary(self):
    """
    Populate the ``gene_summary`` table from the bundled
    ``summary_gene_table_v75`` annotation file.

    Rows are buffered and bulk-inserted every ``buffer_size / 2`` lines.
    """
    # unique identifier for each entry
    i = 0
    gene_buffer = blist([])
    buffer_count = 0
    config = read_gemini_config(args=self.args)
    path_dirname = config["annotation_dir"]
    file_path = os.path.join(path_dirname, 'summary_gene_table_v75')
    # parenthesized form prints identically under Python 2 and 3
    print('gene file path = %s' % file_path)
    # use a context manager so the annotation file is always closed
    # (the previous version leaked the file handle)
    with open(file_path, 'r') as annot_file:
        for line in annot_file:
            col = line.strip().split("\t")
            # skip the header line
            if col[0].startswith("Chromosome"):
                continue
            i += 1
            table = gene_table.gene_summary(col)
            # default cosmic census to False; update_gene_table() flips it
            # later for genes in the COSMIC census
            cosmic_census = 0
            summary_list = [i, table.chrom, table.gene, table.is_hgnc,
                            table.ensembl_gene_id, table.hgnc_id,
                            table.transcript_min_start,
                            table.transcript_max_end, table.strand,
                            table.synonym, table.rvis, table.mam_phenotype,
                            cosmic_census]
            gene_buffer.append(summary_list)
            buffer_count += 1
            if buffer_count >= self.buffer_size / 2:
                batch_insert(self.session, 'gene_summary', get_column_names("gene_summary"), gene_buffer)
                buffer_count = 0
                gene_buffer = blist([])
    # flush any remaining rows
    batch_insert(self.session, 'gene_summary', get_column_names("gene_summary"), gene_buffer)
def update_gene_table(self):
    """
    Flag genes that appear in the COSMIC cancer gene census in the
    gene tables (delegates to gene_table.update_cosmic_census_genes).
    """
    gene_table.update_cosmic_census_genes(self.session, self.args)
def _init_sample_gt_counts(self):
"""
Initialize a 2D array of counts for tabulating
the count of each genotype type for each sample.
The first dimension is one bucket for each sample.
The second dimension (size=4) is a count for each gt type.
Index 0 == # of hom_ref genotypes for the sample
Index 1 == # of het genotypes for the sample
Index 2 == # of missing genotypes for the sample
Index 3 == # of hom_alt genotypes for the sample
"""
self.sample_gt_counts = np.array(np.zeros((len(self.samples), 4)),
dtype='uint32')
def _update_sample_gt_counts(self, gt_types):
"""
Update the count of each gt type for each sample
"""
for idx, gt_type in enumerate(gt_types):
self.sample_gt_counts[idx][gt_type] += 1
def store_sample_gt_counts(self):
    """
    Persist the per-sample genotype-type tallies, flushing to the
    database in batches of at most 10000 rows.

    Each stored row is [hom_ref, het, hom_alt, unknown, sample_index].
    """
    samples_buffer = blist([])
    for idx, gt_counts in enumerate(self.sample_gt_counts):
        # Flush a full buffer *before* appending the current row.
        # The previous version flushed in an else-branch and never
        # appended that iteration's row, silently dropping one sample
        # every time the buffer reached capacity.
        if len(samples_buffer) >= 10000:
            self.batch_insert_gt_counts(samples_buffer)
            samples_buffer = blist([])
        samples_buffer.append([int(gt_counts[HOM_REF]),  # hom_ref
                               int(gt_counts[HET]),      # het
                               int(gt_counts[HOM_ALT]),  # hom_alt
                               int(gt_counts[UNKNOWN]),  # missing
                               idx])
    # flush the remainder
    self.batch_insert_gt_counts(samples_buffer)
def batch_insert_gt_counts(self, contents):
    """
    Upsert per-sample genotype tallies into ``sample_genotype_counts``.

    *contents* is a sequence of rows
    ``[hom_ref, het, hom_alt, unknown, sample_id]``.  Existing rows are
    incremented under an optimistic Cassandra lightweight-transaction
    check (``IF version = ?``); new rows are created with
    ``IF NOT EXISTS``.  Entries whose LWT was not applied (lost a race
    with a concurrent writer) are retried recursively.
    """
    update_query = self.session.prepare('''UPDATE sample_genotype_counts \
        SET num_hom_ref = ?,\
        num_het = ?, \
        num_hom_alt = ?, \
        num_unknown = ?, \
        version = ? \
        WHERE sample_id = ? \
        IF version = ?''')
    create_query = self.session.prepare('''INSERT INTO sample_genotype_counts \
        (num_hom_ref,\
        num_het, \
        num_hom_alt, \
        num_unknown, \
        sample_id, \
        version) \
        VALUES (?,?,?,?,?,?) IF NOT EXISTS''')
    get_query = self.session.prepare("SELECT * FROM sample_genotype_counts WHERE sample_id = ?")

    def get_version(sample_id):
        # Despite the name, this returns the whole existing row for the
        # sample (or [] when no row exists yet).
        res = self.session.execute(get_query, [sample_id])
        if len(res) > 0:
            return res[0]
        else:
            return []

    retries = []
    for entry in contents:
        vals = get_version(entry[4])
        versioned_entry = entry
        if vals == []:
            # no row yet: create it at version 0
            versioned_entry.append(0)
            res = self.session.execute(create_query, versioned_entry)
        else:
            # add the new tallies onto the stored ones and bump the version
            updated_contents = [entry[0]+vals.num_hom_ref, entry[1]+vals.num_het, entry[2]+vals.num_hom_alt,
                                entry[3]+vals.num_unknown, vals.version + 1, entry[4], vals.version]
            res = self.session.execute(update_query, updated_contents)
        if not res[0].applied:
            # conditional write lost the race; try this entry again
            retries.append(entry)
    if len(retries) > 0:
        # NOTE(review): recursion is unbounded under sustained contention
        self.batch_insert_gt_counts(retries)
def concat(l):
    """Flatten a list of lists into one list, preserving order.

    An empty input yields an empty list.
    """
    flattened = []
    for part in l:
        flattened = flattened + part
    return flattened
def concat_key_value(samples_dict):
    """Prepend each key onto its value list.

    Returns a blist of blist rows, one per dict entry: [key, *values].
    """
    rows = blist([])
    for key in samples_dict.keys():
        rows.append(blist([key]) + samples_dict[key])
    return rows
def parse_float(s):
    """Return float(s), or None when s cannot be converted.

    Both unparseable strings (ValueError) and non-numeric types such as
    None (TypeError) map to None.
    """
    try:
        return float(s)
    except (ValueError, TypeError):
        return None
def parse_int(s):
    """Return int(s) with sentinel error codes instead of exceptions.

    -42 flags a value that does not parse as an integer (ValueError);
    -43 flags a type that cannot be converted at all (TypeError).
    """
    try:
        return int(s)
    except TypeError:
        # e.g. None: not convertible at all
        return -43
    except ValueError:
        # e.g. a non-numeric string
        return -42
def load(parser, args):
    """
    Entry point for the ``load`` command.

    Validates the command-line arguments, loads the annotation files,
    then populates the geminicassandra database from the VCF.
    """
    if args.vcf is None:
        parser.print_help()
        exit("ERROR: load needs both a VCF file and a database file\n")
    if args.anno_type not in ['snpEff', 'VEP', None]:
        parser.print_help()
        exit("\nERROR: Unsupported selection for -t\n")
    # collect all of the add'l annotation files
    annotations.load_annos(args)
    # create a new geminicassandra loader and populate
    # the geminicassandra db and files from the VCF
    gemini_loader = GeminiLoader(args)
    gemini_loader.connect_to_db()
    if not args.no_genotypes and not args.no_load_genotypes:
        gemini_loader._init_sample_gt_counts()
    gemini_loader.populate_from_vcf()
    #gemini_loader.update_gene_table()
    if not args.no_genotypes and not args.no_load_genotypes:
        # NOTE(review): presumably staggers concurrent loader processes to
        # reduce contention on the conditional gt-count writes -- confirm
        if cpu_count() > 8:
            sleep(randint(0, 8) * 20)
        gemini_loader.store_sample_gt_counts()
    gemini_loader.disconnect()
import unittest
import numpy as np
from pyfiberamp.fibers import YbDopedDoubleCladFiber
from pyfiberamp.steady_state import SteadyStateSimulation
class YbDoubleCladWithGuessTestCase(unittest.TestCase):
    """
    Regression test for a Yb-doped double-clad fiber amplifier.

    Sweeps the backward-pump wavelength from 910 nm to 950 nm, re-using
    each converged power distribution as the initial guess for the next
    solve, and compares the simulated signal gains to reference values.
    """
    @classmethod
    def setUpClass(cls):
        # fiber parameters (SI units assumed: lengths in meters -- confirm)
        Yb_number_density = 3e25
        core_r = 5e-6
        background_loss = 0
        length = 3
        pump_cladding_r = 50e-6
        core_to_cladding_ratio = core_r / pump_cladding_r
        core_NA = 0.12
        tolerance = 1e-5  # convergence tolerance for the steady-state solver
        cls.input_signal_power = 0.4
        cls.input_pump_power = 47.2
        fiber = YbDopedDoubleCladFiber(length,
                                       core_r, Yb_number_density,
                                       background_loss, core_NA,
                                       core_to_cladding_ratio)
        # 11 pump wavelengths, 910..950 nm
        pump_wavelengths = np.linspace(910, 950, 11) * 1e-9
        cls.gains = []
        init_guess_array = None  # first solve runs without a guess
        for pump_wl in pump_wavelengths:
            simulation = SteadyStateSimulation()
            simulation.fiber = fiber
            simulation.add_cw_signal(wl=1030e-9, power=cls.input_signal_power,
                                     mode_shape_parameters={'functional_form': 'gaussian',
                                                            'mode_diameter': 2 * 4.8e-6})
            simulation.add_backward_pump(wl=pump_wl, power=cls.input_pump_power)
            if init_guess_array is not None:
                # warm-start from the previous wavelength's solution
                simulation.set_guess_array(init_guess_array)
            result = simulation.run(tol=tolerance)
            init_guess_array = result.powers
            result_dict = result.make_result_dict()
            signal_gain = result_dict['forward_signal']['gain'][0]
            cls.gains.append(signal_gain)

    def test_gains(self):
        """Each simulated gain must (approximately) match its reference."""
        expected_gains = [16.920274464870143, 16.920807985811383, 16.89380779110342,
                          16.72773065938639, 16.39251181039223, 15.932553278021926,
                          15.327637260825988, 14.58562152909674, 13.963103951187797,
                          13.431642299893685, 12.84524276399409]
        simulated_gains = self.gains
        for expected, simulated in zip(expected_gains, simulated_gains):
            self.assertAlmostEqual(simulated, expected)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import curses
import FontManager
import numpy as np
import PacketFormat as PF
WIN_WIDTH = 256   # curses window width in cells
WIN_HEIGHT = 32   # curses window height in rows
POLLING_SEC = 0.050             # socket receive timeout / main-loop polling period
SCROLL_SEC = POLLING_SEC * 1.5  # time between scroll steps of the title bitmap
class ScreenThread(threading.Thread):
    """
    Background thread that renders a small music-player display via curses.

    Receives fixed-format packets (see PacketFormat) from the host over
    *dev_so* and draws either a scrolling title bitmap or a spectrum view;
    key presses are sent back to the host as single-byte input flags.
    """
    def __init__(self, dev_so):
        """
        Socket used to communicate with the host;
        blocking mode with a timeout so the main loop keeps polling.
        """
        self.dev_so = dev_so
        self.dev_so.setblocking(1)
        self.dev_so.settimeout(POLLING_SEC)
        """ Display-control state """
        self.bitmap = [0]               # column bitmap of the title text
        self.is_display_enable = False
        self.is_scroll_enable = False
        self.rest_scroll_sec = 0        # time accumulated towards the next scroll step
        self.base_x = 0                 # current x offset of the scrolling bitmap
        super(ScreenThread, self).__init__()

    def run(self):
        curses.wrapper(self.curses_main)

    def curses_main(self, stdscr):
        """ Disable echo, input buffering and the cursor """
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        """
        Create a new window,
        defer actual drawing until doupdate(),
        and make getch() non-blocking.
        """
        self.win = curses.newwin(WIN_HEIGHT, WIN_WIDTH)
        self.win.noutrefresh()
        self.win.timeout(0)
        while True:
            if self.main_loop() is False:
                break

    def main_loop(self):
        """ Poll for a key press and, if any, forward it to the host """
        c = self.win.getch()
        if c is not curses.ERR:
            if 0 <= c and c <= 255:
                c = chr(c)
            data = 0
            if c == '+':
                data |= PF.INPUT_NEXT
            if c == '-':
                data |= PF.INPUT_PREV
            if c == 's':
                data |= PF.INPUT_SELECT
            if c == 'q':
                data |= PF.INPUT_QUIT
            send_data = np.array(data, dtype=np.uint8)
            self.dev_so.send(send_data)
        """ On a successful receive run the matching handler; a timeout is a no-op """
        try:
            recv_data = self.dev_so.recv(64)
            data = np.fromstring(recv_data, dtype=np.uint8)
            if data[0] == PF.ID_CONTROL:
                cmd = data[1]
                if cmd & PF.CONTROL_SHUTDOWN:
                    return False
                if cmd & PF.CONTROL_BITMAP_CLEAR:
                    # blank the whole title area, then start a fresh bitmap
                    self.bitmap = [0 for i in xrange(WIN_WIDTH)]
                    self.draw_bitmap(0, 0)
                    self.bitmap = []
                    self.base_x = 0
                self.is_display_enable = True if cmd & PF.CONTROL_DISPLAY_ENABLE else False
                self.is_scroll_enable = True if cmd & PF.CONTROL_SCROLL_ENABLE else False
            elif data[0] == PF.ID_BITMAP:
                self.bitmap.extend(data[1:1+PF.BITMAP_SIZE])
            elif data[0] == PF.ID_SPECTRUM:
                self.draw_spec(data[1:1+PF.SPECTRUM_SIZE])
        except:
            # NOTE(review): bare except also hides real socket errors;
            # presumably only socket.timeout is expected here -- confirm.
            pass
        """ Draw the track title """
        if self.is_display_enable:
            if self.is_scroll_enable:
                self.rest_scroll_sec += POLLING_SEC
                if self.rest_scroll_sec >= SCROLL_SEC:
                    self.rest_scroll_sec = 0
                    self.base_x -= 2  # scroll left two cells per step
                    # wrap around once the bitmap has fully left the window
                    if self.base_x <= -len(self.bitmap) * 2:
                        self.base_x = WIN_WIDTH
            self.draw_bitmap(self.base_x, 0)
        """ Refresh the screen """
        self.win.refresh()
        curses.doupdate()
        return True

    def draw_spec(self, spec):
        """Draw a bar-graph spectrum: one 8-cell-wide column per value."""
        y_pos = 0
        # scan rows top-down, comparing each bar height against 31..0
        for y in range(32)[::-1]:
            x_pos = 0
            for x in spec:
                if x > y:
                    # filled part of the bar (reverse video) plus separator
                    self.win.addstr(y_pos, x_pos, '______', curses.A_REVERSE)
                    self.win.addstr('| ')
                else:
                    self.win.addstr(y_pos, x_pos, '        ')
                x_pos += 8
            y_pos += 1

    def draw_bitmap(self, base_x, base_y):
        """Draw the 1-bit title bitmap with its origin at (base_x, base_y).

        Each byte of self.bitmap is a vertical column of 8 pixels; pixels
        are rendered two cells wide, with set bits in reverse video.
        """
        for x in range(len(self.bitmap)):
            draw_x = base_x + x*2
            # clip columns that fall outside the window
            if not (0 <= draw_x and draw_x < WIN_WIDTH - 1):
                continue
            for y in range(8):
                draw_y = base_y + y
                if not (0 <= draw_y and draw_y < WIN_HEIGHT - 1):
                    continue
                if self.bitmap[x] & (1<<y):
                    self.win.addstr(draw_y, draw_x, '  ', curses.A_REVERSE)
                else:
                    self.win.addstr(draw_y, draw_x, '  ')
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import CHILD_EDGE, Node, DSubTree, DVarDecl, DType, DStop
from synthesis.ops.candidate_ast import CONCEPT_NODE, TYPE_NODE
from utilities.vocab_building_dictionary import DELIM
import numpy as np
class Candidate:
    """
    One beam-search candidate for program synthesis.

    Tracks the AST built so far (rooted at a DSubTree), the decoder state,
    the running log-probability, and the bookkeeping needed to decide when
    the candidate stops "rolling" (i.e. stops being expanded).
    """
    def __init__(self, initial_state, ret_type, prob):
        ## SYNTHESIS
        self.head = self.tree_currNode = DSubTree()  # root of (and cursor into) the synthesized AST
        self.return_type = ret_type
        self.curr_node_val = self.head.val
        self.curr_edge = CHILD_EDGE
        self.next_node_type = CONCEPT_NODE
        self.control_flow_stack = []
        ## DEBUGGING PURPOSE
        self.storage = []
        ## BEAM SEARCH PURPOSES
        self.length = 1
        # an unknown probability is treated as the worst possible score
        self.log_probability = -np.inf if prob is None else prob
        self.rolling = True
        self.state = initial_state  # decoder state for this candidate

    def is_rolling(self):
        """True while this candidate is still being expanded."""
        return self.rolling

    def is_not_rolling(self):
        return not self.is_rolling()

    def stop_rolling(self):
        """Mark this candidate as finished."""
        self.rolling = False
        return

    def length_mod_and_check(self, curr_val, max_length):
        """Count a DVarDecl concept node towards the length budget and
        stop rolling once *max_length* is reached."""
        self.length += 1 \
            if self.next_node_type == CONCEPT_NODE \
            and curr_val in [DVarDecl.name()] \
            else 0
        if self.length >= max_length:
            self.stop_rolling()
        return

    def add_to_storage(self):
        # record the current node value for later debug printing
        self.storage.append([self.curr_node_val])

    def debug_print(self, vocab_types):
        """Print the stored node values, decoded through *vocab_types*."""
        for i in range(len(self.storage[0])):
            for val in self.storage:
                print(vocab_types[val[i]], end=',')
            print()

    def force_finish(self):
        # TODO
        pass

    def add_node(self, value2add):
        """Append the node named *value2add* along curr_edge and advance
        the tree cursor to it."""
        node = self.resolve_node_type(value2add)
        self.tree_currNode = self.tree_currNode.add_and_progress_node(node,
                                                                      edge=self.curr_edge)
        return self

    def resolve_node_type(self, value):
        """Map a vocabulary value to a concrete AST node instance.

        Anything unrecognized falls back to a DStop node.
        """
        if value == DVarDecl.name():
            node = DVarDecl({}, {})
        elif self.next_node_type == TYPE_NODE:
            node = DType(value)
        elif value == DSubTree.name():
            node = DSubTree()
        elif value == DStop.name():
            node = DStop()
        else:
            # print(
            #     "Unknown node generated in FP :: value is " + str(value) +
            #     " next node type is " + str(self.next_node_type))
            node = DStop()
        return node
import neural_network_lyapunov.examples.quadrotor2d.quadrotor_2d as\
quadrotor_2d
import neural_network_lyapunov.relu_system as relu_system
import neural_network_lyapunov.lyapunov as lyapunov
import neural_network_lyapunov.feedback_system as feedback_system
import neural_network_lyapunov.train_lyapunov as train_lyapunov
import neural_network_lyapunov.utils as utils
import neural_network_lyapunov.mip_utils as mip_utils
import neural_network_lyapunov.train_utils as train_utils
import neural_network_lyapunov.r_options as r_options
import torch
import numpy as np
import scipy.integrate
import gurobipy
import argparse
import os
def generate_quadrotor_dynamics_data(dt):
    """
    Generate the pairs (x[n], u[n]) -> (x[n+1])

    Integrates the continuous 2D quadrotor dynamics for *dt* seconds from
    1000 sampled states crossed with 1000 sampled inputs, and returns the
    pairs as a TensorDataset of (x‖u) -> x_next rows.
    """
    dtype = torch.float64
    plant = quadrotor_2d.Quadrotor2D(dtype)
    theta_range = [-np.pi / 2, np.pi / 2]
    ydot_range = [-5, 5]
    zdot_range = [-5, 5]
    thetadot_range = [-2.5, 2.5]
    u_range = [-0.5, 8.5]
    # We don't need to take the grid on y and z dimension of the quadrotor,
    # since the dynamics is invariant along these dimensions.
    x_samples = torch.cat((
        torch.zeros((1000, 2), dtype=torch.float64),
        utils.uniform_sample_in_box(
            torch.tensor([
                theta_range[0], ydot_range[0], zdot_range[0], thetadot_range[0]
            ],
                         dtype=torch.float64),
            torch.tensor([
                theta_range[1], ydot_range[1], zdot_range[1], thetadot_range[1]
            ],
                         dtype=torch.float64), 1000)),
        dim=1).T
    u_samples = utils.uniform_sample_in_box(
        torch.full((2, ), u_range[0], dtype=dtype),
        torch.full((2, ), u_range[1], dtype=dtype), 1000).T
    xu_tensors = []
    x_next_tensors = []
    # integrate every (state, input) pair for one step of length dt
    for i in range(x_samples.shape[1]):
        for j in range(u_samples.shape[1]):
            result = scipy.integrate.solve_ivp(
                lambda t, x: plant.dynamics(x, u_samples[:, j].detach().numpy(
                )), (0, dt), x_samples[:, i].detach().numpy())
            xu_tensors.append(
                torch.cat((x_samples[:, i], u_samples[:, j])).reshape((1, -1)))
            # final integration state is the discrete-time successor
            x_next_tensors.append(
                torch.from_numpy(result.y[:, -1]).reshape((1, -1)))
    dataset_input = torch.cat(xu_tensors, dim=0)
    dataset_output = torch.cat(x_next_tensors, dim=0)
    return torch.utils.data.TensorDataset(dataset_input, dataset_output)
def train_forward_model(forward_model, model_dataset, num_epochs):
    """
    Fit *forward_model* to the velocity-residue dynamics extracted from
    *model_dataset* (the (x‖u) -> x_next pairs).
    """
    # The forward model maps (theta[n], u1[n], u2[n]) to
    # (ydot[n+1]-ydot[n], zdot[n+1]-zdot[n], thetadot[n+1]-thetadot[n])
    # NOTE(review): column 5 (thetadot) is also part of the input below,
    # so the mapping presumably includes thetadot[n] -- confirm.
    plant = quadrotor_2d.Quadrotor2D(torch.float64)
    u_equilibrium = plant.u_equilibrium
    xu_inputs, x_next_outputs = model_dataset[:]
    network_input_data = xu_inputs[:, [2, 5, 6, 7]]
    # velocity deltas between successive states
    network_output_data = x_next_outputs[:, 3:] - xu_inputs[:, 3:6]
    v_dataset = torch.utils.data.TensorDataset(network_input_data,
                                               network_output_data)

    def compute_next_v(model, theta_thetadot_u):
        # subtract the network's equilibrium output so the learned residue
        # vanishes at the equilibrium
        return model(theta_thetadot_u) - model(
            torch.cat(
                (torch.tensor([0, 0], dtype=torch.float64), u_equilibrium)))

    utils.train_approximator(v_dataset,
                             forward_model,
                             compute_next_v,
                             batch_size=50,
                             num_epochs=num_epochs,
                             lr=0.001)
def train_lqr_value_approximator(lyapunov_relu, V_lambda, R, x_equilibrium,
                                 x_lo, x_up, num_samples, lqr_S: torch.Tensor):
    """
    We train both lyapunov_relu and R such that ϕ(x) − ϕ(x*) + λ|R(x−x*)|₁
    approximates the lqr cost-to-go.

    The target cost-to-go is (x−x*)ᵀ lqr_S (x−x*), evaluated on
    *num_samples* states drawn uniformly from the box [x_lo, x_up]; R is
    optimized jointly with the network weights, then frozen again.
    """
    x_samples = utils.uniform_sample_in_box(x_lo, x_up, num_samples)
    # quadratic cost-to-go (x-x*)^T S (x-x*) per sample, as a column vector
    V_samples = torch.sum((x_samples.T - x_equilibrium.reshape(
        (6, 1))) * (lqr_S @ (x_samples.T - x_equilibrium.reshape((6, 1)))),
                          dim=0).reshape((-1, 1))
    state_value_dataset = torch.utils.data.TensorDataset(x_samples, V_samples)
    R.requires_grad_(True)

    def compute_v(model, x):
        return model(x) - model(x_equilibrium) + V_lambda * torch.norm(
            R @ (x - x_equilibrium.reshape((1, 6))).T, p=1, dim=0).reshape(
                (-1, 1))

    utils.train_approximator(state_value_dataset,
                             lyapunov_relu,
                             compute_v,
                             batch_size=50,
                             num_epochs=200,
                             lr=0.001,
                             additional_variable=[R])
    # freeze R after the joint optimization
    R.requires_grad_(False)
def train_lqr_control_approximator(controller_relu, x_equilibrium,
                                   u_equilibrium, x_lo, x_up, num_samples,
                                   lqr_K: torch.Tensor):
    """
    Pre-train *controller_relu* so that π(x) − π(x*) + u* approximates the
    LQR feedback law lqr_K (x − x*) + u* on *num_samples* states drawn
    uniformly from the box [x_lo, x_up].
    """
    x_samples = utils.uniform_sample_in_box(x_lo, x_up, num_samples)
    # LQR target actions for every sampled state
    u_samples = (lqr_K @ (x_samples.T - x_equilibrium.reshape(
        (6, 1))) + u_equilibrium.reshape((2, 1))).T
    state_control_dataset = torch.utils.data.TensorDataset(
        x_samples, u_samples)

    def compute_u(model, x):
        # subtract the equilibrium output so π(x*) = u* by construction
        return model(x) - model(x_equilibrium) + u_equilibrium

    utils.train_approximator(state_control_dataset,
                             controller_relu,
                             compute_u,
                             batch_size=50,
                             num_epochs=50,
                             lr=0.001)
def train_nn_controller_approximator(controller_relu, target_controller_relu,
                                     x_lo, x_up, num_samples, num_epochs):
    """
    Distill *target_controller_relu* into *controller_relu* by regressing
    on *num_samples* states drawn uniformly from the box [x_lo, x_up].
    """
    x_samples = utils.uniform_sample_in_box(x_lo, x_up, num_samples)
    target_controller_relu_output = target_controller_relu(x_samples)
    dataset = torch.utils.data.TensorDataset(x_samples,
                                             target_controller_relu_output)

    def compute_output(model, x):
        return model(x)

    utils.train_approximator(dataset,
                             controller_relu,
                             compute_output,
                             batch_size=50,
                             num_epochs=num_epochs,
                             lr=0.001)
def train_nn_lyapunov_approximator(lyapunov_relu, R, target_lyapunov_relu,
                                   target_R, V_lambda, x_equilibrium, x_lo,
                                   x_up, num_samples, num_epochs):
    """
    Distill the target Lyapunov function
    ϕ_t(x) − ϕ_t(x*) + λ|R_t(x−x*)|₁ into (*lyapunov_relu*, *R*) on
    *num_samples* states drawn uniformly from the box [x_lo, x_up];
    R is trained jointly and then frozen again.
    """
    x_samples = utils.uniform_sample_in_box(x_lo, x_up, num_samples)
    with torch.no_grad():
        # evaluate the target Lyapunov values without building a graph
        target_V = target_lyapunov_relu(x_samples) - target_lyapunov_relu(
            x_equilibrium) + V_lambda * torch.norm(
                target_R @ (x_samples - x_equilibrium).T, p=1, dim=0).reshape(
                    (-1, 1))
    dataset = torch.utils.data.TensorDataset(x_samples, target_V)

    def compute_V(model, x):
        return model(x) - model(x_equilibrium) + V_lambda * torch.norm(
            R @ (x - x_equilibrium).T, p=1, dim=0).reshape((-1, 1))

    R.requires_grad_(True)
    utils.train_approximator(dataset,
                             lyapunov_relu,
                             compute_V,
                             batch_size=50,
                             num_epochs=num_epochs,
                             lr=0.001,
                             additional_variable=[R])
    R.requires_grad_(False)
def simulate_quadrotor_with_controller(controller_relu, t_span, x_equilibrium,
                                       u_lo, u_up, x0):
    """
    Integrate the closed-loop 2D quadrotor from *x0* over *t_span*.

    The control action is controller_relu(x) - controller_relu(x*) + u*,
    saturated element-wise into [u_lo, u_up]; the trajectory is sampled
    every 0.01 s.
    """
    plant = quadrotor_2d.Quadrotor2D(torch.float64)
    u_equilibrium = plant.u_equilibrium

    def closed_loop_dynamics(t, state):
        # evaluate the NN controller without building an autograd graph
        with torch.no_grad():
            state_torch = torch.from_numpy(state)
            u_unclipped = controller_relu(state_torch)\
                - controller_relu(x_equilibrium) + u_equilibrium
            # saturate to the actuator limits
            u_clipped = torch.max(torch.min(u_unclipped, u_up), u_lo)
            control = u_clipped.detach().numpy()
        return plant.dynamics(state, control)

    sample_times = np.arange(start=t_span[0], stop=t_span[1], step=0.01)
    return scipy.integrate.solve_ivp(closed_loop_dynamics,
                                     t_span,
                                     x0,
                                     t_eval=sample_times)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="quadrotor 2d training demo")
parser.add_argument("--generate_dynamics_data", action="store_true")
parser.add_argument("--load_dynamics_data",
type=str,
default=None,
help="path to the dynamics data.")
parser.add_argument("--train_forward_model", action="store_true")
parser.add_argument("--load_forward_model",
type=str,
default=None,
help="path to load dynamics model")
parser.add_argument("--load_lyapunov_relu",
type=str,
default=None,
help="path to the lyapunov model data.")
parser.add_argument("--load_controller_relu",
type=str,
default=None,
help="path to the controller data.")
parser.add_argument("--train_lqr_approximator", action="store_true")
parser.add_argument("--search_R", action="store_true")
parser.add_argument("--train_on_samples", action="store_true")
parser.add_argument("--enable_wandb", action="store_true")
parser.add_argument("--train_adversarial", action="store_true")
parser.add_argument("--max_iterations", type=int, default=5000)
parser.add_argument("--training_set", type=str, default=None)
args = parser.parse_args()
dir_path = os.path.dirname(os.path.realpath(__file__))
dt = 0.01
dtype = torch.float64
if args.generate_dynamics_data:
model_dataset = generate_quadrotor_dynamics_data(dt)
if args.load_dynamics_data is not None:
model_dataset = torch.load(args.load_dynamics_data)
if args.train_forward_model:
forward_model = utils.setup_relu((4, 6, 6, 3),
params=None,
bias=True,
negative_slope=0.01,
dtype=dtype)
train_forward_model(forward_model, model_dataset, num_epochs=100)
if args.load_forward_model:
forward_model_data = torch.load(args.load_forward_model)
forward_model = utils.setup_relu(
forward_model_data["linear_layer_width"],
params=None,
bias=forward_model_data["bias"],
negative_slope=forward_model_data["negative_slope"],
dtype=dtype)
forward_model.load_state_dict(forward_model_data["state_dict"])
plant = quadrotor_2d.Quadrotor2D(dtype)
x_star = np.zeros((6, ))
u_star = plant.u_equilibrium.detach().numpy()
lqr_Q = np.diag([10, 10, 10, 1, 1, plant.length / 2. / np.pi])
lqr_R = np.array([[0.1, 0.05], [0.05, 0.1]])
K, S = plant.lqr_control(lqr_Q, lqr_R, x_star, u_star)
S_eig_value, S_eig_vec = np.linalg.eig(S)
# R = torch.zeros((9, 6), dtype=dtype)
# R[:3, :3] = torch.eye(3, dtype=dtype)
# R[3:6, :3] = torch.eye(3, dtype=dtype) / np.sqrt(2)
# R[3:6, 3:6] = torch.eye(3, dtype=dtype) / np.sqrt(2)
# R[6:9, :3] = -torch.eye(3, dtype=dtype) / np.sqrt(2)
# R[6:9, 3:6] = torch.eye(3, dtype=dtype) / np.sqrt(2)
# R = torch.cat((R, torch.from_numpy(S_eig_vec)), dim=0)
R = torch.from_numpy(S) + 0.01 * torch.eye(6, dtype=dtype)
lyapunov_relu = utils.setup_relu((6, 10, 10, 4, 1),
params=None,
negative_slope=0.1,
bias=True,
dtype=dtype)
V_lambda = 0.9
if args.load_lyapunov_relu is not None:
lyapunov_data = torch.load(args.load_lyapunov_relu)
lyapunov_relu = utils.setup_relu(
lyapunov_data["linear_layer_width"],
params=None,
negative_slope=lyapunov_data["negative_slope"],
bias=lyapunov_data["bias"],
dtype=dtype)
lyapunov_relu.load_state_dict(lyapunov_data["state_dict"])
V_lambda = lyapunov_data["V_lambda"]
R = lyapunov_data["R"]
controller_relu = utils.setup_relu((6, 6, 4, 2),
params=None,
negative_slope=0.01,
bias=True,
dtype=dtype)
if args.load_controller_relu is not None:
controller_data = torch.load(args.load_controller_relu)
controller_relu = utils.setup_relu(
controller_data["linear_layer_width"],
params=None,
negative_slope=controller_data["negative_slope"],
bias=controller_data["bias"],
dtype=dtype)
controller_relu.load_state_dict(controller_data["state_dict"])
q_equilibrium = torch.tensor([0, 0, 0], dtype=dtype)
u_equilibrium = plant.u_equilibrium
x_lo = torch.tensor([-0.7, -0.7, -np.pi * 0.5, -3.75, -3.75, -2.5],
dtype=dtype)
x_up = -x_lo
u_lo = torch.tensor([0, 0], dtype=dtype)
u_up = torch.tensor([8, 8], dtype=dtype)
if args.enable_wandb:
train_utils.wandb_config_update(args, lyapunov_relu, controller_relu,
x_lo, x_up, u_lo, u_up)
if args.train_lqr_approximator:
x_equilibrium = torch.cat(
(q_equilibrium, torch.zeros((3, ), dtype=dtype)))
train_lqr_control_approximator(controller_relu, x_equilibrium,
u_equilibrium, x_lo, x_up, 100000,
torch.from_numpy(K))
train_lqr_value_approximator(lyapunov_relu, V_lambda, R, x_equilibrium,
x_lo, x_up, 100000, torch.from_numpy(S))
forward_system = relu_system.ReLUSecondOrderResidueSystemGivenEquilibrium(
dtype,
x_lo,
x_up,
u_lo,
u_up,
forward_model,
q_equilibrium,
u_equilibrium,
dt,
network_input_x_indices=[2, 5])
closed_loop_system = feedback_system.FeedbackSystem(
forward_system, controller_relu, forward_system.x_equilibrium,
forward_system.u_equilibrium,
u_lo.detach().numpy(),
u_up.detach().numpy())
lyap = lyapunov.LyapunovDiscreteTimeHybridSystem(closed_loop_system,
lyapunov_relu)
if args.search_R:
_, R_sigma, _ = np.linalg.svd(R.detach().numpy())
R_options = r_options.SearchRwithSVDOptions(R.shape, R_sigma * 0.8)
R_options.set_variable_value(R.detach().numpy())
else:
R_options = r_options.FixedROptions(R)
dut = train_lyapunov.TrainLyapunovReLU(lyap, V_lambda,
closed_loop_system.x_equilibrium,
R_options)
dut.lyapunov_positivity_mip_pool_solutions = 1
dut.lyapunov_derivative_mip_pool_solutions = 1
dut.lyapunov_derivative_convergence_tol = 1E-5
dut.lyapunov_positivity_convergence_tol = 5e-6
dut.max_iterations = args.max_iterations
dut.lyapunov_positivity_epsilon = 0.1
dut.lyapunov_derivative_epsilon = 0.001
dut.lyapunov_derivative_eps_type = lyapunov.ConvergenceEps.ExpLower
state_samples_all = utils.get_meshgrid_samples(x_lo,
x_up, (7, 7, 7, 7, 7, 7),
dtype=dtype)
dut.output_flag = True
if args.train_on_samples:
dut.train_lyapunov_on_samples(state_samples_all,
num_epochs=10,
batch_size=50)
dut.enable_wandb = args.enable_wandb
if args.train_adversarial:
options = train_lyapunov.TrainLyapunovReLU.AdversarialTrainingOptions()
options.num_batches = 10
options.num_epochs_per_mip = 20
options.positivity_samples_pool_size = 10000
options.derivative_samples_pool_size = 100000
dut.lyapunov_positivity_mip_pool_solutions = 100
dut.lyapunov_derivative_mip_pool_solutions = 500
dut.add_derivative_adversarial_state = True
dut.add_positivity_adversarial_state = True
forward_system.network_bound_propagate_method =\
mip_utils.PropagateBoundsMethod.MIP
dut.lyapunov_hybrid_system.network_bound_propagate_method =\
mip_utils.PropagateBoundsMethod.MIP
closed_loop_system.controller_network_bound_propagate_method =\
mip_utils.PropagateBoundsMethod.MIP
dut.lyapunov_derivative_mip_params = {
gurobipy.GRB.Param.OutputFlag: False
}
if args.training_set:
training_set_data = torch.load(args.training_set)
positivity_state_samples_init = training_set_data[
"positivity_state_samples"]
derivative_state_samples_init = training_set_data[
"derivative_state_samples"]
else:
positivity_state_samples_init = utils.uniform_sample_in_box(
x_lo, x_up, 1000)
derivative_state_samples_init = positivity_state_samples_init
result = dut.train_adversarial(positivity_state_samples_init,
derivative_state_samples_init, options)
else:
dut.train(torch.empty((0, 6), dtype=dtype))
pass | |
#!/usr/bin/env python -u
# Validation script for GASKAP HI data
#
# Author James Dempsey
# Date 23 Nov 2019
from __future__ import print_function, division
import argparse
import csv
import datetime
import glob
import math
import os
import re
from string import Template
import shutil
import time
import warnings
import matplotlib
matplotlib.use('agg')
import aplpy
from astropy.constants import k_B
from astropy.coordinates import SkyCoord
from astropy.io import ascii, fits
from astropy.io.votable import parse, from_table, writeto
from astropy.io.votable.tree import Info
from astropy.table import Table, Column
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import numpy as np
from radio_beam import Beam
from spectral_cube import SpectralCube
from statsmodels.tsa import stattools
from statsmodels.graphics.tsaplots import plot_pacf
import seaborn as sns
from validation import Bandpass, Diagnostics, SelfCal, Spectra
from validation_reporter import ValidationReport, ReportSection, ReportItem, ValidationMetric, output_html_report, output_metrics_xml
# GASS velocity step boundaries (km/s) - the supported lower/upper bounds
# for the emission / non-emission test ranges (see set_velocity_range()).
vel_steps = [-324, -280, -234, -189, -143, -100, -60, -15, 30, 73, 119, 165, 200, 236, 273, 311, 357, 399]
#emission_vel_range=[] # (165,200)*u.km/u.s
# Default velocity range where HI emission is expected; may be overridden
# in place by set_velocity_range() from the --emvel argument.
emission_vel_range=(119,165)*u.km/u.s
# Default velocity range where HI emission is NOT expected; may be overridden
# in place by set_velocity_range() from the --nonemvel argument.
non_emission_val_range=(-100,-60)*u.km/u.s
# Sub-folder (under the report output folder) where figures are written.
figures_folder = 'figures'
# Metric ratings used in the validation report (1=good, 2=uncertain, 3=bad).
METRIC_BAD = 3
METRIC_UNCERTAIN = 2
METRIC_GOOD = 1
def parseargs():
    """
    Build the command line parser for the validation script and parse the
    supplied arguments.

    :return: An args namespace with the parsed arguments
    """
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Produce a validation report for GASKAP HI observations. Either a cube or an image (or both) must be supplied to be validated.")
    # Input data products - at least one of cube/image is expected.
    arg_parser.add_argument("-c", "--cube", required=False,
                            help="The HI spectral line cube to be checked.")
    arg_parser.add_argument("-i", "--image", required=False,
                            help="The continuum image to be checked.")
    arg_parser.add_argument("-s", "--source_cat", required=False,
                            help="The selavy source catalogue used for source identification.")
    arg_parser.add_argument("-b", "--beam_list", required=False,
                            help="The csv file describing the positions of each beam (in radians).")
    arg_parser.add_argument("-d", "--duration", required=False, type=float, default=12.0,
                            help="The duration of the observation in hours.")
    arg_parser.add_argument("-o", "--output", default='report',
                            help="The folder in which to save the validation report and associated figures.")
    # Velocity range configuration.
    arg_parser.add_argument("-e", "--emvel", required=False,
                            help="The low velocity bound of the velocity region where emission is expected.")
    arg_parser.add_argument("-n", "--nonemvel", required=False, default='-100',
                            help="The low velocity bound of the velocity region where emission is not expected.")
    arg_parser.add_argument("-N", "--noise", required=False, default=None,
                            help="Use this fits image of the local rms. Default is to run BANE")
    arg_parser.add_argument("-r", "--redo", default=False, action='store_true',
                            help="Rerun all steps, even if intermediate files are present.")
    arg_parser.add_argument("--num_spectra", required=False, type=int, default=15,
                            help="Number of sample spectra to create")
    return arg_parser.parse_args()
def get_str(value):
    """Return *value* as a str, decoding it first if it is a bytes object."""
    return value.decode() if isinstance(value, bytes) else value
def plot_histogram(file_prefix, xlabel, title):
    """Plot a log-scale histogram of the finite pixel values in <file_prefix>.fits.

    Saves both a full size png and a small thumbnail for the report tables.
    """
    pixels = fits.getdata(file_prefix + '.fits').flatten()
    # Drop NaN (blanked) pixels before binning.
    pixels = pixels[~np.isnan(pixels)]
    plt.hist(pixels, bins=200, bottom=1, log=True, histtype='step')
    plt.grid()
    plt.xlabel(xlabel)
    plt.ylabel('Count')
    plt.title(title)
    plt.savefig(file_prefix + '_hist.png', bbox_inches='tight')
    plt.savefig(file_prefix + '_hist_sml.png', dpi=16, bbox_inches='tight')
    plt.close()
def plot_map(file_prefix, title, cmap='magma', stretch='linear', pmax=99.75, colorbar_label=None):
    """Render <file_prefix>.fits as a colour-scale sky map with coordinate grid.

    Saves a full-size png, a pdf and a small thumbnail for the report tables.
    """
    fig = plt.figure(figsize=(5, 4.5))
    sky_map = aplpy.FITSFigure(file_prefix + '.fits', figure=fig)
    sky_map.show_colorscale(cmap=cmap, stretch=stretch, pmax=pmax)
    sky_map.add_colorbar()
    if colorbar_label:
        sky_map.colorbar.set_axis_label_text(colorbar_label)
    sky_map.add_grid()
    sky_map.set_title(title)
    sky_map.savefig(filename=file_prefix + '.png', dpi=200)
    sky_map.savefig(filename=file_prefix + '.pdf', dpi=100)
    sky_map.savefig(filename=file_prefix + '_sml.png', dpi=16)
    sky_map.close()
def plot_difference_map(hdu, file_prefix, title, vmin=None, vmax=None):
    """Plot an image HDU as a diverging (RdBu) map in its WCS projection.

    When neither vmin nor vmax is given, the colour range is clipped to the
    central 99.5% of the (NaN-filled) pixel values.
    """
    projection = WCS(hdu.header)
    fig = plt.figure(figsize=(18, 12))
    ax = fig.add_subplot(111, projection=projection)
    filled = np.nan_to_num(hdu.data)
    if vmin is None and vmax is None:
        vmin = np.percentile(filled, 0.25)
        vmax = np.percentile(filled, 99.75)
    img = ax.imshow(hdu.data, cmap='RdBu_r', vmin=vmin, vmax=vmax, origin='lower')
    ax.set_xlabel("Right Ascension (degrees)", fontsize=16)
    ax.set_ylabel("Declination (degrees)", fontsize=16)
    ax.set_title(title, fontsize=16)
    ax.grid(color='gray', ls='dotted', lw=2)
    plt.colorbar(img, pad=.07)
    plt.savefig(file_prefix + '.png', bbox_inches='tight')
    plt.savefig(file_prefix + '_sml.png', dpi=10, bbox_inches='tight')
    plt.close()
def output_plot(mp, title, imagename):
    """Append a titled, linked image to the open report page *mp*."""
    fragment = ('\n<h2>{0}</h2>\n<br/>'
                '\n<a href="{1}"><img width="800px" src="{1}"></a>'
                '\n<br/>\n').format(title, imagename)
    mp.write(fragment)
def output_map_page(filename, file_prefix, title):
    """Write a stand-alone HTML page with the background, noise and moment 0 maps."""
    plots = (('Large Scale Emission Map', file_prefix + '_bkg.png'),
             ('Noise Map', file_prefix + '_rms.png'),
             ('Moment 0 Map', file_prefix + '.png'))
    with open(filename, 'w') as mp:
        mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
        mp.write('\n<body>\n<h1>{}</h1>'.format(title))
        for plot_title, image in plots:
            output_plot(mp, plot_title, image)
        mp.write('\n</body>\n</html>\n')
def convert_slab_to_jy(slab, header):
    """Convert a spectral slab to Jy (if needed) using the beam and rest frequency.

    The rest frequency is taken from RESTFREQ (preferred) or RESTFRQ in the
    header, defaulting to the HI line rest frequency.
    """
    my_beam = Beam.from_fits_header(header)
    restfreq = 1.420405752E+09*u.Hz
    for key in ('RESTFREQ', 'RESTFRQ'):
        if key in header.keys():
            restfreq = header[key]*u.Hz
            break
    if slab.unmasked_data[0,0,0].unit != u.Jy:
        print ("Converting slab from {} to Jy".format(slab.unmasked_data[0,0,0].unit) )
        print (slab)
        # Large slabs need this flag for the unit conversion to proceed.
        slab.allow_huge_operations=True
        slab = slab.to(u.Jy, equivalencies=u.brightness_temperature(my_beam, restfreq))
        print (slab)
    return slab
def convert_data_to_jy(data, header, verbose=False):
    """Convert a spectrum/data array to Jy (if needed) using the beam and rest frequency.

    The rest frequency is taken from RESTFREQ (preferred) or RESTFRQ in the
    header, defaulting to the HI line rest frequency.
    """
    my_beam = Beam.from_fits_header(header)
    restfreq = 1.420405752E+09*u.Hz
    for key in ('RESTFREQ', 'RESTFRQ'):
        if key in header.keys():
            restfreq = header[key]*u.Hz
            break
    if data[0].unit != u.Jy:
        if verbose:
            print ("Converting data from {} to Jy".format(data[0].unit) )
        data = data.to(u.Jy, equivalencies=u.brightness_temperature(my_beam, restfreq))
    return data
def get_vel_limit(vel_cube):
    """Return the (minimum, maximum) values of the cube's spectral axis."""
    ordered = np.sort(vel_cube.spectral_axis)
    return ordered[0], ordered[-1]
def extract_slab(filename, vel_start, vel_end):
    """Extract a velocity slab (converted to Jy) from a spectral cube file.

    Returns None when the requested range lies entirely outside the cube.
    """
    vel_cube = SpectralCube.read(filename).with_spectral_unit(
        u.m/u.s, velocity_convention='radio')
    cube_vel_min, cube_vel_max = get_vel_limit(vel_cube)
    if vel_start > cube_vel_max or vel_end < cube_vel_min:
        return None
    slab = vel_cube.spectral_slab(vel_start, vel_end)
    return convert_slab_to_jy(slab, fits.getheader(filename))
def extract_channel_slab(filename, chan_start, chan_end):
    """
    Extract a channel range from a spectral cube file.

    :param filename: Path of the spectral cube FITS file.
    :param chan_start: First channel (inclusive) of the slab.
    :param chan_end: End channel (exclusive) of the slab.
    :return: The slab with its spectral axis in km/s (radio convention).
    """
    cube = SpectralCube.read(filename)
    vel_cube = cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
    slab = vel_cube[chan_start:chan_end,:, :].with_spectral_unit(u.km/u.s)
    # The previous unused fits.getheader(filename) call was removed - it
    # re-read the file for a header that was never used.
    return slab
def build_fname(example_name, suffix):
    """Return the basename of *example_name* with its extension replaced by *suffix*."""
    stem = os.path.splitext(os.path.basename(example_name))[0]
    return stem + suffix
def get_figures_folder(dest_folder):
    """Return the figures sub-folder path (with trailing slash) under *dest_folder*."""
    return '{}/{}/'.format(dest_folder, figures_folder)
def get_bane_background(infile, outfile_prefix, plot_title_suffix, ncores=8, redo=False, plot=True):
    """Produce BANE background/rms maps for *infile* and optionally plot them.

    The (slow) BANE run is skipped when its output already exists, unless
    redo is set. Returns the path of the background FITS file.
    """
    bkg_prefix = outfile_prefix+'_bkg'
    bkg_file = bkg_prefix + '.fits'
    if redo or not os.path.exists(bkg_file):
        # NOTE(review): the command is run through the shell; file names
        # containing spaces or shell metacharacters would break here.
        bane_cmd = "BANE --cores={0} --out={1} {2}".format(ncores, outfile_prefix, infile)
        print (bane_cmd)
        os.system(bane_cmd)
    if plot:
        plot_map(bkg_prefix, "Large scale emission in " + plot_title_suffix)
        plot_histogram(bkg_prefix, 'Emission (Jy beam^{-1} km s^{-1})',
                       "Emission for " + plot_title_suffix)
        plot_map(outfile_prefix+'_rms', "Noise in "+ plot_title_suffix)
    return bkg_file
def assess_metric(metric, threshold1, threshold2, low_good=False):
    """Rate a metric value against two thresholds.

    Values below threshold1 get one extreme rating and values at or above
    threshold2 the other (which is which depends on low_good); values in
    between are METRIC_UNCERTAIN.
    """
    below, above = (METRIC_GOOD, METRIC_BAD) if low_good else (METRIC_BAD, METRIC_GOOD)
    if metric < threshold1:
        return below
    if metric < threshold2:
        return METRIC_UNCERTAIN
    return above
def get_spectral_units(ctype, cunit, hdr):
    """Determine the display unit and scale divisor for a spectral axis.

    Returns a (unit_name, divisor) pair: axis values divided by *divisor*
    are expressed in *unit_name* (MHz for frequency axes, km/s for velocity
    axes). When the header lacks the unit keyword, velocity axis types
    default to m/s and everything else to Hz.
    """
    if cunit in hdr:
        raw_unit = hdr[cunit]
    elif ctype.startswith('VEL') or ctype.startswith('VRAD'):
        raw_unit = 'm/s'
    else:
        raw_unit = 'Hz'
    # Map raw header units to the preferred display unit and divisor.
    upscale = {'Hz': ('MHz', 1e6), 'kHz': ('MHz', 1e3), 'm/s': ('km/s', 1e3)}
    if raw_unit in upscale:
        return upscale[raw_unit]
    return raw_unit, 1
def calc_velocity_res(hdr):
    """
    Derive the channel width of a cube in km/s from its header.

    The spectral axis is assumed to be axis 3, unless axis 3 is STOKES in
    which case axis 4 is used. Frequency channel widths are converted with
    the HI-specific relation 0.5 kHz = 0.1 km/s.

    :param hdr: The FITS header of the cube.
    :return: The channel width in km/s.
    """
    # NOTE(review): spec_sys is never used below; the read does, however,
    # fail fast when the header has no SPECSYS card - confirm intended.
    spec_sys = hdr['SPECSYS']
    axis = '3' if hdr['CTYPE3'] != 'STOKES' else '4'
    spec_type = hdr['CTYPE'+axis]
    spectral_unit, spectral_conversion = get_spectral_units(spec_type, 'CUNIT'+axis, hdr)
    # spec_unit is only used for the diagnostic print below.
    if 'CUNIT'+axis in hdr.keys():
        spec_unit = hdr['CUNIT'+axis]
    #elif spec_type == 'VRAD' or spec_type == 'VEL':
    #    spec_unit = 'm/s'
    else:
        spec_unit = None
    spec_delt = hdr['CDELT'+axis]
    print ('CDELT={}, CUNIT={}, spec_unit={}, conversion={}'.format(spec_delt, spec_unit, spectral_unit, spectral_conversion))
    # Channel width in the converted display units (km/s or MHz).
    spec_res_km_s = np.abs(spec_delt) / spectral_conversion
    if spectral_unit == 'MHz':
        spec_res_km_s = spec_res_km_s/5e-4*0.1 # 0.5 kHz = 0.1 km/s
    #elif spec_unit == 'Hz':
    #    spec_res_km_s = spec_res_km_s/500*0.1 # 0.5 kHz = 0.1 km/s
    #elif spec_unit == 'kHz':
    #    spec_res_km_s = spec_res_km_s/0.5*0.1 # 0.5 kHz = 0.1 km/s
    return spec_res_km_s
def report_observation(image, reporter, input_duration, sched_info, obs_metadata):
    """
    Add the 'Observation' section to the validation report, built from the
    image header plus scheduling block info and observation metadata.

    :param image: Path of the FITS image/cube describing the observation.
    :param reporter: The ValidationReport being built.
    :param input_duration: Fallback duration in hours when the header has no DURATION.
    :param sched_info: Scheduling block info (sbid, field_name, footprint, pitch, corr_mode).
    :param obs_metadata: Optional metadata object with the list of observed fields.
    :return: The scheduling block id of the observation.
    """
    print('\nReporting observation based on ' + image)
    hdr = fits.getheader(image)
    w = WCS(hdr).celestial
    # Prefer header values, falling back to scheduling info / arguments.
    sbid = hdr['SBID'] if 'SBID' in hdr else sched_info.sbid
    project = hdr['PROJECT'] if 'PROJECT' in hdr else ''
    proj_link = None
    if project.startswith('AS'):
        # ASKAP science project codes link to their CSIRO data pages.
        proj_link = "https://confluence.csiro.au/display/askapsst/{0}+Data".format(project)
    date = hdr['DATE-OBS']
    duration = float(hdr['DURATION'])/3600 if 'DURATION' in hdr else input_duration
    naxis1 = int(hdr['NAXIS1'])
    naxis2 = int(hdr['NAXIS2'])
    # Convert the central pixel to a world position (1-based pixel origin).
    pixcrd = np.array([[naxis1/2, naxis2/2]])
    centre = w.all_pix2world(pixcrd,1)
    centre = SkyCoord(ra=centre[0][0], dec=centre[0][1], unit="deg,deg").to_string(style='hmsdms',sep=':')
    # spectral axis
    # NOTE(review): spec_title is only assigned when a VEL/VRAD/FREQ axis is
    # found; a plain 2D image would raise NameError further down - confirm
    # callers always pass a cube/image with a spectral axis.
    spectral_unit = 'None'
    spectral_range = ''
    for i in range(3,int(hdr['NAXIS'])+1):
        ctype = hdr['CTYPE'+str(i)]
        if (ctype.startswith('VEL') or ctype.startswith('VRAD') or ctype.startswith('FREQ')):
            key = 'CUNIT'+str(i)
            spectral_unit, spectral_conversion = get_spectral_units(ctype, key, hdr)
            step = float(hdr['CDELT'+str(i)])
            #print ('step {} rval {} rpix {} naxis {}'.format(step, hdr['CRVAL'+str(i)], hdr['CRPIX'+str(i)], hdr['NAXIS'+str(i)]))
            # World value of the first channel, in display units.
            spec_start = (float(hdr['CRVAL'+str(i)]) - (step*(float(hdr['CRPIX'+str(i)])-1)))/spectral_conversion
            if int(hdr['NAXIS'+str(i)]) > 1:
                spec_end = spec_start + (step * (int(hdr['NAXIS'+str(i)]-1)))/spectral_conversion
                # Always report the range low-to-high regardless of step sign.
                if step > 0:
                    spectral_range = '{:0.3f} - {:0.3f}'.format(spec_start, spec_end)
                else:
                    spectral_range = '{:0.3f} - {:0.3f}'.format(spec_end, spec_start)
                spec_title = 'Spectral Range'
            else:
                # Degenerate (single plane) axis - report the centre value.
                centre_freq = (float(hdr['CRVAL'+str(i)]) - (step*(float(hdr['CRPIX'+str(i)])-1)))/spectral_conversion
                spectral_range = '{:0.3f}'.format(centre_freq)
                spec_title = 'Centre Freq'
    # Field info
    if obs_metadata:
        # Multiple fields: list each name/centre on its own line.
        field_names = ''
        field_centres = ''
        for i,field in enumerate(obs_metadata.fields):
            if i > 0:
                field_names += '<br/>'
                field_centres += '<br/>'
            field_names += field.name
            field_centres += field.ra + ' ' + field.dec
    else:
        field_names = sched_info.field_name
        field_centres = centre
    footprint = sched_info.footprint
    if footprint and sched_info.pitch:
        footprint = "{}_{}".format(footprint, sched_info.pitch)
    section = ReportSection('Observation')
    section.add_item('SBID', value=sbid)
    section.add_item('Project', value=project, link=proj_link)
    section.add_item('Date', value=date)
    section.add_item('Duration<br/>(hours)', value='{:.2f}'.format(duration))
    section.add_item('Field(s)', value=field_names)
    section.add_item('Field Centre(s)', value=field_centres)
    section.add_item('Correlator<br/>Mode', value=sched_info.corr_mode)
    section.add_item('Footprint', value=footprint)
    section.add_item('{}<br/>({})'.format(spec_title, spectral_unit), value=spectral_range)
    reporter.add_section(section)
    reporter.project = project
    return sbid
def report_cube_stats(cube, reporter):
    """
    Add the 'Image Cube' section to the validation report: software
    versions, synthesised beam size and cube dimensions from the header.

    :param cube: Path of the spectral cube FITS file.
    :param reporter: The ValidationReport being built.
    """
    print ('\nReporting cube stats')
    hdr = fits.getheader(cube)
    w = WCS(hdr).celestial
    # Cube information
    askapSoftVer = 'N/A'
    askapPipelineVer = 'N/A'
    history = hdr['history']
    askapSoftVerPrefix = 'Produced with ASKAPsoft version '
    askapPipelinePrefix = 'Processed with ASKAP pipeline version '
    # Scan HISTORY cards for the processing software version records.
    for row in history:
        if row.startswith(askapSoftVerPrefix):
            askapSoftVer = row[len(askapSoftVerPrefix):]
        elif row.startswith(askapPipelinePrefix):
            askapPipelineVer = row[len(askapPipelinePrefix):]
    beam = 'N/A'
    if 'BMAJ' in hdr:
        # Beam axes in the header are degrees; report them in arcsec.
        beam_maj = hdr['BMAJ'] * 60 * 60
        beam_min = hdr['BMIN'] * 60 * 60
        beam = '{:.1f} x {:.1f}'.format(beam_maj, beam_min)
    dims = []
    for i in range(1,int(hdr['NAXIS'])+1):
        dims.append(str(hdr['NAXIS'+str(i)]))
    dimensions = ' x '.join(dims)
    # self.area,self.solid_ang = get_pixel_area(fits, nans=True, ra_axis=self.ra_axis, dec_axis=self.dec_axis, w=w)
    cube_name = os.path.basename(cube)
    section = ReportSection('Image Cube', cube_name)
    section.add_item('ASKAPsoft<br/>version', value=askapSoftVer)
    section.add_item('Pipeline<br/>version', value=askapPipelineVer)
    section.add_item('Synthesised Beam<br/>(arcsec)', value=beam)
    # Sky area is not currently calculated for cubes (see commented-out call above).
    section.add_item('Sky Area<br/>(deg2)', value='')
    section.add_item('Dimensions', value=dimensions)
    reporter.add_section(section)
    return
def check_for_emission(cube, vel_start, vel_end, reporter, dest_folder, ncores=8, redo=False):
    """
    Validate that large scale emission is present in the expected velocity
    range. Builds a moment 0 map over the range, runs BANE on it, and
    reports the peak background emission (per km/s) as a metric.

    :param cube: Path of the spectral cube FITS file.
    :param vel_start: Start of the velocity range (velocity Quantity).
    :param vel_end: End of the velocity range (velocity Quantity).
    :param reporter: The ValidationReport being built.
    :param dest_folder: The report output folder.
    :param ncores: Number of cores used by the BANE run.
    :param redo: Rerun BANE even when its output already exists.
    """
    print ('\nChecking for presence of emission in {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
    # Extract a moment 0 map
    slab = extract_slab(cube, vel_start, vel_end)
    if slab is None:
        print ("** No data for the emission range - skipping check **")
        return
    num_channels = slab.shape[0]
    hdr = fits.getheader(cube)
    spec_res_km_s = calc_velocity_res(hdr)
    mom0 = slab.moment0()
    prefix = build_fname(cube, '_mom0')
    folder = get_figures_folder(dest_folder)
    mom0_fname = folder + prefix + '.fits'
    mom0.write(mom0_fname, overwrite=True)
    hi_data = fits.open(mom0_fname)
    plot_title_suffix = "emission region in " + os.path.basename(cube)
    plot_difference_map(hi_data[0], folder+prefix, "Moment 0 map of " + plot_title_suffix)
    # Produce the background plots
    bkg_data = get_bane_background(mom0_fname, folder+prefix, plot_title_suffix, ncores=ncores, redo=redo)
    map_page = folder + '/emission.html'
    rel_map_page = get_figures_folder('.') + '/emission.html'
    output_map_page(map_page, prefix, 'Emission Plots for ' + os.path.basename(cube))
    # Peak of the BANE large-scale background of the moment 0 map.
    hi_data = fits.open(folder + prefix+'_bkg.fits')
    max_em = np.nanmax(hi_data[0].data)
    # Normalise the integrated (moment 0) value back to intensity per km/s.
    max_em_per_kms = max_em / (spec_res_km_s * num_channels)
    # assess
    cube_name = os.path.basename(cube)
    section = ReportSection('Presence of Emission', cube_name)
    section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
    section.add_item('Channels', value='{}'.format(num_channels))
    section.add_item('Large Scale<br/>Emission Map', link=rel_map_page, image='figures/'+prefix+'_bkg_sml.png')
    section.add_item('Emission Histogram', link='figures/'+prefix+'_bkg_hist.png', image='figures/'+prefix+'_bkg_hist_sml.png')
    section.add_item('Max Emission<br/>(Jy beam<sup>-1</sup>)', value='{:.3f}'.format(max_em_per_kms))
    reporter.add_section(section)
    metric = ValidationMetric('Presence of Emission',
        'Maximum large scale emission intensity in the velocity range where emission is expected.',
        int(max_em_per_kms), assess_metric(max_em_per_kms, 12, 20))
    reporter.add_metric(metric)
    return
def check_for_non_emission(cube, vel_start, vel_end, reporter, dest_folder, ncores=8, redo=False):
    """
    Validate that no large scale emission is present in the velocity range
    where none is expected. Builds a moment 0 map over the range, runs BANE
    on it, and reports the peak background emission (per km/s) as a metric.

    :param cube: Path of the spectral cube FITS file.
    :param vel_start: Start of the velocity range (velocity Quantity).
    :param vel_end: End of the velocity range (velocity Quantity).
    :param reporter: The ValidationReport being built.
    :param dest_folder: The report output folder.
    :param ncores: Number of cores used by the BANE run.
    :param redo: Rerun BANE even when its output already exists.
    :return: The extracted slab (reused by measure_spectral_line_noise), or None.
    """
    print ('\nChecking for absence of emission in {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
    # Extract a moment 0 map
    slab = extract_slab(cube, vel_start, vel_end)
    if slab is None:
        print ("** No data for the non-emission range - skipping check **")
        return None
    num_channels = slab.shape[0]
    hdr = fits.getheader(cube)
    spec_res_km_s = calc_velocity_res(hdr)
    mom0 = slab.moment0()
    prefix = build_fname(cube, '_mom0_off')
    folder = get_figures_folder(dest_folder)
    mom0_fname = folder + prefix + '.fits'
    mom0.write(mom0_fname, overwrite=True)
    hi_data = fits.open(mom0_fname)
    plot_title_suffix = "non-emission region in " + os.path.basename(cube)
    plot_difference_map(hi_data[0], folder+prefix, "Moment 0 map of " + plot_title_suffix)
    # Produce the background plots
    bkg_data = get_bane_background(mom0_fname, folder+prefix, plot_title_suffix, ncores=ncores, redo=redo)
    map_page = folder + '/off_emission.html'
    rel_map_page = get_figures_folder('.') + '/off_emission.html'
    output_map_page(map_page, prefix, 'Off-line Emission Plots for ' + os.path.basename(cube))
    # Peak of the BANE large-scale background of the moment 0 map.
    hi_data = fits.open(folder+prefix+'_bkg.fits')
    max_em = np.nanmax(hi_data[0].data)
    # Normalise the integrated (moment 0) value back to intensity per km/s.
    max_em_per_kms = max_em / (spec_res_km_s * num_channels)
    # assess
    cube_name = os.path.basename(cube)
    section = ReportSection('Absence of Off-line Emission', cube_name)
    section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
    section.add_item('Channels', value='{}'.format(num_channels))
    section.add_item('Large Scale<br/>Emission Map', link=rel_map_page, image='figures/'+prefix+'_bkg_sml.png')
    section.add_item('Emission Histogram', link='figures/'+prefix+'_bkg_hist.png', image='figures/'+prefix+'_bkg_hist_sml.png')
    section.add_item('Max Emission<br/>(Jy beam<sup>-1</sup>)', value='{:.3f}'.format(max_em_per_kms))
    reporter.add_section(section)
    metric = ValidationMetric('Absence of Off-line Emission',
        'Maximum large scale emission intensity in the velocity range where emission is not expected.',
        int(max_em_per_kms), assess_metric(max_em_per_kms, 5, 12, low_good=True))
    reporter.add_metric(metric)
    return slab
def calc_theoretical_rms(chan_width, t_obs= 12*60*60, n_ant=36):
    """
    Calculate the theoretical rms noise for ASKAP, assuming natural
    weighting and ignoring the fraction of flagged data. Uses the SEFD
    from Hotan et al 2021.

    Parameters
    ----------
    chan_width : int
        channel width in Hz
    t_obs : int
        duration of the observation in seconds
    n_ant : int
        Number of antennae

    Returns
    -------
    float
        Theoretical RMS in mJy
    """
    #cor_eff = 0.8 # correlator efficiency - WALLABY
    cor_eff = 1.0 # correlator efficiency - assumed to be included in the SEFD
    n_pol = 2.0 # Number of polarisation, npol = 2 for images in Stokes I, Q, U, or V
    #sefd = 1700*u.Jy # As measured in SB 9944
    sefd = 1800*u.Jy # Hotan et al 2021
    # Radiometer equation: noise falls with the root of the number of
    # independent samples (baselines x polarisations x bandwidth x time).
    sample_factor = n_pol * n_ant * (n_ant - 1) * chan_width * t_obs
    rms = sefd / (cor_eff * math.sqrt(sample_factor))
    return rms.to(u.mJy).value
def measure_spectral_line_noise(slab, cube, vel_start, vel_end, reporter, dest_folder, duration, redo=False):
    """
    Measure the noise along the spectral axis of the emission-free slab and
    compare it with the theoretical ASKAP noise for the observation length.

    :param slab: The (already extracted, Jy-converted) emission-free slab, or None.
    :param cube: Path of the spectral cube FITS file.
    :param vel_start: Start of the velocity range (velocity Quantity).
    :param vel_end: End of the velocity range (velocity Quantity).
    :param reporter: The ValidationReport being built.
    :param dest_folder: The report output folder.
    :param duration: Observation duration in hours.
    :param redo: Accepted for a consistent signature; not used in this check.
    """
    print ('\nMeasuring the spectral line noise levels across {:.0f} < v < {:.0f}'.format(vel_start, vel_end))
    if slab is None:
        print ("** No data for the non-emission range - skipping check **")
        return
    # Extract header details
    hdr = fits.getheader(cube)
    spec_sys = hdr['SPECSYS']
    # Spectral axis is axis 3 unless axis 3 is the STOKES axis.
    axis_num = '3' if hdr['CTYPE3'] != 'STOKES' else '4'
    spec_type = hdr['CTYPE'+axis_num]
    axis = spec_sys + ' ' + spec_type
    spec_res_km_s = calc_velocity_res(hdr)
    # Scale the noise to mJy / 5 kHz channel
    # NOTE(review): relies on the 0.5 kHz = 0.1 km/s relation used in
    # calc_velocity_res, so 5 kHz corresponds to 1 km/s - confirm scaling.
    std_data = np.nanstd(slab.unmasked_data[:], axis=0)
    noise_5kHz = std_data / np.sqrt(1 / spec_res_km_s)
    noise_5kHz = noise_5kHz.to(u.mJy) # Jy => mJy
    # Extract the spectral line noise map
    # Reuse the header of the off-line moment 0 map for the noise image WCS.
    mom0_prefix = build_fname(cube, '_mom0_off')
    folder = get_figures_folder(dest_folder)
    mom0_fname = folder + mom0_prefix + '.fits'
    prefix = build_fname(cube, '_spectral_noise')
    noise_fname = folder + prefix + '.fits'
    fits.writeto(noise_fname, noise_5kHz.value, fits.getheader(mom0_fname), overwrite=True)
    # Produce the noise plots
    cube_name = os.path.basename(cube)
    plot_map(folder+prefix, "Spectral axis noise map for " + cube_name, cmap='mako_r', stretch='arcsinh',
        colorbar_label=r'Noise level per 5 kHz channel (mJy beam$^{-1}$)')
    plot_histogram(folder+prefix, r'Noise level per 5 kHz channel (mJy beam$^{-1}$)', 'Spectral axis noise for ' + cube_name)
    # Ignore exactly-zero pixels (blank/unmapped regions) in the median.
    median_noise_5kHz = np.nanmedian(noise_5kHz.value[noise_5kHz.value!=0.0])
    theoretical_gaskap_noise = calc_theoretical_rms(5000, t_obs=duration*60*60) # mJy per 5 kHz for the observation duration
    print ("Theoretical noise {:.3f} mJy/beam".format(theoretical_gaskap_noise))
    median_ratio = median_noise_5kHz / theoretical_gaskap_noise
    # assess
    cube_name = os.path.basename(cube)
    section = ReportSection('Spectral Line Noise', cube_name)
    section.add_item('Velocity Range<br/>(km/s LSR)', value='{:.0f} to {:.0f}'.format(vel_start.value, vel_end.value))
    section.add_item('Spectral Axis', value=axis)
    section.add_item('Spectral Resolution<br/>(kms)', value='{}'.format(round(spec_res_km_s,2)))
    section.add_item('Spectral Axis<br/>Noise Map', link='figures/'+prefix+'.png', image='figures/'+prefix+'_sml.png')
    section.add_item('Spectral Axis<br/>Noise Histogram', link='figures/'+prefix+'_hist.png', image='figures/'+prefix+'_hist_sml.png')
    section.add_item('Spectral Axis Noise<br/>(mJy per 5 kHz)', value='{:.3f}'.format(median_noise_5kHz))
    section.add_item('Spectral Axis Noise<br/>(vs theoretical for {:.2f} hr)'.format(duration), value='{:.3f}'.format(median_ratio))
    reporter.add_section(section)
    metric = ValidationMetric('Spectral Noise',
        '1-sigma spectral noise comparison to theoretical per 5 kHz channel for {:.2f} hr observation.'.format(duration),
        round(median_ratio,3), assess_metric(median_ratio,
        np.sqrt(2), np.sqrt(2)*2, low_good=True))
    reporter.add_metric(metric)
    return
def get_pixel_area(fits_file,flux=0,nans=False,ra_axis=0,dec_axis=1,w=None):
    """For a given image, get the area and solid angle of all non-nan pixels or all pixels below a certain flux (doesn't count pixels=0).
    The RA and DEC axes follow the WCS convention (i.e. starting from 0).

    Arguments:
    ----------
    fits_file : astropy.io.fits
        The primary axis of a fits image.

    Keyword arguments:
    ------------------
    flux : float
        The flux in Jy, below which pixels will be selected.
    nans : bool
        Derive the area and solid angle of all non-nan pixels.
    ra_axis : int
        The index of the RA axis (starting from 0).
    dec_axis : int
        The index of the DEC axis (starting from 0).
    w : astropy.wcs.WCS
        A wcs object to use for reading the pixel sizes.

    Returns:
    --------
    area : float
        The area in square degrees.
    solid_ang : float
        The solid angle in steradians.

    See Also:
    ---------
    astropy.io.fits
    astropy.wcs.WCS"""
    if w is None:
        w = WCS(fits_file.header)
    pixels = fits_file.data
    # Select the pixels of interest; zero-valued pixels never count.
    if nans:
        mask = (~np.isnan(pixels)) & (pixels != 0)
    else:
        mask = (pixels < flux) & (pixels != 0)
    count = pixels[mask].shape[0]
    pixel_area_deg2 = np.abs(w.wcs.cdelt[ra_axis]) * np.abs(w.wcs.cdelt[dec_axis])
    area = count * pixel_area_deg2
    solid_ang = area * (np.pi / 180) ** 2
    return area, solid_ang
def report_image_stats(image, noise_file, reporter, dest_folder, diagnostics_dir, ncores=8, redo=False):
    """
    Add the 'Image' section to the validation report: software versions,
    beam size, sky area and the pipeline's source map for the continuum image.

    :param image: Path of the continuum FITS image.
    :param noise_file: Optional rms map (currently unused - see commented code).
    :param reporter: The ValidationReport being built.
    :param dest_folder: The report output folder.
    :param diagnostics_dir: Folder holding ASKAP pipeline diagnostic plots.
    :param ncores: Number of cores for a BANE run (currently unused).
    :param redo: Rerun intermediate steps (currently unused).
    """
    print ('\nReporting image stats')
    fits_file = fits.open(image)
    hdr = fits_file[0].header
    w = WCS(hdr).celestial
    fig_folder= get_figures_folder(dest_folder)
    # Image information
    askapSoftVer = 'N/A'
    askapPipelineVer = 'N/A'
    history = hdr['history']
    askapSoftVerPrefix = 'Produced with ASKAPsoft version '
    askapPipelinePrefix = 'Processed with ASKAP pipeline version '
    # Scan HISTORY cards for the processing software version records.
    for row in history:
        if row.startswith(askapSoftVerPrefix):
            askapSoftVer = row[len(askapSoftVerPrefix):]
        elif row.startswith(askapPipelinePrefix):
            askapPipelineVer = row[len(askapPipelinePrefix):]
    beam = 'N/A'
    if 'BMAJ' in hdr:
        # Beam axes in the header are degrees; report them in arcsec.
        beam_maj = hdr['BMAJ'] * 60 * 60
        beam_min = hdr['BMIN'] * 60 * 60
        beam = '{:.1f} x {:.1f}'.format(beam_maj, beam_min)
    # Analyse image data
    area,solid_ang = get_pixel_area(fits_file[0], nans=False)
    # if not noise_file:
    #     prefix = build_fname(image, '')
    #     folder = get_figures_folder(dest_folder)
    #     noise_file = get_bane_background(image, folder+prefix, redo=redo, plot=False)
    # rms_map = fits.open(noise_file)[0]
    img_data = fits_file[0].data
    # NOTE(review): img_peak is computed but not reported - the rms/dynamic
    # range reporting below is currently disabled.
    img_peak = np.max(img_data[~np.isnan(img_data)])
    # rms_bounds = rms_map.data > 0
    # img_rms = int(np.median(rms_map.data[rms_bounds])*1e6) #uJy
    # img_peak_bounds = np.max(img_data[rms_bounds])
    # img_peak_pos = np.where(img_data == img_peak_bounds)
    # img_peak_rms = rms_map.data[img_peak_pos][0]
    # dynamic_range = img_peak_bounds/img_peak_rms
    #img_flux = np.sum(img_data[~np.isnan(img_data)]) / (1.133*((beam_maj * beam_min) / (img.raPS * img.decPS))) #divide by beam area
    # Copy pipleine plots
    # copy_existing_image / add_opt_image_section are helpers defined
    # elsewhere in this file.
    field_src_plot = copy_existing_image(diagnostics_dir+'/image.i.SB*.cont.restored_sources.png', fig_folder)
    image_name = os.path.basename(image)
    section = ReportSection('Image', image_name)
    section.add_item('ASKAPsoft<br/>version', value=askapSoftVer)
    section.add_item('Pipeline<br/>version', value=askapPipelineVer)
    section.add_item('Synthesised Beam<br/>(arcsec)', value=beam)
    add_opt_image_section('Source Map', field_src_plot, fig_folder, dest_folder, section)
    # section.add_item('Median r.m.s.<br/>(uJy)', value='{:.2f}'.format(img_rms))
    # section.add_item('Image peak<br/>(Jy)', value='{:.2f}'.format(img_peak_bounds))
    # section.add_item('Dynamic Range', value='{:.2f}'.format(dynamic_range))
    section.add_item('Sky Area<br/>(deg2)', value='{:.2f}'.format(area))
    reporter.add_section(section)
    return
def set_velocity_range(emvelstr, nonemvelstr):
    """
    Set the global emission and non-emission velocity ranges from the
    supplied lower bound velocities.

    Each value must be one of the GASS velocity steps in vel_steps (and not
    the last step); the range runs from that step to the next one. The
    module-level Quantity arrays are updated in place.

    :param emvelstr: Lower bound (km/s) of the expected-emission range.
    :param nonemvelstr: Lower bound (km/s) of the expected-non-emission range.
    :raises ValueError: if a value is not a supported step or is the last step.
    """
    emvel = int(emvelstr)
    if not emvel in vel_steps:
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(emvel))
    nonemvel = int(nonemvelstr)
    if not nonemvel in vel_steps:
        # Bug fix: this message previously reported emvel instead of the
        # offending nonemvel value.
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(nonemvel))

    idx = vel_steps.index(emvel)
    if idx +1 >= len(vel_steps):
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(emvel))
    # emission_vel_range is a module-level Quantity array - update in place.
    emission_vel_range[0]=vel_steps[idx]*u.km/u.s
    emission_vel_range[1]=vel_steps[idx+1]*u.km/u.s
    print ('\nSet emission velocity range to {:.0f} < v < {:.0f}'.format(emission_vel_range[0], emission_vel_range[1]))

    idx = vel_steps.index(nonemvel)
    if idx +1 >= len(vel_steps):
        # Bug fix: this message previously reported emvel instead of the
        # offending nonemvel value.
        raise ValueError('Velocity {} is not one of the supported GASS velocity steps e.g. 165, 200.'.format(nonemvel))
    non_emission_val_range[0]=vel_steps[idx]*u.km/u.s
    non_emission_val_range[1]=vel_steps[idx+1]*u.km/u.s
    print ('\nSet non emission velocity range to {:.0f} < v < {:.0f}'.format(non_emission_val_range[0], non_emission_val_range[1]))
def identify_periodicity(spectrum):
    """
    Check if there are periodic features in a spectrum. This tests if there are patterns which are
    present in the spectrum seperated by a specific number of channels (or lag). i.e. if the same
    pattern repeats every so many channels. Only features with at least 3-sigma significance are
    returned.

    Arguments:
    ----------
    spectrum : array-like
        The numerical spectrum.

    Returns:
    --------
    repeats: array
        The lag intervals that have 3-sigma or greater periodic features
    sigma: array
        The significance of each repeat value, in sigma.
    """
    # Partial auto-correlation reveals patterns repeating at specific lags.
    max_lag = min(50, len(spectrum)//5)
    pacf = stattools.pacf(spectrum, nlags=max_lag)
    # Significance is measured against the scatter of the non-zero lags.
    sigma = pacf / np.std(pacf[1:])
    candidate_lags = (sigma > 3).nonzero()[0]
    # Only lags above 3 channels are considered meaningful repeats.
    repeats = candidate_lags[candidate_lags > 3]
    return repeats, sigma[repeats]
def plot_all_spectra(spectra, names, velocities, em_unit, vel_unit, figures_folder, prefix):
    """
    Plot each spectrum in its own subplot (3 columns) and save a PDF.

    :param spectra: List of flux arrays, one per source.
    :param names: Source names matching the spectra (bytes or str).
    :param velocities: Shared velocity axis values.
    :param em_unit: Label for the emission (y) axis.
    :param vel_unit: Velocity unit name used in the x axis label.
    :param figures_folder: Folder where the PDF is written.
    :param prefix: File name prefix for the output plot.
    """
    # Use a very tall page when there are many spectra.
    fig = plt.figure(figsize=(18, 72) if len(spectra) > 20 else (18, 12))
    num_rows = math.ceil(len(spectra)/3)
    for idx, spectrum in enumerate(spectra):
        label = get_str(names[idx])
        ax = fig.add_subplot(num_rows, 3, idx+1)
        ax.plot(velocities, spectrum, linewidth=1)
        ax.set_title(label)
        ax.grid()
        # Bug fix: only the bottom row gets an x axis label. The previous
        # condition (idx > 2*num_rows) labelled the wrong subplots.
        if idx >= 3 * (num_rows - 1):
            ax.set_xlabel("$v_{LSRK}$ " + '({})'.format(vel_unit))
        # Leftmost column gets the y axis label.
        if idx % 3 == 0:
            ax.set_ylabel(em_unit)
    fig.tight_layout()
    fig.savefig(figures_folder+'/'+prefix+'-spectra-individual.pdf')
def plot_overlaid_spectra(spectra, names, velocities, em_unit, vel_unit, figures_folder, cube_name, prefix):
    """
    Plot all spectra overlaid on shared axes and save a png plus thumbnail.

    With more than 36 spectra, the plot is split into three panels, one per
    interleave, selected by the 4th-last character of each source name
    (assumed to be the interleave letter A/B/C - TODO confirm the naming
    convention this relies on).

    :param spectra: List of flux arrays, one per source.
    :param names: Source names matching the spectra (bytes or str).
    :param velocities: Shared velocity axis values.
    :param em_unit: Label for the emission (y) axis.
    :param vel_unit: Velocity unit name used in the x axis label.
    :param figures_folder: Folder where the plots are written.
    :param cube_name: Cube name used in the single-panel title.
    :param prefix: File name prefix for the output plots.
    """
    fig = plt.figure(figsize=(18, 12))
    axes = []
    if len(spectra) > 36:
        # One panel per interleave (A/B/C).
        for i in range(1,4):
            ax = fig.add_subplot(3,1,i)
            axes.append(ax)
    else:
        ax = fig.add_subplot()
        axes.append(ax)
    for i, spec in enumerate(spectra):
        label = get_str(names[i])
        idx = 0
        if len(axes) > 1:
            # Pick the panel from the interleave letter in the source name.
            interleave = label[-4]
            idx = ord(interleave) - ord('A')
        ax = axes[idx]
        ax.plot(velocities, spec, label=label)
    for idx, ax in enumerate(axes):
        ax.set_xlabel("$v_{LSRK}$ " + '({})'.format(vel_unit))
        ax.set_ylabel(em_unit)
        ax.legend()
        ax.grid()
        if len(axes) > 1:
            ax.set_title('Spectra for all beams in interleave {}'.format(chr(ord('A')+idx)))
        else:
            ax.set_title('Spectra for {} brightest sources in {}'.format(len(spectra), cube_name))
    plt.savefig(figures_folder+'/'+prefix+'-spectra.png')
    plt.savefig(figures_folder+'/'+prefix+'-spectra_sml.png', dpi=16)
def output_spectra_page(filename, prefix, title):
    """Write an HTML page linking the combined and individual spectra plots."""
    plots = (('All Spectra', prefix + '-spectra.png'),
             ('Individual Spectra', prefix + '-spectra-individual.pdf'))
    with open(filename, 'w') as mp:
        mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
        mp.write('\n<body>\n<h1>{}</h1>'.format(title))
        for plot_title, image in plots:
            output_plot(mp, plot_title, image)
        mp.write('\n</body>\n</html>\n')
def plot_periodic_spectrum(spectrum, fig, name):
    """Plot a spectrum and its partial auto-correlation as a two-panel figure."""
    spec_ax = fig.add_subplot(211)
    spec_ax.plot(spectrum)
    spec_ax.set_title('Spectrum for ' + name)
    spec_ax.grid()
    # Lower panel shows the pacf used by the periodicity check.
    pacf_ax = fig.add_subplot(212)
    plot_pacf(spectrum, lags=50, ax=pacf_ax)
    fig.tight_layout()
def output_periodic_spectra_page(filename, prefix, title, periodic, detections):
    """Write an HTML page of periodicity plots, one per flagged source.

    *periodic* and *detections* are parallel lists of source names and
    their detection details.
    """
    with open(filename, 'w') as mp:
        mp.write('<html>\n<head><title>{}</title>\n</head>'.format(title))
        mp.write('\n<body>\n<h1>{}</h1>'.format(title))
        for src_name, detection in zip(periodic, detections):
            output_plot(mp, src_name, prefix + '{}_periodicity.png'.format(src_name))
            mp.write('<p>{}</p>'.format(detection))
        mp.write('\n</body>\n</html>\n')
def save_spectum(name, velocities, fluxes, ra, dec, spectra_folder):
    """Save a single spectrum as a VOTable, recording the source position.

    The position is stored both in the table meta and as INFO elements.
    """
    spec_table = Table([velocities, fluxes],
                       names=['Velocity', 'Emission'],
                       meta={'ID': name, 'RA' : ra, 'Dec': dec})
    votable = from_table(spec_table)
    for info_name, info_value in (('RA', ra), ('Dec', dec)):
        votable.infos.append(Info(info_name, info_name, info_value))
    writeto(votable, '{}/{}.vot'.format(spectra_folder, name))
def extract_spectra(cube, source_cat, dest_folder, reporter, num_spectra, beam_list, slab_size=40):
    """Extract, save and analyse spectra for bright sources and beam centres.

    Reads the `num_spectra` brightest sources from `source_cat` and the beam
    centres from `beam_list`, extracts a spectrum at each position from the
    cube (scanned in channel slabs of `slab_size`), saves each spectrum as a
    VOTable, produces overview plots/HTML pages, checks each bright-source
    spectrum for periodicity, and adds a section and a metric to `reporter`.
    """
    print('\nExtracting spectra for the {} brightest sources in {} and beams listed in {}'.format(
        num_spectra, source_cat, beam_list))
    # Prepare the output folders
    spectra_folder = dest_folder + '/spectra'
    if not os.path.exists(spectra_folder):
        os.makedirs(spectra_folder)
    figures_folder = dest_folder + '/figures'
    # Read the source list and identify the brightest sources
    bright_srcs = []
    bright_src_pos = []
    if source_cat:
        votable = parse(source_cat, pedantic=False)
        sources = votable.get_first_table()
        srcs_tab = sources.to_table()
        # The component name column differs between catalogue variants
        # NOTE(review): comp_name_key is left unbound if neither key is
        # present, which would raise NameError at the sort below -- confirm
        # all expected catalogues carry one of these columns.
        for key in ('component_name', 'col_component_name'):
            if key in srcs_tab.keys():
                comp_name_key = key
                break
        # Take the num_spectra sources with the highest peak flux
        bright_idx = np.argsort(sources.array['flux_peak'])[-num_spectra:]
        bright_srcs = sources.array[bright_idx]
        bright_srcs.sort(order=comp_name_key)
        for idx, src in enumerate(bright_srcs):
            pos = SkyCoord(ra=src['ra_deg_cont']*u.deg, dec=src['dec_deg_cont']*u.deg)
            bright_src_pos.append(pos)
    # Read the beams
    beams = []
    if beam_list:
        beams = ascii.read(beam_list)
        beams.add_column(Column(name='pos', data=np.empty((len(beams)), dtype=object)))
        beams.add_column(Column(name='name', data=np.empty((len(beams)), dtype=object)))
        for beam in beams:
            # Beam positions are given in radians in columns 3 and 4
            name = '{}-{:02d}'.format(beam['col1'], beam['col2'])
            pos = SkyCoord(ra=beam['col3']*u.rad, dec=beam['col4']*u.rad)
            beam['name'] = name
            beam['pos'] = pos
    # Read the cube
    spec_cube = SpectralCube.read(cube)
    vel_cube = spec_cube.with_spectral_unit(u.m/u.s, velocity_convention='radio')
    wcs = vel_cube.wcs.celestial
    spec_len =vel_cube.shape[0]
    header = fits.getheader(cube)
    # Identify the target pixels for each spectrum
    pix_pos_bright = []
    pix_pos_beam = []
    for idx, source in enumerate(bright_srcs):
        # (duplicate assignment below left as-is; harmless)
        pos = pos = bright_src_pos[idx]
        pixel = pos.to_pixel(wcs=wcs)
        rnd = np.round(pixel)
        pix_pos_bright.append((int(rnd[0]), int(rnd[1])))
    for source in beams:
        pos = source['pos']
        pixel = pos.to_pixel(wcs=wcs)
        rnd = np.round(pixel)
        pix_pos_beam.append((int(rnd[0]), int(rnd[1])))
    # Extract the spectra
    start = time.time()
    print("  ## Started spectra extract at {} ##".format(
        (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))))
    prev = start
    # Pre-allocate one full-length spectrum per target position
    spectra_bright = []
    for p in pix_pos_bright:
        spectra_bright.append(np.zeros(spec_len))
    spectra_beam = []
    for p in pix_pos_beam:
        spectra_beam.append(np.zeros(spec_len))
    # Extract using slabs
    unit = None
    prev = time.time()
    for i in range(0,spec_len,slab_size):
        max_idx = min(i+slab_size, spec_len)
        slab = extract_channel_slab(cube, i, max_idx)
        checkpoint = time.time()
        print (slab)
        unit = slab.unit
        # Copy this slab's channels into each target spectrum (note y,x order)
        for j, pos in enumerate(pix_pos_bright):
            data = slab[:,pos[1], pos[0]]
            #data = convert_data_to_jy(data, header)
            spectra_bright[j][i:max_idx] = data.value
        for j, pos in enumerate(pix_pos_beam):
            data = slab[:,pos[1], pos[0]]
            spectra_beam[j][i:max_idx] = data.value
        print ("Scanning slab of channels {} to {}, took {:.2f} s".format(i, max_idx-1, checkpoint-prev))
        prev = checkpoint
    end = time.time()
    print("  ## Finished spectra extract at {}, took {:.2f} s ##".format(
        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)), end-start))
    # Save the spectra
    names = bright_srcs['component_name']
    for idx, spec in enumerate(spectra_bright):
        name = get_str(names[idx])
        pos = bright_src_pos[idx]
        save_spectum(name, vel_cube.spectral_axis.to(u.km/u.s), spec*vel_cube.unit, pos.ra.deg, pos.dec.deg, spectra_folder)
    for idx, spec in enumerate(spectra_beam):
        name = beams[idx]['name']
        pos = beams[idx]['pos']
        save_spectum(name, vel_cube.spectral_axis.to(u.km/u.s), spec*vel_cube.unit, pos.ra.deg, pos.dec.deg, spectra_folder)
    # Plot the spectra
    em_unit = str(vel_cube.unit)
    velocities = vel_cube.spectral_axis.to(u.km/u.s)
    plot_overlaid_spectra(spectra_bright, names, velocities, em_unit, 'km/s', figures_folder, os.path.basename(cube), 'bright')
    plot_all_spectra(spectra_bright, names, velocities, em_unit, 'km/s', figures_folder, 'bright')
    bright_spectra_file = figures_folder+'/bright_spectra.html'
    output_spectra_page(bright_spectra_file, './bright', "Spectra for 15 Brightest Sources")
    if beam_list:
        beam_names = beams['name']
        spec_res_hz = Spectra.get_spec_resolution(header)
        print ('Spec res (hz) {}'.format(spec_res_hz))
        theoretical_noise = calc_theoretical_rms(spec_res_hz)
        print ('Theoretical noise (mJy) {}'.format(theoretical_noise))
        plot_overlaid_spectra(spectra_beam, beam_names, velocities, em_unit, 'km/s', figures_folder, os.path.basename(cube), 'beam')
        Spectra.plot_beam_locs(cube, beams, theoretical_noise, figures_folder+'/beam_comparison', spectra_folder)
        plot_all_spectra(spectra_beam, beam_names, velocities, em_unit, 'km/s', figures_folder, 'beam')
        beam_spectra_file = figures_folder+'/beam_spectra.html'
        output_spectra_page(beam_spectra_file, './beam', "Spectra for centre of each beam")
    # Check for periodicity in the spectra
    num_bright_periodic = 0
    bright_periodic = []
    detections = []
    for idx, spec in enumerate(spectra_bright):
        if spec.any():
            repeats, sig = identify_periodicity(spec)
            if len(repeats)>0:
                # NOTE(review): the running total is printed before the
                # increment, so "total now" lags by one.
                num_bright_periodic += 1
                name = get_str(names[idx])
                bright_periodic.append(name)
                fig = plt.figure(figsize=(8, 6))
                plot_periodic_spectrum(spec, fig, name)
                fig.savefig(figures_folder+'/{}_periodicity.png'.format(name))
                detections.append("Detected periodicity with lag {} of significance {}".format(repeats, sig))
                print ("Spectrum for {} has periodicity with lag {} of signficance {}".format(name, repeats, sig))
    bright_periodic_str = 'None' if len(bright_periodic) == 0 else '<br/>'.join(bright_periodic)
    output_periodic_spectra_page(figures_folder+'/periodic_spectra.html', './', "Spectra with Periodic Features", bright_periodic, detections)
    # Output the report
    cube_name = os.path.basename(cube)
    section = ReportSection('Spectra', cube_name)
    section.add_item('Bright Source Spectra', link='figures/bright_spectra.html', image='figures/bright-spectra_sml.png')
    section.add_item('Spectra wth periodic features', link='figures/periodic_spectra.html', value=bright_periodic_str)
    if beam_list:
        section.add_item('Beam Centre Spectra', link='figures/beam_spectra.html', image='figures/beam-spectra_sml.png')
        section.add_item('Beam Noise Levels', link='figures/beam_comparison.png', image='figures/beam_comparison_sml.png')
    reporter.add_section(section)
    metric = ValidationMetric('Spectra periodicity',
        'Number of spectra with repeated patterns with more than 3-sigma significance ',
        num_bright_periodic, assess_metric(num_bright_periodic,
        1, 5, low_good=True))
    reporter.add_metric(metric)
def copy_existing_image(image_pattern, fig_folder):
    """Copy the first file matching `image_pattern` into `fig_folder`.

    Returns the destination path, or None when nothing matches. The copy uses
    default permissions and does not preserve metadata.
    """
    matches = glob.glob(image_pattern)
    if not matches:
        return None
    destination = fig_folder + "/" + os.path.basename(matches[0])
    shutil.copyfile(matches[0], destination)
    return destination
def add_opt_image_section(title, image_path, fig_folder, dest_folder, section, thumb_size_x=140, thumb_size_y=140):
    """Add an image item (with generated thumbnail) to a report section.

    If `image_path` is None the item is recorded as 'N/A' instead. The link
    target is made relative to `dest_folder` so the report is relocatable.
    """
    if image_path is None:
        section.add_item(title, value='N/A')
        return
    # Bug fix: size_x previously passed thumb_size_y, silently ignoring the
    # thumb_size_x parameter (harmless only when both defaults were equal).
    img_thumb, img_thumb_rel = Diagnostics.make_thumbnail(image_path, fig_folder, dest_folder,
                                                          size_x=thumb_size_x, size_y=thumb_size_y)
    image_path_rel = os.path.relpath(image_path, dest_folder)
    section.add_item(title, link=image_path_rel, image=img_thumb_rel)
def add_opt_mult_image_section(title, image_paths, fig_folder, dest_folder, section, thumb_size_x=140, thumb_size_y=140):
    """Add a multi-image item (with thumbnails) to a report section.

    Entries in `image_paths` that are falsy are skipped; if none remain (or
    `image_paths` is None) the item is recorded as 'N/A'.
    """
    if image_paths is None:
        section.add_item(title, value='N/A')
        return
    links = []
    thumbs = []
    for path in image_paths:
        if not path:
            continue
        thumb, thumb_rel = Diagnostics.make_thumbnail(path, fig_folder, dest_folder,
                                                      size_x=thumb_size_x, size_y=thumb_size_y)
        thumbs.append(thumb_rel)
        links.append(os.path.relpath(path, dest_folder))
    if links:
        section.add_item(title, link=links, image=thumbs)
    else:
        section.add_item(title, value='N/A')
def report_calibration(diagnostics_dir, dest_folder, reporter):
    """Report bandpass calibration quality for the observation.

    Plots bandpasses by antenna and by beam, pulls in the pipeline's own
    amplitude/phase diagnostic images, and adds a Calibration section to
    the reporter.
    """
    print('\nReporting calibration from ' + diagnostics_dir)
    fig_folder= get_figures_folder(dest_folder)
    bandpass, cal_sbid = Bandpass.get_cal_bandpass(diagnostics_dir)
    # Plot bandpasses
    bp_by_ant_fig = Bandpass.plot_bandpass_by_antenna(bandpass, cal_sbid, fig_folder, 'Calibration')
    #bp_by_ant_thumb, bp_by_ant_thumb_rel = Diagnostics.make_thumbnail(bp_by_ant_fig, fig_folder, dest_folder)
    #bp_by_ant_fig_rel = os.path.relpath(bp_by_ant_fig, dest_folder)
    bp_by_beam_fig = Bandpass.plot_bandpass_by_beam(bandpass, cal_sbid, fig_folder, 'Calibration')
    bp_by_beam_thumb, bp_by_beam_thumb_rel = Diagnostics.make_thumbnail(bp_by_beam_fig, fig_folder, dest_folder)
    bp_by_beam_fig_rel = os.path.relpath(bp_by_beam_fig, dest_folder)
    # Include the pipeline diagnostics (each may be absent -> None)
    amp_diag_img = copy_existing_image(diagnostics_dir+'/amplitudesDiagnostics_'+str(cal_sbid)+'.png', fig_folder)
    phase_diag_img = copy_existing_image(diagnostics_dir+'/phasesDiagnostics_'+str(cal_sbid)+'.png', fig_folder)
    cal_param_pdf = copy_existing_image(diagnostics_dir+'/calparameters_*_bp_SB'+str(cal_sbid)+'.smooth.pdf', fig_folder)
    cal_param_pdf_rel = os.path.relpath(cal_param_pdf, dest_folder) if cal_param_pdf else None
    # Output the report
    section = ReportSection('Calibration', '')
    section.add_item('Cal SBID', cal_sbid)
    add_opt_image_section('Bandpass by Antenna', bp_by_ant_fig, fig_folder, dest_folder, section)
    add_opt_image_section('Bandpass by Beam', bp_by_beam_fig, fig_folder, dest_folder, section)
    add_opt_image_section('Amplitude Diagnostics', amp_diag_img, fig_folder, dest_folder, section)
    add_opt_image_section('Phase Diagnostics', phase_diag_img, fig_folder, dest_folder, section)
    if cal_param_pdf_rel:
        section.add_item('Parameters', value="pdf", link=cal_param_pdf_rel)
    reporter.add_section(section)
def report_diagnostics(diagnostics_dir, sbid, dest_folder, reporter, sched_info, obs_metadata, short_len=500, long_len=2000):
    """Report flagging statistics and expected noise for the observation.

    Reads flagging details from the pipeline diagnostics directory, derives
    the expected RMS per beam, produces summary plots, and adds a Diagnostics
    section plus four validation metrics to the reporter. `short_len` and
    `long_len` (metres) split baselines into short/medium/long classes.
    """
    print('\nReporting diagnostics')
    fig_folder= get_figures_folder(dest_folder)
    # Closepack footprints use a different beam layout in the beam plots
    is_closepack = sched_info.footprint == None or sched_info.footprint.startswith('closepack')
    # Extract metadata
    chan_width, cfreq, nchan = Diagnostics.get_freq_details(diagnostics_dir)
    chan_width_kHz = round(chan_width/1000., 3) # convert Hz to kHz
    theoretical_rms_mjy = np.zeros(len(obs_metadata.fields))
    total_rows = sum([field.num_rows for field in obs_metadata.fields])
    for idx, field in enumerate(obs_metadata.fields):
        # Apportion the total observing time to each field by its share of rows
        field_tobs = obs_metadata.tobs * field.num_rows / total_rows
        theoretical_rms_mjy[idx] = calc_theoretical_rms(chan_width, t_obs=field_tobs)
    # Extract flagging details
    flag_stat_beams, n_flag_ant_beams, ant_flagged_in_all, pct_integ_flagged, baseline_flag_pct, pct_each_integ_flagged, bad_chan_pct_count = Diagnostics.get_flagging_stats(
        diagnostics_dir, fig_folder)
    print("Antenna flagged in all:", ant_flagged_in_all)
    flagged_ant_desc = ", ".join(ant_flagged_in_all) if len(ant_flagged_in_all) > 0 else 'None'
    pct_short_base_flagged, pct_medium_base_flagged, pct_long_base_flagged = Diagnostics.calc_flag_percent(
        baseline_flag_pct, short_len=short_len, long_len=long_len)
    pct_chan_unflagged = Diagnostics.calc_pct_channels_unflagged(bad_chan_pct_count)
    # Extract beam RMS
    beam_exp_rms = Diagnostics.calc_beam_exp_rms(flag_stat_beams, theoretical_rms_mjy)
    rms_min = np.min(beam_exp_rms)
    rms_max = np.max(beam_exp_rms)
    rms_range_pct = round((rms_max-rms_min)/rms_min*100,1)
    # Plot beam stats
    beam_nums = Diagnostics.get_beam_numbers_closepack()
    flagged_vis_fig = Diagnostics.plot_flag_stat(flag_stat_beams, beam_nums, sbid, fig_folder, closepack=is_closepack)
    flagged_ant_fig = Diagnostics.plot_flag_ant(n_flag_ant_beams, beam_nums, sbid, fig_folder, closepack=is_closepack)
    beam_exp_rms_fig = Diagnostics.plot_beam_exp_rms(beam_exp_rms, beam_nums, sbid, fig_folder, closepack=is_closepack)
    baseline_fig = Diagnostics.plot_baselines(baseline_flag_pct, fig_folder, sbid, short_len=short_len, long_len=long_len)
    flag_ant_file_rel = os.path.relpath(fig_folder+'/flagged_antenna.txt', dest_folder)
    integ_flag_fig = Diagnostics.plot_integrations(pct_each_integ_flagged, sbid, fig_folder)
    flag_pct_dist_fig = Diagnostics.plot_flagging_distribution(bad_chan_pct_count, sbid, fig_folder)
    # Output the report
    section = ReportSection('Diagnostics', '')
    section.add_item('Completely Flagged Antennas', flagged_ant_desc, link=flag_ant_file_rel)
    section.add_item('Integrations Completely<br/>Flagged (%)', pct_integ_flagged)
    add_opt_image_section('Flagging over Time', integ_flag_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
    add_opt_image_section('Channel Flagging', flag_pct_dist_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
    section.add_item('Short Baselines<br/>Flagged (%)', pct_short_base_flagged)
    section.add_item('Medium Baselines<br/>Flagged (%)', pct_medium_base_flagged)
    section.add_item('Long Baselines<br/>Flagged (%)', pct_long_base_flagged)
    add_opt_image_section('Baselines', baseline_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
    section.start_new_row()
    section.add_item('Channel Width (kHz)', chan_width_kHz)
    add_opt_image_section('Flagged Visibilities', flagged_vis_fig, fig_folder, dest_folder, section) #, thumb_size_x=140, thumb_size_y=70)
    add_opt_image_section('Flagged Antennas', flagged_ant_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
    add_opt_image_section('Expected RMS per channel', beam_exp_rms_fig, fig_folder, dest_folder, section) #, thumb_size_x=70, thumb_size_y=70)
    reporter.add_section(section)
    metric = ValidationMetric('Flagged Short Baselines',
        'Percent of short baselines ({}m or less) flagged across all integrations and all beams'.format(short_len),
        pct_short_base_flagged, assess_metric(pct_short_base_flagged,
        20, 40, low_good=True))
    reporter.add_metric(metric)
    metric = ValidationMetric('Flagged Long Baselines',
        'Percent of long baselines ({}m or more) flagged across all integrations and all beams'.format(long_len),
        pct_long_base_flagged, assess_metric(pct_long_base_flagged,
        30, 45, low_good=True))
    reporter.add_metric(metric)
    metric = ValidationMetric('Unflagged Integrations',
        'Percent of integrations with less than 5% of channels flagged',
        pct_chan_unflagged, assess_metric(pct_chan_unflagged,
        70, 50))
    reporter.add_metric(metric)
    metric = ValidationMetric('Expected RMS Difference',
        'The percentage change of expected RMS across the field.',
        rms_range_pct, assess_metric(rms_range_pct,
        10, 30, low_good=True))
    reporter.add_metric(metric)
def report_self_cal(cube, image, obs_metadata, dest_folder, reporter):
    """Report self-calibration phase stability for each observed field.

    For each field with a self-cal folder, plots the phase solutions and
    counts bad beams/antennas; then adds a Self Calibration section and a
    bad-antenna metric to the reporter.
    """
    print('\nReporting self calibration')
    fig_folder= get_figures_folder(dest_folder)
    field_plots = []
    field_names = ""
    total_bad_beams = 0
    max_bad_ant = 0
    for i,field in enumerate(obs_metadata.fields):
        if i > 0:
            field_names += '<br/>'
        field_names += field.name
        folder = Diagnostics.find_subdir(cube, image, field.name)
        if folder:
            sc = SelfCal.prepare_self_cal_set(folder)
            plots = SelfCal.plot_self_cal_set(sc, fig_folder)
            field_plots.append(plots)
            num_bad_beams, num_bad_ant = SelfCal.calc_phase_stability(sc)
            print("In field {} found {} bad beams and {} bad antennas".format(field.name, num_bad_beams, num_bad_ant))
            total_bad_beams += num_bad_beams
            # Track the worst field rather than the sum for antennas
            max_bad_ant = max(max_bad_ant, num_bad_ant)
        else:
            # No self-cal data for this field; keep the plot columns aligned
            field_plots.append([None, None, None])
    plot_array = np.asarray(field_plots)
    print ("Overall found {} bad beams and {} bad antennas.".format(total_bad_beams, max_bad_ant))
    # Output the report
    section = ReportSection('Self Calibration', '')
    section.add_item('Field(s)', value=field_names)
    add_opt_mult_image_section('Phase Stability', plot_array[:,0], fig_folder, dest_folder, section)
    add_opt_mult_image_section('Phase Summary', plot_array[:,1], fig_folder, dest_folder, section)
    add_opt_mult_image_section('All Phases', plot_array[:,2], fig_folder, dest_folder, section)
    reporter.add_section(section)
    metric = ValidationMetric('Bad Phase Antenna',
        'Number of unflagged antenna with bad phase solutions',
        max_bad_ant, assess_metric(max_bad_ant,
        1, 3, low_good=True))
    reporter.add_metric(metric)
def main():
    """Entry point: validate a GASKAP HI cube and/or ASKAP image.

    Parses command line arguments, runs the requested checks (cube stats,
    emission, noise, spectra, image stats, calibration, diagnostics and
    self-cal) and writes an HTML report plus a metrics XML file.

    Returns 0 on success; raises ValueError for invalid inputs.
    """
    start = time.time()
    print("#### Started validation at {} ####".format(
        (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))))
    #ignore astropy warnings
    warnings.simplefilter('ignore', AstropyWarning)
    # Parse command line options
    args = parseargs()
    dest_folder = args.output
    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)
    figures_folder = dest_folder + '/figures'
    if not os.path.exists(figures_folder):
        os.makedirs(figures_folder)
    # Validate the supplied paths before doing any work
    if args.cube and (not os.path.exists(args.cube) or not os.path.isfile(args.cube)):
        raise ValueError('Cube {} could not be found or is not a file.'.format(args.cube))
    if args.image and (not os.path.exists(args.image) or not os.path.isfile(args.image)):
        raise ValueError('Image {} could not be found or is not a file.'.format(args.image))
    if not args.cube and not args.image:
        raise ValueError('You must supply either an image or a cube to validate.')
    if args.source_cat and (not os.path.exists(args.source_cat) or not os.path.isfile(args.source_cat)):
        raise ValueError('Source catalogue {} could not be found or is not a file.'.format(args.source_cat))
    if args.emvel:
        set_velocity_range(args.emvel, args.nonemvel)
    if args.cube:
        print ('\nChecking quality level of GASKAP HI cube:', args.cube)
        obs_img = args.cube
        metrics_subtitle = 'GASKAP HI Validation Metrics'
    else:
        print ('\nChecking quality level of ASKAP image:', args.image)
        obs_img = args.image
        metrics_subtitle = 'ASKAP Observation Diagnostics Metrics'
    cube_name = os.path.basename(obs_img)
    reporter = ValidationReport('GASKAP Validation Report: {}'.format(cube_name), metrics_subtitle=metrics_subtitle)
    sched_info = Diagnostics.get_sched_info(obs_img)
    diagnostics_dir = Diagnostics.find_diagnostics_dir(args.cube, args.image)
    obs_metadata = Diagnostics.get_metadata(diagnostics_dir) if diagnostics_dir else None
    sbid = report_observation(obs_img, reporter, args.duration, sched_info, obs_metadata)
    # Cube-specific checks
    if args.cube:
        report_cube_stats(args.cube, reporter)
        check_for_emission(args.cube, emission_vel_range[0], emission_vel_range[1], reporter, dest_folder, redo=args.redo)
        slab = check_for_non_emission(args.cube, non_emission_val_range[0], non_emission_val_range[1], reporter, dest_folder, redo=args.redo)
        measure_spectral_line_noise(slab, args.cube, non_emission_val_range[0], non_emission_val_range[1], reporter, dest_folder, args.duration, redo=args.redo)
        if args.source_cat or args.beam_list:
            extract_spectra(args.cube, args.source_cat, dest_folder, reporter, args.num_spectra, args.beam_list)
    # Image-specific checks
    if args.image:
        report_image_stats(args.image, args.noise, reporter, dest_folder, diagnostics_dir, redo=args.redo)
    if diagnostics_dir:
        report_calibration(diagnostics_dir, dest_folder, reporter)
        report_diagnostics(diagnostics_dir, sbid, dest_folder, reporter, sched_info, obs_metadata)
    if obs_metadata:
        report_self_cal(args.cube, args.image, obs_metadata, dest_folder, reporter)
    print ('\nProducing report to', dest_folder)
    output_html_report(reporter, dest_folder)
    output_metrics_xml(reporter, dest_folder)
    end = time.time()
    print("#### Completed validation at {} ####".format(
        (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))))
    print('\nChecks completed in {:.02f} s'.format((end - start)))
    return 0
# Fix: removed trailing extraction artifact ("| |") that broke the syntax
# of the script entry point.
if __name__ == '__main__':
    exit(main())
#!/usr/bin/env python3
import numpy as np
import pandas as pd
def operaciones():
    """Demonstrate that NumPy ufuncs apply elementwise to pandas objects.

    Because pandas is built on top of NumPy, ufuncs such as np.sin and
    np.exp2 operate directly on a Series or DataFrame, preserving indices.
    """
    serie = pd.Series(np.random.randint(1, 90, size=8))
    frame = pd.DataFrame(np.random.randint(1, 90, size=(4, 5)),
                         index=['a', 'b', 'c', 'd'],
                         columns=['P1', 'P2', 'P3', 'P4', 'P5'])
    print(serie)
    print(frame)
    print("With uFuncs")
    print(np.sin(serie))
    print(np.exp2(frame))
#alineacion de indices en Series
def op_series():
    """Show index alignment when adding two Series.

    Plain + aligns on the union of indices and yields NaN for labels that
    exist in only one Series; Series.add with fill_value=0 treats those
    missing labels as zero instead.
    """
    first = pd.Series([1, 2, 4, 46, 6, 464, 335, 33], index=list('abcdefgh'))
    second = pd.Series([1, 2, 45, 54, 24], index=list('abdth'))
    print(first + second)
    print(first.add(second, fill_value=0))
#alineacion de indices en dataframes
#podemos hacer operaciones Pandas con funciones personalizadas de estos mismos
# (+) : add()
# (-) : sub() o subtract()
# (*) : mul() o multiply()
# (/) : div(), divide() o truediv()
# (//): Division entera-> floordiv()
# (%) : mod()
#(**) : pow()
#pandas permite usar estas funciones a traves de los objetos mismos a diferencia de Numpy
# Por ejemplo
# para multiplicar (*) : en Numpy np.multiply(A,B), pero en pandas A.multiply(B)
#con la facilidad de poder agregar valores de relleno en caso no se puedan alinear los indices en la operacion
#esto gracias al parametro fill_value que se encuentra embebido en cada funcion de operador de pandas
#A.sub(B,fill_value=0) ojo: funciona entre operaciones de 2 dataframes
def op_dataframe():
    """Show DataFrame arithmetic: elementwise addition and row broadcasting.

    Subtracting a row (iloc[0]) broadcasts it across every row of the other
    frame, matching columns by label.
    """
    # Creation order kept (second frame first) so the random draws match
    # the original implementation.
    frame_b = pd.DataFrame(np.random.randint(1, 90, size=(4, 5)),
                           index=['a', 'b', 'c', 'd'],
                           columns=['P1', 'P2', 'P3', 'P4', 'P5'])
    frame_a = pd.DataFrame(np.random.randint(1, 90, size=(4, 5)),
                           index=['a', 'b', 'c', 'd'],
                           columns=['P1', 'P2', 'P3', 'P4', 'P5'])
    total = frame_a + frame_b
    #broadcast subtraction of the first row; equivalent to frame_a.sub(frame_b.iloc[0], axis=1)
    diff = frame_a - frame_b.iloc[0]
    print(total)
    print(diff)
if __name__=="__main__":
    # Demo entry point: run only the DataFrame example; the ufunc demo is
    # left disabled.
    #operaciones()
    op_dataframe()
import copy
import os
import numpy as np
import pandas as pd
from scipy import optimize
# code models
from src.toric_model import Toric_code
from src.planar_model import Planar_code
from src.xzzx_model import xzzx_code
from src.rotated_surface_model import RotSurCode
# decoders
from decoders import MCMC, single_temp, single_temp_alpha, EWD, \
EWD_general_noise, EWD_general_noise_shortest, \
EWD_alpha_N_n, EWD_alpha, MCMC_biased, \
MCMC_alpha_with_shortest, MCMC_alpha
from src.mwpm import class_sorted_mwpm, regular_mwpm, enhanced_mwpm
def get_individual_error_rates(params):
    """Return the per-Pauli error rates (p_x, p_y, p_z) for the chosen noise model.

    Supported values of params['noise']:
      - 'depolarizing': the total rate params['p_error'] is split evenly.
      - 'biased': Z errors are favoured by the bias params['eta'].
      - 'alpha': pz_tilde is solved numerically from the total error
        probability so that p_z + 2 * pz_tilde**alpha * (1 - p) = p.

    Raises:
        ValueError: for an unsupported noise model. (Previously an assert,
        which is silently stripped under `python -O`.)
    """
    noise = params['noise']
    p = params['p_error']
    if noise == 'biased':
        eta = params['eta']
        p_z = p * eta / (eta + 1)
        p_x = p / (2 * (eta + 1))
        p_y = p_x
    elif noise == 'alpha':
        # Calculate pz_tilde from p_error (total error prob.)
        alpha = params['alpha']
        p_tilde = p / (1 - p)
        pz_tilde = optimize.fsolve(lambda x: x + 2*x**alpha - p_tilde, 0.5)[0]
        p_z = pz_tilde*(1 - p)
        p_x = p_y = pz_tilde**alpha * (1 - p)
    elif noise == 'depolarizing':
        p_x = p_y = p_z = p/3
    else:
        raise ValueError(f'{params["noise"]} is not implemented.')
    return p_x, p_y, p_z
# This function generates training data with help of the MCMC algorithm
def generate(file_path, params, nbr_datapoints=10**6, fixed_errors=None):
    """Generate decoder training data and pickle it to file_path.

    For each data point a random error is applied to a fresh code instance,
    the configured decoding method is run, and the (qubit matrix,
    equivalence-class distribution) pair is buffered and periodically written
    to file_path as a pickled pandas DataFrame.

    Args:
        file_path: destination pickle file (overwritten at every save point).
        params: dict of run settings ('code', 'method', 'noise', 'size', ...).
        nbr_datapoints: number of points to generate (ignored when
            fixed_errors is set).
        fixed_errors: if not None, keep generating until this many failed
            syndromes have been seen, then stop.

    NOTE(review): DataFrame.append was removed in pandas 2.0 -- this code
    assumes pandas < 2.0; consider migrating to pd.concat.
    """
    # Creates df
    df = pd.DataFrame()
    # Add parameters as first entry in dataframe
    names = ['data_nr', 'type']
    index_params = pd.MultiIndex.from_product([[-1], np.arange(1)],
                                              names=names)
    df_params = pd.DataFrame([[params]],
                             index=index_params,
                             columns=['data'])
    df = df.append(df_params)
    print('\nDataFrame opened at: ' + str(file_path))
    # If using a fixed number of errors, let the max number of datapoins be huge
    if fixed_errors != None:
        nbr_datapoints = 10000000
    failed_syndroms = 0
    # Initiate temporary list with results (to prevent appending to dataframe each loop)
    df_list = []
    p_x, p_y, p_z = get_individual_error_rates(params)
    # Loop to generate data points
    for i in range(nbr_datapoints):
        print('Starting generation of point nr: ' + str(i + 1), flush=True)
        # Initiate code
        if params['code'] == 'toric':
            assert params['noise'] == 'depolarizing', f'{params["noise"]}-noise is not compatible with "{params["code"]}"-model.'
            init_code = Toric_code(params['size'])
            init_code.generate_random_error(params['p_error'])
        elif params['code'] == 'planar':
            assert params['noise'] in ['depolarizing', 'alpha'], f'{params["noise"]}-noise is not compatible with "{params["code"]}"-model.'
            init_code = Planar_code(params['size'])
            init_code.generate_random_error(p_x=p_x, p_y=p_y, p_z=p_z)
        elif params['code'] == 'xzzx':
            assert params['noise'] in ['depolarizing', 'alpha', 'biased'], f'{params["noise"]}-noise is not compatible with "{params["code"]}"-model.'
            init_code = xzzx_code(params['size'])
            init_code.generate_random_error(p_x=p_x, p_y=p_y, p_z=p_z)
        elif params['code'] == 'rotated':
            assert params['noise'] in ['depolarizing', 'alpha', 'biased'], f'{params["noise"]}-noise is not compatible with "{params["code"]}"-model.'
            init_code = RotSurCode(params['size'])
            init_code.generate_random_error(p_x=p_x, p_y=p_y, p_z=p_z)
        # Flatten initial qubit matrix to store in dataframe
        df_qubit = copy.deepcopy(init_code.qubit_matrix)
        eq_true = init_code.define_equivalence_class()
        # Create inital error chains for algorithms to start with
        if params['mwpm_init']: #get mwpm starting points
            assert params['code'] == 'planar', 'Can only use eMWPM for planar model.'
            init_code = class_sorted_mwpm(init_code)
            print('Starting in MWPM state')
        else: #randomize input matrix, no trace of seed.
            init_code.qubit_matrix, _ = init_code.apply_random_logical()
            init_code.qubit_matrix = init_code.apply_stabilizers_uniform()
            print('Starting in random state')
        # Generate data for DataFrame storage OBS now using full bincount, change this
        if params['method'] == "MCMC":
            if params['noise'] == 'depolarizing':
                df_eq_distr = MCMC(init_code,
                                   params['p_error'],
                                   Nc=params['Nc'],
                                   SEQ=params['SEQ'],
                                   TOPS=params['TOPS'],
                                   eps=params['eps'],
                                   iters=params['iters'],
                                   conv_criteria=params['conv_criteria'])
                # NOTE(review): the running total is printed before the
                # increment, so "total now" lags by one (same below).
                if np.argmax(df_eq_distr) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
            if params['noise'] == "biased":
                df_eq_distr = MCMC_biased(init_code,
                                          params['p_error'],
                                          eta=params['eta'],
                                          Nc=params['Nc'],
                                          SEQ=params['SEQ'],
                                          TOPS=params['TOPS'],
                                          eps=params['eps'],
                                          iters=params['iters'],
                                          conv_criteria=params['conv_criteria'])
                if np.argmax(df_eq_distr) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
            if params['noise'] == "alpha":
                df_eq_distr = MCMC_alpha(init_code,
                                         params['p_error'],
                                         alpha=params['alpha'],
                                         Nc=params['Nc'],
                                         SEQ=params['SEQ'],
                                         TOPS=params['TOPS'],
                                         eps=params['eps'],
                                         iters=params['iters'],
                                         conv_criteria=params['conv_criteria'])
                if np.argmax(df_eq_distr[0]) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
        if params['method'] == "MCMC_with_shortest":
            assert params['noise'] == 'alpha'
            if params['noise'] == "alpha":
                df_eq_distr = MCMC_alpha_with_shortest(init_code, params['p_error'], alpha=params['alpha'])
                if np.argmax(df_eq_distr[0:4]) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
        elif params['method'] == "EWD":
            if params['noise'] == 'depolarizing':
                assert params['onlyshortest'] == False, "onlyshortest not implemented for deoplarizing"
                df_eq_distr = EWD(init_code, params['p_error'], params['p_sampling'], steps=params['steps'], droplets=params['droplets'])
                df_eq_distr = np.array(df_eq_distr)
                if np.argmax(df_eq_distr) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
            elif params['noise'] == 'alpha':
                # Convert total error/sampling probabilities to pz_tilde values
                alpha=params['alpha']
                p_tilde_sampling = params['p_sampling'] / (1 - params['p_sampling'])
                pz_tilde_sampling = optimize.fsolve(lambda x: x + 2*x**alpha - p_tilde_sampling, 0.5)[0]
                p_tilde = params['p_error'] / (1 - params['p_error'])
                pz_tilde = optimize.fsolve(lambda x: x + 2*x**alpha - p_tilde, 0.5)[0]
                df_eq_distr = EWD_alpha(init_code,
                                        pz_tilde,
                                        alpha,
                                        params['steps'],
                                        pz_tilde_sampling=pz_tilde_sampling,
                                        onlyshortest=params['onlyshortest'])
                df_eq_distr = np.array(df_eq_distr)
            else:
                raise ValueError(f'''EWD does not support "{params['noise']}" noise''')
        elif params['method'] == "ST":
            if params['noise'] == 'depolarizing':
                df_eq_distr = single_temp(init_code, params['p_error'], params['steps'])
                df_eq_distr = np.array(df_eq_distr)
                if np.argmin(df_eq_distr) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
            elif params['noise'] == 'alpha':
                p_tilde = params['p_error'] / (1 - params['p_error'])
                pz_tilde = optimize.fsolve(lambda x: x + 2*x**params['alpha'] - p_tilde, 0.5)[0]
                df_eq_distr = single_temp_alpha(init_code,
                                                pz_tilde,
                                                params['alpha'],
                                                params['steps'])
                df_eq_distr = np.array(df_eq_distr)
                if np.argmin(df_eq_distr) != eq_true:
                    print('Failed syndrom, total now:', failed_syndroms)
                    failed_syndroms += 1
            else:
                raise ValueError(f'''ST does not support "{params['noise']}" noise''')
        elif params['method'] == "eMWPM":
            # Pick the equivalence class with the shortest total chain length
            out = class_sorted_mwpm(copy.deepcopy(init_code))
            lens = np.zeros((4))
            for j in range(4):
                lens[j] = sum(out[j].chain_lengths())
            choice = np.argmin(lens)
            df_eq_distr = np.zeros((4)).astype(np.uint8)
            df_eq_distr[choice] = 100
            if np.argmax(df_eq_distr) != eq_true:
                print('Failed syndrom, total now:', failed_syndroms)
                failed_syndroms += 1
        elif params['method'] == "MWPM":
            choice = regular_mwpm(copy.deepcopy(init_code))
            df_eq_distr = np.zeros((4)).astype(np.uint8)
            df_eq_distr[choice] = 100
            if np.argmax(df_eq_distr) != eq_true:
                print('Failed syndrom, total now:', failed_syndroms)
                failed_syndroms += 1
        # Generate data for DataFrame storage OBS now using full bincount, change this
        # Create indices for generated data
        names = ['data_nr', 'type']
        index_qubit = pd.MultiIndex.from_product([[i], np.arange(1)],
                                                 names=names)
        index_distr = pd.MultiIndex.from_product([[i], np.arange(1)+1], names=names)
        # Add data to Dataframes
        df_qubit = pd.DataFrame([[df_qubit.astype(np.uint8)]], index=index_qubit,
                                columns=['data'])
        df_distr = pd.DataFrame([[df_eq_distr]],
                                index=index_distr, columns=['data'])
        # Add dataframes to temporary list to shorten computation time
        df_list.append(df_qubit)
        df_list.append(df_distr)
        # Every x iteration adds data to data file from temporary list
        # and clears temporary list
        if (i + 1) % 50 == 0:
            df = df.append(df_list)
            df_list.clear()
            print('Intermediate save point reached (writing over)')
            df.to_pickle(file_path)
            print('Total number of failed syndroms:', failed_syndroms)
        # If the desired amount of errors have been achieved, break the loop and finish up
        if failed_syndroms == fixed_errors:
            print('Desired amount of failed syndroms achieved, stopping data generation.')
            break
    # Adds any remaining data from temporary list to data file when run is over
    if len(df_list) > 0:
        df = df.append(df_list)
    print('\nSaving all generated data (writing over)')
    df.to_pickle(file_path)
    print('\nCompleted')
if __name__ == '__main__':
    # Get job array id, working directory
    job_id = os.getenv('SLURM_ARRAY_JOB_ID')
    array_id = os.getenv('SLURM_ARRAY_TASK_ID')
    local_dir = os.getenv('TMPDIR')
    # Use environment variables to get parameters
    # NOTE(review): the int()/float() casts below raise TypeError if any
    # variable is unset (os.getenv returns None) -- assumed to run under
    # SLURM with a fully populated environment.
    size = int(os.getenv('CODE_SIZE'))
    code = str(os.getenv('CODE_TYPE'))
    alpha = float(os.getenv('CODE_ALPHA'))
    job_name = str(os.getenv('JOB_NAME'))
    start_p = float(os.getenv('START_P'))
    end_p = float(os.getenv('END_P'))
    num_p = int(os.getenv('NUM_P'))
    mwpm_init = bool(int(os.getenv('MWPM_INIT')))
    p_sampling = float(os.getenv('P_SAMPLE'))
    alg = str(os.getenv('ALGORITHM'))
    only_shortest = bool(int(os.getenv('ONLY_SHORTEST')))
    # Simulation settings; p_error is picked from a linspace by array task id
    params = {'code': code,
              'method': alg,
              'size': size,
              'noise': 'alpha',
              'p_error': np.linspace(start_p, end_p, num=num_p)[int(array_id)],
              'eta': 0.5,
              'alpha': alpha,
              'p_sampling': p_sampling,
              'droplets': 1,
              'mwpm_init': mwpm_init,
              'fixed_errors':None,
              'Nc': None,
              'iters': 10,
              'conv_criteria': 'error_based',
              'SEQ': 2,
              'TOPS': 10,
              'eps': 0.01,
              'onlyshortest': only_shortest}
    # Steps is a function of code size L
    params.update({'steps': int(5*params['size']**5)})
    print('Nbr of steps to take if applicable:', params['steps'])
    # Build file path
    file_path = os.path.join(local_dir, f'data_paper_{job_name}_{job_id}_{array_id}.xz')
    # Generate data
    generate(file_path, params, nbr_datapoints=10000, fixed_errors=params['fixed_errors'])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the core module.
"""
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..core import Segment, SegmentationImage
try:
import matplotlib # noqa
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
class TestSegmentationImage:
    """Unit tests for ``SegmentationImage`` and ``Segment``.

    All tests share one 6x6 segmentation map (built in ``setup_class``)
    containing labels {1, 3, 4, 5, 7} — deliberately non-consecutive so the
    relabeling/missing-label machinery is exercised.
    """

    def setup_class(self):
        # 18 labeled pixels, 18 background (zero) pixels
        self.data = [[1, 1, 0, 0, 4, 4],
                     [0, 0, 0, 0, 0, 4],
                     [0, 0, 3, 3, 0, 0],
                     [7, 0, 0, 0, 0, 5],
                     [7, 7, 0, 5, 5, 5],
                     [7, 7, 0, 0, 5, 5]]
        self.segm = SegmentationImage(self.data)

    def test_array(self):
        assert_allclose(self.segm.data, self.segm.__array__())

    def test_copy(self):
        # copy() must deep-copy data and labels so mutations don't propagate
        segm = SegmentationImage(self.data)
        segm2 = segm.copy()
        assert segm.data is not segm2.data
        assert segm.labels is not segm2.labels
        segm.data[0, 0] = 100.
        assert segm.data[0, 0] != segm2.data[0, 0]

    def test_invalid_data(self):
        # contains all zeros
        data = np.zeros((3, 3))
        with pytest.raises(ValueError):
            SegmentationImage(data)
        # contains a NaN
        data = np.zeros((5, 5))
        data[2, 2] = np.nan
        with pytest.raises(ValueError):
            SegmentationImage(data)
        # contains an inf
        data = np.zeros((5, 5))
        data[2, 2] = np.inf
        data[0, 0] = -np.inf
        with pytest.raises(ValueError):
            SegmentationImage(data)
        # contains a negative value
        data = np.arange(-1, 8).reshape(3, 3)
        with pytest.raises(ValueError):
            SegmentationImage(data)

    @pytest.mark.parametrize('label', [0, -1, 2])
    def test_invalid_label(self, label):
        # test with scalar labels
        # NOTE(review): the second call never runs — pytest.raises exits the
        # block at the first raising statement; check_labels(label) should be
        # in its own `with pytest.raises(...)` block to actually be tested.
        with pytest.raises(ValueError):
            self.segm.check_label(label)
            self.segm.check_labels(label)

    def test_invalid_label_array(self):
        # test with array of labels
        with pytest.raises(ValueError):
            self.segm.check_labels([0, -1, 2])

    def test_data_ma(self):
        # data_ma masks the background: 18 labeled vs 18 masked pixels
        assert isinstance(self.segm.data_ma, np.ma.MaskedArray)
        assert np.ma.count(self.segm.data_ma) == 18
        assert np.ma.count_masked(self.segm.data_ma) == 18

    def test_segments(self):
        assert isinstance(self.segm[0], Segment)
        assert_allclose(self.segm[0].data, self.segm[0].__array__())
        assert self.segm[0].data_ma.shape == self.segm[0].data.shape
        assert (self.segm[0].data_ma.filled(0.).sum() ==
                self.segm[0].data.sum())
        label = 4
        idx = self.segm.get_index(label)
        assert self.segm[idx].label == label
        assert self.segm[idx].area == self.segm.get_area(label)
        assert self.segm[idx].slices == self.segm.slices[idx]
        assert self.segm[idx].bbox.slices == self.segm[idx].slices
        # iterating the image yields segments in label order
        for i, segment in enumerate(self.segm):
            assert segment.label == self.segm.labels[i]

    def test_repr_str(self):
        assert repr(self.segm) == str(self.segm)
        props = ['shape', 'nlabels', 'max_label']
        for prop in props:
            assert '{}:'.format(prop) in repr(self.segm)

    def test_segment_repr_str(self):
        assert repr(self.segm[0]) == str(self.segm[0])
        props = ['label', 'slices', 'area']
        for prop in props:
            assert '{}:'.format(prop) in repr(self.segm[0])

    def test_segment_data(self):
        # segment index 3 is label 5 (labels are [1, 3, 4, 5, 7])
        assert_allclose(self.segm[3].data.shape, (3, 3))
        assert_allclose(np.unique(self.segm[3].data), [0, 5])

    def test_segment_make_cutout(self):
        cutout = self.segm[3].make_cutout(self.data, masked_array=False)
        assert not np.ma.is_masked(cutout)
        assert_allclose(cutout.shape, (3, 3))
        cutout = self.segm[3].make_cutout(self.data, masked_array=True)
        assert np.ma.is_masked(cutout)
        assert_allclose(cutout.shape, (3, 3))

    def test_segment_make_cutout_input(self):
        # 1D input cannot match the 2D segmentation image
        with pytest.raises(ValueError):
            self.segm[0].make_cutout(np.arange(10))

    def test_labels(self):
        assert_allclose(self.segm.labels, [1, 3, 4, 5, 7])

    def test_nlabels(self):
        assert self.segm.nlabels == 5

    def test_max_label(self):
        assert self.segm.max_label == 7

    def test_areas(self):
        # pixel counts per label, in label order [1, 3, 4, 5, 7]
        expected = np.array([2, 2, 3, 6, 5])
        assert_allclose(self.segm.areas, expected)
        assert (self.segm.get_area(1) ==
                self.segm.areas[self.segm.get_index(1)])
        assert_allclose(self.segm.get_areas(self.segm.labels),
                        self.segm.areas)

    def test_background_area(self):
        assert self.segm.background_area == 18

    def test_is_consecutive(self):
        assert not self.segm.is_consecutive
        data = [[2, 2, 0], [0, 3, 3], [0, 0, 4]]
        segm = SegmentationImage(data)
        assert not segm.is_consecutive  # does not start with label=1
        segm.relabel_consecutive(start_label=1)
        assert segm.is_consecutive

    def test_missing_labels(self):
        assert_allclose(self.segm.missing_labels, [2, 6])

    def test_check_labels(self):
        # NOTE(review): same smell as test_invalid_label — the second
        # statement under the first pytest.raises never executes.
        with pytest.raises(ValueError):
            self.segm.check_label(2)
            self.segm.check_labels([2])
        with pytest.raises(ValueError):
            self.segm.check_labels([2, 6])

    @pytest.mark.skipif('not HAS_MATPLOTLIB')
    def test_make_cmap(self):
        cmap = self.segm.make_cmap()
        # one color per label value plus the background entry at index 0
        assert len(cmap.colors) == (self.segm.max_label + 1)
        assert_allclose(cmap.colors[0], [0, 0, 0])
        assert_allclose(self.segm._cmap.colors,
                        self.segm.make_cmap(background_color='#000000',
                                            seed=0).colors)

    def test_reassign_labels(self):
        segm = SegmentationImage(self.data)
        segm.reassign_labels(labels=[1, 7], new_label=2)
        ref_data = np.array([[2, 2, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 3, 3, 0, 0],
                             [2, 0, 0, 0, 0, 5],
                             [2, 2, 0, 5, 5, 5],
                             [2, 2, 0, 0, 5, 5]])
        assert_allclose(segm.data, ref_data)
        assert segm.nlabels == len(segm.slices) - segm.slices.count(None)

    @pytest.mark.parametrize('start_label', [1, 5])
    def test_relabel_consecutive(self, start_label):
        segm = SegmentationImage(self.data)
        ref_data = np.array([[1, 1, 0, 0, 3, 3],
                             [0, 0, 0, 0, 0, 3],
                             [0, 0, 2, 2, 0, 0],
                             [5, 0, 0, 0, 0, 4],
                             [5, 5, 0, 4, 4, 4],
                             [5, 5, 0, 0, 4, 4]])
        # shift the reference so the same array covers both start_labels
        ref_data[ref_data != 0] += (start_label - 1)
        segm.relabel_consecutive(start_label=start_label)
        assert_allclose(segm.data, ref_data)
        # relabel_consecutive should do nothing if already consecutive
        segm.relabel_consecutive(start_label=start_label)
        assert_allclose(segm.data, ref_data)
        assert segm.nlabels == len(segm.slices) - segm.slices.count(None)

    @pytest.mark.parametrize('start_label', [0, -1])
    def test_relabel_consecutive_start_invalid(self, start_label):
        with pytest.raises(ValueError):
            segm = SegmentationImage(self.data)
            segm.relabel_consecutive(start_label=start_label)

    def test_keep_labels(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [0, 0, 0, 0, 0, 5],
                             [0, 0, 0, 5, 5, 5],
                             [0, 0, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        segm.keep_labels([5, 3])
        assert_allclose(segm.data, ref_data)

    def test_keep_labels_relabel(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 1, 1, 0, 0],
                             [0, 0, 0, 0, 0, 2],
                             [0, 0, 0, 2, 2, 2],
                             [0, 0, 0, 0, 2, 2]])
        segm = SegmentationImage(self.data)
        segm.keep_labels([5, 3], relabel=True)
        assert_allclose(segm.data, ref_data)

    def test_remove_labels(self):
        ref_data = np.array([[1, 1, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 0, 0, 0, 0],
                             [7, 0, 0, 0, 0, 0],
                             [7, 7, 0, 0, 0, 0],
                             [7, 7, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_labels(labels=[5, 3])
        assert_allclose(segm.data, ref_data)

    def test_remove_labels_relabel(self):
        ref_data = np.array([[1, 1, 0, 0, 2, 2],
                             [0, 0, 0, 0, 0, 2],
                             [0, 0, 0, 0, 0, 0],
                             [3, 0, 0, 0, 0, 0],
                             [3, 3, 0, 0, 0, 0],
                             [3, 3, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_labels(labels=[5, 3], relabel=True)
        assert_allclose(segm.data, ref_data)

    def test_remove_border_labels(self):
        # only label 3 does not touch the 1-pixel border
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_border_labels(border_width=1)
        assert_allclose(segm.data, ref_data)

    def test_remove_border_labels_border_width(self):
        # border_width of 3 covers the whole 6x6 image -> invalid
        with pytest.raises(ValueError):
            segm = SegmentationImage(self.data)
            segm.remove_border_labels(border_width=3)

    def test_remove_masked_labels(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [7, 0, 0, 0, 0, 5],
                             [7, 7, 0, 5, 5, 5],
                             [7, 7, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        mask = np.zeros(segm.data.shape, dtype=bool)
        mask[0, :] = True
        segm.remove_masked_labels(mask)
        assert_allclose(segm.data, ref_data)

    def test_remove_masked_labels_without_partial_overlap(self):
        # label 4 extends below row 0, so with partial_overlap=False it stays
        ref_data = np.array([[0, 0, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 3, 3, 0, 0],
                             [7, 0, 0, 0, 0, 5],
                             [7, 7, 0, 5, 5, 5],
                             [7, 7, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        mask = np.zeros(segm.data.shape, dtype=bool)
        mask[0, :] = True
        segm.remove_masked_labels(mask, partial_overlap=False)
        assert_allclose(segm.data, ref_data)

    def test_remove_masked_segments_mask_shape(self):
        segm = SegmentationImage(np.ones((5, 5)))
        mask = np.zeros((3, 3), dtype=bool)
        with pytest.raises(ValueError):
            segm.remove_masked_labels(mask)

    def test_outline_segments(self):
        segm_array = np.zeros((5, 5)).astype(int)
        segm_array[1:4, 1:4] = 2
        segm = SegmentationImage(segm_array)
        # only the interior pixel of the 3x3 square is cleared
        segm_array_ref = np.copy(segm_array)
        segm_array_ref[2, 2] = 0
        assert_allclose(segm.outline_segments(), segm_array_ref)

    def test_outline_segments_masked_background(self):
        segm_array = np.zeros((5, 5)).astype(int)
        segm_array[1:4, 1:4] = 2
        segm = SegmentationImage(segm_array)
        segm_array_ref = np.copy(segm_array)
        segm_array_ref[2, 2] = 0
        segm_outlines = segm.outline_segments(mask_background=True)
        # 8 outline pixels remain; 16 background + 1 interior are masked
        assert isinstance(segm_outlines, np.ma.MaskedArray)
        assert np.ma.count(segm_outlines) == 8
assert np.ma.count_masked(segm_outlines) == 17 | |
from time import perf_counter
import warnings
import torch
import torch.nn.functional as F
import numpy as np
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines.compose import Compose
@PIPELINES.register_module()
class Timer(Compose):
    """Compose wrapper that measures the wall-clock time of its transforms.

    The elapsed time in seconds is written into the result dict under the
    key ``f"{name}_time"``.
    """
    def __init__(self, name, transforms):
        super().__init__(transforms)
        self.name = f"{name}_time"
    def __call__(self, data):
        started = perf_counter()
        out = super().__call__(data)
        out[self.name] = perf_counter() - started
        return out
@PIPELINES.register_module()
class DummyResize(object):
    """No-op stand-in for Resize when the scale factor is 1.

    Populates ``img_shape``, ``pad_shape`` and ``scale_factor`` in the
    results dict exactly as a real resize with scale 1 would, without
    touching the image data.
    """
    def __call__(self, results):
        """Record shape metadata for each image field without resizing."""
        for field in results.get('img_fields', ['img']):
            shape = results[field].shape
            results['img_shape'] = shape
            # nothing is padded, so pad_shape mirrors img_shape
            results['pad_shape'] = shape
            results['scale_factor'] = np.array([1, 1, 1, 1], dtype=np.float32)
        return results
@PIPELINES.register_module()
class ImageTestTransformGPU(object):
    """Preprocess an image on the GPU: scale, BGR->RGB, normalize, pad.

    Parameters
    ----------
    img_norm_cfg : dict with 'mean', 'std' (per-channel) and 'to_rgb'.
    size_divisor : pad H/W up to a multiple of this value (or None).
    scale_factor : uniform resize factor applied to H and W.
    """
    def __init__(self, img_norm_cfg, size_divisor, scale_factor):
        self.img_norm_cfg = img_norm_cfg
        self.mean = torch.tensor(img_norm_cfg['mean'], dtype=torch.float32)
        self.std = torch.tensor(img_norm_cfg['std'], dtype=torch.float32)
        self.to_rgb = img_norm_cfg['to_rgb']
        # precompute reciprocal so normalization is a multiply, not a divide
        self.std_inv = 1/self.std
        self.size_divisor = size_divisor
        self.scale_factor = float(scale_factor)
    def __call__(self, results, device='cuda'):
        # record the start time so downstream code can measure latency
        start = perf_counter()
        # assumes results['img'] is an HxWx3 numpy array (BGR if to_rgb is
        # set, per mmcv convention) — TODO confirm against the loader
        img = results['img']
        ori_shape = img.shape
        h, w = img.shape[:2]
        new_size = (round(h*self.scale_factor), round(w*self.scale_factor))
        img_shape = (*new_size, 3)
        img = torch.from_numpy(img).to(device).float()
        if self.to_rgb:
            # swap channel order BGR -> RGB
            img = img[:, :, (2, 1, 0)]
        # to BxCxHxW
        img = img.permute(2, 0, 1).unsqueeze(0)
        # resize only if the rounded target differs from the current size
        if new_size[0] != img.shape[2] or new_size[1] != img.shape[3]:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # ignore the align_corner warnings
                img = F.interpolate(img, new_size, mode='bilinear')
        # per-channel normalization: (x - mean) * (1/std)
        for c in range(3):
            img[:, c, :, :] = img[:, c, :, :].sub(self.mean[c]) \
                .mul(self.std_inv[c])
        if self.size_divisor is not None:
            # pad bottom/right so H and W are multiples of size_divisor
            pad_h = int(np.ceil(new_size[0] / self.size_divisor)) \
                * self.size_divisor - new_size[0]
            pad_w = int(np.ceil(new_size[1] / self.size_divisor)) \
                * self.size_divisor - new_size[1]
            img = F.pad(img, (0, pad_w, 0, pad_h), mode='constant', value=0)
            pad_shape = (img.shape[2], img.shape[3], 3)
        else:
            pad_shape = img_shape
        # metadata in the format mmdet detectors expect
        img_meta = dict(
            filename=results['filename'],
            ori_filename=results['ori_filename'],
            ori_shape=ori_shape,
            img_shape=img_shape,
            pad_shape=pad_shape,
            scale_factor=self.scale_factor,
            flip=False,
            img_norm_cfg=self.img_norm_cfg,
            start_time=start,
        )
        # forward ground truth (if present) to the device as float tensors
        if 'gt_bboxes' in results:
            gt_bboxes = torch.from_numpy(results['gt_bboxes']) \
                .to(device).float()
            gt_labels = torch.from_numpy(results['gt_labels']) \
                .to(device).float()
            return dict(img=img, img_metas=[img_meta],
                        gt_bboxes=gt_bboxes, gt_labels=gt_labels)
        else:
return dict(img=img, img_metas=[img_meta]) | |
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
import os, cv2, json
import numpy as np
from progress.bar import Bar
from PIL import Image, ImageDraw
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import pickle
import gzip
from tools.darwin import *
# Paths for visualization output, trained weights and the Darwin dataset.
output_dir = "../Results/Results_detectron_solar"
weights_dir = "./output/Solar_20210513T1015"
dataset_dir = "../../Data/Solar"
categories = ["Hot spot","Activated diode 1/3","Multi-diode 2/3","Whole panel","String of panels","Shadow"]
# Build a Mask R-CNN R101-FPN config and point it at the trained checkpoint.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = os.path.join(weights_dir, "model_0029999.pth") # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
cfg.DATASETS.TEST = ("test",)
MetadataCatalog.get("test").thing_classes = categories
predictor = DefaultPredictor(cfg)
# TEST INFERENCE
# NOTE(review): the split name is 'val' here although the dataset registered
# above is "test" — confirm which split is intended.
dataset_dicts = get_darwin_dataset(dataset_dir, 'val', categories)
# print(dataset_dicts[0])
bar = Bar('Performing inference', max=len(dataset_dicts))
results = []
for d in dataset_dicts:
    # cv2.imread returns BGR; the Visualizer below is fed the same array
    im = cv2.imread(d["file_name"])
    # im = Image.open(d["file_name"])
    outputs = predictor(im) #format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
    # print(outputs['instances'])
    results.append(outputs)
    v = Visualizer(im, scale=1, metadata=MetadataCatalog.get("test"))
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    # cv2_imshow(out.get_image()[:, :, ::-1])
    # save the visualization as <basename>.jpg, channel-reversed for PIL
    Image.fromarray(out.get_image()[:, :, ::-1]).save(os.path.join(output_dir, d['file_name'].split('/')[-1].split('.')[0] + '.jpg'))
    bar.next()
bar.finish()
# pickle.dump(dataset_dicts, open( "dataset.p", "wb" ), protocol=4)
# with open('results_detectron_101_validation.json', 'w') as outfile:
# json.dump(results, outfile, indent=4)
# pickle.dump( results, open( "results.p", "wb" ), protocol=4)
# with gzip.GzipFile('results.pgz', 'w') as f:
# pickle.dump(results, f) | |
import cv2
import face_recognition
import numpy as np
import pickle
_KNOWN_FACE_ENCODINGS_FILE = 'known-face-encodings.pkl'
_KNOWN_FACE_IDS_FILE = 'known-face-ids.pkl'
# the two lists are parallel. meaning,
# face_encoding at index 0 of 'known_face_encodings' belongs to the face_id at index 0 of 'known_face_ids'
# known_face_encodings = []
# known_face_ids = []
def _read_list_of_objects_from_file(filepath):
obj_list = []
try:
file = open(filepath, "rb")
obj_list = pickle.load(file)
file.close()
except Exception as e:
print(f'error inside _read_list_of_objects_from_file() function. filepath={filepath}, error={str(e)}')
return obj_list
def _save_list_of_objects_to_file(filepath, obj_list=[]):
try:
file = open(filepath, "wb")
pickle.dump(obj_list, file)
file.close()
except Exception as e:
print(f'error inside _save_list_of_objects_to_file() function. filepath={filepath}, error={str(e)}')
return False
return True
def save_face_from_image_path(single_face_image_path, face_id):
    """Register the first face found in an image file under *face_id*.

    Loads the stored encoding/id lists, appends the new face's encoding and
    id, and writes both lists back to disk. Errors are printed, not raised.
    """
    try:
        encodings = _read_list_of_objects_from_file(_KNOWN_FACE_ENCODINGS_FILE)
        ids = _read_list_of_objects_from_file(_KNOWN_FACE_IDS_FILE)
        image = face_recognition.load_image_file(single_face_image_path)
        # the image is expected to contain a single face; take the first hit
        encoding = face_recognition.face_encodings(image)[0]
        encodings.append(encoding)
        ids.append(face_id)
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_ENCODINGS_FILE, obj_list=encodings)
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_IDS_FILE, obj_list=ids)
    except Exception as e:
        print(f'error inside save_face() function. image_path={single_face_image_path}, error={str(e)}')
def save_face_from_video_path(video_path, face_id):
    """Register every frame of a video that contains a face under *face_id*.

    Reads the video frame by frame; each frame with at least one detected
    face contributes one encoding (the first face) paired with *face_id*.
    Both lists are written back to disk once the video is exhausted.
    """
    try:
        known_face_encodings = _read_list_of_objects_from_file(_KNOWN_FACE_ENCODINGS_FILE)
        known_face_ids = _read_list_of_objects_from_file(_KNOWN_FACE_IDS_FILE)
        # NOTE(review): cap.release() is never called — the capture is only
        # freed when the object is garbage-collected.
        cap = cv2.VideoCapture(video_path)
        grabbed, frame = cap.read()
        frame_count = 0
        while grabbed:
            face_encoding = face_recognition.face_encodings(frame)
            if len(face_encoding) > 0:
                # only the first face in the frame is registered
                known_face_encodings.append(face_encoding[0])
                known_face_ids.append(face_id)
            print(frame_count)
            grabbed, frame = cap.read()
            frame_count += 1
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_ENCODINGS_FILE, obj_list=known_face_encodings)
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_IDS_FILE, obj_list=known_face_ids)
        print('Finish')
    except Exception as e:
        print(f'error inside save_face() function, error={str(e)}')
def save_face(face_image, face_id):
    """Register the first face found in an in-memory image under *face_id*.

    Same as save_face_from_image_path but takes an already-loaded image
    array instead of a file path. Errors are printed, not raised.
    """
    try:
        encodings = _read_list_of_objects_from_file(_KNOWN_FACE_ENCODINGS_FILE)
        ids = _read_list_of_objects_from_file(_KNOWN_FACE_IDS_FILE)
        # expects a single face; use the first detected encoding
        encoding = face_recognition.face_encodings(face_image)[0]
        encodings.append(encoding)
        ids.append(face_id)
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_ENCODINGS_FILE, obj_list=encodings)
        _save_list_of_objects_to_file(filepath=_KNOWN_FACE_IDS_FILE, obj_list=ids)
    except Exception as e:
        print(f'error inside save_face() function, error={str(e)}')
def match_face_from_image_path(image_path):
    """Return the known face id for every face found in the image file.

    Faces that do not match any registered encoding map to -1. Known
    encodings/ids are loaded from the pickle files on every call.
    """
    # load known_face_encodings and known_face_ids from file
    known_face_encodings = _read_list_of_objects_from_file(_KNOWN_FACE_ENCODINGS_FILE)
    known_face_ids = _read_list_of_objects_from_file(_KNOWN_FACE_IDS_FILE)
    detected_face_ids = []
    face_image = face_recognition.load_image_file(image_path)
    face_locations = face_recognition.face_locations(face_image)
    unknown_face_encodings = face_recognition.face_encodings(face_image, face_locations)
    for unknown_face_encoding in unknown_face_encodings:
        # np.argmin raises ValueError on an empty array, so handle the case
        # of no registered faces explicitly: every face is unknown.
        if not known_face_encodings:
            detected_face_ids.append(-1)
            continue
        matches = face_recognition.compare_faces(known_face_encodings, unknown_face_encoding)
        face_distances = face_recognition.face_distance(known_face_encodings, unknown_face_encoding)
        best_match_index = np.argmin(face_distances)  # closest known encoding
        if matches[best_match_index]:
            detected_face_ids.append(known_face_ids[best_match_index])
        else:
            detected_face_ids.append(-1)
    return detected_face_ids
def match_face(image):
    """Return the known face id for every face found in an in-memory image.

    Same as match_face_from_image_path but takes an already-loaded image
    array. Unrecognized faces map to -1.
    """
    # load known_face_encodings and known_face_ids from file
    known_face_encodings = _read_list_of_objects_from_file(_KNOWN_FACE_ENCODINGS_FILE)
    known_face_ids = _read_list_of_objects_from_file(_KNOWN_FACE_IDS_FILE)
    detected_face_ids = []
    face_locations = face_recognition.face_locations(image)
    unknown_face_encodings = face_recognition.face_encodings(image, face_locations)
    for unknown_face_encoding in unknown_face_encodings:
        # np.argmin raises ValueError on an empty array, so handle the case
        # of no registered faces explicitly: every face is unknown.
        if not known_face_encodings:
            detected_face_ids.append(-1)
            continue
        matches = face_recognition.compare_faces(known_face_encodings, unknown_face_encoding)
        face_distances = face_recognition.face_distance(known_face_encodings, unknown_face_encoding)
        best_match_index = np.argmin(face_distances)  # closest known encoding
        if matches[best_match_index]:
            detected_face_ids.append(known_face_ids[best_match_index])
        else:
            detected_face_ids.append(-1)
    return detected_face_ids
"""
Core MagGeo_Sequential Model
Created on Thur Feb 17, 22
@author: Fernando Benitez-Paez
"""
import datetime as dt
from datetime import timedelta
import sys,os
from matplotlib.pyplot import pause
import pandas as pd
import numpy as np
from tqdm import tqdm
import click
from yaml import load, SafeLoader
from viresclient import set_token
sys.path.append("utilities")
from utilities.MagGeoFunctions import getGPSData
from utilities.MagGeoFunctions import Get_Swarm_residuals
from utilities.MagGeoFunctions import ST_IDW_Process
from utilities.MagGeoFunctions import CHAOS_ground_values
@click.command()
@click.option('-p',
              '--parameters-file',
              type=click.Path(exists=True),
              help="Parameters file to use to configure the model.")
@click.option('--token',
              help="Enter your VirES Token.")
def main(parameters_file, token):
    """
    Annotate a GPS trajectory with geomagnetic data.

    Reads the YAML parameters file, downloads Swarm residuals for every day
    touched by the trajectory (via the VirES client), interpolates them onto
    each GPS fix (ST-IDW), combines them with CHAOS model ground values,
    derives the magnetic components (H, D, I, F) and writes the annotated
    table into the results folder.
    """
    print(f"--\nReading parameters file: {parameters_file}\n--")
    set_token(token=token)
    try:
        with open(parameters_file, 'r') as f:
            parameters = load(f,
                              Loader=SafeLoader)
        maggeo_params = parameters["maggeo"]
        gpsfilename = maggeo_params["gpsfilename"]
        Lat = maggeo_params["Lat"]
        Long = maggeo_params["Long"]
        DateTime = maggeo_params["DateTime"]
        altitude = maggeo_params["altitude"]
    except Exception as error:
        print('Error in parameters file format')
        raise error
    # All working directories are resolved relative to the current directory.
    base_dir=os.getcwd()
    temp_results_dir = os.path.join(base_dir, "temp_data")
    results_dir = os.path.join(base_dir, "results")
    data_dir = os.path.join(base_dir, "data")
    utilities_dir = os.path.join(base_dir, "utilities")
    GPSData = getGPSData(data_dir,gpsfilename,Lat,Long,DateTime,altitude)
    # Collect the dates for which Swarm data is needed. Fixes close to
    # midnight (< 04:00 or > 20:00) also pull the neighbouring day so the
    # interpolation has data on both sides of the fix.
    datestimeslist = []
    for index, row in GPSData.iterrows():
        datetimerow = row['gpsDateTime']
        daterow = row['dates']
        hourrow = row['times']
        # lexicographic comparison of 'HH:MM:SS' strings acts as a time
        # comparison because the format is fixed-width
        hourrow = hourrow.strftime('%H:%M:%S')
        if hourrow < '04:00:00':
            date_bfr = daterow - (timedelta(days=1))
            datestimeslist.append(daterow)
            datestimeslist.append(date_bfr)
        # NOTE(review): this is not an elif, so for an early fix (< 04:00)
        # the else branch below appends daterow a second time; harmless
        # because the list is de-duplicated right after, but worth fixing.
        if hourrow > '20:00:00':
            Date_aft = daterow + (timedelta(days=1))
            datestimeslist.append(daterow)
            datestimeslist.append(Date_aft)
        else:
            datestimeslist.append(daterow)
    # local helper; note the parameter name shadows the `list` builtin
    def uniquelistdates(list):
        x = np.array(list)
        uniquelist = np.unique(x)
        return uniquelist
    uniquelist_dates = uniquelistdates(datestimeslist)
    hours_t_day = 24 #MagGeo needs the entire Swarm data for each day of the identified day.
    hours_added = dt.timedelta(hours = hours_t_day)
    # Download one full day of residuals per unique date for each of the
    # three Swarm satellites (A, B, C).
    listdfa = []
    listdfb = []
    listdfc = []
    for d in tqdm(uniquelist_dates, desc="Getting Swarm Data"):
        #print("Getting Swarm data for date:",d )
        startdate = dt.datetime.combine(d, dt.datetime.min.time())
        enddate = startdate + hours_added
        SwarmResidualsA,SwarmResidualsB,SwarmResidualsC = Get_Swarm_residuals(startdate, enddate)
        listdfa.append(SwarmResidualsA)
        listdfb.append(SwarmResidualsB)
        listdfc.append(SwarmResidualsC)
    base_dir = os.getcwd() # Get main MagGeo directory
    temp_results_dir = os.path.join(base_dir, "temp_data")
    results_dir = os.path.join(base_dir, "results")
    data_dir = os.path.join(base_dir, "data")
    #TODO: find a more elegant way to concatenate the Swarm lists than the
    # CSV round-trip below, which only exists to coerce the column types
    # (Timestamp as datetime, epoch as index).
    PdSwarmRes_A = pd.concat(listdfa, join='outer', axis=0)
    PdSwarmRes_A.to_csv (os.path.join(temp_results_dir,'TotalSwarmRes_A.csv'), header=True)
    PdSwarmRes_B = pd.concat(listdfb, join='outer', axis=0)
    PdSwarmRes_B.to_csv (os.path.join(temp_results_dir,'TotalSwarmRes_B.csv'), header=True)
    PdSwarmRes_C = pd.concat(listdfc, join='outer', axis=0)
    PdSwarmRes_C.to_csv (os.path.join(temp_results_dir,'TotalSwarmRes_C.csv'), header=True)
    TotalSwarmRes_A = pd.read_csv(os.path.join(temp_results_dir,"TotalSwarmRes_A.csv"),low_memory=False, index_col='epoch')
    TotalSwarmRes_A['timestamp'] = pd.to_datetime(TotalSwarmRes_A['timestamp'])
    TotalSwarmRes_B = pd.read_csv(os.path.join(temp_results_dir,"TotalSwarmRes_B.csv"),low_memory=False, index_col='epoch')
    TotalSwarmRes_B['timestamp'] = pd.to_datetime(TotalSwarmRes_B['timestamp'])
    TotalSwarmRes_C = pd.read_csv(os.path.join(temp_results_dir,"TotalSwarmRes_C.csv"),low_memory=False, index_col='epoch')
    TotalSwarmRes_C['timestamp'] = pd.to_datetime(TotalSwarmRes_C['timestamp'])
    dn = [] ## List used to add all the GPS points with the annotated MAG Data. See the last bullet point of this process
    # Spatio-temporal inverse-distance-weighted interpolation of the Swarm
    # residuals onto every GPS fix.
    for index, row in tqdm(GPSData.iterrows(), total=GPSData.shape[0], desc="Annotating the GPS Trayectory"):
        GPSLat = row['gpsLat']
        GPSLong = row['gpsLong']
        GPSDateTime = row['gpsDateTime']
        GPSTime = row['epoch']
        GPSAltitude = row['gpsAltitude']
        #print("Process for:", index,"DateTime:",GPSDateTime)
        try:
            result=ST_IDW_Process(GPSLat,GPSLong,GPSAltitude, GPSDateTime,GPSTime, TotalSwarmRes_A, TotalSwarmRes_B, TotalSwarmRes_C)
            dn.append(result)
        # NOTE(review): the bare except deliberately converts any per-point
        # failure into a NaN row so one bad fix cannot abort the whole run,
        # but it also hides programming errors — consider narrowing it.
        except:
            #print("Ups!.That was a bad Swarm Point, let's keep working with the next point")
            result_badPoint= {'Latitude': GPSLat, 'Longitude': GPSLong, 'Altitude':GPSAltitude, 'DateTime': GPSDateTime, 'N_res': np.nan, 'E_res': np.nan, 'C_res':np.nan, 'TotalPoints':0, 'Minimum_Distance':np.nan, 'Average_Distance':np.nan}
            dn.append(result_badPoint)
            continue
    GPS_ResInt = pd.DataFrame(dn)
    GPS_ResInt.to_csv (os.path.join(temp_results_dir,"GPS_ResInt.csv"), header=True)
    # Combine interpolated residuals with CHAOS model ground values to get
    # the full field components (N, E, C) plus the internal-field values.
    X_obs, Y_obs, Z_obs, X_obs_internal, Y_obs_internal, Z_obs_internal = CHAOS_ground_values(utilities_dir,GPS_ResInt)
    GPS_ResInt['N'] =pd.Series(X_obs)
    GPS_ResInt['E'] =pd.Series(Y_obs)
    GPS_ResInt['C'] =pd.Series(Z_obs)
    GPS_ResInt['N_Obs'] =pd.Series(X_obs_internal)
    GPS_ResInt['E_Obs'] =pd.Series(Y_obs_internal)
    GPS_ResInt['C_Obs'] =pd.Series(Z_obs_internal)
    GPS_ResInt.drop(columns=['N_res', 'E_res','C_res'], inplace=True)
    # Having interpolated and weighted the magnetic values, derive the
    # remaining components: horizontal intensity H, declination D,
    # inclination I and total intensity F.
    GPS_ResInt['H'] = np.sqrt((GPS_ResInt['N']**2)+(GPS_ResInt['E']**2))
    # np.arctan2 is used instead of np.arctan because it is quadrant-safe
    DgpsRad = np.arctan2(GPS_ResInt['E'],GPS_ResInt['N'])
    GPS_ResInt['D'] = np.degrees(DgpsRad)
    IgpsRad = np.arctan2(GPS_ResInt['C'],GPS_ResInt['H'])
    GPS_ResInt['I'] = np.degrees(IgpsRad)
    GPS_ResInt['F'] = np.sqrt((GPS_ResInt['N']**2)+(GPS_ResInt['E']**2)+(GPS_ResInt['C']**2))
    originalGPSTrack=pd.read_csv(os.path.join(data_dir,gpsfilename))
    MagGeoResult = pd.concat([originalGPSTrack, GPS_ResInt], axis=1)
    #Drop duplicated columns. Latitude, Longitude, and DateTime will not be part of the final result.
    MagGeoResult.drop(columns=['Latitude', 'Longitude', 'DateTime'], inplace=True)
    #Exporting the CSV file
    outputfile ="GeoMagResult_"+gpsfilename
    export_csv = MagGeoResult.to_csv (os.path.join(results_dir,outputfile), index = None, header=True)
    print("Congrats! MagGeo has processed your GPS trayectory. Find the annotated table: " + outputfile + " in the folder results.")
if __name__ == '__main__':
main()
print("End of MagGeo") | |
import numpy as np
import json
import scipy.interpolate
import matplotlib.pyplot as plt
from collections import OrderedDict
from pprint import pprint
import matplotlib
import argparse
##################################################################################################################
## This script allows compare between the data of the Xsens, Mobilenet and the Kinect. ##
# Command-line interface: paths to the three interpolated recordings plus
# the body part to compare.
parser = argparse.ArgumentParser(description='Writing a Video from frames')
parser.add_argument('--file_Kinect', type=str, default="../Données/Kinect/chris1/chris1_1_interpolated.txt")
parser.add_argument('--file_Mobilenet', type=str, default="../Données/Mobilenet/chris1/chris1_1_interpolated.txt")
parser.add_argument('--file_Xsens', type=str, default="../Données/Xsens/chris1/chris1_1_interpolated.txt")
# default fixed: 'lEblow' was a typo present in neither body-part mapping,
# so omitting --body_part crashed with KeyError further down the script
parser.add_argument('--body_part', type=str, default='lElbow')
args = parser.parse_args()
# All the file paths
file_kinect=args.file_Kinect
file_xsens=args.file_Xsens
file_mobilenet=args.file_Mobilenet
# Load the three recordings (JSON), keeping key order stable
with open(file_kinect) as f1:
    dataKinect = json.load(f1, object_pairs_hook=OrderedDict)
with open(file_xsens) as f2:
    dataXsens = json.load(f2, object_pairs_hook=OrderedDict)
with open(file_mobilenet) as f3:
    dataMobilenet = json.load(f3, object_pairs_hook=OrderedDict)
# Extract the per-time position dictionaries
positions_Kinect=dataKinect['positions']
positions_Xsens=dataXsens['positions']
positions_Mobilenet=dataMobilenet['positions']
body_part=args.body_part
# Mapping from Kinect joint names to the Xsens segment names
body_parts_Xsens={"Head":"Head","mShoulder":"T8","rShoulder":"RightUpperArm","rElbow":"RightForeArm",
                  "rWrist":"RightHand","lShoulder":"LeftUpperArm","lElbow":"LeftForeArm","lWrist":"LeftHand",
                  "rHip":"RightUpperLeg","rKnee":"RightLowerLeg","rAnkle":"RightFoot","lHip":"LeftUpperLeg",
                  "lKnee":"LeftLowerLeg","lAnkle":"LeftFoot"}
# Mobilenet's right/left are mirrored with respect to the Kinect, so this
# mapping swaps sides
body_parts_Mobilenet={"Head":"Head","lAnkle":"rAnkle",'lElbow':'rElbow', 'lHip':'rHip', 'lKnee':'rKnee',
                      'lShoulder':'rShoulder', 'lWrist':'rWrist', 'mShoulder':'mShoulder', 'rAnkle':'lAnkle',
                      'rElbow':'lElbow', 'rHip':'lHip', 'rKnee':'lKnee', 'rShoulder':'lShoulder',
                      'rWrist':'lWrist'}
# Body parts available in the Kinect recording
bPartsKinect=list(list(positions_Kinect.values())[0].keys())
common_body_parts=['Head', 'lAnkle', 'lElbow', 'lHip', 'lKnee', 'lShoulder', 'lWrist', 'mShoulder', 'rAnkle',
                   'rElbow', 'rHip', 'rKnee', 'rShoulder', 'rWrist']
# Variance across the three systems, per body part and per timestamp
Variances={}
# For each time we create a new dictionary containing all body parts and their variances
for time in positions_Kinect.keys():
    Variances[time]={}
    # Fill for each body part
    for bPart in common_body_parts:
        # Xsens uses different joint names; translate via body_parts_Xsens
        XbPart=body_parts_Xsens[bPart]
        # Mobilenet sides are mirrored; translate via body_parts_Mobilenet.
        # NOTE(review): Kinect/Xsens entries are sliced [1:] but the
        # Mobilenet entry is not — confirm the Mobilenet records have no
        # leading timestamp/confidence element to drop.
        var=np.var((positions_Kinect[time][bPart][1:],positions_Mobilenet[time][body_parts_Mobilenet[bPart]],positions_Xsens[time][XbPart][1:]))
        Variances[time][bPart]=var
# Plot the evolution of the three-system variance for the chosen body part
bPart=body_parts_Mobilenet[body_part]
Times=list(Variances.keys())
Times_float=[]
for time in Times:
    Times_float.append(float(time))
# timestamps are dict keys, hence unordered: sort numerically for plotting
Times_float=sorted(Times_float)
Var_bPart=[]
for time in Times_float:
    Var_bPart.append(Variances[str(time)][body_parts_Mobilenet[body_part]])
plt.plot(Times_float,Var_bPart,label=body_part)
plt.title('Variance de la Mobilenet, Kinect et Xsens')
plt.legend()
fig = matplotlib.pyplot.gcf()
fig.savefig('../Données/Courbes/Variance_%s.jpg'%body_part)
plt.show()
# Comparison against the ground truth (Xsens): pairwise std deviations
Difference_Mobilenet=[]
Difference_Kinect=[]
bPart=body_parts_Mobilenet[body_part]
Times_float=[]
Times=list(Variances.keys())
Times_float=[]
for time in Times:
    Times_float.append(float(time))
Times_float=sorted(Times_float)
for time in Times_float:
    XbPart=body_parts_Xsens[bPart]
    MbPart=body_parts_Mobilenet[bPart]
    diff_Mobilenet=np.sqrt(np.var((positions_Mobilenet[str(time)][MbPart][:],positions_Xsens[str(time)][XbPart][1:])))
    diff_Kinect=np.sqrt(np.var((positions_Kinect[str(time)][bPart][1:],positions_Xsens[str(time)][XbPart][1:])))
    Difference_Mobilenet.append(diff_Mobilenet)
    Difference_Kinect.append(diff_Kinect)
plt.plot(Times_float,Difference_Mobilenet,color='blue',label='Var Mob-Xs')
plt.plot(Times_float,Difference_Kinect,color='red',label='Var Kinect-Xs')
plt.title("Variance entre chaque deux algo pour %s"%body_part)
plt.legend()
fig = matplotlib.pyplot.gcf()
fig.savefig('../Données/Courbes/Variance_entre_chaque_algo_%s.jpg'%body_part)
plt.show()
# Comparison against the ground truth (Xsens): Euclidean distances per frame
Difference_Mobilenet=[]
Difference_Kinect=[]
Times_float=[]
Times=list(Variances.keys())
Times_float=[]
for time in Times:
    Times_float.append(float(time))
Times_float=sorted(Times_float)
MbPart=body_parts_Mobilenet[bPart]
for time in Times_float:
    # Mobilenet stores (x, y) at [0]/[1]; Kinect and Xsens at [1]/[2]
    diff_Mobilenet=np.sqrt((positions_Mobilenet[str(time)][MbPart][0]-positions_Xsens[str(time)][XbPart][1])**2+(positions_Mobilenet[str(time)][MbPart][1]-positions_Xsens[str(time)][XbPart][2])**2)
    diff_Kinect=np.sqrt((positions_Kinect[str(time)][bPart][1]-positions_Xsens[str(time)][XbPart][1])**2+(positions_Kinect[str(time)][bPart][2]-positions_Xsens[str(time)][XbPart][2])**2)
    Difference_Mobilenet.append(diff_Mobilenet)
    Difference_Kinect.append(diff_Kinect)
plt.plot(Times_float,Difference_Mobilenet,color='blue',label='dMobil--dXsens')
plt.plot(Times_float,Difference_Kinect,color='red',label='dKinect--dXsens')
plt.legend()
plt.title("Comparaison vérité terrain pour %s"%body_part)
fig = matplotlib.pyplot.gcf()
fig.savefig('../Données/Courbes/Comparaison vérité terrain pour %s.jpg'%body_part)
plt.show()
# Collect the raw x and y coordinates of the body part from each system
bPart=body_parts_Mobilenet[body_part]
x_bPart_valuesX=[]
y_bPart_valuesX=[]
x_bPart_valuesK=[]
y_bPart_valuesK=[]
x_bPart_valuesM=[]
y_bPart_valuesM=[]
Times_float=[]
Times=list(Variances.keys())
Times_float=[]
for time in Times:
    Times_float.append(float(time))
Times_float=sorted(Times_float)
MbPart=body_parts_Mobilenet[bPart]
for time in Times_float:
    xX=positions_Xsens[str(time)][body_parts_Xsens[bPart]][1]
    yX=positions_Xsens[str(time)][body_parts_Xsens[bPart]][2]
    x_bPart_valuesX.append(xX)
    y_bPart_valuesX.append(yX)
    xK=positions_Kinect[str(time)][bPart][1]
    yK=positions_Kinect[str(time)][bPart][2]
    x_bPart_valuesK.append(xK)
    y_bPart_valuesK.append(yK)
    xM=positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][0]
    yM=positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][1]
    x_bPart_valuesM.append(xM)
    y_bPart_valuesM.append(yM)
# Overlay the y trajectories of the three systems
plt.plot(Times_float,y_bPart_valuesX,'green',label='Xsens')
plt.plot(Times_float,y_bPart_valuesM,'blue',label='Mobilenet')
plt.plot(Times_float,y_bPart_valuesK,'red',label='Kinect')
plt.legend()
plt.title("y values after interpolation %s"%body_part)
fig = matplotlib.pyplot.gcf()
axis="y"
fig.savefig('../Données/Courbes/%s_values_%s.jpg'%(axis,body_part))
plt.show()
# Overlay the x trajectories of the three systems
plt.plot(Times_float,x_bPart_valuesX,'green',label='Xsens')
plt.plot(Times_float,x_bPart_valuesM,'blue',label='Mobilenet')
plt.plot(Times_float,x_bPart_valuesK,'red',label='Kinect')
plt.legend()
plt.title("x values after interpolation %s"%body_part)
fig = matplotlib.pyplot.gcf()
axis="x"
fig.savefig('../Données/Courbes/%s_values_%s.jpg'%(axis,body_part))
plt.show() | |
from multiprocessing import Pool
from numpy import array
from ..array_array import apply, separate_and_apply
def apply_with_vector(ve, ma, fu, se=False, n_jo=1):
    """Apply *fu* between vector *ve* and every row of matrix *ma* in parallel.

    Parameters:
        ve: the fixed vector passed to every worker call.
        ma: iterable of rows; one worker task is dispatched per row.
        fu: the function handed through to ``apply``/``separate_and_apply``.
        se: when True use ``separate_and_apply`` instead of ``apply``.
        n_jo: number of worker processes.

    Returns:
        numpy array of the per-row results, in row order.
    """
    ap = separate_and_apply if se else apply
    # BUG FIX: the original never released the pool when starmap raised,
    # leaking worker processes.  Pool's context manager calls terminate()
    # on exit (same as the original happy path) even on error.
    with Pool(processes=n_jo) as po:
        re_ = array(po.starmap(ap, ([ve, ro, fu] for ro in ma)))
    return re_
import scipy
from scipy.misc import imsave
import os
import cv2
import numpy as tf
# Removes items that appear in a listmore than once
def remove_duplicates(image_list):
    """Return the distinct entries of *image_list* (order not preserved)."""
    unique_entries = set(image_list)
    return list(unique_entries)
# Gets a list of all images (including if it's used multiple times)
def find_images(list_of_folders):
    """Recursively collect the file names found in each folder.

    A name appears once per folder that contains it (duplicates preserved).

    Parameters:
        list_of_folders: list of directory paths to scan with os.listdir.

    Returns:
        list of file names from all folders (last folder's names first).
    """
    length = len(list_of_folders)
    # BUG FIX: the original used `length is 0` — identity comparison on an
    # int, which is an implementation detail (and a SyntaxWarning on 3.8+).
    if length == 0:
        return []
    # Scan the last folder, then recurse on the rest.
    images_in_this_folder = list(os.listdir(list_of_folders[length - 1]))
    return images_in_this_folder + find_images(list_of_folders[:-1])
# Sees if an image string is in a folder
def image_in_folder(image, folder):
    """Return True when file name *image* exists inside directory *folder*.

    BUG FIX: the original body was `return each in os.listdir(folder)` —
    `each` is undefined in this scope, so every call raised NameError.
    """
    return image in os.listdir(folder)
# Add border to image
def add_border(image, image_width, image_height, border_width):
    """Paste *image* onto a black canvas, offset by *border_width* pixels.

    The canvas is (image_width, image_height, 3) uint8; the zero-filled
    margin around the pasted image forms the border.
    """
    canvas = tf.zeros((image_width, image_height, 3), tf.uint8)
    rows, cols = image.shape[0], image.shape[1]
    row_stop = border_width + rows
    col_stop = border_width + cols
    canvas[border_width:row_stop, border_width:col_stop, :] = image
    return canvas
def create_image_rows(list_of_folders, image_width, image_height,
                      output_folder, border_width, between_images):
    """Combine copies of each image found in ALL folders into one wide row.

    For every file name that appears in every folder of *list_of_folders*,
    the per-folder versions are bordered and pasted side by side on a white
    canvas, then saved into *output_folder* under the same file name.

    Parameters:
        list_of_folders: folders to combine; an image is only processed when
            present in all of them.
        image_width, image_height: tile size INCLUDING the border.
        output_folder: created on demand; receives the combined rows.
        border_width: black frame drawn around each tile by add_border.
        between_images: horizontal whitespace (pixels) between tiles.
    """
    # List of unique images
    unique_images = remove_duplicates(find_images(list_of_folders))
    # Create output folder
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # Loop through each unique image
    for image in unique_images:
        # Count the number of times this image appears
        counter = 0
        for folder in list_of_folders:
            if image_in_folder(image, folder):
                counter += 1
        # Skip images that are missing from at least one folder.
        if counter != len(list_of_folders):
            continue
        # Create an empty (white) canvas wide enough for all tiles + gaps.
        blank_image = tf.ones((
            image_width,
            image_height*counter + (counter-1)*between_images + 2*between_images,
            3), tf.uint8)
        blank_image.fill(255)
        # Fill the empty image
        image_counter = 0
        for folder in list_of_folders:
            for image_name in os.listdir(folder):
                # BUG FIX: the original compared with `is`, an identity test
                # that is generally False for distinct equal strings, so no
                # tile was ever inserted.  Use equality.
                if image_name == image:
                    # Load the image
                    image_path = os.path.join(folder, image)
                    current_image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
                    current_image = cv2.cvtColor(current_image, cv2.COLOR_BGR2RGB)
                    current_image = add_border(current_image, image_width, image_height, border_width)
                    # Insert the image
                    height_start = image_height * image_counter
                    height_end = image_height * (image_counter+1)
                    # BUG FIX: `image_counter is 0` replaced with `== 0`.
                    if image_counter == 0:
                        margin = image_counter * between_images
                    else:
                        margin = (image_counter+2) * between_images
                    height_start += margin
                    height_end += margin
                    blank_image[:, height_start:height_end, :] = current_image
                    image_counter += 1
        # Save the image
        output_path = os.path.join(output_folder, image)
        # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this
        # call only works on old SciPy — consider imageio.imwrite. Confirm
        # the pinned scipy version before changing.
        imsave(output_path, blank_image)
if __name__ == "__main__":
    # Folders whose images will be combined side by side (edit manually).
    input_folders = []
    # Directory created to hold the combined rows.
    output_folder = "nototv_rows"
    # Black frame around each tile, in pixels.
    border_width = 3
    # Horizontal whitespace between tiles, in pixels.
    between_images = 40
    # Each tile is the 256-pixel image plus its border on both sides.
    tile_side = 256 + 2*border_width
    create_image_rows(
        list_of_folders=input_folders,
        image_width=tile_side,
        image_height=tile_side,
        output_folder=output_folder,
        border_width=border_width,
        between_images=between_images)
# Basic libs
import os, time, glob, random, pickle, copy, torch
import open3d as o3d
import numpy as np
import open3d
from scipy.spatial.transform import Rotation
from torchvision.transforms import transforms
# Dataset parent class
from torch.utils.data import Dataset
from collections import namedtuple
from common.camera_intrinsics import adjust_intrinsic
import numpy as np
import trimesh
from PIL import Image
_imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
def odometry_to_positions(odometry):
    """Turn a flattened 3x4 odometry row into a 4x4 homogeneous transform.

    The 12 values are reshaped to the upper 3x4 block and a [0, 0, 0, 1]
    row is appended.
    """
    upper_block = odometry.reshape(3, 4)
    homogeneous_row = [0, 0, 0, 1]
    return np.vstack((upper_block, homogeneous_row))
def read_calib_file(filepath):
    """Parse a KITTI calibration file into ``{key: np.ndarray of floats}``.

    Lines whose value part is not numeric (e.g. dates) are skipped.  The
    'Tr' entry is then overridden with a hard-coded velodyne-to-camera
    extrinsic (3x4 = [R | T]).
    """
    data = {}
    with open(filepath, 'r') as calib_file:
        for line in calib_file.readlines():
            key, value = line.split(':', 1)
            try:
                data[key] = np.array([float(token) for token in value.split()])
            except ValueError:
                # Non-numeric entries (dates etc.) are irrelevant here.
                continue
    rotation = np.array([
        7.533745e-03, -9.999714e-01, -6.166020e-04, 1.480249e-02, 7.280733e-04,
        -9.998902e-01, 9.998621e-01, 7.523790e-03, 1.480755e-02
    ]).reshape(3, 3)
    translation = np.array([-4.069766e-03, -7.631618e-02, -2.717806e-01]).reshape(3, 1)
    # Override whatever 'Tr' the file contained with this fixed extrinsic.
    data['Tr'] = np.hstack([rotation, translation])
    return data
def load_calib(sequence_path):
    """Load intrinsic/extrinsic calibration for one KITTI sequence.

    Reads ``calib.txt`` inside *sequence_path* and derives, per camera 0-3:
    the 3x4 rectified projection matrix, the velodyne-to-camera transform,
    and the 3x3 intrinsics; plus the gray and rgb stereo baselines.  The
    result is an immutable namedtuple so it cannot be modified later.
    """
    data = {}
    # Parse the raw calibration key/value file.
    filedata = read_calib_file(os.path.join(sequence_path, 'calib.txt'))
    # 3x4 rectified projection matrices, stored as P_rect_00 .. P_rect_30.
    projections = []
    for cam in range(4):
        proj = np.reshape(filedata['P{}'.format(cam)], (3, 4))
        data['P_rect_{}0'.format(cam)] = proj
        projections.append(proj)
    # Velodyne -> rectified cam0, promoted to homogeneous 4x4.
    velo2cam0 = np.vstack([np.reshape(filedata['Tr'], (3, 4)), [0, 0, 0, 1]])
    data['velo2cam0'] = velo2cam0
    # Cameras 1-3 differ from cam0 by a pure x-translation encoded in P_rect.
    for cam in (1, 2, 3):
        shift = np.eye(4)
        proj = projections[cam]
        shift[0, 3] = proj[0, 3] / proj[0, 0]
        data['velo2cam{}'.format(cam)] = shift.dot(velo2cam0)
    # Intrinsics are the left 3x3 block of each projection matrix.
    for cam in range(4):
        data['K_cam{}'.format(cam)] = projections[cam][0:3, 0:3]
    # Baselines: project each camera origin into the velodyne frame and
    # measure the distances between them.
    origin = np.array([0, 0, 0, 1])
    velo_origins = [
        np.linalg.inv(data['velo2cam{}'.format(cam)]).dot(origin)
        for cam in range(4)
    ]
    data['b_gray'] = np.linalg.norm(velo_origins[1] - velo_origins[0])  # gray baseline
    data['b_rgb'] = np.linalg.norm(velo_origins[3] - velo_origins[2])  # rgb baseline
    return namedtuple('CalibData', data.keys())(*data.values())
class KITTI(Dataset):
    """
    We follow D3Feat to add data augmentation part.
    We first voxelize the pcd and get matches
    Then we apply data augmentation to pcds. KPConv runs over processed pcds, but later for loss computation, we use pcds before data augmentation
    """
    # Maps raw-data drive names to odometry sequence ids.
    mapping = {'2011_10_03_drive_0027_sync': '00',
               '2011_10_03_drive_0042_sync': '01',
               '2011_10_03_drive_0034_sync': '02',
               '2011_09_30_drive_0016_sync': '04',
               '2011_09_30_drive_0018_sync': '05',
               '2011_09_30_drive_0020_sync': '06',
               '2011_09_30_drive_0027_sync': '07',
               '2011_09_30_drive_0028_sync': '08',
               '2011_09_30_drive_0033_sync': '09',
               '2011_09_30_drive_0034_sync': '10'}

    def __init__(self, config, phase):
        """Build the pair list and preload per-sequence poses/intrinsics.

        Parameters:
            config: object providing at least ``size`` (crop size) and
                ``resize`` (bool) — read in __getitem__.
            phase: kept for API symmetry with other datasets; not used here.
        """
        super(KITTI, self).__init__()
        # NOTE(review): `self.path` is not assigned anywhere in this class;
        # it must come from a subclass or be set before __init__ — confirm.
        BASE_PATH = self.path
        # BUG FIX: these five paths were plain locals in the original, so
        # every later access via `self.PATH_*` raised AttributeError.  They
        # are now stored on the instance.
        self.PATH_ODOMETRY = os.path.join(BASE_PATH, 'odometry/data_odometry_velodyne/dataset/poses/')
        self.PATH_COLOR = os.path.join(BASE_PATH, 'odometry/data_odometry_color/dataset/sequences/')
        self.PATH_DEPTH = os.path.join(BASE_PATH, 'depth/data_depth_annotated/')
        self.PATH = os.path.join(BASE_PATH, 'preprocessed/overlap10.txt')
        self.PATH_CENTER = os.path.join(BASE_PATH, 'preprocessed/overlap50/')
        self.collate_fn = None
        self.config = config
        # One line per frame pair: "<drive_id> <folder> <t1> <t2>".
        self.files = open(self.PATH).readlines()
        self.pose = {}
        self.intrinsic = {}
        self.transforms = {}
        #self.crop_size = config.size
        self.IMAGE_SIZE = config.size
        self.transforms['color'] = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(_imagenet_stats['mean'], std=_imagenet_stats['std']),
        ])
        self.resize = transforms.Resize(self.IMAGE_SIZE[0])
        # Cache extrinsics (poses) and intrinsics once per sequence id.
        for line in self.files:
            split = line.split()
            drive_id = split[0]
            sequence_id = self.mapping[drive_id]
            if sequence_id not in self.pose:
                data_path = os.path.join(self.PATH_ODOMETRY, '{}.txt'.format(sequence_id))
                pose = np.genfromtxt(data_path)
                self.pose[sequence_id] = pose
            if sequence_id not in self.intrinsic:
                calib = load_calib(os.path.join(self.PATH_COLOR, sequence_id))
                intrinsic = calib.K_cam2
                self.intrinsic[sequence_id] = intrinsic

    def __len__(self):
        # One sample per listed frame pair.
        return len(self.files)

    def build_dataset(self):
        # Kept for interface compatibility; all work happens in __init__.
        pass

    def crop(self, image1, image2, central_match):
        """Crop IMAGE_SIZE windows centered at a correspondence.

        Parameters:
            image1, image2: CHW tensors.
            central_match: (i1, j1, i2, j2) pixel coordinates of a match.

        Returns:
            (crop1, offset1, crop2, offset2) where offsets are the top-left
            (i, j) of each crop in the source image.
        """
        # Clamp each window so it stays inside the image bounds.
        bbox1_i = max(int(central_match[0] - self.IMAGE_SIZE[0] // 2), 0)
        if bbox1_i + self.IMAGE_SIZE[0] >= image1.shape[1]:
            bbox1_i = image1.shape[1] - self.IMAGE_SIZE[0]
        bbox1_j = max(int(central_match[1] - self.IMAGE_SIZE[1] // 2), 0)
        if bbox1_j + self.IMAGE_SIZE[1] >= image1.shape[2]:
            bbox1_j = image1.shape[2] - self.IMAGE_SIZE[1]
        bbox2_i = max(int(central_match[2] - self.IMAGE_SIZE[0] // 2), 0)
        if bbox2_i + self.IMAGE_SIZE[0] >= image2.shape[1]:
            bbox2_i = image2.shape[1] - self.IMAGE_SIZE[0]
        bbox2_j = max(int(central_match[3] - self.IMAGE_SIZE[1] // 2), 0)
        if bbox2_j + self.IMAGE_SIZE[1] >= image2.shape[2]:
            bbox2_j = image2.shape[2] - self.IMAGE_SIZE[1]
        return (
            image1[:,
                   bbox1_i: bbox1_i + self.IMAGE_SIZE[0],
                   bbox1_j: bbox1_j + self.IMAGE_SIZE[1]],
            np.array([bbox1_i, bbox1_j]),
            image2[:,
                   bbox2_i: bbox2_i + self.IMAGE_SIZE[0],
                   bbox2_j: bbox2_j + self.IMAGE_SIZE[1]],
            np.array([bbox2_i, bbox2_j])
        )

    def __getitem__(self, idx):
        """Load one color/depth frame pair plus camera geometry.

        Returns a dict with color1/2 (normalized CHW), depth1/2 (meters,
        -1 where invalid), intrinsics1/2, crop offsets bbox1/2, and
        world2camera1/2 transforms.
        """
        split = self.files[idx].split()
        drive_id = split[0]
        folder = split[1]
        sequence_id = self.mapping[drive_id]
        t1, t2 = int(split[2].split('.')[0]), int(split[3].split('.')[0])
        odometry1 = self.pose[sequence_id][t1]
        odometry2 = self.pose[sequence_id][t2]
        camera2world1 = odometry_to_positions(odometry1)
        camera2world2 = odometry_to_positions(odometry2)
        depth1 = os.path.join(self.PATH_DEPTH, folder, drive_id, 'proj_depth', 'groundtruth', 'image_02', '{:010d}.png'.format(t1))
        #depth1 = os.path.join(self.PATH_DEPTH, folder, drive_id, 'proj_depth', 'velodyne_raw', 'image_02', '{:010d}.png'.format(t1))
        depth1 = Image.open(depth1)
        original_size = depth1.size[::-1]
        # make sure we have a proper 16bit depth map here.. not 8bit!
        assert (np.max(depth1) > 255)
        depth1 = np.array(depth1, dtype=int)
        # BUG FIX: np.float was removed from NumPy (>= 1.24); np.float64 is
        # the identical dtype.  KITTI depth maps store millimeters*?/256.
        depth1 = depth1.astype(np.float64) / 256.
        depth1[depth1 == 0] = -1.
        depth1 = torch.from_numpy(depth1).float()
        depth2 = os.path.join(self.PATH_DEPTH, folder, drive_id, 'proj_depth', 'groundtruth', 'image_02', '{:010d}.png'.format(t2))
        #depth2 = os.path.join(self.PATH_DEPTH, folder, drive_id, 'proj_depth', 'velodyne_raw', 'image_02', '{:010d}.png'.format(t2))
        depth2 = Image.open(depth2)
        #depth2 = self.transforms['depth'](depth2)
        assert (np.max(depth2) > 255)
        depth2 = np.array(depth2, dtype=int)
        depth2 = depth2.astype(np.float64) / 256.
        depth2[depth2 == 0] = -1.
        depth2 = torch.from_numpy(depth2).float()
        color1 = os.path.join(self.PATH_COLOR, sequence_id, 'image_2', "{:06d}.png".format(t1))
        color1 = Image.open(color1)
        color1 = self.transforms['color'](color1)
        color2 = os.path.join(self.PATH_COLOR, sequence_id, 'image_2', "{:06d}.png".format(t2))
        color2 = Image.open(color2)
        color2 = self.transforms['color'](color2)
        intrinsic = self.intrinsic[sequence_id]
        if self.config.resize:
            # Resize (then trim width) instead of cropping; intrinsics must
            # be rescaled to match the new image size.
            original_size = color1.shape[1:]
            color1 = self.resize(color1)[:, :, :self.IMAGE_SIZE[1]]
            color2 = self.resize(color2)[:, :, :self.IMAGE_SIZE[1]]
            depth1 = self.resize(depth1[None, :, :])[0, :, :self.IMAGE_SIZE[1]]
            depth2 = self.resize(depth2[None, :, :])[0, :, :self.IMAGE_SIZE[1]]
            bbox1 = torch.FloatTensor([-0.5, -0.5])
            bbox2 = torch.FloatTensor([-0.5, -0.5])
            resized_size = color1.shape[1:]
            intrinsic = adjust_intrinsic(intrinsic, original_size, resized_size)
        else:
            # Crop around a precomputed correspondence with enough margin.
            central_match = []
            name = '_'.join([split[1], split[2].split('.')[0], split[3].split('.')[0]]) + '.npy'
            central_match = np.load(os.path.join(self.PATH_CENTER, drive_id, name))
            # bound_check: keep matches whose window fits in both images
            mask_h1 = (central_match[:, 0] - self.IMAGE_SIZE[0] / 2 >= 0) & (central_match[:, 0] + self.IMAGE_SIZE[0] / 2 < color1.shape[1])
            mask_w1 = (central_match[:, 1] - self.IMAGE_SIZE[1] / 2 >= 0) & (central_match[:, 1] + self.IMAGE_SIZE[1] / 2 < color1.shape[2])
            mask_h2 = (central_match[:, 2] - self.IMAGE_SIZE[0] / 2 >= 0) & (central_match[:, 2] + self.IMAGE_SIZE[0] / 2 < color2.shape[1])
            mask_w2 = (central_match[:, 3] - self.IMAGE_SIZE[1] / 2 >= 0) & (central_match[:, 3] + self.IMAGE_SIZE[1] / 2 < color2.shape[2])
            central_match = central_match[mask_h1 & mask_w1 & mask_h2 & mask_w2]
            central_idx = np.random.choice(central_match.shape[0])
            color1, bbox1, color2, bbox2 = self.crop(color1, color2, central_match[central_idx])
            depth1 = depth1[
                bbox1[0]: bbox1[0] + self.IMAGE_SIZE[0],
                bbox1[1]: bbox1[1] + self.IMAGE_SIZE[1]]
            depth2 = depth2[
                bbox2[0]: bbox2[0] + self.IMAGE_SIZE[0],
                bbox2[1]: bbox2[1] + self.IMAGE_SIZE[1]]
        return {'color1': color1, 'depth1': depth1,
                'color2': color2, 'depth2': depth2,
                #'id1': fname1, 'id2': fname2,
                'intrinsics1': torch.from_numpy(intrinsic).float(),
                'intrinsics2': torch.from_numpy(intrinsic).float(),
                'bbox1': bbox1, 'bbox2': bbox2,
                'world2camera1': torch.inverse(torch.from_numpy(camera2world1).float()),
                'world2camera2': torch.inverse(torch.from_numpy(camera2world2).float())}
if __name__ == "__main__":
    # Smoke test: iterate the whole dataset once, printing progress.
    # BUG FIX: the class defined above is `KITTI`; `KITTIDataset` was
    # undefined and raised NameError immediately.
    dataset = KITTI(None, 'train')
    points = []
    for idx, data in enumerate(dataset):
        print(idx, len(dataset))
        points.append(data)
    # NOTE(review): each `data` is a dict, so sum() over them raises
    # TypeError; this line presumably meant to average a numeric field —
    # confirm intent before relying on it.
    print(sum(points) / len(dataset))
import numpy as onp
import legate.numpy as np
import timeit
import deriche_numpy as np_impl
def kernel(alpha, imgIn):
    """Deriche recursive filter over *imgIn* with smoothing parameter *alpha*.

    Performs a causal + anti-causal pass along the columns, combines them,
    then repeats along the rows.  Returns the filtered image; *imgIn* is
    not modified.
    """
    # Filter coefficients (kept term-for-term identical to the reference).
    decay = np.exp(-alpha)
    k = (1.0 - decay) * (1.0 - decay) / (
        1.0 + alpha * decay - np.exp(2.0 * alpha))
    a1 = a5 = k
    a2 = a6 = k * decay * (alpha - 1.0)
    a3 = a7 = k * decay * (alpha + 1.0)
    a4 = a8 = -k * np.exp(-2.0 * alpha)
    b1 = 2.0 ** (-alpha)
    b2 = -np.exp(-2.0 * alpha)
    c1 = c2 = 1
    # Horizontal causal (left-to-right) pass.
    causal = np.empty_like(imgIn)
    causal[:, 0] = a1 * imgIn[:, 0]
    # The second column is computed in two statements (kept from the
    # original, which split the expression deliberately).
    causal[:, 1] = b1 * causal[:, 0]
    causal[:, 1] += a1 * imgIn[:, 1] + a2 * imgIn[:, 0]
    for col in range(2, imgIn.shape[1]):
        causal[:, col] = (a1 * imgIn[:, col] + a2 * imgIn[:, col - 1] +
                          b1 * causal[:, col - 1] + b2 * causal[:, col - 2])
    # Horizontal anti-causal (right-to-left) pass.
    anticausal = np.empty_like(imgIn)
    anticausal[:, -1] = 0.0
    anticausal[:, -2] = a3 * imgIn[:, -1]
    for col in range(imgIn.shape[1] - 3, -1, -1):
        anticausal[:, col] = (a3 * imgIn[:, col + 1] + a4 * imgIn[:, col + 2] +
                              b1 * anticausal[:, col + 1] + b2 * anticausal[:, col + 2])
    imgOut = c1 * (causal + anticausal)
    # Vertical passes operate on the horizontal result, reusing the two
    # scratch arrays.
    causal[0, :] = a5 * imgOut[0, :]
    causal[1, :] = a5 * imgOut[1, :] + a6 * imgOut[0, :] + b1 * causal[0, :]
    for row in range(2, imgIn.shape[0]):
        causal[row, :] = (a5 * imgOut[row, :] + a6 * imgOut[row - 1, :] +
                          b1 * causal[row - 1, :] + b2 * causal[row - 2, :])
    anticausal[-1, :] = 0.0
    anticausal[-2, :] = a7 * imgOut[-1, :]
    for row in range(imgIn.shape[0] - 3, -1, -1):
        anticausal[row, :] = (a7 * imgOut[row + 1, :] + a8 * imgOut[row + 2, :] +
                              b1 * anticausal[row + 1, :] + b2 * anticausal[row + 2, :])
    imgOut[:] = c2 * (causal + anticausal)
    return imgOut
def init_data(W, H, datatype):
    """Allocate and initialize the Deriche benchmark arrays.

    Parameters:
        W, H: image width and height (rows and columns of the arrays).
        datatype: numpy dtype (or Python float) used for all arrays.

    Returns:
        (alpha, imgIn, imgOut, y1, y2) — alpha is the fixed smoothing
        parameter 0.25; imgIn holds the deterministic test pattern
        ((313*i + 991*j) % 65536) / 65535; the other arrays are
        uninitialized scratch buffers of shape (W, H).
    """
    alpha = datatype(0.25)
    # PERFORMANCE: the original filled imgIn with a double Python loop
    # (O(W*H) scalar operations); broadcasting computes it in one shot.
    rows = onp.arange(W, dtype=onp.int64).reshape(-1, 1)
    cols = onp.arange(H, dtype=onp.int64)
    imgIn = (((313 * rows + 991 * cols) % 65536) / 65535.0).astype(datatype)
    imgOut = onp.empty((W, H), dtype=datatype)
    y1 = onp.empty((W, H), dtype=datatype)
    y2 = onp.empty((W, H), dtype=datatype)
    return alpha, imgIn, imgOut, y1, y2
if __name__ == "__main__":
    # Problem size and inputs (alpha/imgIn stay global: timeit reads them).
    W, H = 1000, 1000
    alpha, imgIn, imgOut, y1, y2 = init_data(W, H, np.float64)
    # Run reference (plain NumPy) and Legate implementations once, and
    # verify they agree before timing anything.
    reference_out = np_impl.kernel(alpha, imgIn)
    legate_out = kernel(alpha, imgIn)
    assert onp.allclose(reference_out, legate_out)
    # Benchmark: 20 single-shot repetitions per implementation.
    time = timeit.repeat("np_impl.kernel(alpha, imgIn)",
                         setup="pass",
                         repeat=20,
                         number=1,
                         globals=globals())
    print("NumPy median time: {}".format(np.median(time)))
    time = timeit.repeat("kernel(alpha, imgIn)",
                         setup="pass",
                         repeat=20,
                         number=1,
                         globals=globals())
    print("Legate median time: {}".format(np.median(time)))
import pymysql
import datetime
from pandas import DataFrame
import numpy as np
from dbutils.steady_db import connect
class RankDB():
    """
    a mariadb wrapper for the following tables:
        - stock_data
        - corporate_data
        - corporate_financials
        - model_event_queue
        - top_performers
    """

    def __init__(self):
        # NOTE(review): hard-coded placeholder credentials — load from
        # configuration or environment variables in production.
        self.credentials = ['localhost', 'username',
                            'password', 'db']
        # steady_db connection survives idle disconnects transparently.
        self.db = connect(
            creator=pymysql,
            host=self.credentials[0],
            user=self.credentials[1],
            password=self.credentials[2],
            database=self.credentials[3],
            autocommit=True,
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor
        )

    def get_stock_dataframes(self, tickers=False, timeframe=False, limit=False, live=False):
        """
        Returns stock OHLCV information, one DataFrame per ticker.
        Parameters:
            - tickers (False|list) :: If False, every ticker available will be used
            - timeframe (False|list) :: [start, end].  Values are formatted with
                .strftime, so they must be datetime objects (the original doc
                also allowed strings, which would raise AttributeError here —
                TODO confirm intended contract).
            - limit (False|int) :: How many results to return. If False all results will be returned
            - live (Boolean) :: If true, data from the live_stock_data table will be queried. If False, data from the
                stock_data table is queried
        Returns:
            - frames (list) :: A list of DataFrames indexed by date (or
                timestamp when live=True).  (The original docstring said
                dict; the code has always returned a list.)
        """
        cursor = self.db.cursor()
        if not tickers:
            tickers = self.get_stock_list()
        if not tickers:
            raise ValueError("Tickers not found, DB Issue??")
        if live:
            select_query = "SELECT `ticker`, `timestamp`, `open`, `bid`, `bid_size`, `ask`, `ask_size`, `close`, `volume`, `market_cap`, `change`, `percent_change` FROM `live_stock_data` WHERE `ticker` = %s"
        else:
            select_query = "SELECT `ticker`, `date`, `open`, `high`, `low`, `close`, `volume` FROM `stock_data` WHERE `ticker` = %s "
        if timeframe:
            # TODO: live data needs finer-grained (5-second) time bounds;
            # this formats whole days only.
            select_query += 'AND (`{}` BETWEEN "{}" AND "{}")'.format(
                "date" if not live else "timestamp", timeframe[0].strftime("%Y-%m-%d"), timeframe[1].strftime("%Y-%m-%d"))
        limit_string = ";" if not limit else " LIMIT {};".format(limit)
        select_query += " ORDER BY `{}` DESC{}".format(
            "date" if not live else "timestamp", limit_string)
        frames = []
        for ticker in tickers:
            cursor.execute(select_query, ticker)
            results = cursor.fetchall()
            if not results:
                continue
            # convert each result to a dataframe
            frames.append(DataFrame(results).set_index(
                "date" if not live else "timestamp"))
        cursor.close()  # was previously left open
        return frames

    def get_stock_list(self):
        """
        Returns every stock as a list
        """
        cursor = self.db.cursor()
        sql = "SELECT `ticker` FROM `corporate_info`;"
        cursor.execute(sql)
        results = cursor.fetchall()
        cursor.close()
        results = [x['ticker'] for x in results]
        return results

    def has_prices(self, ticker, week_start, week_end):
        """
        Checks to see if there exists a price for a stock at the given time period.
        returns true if there are results and false is there arent
        """
        # SECURITY FIX: the original interpolated the arguments directly into
        # the SQL string (injection risk); bind them as parameters instead.
        sql = ("SELECT `id` FROM `stock_data` WHERE `ticker` = %s "
               "AND `date` BETWEEN %s AND %s LIMIT 1;")
        cursor = self.db.cursor()
        cursor.execute(sql, (ticker, week_start, week_end))
        results = cursor.fetchall()
        cursor.close()
        return len(results) != 0

    def cache_training_week(self, ranking_object):
        """Insert one weekly ranking row.

        Returns False (without inserting) when *ranking_object* does not
        carry exactly the expected keys; True on success.
        """
        rank_keys = ['week_start', 'week_end',
                     'average_volume', 'num_stocks', 'ranking']
        if set(ranking_object.keys()) != set(rank_keys):
            return False
        r = ranking_object
        cursor = self.db.cursor()
        insertion_query = "INSERT INTO `weekly_ranking_data`(`id`, `week_start`, `week_end`, `average_volume`, `num_stocks`, `ranking`)"
        insertion_query += " VALUES (NULL, %s, %s, %s, %s, %s);"
        cursor.execute(insertion_query, (r['week_start'], r['week_end'],
                                         r['average_volume'], r['num_stocks'], r['ranking']))
        cursor.close()
        return True

    def get_training_weeks(self):
        # the total number of training weeks
        """
        Returns a list of [week_start, week_end] pairs, ordered by start date.
        """
        sql = "SELECT `week_start`, `week_end` FROM `weekly_ranking_data` ORDER BY `week_start`;"
        cursor = self.db.cursor()
        cursor.execute(sql)
        results = cursor.fetchall()
        cursor.close()
        training_periods = [[result['week_start'], result['week_end']]
                            for result in results]
        return training_periods

    def get_sentiment_cache(self, ticker, time_period=False):
        """
        Retrieves the sentiments given an articles publish date and a ticker value
        Parameters:
            - ticker :: The ticker of which to retrieve the sentiment information from
            - time_period :: A list with the 0th index being the start date and the 1st
                index corresponding to the end date. if not specified all
                sentiments will be returned
        Returns:
            - sentiment_cache :: A list of dicts from the start date to the end date (if specified)
                with the following keys: [published_on, sentiment]
        """
        cursor = self.db.cursor()
        query = "SELECT `published_on`, `sentiment` from `article_sentiment_cache` WHERE `ticker` = %s"
        if time_period:
            start = time_period[0]
            end = time_period[1]
            # BUG FIX: the original appended `.format(start, end)` to a %s
            # placeholder string — a no-op; the values are bound in execute().
            query = query + " AND `published_on` BETWEEN %s AND %s"
        query = query + " ORDER BY `published_on` DESC;"
        if time_period:
            cursor.execute(query, (ticker, start, end))
        else:
            cursor.execute(query, ticker)
        results = cursor.fetchall()
        cursor.close()
        return results

    def test(self):
        """Ad-hoc sanity check comparing literal and parameterized BETWEEN
        queries on the sentiment cache; results are intentionally unused."""
        cursor = self.db.cursor()
        d1 = datetime.datetime.strptime("2021-07-24", "%Y-%m-%d")
        d2 = datetime.datetime.strptime("2021-07-25", "%Y-%m-%d")
        q1 = "SELECT COUNT(*) FROM article_sentiment_cache WHERE published_on BETWEEN '2021-07-24' AND '2021-07-25';"
        q2 = "SELECT COUNT(*) FROM article_sentiment_cache WHERE published_on BETWEEN %s AND %s;"
        cursor.execute(q1)
        results = cursor.fetchall()
        cursor.execute(q2, (d1, d2))
        res2 = cursor.fetchall()
"""
Test uzf for the vs2d comparison problem in the uzf documentation, except in
this case there are 15 gwf and uzf cells rather than just one cell.
"""
import os
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
# Model/test names and one workspace directory per model under temp/.
ex = ["gwf_uzf03a"]
exdirs = []
for s in ex:
    exdirs.append(os.path.join("temp", s))
ddir = "data"
# Grid dimensions: a 15-layer, single row/column soil column.
nlay, nrow, ncol = 15, 1, 1
def build_models():
    """Build and write the MODFLOW 6 simulation(s) for the 15-cell UZF column.

    For every workspace in the module-level ``exdirs`` list, assembles TDIS,
    a Newton GWF model, IMS, DIS, IC, NPF, STO, GHB, a 15-cell vertically
    connected UZF package with ET, OC, and head observations, then writes
    the input files to disk.  Uses module-level ``ex``, ``exdirs``, ``nlay``,
    ``nrow`` and ``ncol``.
    """
    # Temporal discretization: one 17.7-day stress period, 177 time steps.
    perlen = [17.7]
    nper = len(perlen)
    nstp = [177]
    tsmult = nper * [1.0]
    # Spatial discretization: 1x1 cells, 2 m thick layers.
    delr = 1.0
    delc = 1.0
    delv = 2.0
    top = 0.0
    botm = [top - (k + 1) * delv for k in range(nlay)]
    strt = -22.0
    laytyp = 1
    ss = 0.0
    sy = 0.4
    # unsat props
    seconds_to_days = 60.0 * 60.0 * 24.0
    hk = 4.0e-6 * seconds_to_days
    # saturated vertical conductivity
    thts = 0.4
    # saturated water content
    thtr = 0.2
    # residual water content
    thti = thtr
    # initial water content
    infiltration_rate = 0.5 * hk
    evapotranspiration_rate = 5e-8 * seconds_to_days
    evt_extinction_depth = 2.0
    brooks_corey_epsilon = 3.5
    # brooks corey exponent
    tdis_rc = []
    for idx in range(nper):
        tdis_rc.append((perlen[idx], nstp[idx], tsmult[idx]))
    for idx, dir in enumerate(exdirs):
        name = ex[idx]
        # build MODFLOW 6 files
        ws = dir
        sim = flopy.mf6.MFSimulation(
            sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
        )
        # create tdis package
        tdis = flopy.mf6.ModflowTdis(
            sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
        )
        # create gwf model
        gwfname = name
        newtonoptions = "NEWTON UNDER_RELAXATION"
        gwf = flopy.mf6.ModflowGwf(
            sim,
            modelname=gwfname,
            newtonoptions=newtonoptions,
            save_flows=True,
        )
        # create iterative model solution and register the gwf model with it
        nouter, ninner = 100, 10
        hclose, rclose, relax = 1.5e-6, 1e-6, 0.97
        imsgwf = flopy.mf6.ModflowIms(
            sim,
            print_option="SUMMARY",
            outer_dvclose=hclose,
            outer_maximum=nouter,
            under_relaxation="DBD",
            under_relaxation_theta=0.7,
            inner_maximum=ninner,
            inner_dvclose=hclose,
            rcloserecord=rclose,
            linear_acceleration="BICGSTAB",
            scaling_method="NONE",
            reordering_method="NONE",
            relaxation_factor=relax,
            filename="{}.ims".format(gwfname),
        )
        sim.register_ims_package(imsgwf, [gwf.name])
        dis = flopy.mf6.ModflowGwfdis(
            gwf,
            nlay=nlay,
            nrow=nrow,
            ncol=ncol,
            delr=delr,
            delc=delc,
            top=top,
            botm=botm,
            idomain=np.ones((nlay, nrow, ncol), dtype=int),
        )
        # initial conditions
        ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
        # node property flow
        npf = flopy.mf6.ModflowGwfnpf(
            gwf, save_flows=False, icelltype=laytyp, k=hk
        )
        # storage
        sto = flopy.mf6.ModflowGwfsto(
            gwf,
            save_flows=False,
            iconvert=laytyp,
            ss=ss,
            sy=sy,
            steady_state={0: False},
            transient={0: True},
        )
        # ghb: fixed-head boundary at the bottom of the column
        ghbspdict = {
            0: [[(nlay - 1, 0, 0), strt, hk / (0.5 * delv)]],
        }
        ghb = flopy.mf6.ModflowGwfghb(
            gwf,
            print_input=True,
            print_flows=True,
            stress_period_data=ghbspdict,
            save_flows=False,
        )
        # note: for specifying lake number, use fortran indexing!
        # one water-content observation at the midpoint of each layer
        uzf_obs = {
            name
            + ".uzf.obs.csv": [
                ("wc{}".format(k + 1), "water-content", k + 1, 0.5 * delv)
                for k in range(nlay)
            ]
        }
        surfdep = 1.0e-5
        # UZF package data: top cell (landflag=1) + one cell per lower layer,
        # each vertically connected to the cell below it.
        uzf_pkdat = [
            [
                0,
                (0, 0, 0),
                1,
                1,
                surfdep,
                hk,
                thtr,
                thts,
                thti,
                brooks_corey_epsilon,
                "uzf01",
            ]
        ] + [
            [
                k,
                (k, 0, 0),
                0,
                k + 1,
                surfdep,
                hk,
                thtr,
                thts,
                thti,
                brooks_corey_epsilon,
                "uzf0{}".format(k + 1),
            ]
            for k in range(1, nlay)
        ]
        # bottom cell has no underlying UZF cell (ivertcon = -1)
        uzf_pkdat[-1][3] = -1
        # stress period data: infiltration and ET applied to the top cell only
        uzf_spd = {
            0: [
                [
                    0,
                    infiltration_rate,
                    evapotranspiration_rate,
                    evt_extinction_depth,
                    thtr,
                    0.0,
                    0.0,
                    0.0,
                ],
            ]
        }
        uzf = flopy.mf6.ModflowGwfuzf(
            gwf,
            print_input=True,
            print_flows=True,
            save_flows=True,
            boundnames=True,
            simulate_et=True,
            unsat_etwc=True,
            ntrailwaves=15,
            nwavesets=40,
            nuzfcells=len(uzf_pkdat),
            packagedata=uzf_pkdat,
            perioddata=uzf_spd,
            budget_filerecord="{}.uzf.bud".format(name),
            observations=uzf_obs,
            filename="{}.uzf".format(name),
        )
        # output control
        oc = flopy.mf6.ModflowGwfoc(
            gwf,
            budget_filerecord="{}.bud".format(gwfname),
            head_filerecord="{}.hds".format(gwfname),
            headprintrecord=[
                ("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
            ],
            saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
            printrecord=[("HEAD", "LAST"), ("BUDGET", "ALL")],
        )
        # head observations in the top two cells
        obs_lst = []
        obs_lst.append(["obs1", "head", (0, 0, 0)])
        obs_lst.append(["obs2", "head", (1, 0, 0)])
        obs_dict = {"{}.obs.csv".format(gwfname): obs_lst}
        obs = flopy.mf6.ModflowUtlobs(
            gwf, pname="head_obs", digits=20, continuous=obs_dict
        )
        # write MODFLOW 6 files
        sim.write_simulation()
    return
def make_plot(sim, obsvals):
    """Plot water-content depth profiles at four selected times.

    Saves the figure as fig-xsect.pdf in the simulation workspace.
    """
    print("making plots...")
    name = ex[sim.idxsim]
    ws = exdirs[sim.idxsim]
    # shows curves for times 2.5, 7.5, 12.6, 17.7
    # which are indices 24, 74, 125, and -1
    selected_rows = [24, 74, 125, -1]
    profiles = np.array([list(obsvals[i]) for i in selected_rows])
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(6, 3))
    ax = fig.add_subplot(1, 1, 1)
    # observation depths: layer midpoints of the 15 x 2 m layers
    depth = np.arange(1, 31, 2.0)
    for profile in profiles:
        label = "time {}".format(profile[0])
        ax.plot(profile[1:], depth, label=label, marker="o")
    ax.set_ylim(0.0, 20.0)
    ax.set_xlim(0.15, 0.4)
    ax.invert_yaxis()
    ax.set_xlabel("Water Content")
    ax.set_ylabel("Depth, in meters")
    plt.legend()
    fname = os.path.join(ws, "fig-xsect.pdf")
    plt.savefig(fname, bbox_inches="tight")
    return
def eval_flow(sim):
    """Post-process one simulation: verify UZF/GWF budget consistency,
    flow residuals, and unsaturated ET, then load the observation CSV."""
    print("evaluating flow...")
    name = ex[sim.idxsim]
    ws = exdirs[sim.idxsim]
    # check binary grid file
    fname = os.path.join(ws, name + ".dis.grb")
    grbobj = flopy.mf6.utils.MfGrdFile(fname)
    # IA/JA are 1-based in the grid file; shift to 0-based indexing
    ia = grbobj._datadict["IA"] - 1
    ja = grbobj._datadict["JA"] - 1
    bpth = os.path.join(ws, name + ".uzf.bud")
    bobj = flopy.utils.CellBudgetFile(bpth, precision="double")
    gwf_recharge = bobj.get_data(text="GWF")
    bpth = os.path.join(ws, name + ".bud")
    bobj = flopy.utils.CellBudgetFile(bpth, precision="double")
    flow_ja_face = bobj.get_data(text="FLOW-JA-FACE")
    uzf_recharge = bobj.get_data(text="UZF-GWRCH")
    errmsg = "uzf rch is not equal to negative gwf rch"
    # UZF's recharge to GWF must mirror GWF's recharge from UZF.
    for gwr, uzr in zip(gwf_recharge, uzf_recharge):
        assert np.allclose(gwr["q"], -uzr["q"]), errmsg
    # Check on residual, which is stored in diagonal position of
    # flow-ja-face. Residual should be less than convergence tolerance,
    # or this means the residual term is not added correctly.
    for fjf in flow_ja_face:
        fjf = fjf.flatten()
        res = fjf[ia[:-1]]
        errmsg = "min or max residual too large {} {}".format(
            res.min(), res.max()
        )
        assert np.allclose(res, 0.0, atol=1.0e-6), errmsg
    bpth = os.path.join(ws, name + ".uzf.bud")
    bobj = flopy.utils.CellBudgetFile(bpth, precision="double")
    uzet = bobj.get_data(text="UZET")
    # expected unsaturated ET: only the top cell loses water
    uz_answer = [-0.00432] + 14 * [0.0]
    # only check later time steps, after the ET rate has stabilized
    for uz in uzet[20:]:
        assert np.allclose(uz["q"], uz_answer), "unsat ET is not correct"
    # Make plot of obs
    fpth = os.path.join(sim.simpath, name + ".uzf.obs.csv")
    try:
        obsvals = np.genfromtxt(fpth, names=True, delimiter=",")
    except:
        assert False, 'could not load data from "{}"'.format(fpth)
    # plotting is disabled by default; flip to True for local inspection
    if False:
        make_plot(sim, obsvals)
    return
# - No need to change any code below
def test_mf6model():
    """Generator-style test: yield one mf6 run per model workspace."""
    framework = testing_framework()
    # build the model input files first
    build_models()
    # then hand one Simulation per workspace to the framework runner
    for sim_index, workspace in enumerate(exdirs):
        yield framework.run_mf6, Simulation(
            workspace, exfunc=eval_flow, idxsim=sim_index
        )
    return
def main():
    """Build and run every test model outside of pytest."""
    framework = testing_framework()
    # build the model input files
    build_models()
    # run each model synchronously
    for sim_index, workspace in enumerate(exdirs):
        framework.run_mf6(
            Simulation(workspace, exfunc=eval_flow, idxsim=sim_index)
        )
    return
if __name__ == "__main__":
    # Announce which test file is running standalone, then execute it.
    print("standalone run of {}".format(os.path.basename(__file__)))
    main()
from tqdm import tqdm
import numpy as np
from polaris2.micro.micro import det
from polaris2.geomvis import R3S2toR, utilmpl, phantoms
import logging
log = logging.getLogger('log')
# Render N frames of a single dipole radiator moving along a spiral,
# imaged by a lightfield (FourFLF) and a conventional (FourF) detector.
N = 80
log.info('Making '+str(N)+' frames')
for i in tqdm(range(N)):
    # Dipole orientation sampled along a sphere spiral, parameterized 0..1.
    xp, yp, zp = phantoms.sphere_spiral(i/(N-1))
    # BUG FIX: the original left `xlabel=..., title=...)` as an orphaned
    # continuation line (SyntaxError); they are keyword arguments of the
    # xyzj_list call and have been merged back into it.
    obj = R3S2toR.xyzj_list([0,0,0.1*i,xp,yp,zp], shape=[10,10,4],
                            xlabel='10$\\times$10$\\times$4 $\mu$m${}^3$',
                            title='Single dipole radiator')
    obj.build_actors()
    d1 = det.FourFLF(ulens_aperture='square', irrad_title='Lightfield detector irradiance')
    im1 = d1.xyzj_single_to_xy_det(obj)
    im1.data /= 100
    d2 = det.FourF()
    im2 = d2.xyzj_single_to_xy_det(obj)
    im2.data /= 100
    istr = '{:03d}'.format(i)
    # im1.save_tiff('./out2/'+istr+'.tif')
    utilmpl.plot([[obj, im2, im1]], './out/'+istr+'.png')
import numpy as np
from scipy.optimize import linear_sum_assignment as linear_assignment
# from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
# nmi = normalized_mutual_info_score
# ari = adjusted_rand_score
def acc(y_true, y_pred):
    """
    Calculate clustering accuracy under the best one-to-one mapping between
    predicted cluster ids and true labels (Hungarian assignment).
    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    # Cast predictions too so they are valid integer indices below.
    y_pred = y_pred.astype(np.int64)
    assert y_pred.size == y_true.size
    # Contingency matrix: w[i, j] counts samples predicted i with true label j.
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # Maximize matched counts by minimizing (w.max() - w).
    ind = linear_assignment(w.max() - w)
    ind = np.vstack(ind).T
    return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size
import matplotlib.pyplot as plt
import numpy as np
from fast_image_classification.preprocessing_utilities import read_img_from_path
from fast_image_classification.training_utilities import get_seq
def plot_figures(names, figures, nrows=1, ncols=1):
    """Plot a dictionary of figures.
    Parameters
    ----------
    figures : <title, figure> dictionary
    ncols : number of columns of subplots wanted in the display
    nrows : number of rows of subplots wanted in the figure
    """
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=(128, 128))
    axes = axeslist.ravel()
    for idx, title in enumerate(names):
        ax = axes[idx]
        img = np.squeeze(figures[title])
        # Grayscale (2-D) images need an explicit gray colormap.
        if img.ndim == 2:
            ax.imshow(img, cmap=plt.gray())
        else:
            ax.imshow(img)
        ax.set_title(title)
        ax.set_axis_off()
    plt.tight_layout()  # optional
    plt.show()
# Build a gallery: the original image plus repeated random augmentations.
figures = {}
img_1 = read_img_from_path('../example/data/0d539aa7c5f448c5aca2db15eeebb0c3.jpg')
figures['Original'] = img_1
seq = get_seq()
for i in range(1, 12):
    augmented = seq.augment_image(img_1)
    figures["Augmented %s" % i] = augmented
# Only the first 9 augmented images are displayed (2 rows x 5 cols = 10 slots).
names = ['Original'] + ["Augmented %s" % i for i in range(1, 10)]
# plot of the images in a figure, with 2 rows and 5 columns
plot_figures(names, figures, 2, 5)
# License: MIT
from typing import List
import numpy as np
from openbox.utils.config_space import Configuration, ConfigurationSpace
# Lifecycle status tags for configurations (presumably consumed by a
# scheduler elsewhere in the package — confirm against callers).
WAITING = 'waiting'
RUNNING = 'running'
COMPLETED = 'completed'
PROMOTED = 'promoted'
def sample_configuration(configuration_space: ConfigurationSpace, excluded_configs: List[Configuration] = None):
    """
    sample one config not in excluded_configs
    """
    excluded = [] if excluded_configs is None else excluded_configs
    max_sample_cnt = 1000
    # Rejection-sample until a non-excluded config appears, capped at
    # max_sample_cnt draws.
    for _ in range(max_sample_cnt):
        candidate = configuration_space.sample_configuration()
        if candidate not in excluded:
            return candidate
    raise ValueError('Cannot sample non duplicate configuration after %d iterations.' % max_sample_cnt)
def sample_configurations(configuration_space: ConfigurationSpace, num: int,
                          excluded_configs: List[Configuration] = None) -> List[Configuration]:
    """Sample `num` distinct configurations, skipping duplicates and any
    entry of excluded_configs. Loops until enough unique samples are found."""
    excluded = [] if excluded_configs is None else excluded_configs
    sampled = []
    while len(sampled) < num:
        candidate = configuration_space.sample_configuration(1)
        if candidate not in sampled and candidate not in excluded:
            sampled.append(candidate)
    return sampled
def expand_configurations(configs: List[Configuration], configuration_space: ConfigurationSpace, num: int,
                          excluded_configs: List[Configuration] = None):
    """Grow `configs` in place with fresh unique samples until it holds
    `num` entries, then return it."""
    excluded = [] if excluded_configs is None else excluded_configs
    needed = num - len(configs)
    added = 0
    while added < needed:
        candidate = configuration_space.sample_configuration(1)
        if candidate not in configs and candidate not in excluded:
            configs.append(candidate)
            added += 1
    return configs
def minmax_normalization(x):
    """Rescale values to [0, 1]; a constant input maps to all ones."""
    lo = min(x)
    span = max(x) - lo
    if span == 0:
        return [1.0] * len(x)
    return [(float(v) - lo) / float(span) for v in x]
def std_normalization(x):
    """Z-score the values; a constant input maps to all zeros."""
    arr = np.array(x)
    sigma = np.std(arr)
    if sigma == 0:
        return np.array([0.] * len(x))
    return (arr - np.mean(arr)) / sigma
def norm2_normalization(x):
    """Scale the vector to unit L2 norm.

    An all-zero input is returned unchanged (as an array) instead of
    producing 0/0 -> NaN, mirroring the zero-spread guards in the
    min-max and std normalizers in this module.
    """
    z = np.array(x)
    norm = np.linalg.norm(z)
    if norm == 0:
        return z
    return z / norm
import sys
# Fail fast on unsupported interpreters before importing anything heavy.
if sys.version_info < (3, 6):
    sys.stdout.write(
        "Minkowski Engine requires Python 3.6 or higher. Please use anaconda https://www.anaconda.com/distribution/ for isolated python environment.\n"
    )
    sys.exit(1)
# torch must already be installed: its cpp_extension helpers drive this build.
try:
    import torch
except ImportError:
    raise ImportError('Pytorch not found. Please install pytorch first.')
import codecs
import os
import re
import subprocess
from sys import argv, platform
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from distutils.sysconfig import get_python_inc
if platform == 'win32':
    raise ImportError('Windows is currently not supported.')
elif platform == 'darwin':
    # Set the distutils to use clang instead of g++ for valid std
    os.environ['CC'] = '/usr/local/opt/llvm/bin/clang'
    os.environ['CXX'] = '/usr/local/opt/llvm/bin/clang'
# Directory containing this setup.py; used by read()/find_version() below.
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Return the text content of the file at here/<parts...>."""
    path = os.path.join(here, *parts)
    with codecs.open(path, 'r') as handle:
        return handle.read()
def find_version(*file_paths):
    """Extract the `__version__ = '...'` string from a file or raise."""
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def run_command(*args):
    """Run an external command and wait for it to finish."""
    subprocess.call(list(args))
# For cpu only build
CPU_ONLY = '--cpu_only' in argv
KEEP_OBJS = '--keep_objs' in argv
BLAS = [arg for arg in argv if '--blas' in arg]
# Default to a CUDA build; downgraded to CppExtension when --cpu_only is set.
Extension = CUDAExtension
compile_args = [
    'make',
    '-j%d' % min(os.cpu_count(), 12),  # parallel compilation
    'PYTHON=' + sys.executable,  # curr python
]
extra_compile_args = ['-Wno-deprecated-declarations']
extra_link_args = []
libraries = ['minkowski']
# extra_compile_args+=['-g'] # Uncomment for debugging
if CPU_ONLY:
    print('\nCPU_ONLY build')
    argv.remove('--cpu_only')
    compile_args += ['CPU_ONLY=1']
    extra_compile_args += ['-DCPU_ONLY']
    Extension = CppExtension
else:
    # system python installation
    libraries.append('cusparse')
if KEEP_OBJS:
    print('\nUsing built objects')
    argv.remove('--keep_objs')
# User supplied --blas=<name>: trust it (NOTE(review): validated via assert,
# which is stripped under python -O — confirm acceptable for a CLI check).
if len(BLAS) > 0:
    BLAS = BLAS[0]
    argv.remove(BLAS)
    BLAS = BLAS.split('=')[1]
    assert BLAS in ['openblas', 'mkl', 'atlas', 'blas']
    libraries.append(BLAS)
    blas_inc_dirs = os.environ.get('BLAS_INCLUDE_DIRS')
    compile_args += [f'BLAS_INCLUDE_DIRS={blas_inc_dirs}']
    blas_lib_dirs = os.environ.get('BLAS_LIBRARY_DIRS')
    if blas_lib_dirs is not None:
        extra_link_args += [f'-Wl,-rpath,{blas_lib_dirs}']
else:
    # find the default BLAS library
    import numpy.distutils.system_info as sysinfo
    # Search blas in this order
    for blas in ['openblas', 'atlas', 'mkl', 'blas']:
        if 'libraries' in sysinfo.get_info(blas):
            BLAS = blas
            libraries += sysinfo.get_info(blas)['libraries']
            break
    else:
        # for-else: runs only when the loop finished without break,
        # i.e. BLAS not found
        raise ImportError(' \
\nBLAS not found from numpy.distutils.system_info.get_info. \
\nPlease specify BLAS with: python setup.py install --blas=openblas" \
\nfor more information, please visit https://github.com/StanfordVL/MinkowskiEngine/wiki/Installation'
                          )
print(f'\nUsing BLAS={BLAS}')
compile_args += ['BLAS=' + BLAS]
if 'darwin' in platform:
    extra_compile_args += ['-stdlib=libc++']
# Build the native static library with make before setuptools links it.
if not KEEP_OBJS:
    run_command('make', 'clean')
run_command(*compile_args)
# Python interface
setup(
    name='MinkowskiEngine',
    version=find_version('MinkowskiEngine', '__init__.py'),
    install_requires=['torch', 'numpy'],
    packages=[
        'MinkowskiEngine', 'MinkowskiEngine.utils', 'MinkowskiEngine.modules'
    ],
    package_dir={'MinkowskiEngine': './MinkowskiEngine'},
    ext_modules=[
        Extension(
            name='MinkowskiEngineBackend',
            # Parent of the python include dir is added so headers next to it
            # can be resolved.
            include_dirs=['./', get_python_inc() + "/.."],
            # 'objs' holds the archive produced by the make invocation above.
            library_dirs=['objs'],
            sources=[
                'pybind/minkowski.cpp',
            ],
            libraries=libraries,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
        )
    ],
    cmdclass={'build_ext': BuildExtension},
    author='Christopher Choy',
    author_email='chrischoy@ai.stanford.edu',
    description='a convolutional neural network library for sparse tensors',
    long_description=read('README.md'),
    long_description_content_type="text/markdown",
    url='https://github.com/StanfordVL/MinkowskiEngine',
    keywords=[
        'pytorch', 'Minkowski Engine', 'Sparse Tensor',
        'Convolutional Neural Networks', '3D Vision', 'Deep Learning'
    ],
    zip_safe=False,
    classifiers=[
        # https://pypi.org/classifiers/
        'Environment :: Console',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: C++',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Physics',
        'Topic :: Scientific/Engineering :: Visualization',
    ],
    python_requires='>=3.6')
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from typing import Dict, Optional, Union
import numpy
import torch
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Maxout, Seq2SeqEncoder, TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator, util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("semeval_classifier_attention")
class SemEvalClassifierAttention(Model):
    """This ``Model`` performs text classification for SemEval 2017 task 4 subset A.

    Pipeline: embed -> dropout -> encoder -> self bi-attention ->
    integrator -> four pooling strategies (max / min / mean / self-attentive)
    -> feed-forward or maxout output layer producing class logits.
    """
    def __init__(self,
                 vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 embedding_dropout: float,
                 encoder: Seq2SeqEncoder,
                 integrator: Seq2SeqEncoder,
                 integrator_dropout: float,
                 output_layer: Union[FeedForward, Maxout],
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super().__init__(vocab, regularizer)
        # We need the embeddings to convert word IDs to their vector representations
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout)
        self.text_field_embedder = text_field_embedder
        self.encoder = encoder
        self.integrator = integrator
        self.integrator_dropout = torch.nn.Dropout(integrator_dropout)
        # Projects each integrated encoding to a scalar attention logit for
        # the self-attentive pooling in forward().
        self._self_attentive_pooling_projection = torch.nn.Linear(
            self.integrator.get_output_dim(), 1)
        self.output_layer = output_layer
        # Monitor the metrics - we use accuracy, as well as prec, rec, f1 for 4 (very positive)
        self.accuracy = CategoricalAccuracy()
        self.f1_measure_positive = F1Measure(
            vocab.get_token_index("positive", "labels"))
        self.f1_measure_negative = F1Measure(
            vocab.get_token_index("negative", "labels"))
        self.f1_measure_neutral = F1Measure(
            vocab.get_token_index("neutral", "labels"))
        # We use the cross entropy loss because this is a classification task.
        # Note that PyTorch's CrossEntropyLoss combines softmax and log likelihood loss,
        # which makes it unnecessary to add a separate softmax layer.
        self.loss_function = torch.nn.CrossEntropyLoss()
        initializer(self)

    @overrides
    def forward(self,
                tokens: Dict[str, torch.Tensor],
                label: torch.Tensor = None) -> torch.Tensor:
        """Run a batch through the network; returns {"logits": ...} plus
        "loss" when gold labels are supplied."""
        # In deep NLP, when sequences of tensors in different lengths are batched together,
        # shorter sequences get padded with zeros to make them equal length.
        # Masking is the process to ignore extra zeros added by padding
        text_mask = util.get_text_field_mask(tokens).float()
        # Forward pass
        embedded_text = self.text_field_embedder(tokens)
        dropped_embedded_text = self.embedding_dropout(embedded_text)
        encoded_tokens = self.encoder(dropped_embedded_text, text_mask)
        # Compute biattention. This is a special case since the inputs are the same.
        attention_logits = encoded_tokens.bmm(encoded_tokens.permute(0, 2, 1).contiguous())
        attention_weights = util.masked_softmax(attention_logits, text_mask)
        encoded_text = util.weighted_sum(encoded_tokens, attention_weights)
        # Build the input to the integrator: original encodings plus their
        # difference and elementwise product with the attended text.
        integrator_input = torch.cat([encoded_tokens,
                                      encoded_tokens - encoded_text,
                                      encoded_tokens * encoded_text], 2)
        integrated_encodings = self.integrator(integrator_input, text_mask)
        # Simple Pooling layers: padded positions are pushed to -/+1e7 so they
        # never win the max/min.
        max_masked_integrated_encodings = util.replace_masked_values(
            integrated_encodings, text_mask.unsqueeze(2), -1e7)
        max_pool = torch.max(max_masked_integrated_encodings, 1)[0]
        min_masked_integrated_encodings = util.replace_masked_values(
            integrated_encodings, text_mask.unsqueeze(2), +1e7)
        min_pool = torch.min(min_masked_integrated_encodings, 1)[0]
        mean_pool = torch.sum(integrated_encodings, 1) / torch.sum(text_mask, 1, keepdim=True)
        # Self-attentive pooling layer
        # Run through linear projection. Shape: (batch_size, sequence length, 1)
        # Then remove the last dimension to get the proper attention shape (batch_size, sequence length).
        self_attentive_logits = self._self_attentive_pooling_projection(
            integrated_encodings).squeeze(2)
        self_weights = util.masked_softmax(self_attentive_logits, text_mask)
        self_attentive_pool = util.weighted_sum(integrated_encodings, self_weights)
        pooled_representations = torch.cat([max_pool, min_pool, mean_pool, self_attentive_pool], 1)
        pooled_representations_dropped = self.integrator_dropout(pooled_representations)
        logits = self.output_layer(pooled_representations_dropped)
        # In AllenNLP, the output of forward() is a dictionary.
        # Your output dictionary must contain a "loss" key for your model to be trained.
        output = {"logits": logits}
        if label is not None:
            self.accuracy(logits, label)
            self.f1_measure_positive(logits, label)
            self.f1_measure_negative(logits, label)
            self.f1_measure_neutral(logits, label)
            output["loss"] = self.loss_function(logits, label)
        return output

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Return accuracy, 3-class average recall, and macro F1.

        Note: the macro F1 averages positive and negative only (neutral is
        excluded) — presumably the SemEval task convention; confirm.
        """
        _, recall_positive, f1_measure_positive = self.f1_measure_positive.get_metric(
            reset)
        _, recall_negative, f1_measure_negative = self.f1_measure_negative.get_metric(
            reset)
        _, recall_neutral, _ = self.f1_measure_neutral.get_metric(reset)
        accuracy = self.accuracy.get_metric(reset)
        avg_recall = (recall_positive + recall_negative + recall_neutral) / 3.0
        macro_avg_f1_measure = (
            f1_measure_positive + f1_measure_negative) / 2.0
        results = {
            'accuracy': accuracy,
            'avg_recall': avg_recall,
            'f1_measure': macro_avg_f1_measure
        }
        return results

    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'SemEvalClassifierAttention':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")
        output_layer_params = params.pop("output_layer")
        # A config with "activations" selects FeedForward, otherwise Maxout.
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)
        # NOTE(review): Elmo is not imported in this module, so a config with
        # an "elmo" key raises NameError here; the elmo / use_*_elmo values
        # are also never passed to the constructor — confirm intent.
        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   initializer=initializer,
                   regularizer=regularizer)
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
import os
import sys
import json
import argparse
import logging
import numpy as np
import pandas as pd
# Map from GLUE task key to the TSV file name expected in a submission zip.
submission_files = {
    "cola": "CoLA.tsv",
    "sst2": "SST-2.tsv",
    "mrpc": "MRPC.tsv",
    "stsb": "STS-B.tsv",
    "mnli": "MNLI-m.tsv",
    "mnli_mismatched": "MNLI-mm.tsv",
    "qnli": "QNLI.tsv",
    "qqp": "QQP.tsv",
    "rte": "RTE.tsv",
    "wnli": "WNLI.tsv",
    "ax": "AX.tsv",
}
# Column names for each task's headerless result TSV. Classification tasks
# carry a JSON "class_score" column; stsb/wnli carry a numeric "score".
results_headers = {
    "cola": ["sentence", "idx", "class_score"],
    "sst2": ["sentence", "idx", "class_score"],
    "mrpc": ["sentence1", "sentence2", "idx", "class_score"],
    "stsb": ["sentence1", "sentence2", "idx", "score"],
    "mnli": ["premise", "hypothesis", "idx", "class_score"],
    "mnli_mismatched": ["premise", "hypothesis", "idx", "class_score"],
    "qnli": ["question", "sentence", "idx", "class_score"],
    "qqp": ["question1", "question2", "idx", "class_score"],
    "rte": ["sentence1", "sentence2", "idx", "class_score"],
    "wnli": ["sentence1", "sentence2", "idx", "score"],
    "ax": ["premise", "hypothesis", "idx", "class_score"],
}
# CLI: one output folder plus up to five model result folders to fuse.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--results_folder",
    type=str,
    default="./results",
    help="results folder",
)
parser.add_argument(
    "--model_folder1",
    type=str,
    default="./model_folder1",
    help="model result folder1",
)
parser.add_argument(
    "--model_folder2",
    type=str,
    default=None,
    help="model result folder2",
)
parser.add_argument(
    "--model_folder3",
    type=str,
    default=None,
    help="model result folder3",
)
parser.add_argument(
    "--model_folder4",
    type=str,
    default=None,
    help="model result folder4",
)
parser.add_argument(
    "--model_folder5",
    type=str,
    default=None,
    help="model result folder5",
)
args, _ = parser.parse_known_args()
model_folder1 = args.model_folder1
model_folder2 = args.model_folder2
model_folder3 = args.model_folder3
model_folder4 = args.model_folder4
model_folder5 = args.model_folder5
results_folder = args.results_folder
if not os.path.exists(results_folder):
    os.makedirs(results_folder, exist_ok=True)
# Keep only the folders actually supplied (folder1 is always present).
model_folders = [model_folder1]
if model_folder2 is not None:
    model_folders += [model_folder2]
if model_folder3 is not None:
    model_folders += [model_folder3]
if model_folder4 is not None:
    model_folders += [model_folder4]
if model_folder5 is not None:
    model_folders += [model_folder5]
def avg_fusion(*results, task=None):
    """Average per-model predictions into one submission frame.

    Regression tasks (stsb/wnli) average the numeric 'score' columns;
    every other task averages the JSON 'class_score' distributions and
    recomputes the argmax class and its score.
    """
    ordered = [frame.sort_values("idx") for frame in results]
    fused = ordered[0]
    if task in ["stsb", "wnli"]:
        fused["score"] = np.mean([frame["score"].values for frame in ordered], axis=0)
        return fused
    stacked = pd.concat(
        [pd.DataFrame({f"class_score_{i}": frame["class_score"]}) for i, frame in enumerate(ordered)],
        axis=1,
    )
    def _merge_row(row):
        # Mean of the per-model probability vectors for this example.
        scores = np.mean(
            [json.loads(v)["scores"] for k, v in row.items() if k.startswith("class_score_")],
            axis=0,
        )
        return json.dumps({"class": int(np.argmax(scores)),
                           "score": float(np.max(scores)),
                           "scores": scores.tolist()})
    fused["class_score"] = stacked.apply(_merge_row, axis=1)
    return fused
def get_folder_results(folder, sub_file):
    """Collect every path under `folder` whose file name equals `sub_file`."""
    return [
        os.path.join(root_dir, sub_file)
        for root_dir, _dirs, files in os.walk(folder)
        if sub_file in files
    ]
# Fuse each task's per-model result files and write one submission TSV each.
for sub_task, sub_file in submission_files.items():
    results = []
    for folder in model_folders:
        results += get_folder_results(folder, sub_file)
    if len(results) == 0:
        logging.info(f"GLUE Task {sub_task} in missing")
        continue
    results = [
        pd.read_csv(
            result_file,
            names=results_headers.get(sub_task),
            sep="\t",
            quoting=3,  # QUOTE_NONE: keep raw sentence text untouched
        )
        for result_file in results
    ]
    fusion = avg_fusion(*results, task=sub_task)
    fusion.to_csv(
        os.path.join(results_folder, sub_file),
        sep="\t",
        header=None,
        index=False,
        quoting=3,
    )
# import fitz
import pytesseract
from PIL import Image
import io
import cv2
import numpy as np
from pdf2image import convert_from_bytes
import re
from core import logging
logger = logging.getLogger(__name__)
def de_skew(image, show=False, delta=0):
    """Rotate `image` so its dominant content axis is horizontal.

    The skew angle comes from the min-area rectangle around the foreground
    pixels of an inverted Otsu threshold. `delta` adds an extra rotation
    offset in degrees; `show` pops up before/after windows.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = 255 - gray
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]
    # FIX: the original passed angle as a stray positional arg with no %s
    # placeholder, which makes logging raise a formatting error.
    logger.debug('Angle: %s', angle)
    # Normalize the rect angle into the smallest corrective rotation
    # (minAreaRect angle conventions vary across OpenCV versions — confirm).
    if angle == 90:
        angle = 0
    elif angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle
    angle = angle + delta
    logger.debug(angle)
    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    if show:
        cv2.imshow("Unrotated", image)
        cv2.imshow("Rotated", rotated)
        cv2.waitKey(0)
    return rotated
def get_image(data):
    """Render the first page of a PDF byte stream as a PIL image."""
    image = convert_from_bytes(data.read())[0]
    return image
def crop(image, x=0, y=0, h=2338, w=1653, show=False, is_gray=True):
    """Cut the (x, y, w, h) window out of `image`.

    Converts to grayscale first when is_gray=False; optionally displays
    the cropped window.
    """
    if not is_gray:
        image = get_grayscale(image)
    window = image[y:y + h, x:x + w]
    if show:
        cv2.imshow("cropped", window)
        cv2.waitKey(0)
    return window
def get_grayscale(image):
    """Convert a BGR image to single-channel grayscale."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

def remove_noise(image):
    """Median-blur with a 5x5 aperture to suppress salt-and-pepper noise."""
    return cv2.medianBlur(image, 5)

def thresholding(image, threshold=240):
    """Binarize: pixels above `threshold` become 255, the rest 0."""
    ret, th_img = cv2.threshold(image, thresh=threshold, maxval=255, type=cv2.THRESH_BINARY)
    return th_img

def dilate(image):
    """One 5x5 dilation pass (thickens bright regions)."""
    kernel = np.ones((5, 5), np.uint8)
    return cv2.dilate(image, kernel, iterations=1)

def erode(image):
    """One 5x5 erosion pass (thins bright regions)."""
    kernel = np.ones((5, 5), np.uint8)
    return cv2.erode(image, kernel, iterations=1)

def opening(image):
    """Morphological opening (erode then dilate) with a 5x5 kernel."""
    kernel = np.ones((5, 5), np.uint8)
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)

def canny(image):
    """Canny edge detection with thresholds 100/200."""
    return cv2.Canny(image, 100, 200)
# def deskew(image):
# coords = np.column_stack(np.where(image > 0))
# angle = cv2.minAreaRect(coords)[-1]
# logger.debug(angle)
# if angle < -45:
# angle = -(90 + angle)
# else:
# angle = -angle
# logger.debug(angle)
# (h, w) = image.shape[:2]
# center = (w // 2, h // 2)
# M = cv2.getRotationMatrix2D(center, angle, 1.0)
# rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
# return rotated
def match_template(image, template):
    """Normalized cross-correlation template matching (TM_CCOEFF_NORMED)."""
    return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)

def image_to_string(image):
    """OCR the image with Tesseract (LSTM engine, uniform-block page mode)."""
    config = r'--oem 3 --psm 6'
    return pytesseract.image_to_string(image, lang='eng', config=config)
def get_ref_number(image, regex):
    """OCR `image` line by line and return the first non-empty regex capture.

    Returns None (implicitly) when no line matches.
    """
    text = image_to_string(image)
    lines = text.split("\n")
    logger.debug("\n")
    logger.debug("========================================================")
    logger.debug(regex)
    # FIX: the original logger.debug(i, '. ', line) treats i as the format
    # string with stray args, which breaks logging's %-formatting; use lazy
    # %-style args. enumerate replaces the manual counter.
    for i, line in enumerate(lines, start=1):
        ret = re.search(regex, line.strip())
        logger.debug('%d. %s', i, line)
        if ret:
            groups = tuple(filter(lambda x: x, ret.groups()))
            val = groups[0] if groups and len(groups) else None
            return val
def threshold_trials(orig_img, regex, thresholds, i=0):
    """Try successive binarization thresholds until OCR yields a match.

    Walks `thresholds` starting at index `i`; returns the first extracted
    reference number, or None when every threshold fails.
    """
    # Iterative rewrite of the original tail recursion; same visit order.
    for threshold in thresholds[i:]:
        # FIX: lazy %-style logging instead of a stray positional argument.
        logger.debug("Try with threshold: %s", threshold)
        img = thresholding(np.array(orig_img.copy()), threshold=threshold)
        ref_number = get_ref_number(img, regex)
        if ref_number:
            return ref_number
    return None
def zoom_in(img, zoom):
    """Uniformly scale `img` by factor `zoom` in both axes."""
    return cv2.resize(img, None, fx=zoom, fy=zoom)
def extract_ref_number(pdf_data, regex, **kwargs):
    """End-to-end extraction: PDF bytes -> crop -> deskew -> zoom -> OCR.

    Remaining kwargs are forwarded to crop(); 'threshold' and 'zoom' are
    consumed here. Best-effort API: returns the extracted number, or None
    on any failure.
    """
    try:
        threshold = kwargs.get('threshold')
        zoom = kwargs.get('zoom')
        if threshold:
            del kwargs['threshold']
        if zoom:
            del kwargs['zoom']
        else:
            zoom = 1.0
        img = np.array(get_image(pdf_data))
        img = crop(img, **kwargs)
        img = de_skew(img, show=False)
        img = zoom_in(img, zoom)
        ref_number = get_ref_number(img, regex)
        if not ref_number:
            # Fall back to a ladder of thresholds around the configured one.
            thresholds = [threshold, threshold-7, threshold+7, threshold-14, threshold+14, threshold*2/3.5, threshold*2/3]
            # FIX: lazy %-style logging instead of a stray positional arg.
            logger.debug("Try with thresholds: %s", thresholds)
            ref_number = threshold_trials(img, regex, thresholds)
        return ref_number
    except Exception:
        # FIX: log the swallowed failure instead of hiding it entirely;
        # callers still receive None as before.
        logger.exception("extract_ref_number failed")
        return None
def show_wait_destroy(winname, img):
    """Show `img` in a window at (500, 0), wait for a key, then close it."""
    cv2.imshow(winname, img)
    cv2.moveWindow(winname, 500, 0)
    cv2.waitKey(0)
    cv2.destroyWindow(winname)
def apply_corrections(ref_number, corrections):
    """Apply positional character substitutions to an OCR result.

    Each correction is {'pos': index, 'val': expected char, 'rep': replacement};
    a substitution fires only when the character at `pos` equals `val`.
    A falsy ref_number or corrections is returned unchanged.
    """
    if not (corrections and ref_number):
        return ref_number
    chars = list(ref_number)
    for rule in corrections:
        idx = rule['pos']
        if chars[idx] == rule['val']:
            chars[idx] = rule['rep']
    return ''.join(chars)
def auto_remove_scratches():
    """Developer debug harness: OCR a known-problematic local PDF with a
    ladder of thresholds and positional corrections.

    NOTE(review): the input path is a hard-coded Windows location; this is
    scaffolding, not production code.
    """
    logger.debug("Removing scratches...")
    file = "C:\\Users\\godfred.nkayamba\\Downloads\\failed\\C.pdf"
    with open(file, 'rb') as pdf_data:
        img = np.array(get_image(pdf_data))
    logger.debug(img.shape)
    corrections = [{'pos': 1, 'val': '2', 'rep': 'Z'}]
    C_regex = '[ ]{0,1}(\w{15,})[\({ ]'
    A_kwargs = {'x': 700, 'y': 20, 'h': 500, 'w': 800, 'threshold': 230}
    C_kwargs = {'x': 700, 'y': 600, 'h': 400, 'w': 800, 'threshold': 225}
    kwargs = C_kwargs
    regex = C_regex
    threshold = kwargs.get('threshold')
    if threshold:
        del kwargs['threshold']
    img = crop(img, **kwargs)
    img = de_skew(img, show=False)
    # ret, binary = cv2.threshold(img, threshold*2/3.5, 255, cv2.THRESH_BINARY)
    # ref_number = get_ref_number(binary, regex)
    thresholds = [threshold, threshold+7, threshold-1, threshold*2/3.5, threshold*2/3]
    # FIX: lazy %-style logging instead of a stray positional argument.
    logger.debug("Try with thresholds: %s", thresholds)
    ref_number = threshold_trials(img, regex, thresholds)
    logger.debug(corrections)
    if ref_number and corrections and len(corrections):
        ref_number = apply_corrections(ref_number, corrections)
    logger.debug(ref_number)
    # cv2.imshow("Orig", orig)
    # cv2.imshow("Binary", binary)
    # cv2.waitKey(0)
# auto_remove_scratches()
def remove_lines(image, line_spec=(1, 6)):
    """Erase long horizontal rules from `image` in place and display the repair.

    NOTE(review): `line_spec` is accepted but never used — confirm intent.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # Remove horizontal
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (64, 2))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    # findContours returns 2 or 3 values depending on the OpenCV major
    # version; the next line handles both.
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    # Paint each detected line white directly on the input image.
    for c in cnts:
        cv2.drawContours(image, [c], -1, (255, 255, 255), 2)
    # Repair image
    repair_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 6))
    result = 255 - cv2.morphologyEx(255 - image, cv2.MORPH_CLOSE, repair_kernel, iterations=1)
    # cv2.imshow('thresh', thresh)
    # cv2.imshow('detected_lines', detected_lines)
    # cv2.imshow('image', image)
    cv2.imshow('result', result)
    cv2.waitKey()
import numpy as np
import cv2
from matplotlib import pyplot as plt
"""
@X: input data
@k: number of clusters
"""
def kmeans_wrapper(X, k, image_as_input=False):
    """Run OpenCV k-means on raw samples or on an image's pixels.

    Parameters
    ----------
    X : n x d data array, or a BGR image when image_as_input=True
    k : number of clusters
    image_as_input : when True, pixels are clustered and an image with each
        pixel replaced by its cluster center is returned

    Returns
    -------
    (S, centers) where S is a list of k per-cluster sample arrays, or
    (quantized_image, centers) in image mode.
    """
    if not image_as_input:
        X = np.float32(X)
    else:
        orig_shape = X.shape
        # flatten the image into a vector of BGR entries
        # so an n x 3 -> this is why -1 as first argument
        X = X.reshape((-1, 3))
        # data must be float32
        X = np.float32(X)
    type_ = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
    max_iter = 10
    epsilon = 1.0
    criteria = (type_, max_iter, epsilon)
    # labels returns the index of the cluster they belong in
    compactness, labels, centers = cv2.kmeans(data=X,
                                              K=k,
                                              bestLabels=None,
                                              criteria=criteria,
                                              attempts=10,
                                              flags=cv2.KMEANS_RANDOM_CENTERS)
    if not image_as_input:
        # BUG FIX: the original iterated over `labels` (one entry per SAMPLE),
        # appending n duplicated groups; iterate over the k cluster ids so
        # exactly one group per cluster is returned.
        S = [X[labels.ravel() == cluster_id] for cluster_id in range(k)]
        return S, centers
    else:
        # convert data back to image
        centers = np.uint8(centers)
        # same as for l in flat labels: res.append(center[l])
        res = centers[labels.flatten()]
        res2 = res.reshape((orig_shape))
        return res2, centers
import numpy as np
import os
import re
import cPickle
class read_cifar10(object):
    """Loader for the python-pickled CIFAR-10 batches (Python 2 / cPickle).

    data_path: directory holding data_batch_* and test_batch files.
    is_training: True loads the training batches, False the test batch.
    """
    def __init__(self, data_path=None, is_training=True):
        self.data_path = data_path
        self.is_training = is_training

    def load_data(self):
        """Return (data, labels): stacked image rows and their label vector."""
        files = os.listdir(self.data_path)
        if self.is_training is True:
            # Select every file named like data_batch_<n>.
            pattern = re.compile('(data_batch_).')
            to_read = [m.group(0) for i in files for m in [pattern.search(i)] if m]
            data = []
            labels = []
            for t in to_read:
                with open(self.data_path+'/'+t, 'rb') as f:
                    d = cPickle.load(f)
                data.append(d['data'])
                labels.append(d['labels'])
            data = np.vstack(data)
            labels = np.hstack(labels)
        else:
            # BUG FIX: open in binary mode ('rb') like the training branch —
            # pickled batch files are binary and text mode can corrupt them;
            # the context manager also guarantees the handle is closed.
            with open(self.data_path+'/test_batch', 'rb') as f:
                d = cPickle.load(f)
            data = d['data']
            labels = d['labels']
        return data, labels
import copy
import math
import pdb
import random
import timeit
import cPickle as pickle
import numpy as np
from poim import *
import poim
from shogun.Features import *
from shogun.Kernel import *
from shogun.Classifier import *
from shogun.Evaluation import *
from shutil import *
# DNA alphabet used by the sequence simulators below.
dna = ['A', 'C', 'G', 'T']
def simulate_sequence(length):
    """Return a uniformly random DNA string of the given length."""
    return ''.join(random.choice(dna) for _ in range(length))
def mutate_motif(motiv, probmut):
    """Return a copy of `motiv` where each base independently mutates to one
    of the three other bases with probability `probmut`."""
    mutated = []
    for base in motiv:
        if random.random() <= probmut:
            alternatives = ['A', 'C', 'G', 'T']
            alternatives.remove(base)
            mutated.append(random.choice(alternatives))
        else:
            mutated.append(base)
    return ''.join(mutated)
def gensequences(tally, positives, sequenceno, prob, motif, mu):
    """Generate `sequenceno` random DNA sequences of length `tally`.

    The first `positives` sequences get a (possibly mutated) copy of `motif`
    planted at offset `mu`; labels are +1 for positives and -1 otherwise.
    NOTE(review): `ones` is unqualified here — presumably numpy's ones via
    `from poim import *`; confirm.
    """
    sequences = []
    labels = np.concatenate((ones(positives), -ones(sequenceno-positives)))
    #np.ones(sequenceno)*(-1)
    ml = len(motif)
    for i in range(sequenceno):
        aa = simulate_sequence(tally)
        if i < positives:
            # labels[i]=1
            mut = mutate_motif(motif, prob)
            # Plant the motif by replacing the substring at [mu, mu+ml).
            aa = aa.replace(aa[mu:mu + ml], mut)
        sequences.append(aa)
    return [sequences, labels]
def svmTraining(x, y, C, kernel_degree):
    """Train a LibSVM with a weighted-degree-position string kernel.

    Returns [svm, kernel_build_seconds, training_seconds].
    """
    feats_train = StringCharFeatures(x, DNA)
    labels = BinaryLabels(y);
    start = timeit.default_timer()
    kernel = WeightedDegreePositionStringKernel(feats_train, feats_train, kernel_degree)
    stop = timeit.default_timer()
    time_kernel = stop-start
    start = timeit.default_timer()
    svm = LibSVM(C, kernel, labels)
    svm.train()
    stop = timeit.default_timer()
    time_svm = stop-start
    return [svm, time_kernel, time_svm]
#fm_train_dna = gensequences(tally,positives,sequenceno,0,motiv,mu)
def svmApply(svm, x):
    """Apply a trained SVM to sequences `x`.

    Returns (decision_values, predicted_labels).
    """
    featstest = StringCharFeatures(x, DNA)
    outputs = svm.apply(featstest)
    #pm = PerformanceMeasures(labels_test, output);
    #acc = pm.get_accuracy();
    #roc = pm.get_auROC();
    #fms = pm.get_fmeasure();
    outputlabels = outputs.get_labels();
    return outputs.get_values(), outputlabels
def roc(y, scores, outputlabels, num_pos):
    """Compute the ROC curve, its AUC, and label accuracy.

    NOTE(review): `metrics` is not imported in this module's visible imports
    (presumably sklearn.metrics via a star import) — confirm before use.
    """
    fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=num_pos)
    auc = metrics.auc(fpr, tpr)
    acc = metrics.accuracy_score(y, outputlabels)
    return fpr, tpr, auc, acc
def computePOIM(x, y, poim_degree, kernel_degree, savepath):
    """Train an SVM on DNA strings, compute POIMs and pickle them to savepath.

    Returns the POIM structure from poim.compute_poims.
    """
    feats_train = StringCharFeatures(x, DNA)
    labels = BinaryLabels(y)
    kernel = WeightedDegreePositionStringKernel(feats_train, feats_train, kernel_degree)
    C = 1
    svm = LibSVM(C, kernel, labels)
    svm.train()
    tally = len(x[0])
    ma = poim.compute_poims(svm, kernel, poim_degree, tally)
    # Context manager replaces the manual open/close pair so the handle is
    # closed even if pickling raises.
    with open(savepath, 'wb') as fobj:
        pickle.dump(ma, fobj)
    return ma
"""Starting from a halo mass at z=0, the two functions below give descriptions for
how halo mass and Vmax smoothly evolve across time.
"""
from numba import njit
from math import log10, exp
__all__ = ('halo_mass_vs_redshift', 'vmax_vs_mhalo_and_redshift')
@njit
def halo_mass_vs_redshift(halo_mass_at_z0, redshift, halo_mass_at_z):
    """Fitting function from Behroozi+13, https://arxiv.org/abs/1207.6105,
    Equations (H2)-(H6).
    Parameters
    ----------
    halo_mass_at_z0 : float or ndarray
        Mass of the halo at z=0 assuming h=0.7
    redshift : float or ndarray
    halo_mass_at_z : ndarray
        Empty array that will be filled with halo mass at the input redshift
    """
    n = halo_mass_at_z.size
    for i in range(n):
        m_z0 = halo_mass_at_z0[i]
        z = redshift[i]
        # Scale factor for this redshift.
        a = 1./(1. + z)
        # Redshift evolution of the M13 reference mass.
        _M13_z0 = 10**13.276
        _M13_zfactor1 = (1. + z)**3.0
        _M13_zfactor2 = (1. + 0.5*z)**-6.11
        _M13_zfactor3 = exp(-0.503*z)
        _M13 = _M13_z0*_M13_zfactor1*_M13_zfactor2*_M13_zfactor3
        # Exponent combines the z=0 mass ratio with an a-dependent factor.
        _exparg_factor1 = log10(m_z0/_M13_z0)
        logarg = ((10**9.649)/m_z0)**0.18
        a0 = 0.205 - log10(logarg + 1.)
        _factor2_num = 1. + exp(-4.651*(1-a0))
        _factor2_denom = 1. + exp(-4.651*(a-a0))
        _exparg_factor2 = _factor2_num/_factor2_denom
        _exparg = _exparg_factor1*_exparg_factor2
        halo_mass_at_z[i] = _M13*(10**_exparg)
@njit
def vmax_vs_mhalo_and_redshift(mhalo, redshift, vmax):
    """Scaling relation between Vmax and Mhalo for host halos across redshift.
    Relation taken from Equation (E2) from Behroozi+19,
    https://arxiv.org/abs/1806.07893.
    Parameters
    ----------
    mhalo : float or ndarray
        Mass of the halo at the input redshift assuming h=0.7
    redshift : float or ndarray
    vmax : ndarray
        Empty array that will be filled with Vmax [physical km/s]
        for a halo of the input mass and redshift
    """
    for idx in range(vmax.size):
        mass = mhalo[idx]
        scale_factor = 1/(1 + redshift[idx])
        # Pivot mass: sum of two power laws in (a / 0.378).
        denom_term1 = (scale_factor/0.378)**-0.142
        denom_term2 = (scale_factor/0.378)**-1.79
        pivot_mass = 1.64e12/(denom_term1 + denom_term2)
        vmax[idx] = 200*(mass/pivot_mass)**(1/3.)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 15:03:37 2016
keshengxuu@gmail.com
@author: keshengxu
"""
import numpy as np
from scipy.stats import norm
from scipy import integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os.path
from matplotlib import colors
import matplotlib.gridspec as gridspec
# Global font / tick-label configuration for the whole figure.
# plt.rc('xtick', labelsize=10) is equivalent to setting
# rcParams['xtick.labelsize'] directly, so everything is batched here.
plt.rcParams.update({
    'mathtext.sf': 'Arial',
    'font.family': 'sans-serif',
    'font.sans-serif': 'Arial',
    'xtick.labelsize': 10,
    'ytick.labelsize': 10,
})

# Shared text properties reused by the axis labels below
font = {'family': 'sans-serif',
        'weight': 'normal',
        'size': 12,
        }
#
def adjust_spines(ax, spines):
    """Show only the requested spines on *ax* and hide the rest.

    Parameters
    ----------
    ax : matplotlib Axes
    spines : sequence of str
        Spine names to keep, e.g. ['left', 'bottom'].  Kept spines are
        placed at their normal position ('outward', 0); all others are
        made invisible and their ticks removed.
    """
    for side, spine in ax.spines.items():
        if side in spines:
            spine.set_position(('outward', 0))
        else:
            spine.set_color('none')  # hide this spine entirely

    # Ticks only appear on sides that still have a visible spine
    for axis, side in ((ax.yaxis, 'left'), (ax.xaxis, 'bottom')):
        if side in spines:
            axis.set_ticks_position(side)
        else:
            axis.set_ticks([])
# ---- load simulation output ----
time = np.loadtxt('Data/time.txt')
v1, v2, v3, v4, v5 = [np.loadtxt('Data/var_t%g.txt' % k) for k in range(1, 6)]
ISI1, ISI2, ISI3, ISI4, ISI5 = [np.loadtxt('Data/ISI%g.txt' % k) for k in range(1, 6)]
spikes6 = np.loadtxt('Data/spikes-T36.300.txt')
ISI6 = np.diff(spikes6)  # ISIs derived from the raw spike times

# Histogram settings (bin count and ISI range in ms)
binss = 40
linf = 0
lsup = 800

# ---- figure layout: a 5x3 grid of panels ----
fig, axs = plt.subplots(figsize=(8, 6))
plt.clf()
(ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8,
 ax9, ax10, ax11, ax12, ax13, ax14, ax15) = [plt.subplot(5, 3, k)
                                             for k in range(1, 16)]
axes_v = [ax1, ax4, ax7, ax10, ax13]      # column 1: voltage traces
axes_ISI = [ax2, ax5, ax8, ax11, ax14]    # column 2: ISI vs time
aexs_cou = [ax3, ax6, ax9, ax12, ax15]    # column 3: ISI histograms
color_set = ['chocolate', 'darkorange', 'orange', 'green', 'palegreen']

# Group the per-temperature series for the plotting loops below
Vdata = [v1, v2, v3, v4, v5]
ISIdata = [ISI1, ISI2, ISI3, ISI4, ISI5]
ISIdata1 = [ISI1, ISI2, ISI3, ISI4, ISI6]  # histograms use the T=36.3 spike data
simu_time = [np.cumsum(isi) for isi in (ISI1, ISI2, ISI3, ISI4, ISI5)]
plot_order = ['A', 'B', 'C', 'D', 'E']     # panel letters, top to bottom
# Column 1: membrane potential traces, one temperature per row.
# NOTE: the original also zipped in color_set, but the color was never
# used (the trace is hard-coded black), so it is dropped here; the zip
# still iterates 5 times over the remaining equal-length lists.
for ax, V, panel in zip(axes_v, Vdata, plot_order):
    ax.plot(time, V, color='black', linewidth=1)
    ax.set_ylabel(u'Voltage (mV)', fontsize=11)
    ax.set_ylim([-90, 30])
    ax.set_xlim([30000, 34000])   # 30-34 s window of the simulation
    adjust_spines(ax, ['left'])
    ax.set_yticks(np.linspace(-90, 30, 3))
    ax.text(28500, 32, '%s' % panel, fontsize=12)  # panel letter (A-E)
    ax.axes.tick_params(direction="out")
#adjust_spines(ax13, ['left', 'bottom'])
#for ax in axes_v[0:5]:
# adjust_spines(ax, ['left'])
# ax.tick_params(labelsize=10)
#adjust_spines(ax13, ['left','bottom'])
#ax13.set_xticklabels(['30','31','32','33','34','35'],fontsize = 8)
#ax13.spines['bottom'].set_position(('outward',5))
##Change the appearance of ticks and tick labels.
#ax13.tick_params(axis='x',direction='out',labelsize=10)
# Column 2: ISI sequences plotted against elapsed simulation time.
for ax, cum_t, isi_seq in zip(axes_ISI, simu_time, ISIdata):
    ax.plot(cum_t / 1000, isi_seq, 'bo', ms=1)  # ms -> s on the x-axis
    adjust_spines(ax, ['left', 'bottom'])
    ax.set_ylim([10**1, 10**3])
    ax.set_xlim([0, 150])
    ax.set_yscale('log')
    ax.set_ylabel('ISI (ms)', fontdict=font)
    ax.tick_params(axis='y', direction='out', length=4, width=1)

# Suppress x tick marks on all but the bottom panel of the column
for ax in axes_ISI[0:4]:
    ax.axes.tick_params(axis='x', direction='out', length=0, width=2)
    ax.set_xticks([])

# Bottom panel (ax14) carries the shared time axis
ax14.axes.tick_params(axis='x', direction='out')
ax14.set_xticks(np.linspace(0, 150, 4))
ax14.set_xlabel('Time (s)', fontdict=font)
# Temperatures (degrees C) corresponding to rows A-E
tem_text = ['20', '24.76', '26', '33', '36.3']

# Column 3: square-root-scaled ISI histograms, one per temperature.
for ax, isi_seq, temt in zip(aexs_cou, ISIdata1, tem_text):
    hist, bins = np.histogram(isi_seq, bins=binss, range=(linf, lsup))
    widths = np.diff(bins)
    # sqrt compresses the count axis so small peaks stay visible
    ax.bar(bins[:-1], np.sqrt(hist), widths)
    adjust_spines(ax, ['left', 'bottom'])
    ax.set_ylabel('Event Count \n (sq.root)', fontdict=font, fontsize=10)
    ax.set_ylim([0, 32])
    ax.set_yticks(np.linspace(0, 30, 3))
    ax.tick_params(axis='y', direction='out', length=4, width=1)
    ax.text(500, 25, r'$\mathsf{ %s^{\circ} C}$' % temt,
            fontdict=font, fontsize=14)

# Suppress x tick marks on all but the bottom histogram
for ax in aexs_cou[0:4]:
    ax.axes.tick_params(axis='x', direction='out', length=0, width=2)
    ax.set_xticks([])

# Bottom histogram (ax15) carries the shared ISI axis
ax15.axes.tick_params(axis='x', direction='out')
ax15.set_xticks(np.linspace(0, 800, 5))
ax15.set_xlabel('ISI (ms)', fontdict=font)
#define a scalebar under the menberane action potentional
def figura_scalebar(ax):
    """Draw a 1-second horizontal scale bar beneath the voltage traces.

    The axis limits mirror those of the voltage panels so the bar lines
    up with the traces above it.
    """
    ax.set_ylim([-90, 30])
    ax.set_xlim([30000, 34000])
    # 1000 ms bar; y=-85 sits just under the trace baseline
    ax.hlines(-85, 32000, 33000, color='black', linewidth=2)
    ax.text(32300, -120, r'$\mathsf{ 1\;s}$', fontsize=12)
# Put the scale bar in the grid slot below the last voltage panel
ax0 = fig.add_subplot(5, 3, 13)
figura_scalebar(ax0)

# Tighten the layout and write the final figure to disk
plt.subplots_adjust(left=0.1, right=0.98, bottom=0.08, top=0.97,
                    wspace=0.4, hspace=0.18)
plt.draw()
plt.savefig('Figure1_timeseries.png', dpi=600)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.