code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import random
from random import choice
import numpy as np
import pandas as pd
from pyspark.sql import SparkSession
from ydot.spark import smatrices
random.seed(37)
np.random.seed(37)
def get_spark_dataframe(spark, n=100):
    """Create a Spark DataFrame of random demo data.

    Args:
        spark: an active SparkSession (anything exposing createDataFrame).
        n: number of rows to generate. Defaults to 100, which was the
           previously hard-coded row count, so existing callers are unchanged.

    Returns:
        A Spark DataFrame with categorical columns 'a', 'b', numeric
        columns 'x1', 'x2', and a binary float target 'y'.
    """
    data = {
        'a': [choice(['left', 'right']) for _ in range(n)],
        'b': [choice(['high', 'mid', 'low']) for _ in range(n)],
        'x1': np.random.normal(20, 1, n),
        'x2': np.random.normal(3, 1, n),
        'y': [choice([1.0, 0.0]) for _ in range(n)]
    }
    pdf = pd.DataFrame(data)
    # hand the pandas frame to Spark; the schema is inferred from dtypes
    sdf = spark.createDataFrame(pdf)
    return sdf
if __name__ == '__main__':
    try:
        # spin up a local 4-core Spark session for this demo script
        spark = (SparkSession.builder
                .master('local[4]')
                .appName('local-testing-pyspark')
                .getOrCreate())
        sdf = get_spark_dataframe(spark)
        # build patsy-style design matrices with all two-way interactions
        # of x1, x2, a and b against the target y
        y, X = smatrices('y ~ (x1 + x2 + a + b)**2', sdf)
        y = y.toPandas()
        X = X.toPandas()
        print(X.head(10))
        X.head(10).to_csv('two-way-interactions.csv', index=False)
    except Exception as e:
        print(e)
    finally:
        # always attempt shutdown; stop() itself may fail (e.g. the session
        # never came up), hence the inner try
        try:
            spark.stop()
            print('closed spark')
        except Exception as e:
            print(e)
| [
"numpy.random.normal",
"random.choice",
"pyspark.sql.SparkSession.builder.master",
"ydot.spark.smatrices",
"random.seed",
"numpy.random.seed",
"pandas.DataFrame"
] | [((152, 167), 'random.seed', 'random.seed', (['(37)'], {}), '(37)\n', (163, 167), False, 'import random\n'), ((168, 186), 'numpy.random.seed', 'np.random.seed', (['(37)'], {}), '(37)\n', (182, 186), True, 'import numpy as np\n'), ((522, 540), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (534, 540), True, 'import pandas as pd\n'), ((385, 411), 'numpy.random.normal', 'np.random.normal', (['(20)', '(1)', 'n'], {}), '(20, 1, n)\n', (401, 411), True, 'import numpy as np\n'), ((427, 452), 'numpy.random.normal', 'np.random.normal', (['(3)', '(1)', 'n'], {}), '(3, 1, n)\n', (443, 452), True, 'import numpy as np\n'), ((848, 890), 'ydot.spark.smatrices', 'smatrices', (['"""y ~ (x1 + x2 + a + b)**2"""', 'sdf'], {}), "('y ~ (x1 + x2 + a + b)**2', sdf)\n", (857, 890), False, 'from ydot.spark import smatrices\n'), ((260, 285), 'random.choice', 'choice', (["['left', 'right']"], {}), "(['left', 'right'])\n", (266, 285), False, 'from random import choice\n'), ((320, 350), 'random.choice', 'choice', (["['high', 'mid', 'low']"], {}), "(['high', 'mid', 'low'])\n", (326, 350), False, 'from random import choice\n'), ((468, 486), 'random.choice', 'choice', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (474, 486), False, 'from random import choice\n'), ((649, 688), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""local[4]"""'], {}), "('local[4]')\n", (676, 688), False, 'from pyspark.sql import SparkSession\n')] |
from typing import List
import numpy as np
from scores.scoreutils import simple_normalization
from scores import weightsmodel
def _attentive_pool_with_weights(encoded, weights):
    """Attention-style pooling: normalize per-token weights along the token
    axis, then take the weighted sum of the encoded vectors.

    encoded is [B, L, D]; weights is [B, L]; the result is [B, D].
    """
    # expand weights to [B, L, 1] so they broadcast over the feature dim
    normalized = simple_normalization(np.expand_dims(weights, axis=-1), axis=1)
    # weighted sum over tokens -> [B, D]
    return np.sum(normalized * encoded, axis=1)
class AttentivePooler:
    """Pools token matrices into sentence vectors using learned token weights
    produced by a sentence-weight scoring model loaded from disk."""

    def __init__(self,
                 score_model_path: str):
        # load the scoring model once; it is reused for every pooling call
        model = weightsmodel.SentenceWeightModel()
        model.load_model(score_model_path)
        self.sentence_weights_model = model

    def get_weights(self,
                    sentences: List[str],
                    seq_length: int = 64):
        """Return a [num_sentences, seq_length] float64 array of per-token weights."""
        scored = self.sentence_weights_model.sentences_to_score(sentences,
                                                                max_len=seq_length)
        return np.array([[weight for _, weight in sentence] for sentence in scored],
                        dtype=np.float64)

    def pool(self,
             matrix_list,
             sentences: List[str],
             seq_length: int = 64):
        """Weight-and-sum matrix_list using weights derived from sentences."""
        token_weights = self.get_weights(sentences, seq_length)
        return _attentive_pool_with_weights(matrix_list, token_weights)
if __name__ == '__main__':
    # smoke-test: load the pickled score model and print weights for one query
    pooler = AttentivePooler('data/score_model/score_model.pkl')
    sample = pooler.get_weights(
        ['pandas dataframe merge two columns and merge two columns'],
        seq_length=64)
    print(sample)
| [
"numpy.sum",
"numpy.array",
"scores.weightsmodel.SentenceWeightModel",
"numpy.expand_dims"
] | [((333, 363), 'numpy.sum', 'np.sum', (['weight_encoded'], {'axis': '(1)'}), '(weight_encoded, axis=1)\n', (339, 363), True, 'import numpy as np\n'), ((217, 249), 'numpy.expand_dims', 'np.expand_dims', (['weights'], {'axis': '(-1)'}), '(weights, axis=-1)\n', (231, 249), True, 'import numpy as np\n'), ((519, 553), 'scores.weightsmodel.SentenceWeightModel', 'weightsmodel.SentenceWeightModel', ([], {}), '()\n', (551, 553), False, 'from scores import weightsmodel\n'), ((932, 1035), 'numpy.array', 'np.array', (['[[weight for _, weight in sentence] for sentence in tokens_and_weights]'], {'dtype': 'np.float64'}), '([[weight for _, weight in sentence] for sentence in\n tokens_and_weights], dtype=np.float64)\n', (940, 1035), True, 'import numpy as np\n')] |
'''
Author: <NAME>
Miscellaneous data related utilities used by the main.py file in the scripts folder.
'''
import numpy as np
import logging
from collections import Counter
import collections
import pickle
import re
import networkx as nx
import spacy
from spacy.tokens import Doc
import nltk
nltk.download('wordnet')
from nltk import wordnet as wn
import random
import h5py # conda install -c conda-forge h5py
from spacy.lang.en.stop_words import STOP_WORDS as stop_words
import json
import pandas as pd
#TODO (geeticka) need to clean up utils based upon the methods that are
# not directly used by the script anymore
# to get the dataset from the cross validation splits
TRAIN, DEV, TEST = 0, 1, 2
class Dataset():
    """Cross-validation splits of a relation-extraction dataset.

    Loaded from a pickle containing a list of K folds; each fold is indexed
    by the module-level TRAIN/DEV/TEST constants. Each split appears to be a
    pandas DataFrame with columns 'sentences', 'relations', 'e1_pos',
    'e2_pos' and optionally 'elmo_embeddings'/'bert_embeddings' — confirm
    against the pickling code.
    """
    def __init__(self, relations_split_file):
        # relations_split_file: path to the pickled list of folds
        with open(relations_split_file, mode='rb') as f: self.relations_splits = pickle.load(f)
        self.K = len(self.relations_splits)  # number of cross-validation folds
    def get_data_for_fold(self, fold_num, data_type=TRAIN, mode='normal'): # mode can also be elmo
        # Returns (sentences, relations, e1_pos, e2_pos[, embeddings]) as
        # plain python lists; 'elmo' appends elmo_embeddings and the two
        # bert modes append bert_embeddings.
        assert fold_num < self.K
        data = self.relations_splits[fold_num][data_type]
        if mode == 'elmo':
            return data['sentences'].tolist(), data['relations'].tolist(), data['e1_pos'].tolist(), \
                    data['e2_pos'].tolist(), data['elmo_embeddings'].tolist()
        if mode == 'bert-CLS' or mode == 'bert-tokens':
            return data['sentences'].tolist(), data['relations'].tolist(), data['e1_pos'].tolist(), \
                    data['e2_pos'].tolist(), data['bert_embeddings'].tolist()
        return data['sentences'].tolist(), data['relations'].tolist(), data['e1_pos'].tolist(), \
                data['e2_pos'].tolist()
    # we need it in list format
    def get_full_data(self):
        # Concatenate dev, test and train of fold 0 into one frame of rows.
        data = pd.concat([self.relations_splits[0][t] for t in [DEV, TEST, TRAIN]])
        return data.values.tolist()
    # when reporting the scores for the paper, will merge dev and train set and will grab 0th fold of test
    def get_train_dev_data_for_fold(self, fold_num, mode='normal'):
        # Element-wise concatenation of the train and dev tuples from
        # get_data_for_fold (same tuple layout per mode).
        train_data = self.get_data_for_fold(fold_num, data_type=TRAIN, mode=mode)
        dev_data = self.get_data_for_fold(fold_num, data_type=DEV, mode=mode)
        if mode == 'elmo' or mode == 'bert-CLS' or mode == 'bert-tokens':
            return train_data[0] + dev_data[0], train_data[1] + dev_data[1], train_data[2] + dev_data[2], \
                    train_data[3] + dev_data[3], train_data[4] + dev_data[4]
        return train_data[0] + dev_data[0], train_data[1] + dev_data[1], train_data[2] + dev_data[2],\
                train_data[3] + dev_data[3]
# given a string that looks like a list, parse it into an actual list
def argument_to_list(argument):
    """Parse a string such as "[1.0, 2.0]" into a list of floats."""
    stripped = argument.strip('[]')
    return [float(piece) for piece in stripped.split(',')]
# Given a string like "word_1" return "word"
# basically the word ends with _number and we want to split that up
def get_only_word(string):
    """Given a string like "word_12", return the part before the trailing
    _<digits> ("word"); returns '' when the pattern does not match."""
    # bug fix: use a raw string literal — "\d" in a non-raw literal is an
    # invalid escape sequence (DeprecationWarning today, SyntaxError in
    # future Python versions)
    return ''.join(re.findall(r"^(.*)_\d+$", string))
def get_only_number(string):
    """Given a string like "word_12", return the trailing digits ("12");
    returns '' when the pattern does not match."""
    # bug fix: raw string literal avoids the invalid "\d" escape warning
    return ''.join(re.findall(r"^.*_(\d+)$", string))
def stringify_tokenized(tokenizedSentence):
    """Join a list of tokens back into a single space-separated sentence."""
    separator = " "
    return separator.join(tokenizedSentence)
# given a tokenized and splitted sentence
def sentence_replace(sentence, positions, string_update):
    """Replace the inclusive token span positions[0]..positions[1] of a
    tokenized sentence with the single token string_update."""
    start = positions[0]
    end = positions[1]
    return sentence[:start] + [string_update] + sentence[end + 1:]
# sentence is the sentence to update and entity positions is a list of entity positions
def per_sentence_replacement_ddi(sentence, entity_positions):
    """Blind the first two entities of a tokenized sentence as DRUG1/DRUG2.

    entity_positions is a list of (start, end) tuples; it is mutated in
    place (multi-word entities collapse to one token, and all later
    positions are shifted left accordingly). Returns the updated
    (sentence, entity_positions) pair.
    """
    # if entity position is updated, then all positions after it also have to be updated
    e0_pos = entity_positions[0]
    sentence = sentence_replace(sentence, e0_pos, 'DRUG1')
    new_e0_pos = (e0_pos[0], e0_pos[0])
    entity_positions[0] = new_e0_pos
    diff = e0_pos[1] - e0_pos[0] # if the entity is 2 word, then move every other e_pos down by 1
    if entity_positions[0] == entity_positions[1]: # if both entities are the same
        entity_positions[1] = new_e0_pos
        return sentence, entity_positions
    if diff > 0:
        # shift every later position left by the number of collapsed tokens
        for i in range(1, len(entity_positions)):
            e_pos = entity_positions[i]
            if e_pos[0] > e0_pos[1]:
                entity_positions[i] = (entity_positions[i][0] - diff, entity_positions[i][1] - diff)
    e1_pos = entity_positions[1]
    sentence = sentence_replace(sentence, e1_pos, 'DRUG2')
    new_e1_pos = (e1_pos[0], e1_pos[0])
    entity_positions[1] = new_e1_pos
    diff = e1_pos[1] - e1_pos[0]
    if diff > 0 and len(entity_positions) > 2:
        # same shift for any remaining (drug0-style) entity positions
        for i in range(2, len(entity_positions)):
            e_pos = entity_positions[i]
            if e_pos[0] > e1_pos[1]:
                entity_positions[i] = (entity_positions[i][0] - diff, entity_positions[i][1] - diff)
    # then should handle for the case when there are more than entity 1 and entity 2 i.e. drug0 (any other drug)
    return sentence, entity_positions
# replace by DRUG1, DRUG2
def replace_by_drug_ddi(data):
    """Blind both target entities of every sentence to DRUG1/DRUG2.

    data is a (sentences, relations, e1_pos, e2_pos) tuple; returns a tuple
    of the same shape with blinded sentences and the updated positions.
    """
    sentences, relations, e1_pos, e2_pos = data
    blinded_sentences = []
    blinded_e1 = []
    blinded_e2 = []
    for sentence, pos1, pos2 in zip(sentences, e1_pos, e2_pos):
        updated_sentence, updated_positions = per_sentence_replacement_ddi(sentence, [pos1, pos2])
        blinded_sentences.append(updated_sentence)
        blinded_e1.append(updated_positions[0])
        blinded_e2.append(updated_positions[1])
    return blinded_sentences, relations, blinded_e1, blinded_e2
def load_data(file_list):
    """Read parsed relation files.

    Each line is: '<relation> <e1_start> <e1_end> <e2_start> <e2_end> tokens...'.
    Returns (sentences, relations, e1_pos, e2_pos) where positions are
    inclusive (start, end) tuples and sentences are lower-cased token lists.
    """
    sentences = []
    relations = []
    e1_pos = []
    e2_pos = []
    for path in file_list:
        with open(path, 'r') as handle:
            for raw_line in handle.readlines():
                parts = raw_line.strip().lower().split()
                relations.append(int(parts[0]))
                e1_pos.append((int(parts[1]), int(parts[2])))  # (start_pos, end_pos)
                e2_pos.append((int(parts[3]), int(parts[4])))  # (start_pos, end_pos)
                sentences.append(parts[5:])
    return sentences, relations, e1_pos, e2_pos
#stores the words with an index in the corpus organized from largest frequency to lowest frequency
def build_dict(sentences, low_freq_thresh=0, remove_stop_words=False):
    """Map corpus words to indices ordered by descending frequency.

    Index 1 is the most frequent word. Stop words (when remove_stop_words is
    True) and words whose count is <= low_freq_thresh (when the threshold is
    positive) are mapped to index 0, without leaving holes in the indexing.
    """
    counts = Counter()
    for sentence in sentences:
        if sentence is None:
            continue
        for token in sentence:
            if remove_stop_words is True and token in stop_words:
                # force stop words to the tail of most_common() so they
                # never punch holes in the index sequence
                counts[token] = -1
            else:
                counts[token] += 1
    # low-frequency words cannot leave holes either: every word ranked after
    # them is mapped to 0 as well
    word_to_index = {}
    for rank, (token, _) in enumerate(counts.most_common()):
        if remove_stop_words is True and token in stop_words:
            word_to_index[token] = 0  # stop words share the "unknown" index
        elif low_freq_thresh > 0 and counts[token] <= low_freq_thresh:
            word_to_index[token] = 0
        else:
            word_to_index[token] = rank + 1
    return word_to_index
#basically every word with a count below that number should be sent to 0
# leave 0 to pad
# need to add conditions for when the remove stop words is True and low frequency words are there
#return {word_and_count[0]: index + 1 for (index, word_and_count) in enumerate(ls)}
# above is basically just creating a dictionary with key as the word and the value as the index of the word. Now index of 1 is for the highest frequency
# whereas index of 2 is for lower frequency.
## stores the words with an index in the corpus organized from largest frequency to lowest
## frequency
#def build_dict(sentences):
# word_count = Counter()
# word_count[""] = -1
# for sent in sentences:
# for w in sent:
# word_count[w] += 1
#
# ls = word_count.most_common()
# # above organizes the words by most common and less common words; at this point we have the counts
#
# # leave 0 to PAD or for ""; in this case index always starts with "" being 0 so it is safe
# # to keep the index as index
# return {w[0]: index for (index, w) in enumerate(ls)}
# # above is basically just creating a dictionary with key as the word nad the value as the index of the
# # word. Now index of 1 is for the highest frequency whereas index of 2 is for lower frequency. 0 is for
# # unknown words or "" which is used in the case of hypernyms
def load_embedding_senna(config, word_dict, normalize=False):
    """Load SENNA-style embeddings (separate vocab file + vector file).

    Builds a (len(word_dict)+1, dim) matrix aligned with word_dict indices:
    words found in both vocabularies get their pretrained vector, the rest
    keep a seeded uniform(-0.1, 0.1) initialization, and row 0 (the
    unknown/padding index) is zeroed.
    Side effect: sets config.embedding_size to the discovered dimensionality.
    """
    emb_file = config.embedding_file
    emb_vocab = config.embedding_vocab
    vocab = {}
    with open(emb_vocab, 'r') as f:
        for id, w in enumerate(f.readlines()):
            w = w.strip().lower()
            vocab[w] = id
    f = open(emb_file, 'r')
    embed = f.readlines()
    # dimensionality is inferred from the first vector line
    dim = len(embed[0].split())
    num_words = len(word_dict) + 1
    embeddings = np.random.RandomState(seed=config.seed).uniform(-0.1, 0.1, size=(num_words, dim))
    config.embedding_size = dim
    pre_trained = 0
    for w in vocab.keys():
        if w in word_dict:
            embeddings[word_dict[w]] = [float(x) for x in embed[vocab[w]].split()]
            pre_trained += 1
    embeddings[0] = np.zeros((dim))
    logging.info('embeddings: %.2f%%(pre_trained) unknown: %d' %(pre_trained/float(num_words)*100, num_words-pre_trained))
    f.close()
    if normalize:
        # rescale so the overall standard deviation becomes 0.1
        embeddings = embeddings * 0.1 / np.std(embeddings)
    return embeddings.astype(np.float32)
def load_embedding(config, word_dict, normalize=False):
    """Load word2vec-style text embeddings (header line, then 'word v1..vdim').

    Builds a (len(word_dict)+1, dim) matrix aligned with word_dict indices:
    words found in both the embedding file and word_dict get their pretrained
    vector; everything else keeps a seeded uniform(-0.1, 0.1) initialization.

    Returns the matrix as float32; when normalize is True it is rescaled so
    the overall standard deviation becomes 0.1.
    """
    emb_file = config.embedding_file
    # read everything up front and close the handle deterministically
    # (the original left the file object open on exceptions)
    with open(emb_file, 'r') as f:
        f.readline()  # skip the word2vec header (vocab size, dimensionality)
        embed = f.readlines()
    dim = len(embed[0].strip().split()) - 1
    num_words = len(word_dict) + 1  # +1 keeps row 0 for unknown/padding
    embeddings = np.random.RandomState(seed=config.seed).uniform(-0.1, 0.1, size=(num_words, dim))
    pre_trained = 0
    for line in embed:
        line = line.strip().split()
        word = line[0]
        if word in word_dict:
            try:
                embeddings[word_dict[word]] = list(map(float, line[1:]))
            except Exception:
                # malformed vector line: keep the random initialization
                pass
            # bug fix: count pretrained words that actually appear in the
            # corpus. Previously this incremented on the *else* branch,
            # inflating the statistic and disagreeing with
            # load_embedding_senna, which counts the intersection.
            pre_trained += 1
    logging.info('embeddings: %.2f%%(pre_trained) unknown: %d' %(pre_trained/float(num_words)*100, num_words-pre_trained))
    if normalize:
        embeddings = embeddings * 0.1 / np.std(embeddings)
    return embeddings.astype(np.float32)
# given the total data length, max sentence length, position of entities, compute the the
# relative distances of everything with respect to it
# TODO: (geeticka) think about whether it makes sense to use pos function in exactly the same way for the
# case when the sentences are trimmed short
def relative_distance(num_data, max_sen_len, e1_pos, e2_pos):
    """Per-token distances to each entity, encoded via pos() into [0, 123).

    Returns (dist1, dist2, num_pos) where dist1/dist2 are
    (num_data, max_sen_len) int arrays of encoded distances to entity 1 /
    entity 2, and num_pos is the spread (max - min) of the encoded values
    actually present.
    """
    dist1 = np.zeros((num_data, max_sen_len), dtype=int)
    dist2 = np.zeros((num_data, max_sen_len), dtype=int)
    # compute relative distance
    #TODO: (geeticka) think about what to do for the cases when e1_pos and e2_pos is None
    for sent_idx in range(num_data):
        for word_idx in range(max_sen_len):
            if e1_pos[sent_idx] is None or e2_pos[sent_idx] is None:
                # missing entity positions leave the row zeroed
                continue
            if word_idx < e1_pos[sent_idx][0]:
                dist1[sent_idx, word_idx] = pos(e1_pos[sent_idx][0] - word_idx)
                # in the above the word is behind the e1's word
            elif word_idx > e1_pos[sent_idx][1]:
                dist1[sent_idx, word_idx] = pos(e1_pos[sent_idx][1] - word_idx)
                # the word is after the e1
            else:
                dist1[sent_idx, word_idx] = pos(0)
                # the word is within the entity
            if word_idx < e2_pos[sent_idx][0]:
                dist2[sent_idx, word_idx] = pos(e2_pos[sent_idx][0] - word_idx)
            elif word_idx > e2_pos[sent_idx][1]:
                dist2[sent_idx, word_idx] = pos(e2_pos[sent_idx][1] - word_idx)
            else:
                dist2[sent_idx, word_idx] = pos(0)
    num_pos = max(np.amax(dist1), np.amax(dist2)) - min(np.amin(dist1), np.amin(dist2))
    return dist1, dist2, num_pos
def pad_elmo_embedding(max_len, elmo_embeddings):
    """Zero-pad each per-sentence array along the token axis up to max_len.

    Each element of elmo_embeddings is indexed as [layers, tokens, dim];
    the returned list has every array padded to exactly max_len tokens.
    """
    padded = []
    for sentence in elmo_embeddings:
        pad_width = max_len - sentence.shape[1]
        zeros = np.zeros(shape=(sentence.shape[0], pad_width, sentence.shape[2]),
                         dtype='float32')
        padded.append(np.append(sentence, zeros, axis=1))
    return padded
def pad_bert_embedding(max_len, bert_embeddings):
    """Pad (or truncate) each per-sentence array along the token axis.

    Each element is indexed as [layers, tokens, dim]. Arrays shorter than
    max_len tokens are zero-padded; longer ones are truncated to max_len
    (with a printed warning, matching the original behavior).
    """
    adjusted = []
    for sentence in bert_embeddings:
        pad_width = max_len - sentence.shape[1]
        if pad_width < 0:
            sentence_len = len(sentence[0])
            print("Warning! the sentence length %d is larger than %d! Shaving down some" %(
                sentence_len, max_len))
            truncated = np.array(sentence, dtype='float32')[:, :max_len, :]
            adjusted.append(truncated)
        else:
            zeros = np.zeros(shape=(sentence.shape[0], pad_width, sentence.shape[2]),
                             dtype='float32')
            adjusted.append(np.append(sentence, zeros, axis=1))
    return adjusted
def vectorize(config, data, word_dict):
    """Convert tokenized relation data into fixed-size index matrices.

    Produces word-index matrices for the sentence and both entities, the
    encoded relative-distance matrices, and the piecewise-pooling split
    positions; which embedding payload is unpacked/returned depends on the
    config.use_elmo / use_bert_CLS / use_bert_tokens flags.
    """
    def assign_splits(pos1, pos2):
        # Pick the two split points for piecewise pooling, ordered by entity
        # end position; equal ends are only legal outside piecewise mode
        # (except the special-cased i2b2 dataset).
        if pos1[1] < pos2[1]:
            return pos1[1], pos2[1]
        elif pos1[1] > pos2[1]:
            return pos2[1], pos1[1]
        elif config.use_piecewise_pool is True and config.dataset == 'i2b2':
            if pos1[0] < pos2[0]: return pos1[0], pos2[0]
            elif pos1[0] > pos2[0]: return pos2[0], pos2[1]
            else: raise Exception("Both entities overlap exactly")
        elif config.use_piecewise_pool is True:
            raise Exception("Entity positions cannot end at the same position for piecewise splitting")
        # I anticipate the above to be a problem for NER blinding, where there are
        # overlaps between the entity pairs because the existence of the NER label extends the
        # entity pairs
        else:
            return pos1[1], pos2[1] # this is not going to be used anyway, but using these is problematic
    if config.use_elmo is True: sentences, relations, e1_pos, e2_pos, elmo_embeddings = data
    elif config.use_bert_CLS is True or config.use_bert_tokens is True : sentences, relations, e1_pos, e2_pos, bert_embeddings = data
    else: sentences, relations, e1_pos, e2_pos = data
    max_sen_len = config.max_len
    max_e1_len = config.max_e1_len
    max_e2_len = config.max_e2_len
    num_data = len(sentences)
    local_max_e1_len = max(list(map(lambda x: x[1]-x[0]+1, e1_pos)))
    local_max_e2_len = max(list(map(lambda x: x[1]-x[0]+1, e2_pos)))
    print('max sen len: {}, local max e1 len: {}, local max e2 len: {}'.format(max_sen_len, local_max_e1_len, local_max_e2_len))
    if config.use_elmo is True: padded_elmo_embeddings = pad_elmo_embedding(max_sen_len, elmo_embeddings)
    if config.use_bert_tokens is True: padded_bert_embeddings = pad_bert_embedding(max_sen_len, bert_embeddings)
    # maximum values needed to decide the dimensionality of the vector
    sents_vec = np.zeros((num_data, max_sen_len), dtype=int)
    e1_vec = np.zeros((num_data, max_e1_len), dtype=int)
    e2_vec = np.zeros((num_data, max_e2_len), dtype=int)
    # dist1 and dist2 are defined in the compute distance function
    position1 = [] # need to populate this way because want to make sure that the splits are in order
    position2 = []
    for idx, (sent, pos1, pos2) in enumerate(zip(sentences, e1_pos, e2_pos)):
        # all unseen words are mapped to the index 0
        vec = [word_dict[w] if w in word_dict else 0 for w in sent]
        sents_vec[idx, :len(vec)] = vec
        split1, split2 = assign_splits(pos1, pos2)
        position1.append(split1)
        position2.append(split2)
        # for the particular sentence marked by idx, set the entry as the vector gotten from above
        # which is basically just a list of the indexes of the words
        for ii in range(max_e1_len):
            if ii < (pos1[1]-pos1[0]+1):
                e1_vec[idx, ii] = vec[range(pos1[0], pos1[1]+1)[ii]]
                # this is assigning the particular sentence's e1 val to have the index of the corresponding word
            else:
                e1_vec[idx, ii] = vec[pos1[-1]]
                # in the above case it is grabbing the last word in the entity and padding with that
        for ii in range(max_e2_len):
            if ii < (pos2[1]-pos2[0]+1):
                e2_vec[idx, ii] = vec[range(pos2[0], pos2[1]+1)[ii]]
            else:
                e2_vec[idx, ii] = vec[pos2[-1]]
    dist1, dist2, num_pos = relative_distance(num_data, max_sen_len, e1_pos, e2_pos)
    if config.use_elmo is True:
        return sents_vec, np.array(relations).astype(np.int64), e1_vec, e2_vec, dist1, dist2, \
                padded_elmo_embeddings, position1, position2
    if config.use_bert_CLS is True:
        return sents_vec, np.array(relations).astype(np.int64), e1_vec, e2_vec, dist1, dist2, \
                bert_embeddings, position1, position2
    if config.use_bert_tokens is True:
        return sents_vec, np.array(relations).astype(np.int64), e1_vec, e2_vec, dist1, dist2, \
                padded_bert_embeddings, position1, position2
    return sents_vec, np.array(relations).astype(np.int64), e1_vec, e2_vec, dist1, dist2, position1, position2
    # we are also returning the ending positions of the entity 1 and entity 2
def pos(x):
    '''
    map the relative distance between [0, 123)
    '''
    # distances beyond +/-60 are clamped to the ends of the range;
    # everything in between is shifted to be non-negative
    if x < -60:
        return 0
    if x > 60:
        return 122
    return x + 61
def batch_iter(seed, data, batch_size, shuffle=True):
    """
    Generates batches for the NN input feed.
    Returns a generator (yield) as the datasets are expected to be huge.
    """
    data = np.array(data)
    total = len(data)
    num_batches = int(np.ceil(total / float(batch_size)))
    # logging.info("Generating batches.. Total # of batches %d" % batches_per_epoch)
    if shuffle:
        # seeded RandomState keeps the shuffle reproducible across runs
        # (see https://stackoverflow.com/questions/47742622)
        order = np.random.RandomState(seed=seed).permutation(np.arange(total))
        data = data[order]
    for batch_idx in range(num_batches):
        lo = batch_idx * batch_size
        hi = min(lo + batch_size, total)
        yield data[lo:hi]
def test_pred_writer(preds, relation_dict, save_path):
    # Write test-set predictions as '<sentence_id>\t<relation_name>' lines.
    # Ids start at 8001 — presumably the SemEval test-sentence offset;
    # confirm against the dataset the caller uses.
    sent_idx = 8000
    with open(save_path, 'w') as file:
        for pred in preds:
            sent_idx += 1
            file.write('{}\t{}\n'.format(sent_idx, relation_dict[pred]))
    print('Answer writting done!')
def pred_writer(data, preds, relation_dict, save_path, fold_num):
    # Write a human-readable report to '<save_path>_fold<fold_num>': one
    # record per example with the sentence, both entity spans (rebuilt from
    # the inclusive (start, end) positions) and the gold vs predicted label.
    # data is the (sentences, relations, e1_pos, e2_pos) tuple; preds is a
    # parallel sequence of predicted label ids.
    with open(save_path+'_fold{}'.format(fold_num), 'w') as file:
        for sent, relation, e1_pos, e2_pos, pred in zip(*(data + (preds,))):
            file.write('Sentence:\t{}\nEntity 1:\t{}\nEntity 2:\t{}\nGround Truth:\t{}\tPrediction:\t{}\n\n'.format(' '.join(sent),
            ' '.join([sent[idx] for idx in range(e1_pos[0], e1_pos[1]+1)]),
            ' '.join([sent[idx] for idx in range(e2_pos[0], e2_pos[1]+1)]),
            relation_dict[relation], relation_dict[pred]))
    print('Answer writting done!')
def convert_labels(read_file, save_file):
    """Rewrite a parsed relation file with remapped label ids.

    Label 1 becomes 18, labels greater than 1 are shifted down by one, and
    0 is left unchanged; the rest of each line is copied through.
    """
    with open(save_file, 'w') as outfile:
        with open(read_file, 'r') as infile:
            for raw in infile:
                fields = raw.strip().split()
                label = int(fields[0])
                if label == 1:
                    label = 18
                elif label > 1:
                    label -= 1
                outfile.write(' '.join([str(label)] + fields[1:] + ['\n']))
# read the elmo embeddings for the train and the test file
# the dimensionality of the elmo embeddings is [batch, layers, tokens, dimensionality]
# dimensionality is always 1024 in the case of elmo and each token has a representation here
def get_elmo_embeddings(filename):
    """Read per-sentence ELMo arrays from an hdf5 file keyed by the
    stringified sentence index; returned as a one-tuple of a list."""
    store = h5py.File(filename, 'r')
    embeddings = []
    # the h5py file contains one extra index for a new line character,
    # hence len - 1
    for idx in range(0, len(store) - 1):
        embeddings.append(np.array(store.get(str(idx))))
    return (embeddings, )
# this gets the sentence level embedding i.e. the CLS token, so convolution cannot
# be performed with this; Shape returned: [batch, layers, dimensionality]
# in most cases the dimensionality is 768 but in some cases could be 1024 for the BERT-large model
# because the dumping of the embeddings was done using pytorch, refer to their method of writing to
# figure out how to do the reading
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/extract_features.py
# refer to the above to figure out how to read this file
# original file written by BERT has each line that
# looks like {'linex_index': 0, 'features': [{'token': ABC,
# 'layers': [{'index': -1, 'values': [...]}, ...., {'index: -4, 'values': [...]}]}]}
def get_bert_CLS_embeddings(filename):
    """Collect the [CLS] token's layer vectors from a BERT features json.

    Each line of the file is one json record whose first feature must be
    the [CLS] token; the result is a one-tuple of a list with shape
    [batch, layers, dimensionality].
    """
    cls_embeddings = []
    with open(filename, 'r', encoding='utf-8') as json_file:
        for raw in json_file.readlines():
            record = json.loads(raw)
            first_token = record['features'][0]
            if first_token['token'] != '[CLS]':
                raise Exception("The first token has to be CLS!")
            cls_embeddings.append([layer['values'] for layer in first_token['layers']])
    return (cls_embeddings,)
# after having converted a bert embeddings json into the (layers, tokens, dimensionality)
# using the write_bert_tokens_without_word_pieces function
# and merged the word piece embeddings portion, read those
def get_bert_token_embeddings(filename):
    """Load per-sentence (layers, tokens, dim) BERT arrays from the
    merged-word-piece json produced by write_bert_tokens_without_word_pieces."""
    sentence_arrays = []
    with open(filename, 'r', encoding='utf-8') as json_file:
        for raw in json_file.readlines():
            record = json.loads(raw)
            layer_arrays = []
            for layer in record['layers']:
                per_token = [np.array(entry['features']) for entry in layer['values']]
                layer_arrays.append(np.array(per_token))
            sentence_arrays.append(np.array(layer_arrays))
    return (sentence_arrays,)
### A series of helper functions to generate the individual token BERT embeddings
### in the format needed by the CNN
def average_over_token_embedding(indexes, features):
    """Fuse the BERT word-piece features at the given indexes into one token.

    The piece strings are concatenated (e.g. 'he' + '##llo') and each of the
    four layer vectors is the element-wise mean of the pieces' vectors,
    rounded to 6 decimals to match the bert pytorch dump.
    """
    merged = collections.OrderedDict()
    merged_token = ''.join(features[i]['token'] for i in indexes)
    merged_layers = []
    for layer_idx in range(4):
        stacked = [features[i]['layers'][layer_idx]['values'] for i in indexes]
        mean_vector = list(np.mean(stacked, axis=0, dtype=np.float64))
        # according to the bert pytorch code, they are rounding by 6
        merged_layers.append({'index': -(layer_idx + 1),
                              'values': [round(x, 6) for x in mean_vector]})
    merged['token'] = merged_token
    merged['layers'] = merged_layers
    return merged
# generates individual sentence's feature maps after fusing the word pieces together
def generate_feature_map_without_word_piece(features):
    """Fuse word pieces of one sentence and transpose to (layers, tokens).

    Drops [CLS]/[SEP], merges each token with its following '##' pieces via
    average_over_token_embedding, then rewrites the result in the elmo-style
    layout: a list of 4 layer dicts, each holding per-token feature entries.
    """
    # need to double check and see why this is happening
    new_features = []
    i = 0
    while(i < len(features)):
        if features[i]['token'] == '[CLS]' or features[i]['token'] == '[SEP]':
            i += 1
            continue
        # gather the run of '##' continuation pieces that follow token i
        captured_indexes = []
        for j in range(i + 1, len(features)):
            if not features[j]['token'].startswith('##'):
                break
            captured_indexes.append(j)
        if len(captured_indexes) == 0:
            new_features.append(features[i])
            i += 1
            continue
        sum_indexes = [i]
        sum_indexes.extend(captured_indexes)
        new_feature = average_over_token_embedding(sum_indexes, features)
        new_features.append(new_feature)
        # jump past the pieces we just merged
        i = captured_indexes[-1] + 1
    # rewrite in the elmo format as well
    new_features_map = [] # we are converting from the (token, layers) shape to (layers, token) shape
    layer_minus1 = []; layer_minus2 = []; layer_minus3 = []; layer_minus4 = [];
    for token in new_features:
        layer_minus1.append({'token': token['token'], 'features': token['layers'][0]['values']})
        layer_minus2.append({'token': token['token'], 'features': token['layers'][1]['values']})
        layer_minus3.append({'token': token['token'], 'features': token['layers'][2]['values']})
        layer_minus4.append({'token': token['token'], 'features': token['layers'][3]['values']})
    new_features_map.append({'index': -1, 'values': layer_minus1})
    new_features_map.append({'index': -2, 'values': layer_minus2})
    new_features_map.append({'index': -3, 'values': layer_minus3})
    new_features_map.append({'index': -4, 'values': layer_minus4})
    return new_features_map
# because the dumping of the embeddings was done using pytorch, refer to their method of writing to
# figure out how to do the reading
# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/extract_features.py
# refer to the above to figure out how to read this file
# need to import collections
def write_bert_tokens_without_word_pieces(input_filename, output_filename):
    """Rewrite a BERT features json so each line has whole-word features.

    For every input record (which must start with [CLS] and end with [SEP]),
    word pieces are merged via generate_feature_map_without_word_piece and
    the result is written under the 'layers' key, keeping 'linex_index'.
    """
    with open(input_filename, 'r', encoding='utf-8') as input_file:
        with open(output_filename, 'w', encoding='utf-8') as output_file:
            for line in input_file.readlines():
                output_json = collections.OrderedDict()
                data = json.loads(line)
                if data['features'][0]['token'] != '[CLS]': raise Exception("The first token has to be CLS!")
                if data['features'][-1]['token'] != '[SEP]': raise Exception("The last token has to be SEP!")
                output_json['linex_index'] = data['linex_index']
                features = data['features'] # basically for all features['token'] that starts with ##, add up the values
                # for the respective indexes to put the words back together, ignore [CLS] and [SEP] tokens
                new_feature_map = generate_feature_map_without_word_piece(features) # this new feature map needs to be
                # called layers because things have now been shuffled.
                output_json['layers'] = new_feature_map
                output_file.write(json.dumps(output_json) + "\n")
# this function first split the line of data into relation, entities and sentence
# then cut the sentence according to the required border size
# if border size is -1, means using the full sentence, if it is 0, means only using
# the sentence between two entities (inclusive)
# note that using the border size parameter is very specific to the datasets that have
# no entity overlaps and e1 appearing before e2 in the sentence
# this does not work for i2b2 and ddi dataset
def split_data_cut_sentence(data, border_size=-1):
    """Parse relation lines and optionally trim each sentence around its entities.

    border_size < 0 keeps the full sentence; otherwise only the span from
    border_size tokens before entity 1 to border_size tokens after entity 2
    is kept and the entity positions are re-based onto the trimmed sentence.
    Note: the trimming branch assumes e1 starts before e2 ends and silently
    drops lines where it does not, so it is unsuitable for i2b2/ddi data.
    """
    sentences = []
    relations = []
    e1_pos = []
    e2_pos = []
    # is_reversed = []
    # In the parsed data: Num1 num2 num3 num4 num5 sentence
    # Num1 - relation number
    # Num2 - left entity start (starts the numbering from 0)
    # Num3 - left entity end
    # Num4 - right entity start
    # Num5 - right entity end
    if border_size < 0:
        for line in data:
            line = line.strip().lower().split()
            left_start_pos = int(line[1])
            right_end_pos = int(line[4])
            relations.append(int(line[0]))
            e1_pos.append( (int(line[1]), int(line[2])) ) # (start_pos, end_pos)
            e2_pos.append( (int(line[3]), int(line[4])) ) # (start_pos, end_pos)
            # is_reversed.append( float(isreversed_dictionary[int(line[0])]) )
            sentences.append(line[5:])
    else:
        for line in data:
            line = line.strip().lower().split()
            left_start_pos = int(line[1])
            right_end_pos = int(line[4])
            if left_start_pos < right_end_pos:
                relations.append(int(line[0]))
                # is_reversed.append( float(isreversed_dictionary[int(line[0])]) )
                sentence = line[5:]
                len_sen = len(sentence)
                # cannot keep a full border when e1 sits near the start
                if left_start_pos >= border_size:
                    left_border_size = border_size
                else:
                    left_border_size = left_start_pos
                e1_pos.append( (left_border_size, int(line[2])-left_start_pos+left_border_size) ) # (start_pos, end_pos)
                e2_pos.append((int(line[3])-left_start_pos+left_border_size, int(line[4])-left_start_pos+left_border_size)) # (start_pos, end_pos)
                sentences.append(sentence[(left_start_pos-left_border_size):min(right_end_pos+border_size+1, len_sen)])
    return sentences, relations, e1_pos, e2_pos
def graph_file_reader(f_file, dim):
    """Read one tab-separated embedding file into a numpy matrix.

    Each line of ``f_file`` holds ``dim`` float values separated by tabs,
    with a trailing tab before the newline (the empty last field produced
    by the trailing tab is discarded).

    Args:
        f_file: path to the embedding file.
        dim: number of float components per line.

    Returns:
        Tuple ``(num, vector)``: the number of lines read and a float array
        of shape ``(num, dim)`` with one embedding per row.
    """
    # Read the file once; the previous version opened it twice (once only to
    # count lines) and never closed either handle.
    with open(f_file, 'r') as f:
        lines = f.readlines()
    num = len(lines)
    vector = np.zeros((num, dim), dtype=float)
    for i, line in enumerate(lines):
        fields = line.split('\t')[:-1]  # drop the empty field after the trailing tab
        vector[i] = [float(x) for x in fields]
    return num, vector
def graph_reader(entity_file, relation_file, dim):
    """
    Return entity and relation embeddings of Freebase formed using DKRL.
    Taken from the code of "Learning beyond datasets".
    """
    # Load both embedding matrices with the shared file reader.
    entity_count, entity_vecs = graph_file_reader(entity_file, dim)
    relation_count, relation_vecs = graph_file_reader(relation_file, dim)
    print("Number of entities %d" % entity_count)
    print("Number of relations %d" % relation_count)
    return entity_vecs, relation_vecs
if __name__ == '__main__':
    # for item in load_data('data/test.txt'):
    #     print(item)
    # Rewrite the raw train/test files with converted labels (convert_labels
    # is defined elsewhere in this module).
    for src_path, dst_path in (('data/train.txt', 'data/train_new.txt'),
                               ('data/test.txt', 'data/test_new.txt')):
        convert_labels(src_path, dst_path)
| [
"numpy.mean",
"collections.OrderedDict",
"json.loads",
"numpy.amax",
"numpy.amin",
"nltk.download",
"numpy.arange",
"json.dumps",
"pickle.load",
"h5py.File",
"collections.Counter",
"numpy.array",
"numpy.zeros",
"numpy.append",
"numpy.std",
"re.findall",
"pandas.concat",
"numpy.rand... | [((296, 320), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (309, 320), False, 'import nltk\n'), ((6470, 6479), 'collections.Counter', 'Counter', ([], {}), '()\n', (6477, 6479), False, 'from collections import Counter\n'), ((9899, 9912), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (9907, 9912), True, 'import numpy as np\n'), ((12243, 12287), 'numpy.zeros', 'np.zeros', (['(num_data, max_sen_len)'], {'dtype': 'int'}), '((num_data, max_sen_len), dtype=int)\n', (12251, 12287), True, 'import numpy as np\n'), ((12300, 12344), 'numpy.zeros', 'np.zeros', (['(num_data, max_sen_len)'], {'dtype': 'int'}), '((num_data, max_sen_len), dtype=int)\n', (12308, 12344), True, 'import numpy as np\n'), ((16931, 16975), 'numpy.zeros', 'np.zeros', (['(num_data, max_sen_len)'], {'dtype': 'int'}), '((num_data, max_sen_len), dtype=int)\n', (16939, 16975), True, 'import numpy as np\n'), ((16989, 17032), 'numpy.zeros', 'np.zeros', (['(num_data, max_e1_len)'], {'dtype': 'int'}), '((num_data, max_e1_len), dtype=int)\n', (16997, 17032), True, 'import numpy as np\n'), ((17046, 17089), 'numpy.zeros', 'np.zeros', (['(num_data, max_e2_len)'], {'dtype': 'int'}), '((num_data, max_e2_len), dtype=int)\n', (17054, 17089), True, 'import numpy as np\n'), ((19809, 19823), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (19817, 19823), True, 'import numpy as np\n'), ((22620, 22644), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (22629, 22644), False, 'import h5py\n'), ((25211, 25236), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (25234, 25236), False, 'import collections\n'), ((32702, 32735), 'numpy.zeros', 'np.zeros', (['(num, dim)'], {'dtype': 'float'}), '((num, dim), dtype=float)\n', (32710, 32735), True, 'import numpy as np\n'), ((1757, 1825), 'pandas.concat', 'pd.concat', (['[self.relations_splits[0][t] for t in [DEV, TEST, TRAIN]]'], {}), '([self.relations_splits[0][t] for t in 
[DEV, TEST, TRAIN]])\n', (1766, 1825), True, 'import pandas as pd\n'), ((3042, 3075), 're.findall', 're.findall', (['"""^(.*)_\\\\d+$"""', 'string'], {}), "('^(.*)_\\\\d+$', string)\n", (3052, 3075), False, 'import re\n'), ((3179, 3212), 're.findall', 're.findall', (['"""^.*_(\\\\d+)$"""', 'string'], {}), "('^.*_(\\\\d+)$', string)\n", (3189, 3212), False, 'import re\n'), ((13826, 13922), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sentence.shape[0], num_of_words_to_pad, sentence.shape[2])', 'dtype': '"""float32"""'}), "(shape=(sentence.shape[0], num_of_words_to_pad, sentence.shape[2]),\n dtype='float32')\n", (13834, 13922), True, 'import numpy as np\n'), ((13952, 13993), 'numpy.append', 'np.append', (['sentence', 'array_to_pad'], {'axis': '(1)'}), '(sentence, array_to_pad, axis=1)\n', (13961, 13993), True, 'import numpy as np\n'), ((14721, 14817), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sentence.shape[0], num_of_words_to_pad, sentence.shape[2])', 'dtype': '"""float32"""'}), "(shape=(sentence.shape[0], num_of_words_to_pad, sentence.shape[2]),\n dtype='float32')\n", (14729, 14817), True, 'import numpy as np\n'), ((14855, 14896), 'numpy.append', 'np.append', (['sentence', 'array_to_pad'], {'axis': '(1)'}), '(sentence, array_to_pad, axis=1)\n', (14864, 14896), True, 'import numpy as np\n'), ((25946, 25994), 'numpy.mean', 'np.mean', (['layer_minus_1'], {'axis': '(0)', 'dtype': 'np.float64'}), '(layer_minus_1, axis=0, dtype=np.float64)\n', (25953, 25994), True, 'import numpy as np\n'), ((26026, 26074), 'numpy.mean', 'np.mean', (['layer_minus_2'], {'axis': '(0)', 'dtype': 'np.float64'}), '(layer_minus_2, axis=0, dtype=np.float64)\n', (26033, 26074), True, 'import numpy as np\n'), ((26106, 26154), 'numpy.mean', 'np.mean', (['layer_minus_3'], {'axis': '(0)', 'dtype': 'np.float64'}), '(layer_minus_3, axis=0, dtype=np.float64)\n', (26113, 26154), True, 'import numpy as np\n'), ((26186, 26234), 'numpy.mean', 'np.mean', (['layer_minus_4'], {'axis': '(0)', 'dtype': 
'np.float64'}), '(layer_minus_4, axis=0, dtype=np.float64)\n', (26193, 26234), True, 'import numpy as np\n'), ((848, 862), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (859, 862), False, 'import pickle\n'), ((9530, 9569), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'config.seed'}), '(seed=config.seed)\n', (9551, 9569), True, 'import numpy as np\n'), ((10141, 10159), 'numpy.std', 'np.std', (['embeddings'], {}), '(embeddings)\n', (10147, 10159), True, 'import numpy as np\n'), ((10553, 10592), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'config.seed'}), '(seed=config.seed)\n', (10574, 10592), True, 'import numpy as np\n'), ((11800, 11818), 'numpy.std', 'np.std', (['embeddings'], {}), '(embeddings)\n', (11806, 11818), True, 'import numpy as np\n'), ((13479, 13493), 'numpy.amax', 'np.amax', (['dist1'], {}), '(dist1)\n', (13486, 13493), True, 'import numpy as np\n'), ((13495, 13509), 'numpy.amax', 'np.amax', (['dist2'], {}), '(dist2)\n', (13502, 13509), True, 'import numpy as np\n'), ((13517, 13531), 'numpy.amin', 'np.amin', (['dist1'], {}), '(dist1)\n', (13524, 13531), True, 'import numpy as np\n'), ((13533, 13547), 'numpy.amin', 'np.amin', (['dist2'], {}), '(dist2)\n', (13540, 13547), True, 'import numpy as np\n'), ((14532, 14567), 'numpy.array', 'np.array', (['sentence'], {'dtype': '"""float32"""'}), "(sentence, dtype='float32')\n", (14540, 14567), True, 'import numpy as np\n'), ((20107, 20127), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (20116, 20127), True, 'import numpy as np\n'), ((22876, 22895), 'numpy.array', 'np.array', (['embedding'], {}), '(embedding)\n', (22884, 22895), True, 'import numpy as np\n'), ((23865, 23881), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (23875, 23881), False, 'import json\n'), ((24640, 24656), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (24650, 24656), False, 'import json\n'), ((19155, 19174), 'numpy.array', 'np.array', (['relations'], 
{}), '(relations)\n', (19163, 19174), True, 'import numpy as np\n'), ((20062, 20094), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (20083, 20094), True, 'import numpy as np\n'), ((24959, 24984), 'numpy.array', 'np.array', (['embedding_layer'], {}), '(embedding_layer)\n', (24967, 24984), True, 'import numpy as np\n'), ((29168, 29193), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (29191, 29193), False, 'import collections\n'), ((29221, 29237), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (29231, 29237), False, 'import json\n'), ((18632, 18651), 'numpy.array', 'np.array', (['relations'], {}), '(relations)\n', (18640, 18651), True, 'import numpy as np\n'), ((18813, 18832), 'numpy.array', 'np.array', (['relations'], {}), '(relations)\n', (18821, 18832), True, 'import numpy as np\n'), ((19002, 19021), 'numpy.array', 'np.array', (['relations'], {}), '(relations)\n', (19010, 19021), True, 'import numpy as np\n'), ((24907, 24922), 'numpy.array', 'np.array', (['token'], {}), '(token)\n', (24915, 24922), True, 'import numpy as np\n'), ((24838, 24866), 'numpy.array', 'np.array', (["tokens['features']"], {}), "(tokens['features'])\n", (24846, 24866), True, 'import numpy as np\n'), ((30070, 30093), 'json.dumps', 'json.dumps', (['output_json'], {}), '(output_json)\n', (30080, 30093), False, 'import json\n')] |
"""Mean-scale hyperprior model (no context model), as described in "Joint Autoregressive and Hierarchical Priors for
Learned Image Compression", NeurIPS2018, by Minnen, Ballé, and Toderici (https://arxiv.org/abs/1809.02736
Also see
<NAME>, <NAME>, <NAME>:
"Improving Inference for Neural Image Compression", NeurIPS 2020
https://arxiv.org/pdf/2006.04240.pdf
where this is the "base" hyperprior model (M3 in Table 1 of paper).
We have a generative model of images:
z_tilde -> y_tilde -> x
where
p(z_tilde) = flexible_cdf_dist,
p(y_tilde | z_tilde) = N(y_tilde | hyper_synthesis_transform(z_tilde)) convolved with U(-0.5, 0.5),
p(x | y_tilde) = N(x | synthesis_transform(y_tilde)
and the following inference model:
x -> y_tilde z_tilde
\_________/^
where
q(y_tilde | x) = U(y-0.5, y+0.5), where y = analysis_transform(x)
q(z_tilde | x) = U(z-0.5, z+0.5), where z = hyper_analysis_transform(y)
"""
import argparse
import glob
import sys
import os
from absl import app
from absl.flags import argparse_flags
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_compression.python.ops import math_ops
# Fix both numpy's and TensorFlow's RNG seeds so runs are reproducible.
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
import tensorflow_compression as tfc
from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform
from nn_models import MBT2018HyperSynthesisTransform as HyperSynthesisTransform
from utils import read_png, quantize_image, write_png, read_npy_file_helper, get_runname
# Bounds and resolution of the scale table used by tfc.GaussianConditional:
# SCALES_LEVELS values log-spaced between SCALES_MIN and SCALES_MAX
# (see build_graph / decompress).
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64
def build_graph(args, x, training=True):
    """
    Build the computational graph of the model. x should be a float tensor of shape [batch, H, W, 3].
    Given original image x, the model computes a lossy reconstruction x_tilde and various other quantities of interest.
    During training we sample from box-shaped posteriors; during compression this is approximated by rounding.
    Returns this function's locals() dict so callers can pick out any tensor
    of interest (likelihoods, x_tilde, the bottleneck objects, etc.).
    """
    # Instantiate model.
    analysis_transform = AnalysisTransform(args.num_filters)
    synthesis_transform = SynthesisTransform(args.num_filters)
    hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)
    # Hyper-synthesis outputs 2*num_filters channels: a mean map and a scale map.
    hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters, num_output_filters=2 * args.num_filters)
    entropy_bottleneck = tfc.EntropyBottleneck()
    # Build autoencoder and hyperprior.
    y = analysis_transform(x) # y = g_a(x)
    z = hyper_analysis_transform(y) # z = h_a(y)
    # sample z_tilde from q(z_tilde|x) = q(z_tilde|h_a(g_a(x))), and compute the pdf of z_tilde under the flexible prior
    # p(z_tilde) ("z_likelihoods")
    z_tilde, z_likelihoods = entropy_bottleneck(z, training=training)
    # Predict mean and (pre-exponential) scale of the conditional prior on y from z_tilde.
    mu, sigma = tf.split(hyper_synthesis_transform(z_tilde), num_or_size_splits=2, axis=-1)
    sigma = tf.exp(sigma) # make positive
    if not training: # need to handle images with non-standard sizes during compression; mu/sigma must have the same shape as y
        y_shape = tf.shape(y)
        mu = mu[:, :y_shape[1], :y_shape[2], :]
        sigma = sigma[:, :y_shape[1], :y_shape[2], :]
    # Log-spaced table of scales for the Gaussian conditional entropy model.
    scale_table = np.exp(np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))
    conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table, mean=mu)
    # sample y_tilde from q(y_tilde|x) = U(y-0.5, y+0.5) = U(g_a(x)-0.5, g_a(x)+0.5), and then compute the pdf of
    # y_tilde under the conditional prior/entropy model p(y_tilde|z_tilde) = N(y_tilde|mu, sigma^2) conv U(-0.5, 0.5)
    y_tilde, y_likelihoods = conditional_bottleneck(y, training=training)
    x_tilde = synthesis_transform(y_tilde)
    if not training:
        # Actual entropy coding to bit strings (only needed at compression time).
        side_string = entropy_bottleneck.compress(z)
        string = conditional_bottleneck.compress(y)
        x_shape = tf.shape(x)
        x_tilde = x_tilde[:, :x_shape[1], :x_shape[2], :] # crop reconstruction to have the same shape as input
    return locals()
def build_train_graph(args, x):
    """Build the training graph: rate-distortion loss plus optimizer ops.

    Wraps build_graph(args, x, training=True) and adds the rate-distortion
    loss (args.lmbda * MSE + bpp), an Adam optimizer for the main loss, a
    second Adam optimizer for the entropy bottleneck's auxiliary loss, and
    quantized versions of the original/reconstructed images.  Returns
    locals() so the caller can pick out train_op, train_loss, psnr, etc.
    """
    graph = build_graph(args, x, training=True)
    y_likelihoods, z_likelihoods, x_tilde, = graph['y_likelihoods'], graph['z_likelihoods'], graph['x_tilde']
    entropy_bottleneck = graph['entropy_bottleneck']
    # Total number of bits divided by number of pixels.
    # - log p(\tilde y | \tilde z) - log p(\tilde z)
    num_pixels = args.batchsize * args.patchsize ** 2
    y_bpp = -tf.reduce_sum(tf.log(y_likelihoods)) / (np.log(2) * num_pixels)
    z_bpp = -tf.reduce_sum(tf.log(z_likelihoods)) / (np.log(2) * num_pixels)
    # train_bpp = (-tf.reduce_sum(tf.log(y_likelihoods)) -
    #              tf.reduce_sum(tf.log(z_likelihoods))) / (np.log(2) * num_pixels)
    train_bpp = y_bpp + z_bpp
    # Mean squared error across pixels.
    train_mse = tf.reduce_mean(tf.squared_difference(x, x_tilde))
    # Multiply by 255^2 to correct for rescaling.
    float_train_mse = train_mse
    psnr = - 10 * (tf.log(float_train_mse) / np.log(10)) # float MSE computed on float images
    train_mse *= 255 ** 2
    # The rate-distortion cost.
    train_loss = args.lmbda * train_mse + train_bpp
    # Minimize loss and auxiliary loss, and execute update op.
    step = tf.train.create_global_step()
    main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    main_step = main_optimizer.minimize(train_loss, global_step=step)
    # Separate optimizer (higher learning rate) for the bottleneck's auxiliary loss.
    aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
    aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])
    # One op that runs both optimizer steps and the bottleneck's update op.
    train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
    model_name = os.path.splitext(os.path.basename(__file__))[0]
    original = quantize_image(x)
    reconstruction = quantize_image(x_tilde)
    return locals()
def compress(args):
    """Compresses an image, or a batch of images of the same shape in npy format.

    Restores the latest checkpoint from args.checkpoint_dir/args.runname,
    runs the inference (training=False) graph batch by batch, entropy-codes
    the latents, and saves per-image rate/distortion results as an .npz file
    in args.results_dir.
    """
    from configs import get_eval_batch_size, write_tfci_for_eval
    if args.input_file.endswith('.npy'):
        # .npy file should contain N images of the same shapes, in the form of an array of shape [N, H, W, 3]
        X = np.load(args.input_file)
    else:
        # Load input image and add batch dimension.
        from PIL import Image
        x = np.asarray(Image.open(args.input_file).convert('RGB'))
        X = x[None, ...]
    num_images = int(X.shape[0])
    num_pixels = int(np.prod(X.shape[1:-1]))  # pixels per image (H * W)
    # Rescale pixel values to [0, 1] floats for the model.
    X = X.astype('float32')
    X /= 255.
    eval_batch_size = get_eval_batch_size(num_pixels)
    dataset = tf.data.Dataset.from_tensor_slices(X)
    dataset = dataset.batch(batch_size=eval_batch_size)
    # https://www.tensorflow.org/api_docs/python/tf/compat/v1/data/Iterator
    # Importantly, each sess.run(op) call will consume a new batch, where op is any operation that depends on
    # x. Therefore if multiple ops need to be evaluated on the same batch of data, they have to be grouped like
    # sess.run([op1, op2, ...]).
    x_next = dataset.make_one_shot_iterator().get_next()
    x_ph = x = tf.placeholder('float32', (None, *X.shape[1:])) # keep a reference around for feed_dict
    graph = build_graph(args, x, training=False)
    y_likelihoods, z_likelihoods, x_tilde = graph['y_likelihoods'], graph['z_likelihoods'], graph['x_tilde']
    string, side_string = graph['string'], graph['side_string']
    # graph = build_graph(args, x, training=False)
    # y_likelihoods, z_likelihoods, x_tilde, = graph['y_likelihoods'], graph['z_likelihoods'], graph['x_tilde']
    # string, side_string = graph['string'], graph['side_string']
    # Total number of bits divided by number of pixels.
    axes_except_batch = list(range(1, len(x.shape))) # should be [1,2,3]
    # Estimated bpp from the likelihood model, one value per image in the batch.
    y_bpp = tf.reduce_sum(-tf.log(y_likelihoods), axis=axes_except_batch) / (np.log(2) * num_pixels)
    z_bpp = tf.reduce_sum(-tf.log(z_likelihoods), axis=axes_except_batch) / (np.log(2) * num_pixels)
    eval_bpp = y_bpp + z_bpp  # shape (N,)
    # Bring both images back to 0..255 range.
    x *= 255
    x_tilde = tf.clip_by_value(x_tilde, 0, 1)
    x_tilde = tf.round(x_tilde * 255)
    mse = tf.reduce_mean(tf.squared_difference(x, x_tilde), axis=axes_except_batch) # shape (N,)
    psnr = tf.image.psnr(x_tilde, x, 255) # shape (N,)
    msssim = tf.image.ssim_multiscale(x_tilde, x, 255) # shape (N,)
    msssim_db = -10 * tf.log(1 - msssim) / np.log(10) # shape (N,)
    x_shape = graph['x_shape']
    y_shape = graph['y_shape']
    z_shape = tf.shape(graph['z'])
    with tf.Session() as sess:
        # Load the latest model checkpoint, get the compressed string and the tensor
        # shapes.
        save_dir = os.path.join(args.checkpoint_dir, args.runname)
        latest = tf.train.latest_checkpoint(checkpoint_dir=save_dir)
        tf.train.Saver().restore(sess, save_path=latest)
        eval_fields = ['mse', 'psnr', 'msssim', 'msssim_db', 'est_bpp', 'est_y_bpp', 'est_z_bpp']
        eval_tensors = [mse, psnr, msssim, msssim_db, eval_bpp, y_bpp, z_bpp]
        all_results_arrs = {key: [] for key in eval_fields}  # append across all batches
        compression_tensors = [string, side_string, x_shape[1:-1], y_shape[1:-1], z_shape[1:-1]]
        batch_actual_bpp = []
        batch_sizes = []
        batch_idx = 0
        while True:
            try:
                x_val = sess.run(x_next)
                x_feed_dict = {x_ph: x_val}
                # If requested, transform the quantized image back and measure performance.
                eval_arrs = sess.run(eval_tensors, feed_dict=x_feed_dict)
                for field, arr in zip(eval_fields, eval_arrs):
                    all_results_arrs[field] += arr.tolist()
                # Write a binary file with the shape information and the compressed string.
                packed = tfc.PackedTensors()
                compression_arrs = sess.run(compression_tensors, feed_dict=x_feed_dict)
                packed.pack(compression_tensors, compression_arrs)
                if write_tfci_for_eval:
                    with open(args.output_file, "wb") as f:
                        f.write(packed.string)
                # The actual bits per pixel including overhead.
                batch_actual_bpp.append(
                    len(packed.string) * 8 / num_pixels)  # packed.string is the encoding for the entire batch
                batch_sizes.append(len(eval_arrs[0]))
                batch_idx += 1
            except tf.errors.OutOfRangeError:
                # Dataset iterator exhausted: all batches processed.
                break
        for field in eval_fields:
            all_results_arrs[field] = np.asarray(all_results_arrs[field])
        all_results_arrs['batch_actual_bpp'] = np.asarray(batch_actual_bpp)
        all_results_arrs['batch_sizes'] = np.asarray(batch_sizes)
        # Overall actual bpp: sum of per-batch (bits / pixels-per-image) over total images.
        avg_batch_actual_bpp = np.sum(batch_actual_bpp) / np.sum(batch_sizes)
        eval_fields.append('avg_batch_actual_bpp')
        all_results_arrs['avg_batch_actual_bpp'] = avg_batch_actual_bpp
        input_file = os.path.basename(args.input_file)
        results_dict = all_results_arrs
        np.savez(os.path.join(args.results_dir, 'rd-%s-file=%s.npz'
                              % (args.runname, input_file)), **results_dict)
        for field in eval_fields:
            arr = all_results_arrs[field]
            print('Avg {}: {:0.4f}'.format(field, arr.mean()))
def decompress(args):
    """Decompresses an image.

    Reads a packed binary (.tfci) file, rebuilds the decoder half of the
    graph, restores the latest checkpoint from args.checkpoint_dir/args.runname
    and writes the reconstruction to args.output_file as PNG.
    """
    # Adapted from https://github.com/tensorflow/compression/blob/master/examples/bmshj2018.py
    # Read the shape information and compressed string from the binary file.
    string = tf.placeholder(tf.string, [1])
    side_string = tf.placeholder(tf.string, [1])
    x_shape = tf.placeholder(tf.int32, [2])
    y_shape = tf.placeholder(tf.int32, [2])
    z_shape = tf.placeholder(tf.int32, [2])
    with open(args.input_file, "rb") as f:
        packed = tfc.PackedTensors(f.read())
    tensors = [string, side_string, x_shape, y_shape, z_shape]
    arrays = packed.unpack(tensors)
    # Instantiate model. TODO: automate this with build_graph
    synthesis_transform = SynthesisTransform(args.num_filters)
    hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters, num_output_filters=2 * args.num_filters)
    entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)
    # Decompress and transform the image back.
    # Append the channel dimension to the (spatial) z shape read from the file.
    z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)
    z_hat = entropy_bottleneck.decompress(
        side_string, z_shape, channels=args.num_filters)
    # Predict mean and (pre-exponential) scale of the conditional prior on y.
    mu, sigma = tf.split(hyper_synthesis_transform(z_hat), num_or_size_splits=2, axis=-1)
    sigma = tf.exp(sigma) # make positive
    training = False
    if not training: # need to handle images with non-standard sizes during compression; mu/sigma must have the same shape as y
        mu = mu[:, :y_shape[0], :y_shape[1], :]
        sigma = sigma[:, :y_shape[0], :y_shape[1], :]
    # Same log-spaced scale table as used at compression time.
    scale_table = np.exp(np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))
    conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table, mean=mu, dtype=tf.float32)
    y_hat = conditional_bottleneck.decompress(string)
    x_hat = synthesis_transform(y_hat)
    # Remove batch dimension, and crop away any extraneous padding on the bottom
    # or right boundaries.
    x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]
    # Write reconstructed image out as a PNG file.
    op = write_png(args.output_file, x_hat)
    # Load the latest model checkpoint, and perform the above actions.
    with tf.Session() as sess:
        save_dir = os.path.join(args.checkpoint_dir, args.runname)
        latest = tf.train.latest_checkpoint(checkpoint_dir=save_dir)
        tf.train.Saver().restore(sess, save_path=latest)
        sess.run(op, feed_dict=dict(zip(tensors, arrays)))
from tf_boilerplate import train, parse_args
def main(args):
    """Dispatch to the train / compress / decompress subcommand in args.command."""
    cmd = args.command
    if cmd == "train":
        train(args, build_train_graph=build_train_graph)
        return
    if cmd == "compress":
        # Default the output path to <input>.tfci when none was given.
        if not args.output_file:
            args.output_file = args.input_file + ".tfci"
        compress(args)
        return
    if cmd == "decompress":
        # Default the output path to <input>.png when none was given.
        if not args.output_file:
            args.output_file = args.input_file + ".png"
        decompress(args)
if __name__ == "__main__":
    # absl's app.run parses command-line flags via parse_args before calling main.
    app.run(main, flags_parser=parse_args)
| [
"numpy.prod",
"tf_boilerplate.train",
"tensorflow.compat.v1.exp",
"utils.write_png",
"tensorflow_compression.GaussianConditional",
"tensorflow.compat.v1.shape",
"numpy.log",
"tensorflow.compat.v1.train.AdamOptimizer",
"nn_models.AnalysisTransform",
"tensorflow.compat.v1.squared_difference",
"ten... | [((1130, 1150), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1144, 1150), True, 'import numpy as np\n'), ((1151, 1175), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (1169, 1175), True, 'import tensorflow.compat.v1 as tf\n'), ((1964, 1999), 'nn_models.AnalysisTransform', 'AnalysisTransform', (['args.num_filters'], {}), '(args.num_filters)\n', (1981, 1999), False, 'from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform\n'), ((2026, 2062), 'nn_models.SynthesisTransform', 'SynthesisTransform', (['args.num_filters'], {}), '(args.num_filters)\n', (2044, 2062), False, 'from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform\n'), ((2094, 2134), 'nn_models.HyperAnalysisTransform', 'HyperAnalysisTransform', (['args.num_filters'], {}), '(args.num_filters)\n', (2116, 2134), False, 'from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform\n'), ((2167, 2254), 'nn_models.MBT2018HyperSynthesisTransform', 'HyperSynthesisTransform', (['args.num_filters'], {'num_output_filters': '(2 * args.num_filters)'}), '(args.num_filters, num_output_filters=2 * args.\n num_filters)\n', (2190, 2254), True, 'from nn_models import MBT2018HyperSynthesisTransform as HyperSynthesisTransform\n'), ((2275, 2298), 'tensorflow_compression.EntropyBottleneck', 'tfc.EntropyBottleneck', ([], {}), '()\n', (2296, 2298), True, 'import tensorflow_compression as tfc\n'), ((2765, 2778), 'tensorflow.compat.v1.exp', 'tf.exp', (['sigma'], {}), '(sigma)\n', (2771, 2778), True, 'import tensorflow.compat.v1 as tf\n'), ((3179, 3231), 'tensorflow_compression.GaussianConditional', 'tfc.GaussianConditional', (['sigma', 'scale_table'], {'mean': 'mu'}), '(sigma, scale_table, mean=mu)\n', (3202, 3231), True, 'import tensorflow_compression as tfc\n'), ((5078, 5107), 'tensorflow.compat.v1.train.create_global_step', 'tf.train.create_global_step', ([], {}), '()\n', (5105, 
5107), True, 'import tensorflow.compat.v1 as tf\n'), ((5129, 5173), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (5151, 5173), True, 'import tensorflow.compat.v1 as tf\n'), ((5263, 5306), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (5285, 5306), True, 'import tensorflow.compat.v1 as tf\n'), ((5390, 5450), 'tensorflow.compat.v1.group', 'tf.group', (['main_step', 'aux_step', 'entropy_bottleneck.updates[0]'], {}), '(main_step, aux_step, entropy_bottleneck.updates[0])\n', (5398, 5450), True, 'import tensorflow.compat.v1 as tf\n'), ((5532, 5549), 'utils.quantize_image', 'quantize_image', (['x'], {}), '(x)\n', (5546, 5549), False, 'from utils import read_png, quantize_image, write_png, read_npy_file_helper, get_runname\n'), ((5571, 5594), 'utils.quantize_image', 'quantize_image', (['x_tilde'], {}), '(x_tilde)\n', (5585, 5594), False, 'from utils import read_png, quantize_image, write_png, read_npy_file_helper, get_runname\n'), ((6304, 6335), 'configs.get_eval_batch_size', 'get_eval_batch_size', (['num_pixels'], {}), '(num_pixels)\n', (6323, 6335), False, 'from configs import get_eval_batch_size, write_tfci_for_eval\n'), ((6350, 6387), 'tensorflow.compat.v1.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['X'], {}), '(X)\n', (6384, 6387), True, 'import tensorflow.compat.v1 as tf\n'), ((6848, 6895), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['"""float32"""', '(None, *X.shape[1:])'], {}), "('float32', (None, *X.shape[1:]))\n", (6862, 6895), True, 'import tensorflow.compat.v1 as tf\n'), ((7838, 7869), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['x_tilde', '(0)', '(1)'], {}), '(x_tilde, 0, 1)\n', (7854, 7869), True, 'import tensorflow.compat.v1 as tf\n'), ((7884, 7907), 'tensorflow.compat.v1.round', 'tf.round', (['(x_tilde * 255)'], {}), '(x_tilde 
* 255)\n', (7892, 7907), True, 'import tensorflow.compat.v1 as tf\n'), ((8018, 8048), 'tensorflow.compat.v1.image.psnr', 'tf.image.psnr', (['x_tilde', 'x', '(255)'], {}), '(x_tilde, x, 255)\n', (8031, 8048), True, 'import tensorflow.compat.v1 as tf\n'), ((8076, 8117), 'tensorflow.compat.v1.image.ssim_multiscale', 'tf.image.ssim_multiscale', (['x_tilde', 'x', '(255)'], {}), '(x_tilde, x, 255)\n', (8100, 8117), True, 'import tensorflow.compat.v1 as tf\n'), ((8276, 8296), 'tensorflow.compat.v1.shape', 'tf.shape', (["graph['z']"], {}), "(graph['z'])\n", (8284, 8296), True, 'import tensorflow.compat.v1 as tf\n'), ((11367, 11397), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.string', '[1]'], {}), '(tf.string, [1])\n', (11381, 11397), True, 'import tensorflow.compat.v1 as tf\n'), ((11416, 11446), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.string', '[1]'], {}), '(tf.string, [1])\n', (11430, 11446), True, 'import tensorflow.compat.v1 as tf\n'), ((11461, 11490), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[2]'], {}), '(tf.int32, [2])\n', (11475, 11490), True, 'import tensorflow.compat.v1 as tf\n'), ((11505, 11534), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[2]'], {}), '(tf.int32, [2])\n', (11519, 11534), True, 'import tensorflow.compat.v1 as tf\n'), ((11549, 11578), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[2]'], {}), '(tf.int32, [2])\n', (11563, 11578), True, 'import tensorflow.compat.v1 as tf\n'), ((11855, 11891), 'nn_models.SynthesisTransform', 'SynthesisTransform', (['args.num_filters'], {}), '(args.num_filters)\n', (11873, 11891), False, 'from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform\n'), ((11924, 12011), 'nn_models.MBT2018HyperSynthesisTransform', 'HyperSynthesisTransform', (['args.num_filters'], {'num_output_filters': '(2 * args.num_filters)'}), '(args.num_filters, num_output_filters=2 * args.\n num_filters)\n', (11947, 
12011), True, 'from nn_models import MBT2018HyperSynthesisTransform as HyperSynthesisTransform\n'), ((12032, 12071), 'tensorflow_compression.EntropyBottleneck', 'tfc.EntropyBottleneck', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (12053, 12071), True, 'import tensorflow_compression as tfc\n'), ((12134, 12182), 'tensorflow.compat.v1.concat', 'tf.concat', (['[z_shape, [args.num_filters]]'], {'axis': '(0)'}), '([z_shape, [args.num_filters]], axis=0)\n', (12143, 12182), True, 'import tensorflow.compat.v1 as tf\n'), ((12386, 12399), 'tensorflow.compat.v1.exp', 'tf.exp', (['sigma'], {}), '(sigma)\n', (12392, 12399), True, 'import tensorflow.compat.v1 as tf\n'), ((12791, 12861), 'tensorflow_compression.GaussianConditional', 'tfc.GaussianConditional', (['sigma', 'scale_table'], {'mean': 'mu', 'dtype': 'tf.float32'}), '(sigma, scale_table, mean=mu, dtype=tf.float32)\n', (12814, 12861), True, 'import tensorflow_compression as tfc\n'), ((13175, 13209), 'utils.write_png', 'write_png', (['args.output_file', 'x_hat'], {}), '(args.output_file, x_hat)\n', (13184, 13209), False, 'from utils import read_png, quantize_image, write_png, read_npy_file_helper, get_runname\n'), ((14120, 14158), 'absl.app.run', 'app.run', (['main'], {'flags_parser': 'parse_args'}), '(main, flags_parser=parse_args)\n', (14127, 14158), False, 'from absl import app\n'), ((2943, 2954), 'tensorflow.compat.v1.shape', 'tf.shape', (['y'], {}), '(y)\n', (2951, 2954), True, 'import tensorflow.compat.v1 as tf\n'), ((3727, 3738), 'tensorflow.compat.v1.shape', 'tf.shape', (['x'], {}), '(x)\n', (3735, 3738), True, 'import tensorflow.compat.v1 as tf\n'), ((4680, 4713), 'tensorflow.compat.v1.squared_difference', 'tf.squared_difference', (['x', 'x_tilde'], {}), '(x, x_tilde)\n', (4701, 4713), True, 'import tensorflow.compat.v1 as tf\n'), ((5951, 5975), 'numpy.load', 'np.load', (['args.input_file'], {}), '(args.input_file)\n', (5958, 5975), True, 'import numpy as np\n'), ((6215, 6237), 'numpy.prod', 'np.prod', 
(['X.shape[1:-1]'], {}), '(X.shape[1:-1])\n', (6222, 6237), True, 'import numpy as np\n'), ((7934, 7967), 'tensorflow.compat.v1.squared_difference', 'tf.squared_difference', (['x', 'x_tilde'], {}), '(x, x_tilde)\n', (7955, 7967), True, 'import tensorflow.compat.v1 as tf\n'), ((8175, 8185), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (8181, 8185), True, 'import numpy as np\n'), ((8307, 8319), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (8317, 8319), True, 'import tensorflow.compat.v1 as tf\n'), ((8451, 8498), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', 'args.runname'], {}), '(args.checkpoint_dir, args.runname)\n', (8463, 8498), False, 'import os\n'), ((8516, 8567), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'save_dir'}), '(checkpoint_dir=save_dir)\n', (8542, 8567), True, 'import tensorflow.compat.v1 as tf\n'), ((10448, 10476), 'numpy.asarray', 'np.asarray', (['batch_actual_bpp'], {}), '(batch_actual_bpp)\n', (10458, 10476), True, 'import numpy as np\n'), ((10519, 10542), 'numpy.asarray', 'np.asarray', (['batch_sizes'], {}), '(batch_sizes)\n', (10529, 10542), True, 'import numpy as np\n'), ((10767, 10800), 'os.path.basename', 'os.path.basename', (['args.input_file'], {}), '(args.input_file)\n', (10783, 10800), False, 'import os\n'), ((13291, 13303), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (13301, 13303), True, 'import tensorflow.compat.v1 as tf\n'), ((13332, 13379), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', 'args.runname'], {}), '(args.checkpoint_dir, args.runname)\n', (13344, 13379), False, 'import os\n'), ((13397, 13448), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': 'save_dir'}), '(checkpoint_dir=save_dir)\n', (13423, 13448), True, 'import tensorflow.compat.v1 as tf\n'), ((13695, 13743), 'tf_boilerplate.train', 'train', (['args'], {'build_train_graph': 'build_train_graph'}), 
'(args, build_train_graph=build_train_graph)\n', (13700, 13743), False, 'from tf_boilerplate import train, parse_args\n'), ((3094, 3112), 'numpy.log', 'np.log', (['SCALES_MIN'], {}), '(SCALES_MIN)\n', (3100, 3112), True, 'import numpy as np\n'), ((3114, 3132), 'numpy.log', 'np.log', (['SCALES_MAX'], {}), '(SCALES_MAX)\n', (3120, 3132), True, 'import numpy as np\n'), ((4334, 4343), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4340, 4343), True, 'import numpy as np\n'), ((4411, 4420), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4417, 4420), True, 'import numpy as np\n'), ((4816, 4839), 'tensorflow.compat.v1.log', 'tf.log', (['float_train_mse'], {}), '(float_train_mse)\n', (4822, 4839), True, 'import tensorflow.compat.v1 as tf\n'), ((4842, 4852), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (4848, 4852), True, 'import numpy as np\n'), ((5486, 5512), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (5502, 5512), False, 'import os\n'), ((7596, 7605), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7602, 7605), True, 'import numpy as np\n'), ((7697, 7706), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7703, 7706), True, 'import numpy as np\n'), ((8154, 8172), 'tensorflow.compat.v1.log', 'tf.log', (['(1 - msssim)'], {}), '(1 - msssim)\n', (8160, 8172), True, 'import tensorflow.compat.v1 as tf\n'), ((10364, 10399), 'numpy.asarray', 'np.asarray', (['all_results_arrs[field]'], {}), '(all_results_arrs[field])\n', (10374, 10399), True, 'import numpy as np\n'), ((10575, 10599), 'numpy.sum', 'np.sum', (['batch_actual_bpp'], {}), '(batch_actual_bpp)\n', (10581, 10599), True, 'import numpy as np\n'), ((10602, 10621), 'numpy.sum', 'np.sum', (['batch_sizes'], {}), '(batch_sizes)\n', (10608, 10621), True, 'import numpy as np\n'), ((10858, 10943), 'os.path.join', 'os.path.join', (['args.results_dir', "('rd-%s-file=%s.npz' % (args.runname, input_file))"], {}), "(args.results_dir, 'rd-%s-file=%s.npz' % (args.runname, input_file)\n )\n", (10870, 
10943), False, 'import os\n'), ((12706, 12724), 'numpy.log', 'np.log', (['SCALES_MIN'], {}), '(SCALES_MIN)\n', (12712, 12724), True, 'import numpy as np\n'), ((12726, 12744), 'numpy.log', 'np.log', (['SCALES_MAX'], {}), '(SCALES_MAX)\n', (12732, 12744), True, 'import numpy as np\n'), ((4308, 4329), 'tensorflow.compat.v1.log', 'tf.log', (['y_likelihoods'], {}), '(y_likelihoods)\n', (4314, 4329), True, 'import tensorflow.compat.v1 as tf\n'), ((4385, 4406), 'tensorflow.compat.v1.log', 'tf.log', (['z_likelihoods'], {}), '(z_likelihoods)\n', (4391, 4406), True, 'import tensorflow.compat.v1 as tf\n'), ((7546, 7567), 'tensorflow.compat.v1.log', 'tf.log', (['y_likelihoods'], {}), '(y_likelihoods)\n', (7552, 7567), True, 'import tensorflow.compat.v1 as tf\n'), ((7647, 7668), 'tensorflow.compat.v1.log', 'tf.log', (['z_likelihoods'], {}), '(z_likelihoods)\n', (7653, 7668), True, 'import tensorflow.compat.v1 as tf\n'), ((8576, 8592), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (8590, 8592), True, 'import tensorflow.compat.v1 as tf\n'), ((9596, 9615), 'tensorflow_compression.PackedTensors', 'tfc.PackedTensors', ([], {}), '()\n', (9613, 9615), True, 'import tensorflow_compression as tfc\n'), ((13457, 13473), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (13471, 13473), True, 'import tensorflow.compat.v1 as tf\n'), ((6091, 6118), 'PIL.Image.open', 'Image.open', (['args.input_file'], {}), '(args.input_file)\n', (6101, 6118), False, 'from PIL import Image\n')] |
"""
Support functions for BIDS MRI fieldmap handling
MIT License
Copyright (c) 2017-2022 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import json
import bids
import numpy as np
from glob import glob
from . import io as bio
from . import dcm2niix as d2n
from . import translate as tr
from .json import (acqtime_mins)
def bind_fmaps(bids_subj_dir):
    """
    Associate each functional series with its nearest-in-time fieldmap.

    Only one fieldmap flavor is bound per session: SE-EPI pairs take
    precedence, and GRE fieldmaps are ignored whenever SE-EPI maps exist.

    :param bids_subj_dir: string
        BIDS root directory
    """
    print('  Subject {}'.format(os.path.basename(bids_subj_dir)))
    for sess_dir in glob(os.path.join(bids_subj_dir, 'ses-*')):
        print('    Session {}'.format(os.path.basename(sess_dir)))
        # BOLD sidecars and their acquisition times (minutes)
        bold_jsons = glob(os.path.join(sess_dir, 'func', '*task-*_bold.json'))
        t_bold = np.array([acqtime_mins(fname) for fname in bold_jsons])
        # Candidate fieldmap sidecars in this session's fmap/ folder
        fmap_dir = os.path.join(sess_dir, 'fmap')
        epi_fmap_jsons = glob(os.path.join(fmap_dir, '*_dir-*_epi.json'))
        gre_fmap_jsons = glob(os.path.join(fmap_dir, '*_phasediff.json'))
        # SE-EPI binding wins; fall back to GRE; otherwise report nothing found
        if epi_fmap_jsons:
            bind_epi_fmaps(epi_fmap_jsons, bold_jsons, t_bold)
        elif gre_fmap_jsons:
            bind_gre_fmaps(gre_fmap_jsons, bold_jsons, t_bold)
        else:
            print("    * No fieldmaps detected in fmap/ - skipping")
def bind_epi_fmaps(epi_fmap_jsons, bold_jsons, t_bold):
    """
    SE-EPI fieldmap binding: assign each BOLD series to the fieldmap
    closest to it in acquisition time, per phase-encoding direction.

    :param epi_fmap_jsons: list of str
        SE-EPI fieldmap JSON sidecar filenames
    :param bold_jsons: list of str
        BOLD fMRI JSON sidecar filenames
    :param t_bold: array-like
        Acquisition times (minutes) for each entry of bold_jsons
    :return:
    """
    # Map each fieldmap JSON to its parsed BIDS phase-encoding direction.
    # Matching on the parsed entity (not a substring search over the whole
    # filename) avoids false hits, e.g. direction 'P' matching a 'dir-PA'
    # file or a direction string that happens to appear in the subject ID.
    fmap_pedirs = {}
    for fname in epi_fmap_jsons:
        ents = bids.layout.parse_file_entities(fname)
        if 'direction' in ents:
            fmap_pedirs[fname] = ents['direction']
    pedirs = np.unique(list(fmap_pedirs.values()))
    # Loop over phase encoding directions
    for pedir in pedirs:
        print('      Scanning for dir-{} SE-EPI fieldmaps'.format(pedir))
        # JSONs whose direction entity equals the current PE direction
        pedir_jsons = [fname for fname, d in fmap_pedirs.items() if d == pedir]
        # One IntendedFor list per fieldmap in this direction
        intended_for = [[] for _ in pedir_jsons]
        # SE-EPI fieldmap acquisition times (minutes)
        t_epi_fmap = np.array([acqtime_mins(fname) for fname in pedir_jsons])
        # Find the closest fieldmap in time to each BOLD series
        for ic, bold_json in enumerate(bold_jsons):
            # Time difference between all fieldmaps in this direction and current BOLD series
            dt = np.abs(t_bold[ic] - t_epi_fmap)
            # Index of closest fieldmap to this BOLD series
            idx = np.argmin(dt)
            # Add this BOLD series image name to list for this fmap
            intended_for[idx].append(bids_intended_name(bold_json))
        # Replace IntendedFor field in fmap JSON file
        for fc, json_fname in enumerate(pedir_jsons):
            info = bio.read_json(json_fname)
            info['IntendedFor'] = intended_for[fc]
            bio.write_json(json_fname, info, overwrite=True)
def bind_gre_fmaps(gre_fmap_jsons, bold_jsons, t_bold):
    """
    GRE fieldmap binding: each BOLD series is added to the IntendedFor
    list of the GRE fieldmap acquired closest to it in time.

    :param gre_fmap_jsons: list of str
        GRE fieldmap JSON sidecar filenames
    :param bold_jsons: list of str
        BOLD fMRI JSON sidecar filenames
    :param t_bold: array-like
        Acquisition times (minutes) for each entry of bold_jsons
    :return:
    """
    # One IntendedFor list per GRE fieldmap
    intended_for = [[] for _ in gre_fmap_jsons]
    # GRE fieldmap acquisition times (minutes)
    t_gre_fmap = np.array([acqtime_mins(fname) for fname in gre_fmap_jsons])
    # Assign each BOLD series to the fieldmap nearest in time
    for ic, bold_json in enumerate(bold_jsons):
        nearest = np.argmin(np.abs(t_bold[ic] - t_gre_fmap))
        intended_for[nearest].append(bids_intended_name(bold_json))
    # Rewrite each fieldmap sidecar with its IntendedFor list
    for json_fname, targets in zip(gre_fmap_jsons, intended_for):
        info = bio.read_json(json_fname)
        info['IntendedFor'] = targets
        bio.write_json(json_fname, info, overwrite=True)
def bids_intended_name(json_fname):
    """
    Convert a JSON sidecar path into the relative image path used by the
    BIDS IntendedFor field: the last three path components, with the
    '.json' extension swapped for '.nii.gz'.
    """
    nii_path = json_fname.replace('.json', '.nii.gz')
    head, image_name = os.path.split(nii_path)
    head, series_dir = os.path.split(head)
    session_dir = os.path.basename(head)
    return os.path.join(session_dir, series_dir, image_name)
def prune_intendedfors(bids_subj_dir, fmap_only):
    """
    Remove IntendedFor entries that point at files which no longer exist.

    Walks every JSON sidecar below bids_subj_dir (optionally only those in
    fmap/ folders), drops IntendedFor targets missing on disk, and rewrites
    each affected sidecar in place.

    :param bids_subj_dir: string
        BIDS subject directory (sub-*)
    :param fmap_only: boolean
        When True, restrict pruning to JSON files inside fmap/ directories
    """
    for root, dirs, files in os.walk(bids_subj_dir):
        for name in files:
            # Skip non-JSON files and the dataset description
            if os.path.splitext(name)[1] != ".json":
                continue
            if name == "dataset_description.json":
                continue
            # Optionally restrict to sidecars living in an fmap/ folder
            if fmap_only and os.path.basename(root) != "fmap":
                continue
            with open(os.path.join(root, name), 'r+') as f:
                data = json.load(f)
                if 'IntendedFor' in data:
                    # Keep only targets that still exist on disk
                    surviving = [
                        target for target in data['IntendedFor']
                        if os.path.isfile(os.path.join(bids_subj_dir, target))
                    ]
                    data['IntendedFor'] = surviving
                    # Rewrite the sidecar in place
                    f.seek(0)
                    json.dump(data, f, indent=4)
                    f.truncate()
def handle_fmap_case(work_json_fname, bids_nii_fname, bids_json_fname):
    """
    There are two popular GRE fieldmap organizations: Case 1 and Case 2
    Source: BIDS 1.4.0 Specification https://bids-specification.readthedocs.io
    Case 1
        sub-<label>/[ses-<label>/]
        fmap/
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phasediff.nii[.gz]
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phasediff.json
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_magnitude1.nii[.gz]
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_magnitude2.nii[.gz]
    Case 2
        sub-<label>/[ses-<label>/]
        fmap/
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phase1.nii[.gz]
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phase1.json
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phase2.nii[.gz]
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_phase2.json
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_magnitude1.nii[.gz]
        sub-<label>[_ses-<label>][_acq-<label>][_run-<index>]_magnitude2.nii[.gz]
    Current dcm2niix output suffices
    Current version at time of coding: v1.0.20200331
    ---
    Keep checking that this is true with later releases
    *--GR--<serno>_e1.<ext>      : echo 1 magnitude image [Cases 1 and 2]
    *--GR--<serno>_e2.<ext>      : echo 2 magnitude image [Cases 1 and 2]
    *--GR--<serno+1>_e1_ph.<ext> : echo 1 phase image [Case 2]
    *--GR--<serno+1>_e2_ph.<ext> : interecho phase difference [Case 1] or
                                   echo 2 phase image [Case 2]
    """
    # Pull dcm2niix filename info
    work_info = bio.parse_dcm2niix_fname(work_json_fname)
    # np.int was removed in NumPy 1.24 - use the builtin int instead
    ser_no = int(work_info['SerNo'])
    suffix = work_info['Suffix']
    # Base series number for magnitude images (see above)
    if suffix == 'e1' or suffix == 'e2':
        is_mag = True
        echo_no = int(suffix[1])
        base_ser_no = ser_no
    elif suffix == 'e1_ph' or suffix == 'e2_ph':
        is_mag = False
        echo_no = int(suffix[1])
        base_ser_no = ser_no - 1
    else:
        is_mag = False
        echo_no = None
        base_ser_no = None
    if base_ser_no:
        # Construct candidate JSON sidecar filenames for e1 and e2, mag and phase
        e1m_fname = d2n.dcm2niix_json_fname(work_info, base_ser_no, 'e1')
        # e2m_fname = dcm2niix_json_fname(work_info, base_ser_no, 'e2')  # Optional
        e1p_fname = d2n.dcm2niix_json_fname(work_info, base_ser_no + 1, 'e1_ph')
        e2p_fname = d2n.dcm2niix_json_fname(work_info, base_ser_no + 1, 'e2_ph')
        # Check case based on existence of phase images
        fmap_case = None
        if os.path.isfile(e2p_fname):
            if os.path.isfile(e1p_fname):
                print('    Detected GRE Fieldmap Case 2')
                fmap_case = 2
            else:
                print('    Detected GRE Fieldmap Case 1')
                fmap_case = 1
        else:
            print('* GRE Fieldmap Echo 2 image missing - skipping')
        # Update BIDS nii and json filenames
        if is_mag:
            bids_nii_fname = tr.replace_contrast(bids_nii_fname, 'magnitude{}'.format(echo_no))
            bids_json_fname = tr.replace_contrast(bids_json_fname, 'magnitude{}'.format(echo_no))
        else:
            if fmap_case == 1:
                bids_nii_fname = tr.replace_contrast(bids_nii_fname, 'phasediff')
                bids_json_fname = tr.replace_contrast(bids_json_fname, 'phasediff')
                # Load echo 1 and echo 2 metadata
                e1m_info = bio.read_json(e1m_fname)
                e2p_info = bio.read_json(e2p_fname)
                # Add both echo times to the phase-difference metadata,
                # as required for phasediff fieldmaps by the BIDS spec
                te1 = e1m_info['EchoTime']
                te2 = e2p_info['EchoTime']
                print(f'    GRE TE1 : {te1:0.5f} ms')
                print(f'    GRE TE2 : {te2:0.5f} ms')
                print(f'    GRE dTE : {(te2-te1):0.5f} ms')
                e2p_info['EchoTime1'] = te1
                e2p_info['EchoTime2'] = te2
                # Re-write echo 2 phase JSON sidecar
                print('    Updating Echo 2 Phase JSON sidecar')
                bio.write_json(e2p_fname, e2p_info, overwrite=True)
            else:
                bids_nii_fname = tr.replace_contrast(bids_nii_fname, 'phase{}'.format(echo_no))
                bids_json_fname = tr.replace_contrast(bids_json_fname, 'phase{}'.format(echo_no))
    else:
        print('* Could not find echo 1 and 2 images for GRE Fieldmap - skipping')
    return bids_nii_fname, bids_json_fname
def build_intendedfor(sid, ses, bids_suffix):
    """
    Construct the IntendedFor entry for a fieldmap sidecar.

    :param sid: str, subject ID (without the 'sub-' prefix)
    :param ses: str, session label; falsy when sessions are not used
    :param bids_suffix: str, BIDS suffix, optionally prefixed with a type dir
    :return: str, relative image path suitable for an IntendedFor field
    """
    bids_name = os.path.basename(bids_suffix)
    # Default to 'func' when the suffix carries no type directory
    bids_type = os.path.dirname(bids_suffix) or 'func'
    if ses:
        # With sessions, the path starts at the session directory
        fname = 'sub-' + sid + '_ses-' + ses + '_' + bids_name + '.nii.gz'
        return os.path.join('ses-' + ses, bids_type, fname)
    fname = 'sub-' + sid + '_' + bids_name + '.nii.gz'
    return os.path.join(bids_type, fname)
def add_intended_run(prot_dict, info, run_no):
    """
    Add run numbers to files in IntendedFor.

    Scans the protocol dictionary for fieldmap entries and, when the
    current series is among a fieldmap's intended targets, rewrites that
    target's suffix to carry the given run number.

    :param prot_dict: dict
        protocol translation dictionary; fmap entries appear to be lists
        of the form ['fmap', <suffix>, <IntendedFor>] -- confirm upstream
    :param info: dict
    :param run_no: int
    :return prot_dict: dict
    """
    prot_dict_update = dict()
    for k in prot_dict.keys():
        if prot_dict[k][0] == 'fmap':
            # Construct a list of the intended runs
            if type(prot_dict[k][2]) == list:
                intended_for = prot_dict[k][2]
            elif prot_dict[k][2] != 'UNASSIGNED':
                intended_for = [prot_dict[k][2]]
            else:
                # NOTE(review): 'break' aborts the scan over ALL remaining
                # fmap keys as soon as one is 'UNASSIGNED'; 'continue' may
                # have been intended -- confirm before changing
                break
            # Split each target into its directory part and filename suffix
            suffixes = [os.path.basename(x) for x in intended_for]
            types = [os.path.dirname(x) for x in intended_for]
            # determine if this sequence is intended by the fmap
            if prot_dict[info['SerDesc']] in suffixes:
                # NOTE(review): membership tests prot_dict[...] but the
                # index lookup uses prot_dict[...][1] -- verify these refer
                # to the same value for non-fmap entries
                idx = suffixes.index(prot_dict[info['SerDesc']][1])
                # change intendedfor to include run or add a new run
                new_suffix = tr.add_run_number(suffixes[idx], run_no)
                if new_suffix != suffixes[idx]:
                    if '_run-' in suffixes[idx]:
                        # Already run-tagged: keep it and add the new run
                        suffixes.append(new_suffix)
                        types.append(types[idx])
                    else:
                        # First run tag: replace the untagged suffix
                        suffixes[idx] = new_suffix
                intended_for = [os.path.join(x[0], x[1]) for x in zip(types, suffixes)]
                prot_dict_update[k] = ['fmap', prot_dict[k][1], intended_for]
    prot_dict.update(prot_dict_update)
    return prot_dict
| [
"numpy.abs",
"numpy.unique",
"bids.layout.parse_file_entities",
"json.dump",
"os.path.join",
"os.path.splitext",
"os.path.isfile",
"os.path.dirname",
"json.load",
"os.path.basename",
"numpy.argmin",
"numpy.int",
"os.walk"
] | [((3069, 3084), 'numpy.unique', 'np.unique', (['dirs'], {}), '(dirs)\n', (3078, 3084), True, 'import numpy as np\n'), ((5602, 5624), 'os.path.basename', 'os.path.basename', (['tmp1'], {}), '(tmp1)\n', (5618, 5624), False, 'import os\n'), ((5637, 5658), 'os.path.dirname', 'os.path.dirname', (['tmp1'], {}), '(tmp1)\n', (5652, 5658), False, 'import os\n'), ((5671, 5693), 'os.path.basename', 'os.path.basename', (['tmp2'], {}), '(tmp2)\n', (5687, 5693), False, 'import os\n'), ((5706, 5727), 'os.path.dirname', 'os.path.dirname', (['tmp2'], {}), '(tmp2)\n', (5721, 5727), False, 'import os\n'), ((5740, 5762), 'os.path.basename', 'os.path.basename', (['tmp3'], {}), '(tmp3)\n', (5756, 5762), False, 'import os\n'), ((5775, 5808), 'os.path.join', 'os.path.join', (['base3', 'base2', 'base1'], {}), '(base3, base2, base1)\n', (5787, 5808), False, 'import os\n'), ((6234, 6256), 'os.walk', 'os.walk', (['bids_subj_dir'], {}), '(bids_subj_dir)\n', (6241, 6256), False, 'import os\n'), ((9218, 9244), 'numpy.int', 'np.int', (["work_info['SerNo']"], {}), "(work_info['SerNo'])\n", (9224, 9244), True, 'import numpy as np\n'), ((12381, 12410), 'os.path.basename', 'os.path.basename', (['bids_suffix'], {}), '(bids_suffix)\n', (12397, 12410), False, 'import os\n'), ((12427, 12455), 'os.path.dirname', 'os.path.dirname', (['bids_suffix'], {}), '(bids_suffix)\n', (12442, 12455), False, 'import os\n'), ((1751, 1787), 'os.path.join', 'os.path.join', (['bids_subj_dir', '"""ses-*"""'], {}), "(bids_subj_dir, 'ses-*')\n", (1763, 1787), False, 'import os\n'), ((2213, 2243), 'os.path.join', 'os.path.join', (['sess_dir', '"""fmap"""'], {}), "(sess_dir, 'fmap')\n", (2225, 2243), False, 'import os\n'), ((2942, 2980), 'bids.layout.parse_file_entities', 'bids.layout.parse_file_entities', (['fname'], {}), '(fname)\n', (2973, 2980), False, 'import bids\n'), ((4973, 5004), 'numpy.abs', 'np.abs', (['(t_bold[ic] - t_epi_fmap)'], {}), '(t_bold[ic] - t_epi_fmap)\n', (4979, 5004), True, 'import numpy as np\n'), 
((5076, 5089), 'numpy.argmin', 'np.argmin', (['dt'], {}), '(dt)\n', (5085, 5089), True, 'import numpy as np\n'), ((9418, 9435), 'numpy.int', 'np.int', (['suffix[1]'], {}), '(suffix[1])\n', (9424, 9435), True, 'import numpy as np\n'), ((10205, 10230), 'os.path.isfile', 'os.path.isfile', (['e2p_fname'], {}), '(e2p_fname)\n', (10219, 10230), False, 'import os\n'), ((12669, 12770), 'os.path.join', 'os.path.join', (["('ses-' + ses)", 'bids_type', "('sub-' + sid + '_ses-' + ses + '_' + bids_name + '.nii.gz')"], {}), "('ses-' + ses, bids_type, 'sub-' + sid + '_ses-' + ses + '_' +\n bids_name + '.nii.gz')\n", (12681, 12770), False, 'import os\n'), ((12793, 12860), 'os.path.join', 'os.path.join', (['bids_type', "('sub-' + sid + '_' + bids_name + '.nii.gz')"], {}), "(bids_type, 'sub-' + sid + '_' + bids_name + '.nii.gz')\n", (12805, 12860), False, 'import os\n'), ((1695, 1726), 'os.path.basename', 'os.path.basename', (['bids_subj_dir'], {}), '(bids_subj_dir)\n', (1711, 1726), False, 'import os\n'), ((2003, 2054), 'os.path.join', 'os.path.join', (['sess_dir', '"""func"""', '"""*task-*_bold.json"""'], {}), "(sess_dir, 'func', '*task-*_bold.json')\n", (2015, 2054), False, 'import os\n'), ((2274, 2316), 'os.path.join', 'os.path.join', (['fmap_dir', '"""*_dir-*_epi.json"""'], {}), "(fmap_dir, '*_dir-*_epi.json')\n", (2286, 2316), False, 'import os\n'), ((2348, 2390), 'os.path.join', 'os.path.join', (['fmap_dir', '"""*_phasediff.json"""'], {}), "(fmap_dir, '*_phasediff.json')\n", (2360, 2390), False, 'import os\n'), ((3822, 3853), 'numpy.abs', 'np.abs', (['(t_bold[ic] - t_epi_fmap)'], {}), '(t_bold[ic] - t_epi_fmap)\n', (3828, 3853), True, 'import numpy as np\n'), ((3933, 3946), 'numpy.argmin', 'np.argmin', (['dt'], {}), '(dt)\n', (3942, 3946), True, 'import numpy as np\n'), ((9555, 9572), 'numpy.int', 'np.int', (['suffix[1]'], {}), '(suffix[1])\n', (9561, 9572), True, 'import numpy as np\n'), ((10247, 10272), 'os.path.isfile', 'os.path.isfile', (['e1p_fname'], {}), 
'(e1p_fname)\n', (10261, 10272), False, 'import os\n'), ((1879, 1905), 'os.path.basename', 'os.path.basename', (['sess_dir'], {}), '(sess_dir)\n', (1895, 1905), False, 'import os\n'), ((13501, 13520), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (13517, 13520), False, 'import os\n'), ((13565, 13583), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (13580, 13583), False, 'import os\n'), ((6727, 6739), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6736, 6739), False, 'import json\n'), ((14245, 14269), 'os.path.join', 'os.path.join', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (14257, 14269), False, 'import os\n'), ((6419, 6441), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (6435, 6441), False, 'import os\n'), ((6561, 6583), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (6577, 6583), False, 'import os\n'), ((6624, 6648), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (6636, 6648), False, 'import os\n'), ((7370, 7398), 'json.dump', 'json.dump', (['data', 'f'], {'indent': '(4)'}), '(data, f, indent=4)\n', (7379, 7398), False, 'import json\n'), ((6993, 7023), 'os.path.join', 'os.path.join', (['bids_subj_dir', 'i'], {}), '(bids_subj_dir, i)\n', (7005, 7023), False, 'import os\n'), ((7055, 7081), 'os.path.isfile', 'os.path.isfile', (['i_fullpath'], {}), '(i_fullpath)\n', (7069, 7081), False, 'import os\n')] |
from vizdoom import *
import numpy as np
import itertools as it
class DoomEnvironment:
    """Thin wrapper around a ViZDoom game instance for RL experiments."""

    def __init__(self, scenario='defend_the_center', window=False):
        """Load the given scenario config/WAD and prepare the action set."""
        self.game = DoomGame()
        print(scenario)
        scenario_base = "ViZDoom/scenarios/" + scenario
        self.game.load_config(scenario_base + ".cfg")
        self.game.set_doom_scenario_path(scenario_base + ".wad")
        self.game.set_window_visible(window)
        self.game.init()
        self.scenario = scenario
        if self.scenario == 'deadly_corridor':
            # One one-hot action per button
            self.n_actions = 6
            self.actions = np.identity(6, dtype=int).tolist()
        else:
            # left / right / shoot one-hot triplets
            self.n_actions = self.game.get_available_buttons_size()
            self.actions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]

    def get_actions(self):
        """Return the list of available one-hot actions."""
        return self.actions

    def init_observations(self):
        """Return (current, previous) observation dicts, initially equal."""
        current = self.get_observation_cur()
        return current, current.copy()

    def get_observation_cur(self):
        """Collect the current game variables into a dict."""
        return {
            'kills': self.game.get_game_variable(KILLCOUNT),
            'health': self.game.get_game_variable(HEALTH),
            'ammo': self.game.get_game_variable(AMMO2),
        }

    def get_state(self):
        """Return the current screen buffer transposed to (480, 640, 3)."""
        screen = self.game.get_state().screen_buffer  # shape = (3, 480, 640)
        return np.transpose(screen, [1, 2, 0])

    def get_zero_state(self):
        """Return an all-zero frame with the screen's (480, 640, 3) shape."""
        return np.zeros((480, 640, 3), dtype='uint8')

    def make_action(self, action, frame_skip):
        """Forward the action to the underlying game for frame_skip tics."""
        return self.game.make_action(action, frame_skip)
def is_episode_finished(self):
return self.game.is_episode_finished() | [
"numpy.identity",
"numpy.zeros",
"numpy.transpose"
] | [((1507, 1537), 'numpy.transpose', 'np.transpose', (['state', '[1, 2, 0]'], {}), '(state, [1, 2, 0])\n', (1519, 1537), True, 'import numpy as np\n'), ((1608, 1646), 'numpy.zeros', 'np.zeros', (['(480, 640, 3)'], {'dtype': '"""uint8"""'}), "((480, 640, 3), dtype='uint8')\n", (1616, 1646), True, 'import numpy as np\n'), ((577, 602), 'numpy.identity', 'np.identity', (['(6)'], {'dtype': 'int'}), '(6, dtype=int)\n', (588, 602), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
import time
# OpenCV Facial Capture Test
#
# Captures webcam frames, detects the largest frontal face with a Haar
# cascade, fits LBF facial landmarks to it, and draws both on screen.
_cap = cv.VideoCapture(0)
_cap.set(cv.CAP_PROP_FRAME_WIDTH, 512)
_cap.set(cv.CAP_PROP_FRAME_HEIGHT, 512)
_cap.set(cv.CAP_PROP_BUFFERSIZE, 1)
# Give the camera a moment to warm up before grabbing frames
time.sleep(0.5)
facemark = cv.face.createFacemarkLBF()
try:
    # Download the trained model lbfmodel.yaml:
    # https://github.com/kurnianggoro/GSOC2017/tree/master/data
    # and update this path to the file:
    facemark.loadModel("./lbfmodel.yaml")
except cv.error:
    print("Model not found")
cascade = cv.CascadeClassifier(cv.data.haarcascades + "haarcascade_frontalface_alt.xml")
if cascade.empty():
    print("cascade not found")
    exit()
print("Press ESC to stop")
while True:
    _, frame = _cap.read()
    # scaleFactor=1.05, minNeighbors=6, minimum face size 130x130
    faces = cascade.detectMultiScale(frame, 1.05, 6, cv.CASCADE_SCALE_IMAGE, (130, 130))
    # find biggest face, and only keep it
    if type(faces) is np.ndarray and faces.size > 0:
        biggestFace = np.zeros(shape=(1, 4))
        for face in faces:
            # face = (x, y, w, h); compare widths to find the largest
            if face[2] > biggestFace[0][2]:
                biggestFace[0] = face
        # find landmarks
        ok, landmarks = facemark.fit(frame, faces=biggestFace)
        # draw landmarks
        for marks in landmarks:
            for (x, y) in marks[0]:
                # Landmark coordinates are floats; cv.circle requires integer
                # pixel coordinates (recent OpenCV releases raise otherwise)
                cv.circle(frame, (int(x), int(y)), 2, (0, 255, 255), -1)
    # draw detected face
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 1)
    for i, (x, y, w, h) in enumerate(faces):
        cv.putText(frame, "Face #{}".format(i), (x - 10, y - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    cv.imshow("Image Landmarks", frame)
    if cv.waitKey(1) == 27:
        exit()
| [
"cv2.rectangle",
"cv2.face.createFacemarkLBF",
"time.sleep",
"cv2.imshow",
"numpy.zeros",
"cv2.circle",
"cv2.VideoCapture",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((87, 105), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (102, 105), True, 'import cv2 as cv\n'), ((221, 236), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (231, 236), False, 'import time\n'), ((249, 276), 'cv2.face.createFacemarkLBF', 'cv.face.createFacemarkLBF', ([], {}), '()\n', (274, 276), True, 'import cv2 as cv\n'), ((533, 611), 'cv2.CascadeClassifier', 'cv.CascadeClassifier', (["(cv.data.haarcascades + 'haarcascade_frontalface_alt.xml')"], {}), "(cv.data.haarcascades + 'haarcascade_frontalface_alt.xml')\n", (553, 611), True, 'import cv2 as cv\n'), ((1675, 1710), 'cv2.imshow', 'cv.imshow', (['"""Image Landmarks"""', 'frame'], {}), "('Image Landmarks', frame)\n", (1684, 1710), True, 'import cv2 as cv\n'), ((959, 981), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 4)'}), '(shape=(1, 4))\n', (967, 981), True, 'import numpy as np\n'), ((1718, 1731), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1728, 1731), True, 'import cv2 as cv\n'), ((1428, 1487), 'cv2.rectangle', 'cv.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(1)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 1)\n', (1440, 1487), True, 'import cv2 as cv\n'), ((1297, 1343), 'cv2.circle', 'cv.circle', (['frame', '(x, y)', '(2)', '(0, 255, 255)', '(-1)'], {}), '(frame, (x, y), 2, (0, 255, 255), -1)\n', (1306, 1343), True, 'import cv2 as cv\n')] |
import streamlit as st
import datetime
import time
import pandas as pd
import numpy as np
# title
st.title('Streamlit Basics')
# header
st.header('header')
st.subheader('subheader')
# text
st.text('regular text')
# markdown
st.markdown('## markdown text')
# color text
st.success('Success!')
# misc
st.info('Info')
st.warning('Warning!')
st.error('Error code: ')
st.exception('Name not defined')
# sidebars
st.sidebar.header('Sidebar')
st.sidebar.text('table of content')
# widgets
# checkbox
st.checkbox('yes/no')
# radio buttion
st.radio('yes/no', ('yes', 'no'))
# select box
selected = st.selectbox('select', ['yes', 'no', 'dont know'])
st.write('option selected: ', selected)
# multiselect
st.multiselect('select shape: ', ('Square', 'Triangle', 'Circle'))
# slider
st.slider('Select number between 1 and 10: ', 1, 10)
# text input
st.text_input('Input text here: ', 'type here')
# date
st.date_input('The date is ', datetime.datetime.now())
# time input
st.time_input('The time is ', datetime.time())
# progress bar
bar = st.progress(0)
for i in range(100):
time.sleep(0.01)
bar.progress(i+1)
# display data
# single line code
st.code('import pandas as pd')
# section code
with st.echo():
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import seaborn as sns
# plots
arr = np.random.normal(1, 1, size=100)
plt.hist(arr, bins=20)
st.pyplot()
# dataframe
df = pd.DataFrame(np.random.randn(5, 5), columns=('col_%d' % i for i in range(5)))
st.dataframe(df)
# table()
st.table(df)
| [
"streamlit.table",
"matplotlib.pyplot.hist",
"streamlit.echo",
"time.sleep",
"streamlit.code",
"streamlit.multiselect",
"streamlit.text_input",
"streamlit.header",
"streamlit.title",
"datetime.time",
"streamlit.warning",
"streamlit.sidebar.header",
"numpy.random.normal",
"streamlit.markdow... | [((100, 128), 'streamlit.title', 'st.title', (['"""Streamlit Basics"""'], {}), "('Streamlit Basics')\n", (108, 128), True, 'import streamlit as st\n'), ((139, 158), 'streamlit.header', 'st.header', (['"""header"""'], {}), "('header')\n", (148, 158), True, 'import streamlit as st\n'), ((159, 184), 'streamlit.subheader', 'st.subheader', (['"""subheader"""'], {}), "('subheader')\n", (171, 184), True, 'import streamlit as st\n'), ((193, 216), 'streamlit.text', 'st.text', (['"""regular text"""'], {}), "('regular text')\n", (200, 216), True, 'import streamlit as st\n'), ((229, 260), 'streamlit.markdown', 'st.markdown', (['"""## markdown text"""'], {}), "('## markdown text')\n", (240, 260), True, 'import streamlit as st\n'), ((275, 297), 'streamlit.success', 'st.success', (['"""Success!"""'], {}), "('Success!')\n", (285, 297), True, 'import streamlit as st\n'), ((306, 321), 'streamlit.info', 'st.info', (['"""Info"""'], {}), "('Info')\n", (313, 321), True, 'import streamlit as st\n'), ((323, 345), 'streamlit.warning', 'st.warning', (['"""Warning!"""'], {}), "('Warning!')\n", (333, 345), True, 'import streamlit as st\n'), ((347, 371), 'streamlit.error', 'st.error', (['"""Error code: """'], {}), "('Error code: ')\n", (355, 371), True, 'import streamlit as st\n'), ((373, 405), 'streamlit.exception', 'st.exception', (['"""Name not defined"""'], {}), "('Name not defined')\n", (385, 405), True, 'import streamlit as st\n'), ((418, 446), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Sidebar"""'], {}), "('Sidebar')\n", (435, 446), True, 'import streamlit as st\n'), ((447, 482), 'streamlit.sidebar.text', 'st.sidebar.text', (['"""table of content"""'], {}), "('table of content')\n", (462, 482), True, 'import streamlit as st\n'), ((507, 528), 'streamlit.checkbox', 'st.checkbox', (['"""yes/no"""'], {}), "('yes/no')\n", (518, 528), True, 'import streamlit as st\n'), ((546, 579), 'streamlit.radio', 'st.radio', (['"""yes/no"""', "('yes', 'no')"], {}), 
"('yes/no', ('yes', 'no'))\n", (554, 579), True, 'import streamlit as st\n'), ((605, 655), 'streamlit.selectbox', 'st.selectbox', (['"""select"""', "['yes', 'no', 'dont know']"], {}), "('select', ['yes', 'no', 'dont know'])\n", (617, 655), True, 'import streamlit as st\n'), ((656, 695), 'streamlit.write', 'st.write', (['"""option selected: """', 'selected'], {}), "('option selected: ', selected)\n", (664, 695), True, 'import streamlit as st\n'), ((711, 777), 'streamlit.multiselect', 'st.multiselect', (['"""select shape: """', "('Square', 'Triangle', 'Circle')"], {}), "('select shape: ', ('Square', 'Triangle', 'Circle'))\n", (725, 777), True, 'import streamlit as st\n'), ((788, 840), 'streamlit.slider', 'st.slider', (['"""Select number between 1 and 10: """', '(1)', '(10)'], {}), "('Select number between 1 and 10: ', 1, 10)\n", (797, 840), True, 'import streamlit as st\n'), ((855, 902), 'streamlit.text_input', 'st.text_input', (['"""Input text here: """', '"""type here"""'], {}), "('Input text here: ', 'type here')\n", (868, 902), True, 'import streamlit as st\n'), ((1050, 1064), 'streamlit.progress', 'st.progress', (['(0)'], {}), '(0)\n', (1061, 1064), True, 'import streamlit as st\n'), ((1164, 1194), 'streamlit.code', 'st.code', (['"""import pandas as pd"""'], {}), "('import pandas as pd')\n", (1171, 1194), True, 'import streamlit as st\n'), ((1381, 1413), 'numpy.random.normal', 'np.random.normal', (['(1)', '(1)'], {'size': '(100)'}), '(1, 1, size=100)\n', (1397, 1413), True, 'import numpy as np\n'), ((1414, 1436), 'matplotlib.pyplot.hist', 'plt.hist', (['arr'], {'bins': '(20)'}), '(arr, bins=20)\n', (1422, 1436), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1448), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (1446, 1448), True, 'import streamlit as st\n'), ((1545, 1561), 'streamlit.dataframe', 'st.dataframe', (['df'], {}), '(df)\n', (1557, 1561), True, 'import streamlit as st\n'), ((1573, 1585), 'streamlit.table', 'st.table', (['df'], {}), '(df)\n', 
(1581, 1585), True, 'import streamlit as st\n'), ((941, 964), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (962, 964), False, 'import datetime\n'), ((1010, 1025), 'datetime.time', 'datetime.time', ([], {}), '()\n', (1023, 1025), False, 'import datetime\n'), ((1090, 1106), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1100, 1106), False, 'import time\n'), ((1216, 1225), 'streamlit.echo', 'st.echo', ([], {}), '()\n', (1223, 1225), True, 'import streamlit as st\n'), ((1480, 1501), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (1495, 1501), True, 'import numpy as np\n')] |
# run a BWM search for a fixed source orientation (theta, phi, psi, t0)
# The sky location (costheta, phi), polarization angle (psi) and burst epoch
# (t0) are all held constant; only the burst amplitude and the per-pulsar
# noise parameters are sampled with PTMCMCSampler.
import numpy as np
import argparse, os
import pickle

from enterprise.pulsar import Pulsar
from enterprise.signals import parameter
from enterprise.signals import selections
from enterprise.signals import utils
from enterprise.signals import signal_base
from enterprise.signals import white_signals
from enterprise.signals import gp_signals
from enterprise.signals import deterministic_signals
import enterprise.constants as const

from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc

from utils import sample_utils as su

### ARG PARSER
parser = argparse.ArgumentParser(
                description='run the BWM analysis with enterprise')

parser.add_argument('-d', '--datafile',
                    dest='datafile', default='/home/pbaker/nanograv/data/nano11.pkl',
                    action='store',
                    help="pickle file containing array of enterprise Pulsar objects")

parser.add_argument('-n', '--noisefile',
                    dest='noisefile', default='/home/pbaker/nanograv/data/nano11_setpars.pkl',
                    action='store',
                    help="pickle file containing noise parameters for all pulsars")

parser.add_argument('-o', '--outdir',
                    dest='outdir', default='~/nanograv/bwm/',
                    action='store',
                    help="location to write output")

parser.add_argument('--costheta', type=float,
                    dest='costh', default=None,
                    action='store',
                    help="sky position: cos(theta)")

parser.add_argument('--phi', type=float,
                    dest='phi', default=None,
                    action='store',
                    help="sky position: phi")

parser.add_argument('--psi', type=float,
                    dest='psi', default=None,
                    action='store',
                    help="polarization angle: psi")

parser.add_argument('--t0', type=float,
                    dest='t0', default=None,
                    action='store',
                    help="fixed t0 burst epoch (MJD)")

# NOTE(review): help= below is a list of strings; argparse will str() the list
# verbatim in --help output — probably intended as one concatenated string.
parser.add_argument('-u', '--upper-limit',
                    dest='UL', default=False,
                    action='store_true',
                    help=["use uniform priors suitable for upper limit ",
                          "calculation.  Omit for log-uniform priors for ",
                          "detection"])

parser.add_argument('-b', '--bayesephem',
                    dest='BE', default=False,
                    action='store_true',
                    help="use 'BayesEphem' ephemeris modeling")

parser.add_argument('-N', '--Nsamp', type=int,
                    dest='N', default=int(1.0e+05),
                    action='store',
                    help="number of samples to collect (after thinning!!)")

parser.add_argument('-t', '--thin', type=int,
                    dest='thin', default=10,
                    action='store',
                    help="thinning factor (keep every [thin]th sample)")

parser.add_argument('-R', '--RN-distr',
                    dest='RNdistr', default=None,
                    action='store',
                    help="empirical distribution pickle file to use for RN moves")

parser.add_argument('-J', '--jup-kde',
                    dest='jupdistr', default=None,
                    action='store',
                    help="gaussian KDE pickle file to use for BE moves")

args = parser.parse_args()

# make sure the output directory exists
outdir = os.path.abspath(args.outdir)
os.system('mkdir -p {}'.format(outdir))

# adjust Nsamp for existing chain
# (supports resuming: only collect the samples still missing from chain_1.txt)
chfile = os.path.join(outdir, 'chain_1.txt')
if os.path.exists(chfile):
    ct = sum(1 for i in open(chfile, 'rb'))
    if ct >= args.N:
        print("{:s} has {:d} samples... exiting".format(chfile, ct))
        exit(0)
    else:
        args.N -= ct

# validate the fixed-source parameters; all three angles must be provided
if args.costh is not None and args.phi is not None and args.psi is not None:
    if args.costh > 1 or args.costh < -1:
        raise ValueError("costheta must be in range [-1, 1]")
    if args.phi > 2*np.pi or args.phi < 0:
        raise ValueError("phi must be in range [0, 2*pi]")
    if args.psi > 2*np.pi or args.psi < 0:
        raise ValueError("psi must be in range [0, 2*pi]")
else:
    err = "for fixed source must provide phi, costheta, and psi"
    raise RuntimeError(err)

# read in data pickles
with open(args.datafile, "rb") as f:
    psrs = pickle.load(f)

with open(args.noisefile, "rb") as f:
    noise_params = pickle.load(f)

print("loaded pickles")

#################
##  PTA model  ##
#################
# observation span of the whole array; sets the red-noise Fourier basis
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin

# White Noise
# EFAC / EQUAD / ECORR split by backend; values fixed from the noise file
selection = selections.Selection(selections.by_backend)

efac = parameter.Constant()
equad = parameter.Constant()
ecorr = parameter.Constant()

ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
eq = white_signals.EquadNoise(log10_equad=equad, selection=selection)
ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection)

wn = ef + eq + ec

# Red Noise
# LinearExp prior (uniform in A) for upper limits, log-uniform for detection
if args.UL:
    rn_log10_A = parameter.LinearExp(-20, -11)
else:
    rn_log10_A = parameter.Uniform(-20, -11)
rn_gamma = parameter.Uniform(0, 7)

rn_pl = utils.powerlaw(log10_A=rn_log10_A, gamma=rn_gamma)
rn = gp_signals.FourierBasisGP(rn_pl, components=30, Tspan=Tspan)

# GW BWM
# only the amplitude is free; source geometry and epoch are held constant
amp_name = 'bwm_log10_A'
if args.UL:
    bwm_log10_A = parameter.LinearExp(-18, -11)(amp_name)
else:
    bwm_log10_A = parameter.Uniform(-18, -11)(amp_name)

t0 = parameter.Constant(args.t0)('bwm_t0')
pol = parameter.Constant(args.psi)('bwm_pol')
phi = parameter.Constant(args.phi)('bwm_phi')
costh = parameter.Constant(args.costh)('bwm_costheta')

bwm_wf = utils.bwm_delay(log10_h=bwm_log10_A, t0=t0,
                        cos_gwtheta=costh, gwphi=phi, gwpol=pol)

# BWM signal
bwm = deterministic_signals.Deterministic(bwm_wf, name='bwm')

# Timing Model
tm = gp_signals.TimingModel(use_svd=True)

# BayesEphem
be = deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)

# construct PTA
mod = tm + wn + rn + bwm
if args.BE:
    mod += be

pta = signal_base.PTA([mod(p) for p in psrs])
pta.set_default_params(noise_params)

# write a model summary and the sampled-parameter list for bookkeeping
sumfile = os.path.join(outdir, 'summary.txt')
with open(sumfile, 'w') as f:
    f.write(pta.summary())

print("generated model")

outfile = os.path.join(outdir, 'params.txt')
with open(outfile, 'w') as f:
    for pname in pta.param_names:
        f.write(pname+'\n')

###############
##  sampler  ##
###############
# start noise parameters at their noise-file values, everything else at a
# random prior draw
x0 = np.hstack([noise_params[p.name] if p.name in noise_params.keys()
               else p.sample() for p in pta.params])  # initial point
ndim = len(x0)

# initial jump covariance matrix
# set initial cov stdev to (starting order of magnitude)/10
stdev = np.array([10**np.floor(np.log10(abs(x)))/10 for x in x0])
cov = np.diag(stdev**2)

# generate custom sampling groups
groups = [list(range(ndim))]  # all params

# pulsar noise groups (RN)
for psr in psrs:
    this_group = [pta.param_names.index(par)
                  for par in pta.param_names if psr.name in par]
    groups.append(this_group)

# bwm params
this_group = [pta.param_names.index(par)
              for par in pta.param_names if 'bwm_' in par]
for ii in range(5):
    # multiple copies of BWM group!
    groups.append(this_group)

if args.BE:
    # all BE params
    BE_group = [pta.param_names.index(par)
                for par in pta.param_names
                if 'jup_orb' in par or 'mass' in par or 'frame_drift' in par]
    groups.append(BE_group)

    # jup_orb elements + GWs
    this_group = [pta.param_names.index(par)
                  for par in pta.param_names if 'jup_orb' in par]
    this_group += [pta.param_names.index(par)
                   for par in pta.param_names if 'bwm_' in par]
    groups.append(this_group)

sampler = ptmcmc(ndim, pta.get_lnlikelihood, pta.get_lnprior, cov, groups=groups,
                outDir=outdir, resume=True)

# additional proposals
full_prior = su.build_prior_draw(pta, pta.param_names, name='full_prior')
sampler.addProposalToCycle(full_prior, 1)

if args.RNdistr:
    from utils.sample_utils import EmpiricalDistribution2D
    print("using empirical RN proposal")
    with open(args.RNdistr, "rb") as f:
        distr = pickle.load(f)
    # cap the number of empirical-distribution draws at a quarter of the list
    Non4 = len(distr) // 4
    RN_emp = su.EmpDistrDraw(distr, pta.param_names, Nmax=Non4, name='RN_empirical')
    sampler.addProposalToCycle(RN_emp, 10)
else:
    # use log-uniform draw for RN
    print("using log-uniform RN proposal")
    RNA_params = [pname for pname in pta.param_names if 'red_noise_log10_A' in pname]
    RN_loguni = su.build_loguni_draw(pta, RNA_params, (-20,-11), name='RN_loguni')
    sampler.addProposalToCycle(RN_loguni, 5)

GWA_loguni = su.build_loguni_draw(pta, 'bwm_log10_A', (-18,-11), name='GWA_loguni')
sampler.addProposalToCycle(GWA_loguni, 5)

if args.BE:
    # start jup params near zero
    for p in pta.param_names:
        if "jup_" in p:
            x0[pta.param_names.index(p)] = np.random.normal(scale=0.01)

    if args.jupdistr:
        from scipy.stats import gaussian_kde
        print("using KDE empirical BE proposal")
        with open(args.jupdistr, "rb") as f:
            jup_kde = pickle.load(f)
        BE_kde = su.JupOrb_KDE_Draw(jup_kde, pta.param_names, 'jup_kde')
        sampler.addProposalToCycle(BE_kde, 5)

    BE_params = [pta.param_names[ii] for ii in BE_group]
    BE_prior = su.build_prior_draw(pta, BE_params, name='BE_prior')
    sampler.addProposalToCycle(BE_prior, 5)

# SAMPLE!!
# total raw samples = requested (post-thinning) count * thinning factor
Nsamp = args.N * args.thin
sampler.sample(x0, Nsamp,
               SCAMweight=30, AMweight=20, DEweight=50,
               burn=int(5e4), thin=args.thin)
| [
"enterprise.signals.selections.Selection",
"enterprise.signals.white_signals.MeasurementNoise",
"enterprise.signals.gp_signals.FourierBasisGP",
"enterprise.signals.utils.powerlaw",
"utils.sample_utils.JupOrb_KDE_Draw",
"os.path.exists",
"enterprise.signals.gp_signals.TimingModel",
"argparse.ArgumentPa... | [((626, 701), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""run the BWM analysis with enterprise"""'}), "(description='run the BWM analysis with enterprise')\n", (649, 701), False, 'import argparse, os\n'), ((3491, 3519), 'os.path.abspath', 'os.path.abspath', (['args.outdir'], {}), '(args.outdir)\n', (3506, 3519), False, 'import argparse, os\n'), ((3604, 3639), 'os.path.join', 'os.path.join', (['outdir', '"""chain_1.txt"""'], {}), "(outdir, 'chain_1.txt')\n", (3616, 3639), False, 'import argparse, os\n'), ((3643, 3665), 'os.path.exists', 'os.path.exists', (['chfile'], {}), '(chfile)\n', (3657, 3665), False, 'import argparse, os\n'), ((4709, 4752), 'enterprise.signals.selections.Selection', 'selections.Selection', (['selections.by_backend'], {}), '(selections.by_backend)\n', (4729, 4752), False, 'from enterprise.signals import selections\n'), ((4761, 4781), 'enterprise.signals.parameter.Constant', 'parameter.Constant', ([], {}), '()\n', (4779, 4781), False, 'from enterprise.signals import parameter\n'), ((4790, 4810), 'enterprise.signals.parameter.Constant', 'parameter.Constant', ([], {}), '()\n', (4808, 4810), False, 'from enterprise.signals import parameter\n'), ((4819, 4839), 'enterprise.signals.parameter.Constant', 'parameter.Constant', ([], {}), '()\n', (4837, 4839), False, 'from enterprise.signals import parameter\n'), ((4846, 4908), 'enterprise.signals.white_signals.MeasurementNoise', 'white_signals.MeasurementNoise', ([], {'efac': 'efac', 'selection': 'selection'}), '(efac=efac, selection=selection)\n', (4876, 4908), False, 'from enterprise.signals import white_signals\n'), ((4914, 4978), 'enterprise.signals.white_signals.EquadNoise', 'white_signals.EquadNoise', ([], {'log10_equad': 'equad', 'selection': 'selection'}), '(log10_equad=equad, selection=selection)\n', (4938, 4978), False, 'from enterprise.signals import white_signals\n'), ((4984, 5054), 
'enterprise.signals.white_signals.EcorrKernelNoise', 'white_signals.EcorrKernelNoise', ([], {'log10_ecorr': 'ecorr', 'selection': 'selection'}), '(log10_ecorr=ecorr, selection=selection)\n', (5014, 5054), False, 'from enterprise.signals import white_signals\n'), ((5208, 5231), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(0)', '(7)'], {}), '(0, 7)\n', (5225, 5231), False, 'from enterprise.signals import parameter\n'), ((5241, 5291), 'enterprise.signals.utils.powerlaw', 'utils.powerlaw', ([], {'log10_A': 'rn_log10_A', 'gamma': 'rn_gamma'}), '(log10_A=rn_log10_A, gamma=rn_gamma)\n', (5255, 5291), False, 'from enterprise.signals import utils\n'), ((5297, 5357), 'enterprise.signals.gp_signals.FourierBasisGP', 'gp_signals.FourierBasisGP', (['rn_pl'], {'components': '(30)', 'Tspan': 'Tspan'}), '(rn_pl, components=30, Tspan=Tspan)\n', (5322, 5357), False, 'from enterprise.signals import gp_signals\n'), ((5726, 5814), 'enterprise.signals.utils.bwm_delay', 'utils.bwm_delay', ([], {'log10_h': 'bwm_log10_A', 't0': 't0', 'cos_gwtheta': 'costh', 'gwphi': 'phi', 'gwpol': 'pol'}), '(log10_h=bwm_log10_A, t0=t0, cos_gwtheta=costh, gwphi=phi,\n gwpol=pol)\n', (5741, 5814), False, 'from enterprise.signals import utils\n'), ((5855, 5910), 'enterprise.signals.deterministic_signals.Deterministic', 'deterministic_signals.Deterministic', (['bwm_wf'], {'name': '"""bwm"""'}), "(bwm_wf, name='bwm')\n", (5890, 5910), False, 'from enterprise.signals import deterministic_signals\n'), ((5932, 5968), 'enterprise.signals.gp_signals.TimingModel', 'gp_signals.TimingModel', ([], {'use_svd': '(True)'}), '(use_svd=True)\n', (5954, 5968), False, 'from enterprise.signals import gp_signals\n'), ((5988, 6054), 'enterprise.signals.deterministic_signals.PhysicalEphemerisSignal', 'deterministic_signals.PhysicalEphemerisSignal', ([], {'use_epoch_toas': '(True)'}), '(use_epoch_toas=True)\n', (6033, 6054), False, 'from enterprise.signals import deterministic_signals\n'), ((6218, 6253), 
'os.path.join', 'os.path.join', (['outdir', '"""summary.txt"""'], {}), "(outdir, 'summary.txt')\n", (6230, 6253), False, 'import argparse, os\n'), ((6348, 6382), 'os.path.join', 'os.path.join', (['outdir', '"""params.txt"""'], {}), "(outdir, 'params.txt')\n", (6360, 6382), False, 'import argparse, os\n'), ((6847, 6866), 'numpy.diag', 'np.diag', (['(stdev ** 2)'], {}), '(stdev ** 2)\n', (6854, 6866), True, 'import numpy as np\n'), ((7849, 7952), 'PTMCMCSampler.PTMCMCSampler.PTSampler', 'ptmcmc', (['ndim', 'pta.get_lnlikelihood', 'pta.get_lnprior', 'cov'], {'groups': 'groups', 'outDir': 'outdir', 'resume': '(True)'}), '(ndim, pta.get_lnlikelihood, pta.get_lnprior, cov, groups=groups,\n outDir=outdir, resume=True)\n', (7855, 7952), True, 'from PTMCMCSampler.PTMCMCSampler import PTSampler as ptmcmc\n'), ((8003, 8063), 'utils.sample_utils.build_prior_draw', 'su.build_prior_draw', (['pta', 'pta.param_names'], {'name': '"""full_prior"""'}), "(pta, pta.param_names, name='full_prior')\n", (8022, 8063), True, 'from utils import sample_utils as su\n'), ((8761, 8832), 'utils.sample_utils.build_loguni_draw', 'su.build_loguni_draw', (['pta', '"""bwm_log10_A"""', '(-18, -11)'], {'name': '"""GWA_loguni"""'}), "(pta, 'bwm_log10_A', (-18, -11), name='GWA_loguni')\n", (8781, 8832), True, 'from utils import sample_utils as su\n'), ((4406, 4420), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4417, 4420), False, 'import pickle\n'), ((4479, 4493), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4490, 4493), False, 'import pickle\n'), ((5116, 5145), 'enterprise.signals.parameter.LinearExp', 'parameter.LinearExp', (['(-20)', '(-11)'], {}), '(-20, -11)\n', (5135, 5145), False, 'from enterprise.signals import parameter\n'), ((5169, 5196), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-20)', '(-11)'], {}), '(-20, -11)\n', (5186, 5196), False, 'from enterprise.signals import parameter\n'), ((5531, 5558), 'enterprise.signals.parameter.Constant', 
'parameter.Constant', (['args.t0'], {}), '(args.t0)\n', (5549, 5558), False, 'from enterprise.signals import parameter\n'), ((5575, 5603), 'enterprise.signals.parameter.Constant', 'parameter.Constant', (['args.psi'], {}), '(args.psi)\n', (5593, 5603), False, 'from enterprise.signals import parameter\n'), ((5621, 5649), 'enterprise.signals.parameter.Constant', 'parameter.Constant', (['args.phi'], {}), '(args.phi)\n', (5639, 5649), False, 'from enterprise.signals import parameter\n'), ((5669, 5699), 'enterprise.signals.parameter.Constant', 'parameter.Constant', (['args.costh'], {}), '(args.costh)\n', (5687, 5699), False, 'from enterprise.signals import parameter\n'), ((8335, 8406), 'utils.sample_utils.EmpDistrDraw', 'su.EmpDistrDraw', (['distr', 'pta.param_names'], {'Nmax': 'Non4', 'name': '"""RN_empirical"""'}), "(distr, pta.param_names, Nmax=Non4, name='RN_empirical')\n", (8350, 8406), True, 'from utils import sample_utils as su\n'), ((8635, 8702), 'utils.sample_utils.build_loguni_draw', 'su.build_loguni_draw', (['pta', 'RNA_params', '(-20, -11)'], {'name': '"""RN_loguni"""'}), "(pta, RNA_params, (-20, -11), name='RN_loguni')\n", (8655, 8702), True, 'from utils import sample_utils as su\n'), ((9437, 9489), 'utils.sample_utils.build_prior_draw', 'su.build_prior_draw', (['pta', 'BE_params'], {'name': '"""BE_prior"""'}), "(pta, BE_params, name='BE_prior')\n", (9456, 9489), True, 'from utils import sample_utils as su\n'), ((5423, 5452), 'enterprise.signals.parameter.LinearExp', 'parameter.LinearExp', (['(-18)', '(-11)'], {}), '(-18, -11)\n', (5442, 5452), False, 'from enterprise.signals import parameter\n'), ((5487, 5514), 'enterprise.signals.parameter.Uniform', 'parameter.Uniform', (['(-18)', '(-11)'], {}), '(-18, -11)\n', (5504, 5514), False, 'from enterprise.signals import parameter\n'), ((8280, 8294), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8291, 8294), False, 'import pickle\n'), ((9262, 9317), 'utils.sample_utils.JupOrb_KDE_Draw', 
'su.JupOrb_KDE_Draw', (['jup_kde', 'pta.param_names', '"""jup_kde"""'], {}), "(jup_kde, pta.param_names, 'jup_kde')\n", (9280, 9317), True, 'from utils import sample_utils as su\n'), ((9017, 9045), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.01)'}), '(scale=0.01)\n', (9033, 9045), True, 'import numpy as np\n'), ((9230, 9244), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9241, 9244), False, 'import pickle\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[426]:
import PIL
import cv2
import numpy as np
import os
from PIL import Image
#newly added modules :
import natsort
from typing import Tuple, Union
import math
from deskew import determine_skew
# In[412]:
def Setting_image_to_300DPI(img, img_ref):
    """Save *img* as a 300-DPI JPEG in the RescaledImages folder.

    Parameters
    ----------
    img : PIL.Image.Image
        Source image; converted to RGB first if it is in another mode.
    img_ref : str
        Base name (without extension) for the output file.

    Returns
    -------
    str
        Path of the JPEG that was written.
    """
    # Defect fixed: the original also built a downscaled copy (`imgResult`,
    # capped at 1024 px wide) that was never used anywhere — that dead work
    # is removed; the saved output is unchanged.
    if img.mode != 'RGB':
        img = img.convert('RGB')
    name = '/home/ubuntu/BTECHPROECT/myproject/RescaledImages/' + img_ref + '.jpg'
    img.save(name, dpi=(300, 300))
    return name
# In[413]:
def convertToGrayscale(img):
    """Load the image at path *img*, convert it to grayscale, and estimate
    its skew angle.

    Returns a two-element list: [angle_in_degrees (0 if undetectable),
    grayscale PIL image].
    """
    bgr = cv2.imread(img)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY, dstCn=2)
    # determine_skew returns None when no dominant text angle is found
    skew = determine_skew(gray)
    if skew is None:
        skew = 0
    return [skew, PIL.Image.fromarray(gray)]
# In[414]:
def shadowRemoval(img):
    """Suppress soft shadows by estimating each channel's background with
    dilation + median blur and subtracting it out.

    Accepts a PIL image (or array), returns a PIL image.
    """
    pixels = np.array(img)
    kernel = np.ones((7, 7), np.uint8)
    cleaned = []
    for channel in cv2.split(pixels):
        # dilate to wipe out text strokes, then blur to get the background
        background = cv2.medianBlur(cv2.dilate(channel, kernel), 21)
        # invert the difference so the foreground stays dark on light
        cleaned.append(255 - cv2.absdiff(channel, background))
    return PIL.Image.fromarray(cv2.merge(cleaned))
# In[415]:
def borderRemoval(img):
    """Whiten everything outside the external contours of the page so dark
    scan borders are removed. Accepts a PIL image, returns a PIL image."""
    pixels = np.array(img)
    cover = np.zeros(pixels.shape, dtype=np.uint8)
    found = cv2.findContours(pixels, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.findContours returns 2 or 3 values depending on the OpenCV version
    contours = found[0] if len(found) == 2 else found[1]
    cv2.fillPoly(cover, contours, [255, 255, 255])
    # invert: content regions -> 0, border regions -> 255, then OR to white
    cover = 255 - cover
    return PIL.Image.fromarray(cv2.bitwise_or(pixels, cover))
# In[416]:
def deskew(image: np.ndarray, angle: float, background: Union[int, Tuple[int, int, int]]) -> np.ndarray:
    """Rotate *image* by *angle* degrees, enlarging the canvas so nothing is
    cropped; uncovered pixels are filled with *background*.

    Returns the rotated image as a PIL image.

    NOTE(review): the variable names follow the deskew package's example —
    `old_width, old_height` is actually (rows, cols) from ``shape[:2]`` and
    the dsize passed to warpAffine is (height, width); the swaps cancel out,
    so do not "fix" one without the other.
    """
    image = np.array(image)
    old_width, old_height = image.shape[:2]
    angle_radian = math.radians(angle)
    # bounding-box size of the rotated image
    width = abs(np.sin(angle_radian) * old_height) + abs(np.cos(angle_radian) * old_width)
    height = abs(np.sin(angle_radian) * old_width) + abs(np.cos(angle_radian) * old_height)
    # rotate about the image centre (shape[1::-1] is (cols, rows) = (x, y))
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    # shift so the enlarged canvas stays centred on the content
    rot_mat[1, 2] += (width - old_width) / 2
    rot_mat[0, 2] += (height - old_height) / 2
    result = cv2.warpAffine(image, rot_mat, (int(round(height)), int(round(width))), borderValue=background)
    rotatedImage = PIL.Image.fromarray(result)
    return rotatedImage
# In[417]:
def imageBinarisation(img):
    """Binarise a grayscale image with Otsu's automatic threshold.

    Accepts a PIL image (or array), returns a black/white PIL image.
    """
    # the 0 threshold argument is ignored when THRESH_OTSU is set
    _, binary = cv2.threshold(np.array(img), 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return PIL.Image.fromarray(binary)
# In[418]:
def checkBackgroundColor(img, black_max_bgr=(50)):
    """Classify the overall background of an image as 'black' or 'white'.

    Parameters
    ----------
    img : array-like
        Image as anything ``np.array`` accepts (PIL image or ndarray),
        grayscale or multi-channel.
    black_max_bgr : int, optional
        Per-channel mean intensity below which the image counts as black.

    Returns
    -------
    str
        ``'black'`` if every channel's rounded mean is below the threshold,
        otherwise ``'white'``.
    """
    # Defect fixed: the original also computed an overall `mean_intensity`
    # that was never used — that dead work is removed.
    arr = np.array(img)
    mean_bgr = np.round(np.mean(arr, axis=(0, 1))).astype(np.uint8)
    return 'black' if np.all(mean_bgr < black_max_bgr) else 'white'
# In[419]:
def Inversion(img, color):
    """Return *img* untouched when *color* is "white"; otherwise invert the
    pixel values (so dark-background scans become dark-on-light).

    Accepts a PIL image (or array), returns a PIL image.
    """
    pixels = np.array(img)
    if color != "white":
        pixels = cv2.bitwise_not(pixels)
    return PIL.Image.fromarray(pixels)
# In[420]:
def saveImage(img, img_ref, folder):
    """Downscale *img* so its width is at most 1024 px (aspect preserved)
    and write it as ``<folder>/<img_ref>.jpg`` under the project directory."""
    # PIL's .size is (width, height)
    w, h = img.size
    scale = min(1, float(1024.0 / w))
    new_size = (int(scale * w), int(scale * h))
    resized = img.resize(new_size, Image.ANTIALIAS)
    name = '/home/ubuntu/BTECHPROECT/myproject/' + folder + '/' + img_ref + '.jpg'
    resized.save(name, 'JPEG')
# In[421]:
def deleteFiles(path):
    """Delete every entry directly inside directory *path*.

    Defect fixed: the original joined paths with ``path + f``, which only
    works when *path* already ends with a separator; ``os.path.join`` now
    handles both cases (identical result when a trailing separator is given).

    Note: like the original, this raises if *path* contains subdirectories,
    since ``os.remove`` only deletes files.
    """
    for entry in os.listdir(path):
        os.remove(os.path.join(path, entry))
def openImage(img):
    """Open the image at path (or file object) *img* with Pillow and return
    the resulting Image."""
    return Image.open(img)
| [
"numpy.array",
"cv2.bitwise_or",
"numpy.sin",
"os.remove",
"numpy.mean",
"os.listdir",
"cv2.threshold",
"cv2.medianBlur",
"deskew.determine_skew",
"numpy.round",
"cv2.fillPoly",
"cv2.merge",
"numpy.ones",
"math.radians",
"numpy.cos",
"cv2.cvtColor",
"cv2.split",
"cv2.getRotationMat... | [((752, 767), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (762, 767), False, 'import cv2\n'), ((780, 825), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {'dstCn': '(2)'}), '(im, cv2.COLOR_BGR2GRAY, dstCn=2)\n', (792, 825), False, 'import cv2\n'), ((840, 861), 'deskew.determine_skew', 'determine_skew', (['image'], {}), '(image)\n', (854, 861), False, 'from deskew import determine_skew\n'), ((924, 950), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image'], {}), '(image)\n', (943, 950), False, 'import PIL\n'), ((1046, 1059), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1054, 1059), True, 'import numpy as np\n'), ((1077, 1091), 'cv2.split', 'cv2.split', (['arr'], {}), '(arr)\n', (1086, 1091), False, 'import cv2\n'), ((1363, 1387), 'cv2.merge', 'cv2.merge', (['result_planes'], {}), '(result_planes)\n', (1372, 1387), False, 'import cv2\n'), ((1413, 1440), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['result'], {}), '(result)\n', (1432, 1440), False, 'import PIL\n'), ((1521, 1534), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1529, 1534), True, 'import numpy as np\n'), ((1546, 1581), 'numpy.zeros', 'np.zeros', (['arr.shape'], {'dtype': 'np.uint8'}), '(arr.shape, dtype=np.uint8)\n', (1554, 1581), True, 'import numpy as np\n'), ((1593, 1658), 'cv2.findContours', 'cv2.findContours', (['arr', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(arr, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1609, 1658), False, 'import cv2\n'), ((1715, 1756), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'cnts', '[255, 255, 255]'], {}), '(mask, cnts, [255, 255, 255])\n', (1727, 1756), False, 'import cv2\n'), ((1790, 1815), 'cv2.bitwise_or', 'cv2.bitwise_or', (['arr', 'mask'], {}), '(arr, mask)\n', (1804, 1815), False, 'import cv2\n'), ((1841, 1868), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['result'], {}), '(result)\n', (1860, 1868), False, 'import PIL\n'), ((2031, 2046), 'numpy.array', 'np.array', 
(['image'], {}), '(image)\n', (2039, 2046), True, 'import numpy as np\n'), ((2110, 2129), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (2122, 2129), False, 'import math\n'), ((2387, 2436), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (2410, 2436), False, 'import cv2\n'), ((2657, 2684), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['result'], {}), '(result)\n', (2676, 2684), False, 'import PIL\n'), ((2763, 2776), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2771, 2776), True, 'import numpy as np\n'), ((2878, 2905), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['result'], {}), '(result)\n', (2897, 2905), False, 'import PIL\n'), ((3009, 3022), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3017, 3022), True, 'import numpy as np\n'), ((3044, 3069), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(0, 1)'}), '(arr, axis=(0, 1))\n', (3051, 3069), True, 'import numpy as np\n'), ((3092, 3116), 'numpy.round', 'np.round', (['mean_bgr_float'], {}), '(mean_bgr_float)\n', (3100, 3116), True, 'import numpy as np\n'), ((3333, 3346), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3341, 3346), True, 'import numpy as np\n'), ((3430, 3457), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['result'], {}), '(result)\n', (3449, 3457), False, 'import PIL\n'), ((3882, 3898), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3892, 3898), False, 'import os\n'), ((3962, 3977), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (3972, 3977), False, 'from PIL import Image\n'), ((1227, 1258), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated_img', '(21)'], {}), '(dilated_img, 21)\n', (1241, 1258), False, 'import cv2\n'), ((2790, 2853), 'cv2.threshold', 'cv2.threshold', (['arr', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(arr, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2803, 2853), False, 'import cv2\n'), ((3234, 3266), 'numpy.all', 
'np.all', (['(mean_bgr < black_max_bgr)'], {}), '(mean_bgr < black_max_bgr)\n', (3240, 3266), True, 'import numpy as np\n'), ((3389, 3409), 'cv2.bitwise_not', 'cv2.bitwise_not', (['arr'], {}), '(arr)\n', (3404, 3409), False, 'import cv2\n'), ((3908, 3927), 'os.remove', 'os.remove', (['(path + f)'], {}), '(path + f)\n', (3917, 3927), False, 'import os\n'), ((1184, 1209), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (1191, 1209), True, 'import numpy as np\n'), ((1284, 1310), 'cv2.absdiff', 'cv2.absdiff', (['plane', 'bg_img'], {}), '(plane, bg_img)\n', (1295, 1310), False, 'import cv2\n'), ((2339, 2367), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (2347, 2367), True, 'import numpy as np\n'), ((3197, 3209), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (3204, 3209), True, 'import numpy as np\n'), ((2146, 2166), 'numpy.sin', 'np.sin', (['angle_radian'], {}), '(angle_radian)\n', (2152, 2166), True, 'import numpy as np\n'), ((2187, 2207), 'numpy.cos', 'np.cos', (['angle_radian'], {}), '(angle_radian)\n', (2193, 2207), True, 'import numpy as np\n'), ((2238, 2258), 'numpy.sin', 'np.sin', (['angle_radian'], {}), '(angle_radian)\n', (2244, 2258), True, 'import numpy as np\n'), ((2278, 2298), 'numpy.cos', 'np.cos', (['angle_radian'], {}), '(angle_radian)\n', (2284, 2298), True, 'import numpy as np\n')] |
from __future__ import print_function
import unittest
import numpy as np
import scipy.sparse as sp
import discretize
from SimPEG import maps, utils
from SimPEG import data_misfit, simulation, survey
np.random.seed(17)
class DataMisfitTest(unittest.TestCase):
    """Unit tests for SimPEG's L2 data-misfit object on a small 1D problem."""

    def setUp(self):
        # Build a tiny 1D problem: 30-cell mesh, log-conductivity model, and
        # an ExponentialSinusoid simulation with 20 colocated receivers.
        # NOTE: statement order matters — the module-level RNG seed makes
        # every RNG-consuming SimPEG call below order-dependent.
        mesh = discretize.TensorMesh([30])
        sigma = np.ones(mesh.nC)
        model = np.log(sigma)

        # prob = DC.Simulation3DCellCentered(mesh, rhoMap=Maps.ExpMap(mesh))
        receivers = survey.BaseRx(20 * [[0.0]])
        source = survey.BaseSrc([receivers])

        sim = simulation.ExponentialSinusoidSimulation(
            mesh=mesh, survey=survey.BaseSurvey([source]), model_map=maps.ExpMap(mesh)
        )

        synthetic_data = sim.make_synthetic_data(model)
        dobs = synthetic_data.dobs

        # uncertainties: 1% relative error plus a small absolute noise floor
        self.relative = 0.01
        self.eps = 1e-8

        synthetic_data.relative_error = self.relative
        synthetic_data.noise_floor = self.eps

        dmis = data_misfit.L2DataMisfit(simulation=sim, data=synthetic_data)

        self.model = model
        self.mesh = mesh
        self.sim = sim
        self.survey = sim.survey
        # self.survey = survey
        # self.prob = prob
        self.data = synthetic_data
        self.dmis = dmis

    def test_Wd_depreciation(self):
        # Both reading and writing the deprecated Wd alias must warn.
        with self.assertWarns(DeprecationWarning):
            print(self.dmis.Wd)

        with self.assertWarns(DeprecationWarning):
            self.dmis.Wd = utils.Identity()

    def test_DataMisfit_nP(self):
        # The misfit's parameter count equals the mesh cell count.
        self.assertTrue(self.dmis.nP == self.mesh.nC)

    def test_zero_uncertainties(self):
        # With zero relative error and noise floor, building W must fail.
        self.data.relative_error = 0.0
        self.data.noise_floor = 0.0
        with self.assertRaises(Exception):
            Worig = self.dmis.W

    def test_setting_W(self):
        self.data.relative_error = self.relative
        self.data.noise_floor = self.eps
        Worig = self.dmis.W
        # Assigning a length-nD vector should become a diagonal nD x nD W.
        v = np.random.rand(self.survey.nD)
        self.dmis.W = v
        self.assertTrue(self.dmis.W.shape == (self.survey.nD, self.survey.nD))
        self.assertTrue(np.all(self.dmis.W.diagonal() == v))
        # A wrong-sized vector must be rejected.
        with self.assertRaises(Exception):
            self.dmis.W = np.random.rand(self.survey.nD + 10)
        self.dmis.W = Worig

    def test_DataMisfitOrder(self):
        # Derivative-order check provided by SimPEG's misfit .test() helper.
        self.data.relative_error = self.relative
        self.data.noise_floor = self.eps
        self.dmis.test(x=self.model)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"discretize.TensorMesh",
"numpy.ones",
"SimPEG.survey.BaseSrc",
"numpy.random.rand",
"SimPEG.maps.ExpMap",
"numpy.log",
"SimPEG.utils.Identity",
"SimPEG.survey.BaseSurvey",
"SimPEG.survey.BaseRx",
"numpy.random.seed",
"unittest.main",
"SimPEG.data_misfit.L2DataMisfit"
] | [((203, 221), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (217, 221), True, 'import numpy as np\n'), ((2453, 2468), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2466, 2468), False, 'import unittest\n'), ((301, 328), 'discretize.TensorMesh', 'discretize.TensorMesh', (['[30]'], {}), '([30])\n', (322, 328), False, 'import discretize\n'), ((345, 361), 'numpy.ones', 'np.ones', (['mesh.nC'], {}), '(mesh.nC)\n', (352, 361), True, 'import numpy as np\n'), ((378, 391), 'numpy.log', 'np.log', (['sigma'], {}), '(sigma)\n', (384, 391), True, 'import numpy as np\n'), ((491, 518), 'SimPEG.survey.BaseRx', 'survey.BaseRx', (['(20 * [[0.0]])'], {}), '(20 * [[0.0]])\n', (504, 518), False, 'from SimPEG import data_misfit, simulation, survey\n'), ((536, 563), 'SimPEG.survey.BaseSrc', 'survey.BaseSrc', (['[receivers]'], {}), '([receivers])\n', (550, 563), False, 'from SimPEG import data_misfit, simulation, survey\n'), ((980, 1041), 'SimPEG.data_misfit.L2DataMisfit', 'data_misfit.L2DataMisfit', ([], {'simulation': 'sim', 'data': 'synthetic_data'}), '(simulation=sim, data=synthetic_data)\n', (1004, 1041), False, 'from SimPEG import data_misfit, simulation, survey\n'), ((1925, 1955), 'numpy.random.rand', 'np.random.rand', (['self.survey.nD'], {}), '(self.survey.nD)\n', (1939, 1955), True, 'import numpy as np\n'), ((1468, 1484), 'SimPEG.utils.Identity', 'utils.Identity', ([], {}), '()\n', (1482, 1484), False, 'from SimPEG import maps, utils\n'), ((2191, 2226), 'numpy.random.rand', 'np.random.rand', (['(self.survey.nD + 10)'], {}), '(self.survey.nD + 10)\n', (2205, 2226), True, 'import numpy as np\n'), ((650, 677), 'SimPEG.survey.BaseSurvey', 'survey.BaseSurvey', (['[source]'], {}), '([source])\n', (667, 677), False, 'from SimPEG import data_misfit, simulation, survey\n'), ((689, 706), 'SimPEG.maps.ExpMap', 'maps.ExpMap', (['mesh'], {}), '(mesh)\n', (700, 706), False, 'from SimPEG import maps, utils\n')] |
import numpy as np
import cv2
import copy
from polygon import Polygon
def draw_polygon(img, polygon):
    """Alpha-blend one filled polygon onto a copy of *img* and return it.

    The polygon's vertices are normalised coordinates; column 0 is scaled by
    the image height and column 1 by the width (+0.5 for rounding), its
    color by 255, and the fill is mixed in with the polygon's alpha.
    """
    rows, cols = img.shape[:2]
    # scale normalised vertex coordinates up to pixel coordinates
    verts = polygon.vertices.copy()
    verts[:, 0] = rows * verts[:, 0] + 0.5
    verts[:, 1] = cols * verts[:, 1] + 0.5
    # paint the filled polygon on a separate layer
    overlay = img.copy()
    fill = np.array(polygon.color * 255 + 0.5, dtype=int)
    cv2.fillPoly(overlay, np.array([verts], np.int32), fill.tolist())
    # blend the layer onto the untouched copy of the input
    blended = img.copy()
    cv2.addWeighted(overlay, polygon.alpha, blended, 1 - polygon.alpha, 0, blended)
    return blended
def draw_polygons(img, polygons):
    """Blend each polygon in *polygons*, in order, onto a copy of *img*."""
    canvas = img.copy()
    for poly in polygons:
        canvas = draw_polygon(canvas, poly)
    return canvas
def decode(x):
    """Unpack a flat genome vector into a Polygon.

    Layout: x[0:3] = color, x[3] = alpha, x[4:] = flattened (n, 2) vertices.
    """
    points = np.reshape(np.array(x[4:]), (-1, 2))
    return Polygon(points, x[:3], x[3])
def m(x, y):
    """Mating operator: alias for uniform random inheritance."""
    return random_inheritance(x, y)
def random_inheritance(x, y):
    """Uniform crossover: each position comes from *y* with probability 0.5,
    otherwise from *x*. Returns a new array; the parents are untouched."""
    take_y = np.random.rand(x.shape[0]) < 0.5
    child = x.copy()
    child[take_y] = y[take_y]
    return child
def inheritance(x, y):
    """One-point crossover: keep *x* up to a random cut index, then take the
    tail of *y*. The parents are left unmodified."""
    cut = np.random.randint(len(x))
    child = copy.deepcopy(x)
    child[cut:] = y[cut:]
    return child
if __name__ == "__main__":
    # Smoke test: mate two genomes and print child plus (unchanged) parents.
    x = np.array([1, 5, 8, 2, 7])
    y = np.array([2, 3, 6, 4, 1])
    print(m(x, y), x, y)
"numpy.random.rand",
"polygon.Polygon",
"numpy.array",
"cv2.addWeighted",
"copy.deepcopy"
] | [((425, 455), 'numpy.array', 'np.array', (['[vertices]', 'np.int32'], {}), '([vertices], np.int32)\n', (433, 455), True, 'import numpy as np\n'), ((469, 515), 'numpy.array', 'np.array', (['(polygon.color * 255 + 0.5)'], {'dtype': 'int'}), '(polygon.color * 255 + 0.5, dtype=int)\n', (477, 515), True, 'import numpy as np\n'), ((593, 661), 'cv2.addWeighted', 'cv2.addWeighted', (['poly', 'polygon.alpha', 'ret', '(1 - polygon.alpha)', '(0)', 'ret'], {}), '(poly, polygon.alpha, ret, 1 - polygon.alpha, 0, ret)\n', (608, 661), False, 'import cv2\n'), ((967, 998), 'polygon.Polygon', 'Polygon', (['vertices', 'color', 'alpha'], {}), '(vertices, color, alpha)\n', (974, 998), False, 'from polygon import Polygon\n'), ((1222, 1238), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (1235, 1238), False, 'import copy\n'), ((1350, 1375), 'numpy.array', 'np.array', (['[1, 5, 8, 2, 7]'], {}), '([1, 5, 8, 2, 7])\n', (1358, 1375), True, 'import numpy as np\n'), ((1384, 1409), 'numpy.array', 'np.array', (['[2, 3, 6, 4, 1]'], {}), '([2, 3, 6, 4, 1])\n', (1392, 1409), True, 'import numpy as np\n'), ((1112, 1138), 'numpy.random.rand', 'np.random.rand', (['x.shape[0]'], {}), '(x.shape[0])\n', (1126, 1138), True, 'import numpy as np\n'), ((925, 940), 'numpy.array', 'np.array', (['x[4:]'], {}), '(x[4:])\n', (933, 940), True, 'import numpy as np\n')] |
from pyabc import ABCSMC, Distribution
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
import scipy.stats as st
import numpy as np
from datetime import datetime, timedelta
# Shared fixtures for the stopping-criteria tests below.
set_acc_rate = 0.2  # minimum acceptance rate passed to ABCSMC.run
pop_size = 10  # particle population size per generation
def model(x):
    """Some model"""
    noise = np.random.randn()
    return {"par": x["par"] + noise}
def dist(x, y):
    """Some distance"""
    diff = x["par"] - y["par"]
    return -diff if diff < 0 else diff
def test_stop_acceptance_rate_too_low(db_path):
    """Test the acceptance rate condition."""
    prior = Distribution(par=st.uniform(0, 10))
    abc = ABCSMC(model, prior, dist, pop_size)
    abc.new(db_path, {"par": .5})
    history = abc.run(-1, 8, min_acceptance_rate=set_acc_rate)
    pops = history.get_all_populations()
    pops["acceptance_rate"] = pops["particles"] / pops["samples"]
    # the run must have stopped because the final generation fell below the bar
    assert pops["acceptance_rate"].iloc[-1] < set_acc_rate
    penultimate_ok = pops["acceptance_rate"].iloc[-2] >= set_acc_rate
    # the calibration iteration (t == -1) is exempt from the rate check
    assert penultimate_ok or pops["t"].iloc[-2] == -1
def test_stop_early(db_path):
    """Test early stopping inside a generation."""
    samplers = [MulticoreEvalParallelSampler(check_max_eval=True),
                SingleCoreSampler(check_max_eval=True)]
    for sampler in samplers:
        abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist,
                     pop_size, sampler=sampler)
        abc.new(db_path, {"par": .5})
        history = abc.run(
            max_nr_populations=8, min_acceptance_rate=set_acc_rate)
        pops = history.get_all_populations()
        # offset with n_procs as more processes can have run at termination
        n_procs = getattr(sampler, 'n_procs', 1)
        pops["corrected_acceptance_rate"] = \
            pops["particles"] / (pops["samples"] - (n_procs-1))
        assert pops["corrected_acceptance_rate"].iloc[-1] >= set_acc_rate
def test_total_nr_simulations(db_path):
    """Test the total number of samples condition."""
    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, pop_size)
    abc.new(db_path, {"par": .5})
    budget = 142
    history = abc.run(-1, 100, max_total_nr_simulations=budget)
    assert history.total_nr_simulations >= budget
    # Directly check on the history
    pops = history.get_all_populations()
    # the budget must not have been exhausted before the final generation
    assert sum(pops['samples'][:-1]) < budget
    # sanity: total_nr_simulations equals the summed per-generation samples
    assert sum(pops['samples']) == history.total_nr_simulations
def test_max_walltime(db_path):
    """Test the maximum walltime condition."""
    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, pop_size)
    abc.new(db_path, {"par": .5})
    limit = timedelta(milliseconds=500)
    started = datetime.now()
    history = abc.run(-1, 100, max_walltime=limit)
    # the run must have overrun the wall-clock limit and stopped early
    assert datetime.now() - started > limit
    assert history.n_populations < 100
| [
"pyabc.sampler.MulticoreEvalParallelSampler",
"scipy.stats.uniform",
"datetime.datetime.now",
"datetime.timedelta",
"numpy.random.randn",
"pyabc.sampler.SingleCoreSampler"
] | [((1042, 1091), 'pyabc.sampler.MulticoreEvalParallelSampler', 'MulticoreEvalParallelSampler', ([], {'check_max_eval': '(True)'}), '(check_max_eval=True)\n', (1070, 1091), False, 'from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler\n'), ((1109, 1147), 'pyabc.sampler.SingleCoreSampler', 'SingleCoreSampler', ([], {'check_max_eval': '(True)'}), '(check_max_eval=True)\n', (1126, 1147), False, 'from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler\n'), ((2723, 2737), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2735, 2737), False, 'from datetime import datetime, timedelta\n'), ((2757, 2784), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': '(500)'}), '(milliseconds=500)\n', (2766, 2784), False, 'from datetime import datetime, timedelta\n'), ((300, 317), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (315, 317), True, 'import numpy as np\n'), ((2854, 2868), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2866, 2868), False, 'from datetime import datetime, timedelta\n'), ((534, 551), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (544, 551), True, 'import scipy.stats as st\n'), ((1951, 1968), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (1961, 1968), True, 'import scipy.stats as st\n'), ((2633, 2650), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (2643, 2650), True, 'import scipy.stats as st\n'), ((1238, 1255), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (1248, 1255), True, 'import scipy.stats as st\n')] |
import os, random, sys, time, csv, pickle, re, pkg_resources
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel
from tkinter.ttk import Progressbar, Separator, Combobox
from tkinter import filedialog as fd
import tkinter.font as font
from scipy.io import loadmat, savemat, whosmat
from scipy.optimize import nnls
from scipy.interpolate import interp1d
from sklearn.cluster import KMeans
from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.ticker as tkr
import matplotlib.cm as cmaps # https://matplotlib.org/gallery/color/colormap_reference.html
import numpy as np
from numpy.matlib import repmat
from midiutil import MIDIFile # need to move to MIDO for
try:
from pyanthem.pyanthem_vars import *
except:
from pyanthem_vars import *
from git import Repo
from google_drive_downloader import GoogleDriveDownloader as gdd
import subprocess as sp
import PIL.Image as Image
def download_soundfont(font):
	'''
	Downloads soundfonts from https://sites.google.com/site/soundfonts4u/

	Fix: the previous bare `except:` swallowed *any* error (network failure,
	disk error) and reported it as "font not available". The unknown-font
	case is now checked explicitly against the sound_fonts dict, and real
	download errors propagate.
	'''
	sf_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts')
	if not os.path.isdir(sf_path):
		os.mkdir(sf_path)
	if font not in sound_fonts:
		print(f'Sound font {font} is not available font. Please choose from these: {sound_fonts.keys()}')
		return
	dest = os.path.join(sf_path,font+'.sf2')
	if os.path.isfile(dest):
		print(f'Sound font {font} already present in soundfont library.')
	else:
		gdd.download_file_from_google_drive(file_id=sound_fonts[font],dest_path=dest)
		print(f'Sound font {font} downloaded to soundfont library.')
def init_entry(fn):
	'''
	Wraps a default value in the matching Tk variable: StringVar for str
	defaults, DoubleVar otherwise, pre-set to the given value.
	'''
	var = StringVar() if isinstance(fn, str) else DoubleVar()
	var.set(fn)
	return var
def stack_videos(videos,fn='output.mp4'):
	'''
	Stacks .mp4 videos horizontally (and combines audio) into fn.

	Fix: the filter graph previously hard-coded two input pads
	([0:v][1:v] / [0:a][1:a]) while declaring hstack=inputs=<n>, which is an
	invalid graph for more than two videos. Input pads are now generated per
	video and amerge is told its input count explicitly.
	'''
	nvids = len(videos)
	instr = ''.join(' -i '+v for v in videos)
	vpads = ''.join('[{}:v]'.format(i) for i in range(nvids))
	apads = ''.join('[{}:a]'.format(i) for i in range(nvids))
	os.system('ffmpeg -y '+instr+' -filter_complex "'+vpads+'hstack=inputs='+str(nvids)+'[v]; '+apads+'amerge=inputs='+str(nvids)+'[a]" -map "[v]" -map "[a]" -ac 2 '+fn)
def uiopen(title,filetypes):
	'''
	Shows a file-open dialog (with a hidden Tk root) and returns the chosen
	path, normalized. A cancelled dialog normalizes to '.', which callers
	treat as "no selection".
	'''
	hidden_root = Tk()
	hidden_root.withdraw()
	selection = os.path.normpath(fd.askopenfilename(title=title,filetypes=filetypes))
	hidden_root.update()
	hidden_root.destroy()
	return selection
def run(display=True):
	'''
	Main entry point: builds a GUI instance. With display=True the Tk main
	loop blocks until the window closes; headless, a banner is printed and
	the instance is returned for scripted (CLI) use.
	'''
	app = GUI(display=display)
	if not display:
		print('Welcome to pyanthem v{}!'.format(pkg_resources.require("pyanthem")[0].version))
	else:
		app.mainloop()
	return app
class GUI(Tk):
	def __init__(self,display=True):
		'''
		Initializes the GUI instance. display=True runs the Tk.__init__(self)
		command, while display=False skips that and visual initialization, keeping
		the GUI 'hidden'
		'''
		self.display = display
		self.download_data() # fetch example datasets on first run
		if self.display:
			# Only become a real Tk window when visible; headless instances
			# never call Tk.__init__ and are used purely programmatically.
			Tk.__init__(self)
			self.default_font=font.nametofont("TkDefaultFont")
			self.initGUI()
	def quit(self,event=None):
		'''
		Quits the GUI. In an IPython/Jupyter session (where get_ipython()
		exists) only the Tk window is destroyed; in a plain interpreter the
		NameError path exits the whole process instead.
		'''
		try:
			# This raises a NameError exception in a notebook env, since
			# sys.exit() is not an appropriate method
			get_ipython().__class__.__name__
			self.destroy()
		except NameError:
			sys.exit()
def message(self,message):
'''
Sends message through print if no GUI, through self.status if GUI is running
'''
if self.display:
self.status['text'] = message
else:
print(message)
def check_data(self):
'''
Checks to make sure data is loaded.
'''
if not hasattr(self,'data'):
self.message('Error: No dataset has been loaded.')
return False
return True
def check_save_path(self):
if self.cfg['save_path'] is None:
print('Error: cfg["save_path"] is empty - please provide one!')
return False
return True
def self_to_cfg(self):
'''
This function is necessary to allow command-line access of the GUI functions.
StringVar() and IntVar() allow for dynamic, quick field updating and access,
but cannot be used outside of a mainloop or pickled. for this reason, I convert
all StringVars and IntVars to a new dict called 'self.cfg', that can be accessed
oustide the GUI and dumped to a pickle file, which essentially "freezes" the GUI.
'''
self.cfg = {k: getattr(self,k).get() if self_fns[k] is 'entry' else getattr(self,k) for k in self_fns}
if hasattr(self,'cfginfo'):
text=''
for key in self.cfg:
text+=str(key)+': '+str(self.cfg[key])+'\n'
self.cfginfotext['text']=text
self.cfginfo.update()
def download_data(self):
'''
Downloads example datasets from https://github.com/nicthib/anthem_datasets
'''
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_datasets')
if not os.path.isdir(path):
print('Detected new installation. Downloading example datasets...')
try:
Repo.clone_from('https://github.com/nicthib/anthem_datasets.git',path)
print(f'Example datasets downloaded to {path}')
except:
print('ERROR: git executable not present. Please visit https://git-scm.com/downloads to install.')
def load_data(self,filein=None):
'''
loads dataset from filein. At the time, only supports .mat files.
'''
if filein is None:
filein=uiopen(title='Select .mat file for import',filetypes=[('.mat files','*.mat')])
if filein == '.':
return
self.data = loadmat(filein)
try:
self.data['W_shape'] = self.data['W'].shape
self.data['W'] = self.data['W'].reshape(self.data['W'].shape[0]*self.data['W'].shape[1],self.data['W'].shape[2])
self.data['fr'] = float(self.data['fr'])
if not self.display:
return self
except:
self.message('Error: .mat file incompatible. Please select a .mat file with three variables: W (3D), H (2D), and fr (1-element float)')
	def load_GUI(self):
		'''
		GUI-addons for load_data. Prompts user with filedialog, assigns defaults and sets GUI fields.
		'''
		filein=uiopen(title='Select .mat file for import',filetypes=[('.mat files','*.mat')])
		if filein == '.':
			return
		self.load_data(filein)
		# H_pp/H_fp/W_pp start as straight copies; process_H_W() rebuilds them
		self.data['H_pp'] = self.data['H']
		self.data['H_fp'] = self.data['H']
		self.data['W_pp'] = self.data['W']
		self.fr.set(self.data['fr'])
		self.file_in.set(os.path.splitext(os.path.split(filein)[1])[0])
		# Set some defaults
		self.file_out.set(self.file_in.get())
		self.save_path.set(os.path.split(filein)[0])
		Hstr = 'H' # for whatever reason, can't double nest quotations in an f-string :/
		# default brightness/threshold: mean(H) + std(H), rounded to 3 sig figs
		self.brightness.set(f'{float(f"{np.mean(self.data[Hstr])+np.std(self.data[Hstr]):.3g}"):g}')
		self.threshold.set(f'{float(f"{np.mean(self.data[Hstr])+np.std(self.data[Hstr]):.3g}"):g}')
		self.Wshow_arr = list(range(len(self.data['H'])))
		self.process_H_W()
		self.init_plots()
		self.refresh_GUI()
def dump_cfg(self):
'''
Saves config file. This is run every time a user calls write_audio() or write_video()
'''
if self.check_data():
file_out = os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'_cfg.p'
pickle.dump(self.cfg,open(file_out, "wb"))
self.message(f'cfg file saved to {file_out}')
def load_config(self,filein=None):
'''
Loads .p file containing dict of parameters needed to create outputs. If display=True, sets GUI fields.
'''
if filein is None:
filein=uiopen(title='Select pickle file for import',filetypes=[('pickle file','*.p'),('pickle file','*.pkl'),('pickle file','*.pickle')])
if filein == '.':
return
with open(filein, "rb") as f:
self.cfg = pickle.load(f)
if self.display:
for key,value in self.cfg.items():
if self_fns[key] is 'entry':
getattr(self,key).set(value)
else:
setattr(self,key,value)
self.refresh_GUI()
else:
return self
	def refresh_GUI(self,event=None):
		'''
		Redraws both embedded matplotlib canvases from the current processed
		data: Hax1 (H traces + threshold), Hax2 (audio preview image), Wax1
		(composited video frame at the slider position) and Wax2 (colored
		spatial maps). No-op when no dataset is loaded.
		'''
		if not self.check_data():
			return
		self.init_plots()
		# Update slider (Need to move the command)
		if self.frameslider.get() > len(self.data['H_pp'].T): # This (usually) occurs when the user crops the dataset
			self.frameslider.set(1)
		self.frameslider['to'] = int(len(self.data['H_pp'].T)-1)
		Hstd = self.data['H_pp'].std()*3
		if self.offsetH.get():
			# stack the traces vertically, one 3*std band per component
			tmpH = self.data['H_pp'].T - repmat([w*Hstd for w in list(range(len(self.Wshow_arr)))],len(self.data['H_pp'].T),1)
		else:
			tmpH = self.data['H_pp'].T
		self.H_plot = self.Hax1.plot(tmpH,linewidth=.5)
		for i,j in enumerate(self.Hax1.lines):
			j.set_color(self.cmap[i])
		if not self.offsetH.get():
			# overlaid traces: show threshold and zero reference lines
			thresh_line = self.Hax1.plot(np.ones((len(self.data['H_pp'].T,)))*self.cfg['threshold'],linestyle='dashed',color='0',linewidth=1)
			zero_line = self.Hax1.plot(np.zeros((len(self.data['H_pp'].T,))),linestyle='dashed',color='.5',linewidth=1)
			self.legend = self.Hax1.legend((thresh_line[0],), ('Threshold',))
			#self.legend = self.Hax1.legend((thresh_line[0],zero_line[0]), ('Threshold','Baseline'))
		if self.cfg['audio_format'] == 'Analog':
			# Analog mode previews the raw (clipped) H; others preview H_fp notes
			self.H_p_plot = self.Hax2.imshow(self.data['H_pp'],interpolation='none',cmap=plt.get_cmap('gray'))
			self.H_p_plot.set_clim(0, np.max(self.data['H_pp']))
		else:
			self.H_p_plot = self.Hax2.imshow(self.data['H_fp'],interpolation='none',cmap=plt.get_cmap('gray'))
		self.Hax2.xaxis.set_major_formatter(tkr.FuncFormatter(lambda x, pos: '{:.2g}'.format(x/self.cfg['fr'])))
		self.Hax2.set(xlabel='time (sec)',ylabel='Component #')
		self.Hax1.set_xlim(0, len(self.data['H_pp'].T))
		self.Hax1.set_ylim(np.min(tmpH), np.max(tmpH))
		if self.offsetH.get():
			self.Hax1.set(ylabel='Component #')
		else:
			self.Hax1.set(ylabel='Magnitude')
		self.Hax1.spines['left'].set_visible(False)
		self.Hax1.spines['top'].set_visible(False)
		self.Hax1.spines['bottom'].set_visible(False)
		self.Hax1.spines['right'].set_visible(False)
		self.Hax1.yaxis.tick_right()
		self.Hax1.yaxis.set_label_position("right")
		self.Hax1.tick_params(axis='x',which='both',bottom=False, top=False, labelbottom=False, right=False)
		# thin out component tick labels once there are more than 12 traces
		if len(self.Wshow_arr) > 12:
			yticks = np.arange(4,len(self.data['H_pp']),5)
			yticklabels = np.arange(4,len(self.data['H_pp']),5)
		else:
			yticks = np.arange(0,len(self.data['H_pp']),1)
			yticklabels = np.arange(0,len(self.data['H_pp']),1)
		if self.offsetH.get():
			self.Hax1.set(yticks=-yticks*Hstd,yticklabels=yticklabels)
		self.Hax2.set(yticks=yticks,yticklabels=yticklabels)
		self.Hax2.spines['left'].set_visible(False)
		self.Hax2.spines['top'].set_visible(False)
		self.Hax2.spines['bottom'].set_visible(False)
		self.Hax2.spines['right'].set_visible(False)
		self.Hax2.yaxis.tick_right()
		self.Hax2.yaxis.set_label_position("right")
		# composited frame: (pixels x comps) @ diag(H column) @ RGB colormap
		self.imWH = self.Wax1.imshow((self.data['W_pp']@np.diag(self.data['H_pp'][:,self.frameslider.get()])@self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
		self.imW = self.Wax2.imshow((self.data['W_pp']@self.cmap[:,:-1]*255/np.max(self.data['W_pp'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
		self.H_p_plot.axes.set_aspect('auto')
		self.imW.axes.set_aspect('equal')
		self.imWH.axes.set_aspect('equal')
		self.canvas_H.draw()
		self.canvas_W.draw()
		self.refresh_slider([])
		self.status['text'] = '♫ ♪ ♫ ♪ ♫'
def process_H_W(self):
'''
Core function of pyanthem. Applies all cfg settings to dataset, and creates the note dict used for synthesis.
Automatically calls refresh_GUI() if display=True
'''
if self.display:
self.self_to_cfg()
self.status['text'] = 'Updating...'
self.update()
if self.cfg['Wshow'] == 'all':
self.Wshow_arr = list(range(len(self.data['H'])))
# regex expression which lazily checks for a bracketed expression containing numbers, colons and commas.
elif re.match('^\[[0-9,: ]*\]$',self.cfg['Wshow']) is not None:
# This is a magic function which transforms bracketed string arrays to actual numpy arrays.
# Example: '[1,3,5:8]' --> array([1,3,5,6,7])
self.Wshow_arr = eval('np.r_'+self.cfg['Wshow'])
# Edge case
if np.max(w) <= len(self.data['H']):
self.Wshow_arr = np.asarray(list(range(len(self.data['H']))))[w]
else:
self.message('For \'components to show\', please input indices with commas and colons enclosed by square brackets, or \'all\' for all components.')
return
self.data['H_pp'] = self.data['H'][self.Wshow_arr,int(len(self.data['H'].T)*self.cfg['start_percent']/100):int(len(self.data['H'].T)*self.cfg['end_percent']/100)]
self.data['H_pp'] = self.data['H_pp']+self.cfg['baseline']
self.data['W_pp'] = self.data['W'][:,self.Wshow_arr]
# make_keys()
self.keys,i = [],0
while len(self.keys) < len(self.data['H_pp']):
self.keys.extend([k+i+key_opts[self.cfg['key']]+octave_add_opts[self.cfg['octave_add']] for k in scale_keys[self.cfg['scale_type']]])
i+=12
self.keys=self.keys[:len(self.data['H_pp'])]
# Making note dict
true_fr = self.cfg['fr']*self.cfg['speed']/100
ns = int(len(self.data['H_pp'].T)*1000/true_fr)
t1 = np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],len(self.data['H_pp'].T))
t2 = np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],ns)
nchan = len(self.data['H_pp'])
Hmax = np.max(self.data['H_pp'])
self.data['H_fp'] = np.zeros(np.shape(self.data['H_pp']))
self.nd = {}
self.nd['st'],self.nd['en'],self.nd['note'],self.nd['mag'] = [],[],[],[]
for i in range(nchan):
H_rs = interp1d(t1,self.data['H_pp'][i,:])(t2)
H_b = H_rs.copy()
H_b[H_b<self.cfg['threshold']] = 0
H_b[H_b>=self.cfg['threshold']] = 1
H_b[0] = 0
H_b[-1] = 0
TC = np.diff(H_b)
st = np.argwhere(TC == 1)
en = np.argwhere(TC == -1)
bn = np.ndarray.flatten(np.argwhere(np.ndarray.flatten(en-st) < 2)).tolist()
st = np.ndarray.flatten(st).tolist()
en = np.ndarray.flatten(en).tolist()
# Remove super short notes
for ii in sorted(bn, reverse=True):
st.pop(ii)
en.pop(ii)
self.nd['st'].extend([x/1000 for x in st])
self.nd['en'].extend([x/1000 for x in en])
for j in range(len(st)):
mag = np.max(H_rs[st[j]:en[j]])
self.data['H_fp'][i,int(st[j]*true_fr/1000):int(en[j]*true_fr/1000)] = mag
self.nd['mag'].append(int(mag * 127 / Hmax))
self.nd['note'].append(self.keys[i])
self.data['H_pp'][self.data['H_pp'] < 0] = 0
# Colormap
if hasattr(cmaps,self.cfg['cmapchoice']):
cmap = getattr(cmaps,self.cfg['cmapchoice'])
self.cmap = cmap(np.linspace(0,1,len(self.data['H_pp'])))
else:
self.message(f'cmap {self.cfg["cmapchoice"]} not found. Please check the matplotlib documentation for a list of standard colormaps.')
return
if self.display:
self.refresh_GUI()
self.status['text'] = '♫ ♪ ♫ ♪ ♫'
	def refresh_slider(self,event):
		'''
		Slider callback: recomposites the video-preview frame (W weighted by
		the H column at the slider position, mapped through the colormap) and
		redraws the W canvas.
		'''
		#try: # May want to use hasattr() here instead
		self.imWH.set_data((self.data['W_pp']@np.diag(self.data['H_pp'][:,self.frameslider.get()])@self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
		self.canvas_W.draw()
		#self.H_vline.set_xdata([self.frameslider.get(), self.frameslider.get()])
		#self.H_vline.set_ydata(self.Hax1.get_ylim())
		#self.canvas_H.draw()
	def preview_notes(self):
		'''
		Plays each component's assigned note through the selected .sf2
		soundfont while highlighting that component's spatial map. Requires
		fluidsynth on PATH and an .sf2 chosen under "Audio format".
		'''
		if self.audio_format.get().endswith('.sf2') and self.check_data():
			self.process_H_W()
			self.message('Previewing notes...')
			fn_font = os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts',self.audio_format.get())
			fn_midi = os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.mid')
			fn_wav = os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.wav')
			if get_init() is None: # Checks if pygame has initialized audio engine. Only needs to be run once per instance
				pre_init(44100, -16, 2, 1024)
				init()
				set_num_channels(128) # We will never need more than 128...
			MIDI = MIDIFile(1) # One track
			MIDI.addTempo(0,0,60) # addTempo(track, time, tempo)
			for i in range(len(self.keys)):
				# one half-second note per component, spaced 0.5s apart
				MIDI.addNote(0, 0, self.keys[i], i/2, .5, 100)
			with open(fn_midi, 'wb') as mid:
				MIDI.writeFile(mid)
			# render the preview MIDI to wav with fluidsynth, then stream it
			cmd = 'fluidsynth -ni -F {} -r 44100 {} {} '.format(fn_wav,fn_font,fn_midi)
			print(cmd)
			os.system(cmd)
			music.load(fn_wav)
			for i in range(len(self.keys)):
				t = time.time()
				self.imW.remove()
				Wtmp = self.data['W_pp'][:,i]
				cmaptmp = self.cmap[i,:-1]
				self.imW = self.Wax2.imshow((Wtmp[:,None]@cmaptmp[None,:]*255/np.max(self.data['W_pp'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
				self.canvas_W.draw()
				self.update()
				if i == 0:
					music.play(0)
				# keep a steady half-second cadence, accounting for draw time
				time.sleep(.5-np.min(((time.time()-t),.5)))
			os.remove(fn_midi)
			os.remove(fn_wav)
			self.refresh_GUI()
		else:
			self.message('Please choose an .sf2 under "Audio format" to preview notes.')
	def write_audio(self):
		'''
		Renders the current note dict to <save_path>/<file_out>. 'MIDI' and
		.sf2 formats write a .mid file (for .sf2 fluidsynth then renders a
		.wav); 'Piano'/'Analog' delegate to self.synth()/self.neuralstream()
		(defined elsewhere in this class — not visible here).
		'''
		if not self.check_data():
			return
		self.process_H_W()
		if self.cfg['audio_format'] == 'MIDI' or self.cfg['audio_format'].endswith('.sf2'):
			fn_midi = os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.mid'
			fn_wav = os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.wav'
			MIDI = MIDIFile(1) # One track
			MIDI.addTempo(0,0,60) # addTempo(track, time, tempo)
			for j in range(len(self.nd['note'])):
				# addNote(track, channel, pitch, time + i, duration, volume)
				MIDI.addNote(0, 0, self.nd['note'][j], self.nd['st'][j], (self.nd['en'][j]-self.nd['st'][j]), self.nd['mag'][j])
			with open(fn_midi, 'wb') as mid:
				MIDI.writeFile(mid)
			if self.cfg['audio_format'].endswith('.sf2'):
				fn_font = os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts',self.cfg['audio_format'])
				os.system('fluidsynth -ni -F {} -r 44100 {} {}'.format(fn_wav,fn_font,fn_midi))
		elif self.cfg['audio_format'] == 'Piano':
			self.synth()
		elif self.cfg['audio_format'] == 'Analog':
			self.neuralstream()
		self.message(f'Audio file written to {self.cfg["save_path"]}')
	def write_video(self):
		'''
		Writes video file using self.data['H_pp'] by piping PNG frames into
		an ffmpeg subprocess (see
		http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/).
		Output goes to <save_path>/<file_out>.mp4; returns self for chaining.
		'''
		if not self.check_data():
			return
		self.process_H_W()
		fn_vid = os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.mp4'
		v_shape = self.data['W_shape'][::-1][1:] # Reverse because ffmpeg does hxw
		command = [ 'ffmpeg',
			'-loglevel', 'warning',
			'-hide_banner',
			'-y',
			'-f', 'image2pipe',
			'-vcodec','png',
			'-s', '{}x{}'.format(v_shape[0],v_shape[1]),
			'-r', str(self.cfg['fr']*self.cfg['speed']/100),
			'-i', '-', # The input comes from a pipe
			'-an', # Tells FFMPEG not to expect any audio
			'-q:v','2',
			'-vcodec', 'mpeg4',
			fn_vid]
		pipe = sp.Popen( command, stdin=sp.PIPE)
		nframes = len(self.data['H_pp'].T)
		for i in range(nframes):
			# colorize frame i: (pixels x comps) @ diag(H column) @ RGB cmap
			frame = (self.data['W_pp']@np.diag(self.data['H_pp'][:,i])@self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8')
			im = Image.fromarray(frame)
			im.save(pipe.stdin, 'PNG')
			if self.display and i%10==0:
				self.status['text'] = f'Writing video file, {i} out of {nframes} frames written'
				self.update()
		pipe.stdin.close()
		pipe.wait()
		self.message(f'Video file written to {self.cfg["save_path"]}')
		return self
def merge(self):
'''
Merges video and audio with ffmpeg
'''
if self.check_data():
fn = os.path.join(self.cfg['save_path'],self.cfg['file_out'])
cmd = 'ffmpeg -hide_banner -loglevel warning -y -i {} -i {} -c:v copy -c:a aac {}'.format(fn+'.mp4',fn+'.wav',fn+'_AV.mp4')
os.system(cmd)
self.message(f'A/V file written to {self.cfg["save_path"]}')
return self
def write_AV(self):
'''
'''
if self.check_data():
self.write_video()
self.write_audio()
self.merge()
if not self.display:
return self
def cleanup(self):
'''
Tries to remove any files that are video or audio only.
'''
fn = os.path.join(self.cfg['save_path'],self.cfg['file_out'])
try: os.remove(fn+'.mp4')
except OSError: pass
try: os.remove(fn+'.wav')
except OSError: pass
try: os.remove(fn+'.mid')
except OSError: pass
self.message(f'A/V only videos removed')
return self
	def edit_save_path(self):
		# Directory-picker for the 'Save Path' field; starts at the current save_path.
		self.save_path.set(fd.askdirectory(title='Select a directory to save output files',initialdir=self.cfg['save_path']))
def initGUI(self):
'''
'''
self.winfo_toplevel().title('pyanthem v{}'.format(pkg_resources.require("pyanthem")[0].version))
self.protocol("WM_DELETE_WINDOW", self.quit)
# StringVars
self.file_in=init_entry('')
self.file_out=init_entry('')
self.save_path=init_entry('')
self.speed=init_entry(100)
self.fr=init_entry(0)
self.start_percent=init_entry(0)
self.end_percent=init_entry(100)
self.baseline=init_entry(0)
self.brightness=init_entry(0)
self.threshold=init_entry(0)
self.octave_add=init_entry('2')
self.scale_type=init_entry('Maj. 7 (4/oct)')
self.key=init_entry('C')
self.audio_format=init_entry('Analog')
self.Wshow=init_entry('all')
self.cmapchoice=init_entry('jet')
# Labels
Label(text='',font='Helvetica 1 bold').grid(row=0,column=0) # Just to give a border around Seperators
Label(text='File Parameters',font='Helvetica 14 bold').grid(row=1,column=1,columnspan=2,sticky='WE')
Label(text='Movie Parameters',font='Helvetica 14 bold').grid(row=1,column=3,columnspan=2,sticky='WE')
Label(text='Audio Parameters',font='Helvetica 14 bold').grid(row=1,column=5,columnspan=2,sticky='WE')
Label(text='Input Filename').grid(row=2, column=1,columnspan=2,sticky='W')
Label(text='Output Filename').grid(row=4, column=1,columnspan=2,sticky='W')
Label(text='Save Path').grid(row=6, column=1,columnspan=1,sticky='W')
Label(text='Speed (%)').grid(row=2, column=3, sticky='E')
Label(text='Start (%)').grid(row=3, column=3, sticky='E')
Label(text='End (%)').grid(row=4, column=3, sticky='E')
Label(text='Baseline').grid(row=5, column=3, sticky='E')
Label(text='Max brightness').grid(row=6, column=3, sticky='E')
Label(text='Colormap').grid(row=7, column=3, sticky='E')
Label(text='Threshold').grid(row=2, column=5, sticky='E')
Label(text='Octave').grid(row=3, column=5, sticky='E')
Label(text='Scale Type').grid(row=4, column=5, sticky='E')
Label(text='Key').grid(row=5, column=5, sticky='E')
Label(text='Audio format').grid(row=6, column=5, sticky='E')
# Messages
self.status = Message(text='> Welcome to pyanthem v{}'.format(pkg_resources.require("pyanthem")[0].version),bg='white',fg='black',width=450)
self.status.grid(row=9, column=2, columnspan=5, sticky='NESW')
self.status['anchor']='nw'
# Entries
Entry(textvariable=self.file_in).grid(row=3, column=1,columnspan=2,sticky='W')
Entry(textvariable=self.file_out).grid(row=5, column=1,columnspan=2,sticky='W')
Entry(textvariable=self.save_path,width=17).grid(row=7, column=1,columnspan=2,sticky='EW')
Entry(textvariable=self.speed,width=7).grid(row=2, column=4, sticky='W')
Entry(textvariable=self.start_percent,width=7).grid(row=3, column=4, sticky='W')
Entry(textvariable=self.end_percent,width=7).grid(row=4, column=4, sticky='W')
Entry(textvariable=self.baseline,width=7).grid(row=5, column=4, sticky='W')
Entry(textvariable=self.brightness,width=7).grid(row=6, column=4, sticky='W')
Entry(textvariable=self.threshold,width=7).grid(row=2, column=6, sticky='W')
# Buttons
Button(text='Edit',command=self.edit_save_path,width=5).grid(row=6, column=2)
Button(text='Preview Notes',width=11,command=self.preview_notes).grid(row=7, column=5,columnspan=2)
Button(text='Update',width=7,font='Helvetica 14 bold',command=self.process_H_W).grid(row=9, column=1,columnspan=1)
# Option/combobox values
audio_format_opts = ['Analog']
sf_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts')
if os.path.isdir(sf_path):
fonts_avail = text_files = [f for f in os.listdir(sf_path) if f.endswith('.sf2')]
audio_format_opts.extend(fonts_avail)
# Option Menus
octave_add_menu = OptionMenu(self,self.octave_add,*octave_add_opts.keys())
octave_add_menu.config(width=7)
octave_add_menu.grid(row=3, column=6, sticky='W')
scale_type_menu=OptionMenu(self,self.scale_type,*scale_keys.keys())
scale_type_menu.config(width=11,font=(self.default_font,(8)))
scale_type_menu.grid(row=4, column=6, sticky='W')
key_menu=OptionMenu(self,self.key,*key_opts.keys())
key_menu.config(width=7)
key_menu.grid(row=5, column=6, sticky='W')
audio_format_menu=OptionMenu(self,self.audio_format,*audio_format_opts)
audio_format_menu.config(width=7)
audio_format_menu.grid(row=6, column=6, sticky='W')
# Combo box
self.cmapchooser = Combobox(self,textvariable=self.cmapchoice,width=5)
self.cmapchooser['values'] = cmaps_opts
#self.cmapchooser['state'] = 'readonly'
self.cmapchooser.grid(row=7, column=4, sticky='W')
self.cmapchooser.current()
self.cmap = []
# Menu bar
menubar=Menu(self)
filemenu=Menu(menubar, tearoff=0)
filemenu.add_command(label="Load from .mat", command=self.load_GUI)
filemenu.add_command(label="Load .cfg", command=self.load_config)
filemenu.add_command(label="Quit",command=self.quit,accelerator="Ctrl+Q")
savemenu=Menu(menubar, tearoff=0)
savemenu.add_command(label="Audio", command=self.write_audio)
savemenu.add_command(label="Video", command=self.write_video)
savemenu.add_command(label="Merge A/V", command=self.merge)
savemenu.add_command(label="Write A/V then merge", command=self.write_AV)
savemenu.add_command(label="Cleanup", command=self.cleanup)
cfgmenu=Menu(menubar, tearoff=0)
cfgmenu.add_command(label="Save", command=self.dump_cfg)
cfgmenu.add_command(label="View", command=self.view_cfg)
debugmenu=Menu(menubar, tearoff=0)
debugmenu.add_command(label="Query", command=self.query)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Save", menu=savemenu)
menubar.add_cascade(label="Config", menu=cfgmenu)
menubar.add_cascade(label="Debug", menu=debugmenu)
self.config(menu=menubar)
# Seperators
s_v=[[0,1,9],[2,1,8],[4,1,8],[6,1,9]]
s_h=[[1,1,6],[1,2,6],[1,9,6],[1,10,6],[1,4,2],[1,6,2]]
for sv in s_v:
Separator(self, orient='vertical').grid(column=sv[0], row=sv[1], rowspan=sv[2], sticky='nse')
for sh in s_h:
Separator(self, orient='horizontal').grid(column=sh[0], row=sh[1], columnspan=sh[2], sticky='nwe')
# Offset
self.offsetH=IntVar()
self.offsetH.set(1)
# frameslider
self.frameslider=Scale(self, from_=0, to=1, orient=HORIZONTAL)
self.frameslider['command']=self.refresh_slider
# Bind shortcuts
self.bind_all("<Control-q>", self.quit)
self.bind_all("<Control-a>", lambda:[self.process_H_W(),self.refresh_GUI()])
	def init_plots(self):
		'''
		(Re)creates the embedded matplotlib figures: figH (H traces + audio
		preview) on the right and figW (video preview + spatial maps) at the
		lower left, plus the frame slider and 'components to show' entry.
		'''
		# H
		self.figH = plt.Figure(figsize=(6,6), dpi=100, tight_layout=True)
		self.Hax1 = self.figH.add_subplot(211)
		self.Hax2 = self.figH.add_subplot(212)
		self.Hax1.set_title('Temporal Data (H)')
		self.Hax2.set_title('Audio Preview (H\')')
		self.canvas_H = FigureCanvasTkAgg(self.figH, master=self)
		self.canvas_H.get_tk_widget().grid(row=1,column=7,rowspan=29,columnspan=10)
		bg = self.status.winfo_rgb(self['bg'])
		# winfo_rgb returns 16-bit channels; >>8 scales them to 0-255 before /255
		self.figH.set_facecolor([(x>>8)/255 for x in bg])
		#self.canvas_H.draw()
		# Checkbox
		Checkbutton(self, text="Offset H",command=self.refresh_GUI,variable=self.offsetH).grid(row=1,rowspan=1,column=16)
		# W
		self.figW = plt.Figure(figsize=(6,3), dpi=100, constrained_layout=True)
		self.Wax1 = self.figW.add_subplot(121)
		self.Wax2 = self.figW.add_subplot(122)
		self.Wax1.set_title('Video Preview')
		self.Wax2.set_title('Spatial Data (W)')
		self.Wax1.axis('off')
		self.Wax2.axis('off')
		self.canvas_W = FigureCanvasTkAgg(self.figW, master=self)
		self.canvas_W.get_tk_widget().grid(row=11,column=1,rowspan=19,columnspan=6)
		self.figW.set_facecolor([(x>>8)/255 for x in bg])
		#self.canvas_W.draw()
		# Frameslider
		self.frameslider.grid(row=30, column=1, columnspan=3,sticky='EW')
		# Wshow
		Label(text='Components to show:').grid(row=30, column=3, columnspan=3, sticky='E')
		Entry(textvariable=self.Wshow,width=15,justify='center').grid(row=30, column=5, columnspan=2,sticky='E')
def process_raw(self,file_in=None,n_clusters=None,frame_rate=None,save=False):
'''
Decomposes raw dataset. Can be used in two ways: as a part of the
GUI class for immediate processing (e.g. process_raw().write_AV()),
or as a method to save a new dataset.
'''
if filein is None:
filein=uiopen(title='Select .mat file for import',filetypes=[('.mat files','*.mat')])
if filein == '.':
return
dh, var = loadmat(file_in),whosmat(file_in)
data = dh[var[0][0]]
sh = data.shape
if len(sh) != 3:
self.message('ERROR: input dataset is not 3D.')
return
data = data.reshape(sh[0]*sh[1],sh[2])
# Ignore rows with any nans
nanidx = np.any(np.isnan(data), axis=1)
data_nn = data[~nanidx] # nn=non-nan
# k-means
print('Performing k-means...',end='')
if n_clusters is None:
n_clusters = int(len(data)**.25) # Default k is the 4th root of the number of samples per frame (for 256x256, this would be 16)
print(f'No num_clusters given. Defaulting to {n_clusters}...',end='')
idx_nn = KMeans(n_clusters=n_clusters, random_state=0).fit(data_nn).labels_
idx = np.zeros((len(data),))
idx[nanidx==False] = idx_nn
# TCs
H = np.zeros((n_clusters,len(data.T)))
for i in range(n_clusters):
H[i,:] = np.nanmean(data[idx==i,:],axis=0)
print('done.')
# NNLS
nnidx=np.where(~nanidx)[0]
W = np.zeros((len(data),n_clusters))
print('Performing NNLS...',end='')
for i in range(len(nnidx)):
W[nnidx[i],:]=nnls(H.T,data_nn[i,:])[0]
# Sort bottom to top
xc,yc = [], []
(X,Y) = np.meshgrid(range(sh[0]),range(sh[1]))
for i in range(len(W.T)):
Wtmp = W[:,i].reshape(sh[0],sh[1])
xc.append((X*Wtmp).sum() / Wtmp.sum().astype("float"))
yc.append((Y*Wtmp).sum() / Wtmp.sum().astype("float"))
I = np.argsort(yc).reverse() # Reverse orders from bottom to top
W, H = W[:,I],H[I,:]
print('done.')
# Assign variables and save
self.data = {}
self.data['H'] = H
self.data['W'] = W.reshape(sh[0],sh[1],n_clusters)
self.data['W_shape'] = self.data['W'].shape
if frame_rate == []:
self.data['fr'] = 10
print('No fr given. Defaulting to 10')
else:
self.data['fr'] = frame_rate
if save:
fn = file_in.replace('.mat','_decomp.mat')
savemat(fn,self.data)
self.message(f'Decomposed data file saved to {fn}')
# Reshape W here, since any use of self from here would require a flattened W
self.data['W'] = self.data['W'].reshape(self.data['W'].shape[0]*self.data['W'].shape[1],self.data['W'].shape[2])
return self
def query(self):
field = simpledialog.askstring("Input", "Query a root property",parent=self)
try:
self.status['text'] = str(getattr(self,field))
except:
self.status['text'] = 'Bad query.'
def view_cfg(self):
'''
Only works once!
'''
self.cfginfo = Toplevel()
text = ''
try:
for key in self.cfg:
text+=str(key)+': '+str(self.cfg[key])+'\n'
except:
pass
self.cfginfotext = Message(self.cfginfo,text=text)
self.cfginfotext.pack()
def help(self):
print('To load a dataset:\npyanthem.load_data()\n\nTo load a cfg file:\npyanthem.load_config()\n\nTo write video:\npyanthem.write_video()\n\nTo write audio:\npyanthem.write_audio()')
if __name__ == "__main__":
	# Launch the GUI when executed as a script
	run()
# Editor regex used for the settings-access migration:
#   find:    self\.([a-z_]{1,14})\.get\(\)
#   replace: self\.cfg\[$1\]
"tkinter.filedialog.askdirectory",
"scipy.io.savemat",
"git.Repo.clone_from",
"pkg_resources.require",
"scipy.io.loadmat",
"matplotlib.pyplot.Figure",
"tkinter.Button",
"scipy.interpolate.interp1d",
"scipy.optimize.nnls",
"numpy.nanmean",
"numpy.argsort",
"tkinter.Label",
"sys.exit",
"tkin... | [((2443, 2447), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (2445, 2447), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((1392, 1414), 'os.path.isdir', 'os.path.isdir', (['sf_path'], {}), '(sf_path)\n', (1405, 1414), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((1418, 1435), 'os.mkdir', 'os.mkdir', (['sf_path'], {}), '(sf_path)\n', (1426, 1435), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((1990, 2001), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1999, 2001), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((2019, 2030), 'tkinter.DoubleVar', 'DoubleVar', ([], {}), '()\n', (2028, 2030), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((2493, 2545), 'tkinter.filedialog.askopenfilename', 'fd.askopenfilename', ([], {'title': 'title', 'filetypes': 'filetypes'}), '(title=title, filetypes=filetypes)\n', (2511, 2545), True, 'from tkinter import filedialog as fd\n'), ((5632, 5647), 'scipy.io.loadmat', 'loadmat', (['filein'], {}), '(filein)\n', (5639, 5647), False, 'from scipy.io import loadmat, savemat, whosmat\n'), ((13401, 13426), 'numpy.max', 'np.max', (["self.data['H_pp']"], {}), "(self.data['H_pp'])\n", (13407, 13426), True, 'import numpy as np\n'), ((18980, 19012), 'subprocess.Popen', 'sp.Popen', (['command'], {'stdin': 'sp.PIPE'}), '(command, stdin=sp.PIPE)\n', (18988, 19012), True, 'import subprocess as sp\n'), ((20235, 20292), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (20247, 20292), False, 
'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((24148, 24170), 'os.path.isdir', 'os.path.isdir', (['sf_path'], {}), '(sf_path)\n', (24161, 24170), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((24813, 24868), 'tkinter.OptionMenu', 'OptionMenu', (['self', 'self.audio_format', '*audio_format_opts'], {}), '(self, self.audio_format, *audio_format_opts)\n', (24823, 24868), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((24995, 25048), 'tkinter.ttk.Combobox', 'Combobox', (['self'], {'textvariable': 'self.cmapchoice', 'width': '(5)'}), '(self, textvariable=self.cmapchoice, width=5)\n', (25003, 25048), False, 'from tkinter.ttk import Progressbar, Separator, Combobox\n'), ((25254, 25264), 'tkinter.Menu', 'Menu', (['self'], {}), '(self)\n', (25258, 25264), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((25276, 25300), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (25280, 25300), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((25527, 25551), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (25531, 25551), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((25891, 25915), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (25895, 25915), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, 
simpledialog, messagebox, Toplevel\n'), ((26047, 26071), 'tkinter.Menu', 'Menu', (['menubar'], {'tearoff': '(0)'}), '(menubar, tearoff=0)\n', (26051, 26071), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((26740, 26748), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (26746, 26748), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((26809, 26854), 'tkinter.Scale', 'Scale', (['self'], {'from_': '(0)', 'to': '(1)', 'orient': 'HORIZONTAL'}), '(self, from_=0, to=1, orient=HORIZONTAL)\n', (26814, 26854), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((27105, 27159), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {'figsize': '(6, 6)', 'dpi': '(100)', 'tight_layout': '(True)'}), '(figsize=(6, 6), dpi=100, tight_layout=True)\n', (27115, 27159), True, 'import matplotlib.pyplot as plt\n'), ((27347, 27388), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.figH'], {'master': 'self'}), '(self.figH, master=self)\n', (27364, 27388), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((27735, 27795), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {'figsize': '(6, 3)', 'dpi': '(100)', 'constrained_layout': '(True)'}), '(figsize=(6, 3), dpi=100, constrained_layout=True)\n', (27745, 27795), True, 'import matplotlib.pyplot as plt\n'), ((28024, 28065), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.figW'], {'master': 'self'}), '(self.figW, master=self)\n', (28041, 28065), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((31048, 31117), 
'tkinter.simpledialog.askstring', 'simpledialog.askstring', (['"""Input"""', '"""Query a root property"""'], {'parent': 'self'}), "('Input', 'Query a root property', parent=self)\n", (31070, 31117), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((31292, 31302), 'tkinter.Toplevel', 'Toplevel', ([], {}), '()\n', (31300, 31302), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((31433, 31465), 'tkinter.Message', 'Message', (['self.cfginfo'], {'text': 'text'}), '(self.cfginfo, text=text)\n', (31440, 31465), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((1336, 1361), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1351, 1361), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((3129, 3146), 'tkinter.Tk.__init__', 'Tk.__init__', (['self'], {}), '(self)\n', (3140, 3146), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((3168, 3200), 'tkinter.font.nametofont', 'font.nametofont', (['"""TkDefaultFont"""'], {}), "('TkDefaultFont')\n", (3183, 3200), True, 'import tkinter.font as font\n'), ((5022, 5041), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5035, 5041), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((7745, 7759), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7756, 7759), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((9652, 9664), 'numpy.min', 'np.min', (['tmpH'], {}), '(tmpH)\n', (9658, 9664), True, 
'import numpy as np\n'), ((9666, 9678), 'numpy.max', 'np.max', (['tmpH'], {}), '(tmpH)\n', (9672, 9678), True, 'import numpy as np\n'), ((13458, 13485), 'numpy.shape', 'np.shape', (["self.data['H_pp']"], {}), "(self.data['H_pp'])\n", (13466, 13485), True, 'import numpy as np\n'), ((13787, 13799), 'numpy.diff', 'np.diff', (['H_b'], {}), '(H_b)\n', (13794, 13799), True, 'import numpy as np\n'), ((13808, 13828), 'numpy.argwhere', 'np.argwhere', (['(TC == 1)'], {}), '(TC == 1)\n', (13819, 13828), True, 'import numpy as np\n'), ((13837, 13858), 'numpy.argwhere', 'np.argwhere', (['(TC == -1)'], {}), '(TC == -1)\n', (13848, 13858), True, 'import numpy as np\n'), ((16087, 16098), 'midiutil.MIDIFile', 'MIDIFile', (['(1)'], {}), '(1)\n', (16095, 16098), False, 'from midiutil import MIDIFile\n'), ((16410, 16424), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (16419, 16424), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((16428, 16446), 'pygame.mixer.music.load', 'music.load', (['fn_wav'], {}), '(fn_wav)\n', (16438, 16446), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((16906, 16924), 'os.remove', 'os.remove', (['fn_midi'], {}), '(fn_midi)\n', (16915, 16924), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((16928, 16945), 'os.remove', 'os.remove', (['fn_wav'], {}), '(fn_wav)\n', (16937, 16945), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((17404, 17415), 'midiutil.MIDIFile', 'MIDIFile', (['(1)'], {}), '(1)\n', (17412, 17415), False, 'from midiutil import MIDIFile\n'), ((18466, 18523), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (18478, 18523), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((19290, 19312), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (19305, 19312), True, 'import PIL.Image as 
Image\n'), ((19693, 19750), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (19705, 19750), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((19880, 19894), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (19889, 19894), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((20299, 20321), 'os.remove', 'os.remove', (["(fn + '.mp4')"], {}), "(fn + '.mp4')\n", (20308, 20321), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((20350, 20372), 'os.remove', 'os.remove', (["(fn + '.wav')"], {}), "(fn + '.wav')\n", (20359, 20372), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((20401, 20423), 'os.remove', 'os.remove', (["(fn + '.mid')"], {}), "(fn + '.mid')\n", (20410, 20423), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((20551, 20654), 'tkinter.filedialog.askdirectory', 'fd.askdirectory', ([], {'title': '"""Select a directory to save output files"""', 'initialdir': "self.cfg['save_path']"}), "(title='Select a directory to save output files', initialdir\n =self.cfg['save_path'])\n", (20566, 20654), True, 'from tkinter import filedialog as fd\n'), ((28937, 28953), 'scipy.io.loadmat', 'loadmat', (['file_in'], {}), '(file_in)\n', (28944, 28953), False, 'from scipy.io import loadmat, savemat, whosmat\n'), ((28954, 28970), 'scipy.io.whosmat', 'whosmat', (['file_in'], {}), '(file_in)\n', (28961, 28970), False, 'from scipy.io import loadmat, savemat, whosmat\n'), ((29181, 29195), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (29189, 29195), True, 'import numpy as np\n'), ((29755, 29792), 'numpy.nanmean', 'np.nanmean', (['data[idx == i, :]'], {'axis': '(0)'}), '(data[idx == i, :], axis=0)\n', (29765, 29792), True, 'import numpy as np\n'), ((29823, 29840), 'numpy.where', 'np.where', (['(~nanidx)'], {}), '(~nanidx)\n', (29831, 29840), True, 'import 
numpy as np\n'), ((30730, 30752), 'scipy.io.savemat', 'savemat', (['fn', 'self.data'], {}), '(fn, self.data)\n', (30737, 30752), False, 'from scipy.io import loadmat, savemat, whosmat\n'), ((1466, 1502), 'os.path.join', 'os.path.join', (['sf_path', "(font + '.sf2')"], {}), "(sf_path, font + '.sf2')\n", (1478, 1502), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((3527, 3537), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3535, 3537), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((4967, 4992), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4982, 4992), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((5126, 5197), 'git.Repo.clone_from', 'Repo.clone_from', (['"""https://github.com/nicthib/anthem_datasets.git"""', 'path'], {}), "('https://github.com/nicthib/anthem_datasets.git', path)\n", (5141, 5197), False, 'from git import Repo\n'), ((6617, 6638), 'os.path.split', 'os.path.split', (['filein'], {}), '(filein)\n', (6630, 6638), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((7190, 7247), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (7202, 7247), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((9277, 9302), 'numpy.max', 'np.max', (["self.data['H_pp']"], {}), "(self.data['H_pp'])\n", (9283, 9302), True, 'import numpy as np\n'), ((11971, 12019), 're.match', 're.match', (['"""^\\\\[[0-9,: ]*\\\\]$"""', "self.cfg['Wshow']"], {}), "('^\\\\[[0-9,: ]*\\\\]$', self.cfg['Wshow'])\n", (11979, 12019), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((13612, 13649), 'scipy.interpolate.interp1d', 'interp1d', (['t1', "self.data['H_pp'][i, :]"], {}), "(t1, self.data['H_pp'][i, :])\n", (13620, 13649), False, 'from scipy.interpolate import interp1d\n'), ((14252, 14277), 'numpy.max', 'np.max', 
(['H_rs[st[j]:en[j]]'], {}), '(H_rs[st[j]:en[j]])\n', (14258, 14277), True, 'import numpy as np\n'), ((15860, 15870), 'pygame.mixer.get_init', 'get_init', ([], {}), '()\n', (15868, 15870), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((15972, 16001), 'pygame.mixer.pre_init', 'pre_init', (['(44100)', '(-16)', '(2)', '(1024)'], {}), '(44100, -16, 2, 1024)\n', (15980, 16001), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((16006, 16012), 'pygame.mixer.init', 'init', ([], {}), '()\n', (16010, 16012), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((16017, 16038), 'pygame.mixer.set_num_channels', 'set_num_channels', (['(128)'], {}), '(128)\n', (16033, 16038), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((16490, 16501), 'time.time', 'time.time', ([], {}), '()\n', (16499, 16501), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((17254, 17311), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (17266, 17311), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((17330, 17387), 'os.path.join', 'os.path.join', (["self.cfg['save_path']", "self.cfg['file_out']"], {}), "(self.cfg['save_path'], self.cfg['file_out'])\n", (17342, 17387), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((21389, 21428), 'tkinter.Label', 'Label', ([], {'text': '""""""', 'font': '"""Helvetica 1 bold"""'}), "(text='', font='Helvetica 1 bold')\n", (21394, 21428), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21493, 21548), 'tkinter.Label', 'Label', ([], {'text': '"""File Parameters"""', 'font': '"""Helvetica 14 
bold"""'}), "(text='File Parameters', font='Helvetica 14 bold')\n", (21498, 21548), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21596, 21652), 'tkinter.Label', 'Label', ([], {'text': '"""Movie Parameters"""', 'font': '"""Helvetica 14 bold"""'}), "(text='Movie Parameters', font='Helvetica 14 bold')\n", (21601, 21652), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21700, 21756), 'tkinter.Label', 'Label', ([], {'text': '"""Audio Parameters"""', 'font': '"""Helvetica 14 bold"""'}), "(text='Audio Parameters', font='Helvetica 14 bold')\n", (21705, 21756), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21804, 21832), 'tkinter.Label', 'Label', ([], {'text': '"""Input Filename"""'}), "(text='Input Filename')\n", (21809, 21832), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21881, 21910), 'tkinter.Label', 'Label', ([], {'text': '"""Output Filename"""'}), "(text='Output Filename')\n", (21886, 21910), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((21959, 21982), 'tkinter.Label', 'Label', ([], {'text': '"""Save Path"""'}), "(text='Save Path')\n", (21964, 21982), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22031, 22054), 'tkinter.Label', 'Label', ([], 
{'text': '"""Speed (%)"""'}), "(text='Speed (%)')\n", (22036, 22054), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22091, 22114), 'tkinter.Label', 'Label', ([], {'text': '"""Start (%)"""'}), "(text='Start (%)')\n", (22096, 22114), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22151, 22172), 'tkinter.Label', 'Label', ([], {'text': '"""End (%)"""'}), "(text='End (%)')\n", (22156, 22172), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22209, 22231), 'tkinter.Label', 'Label', ([], {'text': '"""Baseline"""'}), "(text='Baseline')\n", (22214, 22231), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22268, 22296), 'tkinter.Label', 'Label', ([], {'text': '"""Max brightness"""'}), "(text='Max brightness')\n", (22273, 22296), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22333, 22355), 'tkinter.Label', 'Label', ([], {'text': '"""Colormap"""'}), "(text='Colormap')\n", (22338, 22355), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22392, 22415), 'tkinter.Label', 'Label', ([], {'text': '"""Threshold"""'}), "(text='Threshold')\n", (22397, 22415), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, 
IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22452, 22472), 'tkinter.Label', 'Label', ([], {'text': '"""Octave"""'}), "(text='Octave')\n", (22457, 22472), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22509, 22533), 'tkinter.Label', 'Label', ([], {'text': '"""Scale Type"""'}), "(text='Scale Type')\n", (22514, 22533), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22570, 22587), 'tkinter.Label', 'Label', ([], {'text': '"""Key"""'}), "(text='Key')\n", (22575, 22587), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22624, 22650), 'tkinter.Label', 'Label', ([], {'text': '"""Audio format"""'}), "(text='Audio format')\n", (22629, 22650), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((22951, 22983), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.file_in'}), '(textvariable=self.file_in)\n', (22956, 22983), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23032, 23065), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.file_out'}), '(textvariable=self.file_out)\n', (23037, 23065), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23114, 23158), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.save_path', 'width': '(17)'}), 
'(textvariable=self.save_path, width=17)\n', (23119, 23158), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23207, 23246), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.speed', 'width': '(7)'}), '(textvariable=self.speed, width=7)\n', (23212, 23246), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23282, 23329), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.start_percent', 'width': '(7)'}), '(textvariable=self.start_percent, width=7)\n', (23287, 23329), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23365, 23410), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.end_percent', 'width': '(7)'}), '(textvariable=self.end_percent, width=7)\n', (23370, 23410), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23446, 23488), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.baseline', 'width': '(7)'}), '(textvariable=self.baseline, width=7)\n', (23451, 23488), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23524, 23568), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.brightness', 'width': '(7)'}), '(textvariable=self.brightness, width=7)\n', (23529, 23568), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23604, 23647), 
'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.threshold', 'width': '(7)'}), '(textvariable=self.threshold, width=7)\n', (23609, 23647), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23696, 23753), 'tkinter.Button', 'Button', ([], {'text': '"""Edit"""', 'command': 'self.edit_save_path', 'width': '(5)'}), "(text='Edit', command=self.edit_save_path, width=5)\n", (23702, 23753), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23776, 23842), 'tkinter.Button', 'Button', ([], {'text': '"""Preview Notes"""', 'width': '(11)', 'command': 'self.preview_notes'}), "(text='Preview Notes', width=11, command=self.preview_notes)\n", (23782, 23842), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((23878, 23965), 'tkinter.Button', 'Button', ([], {'text': '"""Update"""', 'width': '(7)', 'font': '"""Helvetica 14 bold"""', 'command': 'self.process_H_W'}), "(text='Update', width=7, font='Helvetica 14 bold', command=self.\n process_H_W)\n", (23884, 23965), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((24095, 24120), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (24110, 24120), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((27600, 27688), 'tkinter.Checkbutton', 'Checkbutton', (['self'], {'text': '"""Offset H"""', 'command': 'self.refresh_GUI', 'variable': 'self.offsetH'}), "(self, text='Offset H', command=self.refresh_GUI, variable=self.\n offsetH)\n", (27611, 27688), False, 'from 
tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((28322, 28355), 'tkinter.Label', 'Label', ([], {'text': '"""Components to show:"""'}), "(text='Components to show:')\n", (28327, 28355), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((28407, 28465), 'tkinter.Entry', 'Entry', ([], {'textvariable': 'self.Wshow', 'width': '(15)', 'justify': '"""center"""'}), "(textvariable=self.Wshow, width=15, justify='center')\n", (28412, 28465), False, 'from tkinter import StringVar, DoubleVar, Tk, Label, Entry, Button, OptionMenu, Checkbutton, Message, Menu, IntVar, Scale, HORIZONTAL, simpledialog, messagebox, Toplevel\n'), ((29967, 29991), 'scipy.optimize.nnls', 'nnls', (['H.T', 'data_nn[i, :]'], {}), '(H.T, data_nn[i, :])\n', (29971, 29991), False, 'from scipy.optimize import nnls\n'), ((30270, 30284), 'numpy.argsort', 'np.argsort', (['yc'], {}), '(yc)\n', (30280, 30284), True, 'import numpy as np\n'), ((1577, 1613), 'os.path.join', 'os.path.join', (['sf_path', "(font + '.sf2')"], {}), "(sf_path, font + '.sf2')\n", (1589, 1613), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((9226, 9246), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (9238, 9246), True, 'import matplotlib.pyplot as plt\n'), ((9392, 9412), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (9404, 9412), True, 'import matplotlib.pyplot as plt\n'), ((12248, 12257), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (12254, 12257), True, 'import numpy as np\n'), ((13947, 13969), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['st'], {}), '(st)\n', (13965, 13969), True, 'import numpy as np\n'), ((13987, 14009), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['en'], {}), 
'(en)\n', (14005, 14009), True, 'import numpy as np\n'), ((15615, 15640), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (15630, 15640), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((15729, 15754), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (15744, 15754), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((15812, 15837), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (15827, 15837), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((16841, 16854), 'pygame.mixer.music.play', 'music.play', (['(0)'], {}), '(0)\n', (16851, 16854), False, 'from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music\n'), ((17858, 17883), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (17873, 17883), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((24214, 24233), 'os.listdir', 'os.listdir', (['sf_path'], {}), '(sf_path)\n', (24224, 24233), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((26500, 26534), 'tkinter.ttk.Separator', 'Separator', (['self'], {'orient': '"""vertical"""'}), "(self, orient='vertical')\n", (26509, 26534), False, 'from tkinter.ttk import Progressbar, Separator, Combobox\n'), ((26614, 26650), 'tkinter.ttk.Separator', 'Separator', (['self'], {'orient': '"""horizontal"""'}), "(self, orient='horizontal')\n", (26623, 26650), False, 'from tkinter.ttk import Progressbar, Separator, Combobox\n'), ((29536, 29581), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(0)'}), '(n_clusters=n_clusters, random_state=0)\n', (29542, 29581), False, 'from sklearn.cluster import KMeans\n'), ((2768, 2801), 'pkg_resources.require', 'pkg_resources.require', (['"""pyanthem"""'], {}), "('pyanthem')\n", (2789, 2801), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((6503, 
6524), 'os.path.split', 'os.path.split', (['filein'], {}), '(filein)\n', (6516, 6524), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((20738, 20771), 'pkg_resources.require', 'pkg_resources.require', (['"""pyanthem"""'], {}), "('pyanthem')\n", (20759, 20771), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((22763, 22796), 'pkg_resources.require', 'pkg_resources.require', (['"""pyanthem"""'], {}), "('pyanthem')\n", (22784, 22796), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((13898, 13925), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['(en - st)'], {}), '(en - st)\n', (13916, 13925), True, 'import numpy as np\n'), ((6760, 6784), 'numpy.mean', 'np.mean', (['self.data[Hstr]'], {}), '(self.data[Hstr])\n', (6767, 6784), True, 'import numpy as np\n'), ((6785, 6808), 'numpy.std', 'np.std', (['self.data[Hstr]'], {}), '(self.data[Hstr])\n', (6791, 6808), True, 'import numpy as np\n'), ((6854, 6878), 'numpy.mean', 'np.mean', (['self.data[Hstr]'], {}), '(self.data[Hstr])\n', (6861, 6878), True, 'import numpy as np\n'), ((6879, 6902), 'numpy.std', 'np.std', (['self.data[Hstr]'], {}), '(self.data[Hstr])\n', (6885, 6902), True, 'import numpy as np\n'), ((16882, 16893), 'time.time', 'time.time', ([], {}), '()\n', (16891, 16893), False, 'import os, random, sys, time, csv, pickle, re, pkg_resources\n'), ((11128, 11153), 'numpy.max', 'np.max', (["self.data['W_pp']"], {}), "(self.data['W_pp'])\n", (11134, 11153), True, 'import numpy as np\n'), ((16655, 16680), 'numpy.max', 'np.max', (["self.data['W_pp']"], {}), "(self.data['W_pp'])\n", (16661, 16680), True, 'import numpy as np\n'), ((19108, 19140), 'numpy.diag', 'np.diag', (["self.data['H_pp'][:, i]"], {}), "(self.data['H_pp'][:, i])\n", (19115, 19140), True, 'import numpy as np\n')] |
from typing import Any, List, Mapping
import numpy as np
from scipy.interpolate import LinearNDInterpolator
from skimage.color import rgb2gray
from skimage.filters import gaussian, threshold_otsu
from skimage.morphology import binary_dilation, binary_erosion, dilation, disk
from morphocut import Node, Output, RawOrVariable, ReturnOutputs
@ReturnOutputs
@Output("result")
class VignettingCorrector(Node):
    """Remove the vignette effect from an image.

    The flat background is estimated with :func:`calculate_flat_image`
    and the input is divided by it.

    Parameters
    ----------
    image : RawOrVariable
        Input image: 2-D greyscale, 3-D single-channel ``(h, w, 1)``, or
        multichannel (converted to greyscale for the flat estimation).
    """

    def __init__(self, image):
        super().__init__()
        self.image = image

    def transform(self, image: RawOrVariable):
        if len(image.shape) == 2:
            grey_img = image
        elif image.shape[-1] == 1:
            # BUG FIX: the original ``image[:-1]`` dropped the last *row*
            # and kept the trailing channel axis, producing a 3-D array that
            # broke the flat-field computation. Select the single channel.
            grey_img = image[..., 0]
        else:
            grey_img = rgb2gray(image)

        flat_image = calculate_flat_image(grey_img)

        # Add a dimension for multichannel input images so the division
        # broadcasts over channels.
        if len(image.shape) == 3:
            flat_image = flat_image[:, :, np.newaxis]

        corrected_img = image / flat_image

        return corrected_img
def calculate_flat_image(img: np.ndarray) -> np.ndarray:
    """
    Estimate a flat background image.

    Dark (foreground) objects are masked out, the holes are filled by
    linear interpolation from the surrounding background, and the result
    is heavily smoothed.

    Parameters
    ==========
    img (ndarray of shape [h,w]): Graylevel image.
    """
    # Blur the input first so the subsequent dilation is more robust
    # TODO: Optimal sigma
    smoothed = gaussian(img, 3)

    # Otsu's threshold separates obvious dark objects from the background
    background = smoothed > threshold_otsu(smoothed)

    # Shrink the background mask so object borders do not count as valid
    background = binary_erosion(background, disk(16))

    # Greyscale morphological dilation removes remaining dark structures
    # (a larger structuring element would take longer)
    smoothed = dilation(smoothed, disk(16))

    foreground = np.bitwise_not(background)

    # Keep only a thin valid rim around objects, so the interpolation
    # problem stays small
    background &= binary_dilation(foreground, disk(1))

    # Fit a linear interpolator on the valid (background) pixels ...
    sample_coords = np.array(np.nonzero(background)).T
    interpolator = LinearNDInterpolator(sample_coords, smoothed[background])

    # ... and evaluate it on the masked-out (foreground) pixels
    filled = smoothed.copy()
    query_coords = np.array(np.nonzero(foreground)).T
    filled[foreground] = interpolator(query_coords)

    # Where interpolation failed (NaN), fall back to the original values
    nan_mask = np.isnan(filled)
    filled[nan_mask] = smoothed[nan_mask]

    # Finally smooth the result
    return gaussian(filled, 64)
| [
"morphocut.Output",
"skimage.color.rgb2gray",
"scipy.interpolate.LinearNDInterpolator",
"skimage.filters.threshold_otsu",
"numpy.isnan",
"numpy.nonzero",
"numpy.bitwise_not",
"skimage.morphology.disk",
"skimage.filters.gaussian"
] | [((360, 376), 'morphocut.Output', 'Output', (['"""result"""'], {}), "('result')\n", (366, 376), False, 'from morphocut import Node, Output, RawOrVariable, ReturnOutputs\n'), ((1387, 1403), 'skimage.filters.gaussian', 'gaussian', (['img', '(3)'], {}), '(img, 3)\n', (1395, 1403), False, 'from skimage.filters import gaussian, threshold_otsu\n'), ((1480, 1499), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img'], {}), '(img)\n', (1494, 1499), False, 'from skimage.filters import gaussian, threshold_otsu\n'), ((1812, 1838), 'numpy.bitwise_not', 'np.bitwise_not', (['valid_mask'], {}), '(valid_mask)\n', (1826, 1838), True, 'import numpy as np\n'), ((2162, 2198), 'scipy.interpolate.LinearNDInterpolator', 'LinearNDInterpolator', (['coords', 'values'], {}), '(coords, values)\n', (2182, 2198), False, 'from scipy.interpolate import LinearNDInterpolator\n'), ((2459, 2479), 'numpy.isnan', 'np.isnan', (['img_filled'], {}), '(img_filled)\n', (2467, 2479), True, 'import numpy as np\n'), ((2559, 2583), 'skimage.filters.gaussian', 'gaussian', (['img_filled', '(64)'], {}), '(img_filled, 64)\n', (2567, 2583), False, 'from skimage.filters import gaussian, threshold_otsu\n'), ((1634, 1642), 'skimage.morphology.disk', 'disk', (['(16)'], {}), '(16)\n', (1638, 1642), False, 'from skimage.morphology import binary_dilation, binary_erosion, dilation, disk\n'), ((1782, 1790), 'skimage.morphology.disk', 'disk', (['(16)'], {}), '(16)\n', (1786, 1790), False, 'from skimage.morphology import binary_dilation, binary_erosion, dilation, disk\n'), ((1978, 1985), 'skimage.morphology.disk', 'disk', (['(1)'], {}), '(1)\n', (1982, 1985), False, 'from skimage.morphology import binary_dilation, binary_erosion, dilation, disk\n'), ((2088, 2110), 'numpy.nonzero', 'np.nonzero', (['valid_mask'], {}), '(valid_mask)\n', (2098, 2110), True, 'import numpy as np\n'), ((2292, 2316), 'numpy.nonzero', 'np.nonzero', (['invalid_mask'], {}), '(invalid_mask)\n', (2302, 2316), True, 'import numpy as np\n'), 
((766, 781), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (774, 781), False, 'from skimage.color import rgb2gray\n')] |
import numpy as np
import warnings
from stingray.base import StingrayObject
from stingray.gti import check_separate, cross_two_gtis
from stingray.lightcurve import Lightcurve
from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress
from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate
from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance
from abc import ABCMeta, abstractmethod
# Public API of this module.
__all__ = [
    "VarEnergySpectrum",
    "RmsEnergySpectrum",
    "RmsSpectrum",
    "LagEnergySpectrum",
    "LagSpectrum",
    "ExcessVarianceSpectrum",
    "CovarianceSpectrum",
    "ComplexCovarianceSpectrum",
    "CountSpectrum",
]
def get_non_overlapping_ref_band(channel_band, ref_band):
    """Return the part of ``ref_band`` that does not overlap ``channel_band``.

    The band of interest is carved out of the reference band so that the
    two never share energy channels.

    Parameters
    ----------
    channel_band : iterable of type ``[elow, ehigh]``
        The lower/upper limits of the energies to be contained in the band
        of interest
    ref_band : iterable
        The lower/upper limits of the energies in the reference band

    Returns
    -------
    ref_intervals : iterable
        The channels that are both in the reference band in not in the
        bands of interest

    Examples
    --------
    >>> channel_band = [2, 3]
    >>> ref_band = [[0, 10]]
    >>> new_ref = get_non_overlapping_ref_band(channel_band, ref_band)
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True
    Test this also works with a 1-D ref. band
    >>> new_ref = get_non_overlapping_ref_band(channel_band, [0, 10])
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True
    >>> new_ref = get_non_overlapping_ref_band([0, 1], [[2, 3]])
    >>> np.allclose(new_ref, [[2, 3]])
    True
    """
    channel_band = np.asarray(channel_band)
    ref_band = np.asarray(ref_band)

    # Promote a single 1-D interval to the 2-D [[e0, e1]] form.
    if ref_band.ndim <= 1:
        ref_band = np.asarray([ref_band])

    # Already disjoint: nothing to carve out.
    if check_separate(ref_band, [channel_band]):
        return np.asarray(ref_band)

    # Intervals covering everything *outside* the channel band, up to an
    # edge guaranteed to contain the reference band.
    upper_edge = np.max([np.max(ref_band), channel_band[1] + 1])
    outside_channel = [
        [0, channel_band[0]],
        [channel_band[1], upper_edge],
    ]

    return cross_two_gtis(ref_band, outside_channel)
def _decode_energy_specification(energy_spec):
"""Decode the energy specification tuple.
Parameters
----------
energy_spec : iterable
list containing the energy specification
Must have the following structure:
* energy_spec[0]: lower edge of (log) energy space
* energy_spec[1]: upper edge of (log) energy space
* energy_spec[2] +1 : energy bin edges (hence the +1)
* {`lin` | `log`} flat deciding whether the energy space is linear
or logarithmic
Returns
-------
energies : numpy.ndarray
An array of lower/upper bin edges for the energy array
Examples
--------
>>> _decode_energy_specification([0, 2, 2, 'lin'])
Traceback (most recent call last):
...
ValueError: Energy specification must be a tuple
>>> a = _decode_energy_specification((0, 2, 2, 'lin'))
>>> np.allclose(a, [0, 1, 2])
True
>>> a = _decode_energy_specification((1, 4, 2, 'log'))
>>> np.allclose(a, [1, 2, 4])
True
"""
if not isinstance(energy_spec, tuple):
raise ValueError("Energy specification must be a tuple")
if energy_spec[-1].lower() not in ["lin", "log"]:
raise ValueError("Incorrect energy specification")
log_distr = True if energy_spec[-1].lower() == "log" else False
if log_distr:
energies = np.logspace(
np.log10(energy_spec[0]), np.log10(energy_spec[1]), energy_spec[2] + 1
)
else:
energies = np.linspace(energy_spec[0], energy_spec[1], energy_spec[2] + 1)
return energies
class VarEnergySpectrum(StingrayObject, metaclass=ABCMeta):
    """
    Base class for variability-energy spectrum.
    This class is only a base for the various variability spectra, and it's
    not to be instantiated by itself.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, floats
        the frequency range over which calculating the variability quantity
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a ``list`` is specified, this is interpreted as a list of bin edges;
        if a ``tuple`` is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    ref_band : ``[emin, emax``], floats; default ``None``
        minimum and maximum energy of the reference band. If ``None``, the
        full band is used.
    use_pi : bool, default ``False``
        Use channel instead of energy
    events2 : :class:`stingray.events.EventList` object
        event list for the second channel, if not the same. Useful if the
        reference band has to be taken from another detector.
    return_complex: bool, default False
        In spectra that produce complex values, return the whole spectrum.
        Otherwise, the absolute value will be returned.
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    events2 : array-like
        if the spectrum requires it, second list of events
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, corresponding to each energy interval
    spectrum_error : array-like
        the error bars corresponding to spectrum
    """

    # NOTE: the class docstring was previously placed *after* this assignment,
    # which made it a no-op string statement instead of ``__doc__``.
    main_array_attr = "energy"

    def __init__(
        self,
        events,
        freq_interval,
        energy_spec,
        ref_band=None,
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
        return_complex=False,
    ):
        self.events1 = events
        self.events2 = assign_value_if_none(events2, events)
        self._analyze_inputs()

        # This will be set to True in ComplexCovariance
        self.return_complex = return_complex

        self.freq_interval = freq_interval
        self.use_pi = use_pi
        self.bin_time = bin_time

        if isinstance(energy_spec, tuple):
            energies = _decode_energy_specification(energy_spec)
        else:
            energies = np.asarray(energy_spec)

        self.energy_intervals = list(zip(energies[0:-1], energies[1:]))

        self.ref_band = np.asarray(assign_value_if_none(ref_band, [0, np.inf]))

        if len(self.ref_band.shape) <= 1:
            self.ref_band = np.asarray([self.ref_band])

        self.segment_size = self.delta_nu = None
        if segment_size is not None:
            self.segment_size = segment_size
            self.delta_nu = 1 / self.segment_size

        self._create_empty_spectrum()
        if len(events.time) == 0:
            # Fixed message: the two literals used to be concatenated with no
            # separating space ("...list!Can't make...").
            simon("There are no events in your event list! Can't make a spectrum!")
        else:
            self._spectrum_function()

    @property
    def energy(self):
        """Give the centers of the energy intervals."""
        return np.sum(self.energy_intervals, axis=1) / 2

    def _analyze_inputs(self):
        """Make some checks on the inputs and set some internal variable.

        If the object of events1 is the same as events2, set `same_events` to True.
        This will, for example, tell the methods to use events1 for the subject bands
        and events2 for the reference band (useful in deadtime-affected data).
        Also, if the event lists are distinct, calculate common GTIs.
        """
        events1 = self.events1
        events2 = self.events2
        common_gti = events1.gti
        if events2 is None or events2 is events1:
            self.events2 = self.events1
            self.same_events = True
        else:
            common_gti = cross_two_gtis(events1.gti, events2.gti)
            self.same_events = False
        self.gti = common_gti

    def _create_empty_spectrum(self):
        """Allocate the arrays of the output spectrum.

        Default value is NaN. This is because most spectral timing products are
        prone to numerical errors, and it's more informative to have a default invalid
        value rather than something like, e.g., 0 or 1
        """
        if self.return_complex:
            dtype = complex
        else:
            dtype = float
        self.spectrum = np.zeros(len(self.energy_intervals), dtype=dtype) + np.nan
        self.spectrum_error = np.zeros_like(self.spectrum, dtype=dtype) + np.nan

    def _get_times_from_energy_range(self, events, erange, use_pi=False):
        """Get event times from the wanted energy range.

        Parameters
        ----------
        events : `EventList`
            Input event list
        erange : [e0, e1]
            Energy range in keV

        Other parameters
        ----------------
        use_pi : bool, default False
            Use the PI channel instead of energies

        Returns
        -------
        out_ev : `EventList`
            The filtered event list.
        """
        if use_pi:
            energies = events.pi
        else:
            energies = events.energy
        mask = (energies >= erange[0]) & (energies < erange[1])
        return events.time[mask]

    def _get_good_frequency_bins(self, freq=None):
        """Get frequency mask corresponding to the wanted frequency interval

        Parameters
        ----------
        freq : `np.array`, default None
            The frequency array. If None, it will get calculated from the number
            of spectral bins using `np.fft.fftfreq`

        Returns
        -------
        freq_mask : `np.array` of bool
            The frequency mask.
        """
        if freq is None:
            n_bin = np.rint(self.segment_size / self.bin_time)
            freq = fftfreq(int(n_bin), self.bin_time)
            freq = freq[freq > 0]

        good = (freq >= self.freq_interval[0]) & (freq < self.freq_interval[1])
        return good

    def _construct_lightcurves(
        self, channel_band, tstart=None, tstop=None, exclude=True, only_base=False
    ):
        """
        Construct light curves from event data, for each band of interest.

        Parameters
        ----------
        channel_band : iterable of type ``[elow, ehigh]``
            The lower/upper limits of the energies to be contained in the band
            of interest

        tstart : float, optional, default ``None``
            A common start time (if start of observation is different from
            the first recorded event)

        tstop : float, optional, default ``None``
            A common stop time (if start of observation is different from
            the first recorded event)

        exclude : bool, optional, default ``True``
            if ``True``, exclude the band of interest from the reference band

        only_base : bool, optional, default ``False``
            if ``True``, only return the light curve of the channel of interest, not
            that of the reference band

        Returns
        -------
        base_lc : :class:`Lightcurve` object
            The light curve of the channels of interest

        ref_lc : :class:`Lightcurve` object (only returned if ``only_base`` is ``False``)
            The reference light curve for comparison with ``base_lc``
        """
        if self.use_pi:
            energies1 = self.events1.pi
            energies2 = self.events2.pi
        else:
            energies2 = self.events2.energy
            energies1 = self.events1.energy

        gti = cross_two_gtis(self.events1.gti, self.events2.gti)

        tstart = assign_value_if_none(tstart, gti[0, 0])
        tstop = assign_value_if_none(tstop, gti[-1, -1])

        good = (energies1 >= channel_band[0]) & (energies1 < channel_band[1])
        base_lc = Lightcurve.make_lightcurve(
            self.events1.time[good],
            self.bin_time,
            tstart=tstart,
            tseg=tstop - tstart,
            gti=gti,
            mjdref=self.events1.mjdref,
        )

        if only_base:
            return base_lc

        if exclude:
            ref_intervals = get_non_overlapping_ref_band(channel_band, self.ref_band)
        else:
            ref_intervals = self.ref_band

        # Start from an all-zero reference light curve and accumulate the
        # counts of each reference interval into it.
        ref_lc = Lightcurve(
            base_lc.time,
            np.zeros_like(base_lc.counts),
            gti=base_lc.gti,
            mjdref=base_lc.mjdref,
            dt=base_lc.dt,
            err_dist=base_lc.err_dist,
            skip_checks=True,
        )

        for i in ref_intervals:
            good = (energies2 >= i[0]) & (energies2 < i[1])
            new_lc = Lightcurve.make_lightcurve(
                self.events2.time[good],
                self.bin_time,
                tstart=tstart,
                tseg=tstop - tstart,
                gti=base_lc.gti,
                mjdref=self.events2.mjdref,
            )
            ref_lc = ref_lc + new_lc

        ref_lc.err_dist = base_lc.err_dist

        return base_lc, ref_lc

    @abstractmethod
    def _spectrum_function(self):
        pass

    def from_astropy_table(self, *args, **kwargs):
        raise NotImplementedError(
            "from_XXXX methods are not implemented for VarEnergySpectrum")

    def from_xarray(self, *args, **kwargs):
        raise NotImplementedError(
            "from_XXXX methods are not implemented for VarEnergySpectrum")

    def from_pandas(self, *args, **kwargs):
        raise NotImplementedError(
            "from_XXXX methods are not implemented for VarEnergySpectrum")
class RmsSpectrum(VarEnergySpectrum):
    """Calculate the rms-Energy spectrum.
    For each energy interval, calculate the power density spectrum in
    absolute or fractional r.m.s. normalization, and integrate it in the
    given frequency range to obtain the rms. If ``events2`` is specified,
    the cospectrum is used instead of the PDS.
    We assume absolute r.m.s. normalization. To get the fractional r.m.s.
    we just divide by the mean count rate.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, list of float
        the frequency range over which calculating the variability quantity
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a ``list`` is specified, this is interpreted as a list of bin edges;
        if a ``tuple`` is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    ref_band : ``[emin, emax]``, float; default ``None``
        minimum and maximum energy of the reference band. If ``None``, the
        full band is used.
    use_pi : bool, default ``False``
        Use channel instead of energy
    events2 : :class:`stingray.events.EventList` object
        event list for the second channel, if not the same. Useful if the
        reference band has to be taken from another detector.
    norm : str, one of ["abs", "frac"]
        The normalization of the rms, whether absolute or fractional.
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    events2 : array-like
        if the spectrum requires it, second list of events
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, corresponding to each energy interval
    spectrum_error : array-like
        the errorbars corresponding to spectrum
    """

    def __init__(
        self,
        events,
        energy_spec,
        ref_band=None,
        freq_interval=[0, 1],
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
        norm="frac",
    ):
        # Normalization ("abs" or "frac") applied at the end of
        # _spectrum_function; everything upstream uses absolute rms.
        self.norm = norm
        VarEnergySpectrum.__init__(
            self,
            events,
            freq_interval=freq_interval,
            energy_spec=energy_spec,
            bin_time=bin_time,
            use_pi=use_pi,
            ref_band=ref_band,
            segment_size=segment_size,
            events2=events2,
        )

    def _spectrum_function(self):
        # Get the frequency bins to be averaged in the final results.
        good = self._get_good_frequency_bins()
        n_ave_bin = np.count_nonzero(good)
        # Get the frequency resolution of the final spectrum.
        delta_nu_after_mean = self.delta_nu * n_ave_bin

        for i, eint in enumerate(show_progress(self.energy_intervals)):
            # Extract events from the subject band and calculate the count rate
            # and Poisson noise level.
            sub_events = self._get_times_from_energy_range(self.events1, eint)
            countrate_sub = get_average_ctrate(sub_events, self.gti, self.segment_size)
            sub_power_noise = poisson_level(norm="abs", meanrate=countrate_sub)

            # If we provided the `events2` array, calculate the rms from the
            # cospectrum, otherwise from the PDS
            if not self.same_events:
                # Extract events from the subject band in the other array, and
                # calculate the count rate and Poisson noise level.
                sub_events2 = self._get_times_from_energy_range(self.events2, eint)
                countrate_sub2 = get_average_ctrate(sub_events2, self.gti, self.segment_size)
                sub2_power_noise = poisson_level(norm="abs", meanrate=countrate_sub2)

                # Calculate the cross spectrum
                results = avg_cs_from_events(
                    sub_events,
                    sub_events2,
                    self.gti,
                    self.segment_size,
                    self.bin_time,
                    silent=True,
                    norm="abs",
                )
                # None means no valid intervals: leave this bin as NaN.
                if results is None:
                    continue
                cross = results["power"]
                m_ave, mean = [results.meta[key] for key in ["m", "mean"]]

                mean_power = np.mean(cross[good])
                # The Poisson noise does not enter the cospectrum of two
                # independent detectors; only its variance contributes below.
                power_noise = 0
                rmsnoise = np.sqrt(delta_nu_after_mean *
                                   np.sqrt(sub_power_noise * sub2_power_noise))
            else:
                results = avg_pds_from_events(
                    sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs"
                )
                if results is None:
                    continue
                sub_power = results["power"]
                m_ave, mean = [results.meta[key] for key in ["m", "mean"]]

                mean_power = np.mean(sub_power[good])
                power_noise = sub_power_noise
                rmsnoise = np.sqrt(delta_nu_after_mean * power_noise)

            meanrate = mean / self.bin_time

            # Integrated (noise-subtracted) power over the band -> rms.
            rms = np.sqrt(np.abs(mean_power - power_noise) * delta_nu_after_mean)

            # Assume coherence 0, use Ingram+2019
            # NOTE(review): the cross term ``2 * rms * rmsnoise`` is
            # dimensionally odd next to the 4th-power terms (cf.
            # ``2 * rms**2 * rmsnoise**2``) -- confirm against Ingram (2019).
            num = rms ** 4 + rmsnoise ** 4 + 2 * rms * rmsnoise
            den = 4 * m_ave * n_ave_bin * rms ** 2
            rms_err = np.sqrt(num / den)

            if self.norm == "frac":
                rms, rms_err = rms / meanrate, rms_err / meanrate
            self.spectrum[i] = rms
            self.spectrum_error[i] = rms_err
# Backwards-compatible alias for the historical class name.
RmsEnergySpectrum = RmsSpectrum
class ExcessVarianceSpectrum(VarEnergySpectrum):
    """Calculate the Excess Variance spectrum.
    For each energy interval, calculate the excess variance in the specified
    frequency range.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, list of float
        the frequency range over which calculating the variability quantity
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a list is specified, this is interpreted as a list of bin edges;
        if a tuple is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    ref_band : ``[emin, emax]``, floats; default ``None``
        minimum and maximum energy of the reference band. If ``None``, the
        full band is used.
    use_pi : bool, default ``False``
        Use channel instead of energy
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, corresponding to each energy interval
    spectrum_error : array-like
        the errorbars corresponding to spectrum
    """

    def __init__(
        self,
        events,
        freq_interval,
        energy_spec,
        bin_time=1,
        use_pi=False,
        segment_size=None,
        normalization="fvar",
    ):
        self.normalization = normalization
        accepted_normalizations = ["fvar", "none"]
        if normalization not in accepted_normalizations:
            raise ValueError(
                "The normalization of excess variance must be "
                "one of {}".format(accepted_normalizations)
            )

        VarEnergySpectrum.__init__(
            self,
            events,
            freq_interval,
            energy_spec,
            bin_time=bin_time,
            use_pi=use_pi,
            segment_size=segment_size,
        )

    def _spectrum_function(self):
        # BUG FIX: this method used to build *local* spec/spec_err arrays and
        # return them, but the base-class __init__ discards the return value,
        # so ``self.spectrum``/``self.spectrum_error`` stayed NaN. Populate
        # the instance arrays directly, like every sibling class does.
        for i, eint in enumerate(self.energy_intervals):
            # One light curve per energy bin; the reference band is not used.
            lc = self._construct_lightcurves(eint, exclude=False, only_base=True)

            self.spectrum[i], self.spectrum_error[i] = excess_variance(lc, self.normalization)

        # Keep returning the arrays for callers that used the old return value.
        return self.spectrum, self.spectrum_error
class CountSpectrum(VarEnergySpectrum):
    """Calculate the energy spectrum.
    For each energy interval, compute the counts.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a ``list`` is specified, this is interpreted as a list of bin edges;
        if a ``tuple`` is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    use_pi : bool, default ``False``
        Use channel instead of energy
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, corresponding to each energy interval
    spectrum_error : array-like
        the errorbars corresponding to spectrum
    """

    def __init__(
        self,
        events,
        energy_spec,
        use_pi=False
    ):
        # No frequency selection is needed for a plain count spectrum.
        VarEnergySpectrum.__init__(
            self,
            events,
            None,
            energy_spec,
            use_pi=use_pi,
        )

    def _spectrum_function(self):
        events = self.events1

        # Consistency fix: wrap the intervals (not the enumerate iterator) in
        # show_progress, like every sibling class, so the progress bar knows
        # the total number of energy bins.
        for i, eint in enumerate(show_progress(self.energy_intervals)):
            sub_events = self._get_times_from_energy_range(events, eint, use_pi=self.use_pi)
            sp = sub_events.size
            self.spectrum[i] = sp
            # Poisson (sqrt-of-counts) uncertainty.
            self.spectrum_error[i] = np.sqrt(sp)
class LagSpectrum(VarEnergySpectrum):
    """Calculate the lag-energy spectrum.
    For each energy interval, calculate the lag between two bands.
    If ``events2`` is specified, the energy bands are chosen from this second
    event list, while the reference band from ``events``.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, list of float
        the frequency range over which calculating the variability quantity
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a ``list`` is specified, this is interpreted as a list of bin edges;
        if a ``tuple`` is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    ref_band : ``[emin, emax]``, float; default ``None``
        minimum and maximum energy of the reference band. If ``None``, the
        full band is used.
    use_pi : bool, default ``False``
        Use channel instead of energy
    events2 : :class:`stingray.events.EventList` object
        event list for the second channel, if not the same. Useful if the
        reference band has to be taken from another detector.
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    events2 : array-like
        if the spectrum requires it, second list of events
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the lag values, corresponding to each energy interval
    spectrum_error : array-like
        the errorbars corresponding to spectrum
    """

    # events, freq_interval, energy_spec, ref_band = None
    def __init__(
        self,
        events,
        freq_interval,
        energy_spec,
        ref_band=None,
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
    ):
        VarEnergySpectrum.__init__(
            self,
            events,
            freq_interval,
            energy_spec=energy_spec,
            bin_time=bin_time,
            use_pi=use_pi,
            ref_band=ref_band,
            segment_size=segment_size,
            events2=events2,
        )

    def _spectrum_function(self):
        # Extract the photon arrival times from the reference band
        # NOTE(review): only the *first* reference-band interval is used here
        # (``self.ref_band[0]``) -- confirm this is intended when the
        # reference band has multiple intervals.
        ref_events = self._get_times_from_energy_range(self.events2, self.ref_band[0])
        ref_power_noise = poisson_level(norm="none", n_ph=ref_events.size)

        # Calculate the PDS in the reference band. Needed to calculate errors.
        results = avg_pds_from_events(
            ref_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="none"
        )
        freq = results["freq"]
        ref_power = results["power"]
        m_ave = results.meta["m"]

        # Get the frequency bins to be averaged in the final results.
        good = self._get_good_frequency_bins(freq)
        mean_ref_power = np.mean(ref_power[good])
        n_ave_bin = np.count_nonzero(good)
        m_tot = n_ave_bin * m_ave
        # Mid-point of the frequency interval, used to convert the averaged
        # phase error into a time-lag error below.
        f = (self.freq_interval[0] + self.freq_interval[1]) / 2

        for i, eint in enumerate(show_progress(self.energy_intervals)):
            # Extract the photon arrival times from the subject band
            sub_events = self._get_times_from_energy_range(self.events1, eint)
            sub_power_noise = poisson_level(norm="none", n_ph=sub_events.size)

            results_cross = avg_cs_from_events(
                sub_events,
                ref_events,
                self.gti,
                self.segment_size,
                self.bin_time,
                silent=True,
                norm="none"
            )

            results_ps = avg_pds_from_events(
                sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="none"
            )

            # None means no valid intervals: leave this energy bin as NaN.
            if results_cross is None or results_ps is None:
                continue

            cross = results_cross["power"]
            sub_power = results_ps["power"]

            Cmean = np.mean(cross[good])

            mean_sub_power = np.mean(sub_power[good])
            # Is the subject band overlapping with the reference band?
            # This will be used to correct the error bars, following
            # Ingram 2019.
            common_ref = self.same_events and len(cross_two_gtis([eint], self.ref_band)) > 0

            _, _, phi_e, _ = error_on_averaged_cross_spectrum(
                Cmean, mean_sub_power, mean_ref_power, m_tot, sub_power_noise, ref_power_noise, common_ref=common_ref
            )

            # Phase -> time lag, averaged over the selected frequency bins.
            lag = np.mean((np.angle(cross[good]) / (2 * np.pi * freq[good])))
            lag_e = phi_e / (2 * np.pi * f)

            self.spectrum[i] = lag
            self.spectrum_error[i] = lag_e
# Backwards-compatible alias for the historical class name.
LagEnergySpectrum = LagSpectrum
class ComplexCovarianceSpectrum(VarEnergySpectrum):
    """Calculate the complex covariance spectrum.
    For each energy interval, calculate the covariance between two bands.
    If ``events2`` is specified, the energy bands are chosen from this second
    event list, while the reference band from ``events``.
    Mastroserio et al. 2018, MNRAS, 475, 4027
    We assume absolute r.m.s. normalization. To get the fractional r.m.s.
    we just divide by the mean count rate.
    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, list of float
        the frequency range over which calculating the variability quantity
    energy_spec : list or tuple ``(emin, emax, N, type)``
        if a ``list`` is specified, this is interpreted as a list of bin edges;
        if a ``tuple`` is provided, this will encode the minimum and maximum
        energies, the number of intervals, and ``lin`` or ``log``.
    Other Parameters
    ----------------
    ref_band : ``[emin, emax]``, float; default ``None``
        minimum and maximum energy of the reference band. If ``None``, the
        full band is used.
    use_pi : bool, default ``False``
        Use channel instead of energy
    events2 : :class:`stingray.events.EventList` object
        event list for the second channel, if not the same. Useful if the
        reference band has to be taken from another detector.
    norm : str, one of ["abs", "frac"]
        The normalization of the covariance, whether absolute or fractional.
    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    events2 : array-like
        if the spectrum requires it, second list of events
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, corresponding to each energy interval
    spectrum_error : array-like
        the errorbars corresponding to spectrum
    """

    def __init__(
        self,
        events,
        energy_spec,
        ref_band=None,
        freq_interval=[0, 1],
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
        norm="frac",
        return_complex=True,
    ):
        self.norm = norm
        VarEnergySpectrum.__init__(
            self,
            events,
            freq_interval=freq_interval,
            energy_spec=energy_spec,
            bin_time=bin_time,
            use_pi=use_pi,
            ref_band=ref_band,
            segment_size=segment_size,
            events2=events2,
            return_complex=return_complex
        )

    def _spectrum_function(self):
        # Extract events from the reference band and calculate the PDS and
        # the Poisson noise level.
        # NOTE(review): only the *first* reference-band interval is used here
        # (``self.ref_band[0]``) -- confirm this is intended when the
        # reference band has multiple intervals.
        ref_events = self._get_times_from_energy_range(self.events2, self.ref_band[0])
        countrate_ref = get_average_ctrate(ref_events, self.gti, self.segment_size)
        ref_power_noise = poisson_level(norm="abs", meanrate=countrate_ref)

        results = avg_pds_from_events(
            ref_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs"
        )
        freq = results["freq"]
        ref_power = results["power"]
        m_ave = results.meta["m"]

        # Select the frequency range to be averaged for the measurement.
        good = (freq >= self.freq_interval[0]) & (freq < self.freq_interval[1])
        n_ave_bin = np.count_nonzero(good)
        mean_ref_power = np.mean(ref_power[good])
        m_tot = m_ave * n_ave_bin
        # Frequency resolution
        delta_nu = n_ave_bin * self.delta_nu

        for i, eint in enumerate(show_progress(self.energy_intervals)):
            # Extract events from the subject band
            sub_events = self._get_times_from_energy_range(self.events1, eint)
            countrate_sub = get_average_ctrate(sub_events, self.gti, self.segment_size)
            sub_power_noise = poisson_level(norm="abs", meanrate=countrate_sub)

            results_cross = avg_cs_from_events(
                sub_events,
                ref_events,
                self.gti,
                self.segment_size,
                self.bin_time,
                silent=True,
                norm="abs",
            )

            results_ps = avg_pds_from_events(
                sub_events, self.gti, self.segment_size, self.bin_time, silent=True, norm="abs"
            )

            # None means no valid intervals: leave this energy bin as NaN.
            if results_cross is None or results_ps is None:
                continue

            cross = results_cross["power"]
            sub_power = results_ps["power"]
            mean = results_ps.meta["mean"]

            # Is the subject band overlapping with the reference band?
            # This will be used to correct the error bars, following
            # Ingram 2019.
            common_ref = self.same_events and len(cross_two_gtis([eint], self.ref_band)) > 0

            Cmean = np.mean(cross[good])
            if common_ref:
                # Equation 6 from Ingram+2019
                Cmean -= sub_power_noise

            Cmean_real = np.abs(Cmean)

            mean_sub_power = np.mean(sub_power[good])

            _, _, _, Ce = error_on_averaged_cross_spectrum(
                Cmean, mean_sub_power, mean_ref_power, m_tot, sub_power_noise, ref_power_noise, common_ref=common_ref
            )

            if not self.return_complex:
                Cmean = Cmean_real
            # Convert the cross spectrum to a covariance.
            cov, cov_e = cross_to_covariance(np.asarray(
                [Cmean, Ce]), mean_ref_power, ref_power_noise, delta_nu)

            meanrate = mean / self.bin_time

            if self.norm == "frac":
                cov, cov_e = cov / meanrate, cov_e / meanrate

            self.spectrum[i] = cov
            self.spectrum_error[i] = cov_e
class CovarianceSpectrum(ComplexCovarianceSpectrum):
    """Compute the (real-valued) covariance spectrum.

    This is simply the modulus of the complex covariance spectrum;
    see :class:`ComplexCovarianceSpectrum` for the details of the
    calculation.

    The covariance spectrum was originally introduced in:
    Wilkinson & Uttley 2009, MNRAS, 397, 666

    Parameters
    ----------
    events : :class:`stingray.events.EventList` object
        event list
    freq_interval : ``[f0, f1]``, list of float
        the frequency range over which the variability quantity is
        calculated
    energy_spec : list or tuple ``(emin, emax, N, type)``
        a ``list`` is interpreted as a list of bin edges; a ``tuple``
        encodes the minimum and maximum energies, the number of
        intervals, and ``lin`` or ``log`` binning.

    Other Parameters
    ----------------
    ref_band : ``[emin, emax]``, float; default ``None``
        minimum and maximum energy of the reference band. If ``None``,
        the full band is used.
    use_pi : bool, default ``False``
        use the instrument channel instead of energy
    events2 : :class:`stingray.events.EventList` object
        event list for the second channel, if not the same. Useful when
        the reference band has to be taken from another detector.
    norm : str, one of ["abs", "frac"]
        the normalization of the covariance, absolute or fractional.

    Attributes
    ----------
    events1 : array-like
        list of events used to produce the spectrum
    events2 : array-like
        if the spectrum requires it, second list of events
    freq_interval : array-like
        interval of frequencies used to calculate the spectrum
    energy_intervals : ``[[e00, e01], [e10, e11], ...]``
        energy intervals used for the spectrum
    spectrum : array-like
        the spectral values, one per energy interval
    spectrum_error : array-like
        the error bars corresponding to ``spectrum``
    """

    def __init__(
        self,
        events,
        energy_spec,
        ref_band=None,
        freq_interval=[0, 1],
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
        norm="abs",
    ):
        # Delegate all the work to the complex version, requesting the
        # absolute value of the result (return_complex=False).
        super().__init__(
            events,
            freq_interval=freq_interval,
            energy_spec=energy_spec,
            bin_time=bin_time,
            use_pi=use_pi,
            norm=norm,
            ref_band=ref_band,
            return_complex=False,
            segment_size=segment_size,
            events2=events2,
        )
| [
"numpy.log10",
"numpy.sqrt",
"stingray.gti.cross_two_gtis",
"numpy.count_nonzero",
"stingray.utils.excess_variance",
"stingray.lightcurve.Lightcurve.make_lightcurve",
"stingray.utils.show_progress",
"numpy.mean",
"stingray.fourier.error_on_averaged_cross_spectrum",
"numpy.asarray",
"numpy.max",
... | [((1897, 1921), 'numpy.asarray', 'np.asarray', (['channel_band'], {}), '(channel_band)\n', (1907, 1921), True, 'import numpy as np\n'), ((1937, 1957), 'numpy.asarray', 'np.asarray', (['ref_band'], {}), '(ref_band)\n', (1947, 1957), True, 'import numpy as np\n'), ((2040, 2080), 'stingray.gti.check_separate', 'check_separate', (['ref_band', '[channel_band]'], {}), '(ref_band, [channel_band])\n', (2054, 2080), False, 'from stingray.gti import check_separate, cross_two_gtis\n'), ((2267, 2309), 'stingray.gti.cross_two_gtis', 'cross_two_gtis', (['ref_band', 'not_channel_band'], {}), '(ref_band, not_channel_band)\n', (2281, 2309), False, 'from stingray.gti import check_separate, cross_two_gtis\n'), ((2010, 2032), 'numpy.asarray', 'np.asarray', (['[ref_band]'], {}), '([ref_band])\n', (2020, 2032), True, 'import numpy as np\n'), ((2097, 2117), 'numpy.asarray', 'np.asarray', (['ref_band'], {}), '(ref_band)\n', (2107, 2117), True, 'import numpy as np\n'), ((3831, 3894), 'numpy.linspace', 'np.linspace', (['energy_spec[0]', 'energy_spec[1]', '(energy_spec[2] + 1)'], {}), '(energy_spec[0], energy_spec[1], energy_spec[2] + 1)\n', (3842, 3894), True, 'import numpy as np\n'), ((6269, 6306), 'stingray.utils.assign_value_if_none', 'assign_value_if_none', (['events2', 'events'], {}), '(events2, events)\n', (6289, 6306), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((11960, 12010), 'stingray.gti.cross_two_gtis', 'cross_two_gtis', (['self.events1.gti', 'self.events2.gti'], {}), '(self.events1.gti, self.events2.gti)\n', (11974, 12010), False, 'from stingray.gti import check_separate, cross_two_gtis\n'), ((12029, 12068), 'stingray.utils.assign_value_if_none', 'assign_value_if_none', (['tstart', 'gti[0, 0]'], {}), '(tstart, gti[0, 0])\n', (12049, 12068), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((12085, 12125), 'stingray.utils.assign_value_if_none', 
'assign_value_if_none', (['tstop', 'gti[-1, -1]'], {}), '(tstop, gti[-1, -1])\n', (12105, 12125), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((12223, 12367), 'stingray.lightcurve.Lightcurve.make_lightcurve', 'Lightcurve.make_lightcurve', (['self.events1.time[good]', 'self.bin_time'], {'tstart': 'tstart', 'tseg': '(tstop - tstart)', 'gti': 'gti', 'mjdref': 'self.events1.mjdref'}), '(self.events1.time[good], self.bin_time, tstart=\n tstart, tseg=tstop - tstart, gti=gti, mjdref=self.events1.mjdref)\n', (12249, 12367), False, 'from stingray.lightcurve import Lightcurve\n'), ((16848, 16870), 'numpy.count_nonzero', 'np.count_nonzero', (['good'], {}), '(good)\n', (16864, 16870), True, 'import numpy as np\n'), ((22174, 22193), 'numpy.zeros_like', 'np.zeros_like', (['spec'], {}), '(spec)\n', (22187, 22193), True, 'import numpy as np\n'), ((26708, 26756), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""none"""', 'n_ph': 'ref_events.size'}), "(norm='none', n_ph=ref_events.size)\n", (26721, 26756), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((26855, 26960), 'stingray.fourier.avg_pds_from_events', 'avg_pds_from_events', (['ref_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""none"""'}), "(ref_events, self.gti, self.segment_size, self.bin_time,\n silent=True, norm='none')\n", (26874, 26960), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((27228, 27252), 'numpy.mean', 'np.mean', (['ref_power[good]'], {}), '(ref_power[good])\n', (27235, 27252), True, 'import numpy as np\n'), ((27273, 27295), 'numpy.count_nonzero', 'np.count_nonzero', (['good'], {}), '(good)\n', (27289, 27295), True, 'import numpy as np\n'), ((32156, 32215), 'stingray.fourier.get_average_ctrate', 'get_average_ctrate', (['ref_events', 'self.gti', 
'self.segment_size'], {}), '(ref_events, self.gti, self.segment_size)\n', (32174, 32215), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((32242, 32291), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""abs"""', 'meanrate': 'countrate_ref'}), "(norm='abs', meanrate=countrate_ref)\n", (32255, 32291), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((32311, 32415), 'stingray.fourier.avg_pds_from_events', 'avg_pds_from_events', (['ref_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""abs"""'}), "(ref_events, self.gti, self.segment_size, self.bin_time,\n silent=True, norm='abs')\n", (32330, 32415), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((32710, 32732), 'numpy.count_nonzero', 'np.count_nonzero', (['good'], {}), '(good)\n', (32726, 32732), True, 'import numpy as np\n'), ((32758, 32782), 'numpy.mean', 'np.mean', (['ref_power[good]'], {}), '(ref_power[good])\n', (32765, 32782), True, 'import numpy as np\n'), ((3721, 3745), 'numpy.log10', 'np.log10', (['energy_spec[0]'], {}), '(energy_spec[0])\n', (3729, 3745), True, 'import numpy as np\n'), ((3747, 3771), 'numpy.log10', 'np.log10', (['energy_spec[1]'], {}), '(energy_spec[1])\n', (3755, 3771), True, 'import numpy as np\n'), ((6691, 6714), 'numpy.asarray', 'np.asarray', (['energy_spec'], {}), '(energy_spec)\n', (6701, 6714), True, 'import numpy as np\n'), ((6824, 6867), 'stingray.utils.assign_value_if_none', 'assign_value_if_none', (['ref_band', '[0, np.inf]'], {}), '(ref_band, [0, np.inf])\n', (6844, 6867), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((6940, 6967), 'numpy.asarray', 'np.asarray', (['[self.ref_band]'], {}), '([self.ref_band])\n', (6950, 6967), True, 'import numpy as np\n'), ((7236, 7311), 
'stingray.utils.simon', 'simon', (['(\'There are no events in your event list!\' + "Can\'t make a spectrum!")'], {}), '(\'There are no events in your event list!\' + "Can\'t make a spectrum!")\n', (7241, 7311), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((7472, 7509), 'numpy.sum', 'np.sum', (['self.energy_intervals'], {'axis': '(1)'}), '(self.energy_intervals, axis=1)\n', (7478, 7509), True, 'import numpy as np\n'), ((8213, 8253), 'stingray.gti.cross_two_gtis', 'cross_two_gtis', (['events1.gti', 'events2.gti'], {}), '(events1.gti, events2.gti)\n', (8227, 8253), False, 'from stingray.gti import check_separate, cross_two_gtis\n'), ((8864, 8905), 'numpy.zeros_like', 'np.zeros_like', (['self.spectrum'], {'dtype': 'dtype'}), '(self.spectrum, dtype=dtype)\n', (8877, 8905), True, 'import numpy as np\n'), ((10154, 10196), 'numpy.rint', 'np.rint', (['(self.segment_size / self.bin_time)'], {}), '(self.segment_size / self.bin_time)\n', (10161, 10196), True, 'import numpy as np\n'), ((12727, 12756), 'numpy.zeros_like', 'np.zeros_like', (['base_lc.counts'], {}), '(base_lc.counts)\n', (12740, 12756), True, 'import numpy as np\n'), ((13042, 13194), 'stingray.lightcurve.Lightcurve.make_lightcurve', 'Lightcurve.make_lightcurve', (['self.events2.time[good]', 'self.bin_time'], {'tstart': 'tstart', 'tseg': '(tstop - tstart)', 'gti': 'base_lc.gti', 'mjdref': 'self.events2.mjdref'}), '(self.events2.time[good], self.bin_time, tstart=\n tstart, tseg=tstop - tstart, gti=base_lc.gti, mjdref=self.events2.mjdref)\n', (13068, 13194), False, 'from stingray.lightcurve import Lightcurve\n'), ((17024, 17060), 'stingray.utils.show_progress', 'show_progress', (['self.energy_intervals'], {}), '(self.energy_intervals)\n', (17037, 17060), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((17289, 17348), 'stingray.fourier.get_average_ctrate', 'get_average_ctrate', (['sub_events', 'self.gti', 
'self.segment_size'], {}), '(sub_events, self.gti, self.segment_size)\n', (17307, 17348), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((17379, 17428), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""abs"""', 'meanrate': 'countrate_sub'}), "(norm='abs', meanrate=countrate_sub)\n", (17392, 17428), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((19607, 19625), 'numpy.sqrt', 'np.sqrt', (['(num / den)'], {}), '(num / den)\n', (19614, 19625), True, 'import numpy as np\n'), ((22369, 22408), 'stingray.utils.excess_variance', 'excess_variance', (['lc', 'self.normalization'], {}), '(lc, self.normalization)\n', (22384, 22408), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((24062, 24073), 'numpy.sqrt', 'np.sqrt', (['sp'], {}), '(sp)\n', (24069, 24073), True, 'import numpy as np\n'), ((27429, 27465), 'stingray.utils.show_progress', 'show_progress', (['self.energy_intervals'], {}), '(self.energy_intervals)\n', (27442, 27465), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((27646, 27694), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""none"""', 'n_ph': 'sub_events.size'}), "(norm='none', n_ph=sub_events.size)\n", (27659, 27694), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((27724, 27840), 'stingray.fourier.avg_cs_from_events', 'avg_cs_from_events', (['sub_events', 'ref_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""none"""'}), "(sub_events, ref_events, self.gti, self.segment_size,\n self.bin_time, silent=True, norm='none')\n", (27742, 27840), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((27989, 28094), 
'stingray.fourier.avg_pds_from_events', 'avg_pds_from_events', (['sub_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""none"""'}), "(sub_events, self.gti, self.segment_size, self.bin_time,\n silent=True, norm='none')\n", (28008, 28094), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((28316, 28336), 'numpy.mean', 'np.mean', (['cross[good]'], {}), '(cross[good])\n', (28323, 28336), True, 'import numpy as np\n'), ((28367, 28391), 'numpy.mean', 'np.mean', (['sub_power[good]'], {}), '(sub_power[good])\n', (28374, 28391), True, 'import numpy as np\n'), ((28683, 28822), 'stingray.fourier.error_on_averaged_cross_spectrum', 'error_on_averaged_cross_spectrum', (['Cmean', 'mean_sub_power', 'mean_ref_power', 'm_tot', 'sub_power_noise', 'ref_power_noise'], {'common_ref': 'common_ref'}), '(Cmean, mean_sub_power, mean_ref_power,\n m_tot, sub_power_noise, ref_power_noise, common_ref=common_ref)\n', (28715, 28822), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((32928, 32964), 'stingray.utils.show_progress', 'show_progress', (['self.energy_intervals'], {}), '(self.energy_intervals)\n', (32941, 32964), False, 'from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress\n'), ((33125, 33184), 'stingray.fourier.get_average_ctrate', 'get_average_ctrate', (['sub_events', 'self.gti', 'self.segment_size'], {}), '(sub_events, self.gti, self.segment_size)\n', (33143, 33184), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((33215, 33264), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""abs"""', 'meanrate': 'countrate_sub'}), "(norm='abs', meanrate=countrate_sub)\n", (33228, 33264), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((33294, 33409), 
'stingray.fourier.avg_cs_from_events', 'avg_cs_from_events', (['sub_events', 'ref_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""abs"""'}), "(sub_events, ref_events, self.gti, self.segment_size,\n self.bin_time, silent=True, norm='abs')\n", (33312, 33409), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((33559, 33663), 'stingray.fourier.avg_pds_from_events', 'avg_pds_from_events', (['sub_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""abs"""'}), "(sub_events, self.gti, self.segment_size, self.bin_time,\n silent=True, norm='abs')\n", (33578, 33663), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((34188, 34208), 'numpy.mean', 'np.mean', (['cross[good]'], {}), '(cross[good])\n', (34195, 34208), True, 'import numpy as np\n'), ((34349, 34362), 'numpy.abs', 'np.abs', (['Cmean'], {}), '(Cmean)\n', (34355, 34362), True, 'import numpy as np\n'), ((34393, 34417), 'numpy.mean', 'np.mean', (['sub_power[good]'], {}), '(sub_power[good])\n', (34400, 34417), True, 'import numpy as np\n'), ((34445, 34584), 'stingray.fourier.error_on_averaged_cross_spectrum', 'error_on_averaged_cross_spectrum', (['Cmean', 'mean_sub_power', 'mean_ref_power', 'm_tot', 'sub_power_noise', 'ref_power_noise'], {'common_ref': 'common_ref'}), '(Cmean, mean_sub_power, mean_ref_power,\n m_tot, sub_power_noise, ref_power_noise, common_ref=common_ref)\n', (34477, 34584), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((17857, 17917), 'stingray.fourier.get_average_ctrate', 'get_average_ctrate', (['sub_events2', 'self.gti', 'self.segment_size'], {}), '(sub_events2, self.gti, self.segment_size)\n', (17875, 17917), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((17953, 
18003), 'stingray.fourier.poisson_level', 'poisson_level', ([], {'norm': '"""abs"""', 'meanrate': 'countrate_sub2'}), "(norm='abs', meanrate=countrate_sub2)\n", (17966, 18003), False, 'from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance\n'), ((18078, 18194), 'stingray.fourier.avg_cs_from_events', 'avg_cs_from_events', (['sub_events', 'sub_events2', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""abs"""'}), "(sub_events, sub_events2, self.gti, self.segment_size,\n self.bin_time, silent=True, norm='abs')\n", (18096, 18194), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((18561, 18581), 'numpy.mean', 'np.mean', (['cross[good]'], {}), '(cross[good])\n', (18568, 18581), True, 'import numpy as np\n'), ((18795, 18899), 'stingray.fourier.avg_pds_from_events', 'avg_pds_from_events', (['sub_events', 'self.gti', 'self.segment_size', 'self.bin_time'], {'silent': '(True)', 'norm': '"""abs"""'}), "(sub_events, self.gti, self.segment_size, self.bin_time,\n silent=True, norm='abs')\n", (18814, 18899), False, 'from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate\n'), ((19149, 19173), 'numpy.mean', 'np.mean', (['sub_power[good]'], {}), '(sub_power[good])\n', (19156, 19173), True, 'import numpy as np\n'), ((19247, 19289), 'numpy.sqrt', 'np.sqrt', (['(delta_nu_after_mean * power_noise)'], {}), '(delta_nu_after_mean * power_noise)\n', (19254, 19289), True, 'import numpy as np\n'), ((34790, 34813), 'numpy.asarray', 'np.asarray', (['[Cmean, Ce]'], {}), '([Cmean, Ce])\n', (34800, 34813), True, 'import numpy as np\n'), ((2207, 2223), 'numpy.max', 'np.max', (['ref_band'], {}), '(ref_band)\n', (2213, 2223), True, 'import numpy as np\n'), ((19362, 19394), 'numpy.abs', 'np.abs', (['(mean_power - power_noise)'], {}), '(mean_power - power_noise)\n', (19368, 19394), True, 'import numpy as np\n'), ((28877, 
28898), 'numpy.angle', 'np.angle', (['cross[good]'], {}), '(cross[good])\n', (28885, 28898), True, 'import numpy as np\n'), ((18706, 18749), 'numpy.sqrt', 'np.sqrt', (['(sub_power_noise * sub2_power_noise)'], {}), '(sub_power_noise * sub2_power_noise)\n', (18713, 18749), True, 'import numpy as np\n'), ((28610, 28647), 'stingray.gti.cross_two_gtis', 'cross_two_gtis', (['[eint]', 'self.ref_band'], {}), '([eint], self.ref_band)\n', (28624, 28647), False, 'from stingray.gti import check_separate, cross_two_gtis\n'), ((34125, 34162), 'stingray.gti.cross_two_gtis', 'cross_two_gtis', (['[eint]', 'self.ref_band'], {}), '([eint], self.ref_band)\n', (34139, 34162), False, 'from stingray.gti import check_separate, cross_two_gtis\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 22:35:02 2020
@author: hp
"""
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import pickle
from sklearn.utils import resample
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix
def scores(Y_test, y_pred):
    """Print a summary of common binary-classification metrics.

    Reports accuracy, F1, sensitivity (recall), precision and ROC AUC
    for the given true labels and predictions.
    """
    metric_table = [
        ("Accuracy", accuracy_score),
        ("F1", f1_score),
        ("Sensitivity", recall_score),
        ("Precision", precision_score),
        ("ROC AUC", roc_auc_score),
    ]
    for label, metric in metric_table:
        print(f"{label} Score is:{metric(Y_test, y_pred)}")
# --- Load training data --------------------------------------------------
# NOTE(review): the pickled 'Cleaned_data' frame is immediately overwritten
# by the CSV read below; only the CSV is actually used. Kept to preserve
# the original flow -- confirm whether the pickle load can be removed.
df = pd.read_pickle('Cleaned_data')
df = pd.read_csv('train.csv')

'''UPSAMPLING'''
# Balance the classes by resampling the minority class (Survived == 1)
# with replacement up to the 549 majority-class rows.
df_minority = df[df.Survived == 1]
df_majority = df[df.Survived == 0]
df_minority_upsampled = resample(df_minority,
                                 replace=True,
                                 n_samples=549,
                                 random_state=123)
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
df = df_upsampled

X = df.iloc[:, 1:].values
Y = df.iloc[:, -10].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42, test_size=0.2)
# Class-balance check; should be ~50/50 after the upsampling above.
df['Survived'].value_counts() / df.shape[0]

# Decision-tree baseline.
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
scores(Y_test, y_pred)

# Random-forest comparison.
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=100)
forest.fit(X_train, Y_train)
y_pred = forest.predict(X_test)
scores(Y_test, y_pred)

'''Filling in prediction values in test file'''
df2 = pd.read_csv('test.csv')
# BUG FIX: the original read ``df2 = df1`` while ``df1`` was still
# undefined (a NameError). The intent is to work on a copy of the test
# set, so that ``df2`` keeps its PassengerId column after ``df1`` drops
# it below.
df1 = df2.copy()
df1['Age'].fillna(df1['Age'].mean(), inplace=True)
df1['Fare'].fillna(df1['Fare'].median(), inplace=True)
df1.drop(labels=['Cabin', 'PassengerId', 'Ticket', 'Name'], inplace=True, axis=1)
sex = pd.get_dummies(df1['Sex'], drop_first=True)
embark = pd.get_dummies(df1['Embarked'], drop_first=True)
df1 = pd.concat([df1, sex, embark], axis=1)
df1.drop(labels=['Sex', 'Embarked'], inplace=True, axis=1)
y_pred = classifier.predict(df1)
df_ans = pd.DataFrame(y_pred)
df_ans.columns = ['Survived']
df_ans['PassengerId'] = df2['PassengerId']
df_ans.set_index('PassengerId', inplace=True)
df_ans.index.name = 'PassengerId'
df_ans.to_csv('Predicted Labels.csv')
'''Dimensionality Reduction'''
# NOTE(review): this section appears to be pasted from a different
# (Big Mart sales) notebook -- the dropped columns and the
# ``train.Item_Outlet_Sales`` target do not exist in the Titanic data
# above, so it will raise as-is. Confirm the intended data source.
from sklearn.ensemble import RandomForestRegressor
# Drop identifier columns before fitting; they carry no predictive signal.
df=df.drop(['Item_Identifier', 'Outlet_Identifier'], axis=1)
model = RandomForestRegressor(random_state=1, max_depth=10)
# One-hot encode all categorical columns.
df=pd.get_dummies(df)
# NOTE(review): ``train`` is never defined in this file -- TODO confirm
# the intended training frame.
model.fit(df,train.Item_Outlet_Sales)
features = df.columns
importances = model.feature_importances_
indices = np.argsort(importances)[-9:]  # indices of the 9 most important features
# Horizontal bar chart of feature importances, least to most important.
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
from sklearn.feature_selection import SelectFromModel
# Keep only the features whose importance exceeds the selector's default
# (mean-importance) threshold.
feature = SelectFromModel(model)
Fit = feature.fit_transform(df, train.Item_Outlet_Sales)
'''RF no tuning scores
scores(Y_test,y_pred)
Accuracy Score is:0.8954545454545455
F1 Score is:0.8909952606635071
Sensitivity Score is:0.9306930693069307
Precision Score is:0.8545454545454545
ROC AUC Score is:0.8981196438971628'''
ROC AUC Score is:0.8981196438971628''' | [
"pandas.read_csv",
"sklearn.metrics.precision_score",
"numpy.argsort",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"pandas.read_pickle",
"sklearn.ensemble.RandomForestRegressor",
"matplotlib.pyplot.xlabel",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.utils.resample",
"pa... | [((945, 975), 'pandas.read_pickle', 'pd.read_pickle', (['"""Cleaned_data"""'], {}), "('Cleaned_data')\n", (959, 975), True, 'import pandas as pd\n'), ((980, 1004), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (991, 1004), True, 'import pandas as pd\n'), ((1122, 1190), 'sklearn.utils.resample', 'resample', (['df_minority'], {'replace': '(True)', 'n_samples': '(549)', 'random_state': '(123)'}), '(df_minority, replace=True, n_samples=549, random_state=123)\n', (1130, 1190), False, 'from sklearn.utils import resample\n'), ((1312, 1359), 'pandas.concat', 'pd.concat', (['[df_majority, df_minority_upsampled]'], {}), '([df_majority, df_minority_upsampled])\n', (1321, 1359), True, 'import pandas as pd\n'), ((1466, 1520), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(X, Y, random_state=42, test_size=0.2)\n', (1482, 1520), False, 'from sklearn.model_selection import train_test_split\n'), ((1582, 1611), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (1609, 1611), False, 'from sklearn import tree\n'), ((1775, 1815), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (1797, 1815), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1958, 1981), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (1969, 1981), True, 'import pandas as pd\n'), ((2194, 2237), 'pandas.get_dummies', 'pd.get_dummies', (["df1['Sex']"], {'drop_first': '(True)'}), "(df1['Sex'], drop_first=True)\n", (2208, 2237), True, 'import pandas as pd\n'), ((2246, 2294), 'pandas.get_dummies', 'pd.get_dummies', (["df1['Embarked']"], {'drop_first': '(True)'}), "(df1['Embarked'], drop_first=True)\n", (2260, 2294), True, 'import pandas as pd\n'), ((2302, 2339), 'pandas.concat', 'pd.concat', (['[df1, sex, embark]'], {'axis': '(1)'}), '([df1, 
sex, embark], axis=1)\n', (2311, 2339), True, 'import pandas as pd\n'), ((2442, 2462), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {}), '(y_pred)\n', (2454, 2462), True, 'import pandas as pd\n'), ((2816, 2867), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(1)', 'max_depth': '(10)'}), '(random_state=1, max_depth=10)\n', (2837, 2867), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2872, 2890), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {}), '(df)\n', (2886, 2890), True, 'import pandas as pd\n'), ((3057, 3089), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importances"""'], {}), "('Feature Importances')\n", (3066, 3089), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Relative Importance"""'], {}), "('Relative Importance')\n", (3246, 3269), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3279, 3281), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3372), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['model'], {}), '(model)\n', (3365, 3372), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((3008, 3031), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (3018, 3031), True, 'import numpy as np\n'), ((621, 651), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (635, 651), False, 'from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix\n'), ((689, 713), 'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (697, 713), False, 'from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix\n'), ((760, 788), 'sklearn.metrics.recall_score', 'recall_score', 
(['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (772, 788), False, 'from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix\n'), ((833, 864), 'sklearn.metrics.precision_score', 'precision_score', (['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (848, 864), False, 'from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix\n'), ((907, 936), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (920, 936), False, 'from sklearn.metrics import f1_score, accuracy_score, roc_curve, roc_auc_score, precision_score, recall_score, log_loss, confusion_matrix\n')] |
import habitat
import cv2
import os
import time
import git
import magnum as mn
import matplotlib.pyplot as plt
import numpy as np
import math
import habitat_sim
from habitat_sim.utils import viz_utils as vut
# import quadruped_wrapper
from habitat_sim.robots import AntV2Robot
# Locate the repository root so data paths resolve regardless of the
# current working directory.
repo = git.Repo(".", search_parent_directories=True)
dir_path = repo.working_tree_dir
# NOTE(review): data_path is unused below (robot_path is hard-coded);
# presumably kept for parity with other examples -- confirm before removing.
data_path = os.path.join(dir_path, "../habitat-sim/data")
def transform_rgb_bgr(image):
    """Swap the first and third color channels (RGB <-> BGR).

    Returns a new array (integer-array indexing copies the data); the
    input image is left untouched.
    """
    channel_order = [2, 1, 0]
    return image[:, :, channel_order]
def place_agent(sim):
    """Initialize agent 0 at a fixed pose and return its transform.

    Places the agent at a hard-coded position/rotation in the scene and
    returns the scene node's transformation matrix.
    """
    state = habitat_sim.AgentState()
    state.position = [-0.15, -0.7, 1.0]
    state.rotation = np.quaternion(-0.83147, 0, 0.55557, 0)
    agent = sim.initialize_agent(0, state)
    return agent.scene_node.transformation_matrix()
def _make_camera_spec(uuid, sensor_type, resolution, position, orientation):
    """Build a single pinhole camera sensor spec with the given pose."""
    spec = habitat_sim.CameraSensorSpec()
    spec.uuid = uuid
    spec.sensor_type = sensor_type
    spec.resolution = resolution
    spec.position = position
    spec.orientation = orientation
    spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE
    return spec


def make_configuration():
    """Create the simulator configuration for the ant demo.

    Sets up a physics-enabled backend with no scene, plus three camera
    sensors: first-person color, first-person depth, and a tilted
    third-person color camera.

    Returns
    -------
    habitat_sim.Configuration
        Backend plus a single agent configuration carrying the sensors.
    """
    # simulator configuration
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene_id = "NONE"
    backend_cfg.enable_physics = True

    # Note: all sensors must have the same resolution.
    camera_resolution = [544, 720]
    # The three repeated spec stanzas from the original are factored
    # into _make_camera_spec; same uuids, types and poses.
    sensor_specs = [
        _make_camera_spec(
            "rgba_camera_1stperson",
            habitat_sim.SensorType.COLOR,
            camera_resolution,
            [0.0, 0.6, 0.0],
            [0.0, 0.0, 0.0],
        ),
        _make_camera_spec(
            "depth_camera_1stperson",
            habitat_sim.SensorType.DEPTH,
            camera_resolution,
            [0.0, 0.6, 0.0],
            [0.0, 0.0, 0.0],
        ),
        _make_camera_spec(
            "rgba_camera_3rdperson",
            habitat_sim.SensorType.COLOR,
            camera_resolution,
            [0.0, 1.0, 0.3],
            [-45, 0.0, 0.0],
        ),
    ]

    # agent configuration
    agent_cfg = habitat_sim.agent.AgentConfiguration()
    agent_cfg.sensor_specifications = sensor_specs

    return habitat_sim.Configuration(backend_cfg, [agent_cfg])
def example():
    """Run a short scripted demo with the AntV2 robot.

    Builds a bare physics scene with a static floor, loads the ant from
    its URDF, drives its leg joints through a few scripted poses while
    recording camera observations, and finally writes the recording out
    as a video.
    """
    # Note: Use with for the example testing, doesn't need to be like this on the README
    cfg = make_configuration()
    sim = habitat_sim.Simulator(cfg)
    agent_transform = place_agent(sim)

    # get the primitive assets attributes manager
    prim_templates_mgr = sim.get_asset_template_manager()
    # get the physics object attributes manager
    obj_templates_mgr = sim.get_object_template_manager()
    # get the rigid object manager
    rigid_obj_mgr = sim.get_rigid_object_manager()

    observations = []
    count_steps = 0

    # Build a flat box to act as the floor and freeze it in place.
    cube_handle = obj_templates_mgr.get_template_handles("cube")[0]
    floor = obj_templates_mgr.get_template_by_handle(cube_handle)
    floor.scale = np.array([2.0, 0.05, 2.0])

    obj_templates_mgr.register_template(floor, "floor")
    floor_obj = rigid_obj_mgr.add_object_by_template_handle("floor")
    floor_obj.motion_type = habitat_sim.physics.MotionType.KINEMATIC
    floor_obj.translation = np.array([2.50, -1, 0.5])
    floor_obj.motion_type = habitat_sim.physics.MotionType.STATIC

    # Add ant robot
    robot_path = "data/robots/ant.urdf"
    ant = AntV2Robot(robot_path, sim)
    ant.reconfigure()
    ant.base_pos = mn.Vector3(-3, 1.0, 0.2)
    ant.base_rot = math.pi / 2

    # BUG FIX: the original looped ``while True`` with its only exit (an
    # Escape keypress) commented out, leaving the video-writing code
    # below unreachable. Run a fixed number of 60 Hz physics steps
    # instead, long enough to cover the scripted poses at steps
    # 120/180/210.
    max_steps = 240
    while count_steps < max_steps:
        sim.step_physics(1.0 / 60.0)
        observations.append(sim.get_sensor_observations())
        # Scripted leg-joint poses at fixed step counts.
        if count_steps == 120:
            ant.leg_joint_pos = [0, 0, 0, 0, -0.3, 0.3, 0.3, -0.3]
        if count_steps == 180:
            ant.leg_joint_pos = [1, 1, 1, 1, -1, 1, 1, -1]
        if count_steps == 210:
            ant.leg_joint_pos = [0, 0, 0, 0, -1, 1, 1, -1]

        print("_____")
        print(count_steps)
        print(observations[-1].keys())
        # Live preview of the first-person color camera.
        cv2.imshow("RGB", transform_rgb_bgr(observations[-1]["rgba_camera_1stperson"]))
        count_steps += 1

    print("Episode finished after {} steps.".format(count_steps))

    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        "test_ant_wrapper",
        open_vid=True,
    )
if __name__ == "__main__":
example()
| [
"habitat_sim.SimulatorConfiguration",
"habitat_sim.Configuration",
"habitat_sim.Simulator",
"habitat_sim.CameraSensorSpec",
"habitat_sim.robots.AntV2Robot",
"os.path.join",
"numpy.array",
"habitat_sim.utils.viz_utils.make_video",
"git.Repo",
"numpy.quaternion",
"habitat_sim.AgentState",
"habit... | [((289, 334), 'git.Repo', 'git.Repo', (['"""."""'], {'search_parent_directories': '(True)'}), "('.', search_parent_directories=True)\n", (297, 334), False, 'import git\n'), ((380, 425), 'os.path.join', 'os.path.join', (['dir_path', '"""../habitat-sim/data"""'], {}), "(dir_path, '../habitat-sim/data')\n", (392, 425), False, 'import os\n'), ((567, 591), 'habitat_sim.AgentState', 'habitat_sim.AgentState', ([], {}), '()\n', (589, 591), False, 'import habitat_sim\n'), ((665, 703), 'numpy.quaternion', 'np.quaternion', (['(-0.83147)', '(0)', '(0.55557)', '(0)'], {}), '(-0.83147, 0, 0.55557, 0)\n', (678, 703), True, 'import numpy as np\n'), ((880, 916), 'habitat_sim.SimulatorConfiguration', 'habitat_sim.SimulatorConfiguration', ([], {}), '()\n', (914, 916), False, 'import habitat_sim\n'), ((1219, 1249), 'habitat_sim.CameraSensorSpec', 'habitat_sim.CameraSensorSpec', ([], {}), '()\n', (1247, 1249), False, 'import habitat_sim\n'), ((1736, 1766), 'habitat_sim.CameraSensorSpec', 'habitat_sim.CameraSensorSpec', ([], {}), '()\n', (1764, 1766), False, 'import habitat_sim\n'), ((2260, 2290), 'habitat_sim.CameraSensorSpec', 'habitat_sim.CameraSensorSpec', ([], {}), '()\n', (2288, 2290), False, 'import habitat_sim\n'), ((2785, 2823), 'habitat_sim.agent.AgentConfiguration', 'habitat_sim.agent.AgentConfiguration', ([], {}), '()\n', (2821, 2823), False, 'import habitat_sim\n'), ((2887, 2938), 'habitat_sim.Configuration', 'habitat_sim.Configuration', (['backend_cfg', '[agent_cfg]'], {}), '(backend_cfg, [agent_cfg])\n', (2912, 2938), False, 'import habitat_sim\n'), ((3088, 3114), 'habitat_sim.Simulator', 'habitat_sim.Simulator', (['cfg'], {}), '(cfg)\n', (3109, 3114), False, 'import habitat_sim\n'), ((3685, 3711), 'numpy.array', 'np.array', (['[2.0, 0.05, 2.0]'], {}), '([2.0, 0.05, 2.0])\n', (3693, 3711), True, 'import numpy as np\n'), ((3936, 3960), 'numpy.array', 'np.array', (['[2.5, -1, 0.5]'], {}), '([2.5, -1, 0.5])\n', (3944, 3960), True, 'import numpy as np\n'), ((4100, 
4127), 'habitat_sim.robots.AntV2Robot', 'AntV2Robot', (['robot_path', 'sim'], {}), '(robot_path, sim)\n', (4110, 4127), False, 'from habitat_sim.robots import AntV2Robot\n'), ((4169, 4193), 'magnum.Vector3', 'mn.Vector3', (['(-3)', '(1.0)', '(0.2)'], {}), '(-3, 1.0, 0.2)\n', (4179, 4193), True, 'import magnum as mn\n'), ((5135, 5236), 'habitat_sim.utils.viz_utils.make_video', 'vut.make_video', (['observations', '"""rgba_camera_1stperson"""', '"""color"""', '"""test_ant_wrapper"""'], {'open_vid': '(True)'}), "(observations, 'rgba_camera_1stperson', 'color',\n 'test_ant_wrapper', open_vid=True)\n", (5149, 5236), True, 'from habitat_sim.utils import viz_utils as vut\n')] |
# MIT License
#
# Copyright (c) 2017-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
### Find thhe max electron density and corresponding location from the ne vs x plots
data = np.genfromtxt("xenon_ne_vs_x_do-0.76mm_Id-2.3A.csv",delimiter=",",skip_header=13,names=True)
data19 = data[data['phi_wf']==1.9]
data25 = data[data['phi_wf']==2.5]
Id = 2.3 # A
print("Id,ne_max,x(ne_max)")
for data in [data19,data25]:
xvec = data['x']
yvec = data['ne']
ne_max = np.max(yvec)*1e19
idx = np.argmax(yvec)
x_max = xvec[idx] # Convert from cm to mm
print(Id,ne_max,x_max)
| [
"numpy.genfromtxt",
"numpy.argmax",
"numpy.max"
] | [((1224, 1323), 'numpy.genfromtxt', 'np.genfromtxt', (['"""xenon_ne_vs_x_do-0.76mm_Id-2.3A.csv"""'], {'delimiter': '""","""', 'skip_header': '(13)', 'names': '(True)'}), "('xenon_ne_vs_x_do-0.76mm_Id-2.3A.csv', delimiter=',',\n skip_header=13, names=True)\n", (1237, 1323), True, 'import numpy as np\n'), ((1554, 1569), 'numpy.argmax', 'np.argmax', (['yvec'], {}), '(yvec)\n', (1563, 1569), True, 'import numpy as np\n'), ((1526, 1538), 'numpy.max', 'np.max', (['yvec'], {}), '(yvec)\n', (1532, 1538), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # The Boundary Element Method (BEM)
#
#
# You can run this code directly in your browser by clicking on the rocket logo ( <i class="fas fa-rocket"></i> ) at the top of the page, and clicking 'Binder'. This will open a Jupyter Notebook in a [Binder](https://mybinder.org/) environment which is set up to contain everything you need to run the code. **Don't forget to save a local copy if you make any changes!**
#
# If you prefer, you can download the Jupyter Notebook file to run locally, by clicking the download logo ( <i class="fas fa-download"></i> ) at the top of the page and selecting '.ipynb'.
#
# If you are new to using Jupyter Notebooks, [this guide](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) will help you get started.
#
#
# ## Prerequisites
#
# To understand the basic principles explained in this tutorial, you should:
# * Have an understanding of the exterior Helmholtz scattering problem
# * Have a basic understanding of the midpoint rule, for approximating integrals
# * Be comfortable using ```numpy```
#
# ## Introduction
#
# This notebook introduces the **Boundary Element Method** (BEM), by a simple numerical example. The main idea behind is to use Green's identities to represent the (unknown) scattered wave in terms of an unknown which is defined only on the boundary of our domain. For example, if we are modelling scattering by a sphere, the unknown is a function which lives on the surface of the sphere. BEM involves applying a finite element method to approximate this unknown on the boundary, hence the name. This is advantagous for the following two reasons:
# * We have reduced the dimension of our problem by one, so meshing is considerably simpler, and fewer degrees of freedom are required
# * We do not need to worry about meshing an unbounded domian, constructing an artificial boundary, etc.
#
#
# ## Setup
#
# This example is intended to be from first principles, so we will only use methods from three of the main python libraries:
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# ## Step one: Obtain a representation for the solution in terms of boundary data
#
# For our scattering problem, we consider an incident wave $u^i(x)$ impinging on an obstacle $\Omega\subset\mathbb{R}^n$, where $n=2,3$. Our starting point is the Helmholtz equation
#
# $$\label{eq:1}
# (\Delta+k^2)u=0,
# $$
#
# where $u$ denotes the total field, and $u^s=u-u^i$ is the scattered field, which satisfies the Sommerfeld radiation condition.
#
# Applying [Green's third identity](https://en.wikipedia.org/wiki/Green's_identities#Green's_third_identity) we obtain the representation:
#
# $$
# u(x) = u^i(x) - \int_\Gamma \left[\Phi(x,y)\frac{\partial u}{\partial n}(y) - \frac{\partial \Phi(x,y)}{\partial n(y)}u(y)\right]~\mathrm{d}s(y),\quad x\in\mathbb{R}^n\setminus\Omega,
# $$
#
# where $\frac{\partial}{\partial n}$ denotes the outward normal derivative, $\Gamma$ denotes the boundary of $\Omega$, and $\Phi$ denotes the fundamental solution
#
# $$
# \Phi(x,y) = \left\{
# \begin{array}{ll}
# \frac{\mathrm{i}}{4}H^{(1)}_0(k|x-y|),&\quad n=2,\\
# \frac{\mathrm{e}^{\mathrm{i}k|x-y|}}{4\pi|x-y|},&\quad n=3,
# \end{array}
# \right.
# $$
#
# where $H^{(1)}_0$ is the [Hankel function](https://mathworld.wolfram.com/HankelFunctionoftheFirstKind.html) of the first kind order zero. You probably recognise the function for $n=3$, but if you haven't seen $H^{(1)}_0$ before, it looks like the ripples on the surface of a lake after you drop a pebble into the water:
# In[2]:
from scipy.special import hankel1 as H1
t = np.linspace(-50,50,1000)
X,Y = np.meshgrid(t,t)
ripples = plt.imshow(np.real(H1(0,np.sqrt(X**2+Y**2))),extent =[t.min(), t.max(), t.min(), t.max()])
plt.colorbar(ripples)
plt.title('Real part of H_0^{(1)}(|x|))');
# For this simple example, we will consider scattering by a circle in two-dimensions, with sound-soft aka Dirichlet boundary conditions. This means that $u=0$ on $\Gamma$, so the BIE above simplifies to
#
# $$
# u(x) = u^i(x) - \int_\Gamma \Phi(x,y)\frac{\partial u}{\partial n}(y)~\mathrm{d}s(y),\quad x\in\mathbb{R}^2\setminus\Omega.
# $$
#
# The integral may be interpreted as lots of tiny speakers $\Phi(x,y)$ on our surface $\Gamma$, whilst $\frac{\partial u}{\partial n}(y)$ can be interpreted as the volume of these speakers. We will choose our incoming wave to be an incident plane wave, $u^i(x):=\mathrm{e}^{\mathrm{i} k x\cdot d}$, where $d\in\mathbb{R}^2$ is a unit vector which represents the direction of propagation.
# In[3]:
k = 5.0 # wavenumber
d = np.array([1.0,0.0]) # incident direction
# In[4]:
Phi = lambda x,y: 1j/4*H1(0,k*np.linalg.norm(np.array(x)-np.array(y)))
ui = lambda x: np.exp(1j*k*np.dot(x,d))
# ## Step two: Reformulate as a problem on the boundary $\Gamma$
# Remember, our long-term aim is to approximate $\frac{\partial u}{\partial n}$, then we can plug that approximation into the above equation, to obtain an approximation for $u(x)$. To get an equation we can solve, we take the limit of the above equation as $x$ tends to $\Gamma$ and rearrange, to obtain a **boundary integral equation** (BIE):
#
# $$
# \int_\Gamma \Phi(x,y)\frac{\partial u}{\partial n}(y)~\mathrm{d}s(y)=u^i(x),\quad x\in\Gamma.
# $$
#
# A BEM is an approximation of an equation of this type, defined on the boundary $\Gamma$. Before approximating, we can parametrise the circle $\Gamma$ by $\theta\in[0,2\pi)\to x\in\Gamma$ in the natural way, $x(\theta)=[\cos(\theta),\sin(\theta)]$, to rewrite the above BIE in terms of a one-dimensional parameter
#
# $$
# \int_0^{2\pi} \tilde\Phi(\theta,\vartheta)\frac{\partial u}{\partial n}(y(\vartheta))~\mathrm{d}\vartheta=u^i(x(\theta)),\quad \theta\in[0,2\pi),
# $$
#
# where $\tilde\Phi(\theta,\vartheta):=\Phi(x(\theta),y(\vartheta))$ is purely to keep things a bit simpler.
# There are many BEMs, but for the purpose of this example, I will choose the simplest one I can think of.
# In[5]:
circle_map = lambda theta: [np.cos(theta), np.sin(theta)]
ui_angular = lambda theta: ui(circle_map(theta))
Phi_tilde = lambda theta,vartheta: Phi(circle_map(theta),circle_map(vartheta))
# ## Step three: Approximate the boundary data
#
# Choose $N$ equispaced points on the circle $\theta_n=nh$ where $h:=2\pi/N$ for $n=0,\ldots,N-1$. For our approximation, we specify that the above BIE must hold exactly at these points. This is known as a **collocation BEM**, and $\theta_n$ are the **collocation points**:
#
# $$
# \int_0^{2\pi} \tilde\Phi(\theta_n,\vartheta)v_h(\vartheta)~\mathrm{d}\vartheta=u^i(x(\theta_n)),\quad n=0,\ldots,N-1,
# $$
#
# where we choose the details of our approximation $v^{(h)}(\theta)\approx\frac{\partial u}{\partial n}(y(\theta))$ next.
# In[6]:
N=80 # number of collocation points
theta = np.linspace(0,2*np.pi,N,endpoint=False) # equispaced points on circle
h = 2*np.pi/N # meshwidth
plt.plot(np.cos(theta),np.sin(theta),'k.-')
plt.title('Collocation points on circle');
# We will use a [piecewise constant](https://mathworld.wolfram.com/PiecewiseConstantFunction.html) approximation $v^{(h)}(\theta)$, such that $v^{(h)}(\theta)=v_m$ for $\theta\in[\theta_m-h/2,\theta_m+h/2]$ and $v^{(h)}(\theta)=0$ otherwise, for $m=1,\ldots,N$. Note that the values $v_m$ are currently unknown. A piecewise constant approximation is sometimes referred to as $h$-BEM. So the full name of this method is **collocation $h$-BEM**, and it can be expressed in the following form:
#
# $$
# \sum_{m=1}^Nv_m\int_{\theta_m-h/2}^{\theta_m+h/2} \tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta=u^i(x(\theta_n)),\quad n=0,\ldots,N-1.
# $$
#
# We can represent the above equation as a linear system for the unknowns $v_m$:
#
# $$A\mathbf{v}=\mathbf{u},$$
#
# where $A_{mn}:=\int_{\theta_m-h/2}^{\theta_m+h/2}\tilde\Phi(\theta_m,\vartheta)~\mathrm{d}\vartheta$, and $u_n := u^i(x(\theta_n))$. Even in this simple example, I hope it is clear that efficient methods for evaluating singular integrals play a key role in BEMs. The Nystrom variant of BEM is fast and simple (perfect for this example). The idea is to approximate (almost) each integral $A_{mn}:=\int_{\theta_m-h/2}^{\theta_m+h/2}\tilde\Phi(\theta_n,\vartheta)~\mathrm{d}\vartheta$ by a one-point quadrature rule, which means we can use our collocation points as our quadrature points. This gives
#
# $$A_{mn}= h\tilde\Phi(\theta_n,\theta_m)+O(h^2),\quad\text{for }m\neq n,$$
#
# where $O(h^2)$ means the error is bounded above by $Ch^2$, for some constant $C$ and sufficiently small $h$.
#
# But we must be careful, a one-point quadrature rule for $m=n$ gives $h\Phi(\theta_n,\theta_m)=\infty$, since the Hankel function is unbounded at zero! So we need something a little more sophisticated for the diagonal elements.
#
# From DLMF [(10.4.3)](https://dlmf.nist.gov/10.4.3), [(10.8.2)](https://dlmf.nist.gov/10.8.2), [(10.2.2)](https://dlmf.nist.gov/10.2#E2), we can consider the first term in the asymptotic expansion of the Hankel function, integrate the $\log$ exactly, to write
#
# $$
# \int_{\theta_n-h/2}^{\theta_m+h/2}\tilde\Phi(\theta_m,\vartheta)~\mathrm{d}\vartheta
# =
# \frac{\mathrm{i}h\left(2\mathrm{i}\gamma+2\mathrm{i}\log(hk/4)+\mathrm{i}\pi-2\mathrm{i}\right)}{4\pi}+O(h^{2-\epsilon})
# $$
#
# where $\gamma\approx0.577$ is Euler's number and $\epsilon$ is any number in $(0,1)$.
#
# Now we can construct the matrix $A$:
# In[7]:
eulergamma = 0.57721566
singular_diagonal = lambda h: 1j*h*(2j*eulergamma + 2j*np.log(h*k/4)+1j*np.pi-2j)/(4*np.pi)
# construct matrix
A = np.zeros((N,N),dtype=complex)
for n in range(N):
for m in range(N):
if n==m:
A[m,n] = singular_diagonal(h)
else:
A[m,n] = h*Phi_tilde(theta[n],theta[m])
# construct right-hand side vector
u = np.zeros(N,dtype=complex)
for n in range(N):
u[n] = ui_angular(theta[n])
# solve linear system to get values of piecewise constant approximation:
v = np.linalg.solve(A,u)
# In[8]:
plt.plot(theta,np.real(v))
plt.title('Real part of approximation to normal derivative');
# ## Step four: Use approximate boundary data in representation formula
#
# Specifically: we will use the approximate boundary data **from step three** in the representation formula **from step one**.
#
# Plugging $v_h$ into the representation formula, and paramerising in the same way as before, gives
#
# $$
# u(x) \approx u_h(x) := u^i(x) - \int_0^{2\pi}\Phi(x,y(\theta))v_h(\theta)~\mathrm{d}\theta,\quad x\in\mathbb{R}^2\setminus\Omega.
# $$
#
# To be quick, we can use a one-point quadrature rule to evaluate this integral:
#
# $$
# u_h(x)\approx u^i(x) - h\sum_{m=1}^Nv_n\Phi(x,y(\theta_m)),\quad x\in\mathbb{R}^2\setminus\Omega.
# $$
#
# In[9]:
# create a method which approximates the solution at a point in the scattering domain
def sol_approx(x):
val = ui(x)
for n in range(N):
val -= h*Phi(x,circle_map(theta[n]))*v[n]
return val
# In[10]:
num_pixels = 100
t = np.linspace(-3,3,num_pixels)
X,Y = np.meshgrid(t,t)
u_h = np.zeros((num_pixels,num_pixels),dtype=complex)
for i in range(num_pixels):
for j in range(num_pixels):
if (X[i,j]**2+Y[i,j]**2)>1:
u_h[i,j]=sol_approx([X[i,j],Y[i,j]])
sol = plt.imshow(np.real(u_h),extent =[t.min(), t.max(), t.min(), t.max()])
plt.colorbar(sol)
plt.title('Real part of approximation to solution');
# ## Drawbacks of BEM compared with FEM
# As mentioned in the introduction, the key advantage of BEM is that reduces the dimension of the problem by one. We saw that this resuls in a linear system, which is (usually) significantly smaller in size than we would expect with other methods. But we don't get this for free, there are two main drawbacks:
# * The matrix entries requires the approximation singular integrals, and this can be difficult to do efficiently.
# * The matrix will be dense.
#
# The first of these two points is the main difficulty when implementing a BEM. If possible, use BEM software such as [bempp](https://bempp.com), where quadrature has been implemented carefully and efficiently. If you are hellbent on implementing your own BEM, get your quadrature routines from a colleage who has tried and tested them for similar problems.
#
# ## Further details - variations on BEM
# We have seen one example, but there are several other ways to implement a boundary element method. I will summarise these here:
#
# ### Choice of boundary condition
# We used sound-soft/Dirichlet boundary condition for this example. Different choices of boundary condition will lead to a different BIE forrmulation.
#
# ### Choice of BIE
# We used the BIE which arose naturally when we moved $x$ onto the boundary, in the sound-soft problem. This was the simplest to explain, however there exist values of $k$ for which this BIE is ill-posed, in which case our BEM has no chance of being accurate. Other BIEs exist for the same problem, some of which are well-posed for all values of $k$. The most commonly used is the **standard combined formulation**, which is well-posed for any $k$, and only requires a few extra lines of code.
#
# The general form of a BIE is:
#
# $$
# Kv
# =f,\quad\text{on }\Gamma
# $$
#
# where $K$ is an integral opeartor, which map functions on $\Gamma$ to functions on $\Gamma$, $f$ and $g$ are known. The unknown is $v$, which may be $v=\frac{\partial u}{\partial n}$ (sound-soft), $v=u$ (sound-hard), or $v=\left(\begin{array}{c}
# u\\\frac{\partial u}{\partial n}
# \end{array}\right)$ (impedance).
# ### Choice of basis
# We approximated the boundary data using a piecewise constant function. Higher degree polynomials can be used on a piecewise mesh, and on smooth obstacles such as these a fourier basis could be used, which removes the need for a mesh entirely. For obstacles with corners, the boundary data will be singular towards the corners, so graded meshes can be used. For certain geometries, specialised bases can be used, which incorporate known singular or oscillatory behaviour, to avoid grading.
#
# ### Choice of projection
# We used a collocation BEM which forces the BIE to hold exactly at a fixed set of points, written generally this is:
#
# $$
# \sum_{m=1}^N K\varphi_m(x_n)=f(x_n),\quad\text{for }n=1,\ldots,N
# $$
#
# The main alternative is Galerkin BEM, which forces the BIE to hold in a weak sense when tested against our approximation space, like so:
#
# $$
# \sum_{m=1}^N(K\varphi_m,\varphi_n) = (f,\varphi_n),\quad\text{for }n=1,\ldots,N,
# $$
#
# where $(f,g)=\int_\Gamma f(x)\overline{g}(x)\mathrm{d}s(x)$.
#
# Galerkin BEM has the advantage that we have guarenteed convergence for most problems, wheras collocation problems can be ill-posed for certain choices of collocation points, and nobody has a general rule for choosing them in a reliable way. The disadvantage of Galerkin is that the implementation requires an extra integral. So when $n=2$ Galerkin has two-dimensional integrals, and when $n=3$ Galerkin has four dimensional integrals. For this reason, engineers prefer collocation, and mathematicians perfer Galerkin.
#
# Because of its efficiency, researchers are often looking for ways to tweak collocation so that its results are as accurate/reliable as Galerkin, but without the nasty double integrals. A well-established technique is the use of CHIEF points. Points inside the scatterer, where the field is known to be zero, are added to the approximation problem. A more recent area of research is [*oversampled collocation*](https://arxiv.org/abs/2103.17212), where the number of collocation points is larger than the number of basis functions. This can overcome the risks asociated with collocation.
#
# ### Choice of quadrature
# In the coded example, we used a one-point quadrature rule for our integrals, which is the most basic approximation concievable. For smooth integrands $(m\neq n)$, [Gauss-Legendre quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss–Legendre_quadrature) is very popular in practice, as this converges much faster. In higher dimensional integrals, a popular approach is to use Gauss quadrature in each direction. This is sub-optimal, cubature rules are the most efficient way to do this, but are rarely used in practice.
#
# For singular integrals $(m=n)$, grading can be used as a one-size-fits all approach. However, we often know the precise singular behaviour, so grading can be overkill. A more informed approach is that of singularity subtraction, where the singular part of the integrand is evaluated analytically, and the remaining part is evaluated using standard quadrature. A second informed approach is to use generalised Gaussian quadrature, which is designed to be accurate for integrals containing a certain type of singularity.
#
# For singular double integrals, when the singularity is along the diagonal of the integration domain, the Duffy transform can be used to convert to two integrals over a square/cube/hypercube with singularities at the edges, making it more amenable to techniques for 1D singular integrals.
# ## Summary
#
# * Certain scattering problems can be reforumlated as a problem on the boundary
# * A BEM can be used to solve this problem on the boundary
# * Certain BIEs and/or certain choices of collocation points can lead to numerical instabilities
# * Care must be taken to ensure all integrals, especially the singular ones, are being evaluated to a sufficiet accuracy
# * For this reason Galerkin (as opposd to collocation) BEM is harder to implement, and typically slower to run. But the results are more reliable
| [
"numpy.linalg.solve",
"matplotlib.pyplot.title",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"numpy.log",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.real",
"numpy.cos",
"numpy.dot",
"numpy.sin",
"numpy.meshgrid"
] | [((3635, 3661), 'numpy.linspace', 'np.linspace', (['(-50)', '(50)', '(1000)'], {}), '(-50, 50, 1000)\n', (3646, 3661), True, 'import numpy as np\n'), ((3666, 3683), 'numpy.meshgrid', 'np.meshgrid', (['t', 't'], {}), '(t, t)\n', (3677, 3683), True, 'import numpy as np\n'), ((3784, 3805), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ripples'], {}), '(ripples)\n', (3796, 3805), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3847), 'matplotlib.pyplot.title', 'plt.title', (['"""Real part of H_0^{(1)}(|x|))"""'], {}), "('Real part of H_0^{(1)}(|x|))')\n", (3815, 3847), True, 'import matplotlib.pyplot as plt\n'), ((4621, 4641), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (4629, 4641), True, 'import numpy as np\n'), ((6843, 6887), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'N'], {'endpoint': '(False)'}), '(0, 2 * np.pi, N, endpoint=False)\n', (6854, 6887), True, 'import numpy as np\n'), ((6983, 7024), 'matplotlib.pyplot.title', 'plt.title', (['"""Collocation points on circle"""'], {}), "('Collocation points on circle')\n", (6992, 7024), True, 'import matplotlib.pyplot as plt\n'), ((9599, 9630), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'complex'}), '((N, N), dtype=complex)\n', (9607, 9630), True, 'import numpy as np\n'), ((9836, 9862), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'complex'}), '(N, dtype=complex)\n', (9844, 9862), True, 'import numpy as np\n'), ((9995, 10016), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'u'], {}), '(A, u)\n', (10010, 10016), True, 'import numpy as np\n'), ((10056, 10116), 'matplotlib.pyplot.title', 'plt.title', (['"""Real part of approximation to normal derivative"""'], {}), "('Real part of approximation to normal derivative')\n", (10065, 10116), True, 'import matplotlib.pyplot as plt\n'), ((11027, 11057), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'num_pixels'], {}), '(-3, 3, num_pixels)\n', (11038, 11057), True, 'import numpy as np\n'), ((11062, 11079), 'numpy.meshgrid', 
'np.meshgrid', (['t', 't'], {}), '(t, t)\n', (11073, 11079), True, 'import numpy as np\n'), ((11085, 11134), 'numpy.zeros', 'np.zeros', (['(num_pixels, num_pixels)'], {'dtype': 'complex'}), '((num_pixels, num_pixels), dtype=complex)\n', (11093, 11134), True, 'import numpy as np\n'), ((11363, 11380), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sol'], {}), '(sol)\n', (11375, 11380), True, 'import matplotlib.pyplot as plt\n'), ((11381, 11432), 'matplotlib.pyplot.title', 'plt.title', (['"""Real part of approximation to solution"""'], {}), "('Real part of approximation to solution')\n", (11390, 11432), True, 'import matplotlib.pyplot as plt\n'), ((6948, 6961), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6954, 6961), True, 'import numpy as np\n'), ((6962, 6975), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6968, 6975), True, 'import numpy as np\n'), ((10044, 10054), 'numpy.real', 'np.real', (['v'], {}), '(v)\n', (10051, 10054), True, 'import numpy as np\n'), ((11304, 11316), 'numpy.real', 'np.real', (['u_h'], {}), '(u_h)\n', (11311, 11316), True, 'import numpy as np\n'), ((6044, 6057), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6050, 6057), True, 'import numpy as np\n'), ((6059, 6072), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6065, 6072), True, 'import numpy as np\n'), ((3717, 3741), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (3724, 3741), True, 'import numpy as np\n'), ((4773, 4785), 'numpy.dot', 'np.dot', (['x', 'd'], {}), '(x, d)\n', (4779, 4785), True, 'import numpy as np\n'), ((4720, 4731), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4728, 4731), True, 'import numpy as np\n'), ((4732, 4743), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4740, 4743), True, 'import numpy as np\n'), ((9538, 9555), 'numpy.log', 'np.log', (['(h * k / 4)'], {}), '(h * k / 4)\n', (9544, 9555), True, 'import numpy as np\n')] |
# A collection of common functions used in the creation and manipulation of the data for this project.
from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM
import comp_vis.img_tools as it
import numpy as np
import sys
def images_to_data(images, label, already_cropped=True):
'''
# TODO: Should this be fixed or modular? I think just returning the most successful arrangement of data from our tets is best, but who knows!
:param images: A list of OpenCV images
:param already_cropped: if these images have already been cropped. If not, they will be.
:param label: An integer label for the data.
:return: A numpy matrix of the data, with the label if any.
'''
# Crop the images appropriately
cropped_images = []
if already_cropped:
cropped_images = images
else:
for image in images:
cropped_img = it.crop_image(image)
if np.shape(cropped_img) != np.shape(image):
cropped_images += [cropped_img]
if len(cropped_images) == 0:
sys.stderr.write('Error: No objects detected in images.\n(Did you mistakenly set already_cropped to false?)')
img_avgs = [it.average_color(image) for image in cropped_images]
img_dims = it.get_images_dimensions(cropped_images, ordered=True)
data = [img_avgs[i] + list(img_dims[i]) + [label] for i in range(len(images))]
data = np.array([np.array(x) for x in data])
return data
def string_to_model(approach_name):
'''
:param approach_name: The string name of the model to be returned
:return: The model (subset of the Approach class) with a name matching approach_name
:raises ValueError: Raises if approach_name not recognized
'''
if approach_name == "perceptron":
return Perceptron()
elif approach_name == "multiclass":
return Multiclass_Logistic_Regression()
elif approach_name == "sklearn_svm":
return Sklearn_SVM()
else:
raise ValueError('Model type ' + approach_name + ' not recognized.')
def training_and_testing_sep(data, training_fraction):
'''
:param data: Numpy matrix of data
:param training_fraction: Float between 0.00 and 1.00 denoting the size of the training set
:return: A training and testing set.
'''
# Randomly shuffle the data
np.random.shuffle(data)
training_size = int(training_fraction*np.shape(data)[0])
training_data, testing_data = data[0:training_size], data[training_size:]
return training_data, testing_data
| [
"approaches.approach.Multiclass_Logistic_Regression",
"numpy.shape",
"comp_vis.img_tools.average_color",
"approaches.approach.Sklearn_SVM",
"comp_vis.img_tools.crop_image",
"sys.stderr.write",
"numpy.array",
"comp_vis.img_tools.get_images_dimensions",
"approaches.approach.Perceptron",
"numpy.rando... | [((1276, 1330), 'comp_vis.img_tools.get_images_dimensions', 'it.get_images_dimensions', (['cropped_images'], {'ordered': '(True)'}), '(cropped_images, ordered=True)\n', (1300, 1330), True, 'import comp_vis.img_tools as it\n'), ((2353, 2376), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (2370, 2376), True, 'import numpy as np\n'), ((1208, 1231), 'comp_vis.img_tools.average_color', 'it.average_color', (['image'], {}), '(image)\n', (1224, 1231), True, 'import comp_vis.img_tools as it\n'), ((1809, 1821), 'approaches.approach.Perceptron', 'Perceptron', ([], {}), '()\n', (1819, 1821), False, 'from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM\n'), ((906, 926), 'comp_vis.img_tools.crop_image', 'it.crop_image', (['image'], {}), '(image)\n', (919, 926), True, 'import comp_vis.img_tools as it\n'), ((1081, 1203), 'sys.stderr.write', 'sys.stderr.write', (['"""Error: No objects detected in images.\n(Did you mistakenly set already_cropped to false?)"""'], {}), '(\n """Error: No objects detected in images.\n(Did you mistakenly set already_cropped to false?)"""\n )\n', (1097, 1203), False, 'import sys\n'), ((1436, 1447), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1444, 1447), True, 'import numpy as np\n'), ((1878, 1910), 'approaches.approach.Multiclass_Logistic_Regression', 'Multiclass_Logistic_Regression', ([], {}), '()\n', (1908, 1910), False, 'from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM\n'), ((942, 963), 'numpy.shape', 'np.shape', (['cropped_img'], {}), '(cropped_img)\n', (950, 963), True, 'import numpy as np\n'), ((967, 982), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (975, 982), True, 'import numpy as np\n'), ((1967, 1980), 'approaches.approach.Sklearn_SVM', 'Sklearn_SVM', ([], {}), '()\n', (1978, 1980), False, 'from approaches.approach import Multiclass_Logistic_Regression, Perceptron, Sklearn_SVM\n'), ((2419, 2433), 
'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (2427, 2433), True, 'import numpy as np\n')] |
# encoding: utf-8
# USE :
# python setup_project.py build_ext --inplace
#
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
ext_modules = [Extension("SoundEffectLib", ["SoundEffectLib.pyx"],
include_dirs=[numpy.get_include()], language="c++"),
]
#
# ext_modules = [
# Extension("FadeEffect", ["FadeEffect.pyx"], include_dirs=[numpy.get_include()]),
# Extension("Validation", ["Validation.pyx"], include_dirs=[numpy.get_include()])
# ]
setup(
name="SoundServer",
cmdclass={"build_ext": build_ext},
ext_modules=ext_modules
)
| [
"numpy.get_include",
"warnings.filterwarnings",
"distutils.core.setup"
] | [((228, 290), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (251, 290), False, 'import warnings\n'), ((292, 349), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (315, 349), False, 'import warnings\n'), ((749, 839), 'distutils.core.setup', 'setup', ([], {'name': '"""SoundServer"""', 'cmdclass': "{'build_ext': build_ext}", 'ext_modules': 'ext_modules'}), "(name='SoundServer', cmdclass={'build_ext': build_ext}, ext_modules=\n ext_modules)\n", (754, 839), False, 'from distutils.core import setup\n'), ((450, 469), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (467, 469), False, 'import numpy\n')] |
import numpy as np
jit = lambda x: x
# Make things faster? volume() certainly become faster, but ensure_closed,
# which is more critical, does not.
# import numba
# jit = numba.jit
# Interesting methods:
# - remove unused vertices, but can be obtained by m = Mesh(m.get_flat_vertices())
# - split the different connected objects into multiple mesh objects.
# - combine mesh objects into one.
# - squash so that anything on the inside gets removed, glueing intersection shapes together.
class Mesh:
""" A class to represent a geometric mesh.
Can be instantiated as:
* Mesh(vertices, faces)
* Mesh(vertices) # the implicit will be automatically detected
Winding: this class adopts the right hand-rule to determine what
is inside or out. This is similar to STL and e.g. Blender (but
different from Unity). It means that the winding is in the direction
of the fingers of your right hand (i.e. counter clockwise) and the
normal (i.e. outside) will be in the direction of your thumb.
"""
def __init__(self, vertices, faces=None, v2f=None):
if faces is None:
self._from_vertices(vertices)
else:
self._vertices = np.asarray(vertices, dtype=np.float32)
self._faces = np.asarray(faces, dtype=np.int32)
if v2f is None:
self._v2f = self._calculate_v2f(self._faces)
else:
self._v2f = v2f
self.validate()
def __repr__(self):
t = "<Mesh with {} vertices and {} faces at 0x{}>"
return t.format(len(self._vertices), len(self._faces), hex(id(self)).upper())
def get_vertices_and_faces(self):
""" Get the vertices and faces arrays.
These are the original (internal) arrays, don't edit!
"""
return self._vertices, self._faces
@jit
def get_flat_vertices(self):
""" Get a representation of the mesh as flat vertices, e.g. to
export to STL.
"""
faces = self._faces
vertices = self._vertices
flat_vertices = np.zeros((0, 3), np.float32)
for fi in range(len(faces)):
vi1, vi2, vi3 = faces[fi, 0], faces[fi, 1], faces[fi, 2]
append3(flat_vertices, vertices[vi1])
append3(flat_vertices, vertices[vi2])
append3(flat_vertices, vertices[vi3])
return flat_vertices
@jit
def _calculate_v2f(self, faces):
""" Calculate the v2f map from the given faces.
"""
v2f = {}
for fi in range(len(faces)):
vi1, vi2, vi3 = faces[fi, 0], faces[fi, 1], faces[fi, 2]
v2f.setdefault(vi1, []).append(fi)
v2f.setdefault(vi2, []).append(fi)
v2f.setdefault(vi3, []).append(fi)
return v2f
@jit
def _from_vertices(self, vertices_in):
""" Create a mesh from only the vertices (e.g. from STL) by
recombining equal vertices into faces.
"""
self._vertices = vertices = np.zeros((0, 3), np.float32)
self._faces = faces = np.zeros((0, 3), np.int32)
self._v2f = v2f = {}
xyz2f = {}
if not(vertices_in.ndim == 2 and vertices_in.shape[1] == 3):
raise ValueError("Vertices must be an Nx3 array.")
if len(vertices_in) % 3 != 0:
raise ValueError("There must be a multiple of 3 vertices.")
for fi in range(len(vertices_in) // 3):
fi2 = len(faces)
face = []
for vi in (fi * 3 + 0, fi * 3 + 1, fi * 3 + 2):
xyz = vertices_in[vi]
xyz = float(xyz[0]), float(xyz[1]), float(xyz[2])
if xyz in xyz2f:
vi2 = xyz2f[xyz] # re-use vertex
else:
vi2 = len(vertices) # new vertex
append3(vertices, xyz)
xyz2f[xyz] = vi2
face.append(vi2)
faceslist = v2f.setdefault(vi2, [])
faceslist.append(fi2)
append3(faces, face)
@jit
def validate(self):
""" perform basic validation on the mesh.
"""
assert isinstance(self._vertices, np.ndarray)
assert self._vertices.ndim == 2 and self._vertices.shape[1] == 3
assert isinstance(self._faces, np.ndarray)
assert self._faces.ndim == 2 and self._faces.shape[1] == 3
assert isinstance(self._v2f, dict)
# The vertices in faces all exist
vertices_in_faces = set()
all_vertices = set(range(len(self._vertices)))
for fi in range(len(self._faces)):
for i in range(3):
vertices_in_faces.add(self._faces[fi, i])
if vertices_in_faces.difference(all_vertices):
raise ValueError("Some faces refer to nonexisting vertices")
# NOTE: there may still be unused vertices!
# All vertices are in at least 3 faces. This is a basic sanity check
# but does not mean that the mesh is closed!
counts = dict()
for vi, faces in self._v2f.items():
counts[len(faces)] = counts.get(len(faces), 0) + 1
for count in counts.keys():
if count < 3:
raise ValueError("was expecting all vertices to be in at least 3 faces.")
@jit
def ensure_closed(self):
""" Ensure that the mesh is closed, that all faces have the
same winding ,and that the winding follows the right hand rule
(by checking that the volume is positive). It is recommended
to call this on incoming data.
Returns the number of faces that were changed to correct winding.
"""
vertices = self._vertices
faces = self._faces
v2f = self._v2f
faces_to_check = set(range(len(faces)))
count_reversed = 0
while faces_to_check:
front = set([faces_to_check.pop()])
while front:
fi_check = front.pop()
vi1, vi2, vi3 = faces[fi_check, 0], faces[fi_check, 1], faces[fi_check, 2]
neighbour_per_edge = [0, 0, 0]
neighbour_faces = set()
neighbour_faces.update(v2f[vi1])
neighbour_faces.update(v2f[vi2])
neighbour_faces.update(v2f[vi3])
for fi in neighbour_faces:
vj1, vj2, vj3 = faces[fi, 0], faces[fi, 1], faces[fi, 2]
matching_vertices = {vj1, vj2, vj3}.intersection({vi1, vi2, vi3})
if len(matching_vertices) >= 2:
if {vi1, vi2} == matching_vertices:
neighbour_per_edge[0] += 1
if fi in faces_to_check:
faces_to_check.discard(fi)
front.add(fi)
if ((vi1 == vj1 and vi2 == vj2) or
(vi1 == vj2 and vi2 == vj3) or
(vi1 == vj3 and vi2 == vj1)):
count_reversed += 1
faces[fi, 1], faces[fi, 2] = int(faces[fi, 2]), int(faces[fi, 1])
elif {vi2, vi3} == matching_vertices:
neighbour_per_edge[1] += 1
if fi in faces_to_check:
faces_to_check.discard(fi)
front.add(fi)
if ((vi2 == vj1 and vi3 == vj2) or
(vi2 == vj2 and vi3 == vj3) or
(vi2 == vj3 and vi3 == vj1)):
count_reversed += 1
faces[fi, 1], faces[fi, 2] = int(faces[fi, 2]), int(faces[fi, 1])
elif {vi3, vi1} == matching_vertices:
neighbour_per_edge[2] += 1
if fi in faces_to_check:
faces_to_check.discard(fi)
front.add(fi)
if ((vi3 == vj1 and vi1 == vj2) or
(vi3 == vj2 and vi1 == vj3) or
(vi3 == vj3 and vi1 == vj1)):
count_reversed += 1
faces[fi, 1], faces[fi, 2] = int(faces[fi, 2]), int(faces[fi, 1])
# Now that we checked all neighbours, check if we have a neighbour on each edge.
# If this is so for all faces, we know that the mesh is closed. The mesh
# can still have weird crossovers or parts sticking out (e.g. a Klein bottle).
if neighbour_per_edge != [1, 1, 1]:
if 0 in neighbour_per_edge:
msg = "There is a hole in the mesh at face {} {}".format(fi_check, neighbour_per_edge)
else:
msg = "To many neighbour faces for face {} {}".format(fi_check, neighbour_per_edge)
#print("WARNING:", msg)
raise ValueError(msg)
# todo: this check should really be done to each connected component within the mesh.
# For now we assume that the mesh is on single object.
# Reverse all?
if self.volume() < 0:
faces[:,1], faces[:,2] = faces[:,2].copy(), faces[:,1].copy()
count_reversed = len(faces) - count_reversed
return count_reversed
@jit
def volume(self):
""" Calculate the volume of the mesh. You probably want to run ensure_closed()
on untrusted data when using this.
"""
vertices = self._vertices
faces = self._faces
vol1 = 0
# vol2 = 0
for fi in range(len(faces)):
vi1, vi2, vi3 = faces[fi, 0], faces[fi, 1], faces[fi, 2]
vol1 += volume_of_triangle(vertices[vi1], vertices[vi2], vertices[vi3])
# vol2 += volume_of_triangle(vertices[vi1] + 10, vertices[vi2] + 10, vertices[vi3] + 10)
# # Check integrity
# err_per = (abs(vol1) - abs(vol2)) / max(abs(vol1) + abs(vol2), 0.000000001)
# if err_per > 0.1:
# raise RuntimeError("Cannot calculate volume, the mesh looks not to be closed!")
# elif err_per > 0.001:
# print("WARNING: maybe the mesh is not completely closed?")
return vol1
def cut_plane(self, plane):
""" Cut the part of the mesh that is in behind the given plane.
The plane is a tuple (a, b, c, d), such that a*x + b*y + c*z + d = 0.
The (a, b, c) part can be seen as the plan's normal. E.g. (0, 0, 1, -1)
represents the plane that will cut everything below z=1.
"""
# It seems most natural to remove the part *behind* the given plane,
# so that the plane kinda forms the "lid" of the hole that is created.
# This mesh
faces = self._faces
vertices = self._vertices
# Prepare for creating a new mesh
new_faces = np.zeros((0, 3), np.int32) # Build up from scratch
new_vertices = self._vertices.copy() # Start with full, decimate later
reused_rim_vertices = set()
edge_to_vertex_index = {}
# Get signed distance of each vertex to the plane
signed_distances = signed_distance_to_plane(vertices, plane)
def get_new_vertex_id(vi1, vi2):
key = min(vi1, vi2), max(vi1, vi2) # todo: if we ensure the order, we can omit this
try:
return edge_to_vertex_index[key]
except KeyError:
d1, d2 = abs(signed_distances[vi1]), abs(signed_distances[vi2])
w1, w2 = d2 / (d1 + d2), d1 / (d1 + d2)
new_vi = len(new_vertices)
append3(new_vertices, w1 * vertices[vi1] + w2 * vertices[vi2])
edge_to_vertex_index[key] = new_vi
return new_vi
# Now iterate over the faces (triangles), and check each edge. If the two
# points are on different sides of the plane, then we interpolate on the
# edge to get the exact spot where the edge intersects the plane.
for fi in range(len(faces)): # for each face index
vi1, vi2, vi3 = faces[fi, 0], faces[fi, 1], faces[fi, 2]
s1, s2, s3 = signed_distances[vi1], signed_distances[vi2], signed_distances[vi3]
if s1 < 0 and s2 < 0 and s3 < 0:
pass # The whole face is to be dropped
elif s1 >= 0 and s2 >= 0 and s3 >= 0:
# The whole face is to be included
append3(new_faces, faces[fi])
elif s1 >= 0 and s2 < 0 and s3 < 0:
a = vi1
b = get_new_vertex_id(vi1, vi2)
c = get_new_vertex_id(vi1, vi3)
append3(new_faces, (a, b, c))
elif s2 >= 0 and s3 < 0 and s1 < 0:
a = vi2
b = get_new_vertex_id(vi2, vi3)
c = get_new_vertex_id(vi2, vi1)
append3(new_faces, (a, b, c))
elif s3 >= 0 and s1 < 0 and s2 < 0:
a = vi3
b = get_new_vertex_id(vi3, vi1)
c = get_new_vertex_id(vi3, vi2)
append3(new_faces, (a, b, c))
elif s1 >= 0 and s2 >= 0 and s3 < 0:
a = vi1
b = vi2
c = get_new_vertex_id(vi1, vi3)
d = get_new_vertex_id(vi2, vi3)
append3(new_faces, (a, d, c))
append3(new_faces, (b, d, a))
elif s2 >= 0 and s3 >= 0 and s1 < 0:
a = vi2
b = vi3
c = get_new_vertex_id(vi2, vi1)
d = get_new_vertex_id(vi3, vi1)
append3(new_faces, (a, d, c))
append3(new_faces, (b, d, a))
elif s3 >= 0 and s1 >= 0 and s2 < 0:
a = vi3
b = vi1
c = get_new_vertex_id(vi3, vi2)
d = get_new_vertex_id(vi1, vi2)
append3(new_faces, (a, d, c))
append3(new_faces, (b, d, a))
else:
assert False, "Unforeseen, this should not happen"
# Create v2f map
new_v2f = self._calculate_v2f(new_faces)
# Find the different holes that make up the rim of the mesh.
# We collect vertices in groups that each represent a path over a rim.
groups = []
rim_indices_left = set(range(len(self._vertices), len(new_vertices)))
rim_indices_left.update(reused_rim_vertices)
while rim_indices_left:
# Pick arbitrarty vertex on the rim. This will be our seed for a new group.
vi = rim_indices_left.pop()
rim_indices_left.add(vi) # Because we want to and here
group = [vi]
groups.append(group)
# Now walk along the rim until we're back
while not (len(group) >= 2 and group[0] == group[-1]):
faces = new_v2f[vi]
vi_next = None
for fi in faces:
# If the vertex next to the current vertex (in the correct direction/winding)
# is on the rim, make it the next current.
vi1, vi2, vi3 = new_faces[fi, 0], new_faces[fi, 1], new_faces[fi, 2]
if vi1 == vi and vi2 in rim_indices_left:
vi_next = vi2
break
elif vi2 == vi and vi3 in rim_indices_left:
vi_next = vi3
break
elif vi3 == vi and vi1 in rim_indices_left:
vi_next = vi1
break
# Done, or next
if vi_next is None:
raise RuntimeError("Could not find next vertex on the rim")
vi = vi_next
rim_indices_left.remove(vi)
group.append(vi)
continue
# Put the lid on each hole. Each group is ordered with correct winding already.
for group in groups:
assert group[0] == group[-1] # is already circular
center_vertex = new_vertices[group].mean(0)
center_vi = len(new_vertices)
append3(new_vertices, center_vertex)
new_v2f[center_vi] = fis = []
for i in range(len(group) - 1):
fi = len(new_faces)
fis.append(fi)
new_v2f[group[i]].append(fi)
new_v2f[group[i + 1]].append(fi)
append3(new_faces, (group[i], center_vi, group[i + 1]))
return Mesh(new_vertices, new_faces, new_v2f)
## Util functions
@jit
def append3(arr, p):
arr.resize((arr.shape[0] + 1, arr.shape[1]), refcheck=False)
arr[-1] = p
@jit
def norm(p):
return (p[0] ** 2 + p[1] ** 2 + p[2] ** 2) ** 0.5
@jit
def volume_of_triangle(p1, p2, p3):
# https://stackoverflow.com/a/1568551
v321 = p3[0] * p2[1] * p1[2]
v231 = p2[0] * p3[1] * p1[2]
v312 = p3[0] * p1[1] * p2[2]
v132 = p1[0] * p3[1] * p2[2]
v213 = p2[0] * p1[1] * p3[2]
v123 = p1[0] * p2[1] * p3[2]
return (1.0 / 6.0) * (-v321 + v231 + v312 - v132 - v213 + v123)
@jit
def signed_distance_to_plane(pp, plane):
a, b, c, d = plane
plane_norm = (a**2 + b**2 + c**2) ** 0.5
return (a * pp[:, 0] + b * pp[:, 1] + c * pp[:, 2] + d) / plane_norm
## Maker functions
def make_cube():
""" Create a vertex array representing a cube centered at the origin,
spanning 1 unit in each direction (thus having a volume of 8).
"""
vertices = np.zeros((0, 3), np.float32)
for rot in [0, 1, 2]:
for c in [-1, +1]:
a1, a2 = -1 * c, +1 * c
b1, b2 = -1, +1
for values in [(a1, b1, c), (a2, b2, c), (a1, b2, c),
(a1, b1, c), (a2, b1, c), (a2, b2, c)]:
values = values[rot:] + values[:rot]
append3(vertices, values)
return vertices
def make_tetrahedron():
""" Create a vertex array representing a tetrahedron (pyramid)
centered at the origin, with its vertices on the unit sphere. The
tatrahedon is the 3D object with the least possible number of faces.
"""
sqrt = lambda x: x**0.5
# Points on unit sphere
v1 = sqrt(8/9), 0, -1/3
v2 = -sqrt(2/9), sqrt(2/3), -1/3
v3 = -sqrt(2/9), -sqrt(2/3), -1/3
v4 = 0, 0, 1
# Create faces
vertices = np.zeros((0, 3), np.float32)
for v1, v2, v3 in [(v1, v2, v4), (v2, v3, v4), (v3, v1, v4), (v1, v3, v2)]:
append3(vertices, v1)
append3(vertices, v2)
append3(vertices, v3)
return vertices
def make_icosahedron():
""" Create a vertex array representing an icosahedron (a polyhedron
with 20 faces) centered at the origin, with its vertices on the
unit sphere.
"""
# Inspired from the Red book, end of chaper 2.
X = 0.525731112119133606
Z = 0.850650808352039932
vdata = [
(-X, 0.0, Z), (X, 0.0, Z), (-X, 0.0, -Z), (X, 0.0, -Z),
(0.0, Z, X), (0.0, Z, -X), (0.0, -Z, X), (0.0, -Z, -X),
(Z, X, 0.0), (-Z, X, 0.0), (Z, -X, 0.0), (-Z, -X, 0.0),
]
faces = [
(0,4,1), (0,9,4), (9,5,4), (4,5,8), (4,8,1),
(8,10,1), (8,3,10), (5,3,8), (5,2,3), (2,7,3),
(7,10,3), (7,6,10), (7,11,6), (11,0,6), (0,1,6),
(6,1,10), (9,0,11), (9,11,2), (9,2,5), (7,2,11)
]
vertices = np.zeros((0, 3), np.float32)
for v1, v2, v3 in faces:
append3(vertices, vdata[v1])
append3(vertices, vdata[v3]) # note the out-of order to make CCW winding
append3(vertices, vdata[v2])
return vertices
def make_sphere(ndiv=3):
""" Create a vertex array representing a unit sphere centered at
the origin. The vertices are generated by subdividing an icosahedron.
"""
vertices = make_icosahedron()
for iter in range(ndiv):
new_vertices = np.zeros((0, 3), np.float32)
for vi0 in range(0, len(vertices), 3):
v1, v2, v3 = vertices[vi0 + 0], vertices[vi0 + 1], vertices[vi0 + 2]
v4, v5, v6 = 0.5 * (v1 + v2), 0.5 * (v2 + v3), 0.5 * (v3 + v1)
v4, v5, v6 = v4 / norm(v4), v5 / norm(v5), v6 / norm(v6)
for vi in [v1, v4, v6, v2, v5, v4, v3, v6, v5, v4, v5, v6]:
append3(new_vertices, vi)
vertices = new_vertices
return vertices
if __name__ == "__main__":
import os
import visvis as vv
from stentseg.utils.datahandling import loadmesh
ptcode = 'LSPEAS_003'
ctcode1 = 'discharge'
basedirMesh = r"C:\stack\data\lspeas\vaatwand"
# filename ='{}_{}_neck.stl'.format(ptcode, ctcode1)
# vessel1 = loadmesh(basedirMesh, ptcode[-3:], filename) #inverts Z
# vv.processing.unwindFaces(vessel1)
# m = from_vertices(vessel1._vertices)
plane = (0.23038404068509294, -0.2301466100921624, 0.945492322370042, -102.36959192746241)
# m.cut_plane(plane)
# Make a sphere with one whole in it
s = Mesh(make_sphere()) # should have volume of 4/3 * np.pi * 0.5**3 = 0.5236
q = Mesh(make_cube()) # should have volume of 1
# s._faces[3,1], s._faces[3,2] = s._faces[3,2], s._faces[3,1]
| [
"numpy.zeros",
"numpy.asarray"
] | [((17750, 17778), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (17758, 17778), True, 'import numpy as np\n'), ((18598, 18626), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (18606, 18626), True, 'import numpy as np\n'), ((19589, 19617), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (19597, 19617), True, 'import numpy as np\n'), ((2071, 2099), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (2079, 2099), True, 'import numpy as np\n'), ((2999, 3027), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (3007, 3027), True, 'import numpy as np\n'), ((3059, 3085), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.int32'], {}), '((0, 3), np.int32)\n', (3067, 3085), True, 'import numpy as np\n'), ((11101, 11127), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.int32'], {}), '((0, 3), np.int32)\n', (11109, 11127), True, 'import numpy as np\n'), ((20088, 20116), 'numpy.zeros', 'np.zeros', (['(0, 3)', 'np.float32'], {}), '((0, 3), np.float32)\n', (20096, 20116), True, 'import numpy as np\n'), ((1200, 1238), 'numpy.asarray', 'np.asarray', (['vertices'], {'dtype': 'np.float32'}), '(vertices, dtype=np.float32)\n', (1210, 1238), True, 'import numpy as np\n'), ((1265, 1298), 'numpy.asarray', 'np.asarray', (['faces'], {'dtype': 'np.int32'}), '(faces, dtype=np.int32)\n', (1275, 1298), True, 'import numpy as np\n')] |
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim # try out sonnet instead?
from tensorflow.python.client import timeline
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.utils import shuffle
import numpy as np
import os
from utils import *
tf.app.flags.DEFINE_string('logdir', '/tmp/test', 'location for saved embeedings')
tf.app.flags.DEFINE_string('datadir', '/tmp/mnist', 'location for data')
tf.app.flags.DEFINE_integer('batchsize', 50, 'batch size.')
tf.app.flags.DEFINE_integer('epochs', 50, 'number of times through dataset.')
tf.app.flags.DEFINE_float('lr', 0.0001, 'learning rate.')
FLAGS = tf.app.flags.FLAGS
def batch(ims, labels, batchsize):
ims, labels = shuffle(ims, labels)
shape = ims.shape
for i in range(len(labels)//batchsize):
yield (i, ims[i*batchsize:(i+1)*batchsize, ...],
labels[i*batchsize:(i+1)*batchsize, ...])
def accuracy(p, y):
return tf.reduce_mean(tf.cast(tf.equal(tf.argmax(p, axis=1), y), tf.float32))
def main(_):
print('Get data')
mnist = input_data.read_data_sets(FLAGS.datadir, one_hot=False)
ims = np.reshape(mnist.train.images, [-1, 784]).astype(np.float32)
labels = np.reshape(mnist.train.labels, [-1]).astype(np.int64)
test_ims = np.reshape(mnist.test.images, [-1, 784]).astype(np.float32)
test_labels = np.reshape(mnist.test.labels, [-1]).astype(np.int64)
x = tf.placeholder(shape=[50, 784], dtype=tf.complex64)
l = tf.placeholder(shape=[50], dtype=tf.int64)
L = tf.one_hot(l, 10, 0.0, 1.0)
channel_sizes = [50, 10]
with slim.arg_scope([complex_fc],
activation_fn=None,
weights_initializer=tf.orthogonal_initializer(),
biases_initializer=tf.constant_initializer(0.0)):
logits = slim.stack(x, complex_fc, channel_sizes)
y = tf.nn.softmax(tf.abs(logits))
loss = tf.reduce_mean(-L*tf.log(y)-(1-L)*tf.log(1-y))
loss_summary = tf.summary.scalar('loss', loss)
global_step = tf.Variable(0, name='global_step', trainable=False)
step_summary = tf.summary.scalar('global_step', global_step)
opt = tf.train.AdamOptimizer(FLAGS.lr)
logdir = os.path.join(FLAGS.logdir, ''.join([str(c) for c in channel_sizes]))
train_step = opt.minimize(loss, global_step=global_step)
p = tf.nn.softmax(tf.abs(logits))
acc = accuracy(p, l)
train_accuracy = tf.summary.scalar('train_acc', acc)
train_im = None #tf.summary.image('train_im', x)
train = tf.summary.merge([train_accuracy]) # train_im, fmap_summ
test_accuracy = tf.summary.scalar('test_acc', acc)
test_im = None #tf.summary.image('test_im', x)
test = tf.summary.merge([test_accuracy]) # test_im
n = np.sum([np.prod(var.get_shape().as_list())
for var in tf.trainable_variables()])
print('num of vars {}'.format(n))
with tf.Session() as sess:
writer = tf.summary.FileWriter(logdir, sess.graph)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
sess.run(tf.global_variables_initializer())
step = 0
for e in range(FLAGS.epochs):
for i, batch_ims, batch_labels in batch(ims, labels, FLAGS.batchsize):
L, _ = sess.run([loss, train_step],
{x: batch_ims.astype(np.complex64), l: batch_labels},
options=run_options, run_metadata=run_metadata)
print('\rloss: {:.3f}'.format(L), end='', flush=True)
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open(os.path.join(logdir, 'timeline.json'), 'w') as f:
f.write(ctf)
if step%500==0:
# validate and save summaries
ids = np.random.randint(0, 5000, 50)
batch_test_ims = test_ims[ids, ...]
batch_test_labels = test_labels[ids]
loss_summ, train_summ = sess.run( [loss_summary, train],
{x: batch_ims.astype(np.complex64), l: batch_labels})
writer.add_summary(train_summ, step)
writer.add_summary(loss_summ, step)
test_summ = sess.run(test,
{x: batch_test_ims.astype(np.complex64),
l: batch_test_labels})
writer.add_summary(test_summ, step)
step += 1
if __name__ == '__main__':
tf.app.run()
| [
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.RunMetadata",
"tensorflow.log",
"tensorflow.app.run",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.RunOptions",
"tensorflow.python.client.timeline.Timeline",
"tensorflow.train.AdamOptimizer... | [((320, 406), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""logdir"""', '"""/tmp/test"""', '"""location for saved embeedings"""'], {}), "('logdir', '/tmp/test',\n 'location for saved embeedings')\n", (346, 406), True, 'import tensorflow as tf\n'), ((403, 475), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""datadir"""', '"""/tmp/mnist"""', '"""location for data"""'], {}), "('datadir', '/tmp/mnist', 'location for data')\n", (429, 475), True, 'import tensorflow as tf\n'), ((476, 535), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batchsize"""', '(50)', '"""batch size."""'], {}), "('batchsize', 50, 'batch size.')\n", (503, 535), True, 'import tensorflow as tf\n'), ((536, 613), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epochs"""', '(50)', '"""number of times through dataset."""'], {}), "('epochs', 50, 'number of times through dataset.')\n", (563, 613), True, 'import tensorflow as tf\n'), ((614, 671), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lr"""', '(0.0001)', '"""learning rate."""'], {}), "('lr', 0.0001, 'learning rate.')\n", (639, 671), True, 'import tensorflow as tf\n'), ((754, 774), 'sklearn.utils.shuffle', 'shuffle', (['ims', 'labels'], {}), '(ims, labels)\n', (761, 774), False, 'from sklearn.utils import shuffle\n'), ((1106, 1161), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['FLAGS.datadir'], {'one_hot': '(False)'}), '(FLAGS.datadir, one_hot=False)\n', (1131, 1161), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1456, 1507), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[50, 784]', 'dtype': 'tf.complex64'}), '(shape=[50, 784], dtype=tf.complex64)\n', (1470, 1507), True, 'import tensorflow as tf\n'), ((1516, 1558), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[50]', 'dtype': 
'tf.int64'}), '(shape=[50], dtype=tf.int64)\n', (1530, 1558), True, 'import tensorflow as tf\n'), ((1567, 1594), 'tensorflow.one_hot', 'tf.one_hot', (['l', '(10)', '(0.0)', '(1.0)'], {}), '(l, 10, 0.0, 1.0)\n', (1577, 1594), True, 'import tensorflow as tf\n'), ((2028, 2059), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (2045, 2059), True, 'import tensorflow as tf\n'), ((2079, 2130), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (2090, 2130), True, 'import tensorflow as tf\n'), ((2150, 2195), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""global_step"""', 'global_step'], {}), "('global_step', global_step)\n", (2167, 2195), True, 'import tensorflow as tf\n'), ((2208, 2240), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['FLAGS.lr'], {}), '(FLAGS.lr)\n', (2230, 2240), True, 'import tensorflow as tf\n'), ((2470, 2505), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train_acc"""', 'acc'], {}), "('train_acc', acc)\n", (2487, 2505), True, 'import tensorflow as tf\n'), ((2571, 2605), 'tensorflow.summary.merge', 'tf.summary.merge', (['[train_accuracy]'], {}), '([train_accuracy])\n', (2587, 2605), True, 'import tensorflow as tf\n'), ((2651, 2685), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""test_acc"""', 'acc'], {}), "('test_acc', acc)\n", (2668, 2685), True, 'import tensorflow as tf\n'), ((2748, 2781), 'tensorflow.summary.merge', 'tf.summary.merge', (['[test_accuracy]'], {}), '([test_accuracy])\n', (2764, 2781), True, 'import tensorflow as tf\n'), ((4680, 4692), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4690, 4692), True, 'import tensorflow as tf\n'), ((1872, 1912), 'tensorflow.contrib.slim.stack', 'slim.stack', (['x', 'complex_fc', 'channel_sizes'], {}), '(x, complex_fc, channel_sizes)\n', (1882, 1912), True, 'import tensorflow.contrib.slim as slim\n'), 
((1935, 1949), 'tensorflow.abs', 'tf.abs', (['logits'], {}), '(logits)\n', (1941, 1949), True, 'import tensorflow as tf\n'), ((2407, 2421), 'tensorflow.abs', 'tf.abs', (['logits'], {}), '(logits)\n', (2413, 2421), True, 'import tensorflow as tf\n'), ((2947, 2959), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2957, 2959), True, 'import tensorflow as tf\n'), ((2986, 3027), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir', 'sess.graph'], {}), '(logdir, sess.graph)\n', (3007, 3027), True, 'import tensorflow as tf\n'), ((3050, 3101), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (3063, 3101), True, 'import tensorflow as tf\n'), ((3125, 3141), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (3139, 3141), True, 'import tensorflow as tf\n'), ((1172, 1213), 'numpy.reshape', 'np.reshape', (['mnist.train.images', '[-1, 784]'], {}), '(mnist.train.images, [-1, 784])\n', (1182, 1213), True, 'import numpy as np\n'), ((1246, 1282), 'numpy.reshape', 'np.reshape', (['mnist.train.labels', '[-1]'], {}), '(mnist.train.labels, [-1])\n', (1256, 1282), True, 'import numpy as np\n'), ((1316, 1356), 'numpy.reshape', 'np.reshape', (['mnist.test.images', '[-1, 784]'], {}), '(mnist.test.images, [-1, 784])\n', (1326, 1356), True, 'import numpy as np\n'), ((1394, 1429), 'numpy.reshape', 'np.reshape', (['mnist.test.labels', '[-1]'], {}), '(mnist.test.labels, [-1])\n', (1404, 1429), True, 'import numpy as np\n'), ((3160, 3193), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3191, 3193), True, 'import tensorflow as tf\n'), ((1019, 1039), 'tensorflow.argmax', 'tf.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (1028, 1039), True, 'import tensorflow as tf\n'), ((1752, 1779), 'tensorflow.orthogonal_initializer', 'tf.orthogonal_initializer', ([], {}), '()\n', (1777, 1779), True, 'import tensorflow as tf\n'), ((1824, 
1852), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1847, 1852), True, 'import tensorflow as tf\n'), ((1980, 1989), 'tensorflow.log', 'tf.log', (['y'], {}), '(y)\n', (1986, 1989), True, 'import tensorflow as tf\n'), ((1996, 2009), 'tensorflow.log', 'tf.log', (['(1 - y)'], {}), '(1 - y)\n', (2002, 2009), True, 'import tensorflow as tf\n'), ((2872, 2896), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2894, 2896), True, 'import tensorflow as tf\n'), ((3643, 3685), 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', (['run_metadata.step_stats'], {}), '(run_metadata.step_stats)\n', (3660, 3685), False, 'from tensorflow.python.client import timeline\n'), ((3960, 3990), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5000)', '(50)'], {}), '(0, 5000, 50)\n', (3977, 3990), True, 'import numpy as np\n'), ((3768, 3805), 'os.path.join', 'os.path.join', (['logdir', '"""timeline.json"""'], {}), "(logdir, 'timeline.json')\n", (3780, 3805), False, 'import os\n')] |
"""Run parallel shallow water domain.
run using command like:
mpiexec -np m python run_parallel_sw_merimbula.py
where m is the number of processors to be used.
Will produce sww files with names domain_Pn_m.sww where m is number of processors and
n in [0, m-1] refers to specific processor that owned this part of the partitioned mesh.
"""
from __future__ import print_function
from __future__ import division
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from builtins import str
from builtins import range
from past.utils import old_div
from future.utils import raise_
import os
import sys
import time
from anuga.utilities import parallel_abstraction as pypar
import numpy as num
import unittest
import tempfile
from struct import pack, unpack
from anuga.file.netcdf import NetCDFFile
import copy
#------------------------
# ANUGA Modules
#------------------------
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.utilities.util_ext import double_precision
from anuga.utilities.norms import l1_norm, l2_norm, linf_norm
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import File_boundary
from anuga.file.mux import WAVEHEIGHT_MUX2_LABEL, EAST_VELOCITY_MUX2_LABEL, \
NORTH_VELOCITY_MUX2_LABEL
from anuga.file.mux import read_mux2_py
from anuga.file_conversion.urs2sts import urs2sts
from anuga.file.urs import Read_urs
from anuga.file.sts import create_sts_boundary
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.coordinate_transforms.redfearn import redfearn
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga import rectangular_cross_domain
from anuga.pmesh.mesh_interface import create_mesh_from_regions
from anuga import create_domain_from_file
from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize
from anuga.file.tests.test_mux import Test_Mux
verbose = False
class Test_urs2sts_parallel(Test_Mux):
    """ A suite of tests to test urs2sts file conversion functions.
        These tests are quite coarse-grained: converting a file
        and checking that its headers and some of its contents
        are correct.
    """
    def sequential_time_varying_file_boundary_sts(self):
        """Read correct points from the ordering file and apply the STS data
        as a time-varying file boundary, sequentially on every process.

        Writes mux2 input, converts it to a single .sts file with urs2sts,
        evolves one domain with a File_boundary and a second domain with an
        equivalent analytic Time_boundary, then asserts the two evolutions
        agree.  FIXME: add to test_urs2sts.
        """
        # Gauge locations as (lat, lon) pairs and the polygon bounding the
        # modelled region.
        lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
        bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
                          [6.02,97.02],[6.00,97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2.
        n=len(lat_long_points)
        # Every gauge records from the first to the last time step.
        first_tstep=num.ones(n,int)
        last_tstep=(time_step_count)*num.ones(n,int)
        finaltime=float(time_step*(time_step_count-1))
        yieldstep=float(time_step)
        gauge_depth=20*num.ones(n,float)
        ha=2*num.ones((n,time_step_count),float)
        ua=10*num.ones((n,time_step_count),float)
        va=-10*num.ones((n,time_step_count),float)
        times=num.arange(0., float(time_step_count*time_step), time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            # Linear ramp in wave height makes the boundary time varying.
            ha[i]+=old_div(times,finaltime)
        sts_file="test"
        # Only the master process writes the mux2 data and converts it to
        # .sts; the other processes wait at the barrier below.
        if myid==0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created
            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d=","
            order_file=order_base_name+'order.txt'
            fid=open(order_file,'w')
            # Write Header
            header='index, longitude, latitude\n'
            fid.write(header)
            # Deliberately reordered subset of the gauges (lon before lat).
            indices=[3,0,1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()
            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)
            assert(os.access(sts_file+'.sts', os.F_OK))
            os.remove(order_file)
        barrier()
        boundary_polygon = create_sts_boundary(sts_file)
        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm=[]
        for point in bounding_polygon:
            zone,easting,northing=redfearn(point[0],point[1])
            bounding_polygon_utm.append([easting,northing])
        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])
        assert num.allclose(bounding_polygon_utm,boundary_polygon)
        extent_res=1000000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions=None
        boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
        # have to change boundary tags from last example because now bounding
        # polygon starts in different place.
        if myid==0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)
        barrier()
        # Domain evolved with the file (.sts) boundary condition.
        domain_fbound = Domain(meshname)
        domain_fbound.set_quantities_to_be_stored(None)
        domain_fbound.set_quantity('stage', tide)
        if verbose: print("Creating file boundary condition")
        Bf = File_boundary(sts_file+'.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)
        domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})
        # Record stage at centroid 2 at every yield step.
        temp_fbound=num.zeros(int(old_div(finaltime,yieldstep))+1,float)
        if verbose: print("Evolving domain with file boundary condition")
        for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step = False)):
            temp_fbound[i]=domain_fbound.quantities['stage'].centroid_values[2]
            if verbose: domain_fbound.write_time()
        # Reference domain evolved with an equivalent analytic boundary.
        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
        # Analytic boundary matching the ramped mux2 data written above.
        Bd = Time_boundary(domain=domain_drchlt, f=lambda t: [2.0+old_div(t,finaltime)+tide,220.+10.*tide+old_div(10.*t,finaltime),-220.-10.*tide-old_div(10.*t,finaltime)])
        #Bd = Time_boundary(domain=domain_drchlt,f=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
        domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
        temp_drchlt=num.zeros(int(old_div(finaltime,yieldstep))+1,float)
        for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step = False)):
            temp_drchlt[i]=domain_drchlt.quantities['stage'].centroid_values[2]
            #domain_drchlt.write_time()
        #print domain_fbound.quantities['stage'].vertex_values
        #print domain_drchlt.quantities['stage'].vertex_values
        # The two evolutions must agree at the probe point and over all
        # stored quantities.
        assert num.allclose(temp_fbound,temp_drchlt),temp_fbound-temp_drchlt
        assert num.allclose(domain_fbound.quantities['stage'].vertex_values,
                            domain_drchlt.quantities['stage'].vertex_values)
        assert num.allclose(domain_fbound.quantities['xmomentum'].vertex_values,
                            domain_drchlt.quantities['xmomentum'].vertex_values)
        assert num.allclose(domain_fbound.quantities['ymomentum'].vertex_values,
                            domain_drchlt.quantities['ymomentum'].vertex_values)
        # Clean up generated files on the master (skipped on Windows).
        if not sys.platform == 'win32':
            if myid==0: os.remove(sts_file+'.sts')
            if myid==0: os.remove(meshname)
    def parallel_time_varying_file_boundary_sts(self):
        """Read correct points from the ordering file and apply the STS data
        to the boundary.  The boundary is time varying.  Compares the
        sequential result with the distributed result obtained with
        anuga_parallel.
        """
        #------------------------------------------------------------
        # Define test variables
        #------------------------------------------------------------
        lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
        bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
                          [6.02,97.02],[6.00,97.02]]
        tide = 3.0
        time_step_count = 65
        time_step = 2
        n=len(lat_long_points)
        first_tstep=num.ones(n,int)
        last_tstep=(time_step_count)*num.ones(n,int)
        finaltime=float(time_step*(time_step_count-1))
        yieldstep=float(time_step)
        gauge_depth=20*num.ones(n,float)
        ha=2*num.ones((n,time_step_count),float)
        ua=10*num.ones((n,time_step_count),float)
        va=-10*num.ones((n,time_step_count),float)
        times=num.arange(0, time_step_count*time_step, time_step)
        for i in range(n):
            #ha[i]+=num.sin(times)
            ha[i]+=old_div(times,finaltime)
        #------------------------------------------------------------
        # Write mux data to file then convert to sts format
        #------------------------------------------------------------
        sts_file="test"
        if myid==0:
            base_name, files = self.write_mux2(lat_long_points,
                                               time_step_count,
                                               time_step,
                                               first_tstep,
                                               last_tstep,
                                               depth=gauge_depth,
                                               ha=ha,
                                               ua=ua,
                                               va=va)
            # base name will not exist, but 3 other files are created
            # Write order file
            file_handle, order_base_name = tempfile.mkstemp("")
            os.close(file_handle)
            os.remove(order_base_name)
            d=","
            order_file=order_base_name+'order.txt'
            fid=open(order_file,'w')
            # Write Header
            header='index, longitude, latitude\n'
            fid.write(header)
            indices=[3,0,1]
            for i in indices:
                line=str(i)+d+str(lat_long_points[i][1])+d+\
                    str(lat_long_points[i][0])+"\n"
                fid.write(line)
            fid.close()
            urs2sts(base_name,
                    basename_out=sts_file,
                    ordering_filename=order_file,
                    mean_stage=tide,
                    verbose=verbose)
            self.delete_mux(files)
            assert(os.access(sts_file+'.sts', os.F_OK))
            os.remove(order_file)
        barrier()
        #------------------------------------------------------------
        # Define boundary_polygon on each processor. This polygon defines the
        # urs boundary and lies on a portion of the bounding_polygon
        #------------------------------------------------------------
        boundary_polygon = create_sts_boundary(sts_file)
        # Append the remaining part of the boundary polygon to be defined by
        # the user
        bounding_polygon_utm=[]
        for point in bounding_polygon:
            zone,easting,northing=redfearn(point[0],point[1])
            bounding_polygon_utm.append([easting,northing])
        boundary_polygon.append(bounding_polygon_utm[3])
        boundary_polygon.append(bounding_polygon_utm[4])
        assert num.allclose(bounding_polygon_utm,boundary_polygon)
        extent_res=10000
        meshname = 'urs_test_mesh' + '.tsh'
        interior_regions=None
        boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
        #------------------------------------------------------------
        # Create mesh on the master processor and store in file. This file
        # is read in by each slave processor when needed
        #------------------------------------------------------------
        if myid==0:
            create_mesh_from_regions(boundary_polygon,
                                     boundary_tags=boundary_tags,
                                     maximum_triangle_area=extent_res,
                                     filename=meshname,
                                     interior_regions=interior_regions,
                                     verbose=verbose)
            # barrier()
            domain_fbound = Domain(meshname)
            domain_fbound.set_quantities_to_be_stored(None)
            domain_fbound.set_quantity('stage', tide)
            # print domain_fbound.mesh.get_boundary_polygon()
        else:
            domain_fbound=None
        barrier()
        if ( verbose and myid == 0 ):
            print('DISTRIBUTING PARALLEL DOMAIN')
        domain_fbound = distribute(domain_fbound)
        #--------------------------------------------------------------------
        # Find which sub_domain in which the interpolation points are located
        #
        # Sometimes the interpolation points sit exactly
        # between two centroids, so in the parallel run we
        # reset the interpolation points to the centroids
        # found in the sequential run
        #--------------------------------------------------------------------
        interpolation_points = [[279000,664000], [280250,664130],
                                [279280,665400], [280500,665000]]
        interpolation_points=num.array(interpolation_points)
        #if myid==0:
        #    import pylab as P
        #    boundary_polygon=num.array(boundary_polygon)
        #    P.plot(boundary_polygon[:,0],boundary_polygon[:,1])
        #    P.plot(interpolation_points[:,0],interpolation_points[:,1],'ko')
        #    P.show()
        fbound_gauge_values = []
        fbound_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            fbound_gauge_values.append([]) # Empty list for timeseries
            # Sentinels: -1 when tri_full_flag[k] != 1 (presumably a ghost
            # triangle on this processor -- confirm), -2 when the point
            # lookup raised (point not in this sub-domain).
            try:
                k = domain_fbound.get_triangle_containing_point(point)
                if domain_fbound.tri_full_flag[k] == 1:
                    fbound_proc_tri_ids.append(k)
                else:
                    fbound_proc_tri_ids.append(-1)
            except:
                fbound_proc_tri_ids.append(-2)
        if verbose: print('P%d has points = %s' %(myid, fbound_proc_tri_ids))
        #------------------------------------------------------------
        # Set boundary conditions
        #------------------------------------------------------------
        Bf = File_boundary(sts_file+'.sts',
                           domain_fbound,
                           boundary_polygon=boundary_polygon)
        Br = Reflective_boundary(domain_fbound)
        domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})
        #------------------------------------------------------------
        # Evolve the domain on each processor
        #------------------------------------------------------------
        for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step = False)):
            stage = domain_fbound.get_quantity('stage')
            for i in range(4):
                if fbound_proc_tri_ids[i] > -1:
                    fbound_gauge_values[i].append(stage.centroid_values[fbound_proc_tri_ids[i]])
        #------------------------------------------------------------
        # Create domain to be run sequntially on each processor
        #------------------------------------------------------------
        domain_drchlt = Domain(meshname)
        domain_drchlt.set_quantities_to_be_stored(None)
        domain_drchlt.set_starttime(time_step)
        domain_drchlt.set_quantity('stage', tide)
        Br = Reflective_boundary(domain_drchlt)
        #Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
        Bd = Time_boundary(domain=domain_drchlt, function=lambda t: [2.0+old_div(t,finaltime)+tide,220.+10.*tide+old_div(10.*t,finaltime),-220.-10.*tide-old_div(10.*t,finaltime)])
        #Bd = Time_boundary(domain=domain_drchlt,function=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
        domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
        drchlt_gauge_values = []
        drchlt_proc_tri_ids = []
        for i, point in enumerate(interpolation_points):
            drchlt_gauge_values.append([]) # Empty list for timeseries
            try:
                k = domain_drchlt.get_triangle_containing_point(point)
                if domain_drchlt.tri_full_flag[k] == 1:
                    drchlt_proc_tri_ids.append(k)
                else:
                    drchlt_proc_tri_ids.append(-1)
            except:
                drchlt_proc_tri_ids.append(-2)
        if verbose: print('P%d has points = %s' %(myid, drchlt_proc_tri_ids))
        #------------------------------------------------------------
        # Evolve entire domain on each processor
        #------------------------------------------------------------
        for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
                                                   finaltime=finaltime,
                                                   skip_initial_step = False)):
            stage = domain_drchlt.get_quantity('stage')
            for i in range(4):
                drchlt_gauge_values[i].append(stage.centroid_values[drchlt_proc_tri_ids[i]])
        #------------------------------------------------------------
        # Compare sequential values with parallel values
        #------------------------------------------------------------
        barrier()
        success = True
        for i in range(4):
            # Only compare gauges owned (as full triangles) by this process.
            if fbound_proc_tri_ids[i] > -1:
                fbound_gauge_values[i]=num.array(fbound_gauge_values[i])
                drchlt_gauge_values[i]=num.array(drchlt_gauge_values[i])
                #print i,fbound_gauge_values[i][4]
                #print i,drchlt_gauge_values[i][4]
                success = success and num.allclose(fbound_gauge_values[i], drchlt_gauge_values[i])
        assert success#, (fbound_gauge_values[i]-drchlt_gauge_values[i])
        #assert_(success)
        if not sys.platform == 'win32':
            if myid==0: os.remove(sts_file+'.sts')
            if myid==0: os.remove(meshname)
# Because we are doing assertions outside of the TestCase class
# the PyUnit defined assert_ function can't be used.
def assert_(condition, msg="Assertion Failed"):
    """Raise ``AssertionError(msg)`` when ``condition`` is falsy.

    Unlike the bare ``assert`` statement, this is not stripped out when
    Python runs with ``-O``, so it is safe for result checks that must
    always execute.  The original compared ``condition == False``, which
    silently accepted other falsy values (e.g. ``None``, ``[]``); the
    truthiness test below treats any falsy condition as a failure.
    ``raise AssertionError(msg)`` is valid in both Python 2 and 3, so the
    ``future.utils.raise_`` indirection is unnecessary.
    """
    if not condition:
        #pypar.finalize()
        raise AssertionError(msg)
# Test an nprocs-way run of the shallow water equations
# against the sequential code.
if __name__=="__main__":
    #verbose=False
    # Announce the start only on the master process to avoid duplicated
    # output under MPI.
    if myid ==0 and verbose:
        print('PARALLEL START')
    # Collect only the methods whose names start with 'parallel_test'.
    suite = unittest.makeSuite(Test_urs2sts_parallel,'parallel_test')
    #suite = unittest.makeSuite(Test_urs2sts_parallel,'sequential_test')
    runner = unittest.TextTestRunner()
    runner.run(suite)
    #------------------------------------------
    # Run the code and compare sequential
    # results at 4 gauge stations
    #------------------------------------------
    # Shut down the parallel (MPI) environment cleanly on every process.
    finalize()
| [
"anuga.Domain",
"unittest.makeSuite",
"anuga.parallel.finalize",
"builtins.str",
"past.utils.old_div",
"anuga.parallel.barrier",
"builtins.range",
"numpy.array",
"anuga.coordinate_transforms.redfearn.redfearn",
"unittest.TextTestRunner",
"numpy.arange",
"os.remove",
"anuga.File_boundary",
... | [((20979, 21037), 'unittest.makeSuite', 'unittest.makeSuite', (['Test_urs2sts_parallel', '"""parallel_test"""'], {}), "(Test_urs2sts_parallel, 'parallel_test')\n", (20997, 21037), False, 'import unittest\n'), ((21123, 21148), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (21146, 21148), False, 'import unittest\n'), ((21359, 21369), 'anuga.parallel.finalize', 'finalize', ([], {}), '()\n', (21367, 21369), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((3069, 3085), 'numpy.ones', 'num.ones', (['n', 'int'], {}), '(n, int)\n', (3077, 3085), True, 'import numpy as num\n'), ((3511, 3519), 'builtins.range', 'range', (['n'], {}), '(n)\n', (3516, 3519), False, 'from builtins import range\n'), ((5203, 5212), 'anuga.parallel.barrier', 'barrier', ([], {}), '()\n', (5210, 5212), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((5240, 5269), 'anuga.file.sts.create_sts_boundary', 'create_sts_boundary', (['sts_file'], {}), '(sts_file)\n', (5259, 5269), False, 'from anuga.file.sts import create_sts_boundary\n'), ((5691, 5743), 'numpy.allclose', 'num.allclose', (['bounding_polygon_utm', 'boundary_polygon'], {}), '(bounding_polygon_utm, boundary_polygon)\n', (5703, 5743), True, 'import numpy as num\n'), ((6443, 6452), 'anuga.parallel.barrier', 'barrier', ([], {}), '()\n', (6450, 6452), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((6486, 6502), 'anuga.Domain', 'Domain', (['meshname'], {}), '(meshname)\n', (6492, 6502), False, 'from anuga import Domain\n'), ((6684, 6771), 'anuga.File_boundary', 'File_boundary', (["(sts_file + '.sts')", 'domain_fbound'], {'boundary_polygon': 'boundary_polygon'}), "(sts_file + '.sts', domain_fbound, boundary_polygon=\n boundary_polygon)\n", (6697, 6771), False, 'from anuga import File_boundary\n'), ((6832, 6866), 'anuga.Reflective_boundary', 
'Reflective_boundary', (['domain_fbound'], {}), '(domain_fbound)\n', (6851, 6866), False, 'from anuga import Reflective_boundary\n'), ((7485, 7501), 'anuga.Domain', 'Domain', (['meshname'], {}), '(meshname)\n', (7491, 7501), False, 'from anuga import Domain\n'), ((7668, 7702), 'anuga.Reflective_boundary', 'Reflective_boundary', (['domain_drchlt'], {}), '(domain_drchlt)\n', (7687, 7702), False, 'from anuga import Reflective_boundary\n'), ((8752, 8790), 'numpy.allclose', 'num.allclose', (['temp_fbound', 'temp_drchlt'], {}), '(temp_fbound, temp_drchlt)\n', (8764, 8790), True, 'import numpy as num\n'), ((8839, 8954), 'numpy.allclose', 'num.allclose', (["domain_fbound.quantities['stage'].vertex_values", "domain_drchlt.quantities['stage'].vertex_values"], {}), "(domain_fbound.quantities['stage'].vertex_values, domain_drchlt\n .quantities['stage'].vertex_values)\n", (8851, 8954), True, 'import numpy as num\n'), ((9018, 9140), 'numpy.allclose', 'num.allclose', (["domain_fbound.quantities['xmomentum'].vertex_values", "domain_drchlt.quantities['xmomentum'].vertex_values"], {}), "(domain_fbound.quantities['xmomentum'].vertex_values,\n domain_drchlt.quantities['xmomentum'].vertex_values)\n", (9030, 9140), True, 'import numpy as num\n'), ((9229, 9351), 'numpy.allclose', 'num.allclose', (["domain_fbound.quantities['ymomentum'].vertex_values", "domain_drchlt.quantities['ymomentum'].vertex_values"], {}), "(domain_fbound.quantities['ymomentum'].vertex_values,\n domain_drchlt.quantities['ymomentum'].vertex_values)\n", (9241, 9351), True, 'import numpy as num\n'), ((10362, 10378), 'numpy.ones', 'num.ones', (['n', 'int'], {}), '(n, int)\n', (10370, 10378), True, 'import numpy as num\n'), ((10727, 10780), 'numpy.arange', 'num.arange', (['(0)', '(time_step_count * time_step)', 'time_step'], {}), '(0, time_step_count * time_step, time_step)\n', (10737, 10780), True, 'import numpy as num\n'), ((10796, 10804), 'builtins.range', 'range', (['n'], {}), '(n)\n', (10801, 10804), False, 'from 
builtins import range\n'), ((12686, 12695), 'anuga.parallel.barrier', 'barrier', ([], {}), '()\n', (12693, 12695), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((13010, 13039), 'anuga.file.sts.create_sts_boundary', 'create_sts_boundary', (['sts_file'], {}), '(sts_file)\n', (13029, 13039), False, 'from anuga.file.sts import create_sts_boundary\n'), ((13462, 13514), 'numpy.allclose', 'num.allclose', (['bounding_polygon_utm', 'boundary_polygon'], {}), '(bounding_polygon_utm, boundary_polygon)\n', (13474, 13514), True, 'import numpy as num\n'), ((14660, 14669), 'anuga.parallel.barrier', 'barrier', ([], {}), '()\n', (14667, 14669), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((14783, 14808), 'anuga.parallel.distribute', 'distribute', (['domain_fbound'], {}), '(domain_fbound)\n', (14793, 14808), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((15434, 15465), 'numpy.array', 'num.array', (['interpolation_points'], {}), '(interpolation_points)\n', (15443, 15465), True, 'import numpy as num\n'), ((16552, 16639), 'anuga.File_boundary', 'File_boundary', (["(sts_file + '.sts')", 'domain_fbound'], {'boundary_polygon': 'boundary_polygon'}), "(sts_file + '.sts', domain_fbound, boundary_polygon=\n boundary_polygon)\n", (16565, 16639), False, 'from anuga import File_boundary\n'), ((16700, 16734), 'anuga.Reflective_boundary', 'Reflective_boundary', (['domain_fbound'], {}), '(domain_fbound)\n', (16719, 16734), False, 'from anuga import Reflective_boundary\n'), ((17691, 17707), 'anuga.Domain', 'Domain', (['meshname'], {}), '(meshname)\n', (17697, 17707), False, 'from anuga import Domain\n'), ((17874, 17908), 'anuga.Reflective_boundary', 'Reflective_boundary', (['domain_drchlt'], {}), '(domain_drchlt)\n', (17893, 17908), False, 'from anuga import Reflective_boundary\n'), ((19805, 19814), 'anuga.parallel.barrier', 
'barrier', ([], {}), '()\n', (19812, 19814), False, 'from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize\n'), ((19855, 19863), 'builtins.range', 'range', (['(4)'], {}), '(4)\n', (19860, 19863), False, 'from builtins import range\n'), ((20743, 20770), 'future.utils.raise_', 'raise_', (['AssertionError', 'msg'], {}), '(AssertionError, msg)\n', (20749, 20770), False, 'from future.utils import raise_\n'), ((3122, 3138), 'numpy.ones', 'num.ones', (['n', 'int'], {}), '(n, int)\n', (3130, 3138), True, 'import numpy as num\n'), ((3251, 3269), 'numpy.ones', 'num.ones', (['n', 'float'], {}), '(n, float)\n', (3259, 3269), True, 'import numpy as num\n'), ((3282, 3319), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (3290, 3319), True, 'import numpy as num\n'), ((3332, 3369), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (3340, 3369), True, 'import numpy as num\n'), ((3383, 3420), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (3391, 3420), True, 'import numpy as num\n'), ((3575, 3600), 'past.utils.old_div', 'old_div', (['times', 'finaltime'], {}), '(times, finaltime)\n', (3582, 3600), False, 'from past.utils import old_div\n'), ((4325, 4345), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""""""'], {}), "('')\n", (4341, 4345), False, 'import tempfile\n'), ((4358, 4379), 'os.close', 'os.close', (['file_handle'], {}), '(file_handle)\n', (4366, 4379), False, 'import os\n'), ((4392, 4418), 'os.remove', 'os.remove', (['order_base_name'], {}), '(order_base_name)\n', (4401, 4418), False, 'import os\n'), ((4881, 4990), 'anuga.file_conversion.urs2sts.urs2sts', 'urs2sts', (['base_name'], {'basename_out': 'sts_file', 'ordering_filename': 'order_file', 'mean_stage': 'tide', 'verbose': 'verbose'}), '(base_name, basename_out=sts_file, ordering_filename=order_file,\n mean_stage=tide, 
verbose=verbose)\n', (4888, 4990), False, 'from anuga.file_conversion.urs2sts import urs2sts\n'), ((5122, 5159), 'os.access', 'os.access', (["(sts_file + '.sts')", 'os.F_OK'], {}), "(sts_file + '.sts', os.F_OK)\n", (5131, 5159), False, 'import os\n'), ((5172, 5193), 'os.remove', 'os.remove', (['order_file'], {}), '(order_file)\n', (5181, 5193), False, 'import os\n'), ((5472, 5500), 'anuga.coordinate_transforms.redfearn.redfearn', 'redfearn', (['point[0]', 'point[1]'], {}), '(point[0], point[1])\n', (5480, 5500), False, 'from anuga.coordinate_transforms.redfearn import redfearn\n'), ((6072, 6257), 'anuga.pmesh.mesh_interface.create_mesh_from_regions', 'create_mesh_from_regions', (['boundary_polygon'], {'boundary_tags': 'boundary_tags', 'maximum_triangle_area': 'extent_res', 'filename': 'meshname', 'interior_regions': 'interior_regions', 'verbose': 'verbose'}), '(boundary_polygon, boundary_tags=boundary_tags,\n maximum_triangle_area=extent_res, filename=meshname, interior_regions=\n interior_regions, verbose=verbose)\n', (6096, 6257), False, 'from anuga.pmesh.mesh_interface import create_mesh_from_regions\n'), ((9505, 9524), 'os.remove', 'os.remove', (['meshname'], {}), '(meshname)\n', (9514, 9524), False, 'import os\n'), ((10415, 10431), 'numpy.ones', 'num.ones', (['n', 'int'], {}), '(n, int)\n', (10423, 10431), True, 'import numpy as num\n'), ((10544, 10562), 'numpy.ones', 'num.ones', (['n', 'float'], {}), '(n, float)\n', (10552, 10562), True, 'import numpy as num\n'), ((10575, 10612), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (10583, 10612), True, 'import numpy as num\n'), ((10625, 10662), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (10633, 10662), True, 'import numpy as num\n'), ((10676, 10713), 'numpy.ones', 'num.ones', (['(n, time_step_count)', 'float'], {}), '((n, time_step_count), float)\n', (10684, 10713), True, 'import numpy as num\n'), 
((10860, 10885), 'past.utils.old_div', 'old_div', (['times', 'finaltime'], {}), '(times, finaltime)\n', (10867, 10885), False, 'from past.utils import old_div\n'), ((11808, 11828), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""""""'], {}), "('')\n", (11824, 11828), False, 'import tempfile\n'), ((11841, 11862), 'os.close', 'os.close', (['file_handle'], {}), '(file_handle)\n', (11849, 11862), False, 'import os\n'), ((11875, 11901), 'os.remove', 'os.remove', (['order_base_name'], {}), '(order_base_name)\n', (11884, 11901), False, 'import os\n'), ((12364, 12473), 'anuga.file_conversion.urs2sts.urs2sts', 'urs2sts', (['base_name'], {'basename_out': 'sts_file', 'ordering_filename': 'order_file', 'mean_stage': 'tide', 'verbose': 'verbose'}), '(base_name, basename_out=sts_file, ordering_filename=order_file,\n mean_stage=tide, verbose=verbose)\n', (12371, 12473), False, 'from anuga.file_conversion.urs2sts import urs2sts\n'), ((12605, 12642), 'os.access', 'os.access', (["(sts_file + '.sts')", 'os.F_OK'], {}), "(sts_file + '.sts', os.F_OK)\n", (12614, 12642), False, 'import os\n'), ((12655, 12676), 'os.remove', 'os.remove', (['order_file'], {}), '(order_file)\n', (12664, 12676), False, 'import os\n'), ((13242, 13270), 'anuga.coordinate_transforms.redfearn.redfearn', 'redfearn', (['point[0]', 'point[1]'], {}), '(point[0], point[1])\n', (13250, 13270), False, 'from anuga.coordinate_transforms.redfearn import redfearn\n'), ((13989, 14174), 'anuga.pmesh.mesh_interface.create_mesh_from_regions', 'create_mesh_from_regions', (['boundary_polygon'], {'boundary_tags': 'boundary_tags', 'maximum_triangle_area': 'extent_res', 'filename': 'meshname', 'interior_regions': 'interior_regions', 'verbose': 'verbose'}), '(boundary_polygon, boundary_tags=boundary_tags,\n maximum_triangle_area=extent_res, filename=meshname, interior_regions=\n interior_regions, verbose=verbose)\n', (14013, 14174), False, 'from anuga.pmesh.mesh_interface import create_mesh_from_regions\n'), ((14413, 14429), 
'anuga.Domain', 'Domain', (['meshname'], {}), '(meshname)\n', (14419, 14429), False, 'from anuga import Domain\n'), ((17299, 17307), 'builtins.range', 'range', (['(4)'], {}), '(4)\n', (17304, 17307), False, 'from builtins import range\n'), ((19496, 19504), 'builtins.range', 'range', (['(4)'], {}), '(4)\n', (19501, 19504), False, 'from builtins import range\n'), ((20492, 20511), 'os.remove', 'os.remove', (['meshname'], {}), '(meshname)\n', (20501, 20511), False, 'import os\n'), ((9449, 9477), 'os.remove', 'os.remove', (["(sts_file + '.sts')"], {}), "(sts_file + '.sts')\n", (9458, 9477), False, 'import os\n'), ((19948, 19981), 'numpy.array', 'num.array', (['fbound_gauge_values[i]'], {}), '(fbound_gauge_values[i])\n', (19957, 19981), True, 'import numpy as num\n'), ((20021, 20054), 'numpy.array', 'num.array', (['drchlt_gauge_values[i]'], {}), '(drchlt_gauge_values[i])\n', (20030, 20054), True, 'import numpy as num\n'), ((20436, 20464), 'os.remove', 'os.remove', (["(sts_file + '.sts')"], {}), "(sts_file + '.sts')\n", (20445, 20464), False, 'import os\n'), ((6970, 6999), 'past.utils.old_div', 'old_div', (['finaltime', 'yieldstep'], {}), '(finaltime, yieldstep)\n', (6977, 6999), False, 'from past.utils import old_div\n'), ((8188, 8217), 'past.utils.old_div', 'old_div', (['finaltime', 'yieldstep'], {}), '(finaltime, yieldstep)\n', (8195, 8217), False, 'from past.utils import old_div\n'), ((20195, 20255), 'numpy.allclose', 'num.allclose', (['fbound_gauge_values[i]', 'drchlt_gauge_values[i]'], {}), '(fbound_gauge_values[i], drchlt_gauge_values[i])\n', (20207, 20255), True, 'import numpy as num\n'), ((4780, 4806), 'builtins.str', 'str', (['lat_long_points[i][0]'], {}), '(lat_long_points[i][0])\n', (4783, 4806), False, 'from builtins import str\n'), ((12263, 12289), 'builtins.str', 'str', (['lat_long_points[i][0]'], {}), '(lat_long_points[i][0])\n', (12266, 12289), False, 'from builtins import str\n'), ((7879, 7907), 'past.utils.old_div', 'old_div', (['(10.0 * t)', 
'finaltime'], {}), '(10.0 * t, finaltime)\n', (7886, 7907), False, 'from past.utils import old_div\n'), ((7919, 7947), 'past.utils.old_div', 'old_div', (['(10.0 * t)', 'finaltime'], {}), '(10.0 * t, finaltime)\n', (7926, 7947), False, 'from past.utils import old_div\n'), ((18092, 18120), 'past.utils.old_div', 'old_div', (['(10.0 * t)', 'finaltime'], {}), '(10.0 * t, finaltime)\n', (18099, 18120), False, 'from past.utils import old_div\n'), ((18132, 18160), 'past.utils.old_div', 'old_div', (['(10.0 * t)', 'finaltime'], {}), '(10.0 * t, finaltime)\n', (18139, 18160), False, 'from past.utils import old_div\n'), ((4729, 4755), 'builtins.str', 'str', (['lat_long_points[i][1]'], {}), '(lat_long_points[i][1])\n', (4732, 4755), False, 'from builtins import str\n'), ((7839, 7860), 'past.utils.old_div', 'old_div', (['t', 'finaltime'], {}), '(t, finaltime)\n', (7846, 7860), False, 'from past.utils import old_div\n'), ((12212, 12238), 'builtins.str', 'str', (['lat_long_points[i][1]'], {}), '(lat_long_points[i][1])\n', (12215, 12238), False, 'from builtins import str\n'), ((18052, 18073), 'past.utils.old_div', 'old_div', (['t', 'finaltime'], {}), '(t, finaltime)\n', (18059, 18073), False, 'from past.utils import old_div\n'), ((4720, 4726), 'builtins.str', 'str', (['i'], {}), '(i)\n', (4723, 4726), False, 'from builtins import str\n'), ((12203, 12209), 'builtins.str', 'str', (['i'], {}), '(i)\n', (12206, 12209), False, 'from builtins import str\n')] |
import numpy as np
from algorithms.ddpg.replay_buffer import ReplayBuffer
class PDSReplayBuffer(ReplayBuffer):
    """Experience replay buffer for PDS-DDPG.

    Extends the basic DDPG ``ReplayBuffer`` with per-transition side
    information: the sizes of the segments to be played in the next and
    next*2 time frames, a per-transition reward scale, and an experience
    length buffer (allocated here but not written by this class).
    """

    def __init__(self, state_dim, action_dim, length, batch_size):
        super().__init__(state_dim, action_dim, length, batch_size)
        # Extra per-transition records needed by PDS-DDPG.
        self.next_seg_size_buf = np.zeros([length, 1], dtype=np.float32)   # segment played in next TF
        self.next2_seg_size_buf = np.zeros([length, 1], dtype=np.float32)  # segment played in next*2 TF
        self.reward_scale_buf = np.zeros(length, dtype=np.float32)
        self.exp_len_buf = np.zeros(length, dtype=np.int16)

    def add(self, state, action, reward, next_state, done, next_seg_size=None, next2_seg_size=None, reward_scale=None):
        """Store one transition together with its PDS side information."""
        # NOTE(review): assumes super().add() advances ``self.pointer`` --
        # the extras must be written to the current slot first.
        slot = self.pointer
        self.next_seg_size_buf[slot] = next_seg_size
        self.next2_seg_size_buf[slot] = next2_seg_size
        self.reward_scale_buf[slot] = reward_scale
        super().add(state, action, reward, next_state, done)

    def sample(self):
        """Sample a minibatch and append the PDS extras to the base batch."""
        base_batch = super().sample()
        idx = self.sampled_indices
        extras = (self.next_seg_size_buf[idx],
                  self.next2_seg_size_buf[idx],
                  self.reward_scale_buf[idx])
        return base_batch + extras
| [
"numpy.zeros"
] | [((451, 490), 'numpy.zeros', 'np.zeros', (['[length, 1]'], {'dtype': 'np.float32'}), '([length, 1], dtype=np.float32)\n', (459, 490), True, 'import numpy as np\n'), ((577, 616), 'numpy.zeros', 'np.zeros', (['[length, 1]'], {'dtype': 'np.float32'}), '([length, 1], dtype=np.float32)\n', (585, 616), True, 'import numpy as np\n'), ((703, 737), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'np.float32'}), '(length, dtype=np.float32)\n', (711, 737), True, 'import numpy as np\n'), ((766, 798), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'np.int16'}), '(length, dtype=np.int16)\n', (774, 798), True, 'import numpy as np\n')] |
from socketserver import StreamRequestHandler, TCPServer
from socket import error as SocketError
import errno
import datetime
import time
import os
import base64
import numpy as np
import cv2
from scipy import misc
from warpgan import WarpGAN
from align.detect_align import detect_align
class GANnetworks:
    """Thin wrapper around a pretrained WarpGAN for caricature generation."""

    def __init__(self, isAligned, num_styles):
        # isAligned:  when False, run face detection/alignment before inference
        # num_styles: number of style samples (output images) per input image
        self.warpgan_dir = "./warpgan_pretrained/warpgan_pretrained"
        self.isAligned = isAligned
        self.num_styles = num_styles
        self.warpGAN = self.load_warpGAN()

    def load_warpGAN(self):
        """Instantiate WarpGAN and restore the pretrained weights."""
        net = WarpGAN()
        net.load_model(self.warpgan_dir)
        return net

    def generate_cartoon(self, img):
        """Generate `num_styles` caricatures for one image.

        Returns the generated images scaled into [0, 1], or None when no
        face could be detected in the input.
        """
        if not self.isAligned:
            t0 = time.time()
            img = detect_align(img)
            t1 = time.time()
            print("detect time cost ", t1 - t0, " s")
        if img is None:
            print("no face in img ******")
            return

        img = (img - 127.5) / 128.0  # normalize roughly into [-1, 1]
        batch = np.tile(img[None], [self.num_styles, 1, 1, 1])
        scales = 1.0 * np.ones(self.num_styles)
        styles = np.random.normal(
            0., 1., (self.num_styles, self.warpGAN.input_style.shape[1].value))

        start = time.time()
        output = self.warpGAN.generate_BA(batch, scales, 16, styles=styles)
        output = 0.5 * output + 0.5  # map network output back to [0, 1]
        end = time.time()
        print("generate caricatue time cost: ", end - start, " s.")
        return output
# --- one-time module setup: configure TF logging/GPU and load the model ---
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Just disables the warning, doesn't enable AVX/FMA
os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin inference to GPU index 3
num_styles = 1  # number of caricature styles generated per request
# Load the pretrained WarpGAN once at import time; the request handler below
# reuses this single instance for every connection.
warpGAN = GANnetworks(isAligned=False, num_styles=num_styles)
# Local smoke-test example (kept for reference):
# image_file = "image/2019-11-29_13_32_50.jpg"
# image = misc.imread(image_file, mode='RGB')
# outputs = warpGAN.generate_cartoon(image)
# for i in range(4):
#     # outdir = os.path.join("image", image_file[:-4])
#     misc.imsave(image_file[:-4] + '_{}.jpg'.format(i), outputs[i])
class MyTCPHandler(StreamRequestHandler):
    """Per-connection handler implementing a small length-prefixed protocol.

    Wire protocol (as implemented below):
      1. client sends an operation string -> server replies "OK"
      2. client sends the image byte length as a decimal string
      3. client streams the raw JPEG bytes until `imglens` bytes arrived
      4. server decodes the image, runs WarpGAN, saves the outputs, and
         sends back: length-of-result + "\\t", the encoded JPEG bytes,
         and finally the literal "done".
    """

    def handle(self):
        op = ""
        imglens = 0   # expected image size in bytes (0 = not yet received)
        lens = 0      # bytes received so far
        datalist = []
        print("op: ", op, " image1: ", len(datalist))
        try:
            while (imglens == 0 or lens < imglens):
                data = self.request.recv(10240) # receive the next chunk from the client
                if not data or len(data) == 0:
                    break
                # print(data)
                if len(op) == 0:
                    # First message: the operation string; acknowledge with "OK".
                    op = data.decode("utf-8")
                    msg = "OK".encode("utf-8")
                    self.request.send(msg)
                    print("op : ", op)
                elif len(op) > 0 and imglens == 0:
                    # Second message: the total image size (decimal string).
                    print("客户端传送图片大小: ", data)
                    imglens = data.decode("utf-8")
                    imglens = int(imglens)
                    print("imglens: ", imglens, " lens: ", lens)
                else:
                    # All further chunks are raw image bytes.
                    datalist.extend(data)
                    lens = lens + len(data)
                    # print("处理中...", "imglens: ", imglens, " lens: ", lens)
            if len(datalist) > 0 and lens == imglens:
                # Decode the received JPEG bytes into a BGR image.
                image = np.asarray(bytearray(datalist), dtype="uint8")
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                image_file = datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S.jpg')
                print("save file to: : ", image_file)
                cv2.imwrite(os.path.join("./image", image_file), image)
                print("接收完成")
                ## generate the caricature image(s) with the module-level WarpGAN
                outputs = warpGAN.generate_cartoon(image)
                # NOTE(review): generate_cartoon returns None when no face is
                # detected; the indexing below would then raise and be caught
                # by the except block — confirm this is the intended fallback.
                for i in range(num_styles):
                    outdir = os.path.join("image", image_file[:-4])
                    misc.imsave(outdir + '_{}.jpg'.format(i), outputs[i])
                    # cv2.imshow("img ", output[i])
                ## send the first generated image back to the client
                img_encode = cv2.imencode(".jpg", outputs[0])[1]
                data_encode = np.array(img_encode)
                # NOTE(review): ndarray.tostring() is deprecated (removed in
                # newer NumPy); tobytes() is the drop-in replacement.
                img_data = data_encode.tostring()
                lenImg = str(len(img_data)) + "\t"
                print("发送图片大小:", lenImg)
                print("??? ", type(img_encode), img_encode.shape, type(data_encode), data_encode.shape)
                cv2.imwrite("./image/temp.jpg", outputs[0])
                self.request.send(lenImg.encode("utf-8"))
                self.request.send(img_data)
            else:
                print("%%%%%%%%%%%% error image is none or break!")
            self.request.send("done".encode("utf-8"))
        except Exception as e:
            print(e)
            print(self.client_address, "error : 连接断开")
        finally:
            self.request.close() # always close the connection, even after errors

    # runs before handle(): connection established
    def setup(self):
        now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print("\n\ntime: ", now_time, " 连接建立:", self.client_address)

    # runs after handle() finishes
    def finish(self):
        print("释放连接")
if __name__ == "__main__":
    from threading import Thread

    try:
        # One listening TCPServer shared by a pool of daemon worker threads;
        # the main thread serves as well.
        worker_count = 16
        TCPServer.allow_reuse_address = True
        server = TCPServer(('', 8999), MyTCPHandler)
        for _ in range(worker_count):
            worker = Thread(target=server.serve_forever, daemon=True)
            worker.start()
        server.serve_forever()
    except Exception as e:
        print("exit: ", e)
"numpy.random.normal",
"numpy.tile",
"cv2.imwrite",
"socketserver.TCPServer",
"warpgan.WarpGAN",
"align.detect_align.detect_align",
"numpy.ones",
"cv2.imencode",
"os.path.join",
"numpy.array",
"datetime.datetime.now",
"cv2.imdecode",
"threading.Thread",
"time.time"
] | [((586, 595), 'warpgan.WarpGAN', 'WarpGAN', ([], {}), '()\n', (593, 595), False, 'from warpgan import WarpGAN\n'), ((1032, 1078), 'numpy.tile', 'np.tile', (['img[None]', '[self.num_styles, 1, 1, 1]'], {}), '(img[None], [self.num_styles, 1, 1, 1])\n', (1039, 1078), True, 'import numpy as np\n'), ((1146, 1237), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(self.num_styles, self.warpGAN.input_style.shape[1].value)'], {}), '(0.0, 1.0, (self.num_styles, self.warpGAN.input_style.shape\n [1].value))\n', (1162, 1237), True, 'import numpy as np\n'), ((1248, 1259), 'time.time', 'time.time', ([], {}), '()\n', (1257, 1259), False, 'import time\n'), ((1387, 1398), 'time.time', 'time.time', ([], {}), '()\n', (1396, 1398), False, 'import time\n'), ((5101, 5136), 'socketserver.TCPServer', 'TCPServer', (["('', 8999)", 'MyTCPHandler'], {}), "(('', 8999), MyTCPHandler)\n", (5110, 5136), False, 'from socketserver import StreamRequestHandler, TCPServer\n'), ((750, 761), 'time.time', 'time.time', ([], {}), '()\n', (759, 761), False, 'import time\n'), ((780, 797), 'align.detect_align.detect_align', 'detect_align', (['img'], {}), '(img)\n', (792, 797), False, 'from align.detect_align import detect_align\n'), ((814, 825), 'time.time', 'time.time', ([], {}), '()\n', (823, 825), False, 'import time\n'), ((1102, 1126), 'numpy.ones', 'np.ones', (['self.num_styles'], {}), '(self.num_styles)\n', (1109, 1126), True, 'import numpy as np\n'), ((5187, 5220), 'threading.Thread', 'Thread', ([], {'target': 'serv.serve_forever'}), '(target=serv.serve_forever)\n', (5193, 5220), False, 'from threading import Thread\n'), ((3214, 3251), 'cv2.imdecode', 'cv2.imdecode', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (3226, 3251), False, 'import cv2\n'), ((3939, 3959), 'numpy.array', 'np.array', (['img_encode'], {}), '(img_encode)\n', (3947, 3959), True, 'import numpy as np\n'), ((4223, 4266), 'cv2.imwrite', 'cv2.imwrite', (['"""./image/temp.jpg"""', 'outputs[0]'], 
{}), "('./image/temp.jpg', outputs[0])\n", (4234, 4266), False, 'import cv2\n'), ((4748, 4771), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4769, 4771), False, 'import datetime\n'), ((3421, 3456), 'os.path.join', 'os.path.join', (['"""./image"""', 'image_file'], {}), "('./image', image_file)\n", (3433, 3456), False, 'import os\n'), ((3653, 3691), 'os.path.join', 'os.path.join', (['"""image"""', 'image_file[:-4]'], {}), "('image', image_file[:-4])\n", (3665, 3691), False, 'import os\n'), ((3873, 3905), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'outputs[0]'], {}), "('.jpg', outputs[0])\n", (3885, 3905), False, 'import cv2\n'), ((3281, 3304), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3302, 3304), False, 'import datetime\n')] |
import numpy as np
import pandas as pd
def sma(series: pd.Series, short_period: int, long_period: int) -> pd.Series:
    """Simple moving-average crossover positions.

    Returns a Series aligned with `series` holding +1 where the short SMA
    is above the long SMA, -1 where it is not, and NaN wherever either
    rolling mean is still undefined (the warm-up window).
    """
    fast = series.rolling(short_period).mean()
    slow = series.rolling(long_period).mean()
    positions = pd.Series(np.where(fast > slow, 1, -1), index=series.index)
    # The `>` comparison coerces NaNs to False, so restore NaN explicitly
    # wherever either rolling mean is undefined.
    undefined = fast.isna() | slow.isna()
    positions.loc[undefined] = np.nan
    return positions
| [
"numpy.where"
] | [((266, 309), 'numpy.where', 'np.where', (['(short_series > long_series)', '(1)', '(-1)'], {}), '(short_series > long_series, 1, -1)\n', (274, 309), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def img_clahe(img):
    """Apply CLAHE (clip limit 2.0, 8x8 tiles) to a single-channel image."""
    equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    return equalizer.apply(img)
def img_clahe_cm(img):
    """Apply CLAHE (clip limit 1.0, 8x8 tiles) to each BGR channel of a color image."""
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
    b, g, r = cv2.split(img)
    equalized = tuple(clahe.apply(channel) for channel in (b, g, r))
    return cv2.merge(equalized)
def img_normalized(img):
    """Standardize an image to zero mean / unit variance.

    The small epsilon keeps the division finite for perfectly flat images.
    """
    return (img - np.mean(img)) / (np.std(img) + 1e-10)
def convert_16to8(img):
    """Rescale an image to span the full uint8 range [0, 255].

    Standardizes first, then min-max normalizes; the combined effect is an
    affine rescale of the input onto [0, 255].
    """
    scaled = (img - np.mean(img)) / np.std(img)
    scaled = (scaled - scaled.min()) / (scaled.max() - scaled.min())
    return (scaled * 255).astype(np.uint8)
def convert_8to16(img):
    """Rescale an image to span the full uint16 range [0, 65535].

    Standardizes first, then min-max normalizes; the combined effect is an
    affine rescale of the input onto [0, 65535].
    """
    scaled = (img - np.mean(img)) / np.std(img)
    scaled = (scaled - scaled.min()) / (scaled.max() - scaled.min())
    return (scaled * 65535).astype(np.uint16)
def sober_filter(img):
    """Sobel edge-magnitude approximation: 0.5*|dx| + 0.5*|dy|.

    Gradients are computed in a wider signed type to avoid overflow:
    CV_32F for uint16 input, CV_16S for uint8 input.

    Raises:
        TypeError: for any other input dtype. (Previously an unsupported
        dtype crashed with an opaque UnboundLocalError because dx/dy were
        never assigned.)
    """
    if img.dtype == "uint16":
        depth = cv2.CV_32F
    elif img.dtype == "uint8":
        depth = cv2.CV_16S
    else:
        raise TypeError(
            "sober_filter expects a uint8 or uint16 image, got %s" % img.dtype)
    dx = np.abs(np.array(cv2.Sobel(img, depth, 1, 0)))
    dy = np.abs(np.array(cv2.Sobel(img, depth, 0, 1)))
    edge = cv2.addWeighted(dx, 0.5, dy, 0.5, 0)
    return edge
| [
"numpy.mean",
"cv2.merge",
"numpy.abs",
"numpy.min",
"cv2.createCLAHE",
"numpy.max",
"cv2.addWeighted",
"cv2.split",
"numpy.std",
"cv2.Sobel"
] | [((63, 114), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (78, 114), False, 'import cv2\n'), ((192, 206), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (201, 206), False, 'import cv2\n'), ((219, 270), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(1.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=1.0, tileGridSize=(8, 8))\n', (234, 270), False, 'import cv2\n'), ((352, 372), 'cv2.merge', 'cv2.merge', (['(b, g, r)'], {}), '((b, g, r))\n', (361, 372), False, 'import cv2\n'), ((425, 436), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (431, 436), True, 'import numpy as np\n'), ((448, 460), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (455, 460), True, 'import numpy as np\n'), ((1227, 1237), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (1233, 1237), True, 'import numpy as np\n'), ((1247, 1257), 'numpy.abs', 'np.abs', (['dy'], {}), '(dy)\n', (1253, 1257), True, 'import numpy as np\n'), ((1269, 1305), 'cv2.addWeighted', 'cv2.addWeighted', (['dx', '(0.5)', 'dy', '(0.5)', '(0)'], {}), '(dx, 0.5, dy, 0.5, 0)\n', (1284, 1305), False, 'import cv2\n'), ((596, 607), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (602, 607), True, 'import numpy as np\n'), ((780, 791), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (786, 791), True, 'import numpy as np\n'), ((580, 592), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (587, 592), True, 'import numpy as np\n'), ((625, 636), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (631, 636), True, 'import numpy as np\n'), ((641, 652), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (647, 652), True, 'import numpy as np\n'), ((655, 666), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (661, 666), True, 'import numpy as np\n'), ((764, 776), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (771, 776), True, 'import numpy as np\n'), ((809, 820), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (815, 820), True, 
'import numpy as np\n'), ((825, 836), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (831, 836), True, 'import numpy as np\n'), ((839, 850), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (845, 850), True, 'import numpy as np\n'), ((985, 1017), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(1)', '(0)'], {}), '(img, cv2.CV_32F, 1, 0)\n', (994, 1017), False, 'import cv2\n'), ((1041, 1073), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_32F', '(0)', '(1)'], {}), '(img, cv2.CV_32F, 0, 1)\n', (1050, 1073), False, 'import cv2\n'), ((1128, 1160), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_16S', '(1)', '(0)'], {}), '(img, cv2.CV_16S, 1, 0)\n', (1137, 1160), False, 'import cv2\n'), ((1184, 1216), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_16S', '(0)', '(1)'], {}), '(img, cv2.CV_16S, 0, 1)\n', (1193, 1216), False, 'import cv2\n')] |
# call this script with `python -m evaluation.evaluate_poselines_globalaction`
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import datetime
import torch
import pickle
from torch.functional import norm
from tqdm import tqdm
from . import eval_utils
from compoelem.config import config
from compoelem.generate import global_action, pose_abstraction
from compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result
from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action
def compare_setupA(data, sort_method, norm_method):
    """Leave-one-out retrieval evaluation over `data` (setup A).

    For every query entry, the pose lines of all other entries are compared
    (only global-action normalization is supported), ranked with
    `sort_method`, and scored against the query's class label.

    Returns:
        (eval_dataframe, precision_curves): aggregated metrics dataframe and
        a per-label precision-at-rank curve dict.
    """
    if norm_method != 'norm_by_global_action':
        raise NotImplementedError("only norm_by_global_action is implemented")
    res_metrics = {}
    precision_curves = {}
    for query_data in tqdm(data, total=len(data)):
        compare_results = []
        #query_pose_lines = minmax_norm_by_imgrect(query_data["compoelem"][pose_lines_key], query_data["width"], query_data["height"])
        query_pose_lines_seq = norm_by_global_action(query_data["compoelem"]["pose_lines"], query_data["compoelem"]["global_action_lines"], fallback=True)
        for target_data in data:
            # Skip the query itself (identified by class name + image name).
            if query_data["className"] == target_data["className"] and query_data["imgName"] == target_data["imgName"]:
                continue
            #combined_ratio, hit_ratio, mean_distance_hits = compare_pose_lines_3(query_pose_lines, minmax_norm_by_imgrect(target_data["compoelem"][pose_lines_key], target_data["width"], target_data["height"]))
            target_pose_lines_seq = norm_by_global_action(target_data["compoelem"]["pose_lines"], target_data["compoelem"]["global_action_lines"], fallback=True)
            pair_compare_results = []
            # Compare every normalized query sequence against every target sequence.
            for query_pose_lines in query_pose_lines_seq:
                for target_pose_lines in target_pose_lines_seq:
                    combined_ratio, hit_ratio, mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)
                    pair_compare_results.append((combined_ratio, hit_ratio, mean_distance_hits, target_data))
            # Reduce the pairwise results for this target to a single row
            # (see filter_pose_line_ga_result for the exact selection rule).
            compare_results.append(filter_pose_line_ga_result(pair_compare_results))
        compare_results = np.array(compare_results)
        sorted_compare_results = sort_method(compare_results)
        query_label = query_data["className"]
        # The last column of each result row holds the target's metadata dict.
        res_labels = list(map(lambda x: x["className"], sorted_compare_results[:,-1]))
        metrics = eval_utils.score_retrievals(query_label, res_labels)
        label = metrics["label"]
        precision_curves[label] = metrics["precision_at_rank"]
        # Accumulate every metric per class label across all queries.
        for key in metrics.keys():
            if key != "label":
                if key not in res_metrics:
                    res_metrics[key] = {}
                if label not in res_metrics[key]:
                    res_metrics[key][label] = []
                res_metrics[key][label].append(metrics[key])
    return (eval_utils.get_eval_dataframe(res_metrics), precision_curves)
def lexsort_cr_hr(compare_results):
    """Sort result rows descending by hit ratio (col 1), then combined ratio (col 0).

    np.lexsort treats its *last* key as the most significant, so passing
    (combined_ratio, hit_ratio) makes hit_ratio the primary key; the final
    [::-1] turns the ascending lexsort into a descending ranking.
    """
    ascending_order = np.lexsort((compare_results[:, 0], compare_results[:, 1]))
    return compare_results[ascending_order][::-1]
# def eval_all_combinations(datastore, datastore_name):
def eval_all_combinations(datastore, datastore_name, filter_threshold, with_fallback):
    """Grid-search the bisection-cone parameters and evaluate every combination.

    For each parameter combination this mutates the global compoelem
    `config`, regenerates the global action lines and pose lines for every
    datastore entry, runs `compare_setupA`, and records the resulting
    metrics. A rolling copy of the log is pickled after every combination.

    Returns:
        list of per-combination result dicts.
    """
    tmp_eval_log = []
    all_res_metrics = []
    sort_method = lexsort_cr_hr
    setup = compare_setupA
    norm_method = 'norm_by_global_action'
    for cone_base_scale_factor in [0, 1, 2, 2.5]:
        for cone_scale_factor in [5, 10, 15]:
            for cone_opening_angle in [70, 80, 90]:
                for correction_angle in [40, 50]:
                    config["bisection"]["cone_base_scale_factor"] = cone_base_scale_factor
                    config["bisection"]["correction_angle"] = correction_angle
                    config["bisection"]["cone_opening_angle"] = cone_opening_angle
                    config["bisection"]["cone_scale_factor"] = cone_scale_factor
                    config["compare"]["filter_threshold"] = filter_threshold

                    # Regenerate the pose abstractions under the new config.
                    # (The previous version also collected the entries into an
                    # unused list; only the in-place mutation matters.)
                    for key in datastore.keys():
                        poses = datastore[key]["compoelem"]["poses"]
                        datastore[key]["compoelem"]["global_action_lines"] = global_action.get_global_action_lines(poses, fallback=False)
                        datastore[key]["compoelem"]["pose_lines"] = pose_abstraction.get_pose_lines(poses, fallback=with_fallback)

                    if setup.__name__ == 'compare_setupA':
                        result_filter_method_name = "filter_pose_line_ga_result"
                    else:
                        result_filter_method_name = "none"
                    experiment_id = "datastore: {}, setup: {}, filter_threshold: {}, norm_method: {}, compare_method: compare_pose_lines_3, result_filter_method: {}, sort_method: {}, correction_angle: {}, cone_opening_angle: {}, cone_scale_factor: {}".format(
                        datastore_name, setup.__name__, filter_threshold, norm_method, result_filter_method_name, sort_method.__name__, correction_angle, cone_opening_angle, cone_scale_factor
                    )
                    print("EXPERIMENT:", experiment_id)
                    # Timed from just before the evaluation run. (An earlier,
                    # immediately-overwritten assignment was removed.)
                    start_time = datetime.datetime.now()
                    eval_dataframe, precision_curves = setup(list(datastore.values()), sort_method, norm_method)
                    res = {
                        "experiment_id": experiment_id,
                        "config": config,
                        "filter_threshold": filter_threshold,
                        "correction_angle": correction_angle,
                        "cone_opening_angle": cone_opening_angle,
                        "cone_scale_factor": cone_scale_factor,
                        "cone_base_scale_factor": cone_base_scale_factor,
                        "datetime": start_time,
                        "setup": setup.__name__,
                        "eval_time_s": (datetime.datetime.now() - start_time).seconds,
                        "datastore_name": datastore_name,
                        "norm_method": norm_method,
                        "compare_method": "compare_pose_lines_3",
                        "result_filter_method": result_filter_method_name,
                        "sort_method": sort_method.__name__,
                        "eval_dataframe": eval_dataframe,
                        "precision_curves": precision_curves,
                        "with_fallback": with_fallback,
                        "only_poseline_fallbakc": True,
                        "new": True,
                    }
                    all_res_metrics.append(res)
                    tmp_eval_log.append(res)
                    print(res)
                    # BUG FIX: the open() handle was previously never closed.
                    with open(".tmpEvalLog_fth{}_onlyPoseFb_normGacFallback".format(filter_threshold), "wb") as log_file:
                        pickle.dump(tmp_eval_log, log_file)
    return all_res_metrics
| [
"compoelem.compare.pose_line.compare_pose_lines_3",
"compoelem.generate.global_action.get_global_action_lines",
"numpy.array",
"numpy.lexsort",
"datetime.datetime.now",
"compoelem.compare.pose_line.filter_pose_line_ga_result",
"compoelem.compare.normalize.norm_by_global_action",
"compoelem.generate.po... | [((1072, 1200), 'compoelem.compare.normalize.norm_by_global_action', 'norm_by_global_action', (["query_data['compoelem']['pose_lines']", "query_data['compoelem']['global_action_lines']"], {'fallback': '(True)'}), "(query_data['compoelem']['pose_lines'], query_data[\n 'compoelem']['global_action_lines'], fallback=True)\n", (1093, 1200), False, 'from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action\n'), ((2254, 2279), 'numpy.array', 'np.array', (['compare_results'], {}), '(compare_results)\n', (2262, 2279), True, 'import numpy as np\n'), ((1621, 1751), 'compoelem.compare.normalize.norm_by_global_action', 'norm_by_global_action', (["target_data['compoelem']['pose_lines']", "target_data['compoelem']['global_action_lines']"], {'fallback': '(True)'}), "(target_data['compoelem']['pose_lines'], target_data[\n 'compoelem']['global_action_lines'], fallback=True)\n", (1642, 1751), False, 'from compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action\n'), ((3321, 3379), 'numpy.lexsort', 'np.lexsort', (['(compare_results[:, 0], compare_results[:, 1])'], {}), '((compare_results[:, 0], compare_results[:, 1]))\n', (3331, 3379), True, 'import numpy as np\n'), ((2178, 2226), 'compoelem.compare.pose_line.filter_pose_line_ga_result', 'filter_pose_line_ga_result', (['pair_compare_results'], {}), '(pair_compare_results)\n', (2204, 2226), False, 'from compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result\n'), ((1975, 2032), 'compoelem.compare.pose_line.compare_pose_lines_3', 'compare_pose_lines_3', (['query_pose_lines', 'target_pose_lines'], {}), '(query_pose_lines, target_pose_lines)\n', (1995, 2032), False, 'from compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result\n'), ((4905, 4928), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4926, 
4928), False, 'import datetime\n'), ((5717, 5740), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5738, 5740), False, 'import datetime\n'), ((4612, 4672), 'compoelem.generate.global_action.get_global_action_lines', 'global_action.get_global_action_lines', (['poses'], {'fallback': '(False)'}), '(poses, fallback=False)\n', (4649, 4672), False, 'from compoelem.generate import global_action, pose_abstraction\n'), ((4741, 4803), 'compoelem.generate.pose_abstraction.get_pose_lines', 'pose_abstraction.get_pose_lines', (['poses'], {'fallback': 'with_fallback'}), '(poses, fallback=with_fallback)\n', (4772, 4803), False, 'from compoelem.generate import global_action, pose_abstraction\n'), ((6445, 6468), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6466, 6468), False, 'import datetime\n')] |
"""Rfs Module.
Uncompleted/Cancelled RFS Journal Implementation (Journal 2).
"""
from datetime import datetime
import numpy as np
from baseStation import Transmitter
from helpers import CoordinateConverter
from snapshot import *
class RfsAnalog:
    """Builds the nominal service-rate table for the uncompleted/cancelled
    RFS journal implementation (Journal 2).

    rfs_nsr[user][cell][slot] holds the user's traffic demand divided by the
    cell's service rate at the user's grid position.
    """

    def __init__(self, n_of_cell_per_ec, n_of_ec, n_of_ue_per_ec, traffic_scen):
        self.traffic_scen = traffic_scen
        self.n_of_cell_per_ec = n_of_cell_per_ec
        self.n_of_ec = n_of_ec
        self.n_of_ue_per_ec = n_of_ue_per_ec
        # UEs of an EC are laid out on a square grid of this edge length.
        self.edge_size_of_a_ec_zone = int(np.sqrt(self.n_of_ue_per_ec))
        # Demand/service-rate ratios, shape: (total users, cells, time slots).
        self.rfs_nsr = np.full(
            (self.n_of_ue_per_ec * self.n_of_ec, self.n_of_cell_per_ec,
             NUMBER_OF_TIME_SLOT_IN_ONE_DAY),
            INFINITELY_SMALL, dtype='float32')
        # Fixed base-station positions: the macro (index 0) in the centre of
        # the 5x5 grid, four micros at the corners.
        self.bs_coors = list(range(n_of_cell_per_ec))
        self.bs_coors[0] = (2, 2)
        self.bs_coors[1] = (0, 0)
        self.bs_coors[2] = (0, 4)
        self.bs_coors[3] = (4, 4)
        self.bs_coors[4] = (4, 0)
        self.service_rate = self.__get_service_rate_table()
        self.service_rate *= BITSEC_2_MBYTEHOUR  # convert bit/s to MByte/hour

    def _get_coordinate_of_a_user_by_index(self, user_nominal_index):
        """Map a user's index within its EC to (x, y) grid coordinates."""
        x_coor = int(user_nominal_index / self.edge_size_of_a_ec_zone)
        y_coor = int(user_nominal_index % self.edge_size_of_a_ec_zone)
        return x_coor, y_coor

    def create_rfs_nsr(self):
        """Fill rfs_nsr from the traffic snapshot and persist it."""
        snapshot = Snapshot()
        snapshot.set_traffic_scen_folder(self.traffic_scen)
        tr, threshold = snapshot.load_tr_cran()
        print("create_nominal_service_rate starts at:{}".format(datetime.now()))
        for ec_index in range(self.n_of_ec):
            # BUG FIX: the range previously started at `ec_index` instead of
            # `ec_index * n_of_ue_per_ec`, so for ec_index > 0 it re-processed
            # users of earlier ECs and never reached its own last users.
            for user_in_ec in range(ec_index * self.n_of_ue_per_ec,
                                    (1 + ec_index) * self.n_of_ue_per_ec):
                user_nominal_index = int(user_in_ec % self.n_of_ue_per_ec)
                x_coor, y_coor = self._get_coordinate_of_a_user_by_index(user_nominal_index)
                tr_of_a_user = tr[user_in_ec]
                number_of_time_slot = len(tr_of_a_user)
                for t in range(number_of_time_slot):
                    for bs_index in range(self.n_of_cell_per_ec):
                        val = tr_of_a_user[t] / self.service_rate[bs_index][x_coor][y_coor]
                        self.rfs_nsr[user_in_ec][bs_index][t] = val
        snapshot.save_nominal_service_rate(self.rfs_nsr)

    def __get_service_rate_table(self):
        """Compute each station's service rate over the 5x5 user grid."""
        n_of_user_in_one_side = 5
        service_rate = np.array([[[INFINITELY_SMALL for x in range(n_of_user_in_one_side)]
                                  for x in range(n_of_user_in_one_side)]
                                 for x in range(self.n_of_cell_per_ec)])
        macro_bs_transmitter = Transmitter(CoordinateConverter.GRID_WIDTH, BSType.MACRO)  # BSType.MACRO MAX_TX_POWER: 20
        micro_bs_transmitter = Transmitter(CoordinateConverter.GRID_WIDTH, BSType.MICRO)  # BSType.MICRO MAX_TX_POWER: 6.3
        for bs_index in range(self.n_of_cell_per_ec):
            bs_coor = self.bs_coors[bs_index]
            # Station 0 is the macro cell; all others are micro cells.
            if bs_index == 0:
                macro_bs_transmitter.calculate_service_rate_overall(service_rate[bs_index], bs_coor, None)
            else:
                micro_bs_transmitter.calculate_service_rate_overall(service_rate[bs_index], bs_coor, None)
        return service_rate
| [
"datetime.datetime.now",
"numpy.sqrt",
"baseStation.Transmitter"
] | [((2787, 2844), 'baseStation.Transmitter', 'Transmitter', (['CoordinateConverter.GRID_WIDTH', 'BSType.MACRO'], {}), '(CoordinateConverter.GRID_WIDTH, BSType.MACRO)\n', (2798, 2844), False, 'from baseStation import Transmitter\n'), ((2909, 2966), 'baseStation.Transmitter', 'Transmitter', (['CoordinateConverter.GRID_WIDTH', 'BSType.MICRO'], {}), '(CoordinateConverter.GRID_WIDTH, BSType.MICRO)\n', (2920, 2966), False, 'from baseStation import Transmitter\n'), ((541, 569), 'numpy.sqrt', 'np.sqrt', (['self.n_of_ue_per_ec'], {}), '(self.n_of_ue_per_ec)\n', (548, 569), True, 'import numpy as np\n'), ((1670, 1684), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1682, 1684), False, 'from datetime import datetime\n')] |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import piquasso as pq
def test_measure_particle_number_on_one_mode():
    """Measuring only mode 2 collapses it while keeping the rest coherent."""
    with pq.Program() as program:
        pq.Q() | pq.StateVector([0, 1, 1]) * np.sqrt(2 / 6)
        pq.Q(2) | pq.StateVector([1]) * np.sqrt(1 / 6)
        pq.Q(2) | pq.StateVector([2]) * np.sqrt(3 / 6)

        pq.Q(2) | pq.ParticleNumberMeasurement()

    result = pq.PureFockSimulator(d=3).execute(program)

    assert np.isclose(sum(result.state.fock_probabilities), 1)

    sample = result.samples[0]
    assert sample in ((1,), (2,))

    if sample == (1,):
        # Modes 0 and 1 remain in the renormalized superposition.
        expected_instructions = [
            0.5773502691896258 * pq.StateVector([0, 0, 1]),
            0.816496580927726 * pq.StateVector([0, 1, 1]),
        ]
    else:
        expected_instructions = [pq.StateVector([0, 0, 2])]

    expected_state = pq.PureFockSimulator(d=3).execute_instructions(
        instructions=expected_instructions
    ).state

    assert result.state == expected_state
def test_measure_particle_number_on_two_modes():
    """Measuring modes (1, 2) must collapse onto one of the superposed terms."""
    with pq.Program() as program:
        pq.Q(1, 2) | pq.StateVector([1, 1]) * np.sqrt(2 / 6)
        pq.Q(1, 2) | pq.StateVector([0, 1]) * np.sqrt(1 / 6)
        pq.Q(1, 2) | pq.StateVector([0, 2]) * np.sqrt(3 / 6)

        pq.Q(1, 2) | pq.ParticleNumberMeasurement()

    result = pq.PureFockSimulator(d=3).execute(program)

    assert np.isclose(sum(result.state.fock_probabilities), 1)

    sample = result.samples[0]
    # Map each possible measurement outcome to the collapsed occupation list.
    collapsed_occupations = {
        (0, 1): [0, 0, 1],
        (1, 1): [0, 1, 1],
        (0, 2): [0, 0, 2],
    }
    assert sample in collapsed_occupations

    expected_state = pq.PureFockSimulator(d=3).execute_instructions(
        instructions=[pq.StateVector(collapsed_occupations[sample])]
    ).state

    assert result.state == expected_state
def test_measure_particle_number_on_all_modes():
    """A full measurement projects the state onto a single Fock basis vector."""
    config = pq.Config(cutoff=2)
    simulator = pq.PureFockSimulator(d=3, config=config)

    with pq.Program() as program:
        pq.Q() | 0.5 * pq.StateVector([0, 0, 0])
        pq.Q() | 0.5 * pq.StateVector([0, 0, 1])
        pq.Q() | np.sqrt(1 / 2) * pq.StateVector([1, 0, 0])

        pq.Q() | pq.ParticleNumberMeasurement()

    result = simulator.execute(program)

    assert np.isclose(sum(result.state.fock_probabilities), 1)

    sample = result.samples[0]
    assert sample in ((0, 0, 0), (0, 0, 1), (1, 0, 0))

    # The post-measurement state is exactly the sampled occupation vector.
    expected_state = pq.PureFockSimulator(d=3, config=config).execute_instructions(
        instructions=[pq.StateVector(list(sample))]
    ).state

    assert result.state == expected_state
def test_measure_particle_number_with_multiple_shots():
    """Executing with `shots` repetitions yields exactly that many samples."""
    expected_shots = 4

    # NOTE: the state definition must already know the cutoff here; ideally a
    # state would be parameterized only by `d` (and possibly a `config`).
    simulator = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=2))

    with pq.Program() as program:
        pq.Q() | 0.5 * pq.StateVector([0, 0, 0])
        pq.Q() | 0.5 * pq.StateVector([0, 0, 1])
        pq.Q() | np.sqrt(1 / 2) * pq.StateVector([1, 0, 0])

        pq.Q() | pq.ParticleNumberMeasurement()

    result = simulator.execute(program, expected_shots)

    assert np.isclose(sum(result.state.fock_probabilities), 1)
    assert len(result.samples) == expected_shots
| [
"piquasso.Config",
"piquasso.Q",
"numpy.sqrt",
"piquasso.PureFockSimulator",
"piquasso.Program",
"piquasso.ParticleNumberMeasurement",
"piquasso.StateVector"
] | [((963, 988), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (983, 988), True, 'import piquasso as pq\n'), ((2096, 2121), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (2116, 2121), True, 'import piquasso as pq\n'), ((3086, 3105), 'piquasso.Config', 'pq.Config', ([], {'cutoff': '(2)'}), '(cutoff=2)\n', (3095, 3105), True, 'import piquasso as pq\n'), ((3123, 3163), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)', 'config': 'config'}), '(d=3, config=config)\n', (3143, 3163), True, 'import piquasso as pq\n'), ((700, 712), 'piquasso.Program', 'pq.Program', ([], {}), '()\n', (710, 712), True, 'import piquasso as pq\n'), ((1223, 1248), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (1243, 1248), True, 'import piquasso as pq\n'), ((1818, 1830), 'piquasso.Program', 'pq.Program', ([], {}), '()\n', (1828, 1830), True, 'import piquasso as pq\n'), ((2382, 2407), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (2402, 2407), True, 'import piquasso as pq\n'), ((3174, 3186), 'piquasso.Program', 'pq.Program', ([], {}), '()\n', (3184, 3186), True, 'import piquasso as pq\n'), ((3678, 3718), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)', 'config': 'config'}), '(d=3, config=config)\n', (3698, 3718), True, 'import piquasso as pq\n'), ((4814, 4826), 'piquasso.Program', 'pq.Program', ([], {}), '()\n', (4824, 4826), True, 'import piquasso as pq\n'), ((733, 739), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (737, 739), True, 'import piquasso as pq\n'), ((794, 801), 'piquasso.Q', 'pq.Q', (['(2)'], {}), '(2)\n', (798, 801), True, 'import piquasso as pq\n'), ((849, 856), 'piquasso.Q', 'pq.Q', (['(2)'], {}), '(2)\n', (853, 856), True, 'import piquasso as pq\n'), ((905, 912), 'piquasso.Q', 'pq.Q', (['(2)'], {}), '(2)\n', (909, 912), True, 'import piquasso as pq\n'), ((915, 945), 
'piquasso.ParticleNumberMeasurement', 'pq.ParticleNumberMeasurement', ([], {}), '()\n', (943, 945), True, 'import piquasso as pq\n'), ((1554, 1579), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (1574, 1579), True, 'import piquasso as pq\n'), ((1851, 1861), 'piquasso.Q', 'pq.Q', (['(1)', '(2)'], {}), '(1, 2)\n', (1855, 1861), True, 'import piquasso as pq\n'), ((1912, 1922), 'piquasso.Q', 'pq.Q', (['(1)', '(2)'], {}), '(1, 2)\n', (1916, 1922), True, 'import piquasso as pq\n'), ((1973, 1983), 'piquasso.Q', 'pq.Q', (['(1)', '(2)'], {}), '(1, 2)\n', (1977, 1983), True, 'import piquasso as pq\n'), ((2035, 2045), 'piquasso.Q', 'pq.Q', (['(1)', '(2)'], {}), '(1, 2)\n', (2039, 2045), True, 'import piquasso as pq\n'), ((2048, 2078), 'piquasso.ParticleNumberMeasurement', 'pq.ParticleNumberMeasurement', ([], {}), '()\n', (2076, 2078), True, 'import piquasso as pq\n'), ((2600, 2625), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (2620, 2625), True, 'import piquasso as pq\n'), ((3207, 3213), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (3211, 3213), True, 'import piquasso as pq\n'), ((3256, 3262), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (3260, 3262), True, 'import piquasso as pq\n'), ((3305, 3311), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (3309, 3311), True, 'import piquasso as pq\n'), ((3366, 3372), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (3370, 3372), True, 'import piquasso as pq\n'), ((3375, 3405), 'piquasso.ParticleNumberMeasurement', 'pq.ParticleNumberMeasurement', ([], {}), '()\n', (3403, 3405), True, 'import piquasso as pq\n'), ((3946, 3986), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)', 'config': 'config'}), '(d=3, config=config)\n', (3966, 3986), True, 'import piquasso as pq\n'), ((4783, 4802), 'piquasso.Config', 'pq.Config', ([], {'cutoff': '(2)'}), '(cutoff=2)\n', (4792, 4802), True, 'import piquasso as pq\n'), ((4847, 4853), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', 
(4851, 4853), True, 'import piquasso as pq\n'), ((4896, 4902), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (4900, 4902), True, 'import piquasso as pq\n'), ((4945, 4951), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (4949, 4951), True, 'import piquasso as pq\n'), ((5006, 5012), 'piquasso.Q', 'pq.Q', ([], {}), '()\n', (5010, 5012), True, 'import piquasso as pq\n'), ((5015, 5045), 'piquasso.ParticleNumberMeasurement', 'pq.ParticleNumberMeasurement', ([], {}), '()\n', (5043, 5045), True, 'import piquasso as pq\n'), ((742, 767), 'piquasso.StateVector', 'pq.StateVector', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (756, 767), True, 'import piquasso as pq\n'), ((770, 784), 'numpy.sqrt', 'np.sqrt', (['(2 / 6)'], {}), '(2 / 6)\n', (777, 784), True, 'import numpy as np\n'), ((804, 823), 'piquasso.StateVector', 'pq.StateVector', (['[1]'], {}), '([1])\n', (818, 823), True, 'import piquasso as pq\n'), ((826, 840), 'numpy.sqrt', 'np.sqrt', (['(1 / 6)'], {}), '(1 / 6)\n', (833, 840), True, 'import numpy as np\n'), ((859, 878), 'piquasso.StateVector', 'pq.StateVector', (['[2]'], {}), '([2])\n', (873, 878), True, 'import piquasso as pq\n'), ((881, 895), 'numpy.sqrt', 'np.sqrt', (['(3 / 6)'], {}), '(3 / 6)\n', (888, 895), True, 'import numpy as np\n'), ((1864, 1886), 'piquasso.StateVector', 'pq.StateVector', (['[1, 1]'], {}), '([1, 1])\n', (1878, 1886), True, 'import piquasso as pq\n'), ((1889, 1903), 'numpy.sqrt', 'np.sqrt', (['(2 / 6)'], {}), '(2 / 6)\n', (1896, 1903), True, 'import numpy as np\n'), ((1925, 1947), 'piquasso.StateVector', 'pq.StateVector', (['[0, 1]'], {}), '([0, 1])\n', (1939, 1947), True, 'import piquasso as pq\n'), ((1950, 1964), 'numpy.sqrt', 'np.sqrt', (['(1 / 6)'], {}), '(1 / 6)\n', (1957, 1964), True, 'import numpy as np\n'), ((1986, 2008), 'piquasso.StateVector', 'pq.StateVector', (['[0, 2]'], {}), '([0, 2])\n', (2000, 2008), True, 'import piquasso as pq\n'), ((2011, 2025), 'numpy.sqrt', 'np.sqrt', (['(3 / 6)'], {}), '(3 / 6)\n', (2018, 2025), True, 'import numpy as 
np\n'), ((2818, 2843), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)'}), '(d=3)\n', (2838, 2843), True, 'import piquasso as pq\n'), ((3222, 3247), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3236, 3247), True, 'import piquasso as pq\n'), ((3271, 3296), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3285, 3296), True, 'import piquasso as pq\n'), ((3314, 3328), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (3321, 3328), True, 'import numpy as np\n'), ((3331, 3356), 'piquasso.StateVector', 'pq.StateVector', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (3345, 3356), True, 'import piquasso as pq\n'), ((4213, 4253), 'piquasso.PureFockSimulator', 'pq.PureFockSimulator', ([], {'d': '(3)', 'config': 'config'}), '(d=3, config=config)\n', (4233, 4253), True, 'import piquasso as pq\n'), ((4862, 4887), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4876, 4887), True, 'import piquasso as pq\n'), ((4911, 4936), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4925, 4936), True, 'import piquasso as pq\n'), ((4954, 4968), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (4961, 4968), True, 'import numpy as np\n'), ((4971, 4996), 'piquasso.StateVector', 'pq.StateVector', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4985, 4996), True, 'import piquasso as pq\n'), ((2500, 2525), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2514, 2525), True, 'import piquasso as pq\n'), ((3828, 3853), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3842, 3853), True, 'import piquasso as pq\n'), ((1379, 1404), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1393, 1404), True, 'import piquasso as pq\n'), ((1442, 1467), 'piquasso.StateVector', 'pq.StateVector', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (1456, 1467), True, 'import piquasso as 
pq\n'), ((1672, 1697), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 2]'], {}), '([0, 0, 2])\n', (1686, 1697), True, 'import piquasso as pq\n'), ((2718, 2743), 'piquasso.StateVector', 'pq.StateVector', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (2732, 2743), True, 'import piquasso as pq\n'), ((4096, 4121), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4110, 4121), True, 'import piquasso as pq\n'), ((2936, 2961), 'piquasso.StateVector', 'pq.StateVector', (['[0, 0, 2]'], {}), '([0, 0, 2])\n', (2950, 2961), True, 'import piquasso as pq\n'), ((4363, 4388), 'piquasso.StateVector', 'pq.StateVector', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4377, 4388), True, 'import piquasso as pq\n')] |
import time
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import tf_utils
from common import utils
from common.ops import ops as custom_ops
from common.ops import transformation
from common.ops.em_routing import em_routing
from common.ops.routing import dynamic_routing
# Small epsilon to guard divisions / logs against zero.
# NOTE(review): not referenced anywhere in this portion of the file — confirm
# it is used elsewhere before removing.
eps = 1e-10
class Activation(Layer):
  """Applies a capsule activation along the last axis.

  The activation function is resolved once from ``custom_ops`` (may be
  None). When ``with_prob`` is True the layer returns ``(pose, prob)``,
  otherwise just the activated pose.
  """

  def __init__(self,
               activation='squash',
               with_prob=False,
               **kwargs):
    super(Activation, self).__init__(**kwargs)
    # May resolve to None, in which case the layer is a pass-through.
    self.activation_fn = custom_ops.get_activation(activation)
    self.with_prob = with_prob

  def call(self, inputs, **kwargs):
    pose, prob = inputs, None
    if self.activation_fn:
      pose, prob = self.activation_fn(inputs, axis=-1)
    return (pose, prob) if self.with_prob else pose
class PrimaryCapsule(Layer):
  """First capsule layer: a Conv2D whose output channels are regrouped into
  ``groups`` capsules of ``atoms`` dimensions each.

  With ``conv_caps=True`` the spatial grid is kept
  ([bs, H, W, groups, atoms]); otherwise space and groups are flattened
  into one capsule axis ([bs, H*W*groups, atoms]).
  """
  def __init__(self,
               kernel_size,
               strides,
               use_bias=False,
               conv_caps=False,
               padding='valid',
               groups=32,
               atoms=8,
               activation='squash',
               kernel_initializer=keras.initializers.glorot_normal(),
               kernel_regularizer=None,
               **kwargs):
    # NOTE(review): the default initializer object is created once at class
    # definition time and shared across instances — harmless for a stateless
    # initializer, but worth confirming.
    super(PrimaryCapsule, self).__init__(**kwargs)
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.groups = groups
    self.atoms = atoms
    self.conv_caps = conv_caps
    # May be None, in which case raw poses are returned.
    self.activation_fn = custom_ops.get_activation(activation)
    self.conv = keras.layers.Conv2D(filters=self.groups * self.atoms,
                                   kernel_size=self.kernel_size,
                                   strides=self.strides,
                                   padding=self.padding,
                                   use_bias=use_bias,
                                   activation=None,
                                   kernel_initializer=kernel_initializer,
                                   kernel_regularizer=kernel_regularizer)
  def call(self, inputs, **kwargs):
    pose = self.conv(inputs)
    pose_shape = pose.get_shape().as_list()
    if self.conv_caps:
      # Keep the spatial grid: [bs, H, W, groups, atoms].
      pose = tf.reshape(pose, shape=[-1, pose_shape[1], pose_shape[2], self.groups, self.atoms])
    else:
      # Flatten spatial positions and groups into one capsule axis.
      pose = tf.reshape(pose, shape=[-1, pose_shape[1]*pose_shape[2]*self.groups, self.atoms])
    if self.activation_fn:
      pose, prob = self.activation_fn(pose, axis=-1)
      return pose, prob
    else:
      return pose
class CapsuleTransformDense(Layer):
  """Dense capsule transform: maps every input capsule into votes for
  ``num_out`` output capsules of ``out_atom`` dimensions.

  When ``matrix=True`` the atom vector is treated as a square pose matrix
  of side ``wide = sqrt(in_atom)`` and transformed by matrix product;
  otherwise a plain linear map is used. ``share_weights=True`` reuses one
  weight across all input capsules (tiled in build()).
  """
  def __init__(self,
               num_out,
               out_atom,
               share_weights=False,
               matrix=False,
               initializer=keras.initializers.glorot_normal(),
               regularizer=None,
               **kwargs):
    super(CapsuleTransformDense, self).__init__(**kwargs)
    self.num_out = num_out
    self.out_atom = out_atom
    self.share_weights = share_weights
    self.matrix = matrix
    # Side length of the pose matrix; filled in build() when matrix=True.
    self.wide = None
    self.initializer = initializer
    self.regularizer = regularizer
  def build(self, input_shape):
    in_atom = input_shape[-1]
    in_num = input_shape[-2]
    if self.matrix:
      # Interpret the atom vector as a (wide x wide) pose matrix.
      self.wide = int(np.sqrt(in_atom))
    if self.share_weights:
      if self.wide:
        self.kernel = self.add_weight(
            name='capsule_kernel',
            shape=(1, self.num_out, self.wide, self.wide),
            initializer=self.initializer,
            regularizer=self.regularizer,
            trainable=True)
        # NOTE(review): tf.tile rebinds self.kernel to a plain tensor; the
        # underlying variable stays tracked because add_weight registered it.
        self.kernel = tf.tile(self.kernel, [in_num, 1, 1, 1])
      else:
        self.kernel = self.add_weight(
            name='capsule_kernel',
            shape=(1, in_atom,
                   self.num_out * self.out_atom),
            initializer=self.initializer,
            regularizer=self.regularizer,
            trainable=True)
        self.kernel = tf.tile(self.kernel, [in_num, 1, 1])
    else:
      if self.wide:
        self.kernel = self.add_weight(
            name='capsule_kernel',
            shape=(in_num, self.num_out, self.wide, self.wide),
            initializer=self.initializer,
            regularizer=self.regularizer,
            trainable=True)
      else:
        self.kernel = self.add_weight(
            name='capsule_kernel',
            shape=(in_num, in_atom,
                   self.num_out * self.out_atom),
            initializer=self.initializer,
            regularizer=self.regularizer,
            trainable=True)
  def call(self, inputs, **kwargs):
    in_shape = inputs.get_shape().as_list()
    in_shape[0] = -1  # keep the batch dimension dynamic
    if self.wide:
      # [bs, in_num, in_atom] -> [bs, in_num, wide, wide]
      inputs = tf.reshape(inputs, in_shape[:-1]+[self.wide, self.wide])
      # [bs, in_num, a, b] X [in_num, out_num, b, c]
      # -> [bs, in_num, out_num, a, c]
      outputs = transformation.matrix_capsule_element_wise(inputs, self.kernel, self.num_out)
      outputs = tf.reshape(outputs, in_shape[:-1] + [self.num_out] + [in_shape[-1]])
    else:
      # [bs, in_num, in_atom] X [in_num, in_atom, out_num*out_atom]
      # -> [bs, in_num, out_num, out_atom]
      outputs = transformation.matmul_element_wise(inputs, self.kernel, self.num_out, self.out_atom)
    return outputs
class CapsuleTransformConv(Layer):
  """Convolutional capsule transform: tiles ``kernel_size`` windows over the
  spatial grid and maps each window's capsules to votes for ``filter``
  output capsules of ``atom`` dimensions.
  """
  def __init__(self,
               kernel_size,
               stride,
               filter,
               atom,
               initializer=keras.initializers.glorot_normal(),
               regularizer=None,
               **kwargs):
    super(CapsuleTransformConv, self).__init__(**kwargs)
    self.kernel_size = kernel_size
    self.stride = stride
    self.filter = filter
    self.atom = atom
    self.initializer = initializer
    self.regularizer = regularizer
  def build(self, input_shape):
    # inputs [bs, height, width, channel, in_atom]
    in_channel = input_shape[-2]
    in_atom = input_shape[-1]
    # One transform matrix per window position and input channel.
    self.matrix = self.add_weight(
        name='capsule_kernel',
        shape=(self.kernel_size*self.kernel_size*in_channel, in_atom,
               self.filter*self.atom),
        initializer=self.initializer,
        regularizer=self.regularizer,
        trainable=True)
  def call(self, inputs, **kwargs):
    # inputs [bs, height, width, channel, in_atom]
    inputs_tile, _ = utils.kernel_tile(inputs, self.kernel_size, self.stride)
    # tile [bs, out_height, out_width, kernel*kernel*channel, in_atom]
    outputs = transformation.matmul_element_wise(inputs_tile, self.matrix, self.filter, self.atom)
    # [bs, out_height, out_width, kernel*kernel*channel, out_num, out_atom]
    return outputs
class CapsuleGroups(Layer):
  """Reshapes a [bs, H, W, C] feature map into capsule groups
  [bs, H*W, C // atoms, atoms], optionally applying an activation."""

  def __init__(self,
               height,
               width,
               channel,
               atoms,
               activation=None,
               **kwargs):
    super(CapsuleGroups, self).__init__(**kwargs)
    self.height = height
    self.width = width
    self.channel = channel
    self.atoms = atoms
    # Resolved once; None disables the activation step.
    self.activation_fn = custom_ops.get_activation(activation)

  def call(self, inputs, **kwargs):
    num_groups = self.channel // self.atoms
    target_shape = [-1, self.height * self.width, num_groups, self.atoms]
    votes = tf.reshape(inputs, shape=target_shape)
    if self.activation_fn:
      votes, _ = self.activation_fn(votes)
    return votes
class RoutingPooling(Layer):
  """Spatial pooling that aggregates each window by dynamic routing
  instead of max/average."""
  def __init__(self,
               kernel_size,
               strides,
               atoms,
               in_norm=True,
               num_routing=2,
               temper=1.0,
               **kwargs):
    super(RoutingPooling, self).__init__(**kwargs)
    self.kernel_size = kernel_size
    self.strides = strides
    self.atoms = atoms
    # NOTE(review): in_norm is stored but never read in call() — confirm
    # whether input normalization was intended here.
    self.in_norm = in_norm
    self.num_routing = num_routing
    self.temper = temper
  def call(self, inputs, **kwargs):
    # [bs, out_h, out_w, k*k, channel] window extraction.
    patched = batch_2d(inputs, self.kernel_size, self.strides)
    patched_shape = patched.get_shape().as_list()
    # Split channels into capsule groups: [..., k*k, channel//atoms, atoms].
    patched = tf.reshape(patched,
                         [-1] + patched_shape[1:4] + [patched_shape[4] // self.atoms, self.atoms])
    # Move window positions next to atoms so routing reduces over the window.
    patched = tf.transpose(patched, perm=[0, 1, 2, 4, 3, 5])
    patched = tf.expand_dims(patched, axis=-2)
    pose, _ = dynamic_routing(patched,
                              num_routing=self.num_routing,
                              softmax_in=True,
                              temper=self.temper,
                              activation='norm')
    # Restore the [bs, out_h, out_w, channel] layout.
    pose = tf.reshape(pose, [-1] + patched_shape[1:3] + [patched_shape[4]])
    return pose
class FMPooling(Layer):
  """Spatial pooling that aggregates each window with a factorization-machine
  reduction (see ``get_factorization_machines``) instead of max/average.

  Bug fix: ``get_factorization_machines`` returns an ``(outputs, weight)``
  pair (weight is always None); the original passed the whole pair into
  ``tf.reshape``. The pair is now unpacked.
  """
  def __init__(self,
               kernel_size,
               strides,
               fm_norm,
               atoms,
               out_norm=False,
               **kwargs):
    super(FMPooling, self).__init__(**kwargs)
    self.kernel_size = kernel_size
    self.strides = strides
    # NOTE(review): fm_norm is stored but never read in call() — confirm
    # whether FM-input normalization was intended.
    self.fm_norm = fm_norm
    self.atoms = atoms
    self.out_norm = out_norm

  def call(self, inputs, **kwargs):
    # [bs, out_h, out_w, k*k, channel] window extraction.
    patched = batch_2d(inputs, self.kernel_size, self.strides)
    patched_shape = patched.get_shape().as_list()
    # Split channels into capsule groups: [..., k*k, channel//atoms, atoms].
    patched = tf.reshape(patched,
                         [-1] + patched_shape[1:4] + [patched_shape[4] // self.atoms, self.atoms])
    # Move the window axis next to atoms so FM reduces over the window.
    patched = tf.transpose(patched, perm=[0, 1, 2, 4, 3, 5])
    # FM reduction over window positions; the second element is always None.
    pool, _ = get_factorization_machines(patched, axis=-2)
    pool = tf.reshape(pool, [-1] + patched_shape[1:3] + [patched_shape[4]])
    if self.out_norm:
      pool, _ = custom_ops.vector_norm(pool)
    return pool
class DynamicRouting(Layer):
  """Wraps ``dynamic_routing`` as a layer; returns (pose, prob) with the
  singleton routing axes squeezed away."""
  def __init__(self,
               num_routing=3,
               softmax_in=False,
               temper=1.0,
               activation='squash',
               pooling=False,
               log=None,
               **kwargs):
    super(DynamicRouting, self).__init__(**kwargs)
    self.num_routing = num_routing  # number of routing iterations
    self.softmax_in = softmax_in    # forwarded to dynamic_routing
    self.temper = temper            # softmax temperature
    self.activation = activation
    self.pooling = pooling          # if True, adds a singleton axis before routing
    # NOTE(review): log is stored but never used in this class.
    self.log = log
  def call(self, inputs, **kwargs):
    if self.pooling:
      # Insert a singleton output-capsule axis.
      inputs = tf.expand_dims(inputs, -2)
    pose, prob = dynamic_routing(inputs,
                                 num_routing=self.num_routing,
                                 softmax_in=self.softmax_in,
                                 temper=self.temper,
                                 activation=self.activation)
    # Squeeze the singleton axes kept by dynamic_routing.
    pose = tf.squeeze(pose, axis=-3)
    prob = tf.squeeze(prob, axis=[-3, -1])
    return pose, prob
class EMRouting(Layer):
  """Wraps ``em_routing`` (EM-style capsule routing) as a layer.

  Expects a (votes, activations) pair and returns (pose, prob). Two
  per-output-capsule bias terms (beta_a, beta_v) are created in build().
  """
  def __init__(self,
               num_routing=3,
               log=None,
               **kwargs):
    super(EMRouting, self).__init__(**kwargs)
    self.num_routing = num_routing
    # NOTE(review): log is stored but never used in this class.
    self.log = log
  def build(self, input_shape):
    # ----- Betas -----#
    """
    # Initialization from <NAME> [1]:
    beta_v_hui = tf.get_variable(
      name='beta_v',
      shape=[1, 1, 1, o],
      dtype=tf.float32,
      initializer=tf.contrib.layers.xavier_initializer())
    beta_a_hui = tf.get_variable(
      name='beta_a',
      shape=[1, 1, 1, o],
      dtype=tf.float32,
      initializer=tf.contrib.layers.xavier_initializer())
    # AG 21/11/2018:
    # Tried to find std according to Hinton's comments on OpenReview
    # https://openreview.net/forum?id=HJWLfGWRb&noteId=r1lQjCAChm
    # Hinton: "We used truncated_normal_initializer and set the std so that at the
    # start of training half of the capsules in each layer are active and half
    # inactive (for the Primary Capsule layer where the activation is not computed
    # through routing we use different std for activation convolution weights &
    # for pose parameter convolution weights)."
    #
    # std beta_v seems to control the spread of activations
    # To try and achieve what Hinton said about half active and half not active,
    # I change the std values and check the histogram/distributions in
    # Tensorboard
    # to try and get a good spread across all values. I couldn't get this working
    # nicely.
    beta_v_hui = slim.model_variable(
      name='beta_v',
      shape=[1, 1, 1, 1, o, 1],
      dtype=tf.float32,
      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=10.0))
    """
    o = input_shape[0].as_list()[-2]  # out caps
    self.beta_a = self.add_weight(name='beta_a',
                                  shape=[1, 1, o, 1],
                                  dtype=tf.float32,
                                  initializer=tf.keras.initializers.TruncatedNormal(mean=-1000.0, stddev=500.0))
    # AG 04/10/2018: using slim.variable to create instead of tf.get_variable so
    # that they get correctly placed on the CPU instead of GPU in the multi-gpu
    # version.
    # One beta per output capsule type
    # (N, i, o, atom)
    self.beta_v = self.add_weight(name='beta_v',
                                  shape=[1, 1, o, 1],
                                  dtype=tf.float32,
                                  initializer=tf.keras.initializers.GlorotNormal(),
                                  regularizer=None)
  def call(self, inputs, **kwargs):
    # votes (bs, in, out, atom)
    # activations (bs, in, 1)
    votes_flat, activation_flat = inputs
    pose, prob = em_routing(votes_flat,
                            activation_flat,
                            self.beta_a,
                            self.beta_v,
                            self.num_routing,
                            final_lambda=0.01,
                            epsilon=1e-9,
                            spatial_routing_matrix=[[1]])
    # Drop the trailing singleton axis from the activation probabilities.
    prob = tf.squeeze(prob, axis=[-1])
    return pose, prob
class Decoder(Layer):
  """Fully-connected reconstruction decoder with an MSE reconstruction loss.

  Maps the selected capsule vector through a stack of ReLU Dense layers
  plus a sigmoid output layer, reshapes to (height, width, channel), and
  registers ``balance_factor * mean(sum((recons - image)^2))`` via
  ``add_loss``.

  Fix: the hidden-size default is now an immutable tuple instead of the
  mutable list ``[512, 1024]`` (mutable default argument anti-pattern);
  callers passing a list are unaffected.
  """
  def __init__(self,
               height,
               width,
               channel,
               balance_factor,
               layers=(512, 1024),
               **kwargs):
    super(Decoder, self).__init__(**kwargs)
    self.height = height
    self.width = width
    self.channel = channel
    # Weight of the reconstruction loss relative to the main objective.
    self.balance_factor = balance_factor
    self.layers = []
    for units in layers:
      self.layers.append(keras.layers.Dense(units, tf.nn.relu))
    # Final projection to the flattened image, squashed into [0, 1].
    self.layers.append(
        keras.layers.Dense(self.height * self.width * self.channel, tf.sigmoid))

  def call(self, inputs, **kwargs):
    """inputs = (active_caps, images); returns (recons_loss, recons_img)."""
    active_caps, images = inputs
    for layer in self.layers:
      active_caps = layer(active_caps)
    recons_img = tf.reshape(active_caps,
                            [-1, self.height, self.width, self.channel])
    # Per-image sum of squared errors, averaged over the batch.
    distance = tf.pow(recons_img - images, 2)
    loss = tf.reduce_sum(distance, [-1, -2, -3])
    recons_loss = self.balance_factor * tf.reduce_mean(loss)
    self.add_loss(recons_loss)
    return recons_loss, recons_img
class Mask(Layer):
  """Masks capsule poses so that only one capsule per sample survives:
  the ground-truth capsule at training time, a predicted capsule at
  inference time (the ``order``-th highest-probability capsule).
  """
  def __init__(self,
               order,
               share=True,
               out_num=10,
               **kwargs):
    super(Mask, self).__init__(**kwargs)
    # At inference, pick the (order+1)-th top capsule (order=0 -> argmax).
    self.order = order
    # share=True sums the masked capsules; otherwise they are flattened.
    self.share = share
    self.out_num = out_num
  def call(self, inputs, **kwargs):
    poses, probs, labels = inputs
    # Convert sparse integer labels to one-hot when ranks differ.
    if len(labels.get_shape()) != len(probs.get_shape()):
      labels = tf.one_hot(labels, probs.get_shape().as_list()[-1])
    def inference():
      if self.order > 0:
        # Take the (order+1)-th largest probability as the prediction.
        _, top_k = tf.nn.top_k(probs, self.order + 1)
        split = tf.split(top_k, self.order + 1, -1)
        split = split[-1]
        predictions = tf.expand_dims(tf.one_hot(tf.squeeze(split, -1), self.out_num), -1)
      else:
        predictions = tf.expand_dims(tf.one_hot(tf.argmax(probs, -1), self.out_num), -1)
      return predictions
    # Use labels while training, predictions at inference.
    training = keras.backend.learning_phase()
    mask = tf_utils.smart_cond(training, lambda: tf.expand_dims(labels, -1), inference)
    masked_caps = tf.multiply(poses, mask)
    if self.share:
      active_caps = tf.reduce_sum(masked_caps, axis=-2)
    else:
      active_caps = keras.layers.Flatten()(masked_caps)
    return active_caps
class DecoderConv(Layer):
  """Transposed-convolution reconstruction decoder with an MSE loss.

  Projects the capsule vector to a (base x base x filter) map, then
  upsamples through Conv2DTranspose blocks to a single-channel sigmoid
  image, and registers ``balance_factor * mean(sum((recons - image)^2))``
  via ``add_loss``.
  """
  def __init__(self,
               height,
               width,
               channel,
               balance_factor,
               base=10,
               filter=64,
               kernel_initializer=keras.initializers.he_normal(),
               kernel_regularizer=keras.regularizers.l2(1e-4),
               **kwargs):
    # NOTE(review): the default initializer/regularizer objects are created
    # once at class-definition time and shared across instances.
    super(DecoderConv, self).__init__(**kwargs)
    self.height = height
    self.width = width
    self.channel = channel
    self.balance_factor = balance_factor
    self.layers = [keras.layers.Dense(base*base*filter),
                   keras.layers.BatchNormalization(),
                   keras.layers.LeakyReLU(),
                   keras.layers.Reshape((base, base, filter)),
                   keras.layers.Conv2DTranspose(filter//2, (5, 5), strides=(1, 1), padding='same',
                                                kernel_initializer=kernel_initializer,
                                                kernel_regularizer=kernel_regularizer),
                   keras.layers.BatchNormalization(),
                   keras.layers.LeakyReLU(),
                   keras.layers.Conv2DTranspose(filter//4, (5, 5), strides=(2, 2), padding='same',
                                                kernel_initializer=kernel_initializer,
                                                kernel_regularizer=kernel_regularizer),
                   keras.layers.BatchNormalization(),
                   keras.layers.LeakyReLU(),
                   keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', activation=tf.sigmoid,
                                                kernel_initializer=kernel_initializer,
                                                kernel_regularizer=kernel_regularizer),
                   ]
  def call(self, inputs, **kwargs):
    """inputs = (active_caps, images); returns (recons_loss, recons_img)."""
    active_caps, images = inputs
    for layer in self.layers:
      active_caps = layer(active_caps)
    recons_img = active_caps
    # Per-image sum of squared errors, averaged over the batch.
    distance = tf.pow(recons_img - images, 2)
    loss = tf.reduce_sum(distance, [-1, -2, -3])
    recons_loss = self.balance_factor * tf.reduce_mean(loss)
    self.add_loss(recons_loss)
    return recons_loss, recons_img
class VectorNorm(Layer):
  """Normalizes capsule vectors along the last axis.

  Bug fix: ``custom_ops.vector_norm`` returns a ``(pose, norm)`` pair at
  its other call sites in this file (e.g. ``pool, _ =
  custom_ops.vector_norm(pool)``); the original returned the whole tuple,
  while callers such as ``verify_vec_norm`` expect a single tensor. Only
  the normalized pose is returned now.
  """
  def __init__(self, **kwargs):
    super(VectorNorm, self).__init__(**kwargs)

  def call(self, inputs, **kwargs):
    pose, _ = custom_ops.vector_norm(inputs, -1)
    return pose
class LastFMPool(Layer):
  """Final pooling layer: reduces the capsule axis with a factorization-
  machine interaction, then optionally applies an activation
  ('accumulate', 'squash', 'norm', or None for raw FM output)."""
  def __init__(self,
               axis=-2,
               activation='accumulate',
               shrink=None,
               stable=False,
               norm_pose=True,
               log=None,
               regularize=True,
               **kwargs):
    super(LastFMPool, self).__init__(**kwargs)
    self.axis = axis                # axis reduced by the FM interaction
    self.activation = activation
    self.shrink = shrink            # forwarded to custom_ops.accumulate
    self.stable = stable            # forwarded to custom_ops.accumulate
    self.norm_pose = norm_pose      # forwarded to custom_ops.accumulate
    self.log = log                  # optional histogram logger
    self.regularize = regularize    # forwarded to get_factorization_machines
  def call(self, inputs, **kwargs):
    # [bs, caps_in, caps_out, atom]
    outputs, importance = get_factorization_machines(inputs,
                                                     self.axis,
                                                     regularize=self.regularize)
    if self.log:
      # NOTE(review): get_factorization_machines currently always returns
      # importance=None, so this histogram never fires.
      if importance:
        self.log.add_hist('importance', importance)
      self.log.add_hist('fm_out', outputs)
      self.log.add_hist('fm_similarity', tf.reduce_sum(outputs, axis=-1))
    if self.activation == 'accumulate':
      outputs, norm = custom_ops.accumulate(outputs, shrink=self.shrink, stable=self.stable, norm_pose=self.norm_pose)
      norm = tf.squeeze(norm, -1)
      return outputs, norm
    elif self.activation == 'squash':
      outputs, norm = custom_ops.squash(outputs)
      norm = tf.squeeze(norm, -1)
      return outputs, norm
    elif self.activation == 'norm':
      outputs, _ = custom_ops.vector_norm(outputs)
      return outputs
    else:
      return outputs
class LastAveragePooling(Layer):
  """Averages capsule poses over one axis (default: the capsule axis, -2)."""

  def __init__(self, axis=-2, **kwargs):
    super(LastAveragePooling, self).__init__(**kwargs)
    self.axis = axis

  def call(self, inputs, **kwargs):
    return tf.reduce_mean(inputs, axis=self.axis)
class LastMaxPooling(Layer):
  """Takes the element-wise max over one axis (default: the capsule axis, -2)."""

  def __init__(self, axis=-2, **kwargs):
    super(LastMaxPooling, self).__init__(**kwargs)
    self.axis = axis

  def call(self, inputs, **kwargs):
    return tf.reduce_max(inputs, axis=self.axis)
def get_original_capsule_layer(inputs, num_out, out_atom, num_routing=3, temper=1.0):
  """CapsNet-style block: dense capsule transform followed by dynamic routing.

  Returns whatever DynamicRouting returns for the transformed votes.
  """
  transform = CapsuleTransformDense(num_out=num_out, out_atom=out_atom,
                                    share_weights=False)
  routing = DynamicRouting(num_routing=num_routing, temper=temper)
  return routing(transform(inputs))
def get_factorization_machines(inputs, axis=-2, regularize=True):
  """Second-order factorization-machine interaction reduced over ``axis``.

  Computes (sum x)^2 - sum(x^2), i.e. twice the sum of pairwise products
  along ``axis`` (e.g. the capsule axis of a [bs, caps_in, caps_out, atom]
  tensor). With ``regularize`` the input is first scaled by
  1/sqrt(#capsules on axis 1) to keep the output variance bounded.

  Returns:
    (outputs, weight) — weight is always None, kept so callers that
    unpack two values keep working.
  """
  if regularize:
    inputs = inputs / np.sqrt(inputs.get_shape()[1])
  square_of_sum = tf.square(tf.reduce_sum(inputs, axis, keepdims=True))
  sum_of_square = tf.reduce_sum(tf.square(inputs), axis, keepdims=True)
  interactions = tf.squeeze(square_of_sum - sum_of_square, axis)
  return interactions, None
def get_average_pooling(inputs):
  """Mean-pools capsules over the capsule axis (-2).

  Bug fix: the original computed the mean but ended with a bare ``return``,
  so callers always received None; the pooled tensor is now returned.
  """
  return tf.reduce_mean(inputs, axis=-2)
def get_cross_mul(inputs):
  """Brute-force sum of pairwise products, for sanity-checking the FM identity.

  Accumulates inputs[i] * inputs[j] over all ordered pairs i != j and
  divides by 2n. O(n^2) — only used by verify_factorization_machines.

  NOTE(review): n is read from axis -2 but indexing happens on axis 0, so
  this only lines up for rank-2 inputs — confirm intended usage.
  """
  n = inputs.get_shape().as_list()[-2]
  total = 0
  for left in range(n):
    for right in range(n):
      if left == right:
        continue
      total += inputs[left] * inputs[right]
  return total / (2 * n)
def batch_2d(inputs, kernel_size, strides, name=None):
  """Extracts sliding 2-D windows (im2col-style) from an NHWC tensor.

  Returns a tensor of shape [bs, out_h, out_w, k_h*k_w, channel] where the
  fourth axis enumerates positions inside each window.
  """
  if not isinstance(kernel_size, tuple):
    kernel_size = (kernel_size, kernel_size)
  if not isinstance(strides, tuple):
    strides = (strides, strides)
  name = "batch_to_pool" if name is None else name
  with tf.name_scope(name):
    height, width = inputs.get_shape().as_list()[1:3]
    # Row/column index lists: one inner list of k indices per output position.
    h_offsets = [[(h + k) for k in range(0, kernel_size[0])] for h in range(0, height + 1 - kernel_size[0], strides[0])]
    w_offsets = [[(w + k) for k in range(0, kernel_size[1])] for w in range(0, width + 1 - kernel_size[1], strides[1])]
    # After the two gathers: [bs, out_h, k_h, out_w, k_w, channel].
    patched = tf.gather(inputs, h_offsets, axis=1)
    patched = tf.gather(patched, w_offsets, axis=3)
    perm = [0, 1, 3, 2, 4, 5]
    # Bring the two window axes together: [bs, out_h, out_w, k_h, k_w, channel].
    patched = tf.transpose(patched, perm=perm)
    shape = patched.get_shape().as_list()
    # Merge the window axes into one: [bs, out_h, out_w, k_h*k_w, channel].
    shape = [-1] + shape[1:3] + [np.prod(shape[3:-1]), shape[-1]]
    patched = tf.reshape(patched, shape=shape)
  return patched
def test_routing_pool():
  """Smoke test: RoutingPooling on a random [64, 16, 16, 64] feature map."""
  features = tf.random.normal([64, 16, 16, 64])
  pooled = RoutingPooling(3, 2, 8)(features)
  print(pooled.shape)
def test_batch_2d():
  """Smoke test: window extraction on a random [128, 32, 32, 3] image batch."""
  images = tf.random.normal([128, 32, 32, 3])
  windows = batch_2d(images, 2, 2)
  print(windows.shape)
def verify_factorization_machines():
  """Times and prints the O(n^2) cross-product sum vs. the FM identity."""
  sample = tf.random.normal([1000, 16])
  start = time.time()
  cross = get_cross_mul(sample)
  mid = time.time()
  print('cross mul cost:', mid - start)
  print('cross mul result:', cross)
  fm = get_factorization_machines(sample)
  end = time.time()
  print('FM cost:', end - mid)
  print('FM result:', fm)
def test_ablation_fm():
  """Prints moment statistics of the FM identity (sum x)^2 - sum(x^2)
  on unit-normalized, 1/sqrt(num)-scaled random capsule vectors."""
  num = 1000
  atom = 16
  a = tf.random.normal([10000, num, atom], 2, 20)
  # Normalize every atom-vector to unit length ...
  a = a / tf.norm(a, axis=-1, keepdims=True)
  # ... then scale by 1/sqrt(num) to bound the FM output variance.
  a = a / tf.sqrt(tf.cast(num, tf.float32))
  b1 = tf.square(tf.reduce_sum(a, 1))  # (sum x)^2 term
  # b1_mean, b1_var = tf.nn.moments(b1, 0)
  # print('b1_mean:', b1_mean.numpy())
  # print('b1_var:', b1_var.numpy())
  b2 = tf.reduce_sum(tf.square(a), 1)  # sum(x^2) term
  # b2_mean, b2_var = tf.nn.moments(b2, 0)
  # print('b2_mean:', b2_mean.numpy())
  # print('b2_var:', b2_var.numpy())
  fm = b1 - b2
  b3_mean, b3_var = tf.nn.moments(fm, 0)
  print('b3_mean:', b3_mean.numpy())
  print('b3_var:', b3_var.numpy())
  act = tf.reduce_sum(fm, 1)
  act_mean, act_var = tf.nn.moments(act, 0)
  print('act_mean:', act_mean.numpy())
  print('act_var:', act_var.numpy())
def verify_vec_norm():
  """Checks that VectorNorm output has (near) unit norms."""
  normalized = VectorNorm()(tf.random.normal([64, 16]))
  print(tf.norm(normalized, axis=-1))
def verify_average_pooling():
  """Cross-checks LastAveragePooling against keras AveragePooling2D (diff ~ 0)."""
  features = tf.random.normal([10, 8, 8, 64])
  reference = tf.squeeze(keras.layers.AveragePooling2D([8, 8])(features))
  ours = LastAveragePooling()(tf.reshape(features, [10, 64, 64]))
  print(tf.reduce_sum(reference - ours))
def verify_pri_capsule():
  """Groups a random feature map into capsules and prints the vector norms."""
  features = tf.random.normal([10, 8, 8, 64])
  grouped = CapsuleGroups(height=8,
                          width=8,
                          channel=64,
                          atoms=32,
                          activation='norm')(features)
  print(tf.norm(grouped, axis=-1))
def verify_dynamic_routing():
  """Smoke test: DynamicRouting on random votes of shape [10, 2, 64, 1, 32]."""
  votes = tf.random.normal([10, 2, 64, 1, 32])
  print(DynamicRouting()(votes))
def verify_fm_pool():
  """Smoke test: FMPooling on a random [128, 32, 32, 64] feature map.

  Bug fix: FMPooling.__init__ takes (kernel_size, strides, fm_norm, atoms);
  the original ``FMPooling(3, 2, 8)`` omitted ``atoms`` (binding 8 to
  fm_norm) and raised a TypeError. Keywords are now explicit; fm_norm is
  currently unused by FMPooling.call, so False is a placeholder —
  TODO confirm the intended value.
  """
  x = tf.random.normal([128, 32, 32, 64])
  y = FMPooling(kernel_size=3, strides=2, fm_norm=False, atoms=8)(x)
  print(y)
def verify_last_fm_pool():
  """Runs LastFMPool (no activation) on random capsules, prints output and its variance."""
  caps = tf.random.normal([128, 1152, 16])
  # Input statistics (unused, kept for parity with the original experiment).
  in_mean, in_var = tf.nn.moments(caps, [0, 1, 2])
  pooled = LastFMPool(activation=None)(caps)
  out_mean, out_var = tf.nn.moments(pooled, [0, 1])
  print(pooled)
  print(out_var)
def verify_transform():
  """Smoke test: CapsuleTransformDense maps [128, 30, 8] to 10 capsules of 16."""
  caps = tf.random.normal((128, 30, 8))
  print(CapsuleTransformDense(10, 16)(caps))
def var_experiments():
  """Prints moment statistics of random data and of its FM reduction.

  Bug fix: ``get_factorization_machines`` returns an ``(outputs, weight)``
  pair; the original passed the whole tuple to ``tf.nn.moments``. The pair
  is now unpacked and only the output tensor is analyzed.
  """
  x = tf.random.normal([10240, 64])
  x1_mean, x1_var = tf.nn.moments(x, [0, 1])
  x2_mean, x2_var = tf.nn.moments(x*x, [0, 1])
  x4_mean = tf.reduce_mean(x*x*x*x)
  print('x1_var', x1_var)
  print('x2_var', x2_var)
  print('x4_mean', x4_mean)
  # FM interaction over axis 1; the second element is always None.
  y, _ = get_factorization_machines(x, 1)
  y_mean, y_var = tf.nn.moments(y, [0])
  print('y_mean', y_mean)
  print('y_var', y_var)
def var_vec_norm_scale():
  """Prints mean/variance of custom_ops.vector_norm_scale output on random data."""
  data = tf.random.normal([12800, 160])
  scaled, _ = custom_ops.vector_norm_scale(data, -1)
  # Norm check value (unused, kept for parity with the original experiment).
  norms = tf.norm(scaled, axis=-1)
  mean, var = tf.nn.moments(scaled, [0, 1])
  print('E:', mean.numpy(), 'D:', var.numpy())
if __name__ == "__main__":
# verify_factorization_machines()
# verify_last_fm_pool()
# var_experiments()
# verify_dynamic_routing()
# verify_transform()
# test_batch_2d()
# test_routing_pool()
# verify_fm_pool()
var_vec_norm_scale()
| [
"tensorflow.tile",
"numpy.prod",
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"tensorflow.split",
"tensorflow.multiply",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"common.utils.k... | [((22707, 22749), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['inputs', 'axis'], {'keepdims': '(True)'}), '(inputs, axis, keepdims=True)\n', (22720, 22749), True, 'import tensorflow as tf\n'), ((22786, 22799), 'tensorflow.square', 'tf.square', (['x1'], {}), '(x1)\n', (22795, 22799), True, 'import tensorflow as tf\n'), ((22816, 22833), 'tensorflow.square', 'tf.square', (['inputs'], {}), '(inputs)\n', (22825, 22833), True, 'import tensorflow as tf\n'), ((22843, 22888), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x2_square', 'axis'], {'keepdims': '(True)'}), '(x2_square, axis, keepdims=True)\n', (22856, 22888), True, 'import tensorflow as tf\n'), ((22952, 22977), 'tensorflow.squeeze', 'tf.squeeze', (['outputs', 'axis'], {}), '(outputs, axis)\n', (22962, 22977), True, 'import tensorflow as tf\n'), ((23066, 23097), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(-2)'}), '(inputs, axis=-2)\n', (23080, 23097), True, 'import tensorflow as tf\n'), ((24391, 24425), 'tensorflow.random.normal', 'tf.random.normal', (['[64, 16, 16, 64]'], {}), '([64, 16, 16, 64])\n', (24407, 24425), True, 'import tensorflow as tf\n'), ((24511, 24545), 'tensorflow.random.normal', 'tf.random.normal', (['[128, 32, 32, 3]'], {}), '([128, 32, 32, 3])\n', (24527, 24545), True, 'import tensorflow as tf\n'), ((24638, 24666), 'tensorflow.random.normal', 'tf.random.normal', (['[1000, 16]'], {}), '([1000, 16])\n', (24654, 24666), True, 'import tensorflow as tf\n'), ((24676, 24687), 'time.time', 'time.time', ([], {}), '()\n', (24685, 24687), False, 'import time\n'), ((24725, 24736), 'time.time', 'time.time', ([], {}), '()\n', (24734, 24736), False, 'import time\n'), ((24862, 24873), 'time.time', 'time.time', ([], {}), '()\n', (24871, 24873), False, 'import time\n'), ((24998, 25041), 'tensorflow.random.normal', 'tf.random.normal', (['[10000, num, atom]', '(2)', '(20)'], {}), '([10000, num, atom], 2, 20)\n', (25014, 25041), True, 'import tensorflow as tf\n'), ((25504, 
25524), 'tensorflow.nn.moments', 'tf.nn.moments', (['fm', '(0)'], {}), '(fm, 0)\n', (25517, 25524), True, 'import tensorflow as tf\n'), ((25611, 25631), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['fm', '(1)'], {}), '(fm, 1)\n', (25624, 25631), True, 'import tensorflow as tf\n'), ((25656, 25677), 'tensorflow.nn.moments', 'tf.nn.moments', (['act', '(0)'], {}), '(act, 0)\n', (25669, 25677), True, 'import tensorflow as tf\n'), ((25819, 25845), 'tensorflow.random.normal', 'tf.random.normal', (['[64, 16]'], {}), '([64, 16])\n', (25835, 25845), True, 'import tensorflow as tf\n'), ((25937, 25969), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 8, 8, 64]'], {}), '([10, 8, 8, 64])\n', (25953, 25969), True, 'import tensorflow as tf\n'), ((26029, 26043), 'tensorflow.squeeze', 'tf.squeeze', (['x1'], {}), '(x1)\n', (26039, 26043), True, 'import tensorflow as tf\n'), ((26052, 26079), 'tensorflow.reshape', 'tf.reshape', (['x', '[10, 64, 64]'], {}), '(x, [10, 64, 64])\n', (26062, 26079), True, 'import tensorflow as tf\n'), ((26181, 26213), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 8, 8, 64]'], {}), '([10, 8, 8, 64])\n', (26197, 26213), True, 'import tensorflow as tf\n'), ((26458, 26494), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 2, 64, 1, 32]'], {}), '([10, 2, 64, 1, 32])\n', (26474, 26494), True, 'import tensorflow as tf\n'), ((26568, 26603), 'tensorflow.random.normal', 'tf.random.normal', (['[128, 32, 32, 64]'], {}), '([128, 32, 32, 64])\n', (26584, 26603), True, 'import tensorflow as tf\n'), ((26684, 26717), 'tensorflow.random.normal', 'tf.random.normal', (['[128, 1152, 16]'], {}), '([128, 1152, 16])\n', (26700, 26717), True, 'import tensorflow as tf\n'), ((26738, 26765), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0, 1, 2]'], {}), '(x, [0, 1, 2])\n', (26751, 26765), True, 'import tensorflow as tf\n'), ((26823, 26847), 'tensorflow.nn.moments', 'tf.nn.moments', (['y', '[0, 1]'], {}), '(y, [0, 1])\n', (26836, 26847), True, 'import 
tensorflow as tf\n'), ((26911, 26941), 'tensorflow.random.normal', 'tf.random.normal', (['(128, 30, 8)'], {}), '((128, 30, 8))\n', (26927, 26941), True, 'import tensorflow as tf\n'), ((27037, 27066), 'tensorflow.random.normal', 'tf.random.normal', (['[10240, 64]'], {}), '([10240, 64])\n', (27053, 27066), True, 'import tensorflow as tf\n'), ((27089, 27113), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0, 1]'], {}), '(x, [0, 1])\n', (27102, 27113), True, 'import tensorflow as tf\n'), ((27136, 27164), 'tensorflow.nn.moments', 'tf.nn.moments', (['(x * x)', '[0, 1]'], {}), '(x * x, [0, 1])\n', (27149, 27164), True, 'import tensorflow as tf\n'), ((27177, 27206), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(x * x * x * x)'], {}), '(x * x * x * x)\n', (27191, 27206), True, 'import tensorflow as tf\n'), ((27348, 27369), 'tensorflow.nn.moments', 'tf.nn.moments', (['y', '[0]'], {}), '(y, [0])\n', (27361, 27369), True, 'import tensorflow as tf\n'), ((27460, 27490), 'tensorflow.random.normal', 'tf.random.normal', (['[12800, 160]'], {}), '([12800, 160])\n', (27476, 27490), True, 'import tensorflow as tf\n'), ((27507, 27542), 'common.ops.ops.vector_norm_scale', 'custom_ops.vector_norm_scale', (['x', '(-1)'], {}), '(x, -1)\n', (27535, 27542), True, 'from common.ops import ops as custom_ops\n'), ((27563, 27587), 'tensorflow.norm', 'tf.norm', (['x_norm'], {'axis': '(-1)'}), '(x_norm, axis=-1)\n', (27570, 27587), True, 'import tensorflow as tf\n'), ((27608, 27637), 'tensorflow.nn.moments', 'tf.nn.moments', (['x_norm', '[0, 1]'], {}), '(x_norm, [0, 1])\n', (27621, 27637), True, 'import tensorflow as tf\n'), ((618, 655), 'common.ops.ops.get_activation', 'custom_ops.get_activation', (['activation'], {}), '(activation)\n', (643, 655), True, 'from common.ops import ops as custom_ops\n'), ((1308, 1342), 'tensorflow.keras.initializers.glorot_normal', 'keras.initializers.glorot_normal', ([], {}), '()\n', (1340, 1342), False, 'from tensorflow import keras\n'), ((1690, 1727), 
'common.ops.ops.get_activation', 'custom_ops.get_activation', (['activation'], {}), '(activation)\n', (1715, 1727), True, 'from common.ops import ops as custom_ops\n'), ((1748, 2003), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(self.groups * self.atoms)', 'kernel_size': 'self.kernel_size', 'strides': 'self.strides', 'padding': 'self.padding', 'use_bias': 'use_bias', 'activation': 'None', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), '(filters=self.groups * self.atoms, kernel_size=self.\n kernel_size, strides=self.strides, padding=self.padding, use_bias=\n use_bias, activation=None, kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer)\n', (1767, 2003), False, 'from tensorflow import keras\n'), ((3005, 3039), 'tensorflow.keras.initializers.glorot_normal', 'keras.initializers.glorot_normal', ([], {}), '()\n', (3037, 3039), False, 'from tensorflow import keras\n'), ((6122, 6156), 'tensorflow.keras.initializers.glorot_normal', 'keras.initializers.glorot_normal', ([], {}), '()\n', (6154, 6156), False, 'from tensorflow import keras\n'), ((7066, 7122), 'common.utils.kernel_tile', 'utils.kernel_tile', (['inputs', 'self.kernel_size', 'self.stride'], {}), '(inputs, self.kernel_size, self.stride)\n', (7083, 7122), False, 'from common import utils\n'), ((7216, 7304), 'common.ops.transformation.matmul_element_wise', 'transformation.matmul_element_wise', (['inputs_tile', 'self.matrix', 'self.filter', 'self.atom'], {}), '(inputs_tile, self.matrix, self.filter,\n self.atom)\n', (7250, 7304), False, 'from common.ops import transformation\n'), ((7815, 7852), 'common.ops.ops.get_activation', 'custom_ops.get_activation', (['activation'], {}), '(activation)\n', (7840, 7852), True, 'from common.ops import ops as custom_ops\n'), ((7954, 8033), 'tensorflow.reshape', 'tf.reshape', (['inputs'], {'shape': '[-1, self.height * self.width, group_num, self.atoms]'}), '(inputs, shape=[-1, 
self.height * self.width, group_num, self.atoms])\n', (7964, 8033), True, 'import tensorflow as tf\n'), ((8815, 8913), 'tensorflow.reshape', 'tf.reshape', (['patched', '([-1] + patched_shape[1:4] + [patched_shape[4] // self.atoms, self.atoms])'], {}), '(patched, [-1] + patched_shape[1:4] + [patched_shape[4] // self.\n atoms, self.atoms])\n', (8825, 8913), True, 'import tensorflow as tf\n'), ((8956, 9002), 'tensorflow.transpose', 'tf.transpose', (['patched'], {'perm': '[0, 1, 2, 4, 3, 5]'}), '(patched, perm=[0, 1, 2, 4, 3, 5])\n', (8968, 9002), True, 'import tensorflow as tf\n'), ((9021, 9053), 'tensorflow.expand_dims', 'tf.expand_dims', (['patched'], {'axis': '(-2)'}), '(patched, axis=-2)\n', (9035, 9053), True, 'import tensorflow as tf\n'), ((9073, 9187), 'common.ops.routing.dynamic_routing', 'dynamic_routing', (['patched'], {'num_routing': 'self.num_routing', 'softmax_in': '(True)', 'temper': 'self.temper', 'activation': '"""norm"""'}), "(patched, num_routing=self.num_routing, softmax_in=True,\n temper=self.temper, activation='norm')\n", (9088, 9187), False, 'from common.ops.routing import dynamic_routing\n'), ((9335, 9399), 'tensorflow.reshape', 'tf.reshape', (['pose', '([-1] + patched_shape[1:3] + [patched_shape[4]])'], {}), '(pose, [-1] + patched_shape[1:3] + [patched_shape[4]])\n', (9345, 9399), True, 'import tensorflow as tf\n'), ((10025, 10123), 'tensorflow.reshape', 'tf.reshape', (['patched', '([-1] + patched_shape[1:4] + [patched_shape[4] // self.atoms, self.atoms])'], {}), '(patched, [-1] + patched_shape[1:4] + [patched_shape[4] // self.\n atoms, self.atoms])\n', (10035, 10123), True, 'import tensorflow as tf\n'), ((10166, 10212), 'tensorflow.transpose', 'tf.transpose', (['patched'], {'perm': '[0, 1, 2, 4, 3, 5]'}), '(patched, perm=[0, 1, 2, 4, 3, 5])\n', (10178, 10212), True, 'import tensorflow as tf\n'), ((10289, 10353), 'tensorflow.reshape', 'tf.reshape', (['pool', '([-1] + patched_shape[1:3] + [patched_shape[4]])'], {}), '(pool, [-1] + 
patched_shape[1:3] + [patched_shape[4]])\n', (10299, 10353), True, 'import tensorflow as tf\n'), ((11110, 11244), 'common.ops.routing.dynamic_routing', 'dynamic_routing', (['inputs'], {'num_routing': 'self.num_routing', 'softmax_in': 'self.softmax_in', 'temper': 'self.temper', 'activation': 'self.activation'}), '(inputs, num_routing=self.num_routing, softmax_in=self.\n softmax_in, temper=self.temper, activation=self.activation)\n', (11125, 11244), False, 'from common.ops.routing import dynamic_routing\n'), ((11403, 11428), 'tensorflow.squeeze', 'tf.squeeze', (['pose'], {'axis': '(-3)'}), '(pose, axis=-3)\n', (11413, 11428), True, 'import tensorflow as tf\n'), ((11444, 11475), 'tensorflow.squeeze', 'tf.squeeze', (['prob'], {'axis': '[-3, -1]'}), '(prob, axis=[-3, -1])\n', (11454, 11475), True, 'import tensorflow as tf\n'), ((14459, 14616), 'common.ops.em_routing.em_routing', 'em_routing', (['votes_flat', 'activation_flat', 'self.beta_a', 'self.beta_v', 'self.num_routing'], {'final_lambda': '(0.01)', 'epsilon': '(1e-09)', 'spatial_routing_matrix': '[[1]]'}), '(votes_flat, activation_flat, self.beta_a, self.beta_v, self.\n num_routing, final_lambda=0.01, epsilon=1e-09, spatial_routing_matrix=[[1]]\n )\n', (14469, 14616), False, 'from common.ops.em_routing import em_routing\n'), ((14845, 14872), 'tensorflow.squeeze', 'tf.squeeze', (['prob'], {'axis': '[-1]'}), '(prob, axis=[-1])\n', (14855, 14872), True, 'import tensorflow as tf\n'), ((15724, 15787), 'tensorflow.reshape', 'tf.reshape', (['recons', '[-1, self.height, self.width, self.channel]'], {}), '(recons, [-1, self.height, self.width, self.channel])\n', (15734, 15787), True, 'import tensorflow as tf\n'), ((15807, 15837), 'tensorflow.pow', 'tf.pow', (['(recons_img - images)', '(2)'], {}), '(recons_img - images, 2)\n', (15813, 15837), True, 'import tensorflow as tf\n'), ((15853, 15890), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['distance', '[-1, -2, -3]'], {}), '(distance, [-1, -2, -3])\n', (15866, 15890), True, 
'import tensorflow as tf\n'), ((17003, 17033), 'tensorflow.keras.backend.learning_phase', 'keras.backend.learning_phase', ([], {}), '()\n', (17031, 17033), False, 'from tensorflow import keras\n'), ((17148, 17172), 'tensorflow.multiply', 'tf.multiply', (['poses', 'mask'], {}), '(poses, mask)\n', (17159, 17172), True, 'import tensorflow as tf\n'), ((17610, 17640), 'tensorflow.keras.initializers.he_normal', 'keras.initializers.he_normal', ([], {}), '()\n', (17638, 17640), False, 'from tensorflow import keras\n'), ((17678, 17707), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.0001)'], {}), '(0.0001)\n', (17699, 17707), False, 'from tensorflow import keras\n'), ((19473, 19503), 'tensorflow.pow', 'tf.pow', (['(recons_img - images)', '(2)'], {}), '(recons_img - images, 2)\n', (19479, 19503), True, 'import tensorflow as tf\n'), ((19519, 19556), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['distance', '[-1, -2, -3]'], {}), '(distance, [-1, -2, -3])\n', (19532, 19556), True, 'import tensorflow as tf\n'), ((19859, 19893), 'common.ops.ops.vector_norm', 'custom_ops.vector_norm', (['inputs', '(-1)'], {}), '(inputs, -1)\n', (19881, 19893), True, 'from common.ops import ops as custom_ops\n'), ((21842, 21880), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': 'self.axis'}), '(inputs, axis=self.axis)\n', (21856, 21880), True, 'import tensorflow as tf\n'), ((22149, 22186), 'tensorflow.reduce_max', 'tf.reduce_max', (['inputs'], {'axis': 'self.axis'}), '(inputs, axis=self.axis)\n', (22162, 22186), True, 'import tensorflow as tf\n'), ((22682, 22697), 'numpy.sqrt', 'np.sqrt', (['cap_in'], {}), '(cap_in)\n', (22689, 22697), True, 'import numpy as np\n'), ((23646, 23665), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (23659, 23665), True, 'import tensorflow as tf\n'), ((23992, 24028), 'tensorflow.gather', 'tf.gather', (['inputs', 'h_offsets'], {'axis': '(1)'}), '(inputs, h_offsets, axis=1)\n', (24001, 24028), True, 'import 
tensorflow as tf\n'), ((24047, 24084), 'tensorflow.gather', 'tf.gather', (['patched', 'w_offsets'], {'axis': '(3)'}), '(patched, w_offsets, axis=3)\n', (24056, 24084), True, 'import tensorflow as tf\n'), ((24137, 24169), 'tensorflow.transpose', 'tf.transpose', (['patched'], {'perm': 'perm'}), '(patched, perm=perm)\n', (24149, 24169), True, 'import tensorflow as tf\n'), ((24304, 24336), 'tensorflow.reshape', 'tf.reshape', (['patched'], {'shape': 'shape'}), '(patched, shape=shape)\n', (24314, 24336), True, 'import tensorflow as tf\n'), ((25054, 25088), 'tensorflow.norm', 'tf.norm', (['a'], {'axis': '(-1)', 'keepdims': '(True)'}), '(a, axis=-1, keepdims=True)\n', (25061, 25088), True, 'import tensorflow as tf\n'), ((25154, 25173), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['a', '(1)'], {}), '(a, 1)\n', (25167, 25173), True, 'import tensorflow as tf\n'), ((25323, 25335), 'tensorflow.square', 'tf.square', (['a'], {}), '(a)\n', (25332, 25335), True, 'import tensorflow as tf\n'), ((25876, 25895), 'tensorflow.norm', 'tf.norm', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (25883, 25895), True, 'import tensorflow as tf\n'), ((25979, 26016), 'tensorflow.keras.layers.AveragePooling2D', 'keras.layers.AveragePooling2D', (['[8, 8]'], {}), '([8, 8])\n', (26008, 26016), False, 'from tensorflow import keras\n'), ((26123, 26145), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x1 - x2)'], {}), '(x1 - x2)\n', (26136, 26145), True, 'import tensorflow as tf\n'), ((26397, 26416), 'tensorflow.norm', 'tf.norm', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (26404, 26416), True, 'import tensorflow as tf\n'), ((2436, 2524), 'tensorflow.reshape', 'tf.reshape', (['pose'], {'shape': '[-1, pose_shape[1], pose_shape[2], self.groups, self.atoms]'}), '(pose, shape=[-1, pose_shape[1], pose_shape[2], self.groups, self\n .atoms])\n', (2446, 2524), True, 'import tensorflow as tf\n'), ((2553, 2642), 'tensorflow.reshape', 'tf.reshape', (['pose'], {'shape': '[-1, pose_shape[1] * pose_shape[2] * self.groups, 
self.atoms]'}), '(pose, shape=[-1, pose_shape[1] * pose_shape[2] * self.groups,\n self.atoms])\n', (2563, 2642), True, 'import tensorflow as tf\n'), ((5308, 5366), 'tensorflow.reshape', 'tf.reshape', (['inputs', '(in_shape[:-1] + [self.wide, self.wide])'], {}), '(inputs, in_shape[:-1] + [self.wide, self.wide])\n', (5318, 5366), True, 'import tensorflow as tf\n'), ((5493, 5570), 'common.ops.transformation.matrix_capsule_element_wise', 'transformation.matrix_capsule_element_wise', (['inputs', 'self.kernel', 'self.num_out'], {}), '(inputs, self.kernel, self.num_out)\n', (5535, 5570), False, 'from common.ops import transformation\n'), ((5593, 5661), 'tensorflow.reshape', 'tf.reshape', (['outputs', '(in_shape[:-1] + [self.num_out] + [in_shape[-1]])'], {}), '(outputs, in_shape[:-1] + [self.num_out] + [in_shape[-1]])\n', (5603, 5661), True, 'import tensorflow as tf\n'), ((5822, 5911), 'common.ops.transformation.matmul_element_wise', 'transformation.matmul_element_wise', (['inputs', 'self.kernel', 'self.num_out', 'self.out_atom'], {}), '(inputs, self.kernel, self.num_out, self.\n out_atom)\n', (5856, 5911), False, 'from common.ops import transformation\n'), ((10402, 10430), 'common.ops.ops.vector_norm', 'custom_ops.vector_norm', (['pool'], {}), '(pool)\n', (10424, 10430), True, 'from common.ops import ops as custom_ops\n'), ((11062, 11088), 'tensorflow.expand_dims', 'tf.expand_dims', (['inputs', '(-2)'], {}), '(inputs, -2)\n', (11076, 11088), True, 'import tensorflow as tf\n'), ((15450, 15521), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(self.height * self.width * self.channel)', 'tf.sigmoid'], {}), '(self.height * self.width * self.channel, tf.sigmoid)\n', (15468, 15521), False, 'from tensorflow import keras\n'), ((15935, 15955), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (15949, 15955), True, 'import tensorflow as tf\n'), ((17222, 17257), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['masked_caps'], {'axis': '(-2)'}), 
'(masked_caps, axis=-2)\n', (17235, 17257), True, 'import tensorflow as tf\n'), ((17942, 17982), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(base * base * filter)'], {}), '(base * base * filter)\n', (17960, 17982), False, 'from tensorflow import keras\n'), ((18003, 18036), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (18034, 18036), False, 'from tensorflow import keras\n'), ((18061, 18085), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (18083, 18085), False, 'from tensorflow import keras\n'), ((18110, 18152), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['(base, base, filter)'], {}), '((base, base, filter))\n', (18130, 18152), False, 'from tensorflow import keras\n'), ((18177, 18346), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(filter // 2)', '(5, 5)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), "(filter // 2, (5, 5), strides=(1, 1), padding=\n 'same', kernel_initializer=kernel_initializer, kernel_regularizer=\n kernel_regularizer)\n", (18205, 18346), False, 'from tensorflow import keras\n'), ((18463, 18496), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (18494, 18496), False, 'from tensorflow import keras\n'), ((18521, 18545), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (18543, 18545), False, 'from tensorflow import keras\n'), ((18570, 18739), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(filter // 4)', '(5, 5)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), "(filter // 4, (5, 5), strides=(2, 2), padding=\n 'same', kernel_initializer=kernel_initializer, kernel_regularizer=\n kernel_regularizer)\n", (18598, 
18739), False, 'from tensorflow import keras\n'), ((18856, 18889), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (18887, 18889), False, 'from tensorflow import keras\n'), ((18914, 18938), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (18936, 18938), False, 'from tensorflow import keras\n'), ((18963, 19143), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(1)', '(5, 5)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': 'tf.sigmoid', 'kernel_initializer': 'kernel_initializer', 'kernel_regularizer': 'kernel_regularizer'}), "(1, (5, 5), strides=(2, 2), padding='same',\n activation=tf.sigmoid, kernel_initializer=kernel_initializer,\n kernel_regularizer=kernel_regularizer)\n", (18991, 19143), False, 'from tensorflow import keras\n'), ((19601, 19621), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (19615, 19621), True, 'import tensorflow as tf\n'), ((21084, 21184), 'common.ops.ops.accumulate', 'custom_ops.accumulate', (['outputs'], {'shrink': 'self.shrink', 'stable': 'self.stable', 'norm_pose': 'self.norm_pose'}), '(outputs, shrink=self.shrink, stable=self.stable,\n norm_pose=self.norm_pose)\n', (21105, 21184), True, 'from common.ops import ops as custom_ops\n'), ((21200, 21220), 'tensorflow.squeeze', 'tf.squeeze', (['norm', '(-1)'], {}), '(norm, -1)\n', (21210, 21220), True, 'import tensorflow as tf\n'), ((25109, 25133), 'tensorflow.cast', 'tf.cast', (['num', 'tf.float32'], {}), '(num, tf.float32)\n', (25116, 25133), True, 'import tensorflow as tf\n'), ((3559, 3575), 'numpy.sqrt', 'np.sqrt', (['in_atom'], {}), '(in_atom)\n', (3566, 3575), True, 'import numpy as np\n'), ((3957, 3996), 'tensorflow.tile', 'tf.tile', (['self.kernel', '[in_num, 1, 1, 1]'], {}), '(self.kernel, [in_num, 1, 1, 1])\n', (3964, 3996), True, 'import tensorflow as tf\n'), ((4368, 4404), 'tensorflow.tile', 'tf.tile', (['self.kernel', '[in_num, 1, 
1]'], {}), '(self.kernel, [in_num, 1, 1])\n', (4375, 4404), True, 'import tensorflow as tf\n'), ((13648, 13713), 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'mean': '(-1000.0)', 'stddev': '(500.0)'}), '(mean=-1000.0, stddev=500.0)\n', (13685, 13713), True, 'import tensorflow as tf\n'), ((14190, 14226), 'tensorflow.keras.initializers.GlorotNormal', 'tf.keras.initializers.GlorotNormal', ([], {}), '()\n', (14224, 14226), True, 'import tensorflow as tf\n'), ((15384, 15421), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['layer', 'tf.nn.relu'], {}), '(layer, tf.nn.relu)\n', (15402, 15421), False, 'from tensorflow import keras\n'), ((16610, 16644), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['probs', '(self.order + 1)'], {}), '(probs, self.order + 1)\n', (16621, 16644), True, 'import tensorflow as tf\n'), ((16669, 16704), 'tensorflow.split', 'tf.split', (['top_k', '(self.order + 1)', '(-1)'], {}), '(top_k, self.order + 1, -1)\n', (16677, 16704), True, 'import tensorflow as tf\n'), ((17087, 17113), 'tensorflow.expand_dims', 'tf.expand_dims', (['labels', '(-1)'], {}), '(labels, -1)\n', (17101, 17113), True, 'import tensorflow as tf\n'), ((17298, 17320), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (17318, 17320), False, 'from tensorflow import keras\n'), ((20979, 21010), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['outputs'], {'axis': '(-1)'}), '(outputs, axis=-1)\n', (20992, 21010), True, 'import tensorflow as tf\n'), ((21324, 21350), 'common.ops.ops.squash', 'custom_ops.squash', (['outputs'], {}), '(outputs)\n', (21341, 21350), True, 'from common.ops import ops as custom_ops\n'), ((21370, 21390), 'tensorflow.squeeze', 'tf.squeeze', (['norm', '(-1)'], {}), '(norm, -1)\n', (21380, 21390), True, 'import tensorflow as tf\n'), ((24253, 24273), 'numpy.prod', 'np.prod', (['shape[3:-1]'], {}), '(shape[3:-1])\n', (24260, 24273), True, 'import numpy as np\n'), ((21489, 21520), 
'common.ops.ops.vector_norm', 'custom_ops.vector_norm', (['outputs'], {}), '(outputs)\n', (21511, 21520), True, 'from common.ops import ops as custom_ops\n'), ((16795, 16816), 'tensorflow.squeeze', 'tf.squeeze', (['split', '(-1)'], {}), '(split, -1)\n', (16805, 16816), True, 'import tensorflow as tf\n'), ((16911, 16931), 'tensorflow.argmax', 'tf.argmax', (['probs', '(-1)'], {}), '(probs, -1)\n', (16920, 16931), True, 'import tensorflow as tf\n')] |
# Undergraduate Student: <NAME>
# Professor: <NAME>
# Federal University of Uberlândia - UFU, Fluid Mechanics Laboratory - MFLab, Block 5P, Uberlândia, MG, Brazil
# Third exercise: Fibonacci sequence - by a common loop
import numpy as np
import time
# Read the Fibonacci index to compute from the user.
n = int(input("Enter the n indices: "))
Fbn=0 # Accumulator that will receive the computed Fibonacci term
A = 0 # First seed value of the sequence (F_0)
B = 1 # Second seed value of the sequence (F_1)
##############################
# Variable iterative fibonacci --> Better performance, why?
##############################
def varloop_fibonacci(k, F1, F2, S):
    """Return the k-th Fibonacci term using plain scalar variables.

    With the call-site seeds (F1=0, F2=1, S=0) this produces the classic
    sequence 1, 1, 2, 3, 5, ... for k = 1, 2, 3, ...; k=0 returns S.
    F1 serves only as a scratch slot for the freshly computed term.
    """
    for _ in range(k):
        F1 = S + F2      # next term of the sequence
        S, F2 = F1, S    # slide the two-term window forward
    return S
# Benchmark the scalar-variable version: each loop step touches only
# plain Python ints, which is why it outruns the array version below.
t_initial1 = time.time()
Fbn = varloop_fibonacci(n,A,B,Fbn)
print("Elapsed time is: %s seconds" % (time.time()-t_initial1))
print("The correspond indices number is: ",Fbn)
print("\n")
###########################
# Array iterative fibonacci --> Worse performance, why?
# (Per-step array indexing and sum() add interpreter/NumPy overhead.)
###########################
Fbn=0 # Reset the accumulator for the second experiment
A = np.array([0, 1]) # Seed values (F_0, F_1) packed into a NumPy array
def arrayloop_fibonacci(k, F, S):
    """Return the k-th Fibonacci term using a two-slot array window.

    Functionally identical to varloop_fibonacci, but keeps the sliding
    window in a length-2 sequence; the per-step ``sum`` call and element
    indexing are why this variant benchmarks slower than the scalar one.

    :param k: index of the term to compute (k >= 0)
    :param F: length-2 sequence with the seed values, e.g. [0, 1]
    :param S: starting accumulator (0 for the standard sequence)
    :return: the k-th term of the sequence
    """
    # Work on a local copy so the caller's array is not clobbered
    # (the previous version silently overwrote F in place).
    window = list(F)
    for _ in range(k):
        window[0] = S
        S = sum(window)
        window[1] = window[0]
    return S
# Benchmark the array-window version for comparison with the scalar one.
t_initial2 = time.time()
Fbn = arrayloop_fibonacci(n,A,Fbn)
print("Elapsed time is: %s seconds" % (time.time()-t_initial2))
print("The correspond indices number is: ",Fbn)
| [
"numpy.array",
"time.time"
] | [((648, 659), 'time.time', 'time.time', ([], {}), '()\n', (657, 659), False, 'import time\n'), ((971, 987), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (979, 987), True, 'import numpy as np\n'), ((1167, 1178), 'time.time', 'time.time', ([], {}), '()\n', (1176, 1178), False, 'import time\n'), ((735, 746), 'time.time', 'time.time', ([], {}), '()\n', (744, 746), False, 'import time\n'), ((1254, 1265), 'time.time', 'time.time', ([], {}), '()\n', (1263, 1265), False, 'import time\n')] |
import numpy as np
import helper
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from datetime import datetime
# Build the Neural Network
# Components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
#
# - model_inputs
# - process_decoder_input
# - encoding_layer
# - decoding_layer_train
# - decoding_layer_infer
# - decoding_layer
# - seq2seq_model
def model_inputs():
    """
    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
    :return: Tuple (input, targets, learning rate, keep probability, target sequence length,
    max target sequence length, source sequence length)
    """
    # Token-id matrices for the source and target batches: [batch, time].
    inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    # Scalar hyper-parameters fed at run time.
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    # Per-example sequence lengths, one entry per batch row.
    target_seq_len = tf.placeholder(tf.int32, shape=[None], name='target_sequence_length')
    source_seq_len = tf.placeholder(tf.int32, shape=[None], name='source_sequence_length')
    # Longest target sequence in the batch; caps the decoding length.
    max_target_len = tf.reduce_max(target_seq_len, name='max_target_len')
    return inputs, targets, learning_rate, keep_prob, target_seq_len, max_target_len, source_seq_len
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding.

    Drops the last token of every target sequence and prepends the <GO>
    token, so the decoder input at step t is the target token at t-1.

    :param target_data: Target placeholder, shape [batch, time]
    :param target_vocab_to_int: Dictionary mapping target words to ids
    :param batch_size: Batch size
    :return: Preprocessed target data
    """
    go_id = target_vocab_to_int['<GO>']
    # Trim the final token from each row.
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # Prepend a column of <GO> ids.
    go_column = tf.fill([batch_size, 1], go_id)
    return tf.concat([go_column, trimmed], 1)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """
    Create the encoding layer.

    :param rnn_inputs: Inputs for the RNN (source token ids)
    :param rnn_size: RNN size
    :param num_layers: Number of stacked layers
    :param keep_prob: Dropout keep probability
    :param source_sequence_length: lengths of each sequence in the batch
    :param source_vocab_size: vocabulary size of source data
    :param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
    # Embed the source token ids.
    embedded = tf.contrib.layers.embed_sequence(
        rnn_inputs,
        source_vocab_size,
        encoding_embedding_size
    )

    def _make_cell():
        # A single LSTM cell (the original local name 'gru' was misleading)
        # wrapped with output dropout.
        lstm = tf.contrib.rnn.LSTMCell(
            rnn_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)
        )
        return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

    # Stack num_layers cells and run them over the embedded inputs.
    stacked = tf.contrib.rnn.MultiRNNCell([_make_cell() for _ in range(num_layers)])
    return tf.nn.dynamic_rnn(
        stacked,
        embedded,
        sequence_length=source_sequence_length,
        dtype=tf.float32
    )
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         target_sequence_length, max_summary_length,
                         output_layer, keep_prob):
    """
    Build the training-mode decoder (teacher forcing).

    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN cell
    :param dec_embed_input: Embedded decoder input
    :param target_sequence_length: lengths of each sequence in the target batch
    :param max_summary_length: length of the longest sequence in the batch
    :param output_layer: projection layer mapping cell output to vocab logits
    :param keep_prob: dropout keep probability (not used here; kept for API symmetry)
    :return: BasicDecoderOutput containing training logits and sample_id
    """
    # Helper that feeds the ground-truth embedded targets at each step.
    helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=dec_embed_input,
        sequence_length=target_sequence_length,
        time_major=False
    )
    decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=dec_cell,
        helper=helper,
        initial_state=encoder_state,
        output_layer=output_layer
    )
    # dynamic_decode returns (outputs, final_state, sequence_lengths);
    # only the decoder outputs are needed.
    decoded = tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        impute_finished=True,
        maximum_iterations=max_summary_length
    )
    return decoded[0]
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    """
    Build the inference-mode decoder (greedy decoding).

    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN cell
    :param dec_embeddings: Decoder embedding matrix
    :param start_of_sequence_id: GO id
    :param end_of_sequence_id: EOS id
    :param max_target_sequence_length: maximum length of target sequences
    :param vocab_size: size of decoder/target vocabulary (unused here)
    :param output_layer: projection layer mapping cell output to vocab logits
    :param batch_size: batch size
    :param keep_prob: dropout keep probability (not used here; kept for API symmetry)
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
    # One <GO> start token per batch row.
    start_tokens = tf.tile(
        tf.constant([start_of_sequence_id], dtype=tf.int32),
        [batch_size],
        name='start_tokens'
    )
    # Greedy helper: at each step embeds the argmax of the previous step.
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        embedding=dec_embeddings,
        start_tokens=start_tokens,
        end_token=end_of_sequence_id
    )
    decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=dec_cell,
        helper=helper,
        initial_state=encoder_state,
        output_layer=output_layer
    )
    # Keep only the decoder outputs from dynamic_decode's result tuple.
    decoded = tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        impute_finished=True,
        maximum_iterations=max_target_sequence_length
    )
    return decoded[0]
def decoding_layer(dec_input, encoder_state,
                   target_sequence_length, max_target_sequence_length,
                   rnn_size,
                   num_layers, target_vocab_to_int, target_vocab_size,
                   batch_size, keep_prob, decoding_embedding_size):
    """
    Create decoding layer
    :param dec_input: Decoder input
    :param encoder_state: Encoder state
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: The size of the batch
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Decoder Embedding: trainable matrix, looked up for the training inputs.
    dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
    # Construct the decoder cell.
    # NOTE(review): unlike the encoder, no DropoutWrapper is applied here,
    # so keep_prob is only forwarded to the sub-layers (where it is unused)
    # — confirm whether decoder dropout was intended.
    def make_cell(rnn_size):
        # The parameter shadows the outer rnn_size with the same value.
        dec_cell = tf.contrib.rnn.LSTMCell(
            rnn_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)
        )
        return dec_cell
    dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
    # Dense layer to translate the decoder's output at each time
    # step into a choice from the target vocabulary
    output_layer = Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
    )
    # Set up a training decoder and an inference decoder.
    # Training Decoder: builds variables inside the "decode" scope.
    with tf.variable_scope("decode"):
        training_decoder_output = decoding_layer_train(
            encoder_state,
            dec_cell,
            dec_embed_input,
            target_sequence_length,
            max_target_sequence_length,
            output_layer,
            keep_prob
        )
    # Inference decoder reuses the same parameters trained by the training
    # process (reuse=True is required so no new variables are created).
    with tf.variable_scope("decode", reuse=True):
        start_of_sequence_id = target_vocab_to_int['<GO>']
        end_of_sequence_id = target_vocab_to_int['<EOS>']
        inference_decoder_output = decoding_layer_infer(
            encoder_state,
            dec_cell,
            dec_embeddings,
            start_of_sequence_id,
            end_of_sequence_id,
            max_target_sequence_length,
            target_vocab_size,
            output_layer,
            batch_size,
            keep_prob
        )
    return training_decoder_output, inference_decoder_output
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  source_sequence_length, target_sequence_length,
                  max_target_sentence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param source_sequence_length: Sequence Lengths of source sequences in the batch
    :param target_sequence_length: Sequence Lengths of target sequences in the batch
    :param max_target_sentence_length: Maximum length of target sequences
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Decoder embedding size
    :param dec_embedding_size: Encoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Pass the input data through the encoder. We'll ignore the encoder output, but use the state.
    _, enc_state = encoding_layer(
        input_data,
        rnn_size,
        num_layers,
        keep_prob,
        source_sequence_length,
        source_vocab_size,
        enc_embedding_size
    )
    # Prepare the target sequences we'll feed to the decoder in training mode
    # (prepend <GO>, drop the last token).
    dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
    # Pass encoder state and decoder inputs to the decoders; the decoding
    # layer builds both the teacher-forced and the greedy-inference paths.
    training_decoder_output, inference_decoder_output = decoding_layer(
        dec_input,
        enc_state,
        target_sequence_length,
        max_target_sentence_length,
        rnn_size,
        num_layers,
        target_vocab_to_int,
        target_vocab_size,
        batch_size,
        keep_prob,
        dec_embedding_size
    )
    return training_decoder_output, inference_decoder_output
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
    # Right-pad every sentence with pad_int up to the batch's longest one.
    width = max(len(sentence) for sentence in sentence_batch)
    return [sentence + [pad_int] * (width - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
    """Batch targets, sources, and the lengths of their sentences together"""
    # Only full batches are yielded; any trailing remainder is dropped.
    for lo in range(0, (len(sources) // batch_size) * batch_size, batch_size):
        hi = lo + batch_size
        padded_sources = np.array(pad_sentence_batch(sources[lo:hi], source_pad_int))
        padded_targets = np.array(pad_sentence_batch(targets[lo:hi], target_pad_int))
        # NOTE(review): these are lengths of the *padded* rows, so every
        # entry equals the batch maximum; if true (pre-padding) sequence
        # lengths were intended for masking, measure before padding — confirm.
        target_lengths = [len(row) for row in padded_targets]
        source_lengths = [len(row) for row in padded_sources]
        yield padded_sources, padded_targets, source_lengths, target_lengths
def get_accuracy(target, logits):
    """
    Calculate element-wise accuracy between target and predicted id matrices.

    The narrower of the two matrices is zero-padded on the right so both
    share the same time dimension before comparison.
    """
    widest = max(target.shape[1], logits.shape[1])
    if target.shape[1] < widest:
        target = np.pad(target, [(0, 0), (0, widest - target.shape[1])], 'constant')
    if logits.shape[1] < widest:
        logits = np.pad(logits, [(0, 0), (0, widest - logits.shape[1])], 'constant')
    # Fraction of positions where prediction equals target.
    return np.mean(np.equal(target, logits))
# ---- Training hyper-parameters ----
epochs = 21
batch_size = 1024
rnn_size = 128
num_layers = 2
encoding_embedding_size = 100
decoding_embedding_size = 100
learning_rate = 0.01
keep_probability = 0.75
display_step = 20  # report metrics every this many batches
save_path = 'checkpoints/dev'
# Load the preprocessed token-id corpora and vocabularies.
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
# NOTE(review): this maximum is measured over the *source* sentences
# despite the 'target' name — confirm which corpus was intended.
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
# Build the training graph: placeholders, seq2seq model, loss, optimizer.
train_graph = tf.Graph()
with train_graph.as_default():
    input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
    input_shape = tf.shape(input_data)  # currently unused
    train_logits, inference_logits = seq2seq_model(
        # Source sequences are fed reversed along the last axis.
        # NOTE(review): this reverses the padded rows, so padding ends up
        # at the front of each sequence — confirm this is intended.
        tf.reverse(input_data, [-1]),
        targets,
        keep_prob,
        batch_size,
        source_sequence_length,
        target_sequence_length,
        max_target_sequence_length,
        len(source_vocab_to_int),
        len(target_vocab_to_int),
        encoding_embedding_size,
        decoding_embedding_size,
        rnn_size,
        num_layers,
        target_vocab_to_int
    )
    # Named tensors so they can be fetched by name after restoring.
    training_logits = tf.identity(train_logits.rnn_output, name='logits')
    inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
    # Mask so the loss ignores positions beyond each target's length.
    masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
    with tf.name_scope("optimization"):
        # Loss function: masked cross-entropy over the sequence.
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)
        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)
        # Gradient Clipping to [-1, 1] to stabilize RNN training.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
# Timestamped TensorBoard log directory for this run.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "./logs/"
logdir = "{}/run-{}-lstm".format(root_logdir, now)
# Split data to training and validation sets: the first batch_size
# examples are held out for validation, the rest are used for training.
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
# Materialize the single validation batch once, up front.
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths) = next(
    get_batches(valid_source,
                valid_target,
                batch_size,
                source_vocab_to_int['<PAD>'],
                target_vocab_to_int['<PAD>']))
writer = tf.summary.FileWriter(logdir, graph=train_graph)
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(epochs):
        for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
                get_batches(train_source, train_target, batch_size,
                            source_vocab_to_int['<PAD>'],
                            target_vocab_to_int['<PAD>'])):
            # One optimization step with dropout enabled.
            _, loss = sess.run(
                [train_op, cost],
                {
                    input_data: source_batch,
                    targets: target_batch,
                    lr: learning_rate,
                    target_sequence_length: targets_lengths,
                    source_sequence_length: sources_lengths,
                    keep_prob: keep_probability
                }
            )
            # Periodically evaluate accuracy (dropout disabled: keep_prob=1.0).
            if batch_i % display_step == 0 and batch_i > 0:
                batch_train_logits = sess.run(
                    inference_logits,
                    {
                        input_data: source_batch,
                        source_sequence_length: sources_lengths,
                        target_sequence_length: targets_lengths,
                        keep_prob: 1.0
                    }
                )
                batch_valid_logits = sess.run(
                    inference_logits,
                    {
                        input_data: valid_sources_batch,
                        source_sequence_length: valid_sources_lengths,
                        target_sequence_length: valid_targets_lengths,
                        keep_prob: 1.0
                    }
                )
                train_acc = get_accuracy(target_batch, batch_train_logits)
                valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
                # NOTE(review): len(source_batch) == batch_size, so this
                # collapses to epoch_i + batch_i; len(train_source) was
                # presumably intended for a monotone global step — confirm.
                step = epoch_i * (len(source_batch) // batch_size) + batch_i
                summary = tf.Summary()
                summary.value.add(tag='Train Accuracy', simple_value=train_acc)
                summary.value.add(tag='Validation Accuracy', simple_value=valid_acc)
                summary.value.add(tag='Loss', simple_value=loss)
                writer.add_summary(summary, global_step=step)
                print(
                    'Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
                    .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)
                )
    # Save Model checkpoint for later inference.
    saver = tf.train.Saver()
    saver.save(sess, save_path)
    print('Model Trained and Saved')
| [
"tensorflow.shape",
"numpy.equal",
"tensorflow.truncated_normal_initializer",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.Graph",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.seq2seq.sequence_loss",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.dynamic_rnn",
... | [((13606, 13630), 'helper.load_preprocess', 'helper.load_preprocess', ([], {}), '()\n', (13628, 13630), False, 'import helper\n'), ((13728, 13738), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13736, 13738), True, 'import tensorflow as tf\n'), ((15813, 15861), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir'], {'graph': 'train_graph'}), '(logdir, graph=train_graph)\n', (15834, 15861), True, 'import tensorflow as tf\n'), ((728, 786), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""input"""'}), "(tf.int32, shape=[None, None], name='input')\n", (742, 786), True, 'import tensorflow as tf\n'), ((801, 861), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""targets"""'}), "(tf.int32, shape=[None, None], name='targets')\n", (815, 861), True, 'import tensorflow as tf\n'), ((871, 919), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (885, 919), True, 'import tensorflow as tf\n'), ((936, 980), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (950, 980), True, 'import tensorflow as tf\n'), ((1010, 1079), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""target_sequence_length"""'}), "(tf.int32, shape=[None], name='target_sequence_length')\n", (1024, 1079), True, 'import tensorflow as tf\n'), ((1101, 1161), 'tensorflow.reduce_max', 'tf.reduce_max', (['target_sequence_length'], {'name': '"""max_target_len"""'}), "(target_sequence_length, name='max_target_len')\n", (1114, 1161), True, 'import tensorflow as tf\n'), ((1191, 1260), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""source_sequence_length"""'}), "(tf.int32, shape=[None], name='source_sequence_length')\n", (1205, 1260), True, 'import tensorflow as 
tf\n'), ((1705, 1768), 'tensorflow.strided_slice', 'tf.strided_slice', (['target_data', '[0, 0]', '[batch_size, -1]', '[1, 1]'], {}), '(target_data, [0, 0], [batch_size, -1], [1, 1])\n', (1721, 1768), True, 'import tensorflow as tf\n'), ((2562, 2654), 'tensorflow.contrib.layers.embed_sequence', 'tf.contrib.layers.embed_sequence', (['rnn_inputs', 'source_vocab_size', 'encoding_embedding_size'], {}), '(rnn_inputs, source_vocab_size,\n encoding_embedding_size)\n', (2594, 2654), True, 'import tensorflow as tf\n'), ((3196, 3303), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['enc_cell', 'enc_embed_input'], {'sequence_length': 'source_sequence_length', 'dtype': 'tf.float32'}), '(enc_cell, enc_embed_input, sequence_length=\n source_sequence_length, dtype=tf.float32)\n', (3213, 3303), True, 'import tensorflow as tf\n'), ((4187, 4307), 'tensorflow.contrib.seq2seq.TrainingHelper', 'tf.contrib.seq2seq.TrainingHelper', ([], {'inputs': 'dec_embed_input', 'sequence_length': 'target_sequence_length', 'time_major': '(False)'}), '(inputs=dec_embed_input, sequence_length=\n target_sequence_length, time_major=False)\n', (4220, 4307), True, 'import tensorflow as tf\n'), ((4377, 4507), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', ([], {'cell': 'dec_cell', 'helper': 'training_helper', 'initial_state': 'encoder_state', 'output_layer': 'output_layer'}), '(cell=dec_cell, helper=training_helper,\n initial_state=encoder_state, output_layer=output_layer)\n', (4408, 4507), True, 'import tensorflow as tf\n'), ((5864, 5991), 'tensorflow.contrib.seq2seq.GreedyEmbeddingHelper', 'tf.contrib.seq2seq.GreedyEmbeddingHelper', ([], {'embedding': 'dec_embeddings', 'start_tokens': 'start_tokens', 'end_token': 'end_of_sequence_id'}), '(embedding=dec_embeddings,\n start_tokens=start_tokens, end_token=end_of_sequence_id)\n', (5904, 5991), True, 'import tensorflow as tf\n'), ((6063, 6194), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', ([], 
{'cell': 'dec_cell', 'helper': 'inference_helper', 'initial_state': 'encoder_state', 'output_layer': 'output_layer'}), '(cell=dec_cell, helper=inference_helper,\n initial_state=encoder_state, output_layer=output_layer)\n', (6094, 6194), True, 'import tensorflow as tf\n'), ((7656, 7705), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['dec_embeddings', 'dec_input'], {}), '(dec_embeddings, dec_input)\n', (7678, 7705), True, 'import tensorflow as tf\n'), ((13920, 13940), 'tensorflow.shape', 'tf.shape', (['input_data'], {}), '(input_data)\n', (13928, 13940), True, 'import tensorflow as tf\n'), ((14417, 14468), 'tensorflow.identity', 'tf.identity', (['train_logits.rnn_output'], {'name': '"""logits"""'}), "(train_logits.rnn_output, name='logits')\n", (14428, 14468), True, 'import tensorflow as tf\n'), ((14492, 14551), 'tensorflow.identity', 'tf.identity', (['inference_logits.sample_id'], {'name': '"""predictions"""'}), "(inference_logits.sample_id, name='predictions')\n", (14503, 14551), True, 'import tensorflow as tf\n'), ((14565, 14670), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['target_sequence_length', 'max_target_sequence_length'], {'dtype': 'tf.float32', 'name': '"""masks"""'}), "(target_sequence_length, max_target_sequence_length, dtype=\n tf.float32, name='masks')\n", (14581, 14670), True, 'import tensorflow as tf\n'), ((15868, 15897), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'train_graph'}), '(graph=train_graph)\n', (15878, 15897), True, 'import tensorflow as tf\n'), ((18380, 18396), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18394, 18396), True, 'import tensorflow as tf\n'), ((2919, 2981), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['gru'], {'output_keep_prob': 'keep_prob'}), '(gru, output_keep_prob=keep_prob)\n', (2948, 2981), True, 'import tensorflow as tf\n'), ((4622, 4747), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', ([], {'decoder': 
'training_decoder', 'impute_finished': '(True)', 'maximum_iterations': 'max_summary_length'}), '(decoder=training_decoder, impute_finished\n =True, maximum_iterations=max_summary_length)\n', (4655, 4747), True, 'import tensorflow as tf\n'), ((5691, 5742), 'tensorflow.constant', 'tf.constant', (['[start_of_sequence_id]'], {'dtype': 'tf.int32'}), '([start_of_sequence_id], dtype=tf.int32)\n', (5702, 5742), True, 'import tensorflow as tf\n'), ((6310, 6443), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', ([], {'decoder': 'inference_decoder', 'impute_finished': '(True)', 'maximum_iterations': 'max_target_sequence_length'}), '(decoder=inference_decoder,\n impute_finished=True, maximum_iterations=max_target_sequence_length)\n', (6343, 6443), True, 'import tensorflow as tf\n'), ((7569, 7632), 'tensorflow.random_uniform', 'tf.random_uniform', (['[target_vocab_size, decoding_embedding_size]'], {}), '([target_vocab_size, decoding_embedding_size])\n', (7586, 7632), True, 'import tensorflow as tf\n'), ((8385, 8412), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decode"""'], {}), "('decode')\n", (8402, 8412), True, 'import tensorflow as tf\n'), ((8757, 8796), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decode"""'], {'reuse': '(True)'}), "('decode', reuse=True)\n", (8774, 8796), True, 'import tensorflow as tf\n'), ((12997, 13065), 'numpy.pad', 'np.pad', (['target', '[(0, 0), (0, max_seq - target.shape[1])]', '"""constant"""'], {}), "(target, [(0, 0), (0, max_seq - target.shape[1])], 'constant')\n", (13003, 13065), True, 'import numpy as np\n'), ((13154, 13222), 'numpy.pad', 'np.pad', (['logits', '[(0, 0), (0, max_seq - logits.shape[1])]', '"""constant"""'], {}), "(logits, [(0, 0), (0, max_seq - logits.shape[1])], 'constant')\n", (13160, 13222), True, 'import numpy as np\n'), ((13280, 13304), 'numpy.equal', 'np.equal', (['target', 'logits'], {}), '(target, logits)\n', (13288, 13304), True, 'import numpy as np\n'), ((14002, 
14030), 'tensorflow.reverse', 'tf.reverse', (['input_data', '[-1]'], {}), '(input_data, [-1])\n', (14012, 14030), True, 'import tensorflow as tf\n'), ((14676, 14705), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimization"""'], {}), "('optimization')\n", (14689, 14705), True, 'import tensorflow as tf\n'), ((14746, 14811), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['training_logits', 'targets', 'masks'], {}), '(training_logits, targets, masks)\n', (14778, 14811), True, 'import tensorflow as tf\n'), ((14890, 14916), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (14912, 14916), True, 'import tensorflow as tf\n'), ((15185, 15202), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (15200, 15202), False, 'from datetime import datetime\n'), ((15920, 15953), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15951, 15953), True, 'import tensorflow as tf\n'), ((1796, 1849), 'tensorflow.fill', 'tf.fill', (['[batch_size, 1]', "target_vocab_to_int['<GO>']"], {}), "([batch_size, 1], target_vocab_to_int['<GO>'])\n", (1803, 1849), True, 'import tensorflow as tf\n'), ((8235, 8288), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.1)'}), '(mean=0.0, stddev=0.1)\n', (8266, 8288), True, 'import tensorflow as tf\n'), ((2811, 2859), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {'seed': '(2)'}), '(-0.1, 0.1, seed=2)\n', (2840, 2859), True, 'import tensorflow as tf\n'), ((7859, 7907), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.1)', '(0.1)'], {'seed': '(2)'}), '(-0.1, 0.1, seed=2)\n', (7888, 7907), True, 'import tensorflow as tf\n'), ((15029, 15062), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (15045, 15062), True, 'import tensorflow 
as tf\n'), ((17765, 17777), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (17775, 17777), True, 'import tensorflow as tf\n')] |
import os
import json
import shutil
import tempfile
import contextlib
import functools
from collections import OrderedDict
import numpy as np
import h5py
from quilted.h5blockstore import H5BlockStore
@contextlib.contextmanager
def autocleaned_tmpdir():
    """Context manager yielding a fresh temporary directory, removed on exit.

    Fix: the cleanup is now in a ``finally`` block so the directory is
    removed even when the managed body raises (the original leaked the
    directory on exceptions, since ``shutil.rmtree`` followed the bare
    ``yield``).
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        shutil.rmtree(tmpdir)
def with_autocleaned_tempdir(f):
    """Decorator: call ``f`` with a fresh temp dir appended as the last
    positional argument; the directory is cleaned up afterwards.

    Fix: keyword arguments are now forwarded (the original ``wrapped(*args)``
    silently dropped them).
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        with autocleaned_tmpdir() as tmpdir:
            return f(*(args + (tmpdir,)), **kwargs)
    return wrapped
class TestH5BlockStore(object):
    """Integration tests for ``quilted.h5blockstore.H5BlockStore``.

    Each test receives a throwaway temporary directory as its ``tmpdir``
    argument via the ``with_autocleaned_tempdir`` decorator; the directory
    is deleted after the test returns.
    """

    @with_autocleaned_tempdir
    def test_access(self, tmpdir):
        """Index-file creation, per-block locking, and lock release on close/del."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.uint8)
        # Creating the store must write its JSON index file immediately.
        assert os.path.exists(blockstore.index_path)
        with open(blockstore.index_path, 'r') as index_file:
            index_contents = json.load(index_file, object_pairs_hook=OrderedDict)
        assert index_contents['block_entries'] == []
        first_block_bounds = ( (0,0,0), (100,200,300) )
        block = blockstore.get_block( first_block_bounds )
        # Fetching a block registers it in the index and creates its HDF5 file.
        with open (blockstore.index_path, 'r') as index_file:
            index_contents = json.load(index_file, object_pairs_hook=OrderedDict)
        assert len(index_contents['block_entries']) == 1
        assert os.path.exists( blockstore_root_dir + '/' + blockstore._get_block_file_path(first_block_bounds) )
        # Shouldn't be able to access this block while `block` is still open
        try:
            blockstore.get_block( first_block_bounds, timeout=0.0 )
        except H5BlockStore.TimeoutError:
            pass
        else:
            assert False, "Expected to see a TimeoutError!"
        block.close()
        # Should be possible to access after close
        block2 = blockstore.get_block( first_block_bounds, timeout=0.0 )
        # Deleting block2 should auto-close (and release its lock)
        del block2
        # Should be possible to access after close
        block3 = blockstore.get_block( first_block_bounds, timeout=0.0 )
        del block3

    @with_autocleaned_tempdir
    def test_write(self, tmpdir):
        """Data written through a block is persisted to HDF5 and re-readable."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)
        first_block_bounds = ( (0,0,0), (100,200,300) )
        block = blockstore.get_block( first_block_bounds )
        block[:] = 0.123
        block.close()
        # Read directly from hdf5
        with h5py.File(block._block_abspath, 'r') as block_f:
            block_dset = block_f['data']
            assert (block_dset[:] == 0.123).all()
        # Re-open in read mode and read that way
        blockstore = H5BlockStore(blockstore_root_dir, mode='r')
        first_block_bounds = ( (0,0,0), (100,200,300) )
        block = blockstore.get_block( first_block_bounds )
        assert (block[:] == 0.123).all()

    @with_autocleaned_tempdir
    def test_incomplete_bounds_query(self, tmpdir):
        """``None`` entries in bounds match an existing block, but can't create one."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)
        first_block_bounds = ( (0,0,0), (100,200,300) )
        block = blockstore.get_block( first_block_bounds )
        block[:] = 0.123
        block.close()
        # Try giving an incomplete block specification (using None)
        incomplete_bounds = ( (0,0,0), (100,200,None) )
        block = blockstore.get_block( incomplete_bounds )
        del block
        # But should not be possible to create a new block this way
        try:
            block = blockstore.get_block( ( (0,0,0), (100,5000,None) ) )
        except H5BlockStore.MissingBlockError:
            pass
        else:
            assert False, "Expected a MissingBlockError"

    @with_autocleaned_tempdir
    def test_reset_access(self, tmpdir):
        """``reset_access()`` clears stale locks so a held block becomes reachable."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)
        first_block_bounds = ( (0,0,0), (100,200,300) )
        block = blockstore.get_block( first_block_bounds )
        block[:] = 0.123
        # Accessing same block will fail -- it's already locked
        try:
            block2 = blockstore.get_block( first_block_bounds, timeout=0.0 )
        except H5BlockStore.TimeoutError:
            pass
        # Now, without deleting our reference to the block,
        # reset the blockstore and access it again -- should work this time.
        blockstore.reset_access()
        block3 = blockstore.get_block( first_block_bounds, timeout=0.0 )

    @with_autocleaned_tempdir
    def test_export_to_hdf5(self, tmpdir):
        """Export two overlapping blocks (10-px halo) into one HDF5 dataset."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)
        first_block_bounds = ( (0,0,0), (110,210,310) )
        with blockstore.get_block( first_block_bounds ) as first_block:
            first_block[:] = 1
        second_block_bounds = ( (90,190,290), (210,310,410) )
        with blockstore.get_block( second_block_bounds ) as second_block:
            second_block[:] = 2
        def remove_halo(block_bounds):
            # Shrink bounds by the 10-px halo on every face before stitching.
            block_bounds = np.array(block_bounds)
            block_bounds[0] += 10
            block_bounds[1] -= 10
            return block_bounds
        export_filepath = tmpdir + '/exported.h5'
        blockstore.export_to_single_dset(export_filepath, 'data', remove_halo)
        with h5py.File(export_filepath, 'r') as exported_file:
            assert exported_file['data'].dtype == np.float32
            assert exported_file['data'].shape == (200,300,400)
            # Cropped-out pixels from the halo should be zero
            assert (exported_file['data'][0:10, 0:10, 0:10] == 0).all()
            # Did the overlapping region get properly handled in each block?
            assert (exported_file['data'][10:100, 10:200, 10:300] == 1).all()
            assert (exported_file['data'][100:200, 200:300, 300:400] == 2).all()

    @with_autocleaned_tempdir
    def test_export_to_array(self, tmpdir):
        """Export a sub-volume spanning both blocks directly into a numpy array."""
        blockstore_root_dir = tmpdir
        blockstore = H5BlockStore(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)
        first_block_bounds = ( (0,0,0), (110,210,310) )
        with blockstore.get_block( first_block_bounds ) as first_block:
            first_block[:] = 1
        second_block_bounds = ( (90,190,290), (210,310,410) )
        with blockstore.get_block( second_block_bounds ) as second_block:
            second_block[:] = 2
        def remove_halo(block_bounds):
            # Shrink bounds by the 10-px halo on every face before stitching.
            block_bounds = np.array(block_bounds)
            block_bounds[0] += 10
            block_bounds[1] -= 10
            return block_bounds
        subvol = blockstore.export_to_array([(50, 150, 250), (150, 250, 350)], remove_halo)
        assert subvol.shape == (100,100,100)
        assert subvol.dtype == np.float32
        assert (subvol[0:50, 0:50, 0:50] == 1).all()
        assert (subvol[50:100, 50:100, 50:100] == 2).all()
        # Pixels not touched by any of the cropped blocks should be zero
        assert (subvol[0:50, 50:100, 0:50] == 0).all()
if __name__ == "__main__":
    # Run this test module under nose when executed directly.
    import sys
    import logging
    # Echo all log output to the console while the tests run.
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    # Comment this out to see warnings from reset_access()
    logging.getLogger('quilted.h5blockstore').setLevel(logging.ERROR)
    # Fix: removed the duplicate `import sys` that shadowed the one above.
    import nose
    sys.argv.append("--nocapture") # Don't steal stdout.  Show it on the console as usual.
    sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
    nose.run(defaultTest=__file__)
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"sys.argv.append",
"functools.wraps",
"h5py.File",
"numpy.array",
"tempfile.mkdtemp",
"shutil.rmtree",
"json.load",
"quilted.h5blockstore.H5BlockStore",
"nose.run"
] | [((269, 287), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (285, 287), False, 'import tempfile\n'), ((309, 330), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (322, 330), False, 'import shutil\n'), ((370, 388), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (385, 388), False, 'import functools\n'), ((7739, 7769), 'sys.argv.append', 'sys.argv.append', (['"""--nocapture"""'], {}), "('--nocapture')\n", (7754, 7769), False, 'import sys\n'), ((7833, 7866), 'sys.argv.append', 'sys.argv.append', (['"""--nologcapture"""'], {}), "('--nologcapture')\n", (7848, 7866), False, 'import sys\n'), ((7928, 7958), 'nose.run', 'nose.run', ([], {'defaultTest': '__file__'}), '(defaultTest=__file__)\n', (7936, 7958), False, 'import nose\n'), ((692, 763), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.uint8'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.uint8)\n", (704, 763), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((779, 816), 'os.path.exists', 'os.path.exists', (['blockstore.index_path'], {}), '(blockstore.index_path)\n', (793, 816), False, 'import os\n'), ((2352, 2425), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.float32'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)\n", (2364, 2425), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((2872, 2915), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""r"""'}), "(blockstore_root_dir, mode='r')\n", (2884, 2915), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((3213, 3286), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.float32'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)\n", (3225, 3286), False, 'from 
quilted.h5blockstore import H5BlockStore\n'), ((4103, 4176), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.float32'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)\n", (4115, 4176), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((4933, 5006), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.float32'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)\n", (4945, 5006), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((6406, 6479), 'quilted.h5blockstore.H5BlockStore', 'H5BlockStore', (['blockstore_root_dir'], {'mode': '"""a"""', 'axes': '"""zyx"""', 'dtype': 'np.float32'}), "(blockstore_root_dir, mode='a', axes='zyx', dtype=np.float32)\n", (6418, 6479), False, 'from quilted.h5blockstore import H5BlockStore\n'), ((7534, 7567), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (7555, 7567), False, 'import logging\n'), ((907, 959), 'json.load', 'json.load', (['index_file'], {'object_pairs_hook': 'OrderedDict'}), '(index_file, object_pairs_hook=OrderedDict)\n', (916, 959), False, 'import json\n'), ((1222, 1274), 'json.load', 'json.load', (['index_file'], {'object_pairs_hook': 'OrderedDict'}), '(index_file, object_pairs_hook=OrderedDict)\n', (1231, 1274), False, 'import json\n'), ((2653, 2689), 'h5py.File', 'h5py.File', (['block._block_abspath', '"""r"""'], {}), "(block._block_abspath, 'r')\n", (2662, 2689), False, 'import h5py\n'), ((5427, 5449), 'numpy.array', 'np.array', (['block_bounds'], {}), '(block_bounds)\n', (5435, 5449), True, 'import numpy as np\n'), ((5702, 5733), 'h5py.File', 'h5py.File', (['export_filepath', '"""r"""'], {}), "(export_filepath, 'r')\n", (5711, 5733), False, 'import h5py\n'), ((6884, 6906), 'numpy.array', 'np.array', (['block_bounds'], {}), '(block_bounds)\n', (6892, 6906), True, 
'import numpy as np\n'), ((7503, 7522), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7520, 7522), False, 'import logging\n'), ((7633, 7674), 'logging.getLogger', 'logging.getLogger', (['"""quilted.h5blockstore"""'], {}), "('quilted.h5blockstore')\n", (7650, 7674), False, 'import logging\n')] |
import numpy as np
from numba import njit
@njit(cache=True)
def calculate_goodness_of_fit(table):
    """Root-mean-square goodness-of-fit statistic for an (n, 2) count table.

    Compares observed counts against the expected counts under
    independence of rows and columns.
    """
    n = table.shape[0]
    # Grand total of all counts.
    m = table.sum()
    # NOTE(review): names are swapped relative to convention --
    # `row_sum` holds per-column totals (axis=0) and `col_sum` holds
    # per-row totals (axis=1) -- but they are used consistently below.
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts: outer product of the marginals divided by the total.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    # Root mean square of the observed-minus-expected deviations
    # (2 * n cells in total).
    s = np.sqrt(((table - e) ** 2).sum() / 2 / n)
    return s
@njit(cache=True)
def make_permutation_table(p, m, n):
    """Simulate an (n, 2) count table with ``m`` total counts under the null.

    Each of the ``m`` counts is dropped into one of the ``2 * n`` flat
    cells according to the expected cell probabilities ``p`` (flat array
    summing to 1).

    Fix: the original sampled cells with ``np.random.choice(order)``
    uniformly and never used ``p``, so the simulated tables did not
    follow the expected-cell null distribution.  Sampling is now done by
    inverse-CDF lookup, using only operations supported in numba's
    nopython mode.
    """
    permuted_flat = np.zeros(p.size, dtype=np.int64)
    cum_p = np.cumsum(p)
    last = p.size - 1
    for _ in range(m):
        # Inverse-CDF draw: index of the first cell whose cumulative
        # probability covers the uniform random variate.
        cell = np.searchsorted(cum_p, np.random.random())
        if cell > last:
            # Guard against cum_p[-1] falling slightly below 1.0 due to
            # floating-point accumulation error.
            cell = last
        permuted_flat[cell] += 1
    return permuted_flat.reshape(n, 2)
@njit(cache=True)
def permute_root_mean_square_test(table, n_permute=3000, min_pvalue=0.034):
    """Permutation p-value for the root-mean-square goodness-of-fit test.

    Simulates up to ``n_permute`` tables from the expected cell
    proportions and returns the fraction whose statistic is at least the
    observed one.  Uses a +1 pseudo-count and exits early once the count
    of exceedances guarantees p > ``min_pvalue``.
    """
    # calculate real goodness-of-fit s
    n = table.shape[0]
    m = table.sum()
    # Marginals (axis=0 gives per-column totals, axis=1 per-row totals).
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts under independence; only needed here to derive `p`.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    real_s = calculate_goodness_of_fit(table)
    # permutation: `p` is the flat vector of expected cell proportions,
    # passed to make_permutation_table as the null sampling distribution.
    p = e.flatten() / m
    # +1 pseudo-count so the p-value can never be exactly zero.
    greater_than_real = 1
    max_greater_value = n_permute * min_pvalue
    for i in range(n_permute):
        p_table = make_permutation_table(p, m, n)
        # calculate permuted goodness of fit s'
        s = calculate_goodness_of_fit(p_table)
        greater_than_real += int(s >= real_s)
        # break in advance if p-value can no longer be significant
        if greater_than_real > max_greater_value:
            # return current p value (i + 2 = permutations done + pseudo-count)
            return greater_than_real / (i + 2)
    p_value = greater_than_real / n_permute
    return p_value
@njit
def calculate_residue(table):
    """Adjusted (standardized) residuals for an (n, 2) count table.

    Returns (observed - expected) scaled by the estimated standard
    deviation of each cell, so large |values| flag cells that deviate
    most from independence.
    """
    n = table.shape[0]
    # Grand total of all counts.
    m = table.sum()
    # NOTE(review): `row_sum` is per-column totals (axis=0) and `col_sum`
    # per-row totals (axis=1) -- swapped names, used consistently.
    row_sum = table.sum(axis=0).astype(np.float64)
    col_sum = table.sum(axis=1).astype(np.float64)
    # Expected counts: outer product of the marginals divided by the total.
    e = np.dot(col_sum.reshape(n, 1), row_sum.reshape(1, 2)) / m
    # Adjusted residual: (O - E) / sqrt(E * (1 - row_prop) * (1 - col_prop)).
    residual = (table - e) / np.sqrt(
        np.multiply(e, (1 - e.sum(axis=1) / m).reshape(n, 1) *
                    (e.sum(axis=0) / m).reshape(1, 2)))
    return residual
| [
"numpy.random.choice",
"numpy.zeros_like",
"numba.njit",
"numpy.arange"
] | [((45, 61), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (49, 61), False, 'from numba import njit\n'), ((376, 392), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (380, 392), False, 'from numba import njit\n'), ((645, 661), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (649, 661), False, 'from numba import njit\n'), ((450, 482), 'numpy.zeros_like', 'np.zeros_like', (['p'], {'dtype': 'np.int64'}), '(p, dtype=np.int64)\n', (463, 482), True, 'import numpy as np\n'), ((495, 527), 'numpy.arange', 'np.arange', (['(0)', 'permuted_flat.size'], {}), '(0, permuted_flat.size)\n', (504, 527), True, 'import numpy as np\n'), ((573, 596), 'numpy.random.choice', 'np.random.choice', (['order'], {}), '(order)\n', (589, 596), True, 'import numpy as np\n')] |
import numpy as np
def trust_region_solver(M, g, d_max, max_iter=2000, stepsize=1.0e-3):
    """Solve the trust-region subproblem with projected gradient ascent.

        maximize   1/2 * x^T M x + g^T x
        s.t.       |x|_2 <= d_max

    Parameters
    ----------
    M : (n, n) array -- quadratic term.
    g : (n,) array -- linear term; also sets the initial iterate direction.
    d_max : float -- trust-region radius (> 0).
    max_iter : int -- number of fixed-step ascent iterations.
    stepsize : float -- ascent step size.

    Returns
    -------
    (n,) array on the sphere |x|_2 = d_max (every iterate is projected
    back onto the boundary, as in the original implementation).

    Fix: the original initialization ``g / |g| * d_max`` produced NaNs
    for a zero gradient; we now fall back to an arbitrary unit direction.
    """
    g_norm = np.linalg.norm(g)
    if g_norm > 0.0:
        x = g / g_norm * d_max
    else:
        # Zero gradient: any starting point on the sphere is as good as
        # another, so use the first coordinate axis.
        x = np.zeros_like(g, dtype=np.float64)
        x.flat[0] = d_max
    for _ in range(max_iter):
        # Gradient ascent step on 1/2 x^T M x + g^T x.
        x = x + stepsize * (M @ x + g)
        # Project back onto the sphere of radius d_max.
        x = x / np.linalg.norm(x) * d_max
    return x
| [
"numpy.linalg.norm"
] | [((261, 278), 'numpy.linalg.norm', 'np.linalg.norm', (['g'], {}), '(g)\n', (275, 278), True, 'import numpy as np\n'), ((429, 446), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (443, 446), True, 'import numpy as np\n')] |
'''
DLRM Facebookresearch Debloating
author: sjoon-oh @ Github
source: dlrm/dlrm_s_pytorch.py
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
# miscellaneous
import builtins
import datetime
import json
import sys
import time
# data generation
import dlrm_data as dp
from dlrm_net import *
# numpy
import numpy as np
# pytorch
import torch
import torch.nn as nn
from torch._ops import ops
from torch.nn.parameter import Parameter
import optim.rwsadagrad as RowWiseSparseAdagrad
exc = getattr(builtins, "IOError", "FileNotFoundError")
#
# ---
# Function: time_wrap
def time_wrap(use_gpu):
    """Return the current wall-clock time.

    When ``use_gpu`` is set, all queued CUDA work is synchronized first
    so the timestamp reflects completed GPU execution.
    """
    if not use_gpu:
        return time.time()
    torch.cuda.synchronize()
    return time.time()
#
# ---
# Function: dash_separated_ints
def dash_separated_ints(value):
    """Argparse ``type=`` validator for a dash-separated list of ints.

    Returns ``value`` unchanged when every dash-separated token parses
    as an int; otherwise raises ``argparse.ArgumentTypeError``.
    """
    def _parses_as_int(token):
        try:
            int(token)
        except ValueError:
            return False
        return True

    if not all(_parses_as_int(token) for token in value.split("-")):
        raise argparse.ArgumentTypeError(
            "%s is not a valid dash separated list of ints" % value
        )
    return value
#
# ---
# Function: dash_separated_floats
def dash_separated_floats(value):
    """Argparse ``type=`` validator for a dash-separated list of floats.

    Returns ``value`` unchanged when every dash-separated token parses
    as a float; otherwise raises ``argparse.ArgumentTypeError``.
    """
    def _is_float(token):
        try:
            float(token)
            return True
        except ValueError:
            return False

    bad_tokens = [tok for tok in value.split("-") if not _is_float(tok)]
    if bad_tokens:
        raise argparse.ArgumentTypeError(
            "%s is not a valid dash separated list of floats" % value
        )
    return value
#
# ---
# Function: get_split_lengths
# Moved from extended_distributed.py
def get_split_lengths(n, num_splits=1, rank=0):
    """Split ``n`` items across ``num_splits`` workers.

    Returns ``(my_len, splits)`` where ``my_len`` is the share of the
    worker at ``rank`` and ``splits`` is the per-worker list of lengths,
    or ``None`` when ``n`` divides evenly (matching the original
    contract inherited from extended_distributed.py).

    Fix: the original hard-coded a single split and referenced an
    undefined ``my_rank`` in the uneven branch -- a latent NameError
    that was unreachable only because ``divmod(n, 1)`` never leaves a
    remainder.  The split count and rank are now parameters with
    backward-compatible defaults.
    """
    k, m = divmod(n, num_splits)
    if m == 0:
        # Even split: every worker gets k items, no per-worker list needed.
        return (k, None)
    # Uneven split: the first m workers take one extra item each.
    splits = [(k + 1) if i < m else k for i in range(num_splits)]
    return (splits[rank], splits)
#
# ---
# Function: inference
def inference(
    args,
    dlrm,
    best_acc_test,
    best_auc_test,
    test_ld,
    device,
    use_gpu,
    log_iter=-1,
):
    """Run one evaluation pass of ``dlrm`` over the test loader.

    Accumulates rounded-prediction accuracy over ``test_ld`` and returns
    ``(model_metrics_dict, is_best)``, where ``is_best`` tells the caller
    whether ``acc_test`` improved on ``best_acc_test``.

    NOTE(review): relies on the module globals ``nbatches``,
    ``nbatches_test`` and ``ndevices``, and on ``unpack_batch`` /
    ``dlrm_wrap`` star-imported from dlrm_net.  ``best_auc_test`` and
    ``log_iter`` are accepted but unused in this debloated version --
    presumably kept for interface compatibility with the full DLRM
    script; confirm against callers before removing.
    """
    test_accu = 0
    test_samp = 0
    for i, testBatch in enumerate(test_ld):
        # early exit if nbatches was set by the user and was exceeded
        if nbatches > 0 and i >= nbatches:
            break
        # W_test / CBPP_test are unpacked but unused below.
        X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
            testBatch
        )
        # forward pass
        Z_test = dlrm_wrap(
            dlrm,
            X_test,
            lS_o_test,
            lS_i_test,
            use_gpu,
            device,
            ndevices=ndevices,
        )
        if Z_test.is_cuda:
            # Ensure the GPU forward pass has finished before reading results.
            torch.cuda.synchronize()
        (_, batch_split_lengths) = get_split_lengths(X_test.size(0))
        # compute loss and accuracy
        S_test = Z_test.detach().cpu().numpy() # numpy array
        T_test = T_test.detach().cpu().numpy() # numpy array
        mbs_test = T_test.shape[0] # = mini_batch_size except last
        # Count rounded predictions that match the targets in this mini-batch.
        A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))
        test_accu += A_test
        test_samp += mbs_test
    acc_test = test_accu / test_samp
    # writer.add_scalar("Test/Acc", acc_test, log_iter)
    model_metrics_dict = {
        "nepochs": args.nepochs,
        "nbatches": nbatches,
        "nbatches_test": nbatches_test,
        "state_dict": dlrm.state_dict(),
        "test_acc": acc_test,
    }
    is_best = acc_test > best_acc_test
    if is_best:
        # Local rebind only -- the caller must track the new best itself.
        best_acc_test = acc_test
    print(
        " accuracy {:3.3f} %, best {:3.3f} %".format(
            acc_test * 100, best_acc_test * 100
        ),
        flush=True,
    )
    return model_metrics_dict, is_best
#
# ---
# Function: prepare_parser
def prepare_parser():
    """Build the DLRM command-line argument parser.

    Defaults describe a tiny toy configuration (bottom MLP "4-3-2", top
    MLP "4-2-1", sparse feature size 2).  Dash-separated options are
    validated by the ``dash_separated_ints`` / ``dash_separated_floats``
    type callables defined in this module.
    """
    parser = argparse.ArgumentParser(
        description="Train Deep Learning Recommendation Model (DLRM)"
    )
    # model related parameters
    parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
    # j will be replaced with the table number
    parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
    parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
    parser.add_argument(
        "--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
    )
    parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
    # activations and loss
    parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
    parser.add_argument(
        "--loss-weights", type=dash_separated_floats, default="1.0-1.0"
    ) # for wbce
    parser.add_argument("--round-targets", type=bool, default=False)
    # data
    parser.add_argument("--data-size", type=int, default=1)
    parser.add_argument("--num-batches", type=int, default=0)
    parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
    parser.add_argument("--raw-data-file", type=str, default="")
    parser.add_argument("--processed-data-file", type=str, default="")
    parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
    parser.add_argument("--num-workers", type=int, default=0)
    # training
    parser.add_argument("--mini-batch-size", type=int, default=1)
    parser.add_argument("--nepochs", type=int, default=1)
    parser.add_argument("--learning-rate", type=float, default=0.01)
    parser.add_argument("--print-precision", type=int, default=5)
    parser.add_argument("--numpy-rand-seed", type=int, default=123)
    parser.add_argument("--sync-dense-params", type=bool, default=True)
    parser.add_argument("--optimizer", type=str, default="sgd")
    # inference
    parser.add_argument("--inference-only", action="store_true", default=False)
    # gpu
    parser.add_argument("--use-gpu", action="store_true", default=False)
    # debugging and profiling
    parser.add_argument("--print-freq", type=int, default=1)
    parser.add_argument("--test-freq", type=int, default=-1)
    parser.add_argument("--test-mini-batch-size", type=int, default=-1)
    parser.add_argument("--test-num-workers", type=int, default=-1)
    parser.add_argument("--print-time", action="store_true", default=False)
    parser.add_argument("--print-wall-time", action="store_true", default=False)
    # store/load model
    parser.add_argument("--save-model", type=str, default="")
    parser.add_argument("--load-model", type=str, default="")
    parser.add_argument("--max-ind-range", type=int, default=-1)
    parser.add_argument("--cat-feature-num", type=int, default=0)
    parser.add_argument("--den-feature-num", type=int, default=0)
    return parser
#
# ---
# Function: run
def run():
### parse arguments ###
parser = prepare_parser()
global args
global nbatches
global nbatches_test
args = parser.parse_args()
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(precision=args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if args.test_mini_batch_size < 0:
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if args.test_num_workers < 0:
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
use_gpu = args.use_gpu and torch.cuda.is_available()
if use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
ngpus = torch.cuda.device_count()
device = torch.device("cuda", 0)
print("Using {} GPU(s)...".format(ngpus))
else:
device = torch.device("cpu")
print("Using CPU...")
### prepare training data ###
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
# input data
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
table_feature_map = {idx: idx for idx in range(len(train_data.counts))}
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
print(f"Train count: {ln_emb}")
ln_emb = np.array(ln_emb)
m_den = train_data.m_den
ln_bot[0] = m_den
args.ln_emb = ln_emb.tolist()
### parse command line arguments ###
m_spa = args.arch_sparse_feature_size
ln_emb = np.asarray(ln_emb)
num_fea = ln_emb.size + 1 # num sparse + num dense features
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 2: unique
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
global ndevices
ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1
### construct the neural network specified above ###
# WARNING: to obtain exactly the same initialization for
# the weights we need to start from the same random seed.
# np.random.seed(args.numpy_rand_seed)
global dlrm
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
ndevices=ndevices,
loss_function=args.loss_function
)
if use_gpu:
# Custom Model-Data Parallel
# the mlps are replicated and use data parallelism, while
# the embeddings are distributed and use model parallelism
dlrm = dlrm.to(device) # .cuda()
if dlrm.ndevices > 1:
dlrm.emb_l, dlrm.v_W_l = dlrm.create_emb(
m_spa, ln_emb
)
if not args.inference_only:
if use_gpu and args.optimizer in ["rwsadagrad", "adagrad"]:
sys.exit("GPU version of Adagrad is not supported by PyTorch.")
# specify the optimizer algorithm
opts = {
"sgd": torch.optim.SGD,
"rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad,
"adagrad": torch.optim.Adagrad,
}
parameters = (
dlrm.parameters()
)
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate)
lr_scheduler = LRPolicyScheduler(
optimizer,
0, # args.lr_num_warmup_steps,
0, # args.lr_decay_start_step,
0, # args.lr_num_decay_steps,
)
### main loop ###
# training or inference
best_acc_test = 0
best_auc_test = 0
skip_upto_epoch = 0
skip_upto_batch = 0
total_time = 0
total_loss = 0
total_iter = 0
total_samp = 0
# Load model is specified
if not (args.load_model == ""):
print("Loading saved model {}".format(args.load_model))
if use_gpu:
if dlrm.ndevices > 1:
# NOTE: when targeting inference on multiple GPUs,
# load the model as is on CPU or GPU, with the move
# to multiple GPUs to be done in parallel_forward
ld_model = torch.load(args.load_model)
else:
# NOTE: when targeting inference on single GPU,
# note that the call to .to(device) has already happened
ld_model = torch.load(
args.load_model,
map_location=torch.device("cuda")
# map_location=lambda storage, loc: storage.cuda(0)
)
else:
# when targeting inference on CPU
ld_model = torch.load(args.load_model, map_location=torch.device("cpu"))
dlrm.load_state_dict(ld_model["state_dict"])
ld_j = ld_model["iter"]
ld_k = ld_model["epoch"]
ld_nepochs = ld_model["nepochs"]
ld_nbatches = ld_model["nbatches"]
ld_nbatches_test = ld_model["nbatches_test"]
ld_train_loss = ld_model["train_loss"]
ld_total_loss = ld_model["total_loss"]
ld_acc_test = ld_model["test_acc"]
if not args.inference_only:
optimizer.load_state_dict(ld_model["opt_state_dict"])
best_acc_test = ld_acc_test
total_loss = ld_total_loss
skip_upto_epoch = ld_k # epochs
skip_upto_batch = ld_j # batches
else:
args.print_freq = ld_nbatches
args.test_freq = 0
print(
"Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}".format(
ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test
)
)
print(
"Training state: loss = {:.6f}".format(
ld_train_loss,
)
)
print("Testing state: accuracy = {:3.3f} %".format(ld_acc_test * 100))
if args.inference_only:
# Currently only dynamic quantization with INT8 and FP16 weights are
# supported for MLPs and INT4 and INT8 weights for EmbeddingBag
# post-training quantization during the inference.
# By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32)
pass
print("time/loss/accuracy (if enabled):")
# tb_file = "./" + args.tensor_board_filename
# writer = SummaryWriter(tb_file)
with torch.autograd.profiler.profile(
enabled=False,
use_cuda=use_gpu, record_shapes=True
) as prof:
if not args.inference_only:
k = 0
total_time_begin = 0
while k < args.nepochs:
if k < skip_upto_epoch:
continue
for j, inputBatch in enumerate(train_ld):
if j < skip_upto_batch:
continue
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch)
t1 = time_wrap(use_gpu)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# forward pass
Z = dlrm_wrap(
dlrm,
X,
lS_o,
lS_i,
use_gpu,
device,
ndevices=ndevices,
)
# loss
E = loss_fn_wrap(args, dlrm, Z, T, use_gpu, device)
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
optimizer.zero_grad()
# backward pass
E.backward()
# optimizer
optimizer.step()
lr_scheduler.step()
t2 = time_wrap(use_gpu)
total_time += t2 - t1
total_loss += L * mbs
total_iter += 1
total_samp += mbs
should_print = ((j + 1) % args.print_freq == 0) or (
j + 1 == nbatches
)
should_test = (
(args.test_freq > 0)
and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))
)
# print time, loss and accuracy
if should_print or should_test:
gT = 1000.0 * total_time / total_iter if args.print_time else -1
total_time = 0
train_loss = total_loss / total_samp
total_loss = 0
str_run_type = (
"inference" if args.inference_only else "training"
)
wall_time = ""
if args.print_wall_time:
wall_time = " ({})".format(time.strftime("%H:%M"))
print(
"Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format(
str_run_type, j + 1, nbatches, k, gT
)
+ " loss {:.6f}".format(train_loss)
+ wall_time,
flush=True,
)
log_iter = nbatches * k + j + 1
# writer.add_scalar("Train/Loss", train_loss, log_iter)
total_iter = 0
total_samp = 0
# testing
if should_test:
epoch_num_float = (j + 1) / len(train_ld) + k + 1
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k)
)
model_metrics_dict, is_best = inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter,
)
if (
is_best
and not (args.save_model == "")
and not args.inference_only
):
model_metrics_dict["epoch"] = k
model_metrics_dict["iter"] = j + 1
model_metrics_dict["train_loss"] = train_loss
model_metrics_dict["total_loss"] = total_loss
model_metrics_dict[
"opt_state_dict"
] = optimizer.state_dict()
print("Saving model to {}".format(args.save_model))
torch.save(model_metrics_dict, args.save_model)
k += 1 # nepochs
else:
print("Testing for inference only")
inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
)
total_time_end = time_wrap(use_gpu)
if __name__ == "__main__":
run()
| [
"torch.cuda.device_count",
"torch.cuda.synchronize",
"numpy.array",
"torch.cuda.is_available",
"sys.exit",
"torch.set_printoptions",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.random.seed",
"numpy.fromstring",
"numpy.round",
"argparse.ArgumentTypeError",
"torch.save",
"time.time",
... | [((711, 722), 'time.time', 'time.time', ([], {}), '()\n', (720, 722), False, 'import time\n'), ((3571, 3662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Deep Learning Recommendation Model (DLRM)"""'}), "(description=\n 'Train Deep Learning Recommendation Model (DLRM)')\n", (3594, 3662), False, 'import argparse\n'), ((6702, 6738), 'numpy.random.seed', 'np.random.seed', (['args.numpy_rand_seed'], {}), '(args.numpy_rand_seed)\n', (6716, 6738), True, 'import numpy as np\n'), ((6743, 6794), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': 'args.print_precision'}), '(precision=args.print_precision)\n', (6762, 6794), True, 'import numpy as np\n'), ((6799, 6853), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': 'args.print_precision'}), '(precision=args.print_precision)\n', (6821, 6853), False, 'import torch\n'), ((6858, 6897), 'torch.manual_seed', 'torch.manual_seed', (['args.numpy_rand_seed'], {}), '(args.numpy_rand_seed)\n', (6875, 6897), False, 'import torch\n'), ((7661, 7713), 'numpy.fromstring', 'np.fromstring', (['args.arch_mlp_bot'], {'dtype': 'int', 'sep': '"""-"""'}), "(args.arch_mlp_bot, dtype=int, sep='-')\n", (7674, 7713), True, 'import numpy as np\n'), ((7779, 7816), 'dlrm_data.make_criteo_data_and_loaders', 'dp.make_criteo_data_and_loaders', (['args'], {}), '(args)\n', (7810, 7816), True, 'import dlrm_data as dp\n'), ((8147, 8163), 'numpy.array', 'np.array', (['ln_emb'], {}), '(ln_emb)\n', (8155, 8163), True, 'import numpy as np\n'), ((8348, 8366), 'numpy.asarray', 'np.asarray', (['ln_emb'], {}), '(ln_emb)\n', (8358, 8366), True, 'import numpy as np\n'), ((9046, 9102), 'numpy.fromstring', 'np.fromstring', (['arch_mlp_top_adjusted'], {'dtype': 'int', 'sep': '"""-"""'}), "(arch_mlp_top_adjusted, dtype=int, sep='-')\n", (9059, 9102), True, 'import numpy as np\n'), ((675, 699), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (697, 699), False, 'import 
torch\n'), ((7251, 7276), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7274, 7276), False, 'import torch\n'), ((7302, 7350), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.numpy_rand_seed'], {}), '(args.numpy_rand_seed)\n', (7328, 7350), False, 'import torch\n'), ((7418, 7443), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7441, 7443), False, 'import torch\n'), ((7461, 7484), 'torch.device', 'torch.device', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (7473, 7484), False, 'import torch\n'), ((7563, 7582), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7575, 7582), False, 'import torch\n'), ((14446, 14534), 'torch.autograd.profiler.profile', 'torch.autograd.profiler.profile', ([], {'enabled': '(False)', 'use_cuda': 'use_gpu', 'record_shapes': '(True)'}), '(enabled=False, use_cuda=use_gpu,\n record_shapes=True)\n', (14477, 14534), False, 'import torch\n'), ((2457, 2481), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2479, 2481), False, 'import torch\n'), ((8829, 8923), 'sys.exit', 'sys.exit', (["('ERROR: --arch-interaction-op=' + args.arch_interaction_op +\n ' is not supported')"], {}), "('ERROR: --arch-interaction-op=' + args.arch_interaction_op +\n ' is not supported')\n", (8837, 8923), False, 'import sys\n'), ((11010, 11073), 'sys.exit', 'sys.exit', (['"""GPU version of Adagrad is not supported by PyTorch."""'], {}), "('GPU version of Adagrad is not supported by PyTorch.')\n", (11018, 11073), False, 'import sys\n'), ((924, 1011), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is not a valid dash separated list of ints' % value)"], {}), "('%s is not a valid dash separated list of ints' %\n value)\n", (950, 1011), False, 'import argparse\n'), ((1263, 1353), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is not a valid dash separated list of floats' % value)"], {}), "(\n '%s is not a valid dash separated 
list of floats' % value)\n", (1289, 1353), False, 'import argparse\n'), ((12259, 12286), 'torch.load', 'torch.load', (['args.load_model'], {}), '(args.load_model)\n', (12269, 12286), False, 'import torch\n'), ((12786, 12805), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12798, 12805), False, 'import torch\n'), ((2807, 2826), 'numpy.round', 'np.round', (['S_test', '(0)'], {}), '(S_test, 0)\n', (2815, 2826), True, 'import numpy as np\n'), ((12551, 12571), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12563, 12571), False, 'import torch\n'), ((19246, 19293), 'torch.save', 'torch.save', (['model_metrics_dict', 'args.save_model'], {}), '(model_metrics_dict, args.save_model)\n', (19256, 19293), False, 'import torch\n'), ((17198, 17220), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (17211, 17220), False, 'import time\n')] |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from PIL import Image
import matplotlib.cm as Pltcolormap
from . import utils
class GradCAM:
"""
Gradient-weighted Class Activation Mapping (Grad-CAM)
Get a coarse heatmap of activation highlighting important location in
input image based on the gradient of (last) convolutional layer to
achieve "visual explanation" for CNN prediction
Reference: https://arxiv.org/abs/1610.02391
"""
def __init__(self, model, transform, target_layer, num_classes=1000, cuda=False):
self.model = model
self.model.train(False)
self.cuda = cuda
if self.cuda:
self.model.cuda()
self.transform = transform
self.target_layer = target_layer
self.num_classes = num_classes
# define hook function
def forward_hook(module, input, output):
self.feature_maps = output.data
def backward_hook(module, grad_input, grad_output):
self.gradients = grad_output[0].data
# register hook function
for name, module in self.model.named_modules():
if name == self.target_layer:
module.register_forward_hook(forward_hook)
module.register_backward_hook(backward_hook)
def forward(self, img):
""" The forward pass
Argument:
img (PIL Image) - the (unprocessed) input image
Return:
Tensor/Dict - the output of the model
"""
# preprocess the PIL image first
img_tensor = self.transform(img)
img_tensor.unsqueeze_(0) # this add a dimension as a dummy "batch"
img_variable = Variable(img_tensor)
self.output = self.model(img_variable)
return self.output.data
def backward(self, idx, sorted_idx=False):
self.model.zero_grad()
self.output.backward(gradient=utils.one_hot_tensor(idx, self.num_classes), retain_graph=True)
def get_gradcam_intensity(self, idx, sorted_idx=False, backward=True):
""" The (partial) backward pass and generate GradCAM intensity value for each pixel
Argument:
idx (int) - the idx of the class to be localize by GradCAM
sorted_idx (bool) - if sorted_idx==True, the idx[0] will be the class with highest score,
idx[1] will be the class with second highest score and so on
backward (bool) - perform backward pass or not,
under normal usecase, it should be true
Return:
Tensor (size == kernal size of target layer)
- GradCAM intensity value for each pixel
"""
# implement sorted_idx !!!
if backward:
self.backward(idx)
# self.feature_maps # 1x2048x7x7
# self.gradients # 1x2048x7x7
# GAP = torch.nn.AvgPool2d(self.gradients.size()[2:])
weights = F.avg_pool2d(Variable(self.gradients), kernel_size=self.gradients.size()[2:]).data
gradCAM_intensity = torch.FloatTensor(self.feature_maps.size()[2:]).zero_()
for feature_map, weight in zip(self.feature_maps[0], weights[0]):
gradCAM_intensity += feature_map * weight
#relu
gradCAM_intensity.clamp_(min=0)
return gradCAM_intensity
@staticmethod
def apply_color_map(intensity, img):
""" Apply the color map on the original image with GradCAM intensity value generated
by GradCAM.backward()
Argument:
intensity (Tensor) - GradCAM intensity value generated by GradCAM.backward()
img (PIL image) - The image that GradCAM intensity were to be apply to,
suppose to be the original image
Return:
PIL image - The img with GradCAM intensity applied to
Numpy array - The intensity same size as img (range: [0-1])
"""
# normalize
intensity = utils.normalize(intensity)
# use PIL bilinear resize interpolation
# note: *255 -> resize -> /255.0 (divide for heat map input[0,1]) is === resize
pil = Image.fromarray(intensity.cpu().numpy())
pil = pil.resize(img.size, resample=Image.BILINEAR)
intensity = np.asarray(pil)
# get the color map from matplotlib
color_map = Pltcolormap.get_cmap('jet')
heat_map = color_map(intensity)
heat_map[:,:,3] /= 2.0
heat_map *= 255
original_img = np.asarray(img)
return Image.fromarray(np.uint8((heat_map[:,:,:3]+original_img)/2.0)), intensity
class Backpropagation:
"""
Vanilla Backpropagation
Backpropagate to input then get the gradient at input
"""
def __init__(self, model, transform, num_classes=1000, cuda=False):
self.model = model
# self.model = model
self.model.train(False)
self.cuda = cuda
if self.cuda:
self.model.cuda()
self.transform = transform
self.num_classes = num_classes
def forward(self, img):
""" The forward pass
Argument:
img (PIL Image) - the (unprocessed) input image
Return:
Tensor/Dict - the output of the model
"""
img_tensor = self.transform(img)
img_tensor.unsqueeze_(0) # this add a dimension as a dummy "batch"
img_variable = Variable(img_tensor, requires_grad=True)
self.input = img_variable
self.output = self.model(img_variable)
return self.output.data
def backward(self, idx):
self.model.zero_grad()
self.output.backward(gradient=utils.one_hot_tensor(idx, self.num_classes), retain_graph=True)
def get_input_gradient(self, idx, sorted_idx=False, backward=True):
""" The backward pass and return the gradient at input image
Argument:
idx (int) - the idx of the class to be localize by GradCAM
sorted_idx (bool) - if sorted_idx==True, the idx[0] will be the class with highest score,
idx[1] will be the class with second highest score and so on
backward (bool) - perform backward pass or not,
under normal usecase, it should be true
Return:
PIL image - The RGB gradient images generated based on the gradient value
Numpy array (nxnxc) - The gradient value for each pixel
"""
#implement sorted_idx !!!
if backward:
self.backward(idx)
# 1x3x224x224 -> 224x224x3
gradient = self.input.grad.data.cpu().numpy()[0].transpose(1, 2, 0)
gradient_img_arr = (utils.normalize(gradient)*255).astype('uint8')
return Image.fromarray(gradient_img_arr), gradient
class GuidedBackpropagation(Backpropagation):
"""
Guided Backpropagation
x.grad or img.grad is what we wanted
GuidedBackprop: input>0 * gradin>0 * gradin on relu.backward
but original relu had implemented relu gradin = input>0 * gradin
thus we only need to add gradin>0 * relu gradin
Reference: https://arxiv.org/abs/1412.6806
NOTE:
The gradient on back propagation of the model will be modify, if this is
not the desired behaeviour, construct this class with a deepcopy of model
"""
def __init__(self, model, transform, num_classes, cuda=False):
super().__init__(model, transform, num_classes, cuda)
# define hook function
def backward_hook(module, grad_input, grad_output):
# Guided Backpropagation
# Only allows positive gradient to backflow
return (torch.clamp(grad_input[0], min=0.0),)
# register hook function on relu module
for name, module in self.model.named_modules():
if isinstance(module, torch.nn.ReLU):
module.register_backward_hook(backward_hook)
class GuidedGradCAM:
"""
Guided Grad-CAM
Use the heatmap of Grad-CAM to produce a localized guided-backprop saliency
Reference: https://arxiv.org/abs/1610.02391
NOTE:
This class is present for completeness and consistancy.
For general visualization usage, please use the Visualize wrapper class
"""
def __init__(self, model, transform, target_layer, cuda=False):
self.model = model
self.cuda = cuda
self.transform = transform
self.target_layer = target_layer
self.GradCAM = GradCAM(model, transform, target_layer, cuda)
self.GuidedBackprop = GuidedBackpropagation(copy.deepcopy(model),
transform, cuda)
def forward(self, img):
""" The forward pass
Argument:
img (Tensor) - the (unprocessed) input image
Return:
Tensor/Dict
"""
pass
| [
"numpy.uint8",
"PIL.Image.fromarray",
"numpy.asarray",
"torch.autograd.Variable",
"matplotlib.cm.get_cmap",
"torch.clamp"
] | [((1738, 1758), 'torch.autograd.Variable', 'Variable', (['img_tensor'], {}), '(img_tensor)\n', (1746, 1758), False, 'from torch.autograd import Variable\n'), ((4300, 4315), 'numpy.asarray', 'np.asarray', (['pil'], {}), '(pil)\n', (4310, 4315), True, 'import numpy as np\n'), ((4381, 4408), 'matplotlib.cm.get_cmap', 'Pltcolormap.get_cmap', (['"""jet"""'], {}), "('jet')\n", (4401, 4408), True, 'import matplotlib.cm as Pltcolormap\n'), ((4528, 4543), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (4538, 4543), True, 'import numpy as np\n'), ((5428, 5468), 'torch.autograd.Variable', 'Variable', (['img_tensor'], {'requires_grad': '(True)'}), '(img_tensor, requires_grad=True)\n', (5436, 5468), False, 'from torch.autograd import Variable\n'), ((6763, 6796), 'PIL.Image.fromarray', 'Image.fromarray', (['gradient_img_arr'], {}), '(gradient_img_arr)\n', (6778, 6796), False, 'from PIL import Image\n'), ((2992, 3016), 'torch.autograd.Variable', 'Variable', (['self.gradients'], {}), '(self.gradients)\n', (3000, 3016), False, 'from torch.autograd import Variable\n'), ((4576, 4627), 'numpy.uint8', 'np.uint8', (['((heat_map[:, :, :3] + original_img) / 2.0)'], {}), '((heat_map[:, :, :3] + original_img) / 2.0)\n', (4584, 4627), True, 'import numpy as np\n'), ((7682, 7717), 'torch.clamp', 'torch.clamp', (['grad_input[0]'], {'min': '(0.0)'}), '(grad_input[0], min=0.0)\n', (7693, 7717), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import sys
class LabelPath:
def __init__(self, label_inds, labels):
self._labels = labels
self._label_num = len(label_inds)
self._label_inds = label_inds
self._hit_count = 0.0
self._label2rank = dict()
for label_ind in label_inds:
self._label2rank[label_ind] = 0
self._is_cand = False
self._set_cand_rank = 0
def try_hit(self, label_ind, label_rank):
"""
try to hit label in current path
:param label_ind: target label index
:param label_rank: label rank
:return: current hit ratio
"""
if label_ind in self._label2rank:
self._label2rank[label_ind] = label_rank
self._hit_count += 1
return self._hit_count / self._label_num
def get_org_label_ind(self):
return self._label_inds[-1]
def get_hit_ratio(self):
return self._hit_count / self._label_num
def set_cand(self, rank):
self._is_cand = True
self._set_cand_rank = rank
def is_cand(self):
return self._is_cand
def get_set_cand_rank(self):
return self._set_cand_rank
def get_pred(self):
pred_label = self._label_inds[0]
for l in self._label2rank:
if self._label2rank[l] > 0:
pred_label = max(pred_label, l)
return pred_label
def dual_infer(scores, org2path, label2index, index2label):
"""
双向推断
1. 自顶向下,按排名从高往低扫描,保存命中率最高的若干路径
2. 自底向上,找出排名最高且最具体的若干label
3. 比对,产生最终预测结果
"""
index2label = np.array(index2label)
# all original label indexes
org_label_inds = set(org2path.keys())
# all label paths
all_paths = [LabelPath(path, index2label[path]) for path in org2path.values()]
# find the top 2 original label predictions
# fill paths with top k predication
ranked_inds = np.argsort(scores).tolist()
ranked_inds.reverse() # descending
ind2ranks = [0] * len(label2index.keys()) # label_ind 2 rank
cand_org_inds = []
cand_paths = []
# searching
iter = 0
while len(cand_org_inds) < 2:
rank = iter + 1
ind2ranks[ranked_inds[iter]] = rank
if ranked_inds[iter] in org_label_inds:
cand_org_inds.append([ranked_inds[iter], rank])
# try to fill all paths
for pi, path in enumerate(all_paths):
if len(cand_org_inds) == 0:
hit_ratio = path.try_hit(ranked_inds[iter], rank)
if hit_ratio > 0.5 and not path.is_cand():
cand_paths.append(path)
path.set_cand(rank)
iter += 1
# collect ranks of labels on the top 2 paths
pred_paths = [org2path[org_ind_rank[0]] for org_ind_rank in cand_org_inds]
pred_path_ranks = [[0] * len(org2path[org_ind_rank[0]]) for org_ind_rank in cand_org_inds]
for p, path in enumerate(pred_paths):
for i, l in enumerate(path):
pred_path_ranks[p][i] = ind2ranks[l]
# default predication
final_pred = pred_paths[0][-1]
top_org_rank_diff = cand_org_inds[1][1] - cand_org_inds[0][1]
if cand_org_inds[0][1] < 40:
# 正确的几率很高,除非top1与top2非常相似
diff_interval = 40
overlap_ratio_thr = 0.85
elif cand_org_inds[0][1] < 120:
diff_interval = 80
overlap_ratio_thr = 0.4
else:
# 正确的几率很低
# sort candidate paths according to the set_cand_rank
final_pred = cand_paths[0].get_pred()
diff_interval = 0
if top_org_rank_diff < diff_interval:
# top1 is close to top2
# cal top1 and top2 overlap
overlap = set(pred_paths[0]) & set(pred_paths[1])
overlap_ratio = len(overlap) * 1.0 / min(len(pred_paths[0]), len(pred_paths[1]))
# overlap is large
if overlap_ratio > overlap_ratio_thr:
final_pred = max(overlap)
return final_pred, cand_org_inds | [
"numpy.argsort",
"numpy.array"
] | [((1613, 1634), 'numpy.array', 'np.array', (['index2label'], {}), '(index2label)\n', (1621, 1634), True, 'import numpy as np\n'), ((1923, 1941), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (1933, 1941), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
## @package som_cm.som
#
# Implementation of SOM.
# @author tody
# @date 2015/08/14
import os
import numpy as np
import matplotlib.pyplot as plt
from som_cm.np.norm import normVectors
## SOM parameter.
class SOMParam:
# @param h image grid size.
# @param L0 initial parameter for learning restraint.
# @param lmbd iteration limit.
# @param dimensoin target dimension for SOM.
def __init__(self, h=32, L0=0.16, lmbd=0.6, sigma0=0.3, dimension=2):
self.h = h
self.L0 = L0
self.lmbd = lmbd
self.sigma0 = sigma0
self.dimension = dimension
## Implementation of SOM.
#
# SOM with numpy functions.
# - Compute nodes as n x 3 vector.
# - Avoid the loops for x and y.
# - xy coordinates are cached as n x 2 vector.
class SOM:
## Constructor
# @param samples training samples.
# @param param SOM parameter.
def __init__(self, samples, param=SOMParam()):
self._h = param.h
self._dimension = param.dimension
self._samples = samples
self._L0 = param.L0
self._nodes = self._initialNode(param.h, param.dimension)
num_samples = self.numSamples()
self._lmbd = param.lmbd * num_samples
self._sigma0 = param.sigma0 * param.h
self._computePositions(param.h, param.dimension)
self._t = 0
## Return the number of training samples.
def numSamples(self):
return len(self._samples)
## Return the current node image.
def nodeImage(self):
if self._dimension == 1:
return self._nodeImage1D()
else:
return self._nodeImage2D()
## Return the current time step t.
def currentStep(self):
return self._t
## Return if the training is finished.
def finished(self):
return self._t == self.numSamples()
## Process all training process.
def trainAll(self):
while self._t < len(self._samples):
self._train(self._t)
self._t += 1
## Process training step t to t+1.
def trainStep(self):
if self._t < len(self._samples):
self._train(self._t)
self._t += 1
def _nodeImage1D(self):
h = 10
w = self._h
node_image = np.zeros((h, w, 3))
for y in range(h):
node_image[y, :, :] = self._nodes[:, :]
return node_image
def _nodeImage2D(self):
return self._nodes.reshape(self._h, self._h, 3)
## Initial node.
def _initialNode(self, h, dimension):
if dimension == 1:
return self._initialNode1D(h)
else:
return self._initialNode2D(h)
def _initialNode1D(self, h):
return np.random.rand(h, 3)
def _initialNode2D(self, h):
return np.random.rand(h, h, 3).reshape(-1, 3)
## Compute position.
def _computePositions(self, h, dimension):
if dimension == 1:
self._computePositions1D(h)
else:
self._computePositions2D(h)
def _computePositions1D(self, h):
x = np.arange(h)
self._positions = x
def _computePositions2D(self, h):
x = np.arange(h)
y = np.arange(h)
xs, ys = np.meshgrid(x, y)
xs = xs.flatten()
ys = ys.flatten()
self._positions = np.array([xs, ys]).T
## Train process.
def _train(self, t):
sample = self._samples[t]
# bmu
bmu_id = self._bmu(sample)
bmu_position = self._positions[bmu_id]
# update weight
D = normVectors(self._positions - bmu_position)
L = self._learningRestraint(t)
T = self._neighborhoodFunction(t, D)
# update nodes
for ci in range(3):
self._nodes[:, ci] += L * T * (sample[ci] - self._nodes[:, ci])
## BMU: best matching unit.
# Return the unit of minimum distance from the sample.
def _bmu(self, sample):
norms = normVectors(self._nodes - sample)
bmu_id = np.argmin(norms)
return bmu_id
## Neighborhood function: exp (-D^2 / 2 sigma^2)
def _neighborhoodFunction(self, t, D):
sigma = self._sigma0 * np.exp(-t / self._lmbd)
Theta = np.exp(-D ** 2 / (2 * sigma ** 2))
return Theta
## Learning restraint: L0 exp (-t / lambda)
def _learningRestraint(self, t):
return self._L0 * np.exp(-t / self._lmbd)
## Plotting class with matplot.
class SOMPlot:
## Constructor
# @param samples training samples.
# @param param SOM parameter.
def __init__(self, som):
self._som = som
self._node_image = None
self._plot3d = None
self._step_text = None
## Return the updated image.
def updateImage(self):
node_image = self._som.nodeImage()
if self._node_image is None:
self._node_image = plt.imshow(node_image)
else:
self._node_image.set_array(node_image)
return self._node_image
## Return the current step status.
def updateStepText(self):
if self._step_text is None:
self._step_text = plt.text(1, 1, '', fontsize=15)
else:
if self._som.finished():
self._step_text.set_text('')
else:
self._step_text.set_text('step: %s' % self._som.currentStep())
return self._step_text
## Plot color manifold in 3D.
def plot3D(self, ax):
node_image = self._som.nodeImage()
colors = node_image.reshape(-1, 3)
plot3d = ax.scatter(colors[:, 0], colors[:, 1], colors[:, 2],
color=colors)
ax.set_xlabel('R', x=10, y=10)
ax.set_ylabel('G')
ax.set_zlabel('B')
ax.set_zlim3d([-0.1, 1.1])
ax.set_ylim3d([-0.1, 1.1])
ax.set_xlim3d([-0.1, 1.1])
ax.set_xticks(np.linspace(0.0, 1.0, 2))
ax.set_yticks(np.linspace(0.0, 1.0, 2))
ax.set_zticks(np.linspace(0.0, 1.0, 2))
return plot3d
## Animation function for FuncAnimation.
def trainAnimation(self, *args):
image = self.updateImage()
text = self.updateStepText()
self._som.trainStep()
return [image, text]
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.text",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.argmin",
"numpy.meshgrid",
"som_cm.np.norm.normVectors",
"numpy.arange"
] | [((2326, 2345), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (2334, 2345), True, 'import numpy as np\n'), ((2774, 2794), 'numpy.random.rand', 'np.random.rand', (['h', '(3)'], {}), '(h, 3)\n', (2788, 2794), True, 'import numpy as np\n'), ((3128, 3140), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (3137, 3140), True, 'import numpy as np\n'), ((3220, 3232), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (3229, 3232), True, 'import numpy as np\n'), ((3245, 3257), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (3254, 3257), True, 'import numpy as np\n'), ((3275, 3292), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3286, 3292), True, 'import numpy as np\n'), ((3608, 3651), 'som_cm.np.norm.normVectors', 'normVectors', (['(self._positions - bmu_position)'], {}), '(self._positions - bmu_position)\n', (3619, 3651), False, 'from som_cm.np.norm import normVectors\n'), ((4001, 4034), 'som_cm.np.norm.normVectors', 'normVectors', (['(self._nodes - sample)'], {}), '(self._nodes - sample)\n', (4012, 4034), False, 'from som_cm.np.norm import normVectors\n'), ((4052, 4068), 'numpy.argmin', 'np.argmin', (['norms'], {}), '(norms)\n', (4061, 4068), True, 'import numpy as np\n'), ((4259, 4293), 'numpy.exp', 'np.exp', (['(-D ** 2 / (2 * sigma ** 2))'], {}), '(-D ** 2 / (2 * sigma ** 2))\n', (4265, 4293), True, 'import numpy as np\n'), ((3371, 3389), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (3379, 3389), True, 'import numpy as np\n'), ((4219, 4242), 'numpy.exp', 'np.exp', (['(-t / self._lmbd)'], {}), '(-t / self._lmbd)\n', (4225, 4242), True, 'import numpy as np\n'), ((4427, 4450), 'numpy.exp', 'np.exp', (['(-t / self._lmbd)'], {}), '(-t / self._lmbd)\n', (4433, 4450), True, 'import numpy as np\n'), ((4913, 4935), 'matplotlib.pyplot.imshow', 'plt.imshow', (['node_image'], {}), '(node_image)\n', (4923, 4935), True, 'import matplotlib.pyplot as plt\n'), ((5171, 5202), 'matplotlib.pyplot.text', 'plt.text', (['(1)', 
'(1)', '""""""'], {'fontsize': '(15)'}), "(1, 1, '', fontsize=15)\n", (5179, 5202), True, 'import matplotlib.pyplot as plt\n'), ((5903, 5927), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(2)'], {}), '(0.0, 1.0, 2)\n', (5914, 5927), True, 'import numpy as np\n'), ((5951, 5975), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(2)'], {}), '(0.0, 1.0, 2)\n', (5962, 5975), True, 'import numpy as np\n'), ((5999, 6023), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(2)'], {}), '(0.0, 1.0, 2)\n', (6010, 6023), True, 'import numpy as np\n'), ((2844, 2867), 'numpy.random.rand', 'np.random.rand', (['h', 'h', '(3)'], {}), '(h, h, 3)\n', (2858, 2867), True, 'import numpy as np\n')] |
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
from numpy import random
import healsparse
class GetSetTestCase(unittest.TestCase):
    """Tests for ``HealSparseMap.__getitem__`` and ``HealSparseMap.__setitem__``.

    Covers single-pixel, slice, array, and list indexing on float, recarray,
    and integer maps, plus rejection of unsupported index types.
    """

    # Shared map geometry for every test.
    nside_coverage = 32
    nside_map = 128

    def _make_float_map(self, npix_set=5000, seed=12345):
        """Build a float healpix map with pixels [0, npix_set) set to random
        values and all other pixels UNSEEN.

        Parameters
        ----------
        npix_set : `int`, optional
            Number of leading pixels to fill with random values.
        seed : `int`, optional
            Random seed, fixed so every test sees the same map.

        Returns
        -------
        full_map : `np.ndarray`
            The dense healpix map.
        sparse_map : `healsparse.HealSparseMap`
            The equivalent sparse map.
        """
        random.seed(seed)
        full_map = np.zeros(hp.nside2npix(self.nside_map)) + hp.UNSEEN
        full_map[0: npix_set] = random.random(size=npix_set)
        sparse_map = healsparse.HealSparseMap(healpix_map=full_map,
                                              nside_coverage=self.nside_coverage)
        return full_map, sparse_map

    def _make_recarray_map(self, dtype, npix_set=5000, seed=12345):
        """Build a recarray sparse map with pixels [0, npix_set) set.

        ``col1`` and ``col2`` are filled with random floats; an optional
        integer ``col3`` (if present in ``dtype``) is filled with ones.

        Parameters
        ----------
        dtype : `list` of `tuple`
            Structured dtype specification; must include ``col1`` (primary)
            and ``col2``.
        npix_set : `int`, optional
            Number of leading pixels to fill.
        seed : `int`, optional
            Random seed, fixed so every test sees the same map.

        Returns
        -------
        sparse_map : `healsparse.HealSparseMap`
        values : `np.ndarray`
            The recarray that was written into the map.
        """
        random.seed(seed)
        sparse_map = healsparse.HealSparseMap.make_empty(self.nside_coverage,
                                                      self.nside_map,
                                                      dtype, primary='col1')
        pixel = np.arange(npix_set)
        values = np.zeros_like(pixel, dtype=dtype)
        values['col1'] = random.random(size=pixel.size)
        values['col2'] = random.random(size=pixel.size)
        if 'col3' in values.dtype.names:
            values['col3'] = np.ones(pixel.size, dtype=np.int32)
        sparse_map.update_values_pix(pixel, values)
        return sparse_map, values

    def test_getitem_single(self):
        """Test __getitem__ with a single index."""
        full_map, sparse_map = self._make_float_map()

        # Grab a single item, in range.
        testing.assert_almost_equal(sparse_map[100], full_map[100])

        # Grab a single item out of range (returns the UNSEEN sentinel).
        testing.assert_almost_equal(sparse_map[6000], full_map[6000])

    def test_getitem_recarray_single(self):
        """Test __getitem__ from a recarray map."""
        sparse_map, values = self._make_recarray_map([('col1', 'f8'), ('col2', 'f8')])

        # Test name access: yields a single-column map over the same pixels.
        test = sparse_map['col1']
        testing.assert_array_almost_equal(test.get_values_pix(test.valid_pixels),
                                          values['col1'])

        # Test index access: yields a single record.
        test_item = sparse_map[1000]
        testing.assert_almost_equal(test_item['col1'], values['col1'][1000])
        testing.assert_almost_equal(test_item['col2'], values['col2'][1000])

        # An unset pixel yields UNSEEN in every column.
        test_item = sparse_map[10000]
        testing.assert_almost_equal(test_item['col1'], hp.UNSEEN)
        testing.assert_almost_equal(test_item['col2'], hp.UNSEEN)

    def test_getitem_slice(self):
        """Test __getitem__ using slices."""
        full_map, sparse_map = self._make_float_map()

        # Test in-range, overlapping, and out-of-range slices.
        testing.assert_array_almost_equal(sparse_map[100: 500], full_map[100: 500])
        testing.assert_array_almost_equal(sparse_map[4500: 5500], full_map[4500: 5500])
        testing.assert_array_almost_equal(sparse_map[5500: 5600], full_map[5500: 5600])

        # Test stepped slices.
        testing.assert_array_almost_equal(sparse_map[100: 500: 2], full_map[100: 500: 2])
        testing.assert_array_almost_equal(sparse_map[4500: 5500: 2], full_map[4500: 5500: 2])
        testing.assert_array_almost_equal(sparse_map[5500: 5600: 2], full_map[5500: 5600: 2])

        # Test the full-map slice.
        testing.assert_array_almost_equal(sparse_map[:], full_map[:])

    def test_getitem_array(self):
        """Test __getitem__ using an integer array."""
        full_map, sparse_map = self._make_float_map()

        indices = np.array([1, 2, 100, 500, 10000])
        testing.assert_array_almost_equal(sparse_map[indices], full_map[indices])
        testing.assert_almost_equal(sparse_map[indices[0]], full_map[indices[0]])

        # Float indices must be rejected.
        indices = np.array([1., 2, 100, 500, 10000])
        self.assertRaises(IndexError, sparse_map.__getitem__, indices)
        self.assertRaises(IndexError, sparse_map.__getitem__, indices[0])

    def test_getitem_list(self):
        """Test __getitem__ using a list."""
        full_map, sparse_map = self._make_float_map()

        indices = [1, 2, 100, 500, 10000]
        testing.assert_array_almost_equal(sparse_map[indices], full_map[indices])

        # Float indices must be rejected.
        indices = [1.0, 2, 100, 500, 10000]
        self.assertRaises(IndexError, sparse_map.__getitem__, indices)

    def test_getitem_other(self):
        """Test __getitem__ using unsupported index types."""
        _, sparse_map = self._make_float_map()

        # Tuples and bare floats are not valid indices.
        indices = (1, 2, 3, 4)
        self.assertRaises(IndexError, sparse_map.__getitem__, indices)

        indices = 5.0
        self.assertRaises(IndexError, sparse_map.__getitem__, indices)

    def test_setitem_single(self):
        """Test __setitem__ with a single index."""
        full_map, sparse_map = self._make_float_map()

        # Set a pixel that is already covered ...
        sparse_map[1000] = 1.0
        full_map[1000] = 1.0
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # ... and one that was previously unset.
        sparse_map[10000] = 1.0
        full_map[10000] = 1.0
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

    def test_setitem_recarray_single(self):
        """Test __setitem__ on a recarray map."""
        dtype = [('col1', 'f8'), ('col2', 'f8'), ('col3', 'i4')]
        sparse_map, _ = self._make_recarray_map(dtype)

        value = np.zeros(1, dtype=dtype)
        value['col1'] = 1.0
        value['col2'] = 1.0
        value['col3'] = 10

        # Setting a full record updates every column, visible via both
        # column-then-pixel and pixel-then-column access.
        sparse_map[1000] = value
        testing.assert_almost_equal(sparse_map['col1'][1000], 1.0)
        testing.assert_almost_equal(sparse_map['col2'][1000], 1.0)
        self.assertEqual(sparse_map['col3'][1000], 10)
        testing.assert_almost_equal(sparse_map[1000]['col1'], 1.0)
        testing.assert_almost_equal(sparse_map[1000]['col2'], 1.0)
        self.assertEqual(sparse_map[1000]['col3'], 10)

        # A column name is not a valid __setitem__ index.
        self.assertRaises(IndexError, sparse_map.__setitem__, 'col1', 1.0)

        # Try setting individual columns... test both ways of calling
        # although only the one works for setting.
        sparse_map['col1'][100] = 100.0
        testing.assert_almost_equal(sparse_map['col1'][100], 100.0)
        testing.assert_almost_equal(sparse_map[100]['col1'], 100.0)

        sparse_map['col2'][100] = 100.0
        testing.assert_almost_equal(sparse_map['col2'][100], 100.0)
        testing.assert_almost_equal(sparse_map[100]['col2'], 100.0)

        sparse_map['col3'][100] = 100
        self.assertEqual(sparse_map['col3'][100], 100)
        self.assertEqual(sparse_map[100]['col3'], 100)

        # Slice assignment per column.
        sparse_map['col1'][100: 200] = np.zeros(100)
        testing.assert_array_almost_equal(sparse_map['col1'][100: 200], 0.0)
        testing.assert_array_almost_equal(sparse_map[100: 200]['col1'], 0.0)

        sparse_map['col2'][100: 200] = np.zeros(100)
        testing.assert_array_almost_equal(sparse_map['col2'][100: 200], 0.0)
        testing.assert_array_almost_equal(sparse_map[100: 200]['col2'], 0.0)

        sparse_map['col3'][100: 200] = np.zeros(100, dtype=np.int32)
        testing.assert_array_equal(sparse_map['col3'][100: 200], 0)
        testing.assert_array_equal(sparse_map[100: 200]['col3'], 0)

        # Finally, assert that we cannot set new pixels via a single column.
        self.assertRaises(RuntimeError, sparse_map['col1'].__setitem__,
                          10000, 10.0)

    def test_setitem_slice(self):
        """Test __setitem__ with slices."""
        full_map, sparse_map = self._make_float_map()

        # Assignment broadcasts from an array of length 1 or the slice length.
        sparse_map[100: 500] = np.array([1.0])
        full_map[100: 500] = np.array([1.0])
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        sparse_map[1000: 1500] = np.ones(500)
        full_map[1000: 1500] = np.ones(500)
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # Stepped slice into previously unset pixels.
        sparse_map[10000: 11000: 2] = np.ones(500)
        full_map[10000: 11000: 2] = np.ones(500)
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # Full-map assignment.
        sparse_map[:] = np.array([1.0])
        full_map[:] = np.array([1.0])
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

    def test_setitem_array(self):
        """Test __setitem__ with an integer array."""
        full_map, sparse_map = self._make_float_map()

        indices = np.array([1, 2, 100, 500, 10000])
        sparse_map[indices] = np.array([1.0])
        full_map[indices] = np.array([1.0])
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # Simple in-place operation.
        sparse_map[indices] += 1.0
        full_map[indices] += 1.0
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        indices = np.array([1, 2, 100, 500, 10000]) + 100
        sparse_map[indices] = np.ones(len(indices))
        full_map[indices] = np.ones(len(indices))
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # Float indices must be rejected.
        indices = np.array([1., 2, 100, 500, 10000])
        self.assertRaises(IndexError, sparse_map.__setitem__, indices, 1.0)

    def test_setitem_list(self):
        """Test __setitem__ with a list."""
        full_map, sparse_map = self._make_float_map()

        indices = [1, 2, 100, 500, 10000]
        sparse_map[indices] = np.array([1.0])
        full_map[indices] = np.array([1.0])
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        indices = [101, 102, 200, 600, 10100]
        sparse_map[indices] = np.ones(len(indices))
        full_map[indices] = np.ones(len(indices))
        testing.assert_array_almost_equal(sparse_map.generate_healpix_map(),
                                          full_map)

        # Float indices must be rejected.
        indices = [1., 2, 100, 500, 10000]
        self.assertRaises(IndexError, sparse_map.__setitem__, indices, 1.0)

    def test_setitem_other(self):
        """Test __setitem__ with unsupported index types."""
        _, sparse_map = self._make_float_map()

        # Tuples and bare floats are not valid indices.
        indices = (1, 2, 3, 4)
        self.assertRaises(IndexError, sparse_map.__setitem__, indices, 1.0)

        indices = 5.0
        self.assertRaises(IndexError, sparse_map.__setitem__, indices, 1.0)

    def test_setitem_integer(self):
        """Test __setitem__ for integer-valued HealSparseMaps."""
        pxnums = np.arange(0, 2000)
        pxvalues = pxnums
        full_map = np.zeros(hp.nside2npix(self.nside_map), dtype=pxvalues.dtype)
        full_map[pxnums] = pxvalues
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage=self.nside_coverage,
                                                      nside_sparse=self.nside_map,
                                                      dtype=pxvalues.dtype)

        # Single-pixel then array assignment.
        sparse_map[pxnums[0]] = pxvalues[0]
        testing.assert_equal(sparse_map[pxnums[0]], full_map[pxnums[0]])

        sparse_map[pxnums] = pxvalues
        testing.assert_array_almost_equal(sparse_map[pxnums], full_map[pxnums])
# Allow running this test module directly with ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.testing.assert_array_almost_equal",
"numpy.ones",
"healsparse.HealSparseMap",
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_almost_equal",
"healsparse.HealSparseMap.make_empty",
"numpy.array",
"numpy.zeros",
"numpy.random.se... | [((13788, 13803), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13801, 13803), False, 'import unittest\n'), ((279, 297), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (290, 297), False, 'from numpy import random\n'), ((446, 470), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (459, 470), False, 'from numpy import random\n'), ((493, 570), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (517, 570), False, 'import healsparse\n'), ((619, 678), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['sparse_map[100]', 'full_map[100]'], {}), '(sparse_map[100], full_map[100])\n', (646, 678), True, 'import numpy.testing as testing\n'), ((730, 791), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['sparse_map[6000]', 'full_map[6000]'], {}), '(sparse_map[6000], full_map[6000])\n', (757, 791), True, 'import numpy.testing as testing\n'), ((910, 933), 'numpy.random.seed', 'random.seed', ([], {'seed': '(12345)'}), '(seed=12345)\n', (921, 933), False, 'from numpy import random\n'), ((1058, 1147), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {'primary': '"""col1"""'}), "(nside_coverage, nside_map, dtype,\n primary='col1')\n", (1093, 1147), False, 'import healsparse\n'), ((1160, 1175), 'numpy.arange', 'np.arange', (['(5000)'], {}), '(5000)\n', (1169, 1175), True, 'import numpy as np\n'), ((1193, 1226), 'numpy.zeros_like', 'np.zeros_like', (['pixel'], {'dtype': 'dtype'}), '(pixel, dtype=dtype)\n', (1206, 1226), True, 'import numpy as np\n'), ((1252, 1282), 'numpy.random.random', 'random.random', ([], {'size': 'pixel.size'}), '(size=pixel.size)\n', (1265, 1282), False, 'from numpy import random\n'), ((1308, 1338), 'numpy.random.random', 'random.random', 
([], {'size': 'pixel.size'}), '(size=pixel.size)\n', (1321, 1338), False, 'from numpy import random\n'), ((1667, 1735), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["test_item['col1']", "values['col1'][1000]"], {}), "(test_item['col1'], values['col1'][1000])\n", (1694, 1735), True, 'import numpy.testing as testing\n'), ((1744, 1812), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["test_item['col2']", "values['col2'][1000]"], {}), "(test_item['col2'], values['col2'][1000])\n", (1771, 1812), True, 'import numpy.testing as testing\n'), ((1860, 1917), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["test_item['col1']", 'hp.UNSEEN'], {}), "(test_item['col1'], hp.UNSEEN)\n", (1887, 1917), True, 'import numpy.testing as testing\n'), ((1926, 1983), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["test_item['col2']", 'hp.UNSEEN'], {}), "(test_item['col2'], hp.UNSEEN)\n", (1953, 1983), True, 'import numpy.testing as testing\n'), ((2089, 2107), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (2100, 2107), False, 'from numpy import random\n'), ((2256, 2280), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (2269, 2280), False, 'from numpy import random\n'), ((2303, 2380), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (2327, 2380), False, 'import healsparse\n'), ((2437, 2510), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[100:500]', 'full_map[100:500]'], {}), '(sparse_map[100:500], full_map[100:500])\n', (2470, 2510), True, 'import numpy.testing as testing\n'), ((2521, 2598), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[4500:5500]', 'full_map[4500:5500]'], {}), '(sparse_map[4500:5500], 
full_map[4500:5500])\n', (2554, 2598), True, 'import numpy.testing as testing\n'), ((2609, 2686), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[5500:5600]', 'full_map[5500:5600]'], {}), '(sparse_map[5500:5600], full_map[5500:5600])\n', (2642, 2686), True, 'import numpy.testing as testing\n'), ((2721, 2798), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[100:500:2]', 'full_map[100:500:2]'], {}), '(sparse_map[100:500:2], full_map[100:500:2])\n', (2754, 2798), True, 'import numpy.testing as testing\n'), ((2811, 2897), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[4500:5500:2]', 'full_map[4500:5500:2]'], {}), '(sparse_map[4500:5500:2], full_map[4500:\n 5500:2])\n', (2844, 2897), True, 'import numpy.testing as testing\n'), ((2905, 2991), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[5500:5600:2]', 'full_map[5500:5600:2]'], {}), '(sparse_map[5500:5600:2], full_map[5500:\n 5600:2])\n', (2938, 2991), True, 'import numpy.testing as testing\n'), ((3019, 3080), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[:]', 'full_map[:]'], {}), '(sparse_map[:], full_map[:])\n', (3052, 3080), True, 'import numpy.testing as testing\n'), ((3188, 3206), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (3199, 3206), False, 'from numpy import random\n'), ((3355, 3379), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (3368, 3379), False, 'from numpy import random\n'), ((3402, 3479), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (3426, 3479), False, 'import healsparse\n'), ((3499, 3532), 'numpy.array', 'np.array', (['[1, 2, 100, 500, 10000]'], {}), '([1, 2, 100, 500, 
10000])\n', (3507, 3532), True, 'import numpy as np\n'), ((3541, 3614), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[indices]', 'full_map[indices]'], {}), '(sparse_map[indices], full_map[indices])\n', (3574, 3614), True, 'import numpy.testing as testing\n'), ((3623, 3696), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['sparse_map[indices[0]]', 'full_map[indices[0]]'], {}), '(sparse_map[indices[0]], full_map[indices[0]])\n', (3650, 3696), True, 'import numpy.testing as testing\n'), ((3716, 3751), 'numpy.array', 'np.array', (['[1.0, 2, 100, 500, 10000]'], {}), '([1.0, 2, 100, 500, 10000])\n', (3724, 3751), True, 'import numpy as np\n'), ((4004, 4022), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (4015, 4022), False, 'from numpy import random\n'), ((4171, 4195), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (4184, 4195), False, 'from numpy import random\n'), ((4218, 4295), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (4242, 4295), False, 'import healsparse\n'), ((4347, 4420), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[indices]', 'full_map[indices]'], {}), '(sparse_map[indices], full_map[indices])\n', (4380, 4420), True, 'import numpy.testing as testing\n'), ((4650, 4668), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (4661, 4668), False, 'from numpy import random\n'), ((4817, 4841), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (4830, 4841), False, 'from numpy import random\n'), ((4864, 4941), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (4888, 4941), 
False, 'import healsparse\n'), ((5245, 5263), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (5256, 5263), False, 'from numpy import random\n'), ((5412, 5436), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (5425, 5436), False, 'from numpy import random\n'), ((5459, 5536), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (5483, 5536), False, 'import healsparse\n'), ((6035, 6058), 'numpy.random.seed', 'random.seed', ([], {'seed': '(12345)'}), '(seed=12345)\n', (6046, 6058), False, 'from numpy import random\n'), ((6199, 6288), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', (['nside_coverage', 'nside_map', 'dtype'], {'primary': '"""col1"""'}), "(nside_coverage, nside_map, dtype,\n primary='col1')\n", (6234, 6288), False, 'import healsparse\n'), ((6301, 6316), 'numpy.arange', 'np.arange', (['(5000)'], {}), '(5000)\n', (6310, 6316), True, 'import numpy as np\n'), ((6334, 6367), 'numpy.zeros_like', 'np.zeros_like', (['pixel'], {'dtype': 'dtype'}), '(pixel, dtype=dtype)\n', (6347, 6367), True, 'import numpy as np\n'), ((6393, 6423), 'numpy.random.random', 'random.random', ([], {'size': 'pixel.size'}), '(size=pixel.size)\n', (6406, 6423), False, 'from numpy import random\n'), ((6449, 6479), 'numpy.random.random', 'random.random', ([], {'size': 'pixel.size'}), '(size=pixel.size)\n', (6462, 6479), False, 'from numpy import random\n'), ((6505, 6540), 'numpy.ones', 'np.ones', (['pixel.size'], {'dtype': 'np.int32'}), '(pixel.size, dtype=np.int32)\n', (6512, 6540), True, 'import numpy as np\n'), ((6610, 6634), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (6618, 6634), True, 'import numpy as np\n'), ((6759, 6817), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map['col1'][1000]", '(1.0)'], {}), 
"(sparse_map['col1'][1000], 1.0)\n", (6786, 6817), True, 'import numpy.testing as testing\n'), ((6826, 6884), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map['col2'][1000]", '(1.0)'], {}), "(sparse_map['col2'][1000], 1.0)\n", (6853, 6884), True, 'import numpy.testing as testing\n'), ((6948, 7006), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map[1000]['col1']", '(1.0)'], {}), "(sparse_map[1000]['col1'], 1.0)\n", (6975, 7006), True, 'import numpy.testing as testing\n'), ((7015, 7073), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map[1000]['col2']", '(1.0)'], {}), "(sparse_map[1000]['col2'], 1.0)\n", (7042, 7073), True, 'import numpy.testing as testing\n'), ((7374, 7433), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map['col1'][100]", '(100.0)'], {}), "(sparse_map['col1'][100], 100.0)\n", (7401, 7433), True, 'import numpy.testing as testing\n'), ((7442, 7501), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map[100]['col1']", '(100.0)'], {}), "(sparse_map[100]['col1'], 100.0)\n", (7469, 7501), True, 'import numpy.testing as testing\n'), ((7551, 7610), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map['col2'][100]", '(100.0)'], {}), "(sparse_map['col2'][100], 100.0)\n", (7578, 7610), True, 'import numpy.testing as testing\n'), ((7619, 7678), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (["sparse_map[100]['col2']", '(100.0)'], {}), "(sparse_map[100]['col2'], 100.0)\n", (7646, 7678), True, 'import numpy.testing as testing\n'), ((7868, 7881), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (7876, 7881), True, 'import numpy as np\n'), ((7890, 7957), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (["sparse_map['col1'][100:200]", '(0.0)'], {}), "(sparse_map['col1'][100:200], 0.0)\n", (7923, 7957), True, 'import 
numpy.testing as testing\n'), ((7967, 8034), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (["sparse_map[100:200]['col1']", '(0.0)'], {}), "(sparse_map[100:200]['col1'], 0.0)\n", (8000, 8034), True, 'import numpy.testing as testing\n'), ((8076, 8089), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (8084, 8089), True, 'import numpy as np\n'), ((8098, 8165), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (["sparse_map['col2'][100:200]", '(0.0)'], {}), "(sparse_map['col2'][100:200], 0.0)\n", (8131, 8165), True, 'import numpy.testing as testing\n'), ((8175, 8242), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (["sparse_map[100:200]['col2']", '(0.0)'], {}), "(sparse_map[100:200]['col2'], 0.0)\n", (8208, 8242), True, 'import numpy.testing as testing\n'), ((8284, 8313), 'numpy.zeros', 'np.zeros', (['(100)'], {'dtype': 'np.int32'}), '(100, dtype=np.int32)\n', (8292, 8313), True, 'import numpy as np\n'), ((8322, 8380), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (["sparse_map['col3'][100:200]", '(0)'], {}), "(sparse_map['col3'][100:200], 0)\n", (8348, 8380), True, 'import numpy.testing as testing\n'), ((8390, 8448), 'numpy.testing.assert_array_equal', 'testing.assert_array_equal', (["sparse_map[100:200]['col3']", '(0)'], {}), "(sparse_map[100:200]['col3'], 0)\n", (8416, 8448), True, 'import numpy.testing as testing\n'), ((8716, 8734), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (8727, 8734), False, 'from numpy import random\n'), ((8883, 8907), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (8896, 8907), False, 'from numpy import random\n'), ((8930, 9007), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (8954, 9007), False, 'import healsparse\n'), 
((9118, 9133), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9126, 9133), True, 'import numpy as np\n'), ((9163, 9178), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9171, 9178), True, 'import numpy as np\n'), ((9342, 9354), 'numpy.ones', 'np.ones', (['(500)'], {}), '(500)\n', (9349, 9354), True, 'import numpy as np\n'), ((9386, 9398), 'numpy.ones', 'np.ones', (['(500)'], {}), '(500)\n', (9393, 9398), True, 'import numpy as np\n'), ((9567, 9579), 'numpy.ones', 'np.ones', (['(500)'], {}), '(500)\n', (9574, 9579), True, 'import numpy as np\n'), ((9616, 9628), 'numpy.ones', 'np.ones', (['(500)'], {}), '(500)\n', (9623, 9628), True, 'import numpy as np\n'), ((9802, 9817), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9810, 9817), True, 'import numpy as np\n'), ((9840, 9855), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (9848, 9855), True, 'import numpy as np\n'), ((10083, 10101), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (10094, 10101), False, 'from numpy import random\n'), ((10250, 10274), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (10263, 10274), False, 'from numpy import random\n'), ((10297, 10374), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (10321, 10374), False, 'import healsparse\n'), ((10394, 10427), 'numpy.array', 'np.array', (['[1, 2, 100, 500, 10000]'], {}), '([1, 2, 100, 500, 10000])\n', (10402, 10427), True, 'import numpy as np\n'), ((10458, 10473), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (10466, 10473), True, 'import numpy as np\n'), ((10502, 10517), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (10510, 10517), True, 'import numpy as np\n'), ((11190, 11225), 'numpy.array', 'np.array', (['[1.0, 2, 100, 500, 10000]'], {}), '([1.0, 2, 100, 500, 10000])\n', (11198, 11225), True, 
'import numpy as np\n'), ((11397, 11415), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (11408, 11415), False, 'from numpy import random\n'), ((11564, 11588), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (11577, 11588), False, 'from numpy import random\n'), ((11611, 11688), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (11635, 11688), False, 'import healsparse\n'), ((11762, 11777), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (11770, 11777), True, 'import numpy as np\n'), ((11806, 11821), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (11814, 11821), True, 'import numpy as np\n'), ((12462, 12480), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (12473, 12480), False, 'from numpy import random\n'), ((12629, 12653), 'numpy.random.random', 'random.random', ([], {'size': '(5000)'}), '(size=5000)\n', (12642, 12653), False, 'from numpy import random\n'), ((12676, 12753), 'healsparse.HealSparseMap', 'healsparse.HealSparseMap', ([], {'healpix_map': 'full_map', 'nside_coverage': 'nside_coverage'}), '(healpix_map=full_map, nside_coverage=nside_coverage)\n', (12700, 12753), False, 'import healsparse\n'), ((13082, 13100), 'numpy.random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (13093, 13100), False, 'from numpy import random\n'), ((13170, 13188), 'numpy.arange', 'np.arange', (['(0)', '(2000)'], {}), '(0, 2000)\n', (13179, 13188), True, 'import numpy as np\n'), ((13349, 13465), 'healsparse.HealSparseMap.make_empty', 'healsparse.HealSparseMap.make_empty', ([], {'nside_coverage': 'nside_coverage', 'nside_sparse': 'nside_map', 'dtype': 'pxvalues.dtype'}), '(nside_coverage=nside_coverage,\n nside_sparse=nside_map, dtype=pxvalues.dtype)\n', (13384, 13465), False, 'import healsparse\n'), ((13571, 13635), 'numpy.testing.assert_equal', 
'testing.assert_equal', (['sparse_map[pxnums[0]]', 'full_map[pxnums[0]]'], {}), '(sparse_map[pxnums[0]], full_map[pxnums[0]])\n', (13591, 13635), True, 'import numpy.testing as testing\n'), ((13683, 13754), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['sparse_map[pxnums]', 'full_map[pxnums]'], {}), '(sparse_map[pxnums], full_map[pxnums])\n', (13716, 13754), True, 'import numpy.testing as testing\n'), ((10900, 10933), 'numpy.array', 'np.array', (['[1, 2, 100, 500, 10000]'], {}), '([1, 2, 100, 500, 10000])\n', (10908, 10933), True, 'import numpy as np\n'), ((13243, 13267), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (13256, 13267), True, 'import healpy as hp\n'), ((380, 404), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (393, 404), True, 'import healpy as hp\n'), ((2190, 2214), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (2203, 2214), True, 'import healpy as hp\n'), ((3289, 3313), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (3302, 3313), True, 'import healpy as hp\n'), ((4105, 4129), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (4118, 4129), True, 'import healpy as hp\n'), ((4751, 4775), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (4764, 4775), True, 'import healpy as hp\n'), ((5346, 5370), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (5359, 5370), True, 'import healpy as hp\n'), ((8817, 8841), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (8830, 8841), True, 'import healpy as hp\n'), ((10184, 10208), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (10197, 10208), True, 'import healpy as hp\n'), ((11498, 11522), 'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (11511, 11522), True, 'import healpy as hp\n'), ((12563, 12587), 
'healpy.nside2npix', 'hp.nside2npix', (['nside_map'], {}), '(nside_map)\n', (12576, 12587), True, 'import healpy as hp\n')] |
import numpy as np
import matplotlib.pyplot as plt
def m2d( m=1.0, a=1.0, b=1.0 ):
    """Convert mass to maximum dimension via the power law D = a * m**b.

    Works on scalars and numpy arrays alike.
    """
    return a * m ** b
def get_v(mmin=0.0, mmax=0.1, minc=0.1,
          alpha=40.0, beta=0.230, gamma=1/2,
          rho_0=1.0, rho=1.0):
    """Terminal velocity over a mass grid.

    Evaluates v(m) = alpha * m**beta * (rho_0 / rho)**gamma on the grid
    m = mmin, mmin + minc, ..., mmax (inclusive).

    Parameters
    ----------
    mmin, mmax, minc : float
        Start, stop (inclusive) and step of the mass grid.
    alpha, beta : float
        Power-law coefficient and exponent.
    gamma : float
        Exponent of the density-ratio correction.
    rho_0, rho : float
        Reference and ambient densities.

    Returns
    -------
    (v_l, m_l) : tuple of ndarray
        Velocities and the mass grid they were evaluated on.
    """
    m_l = np.arange(mmin, mmax + minc, minc)
    # Vectorized: the density correction is a constant factor and the
    # power law broadcasts over the whole grid, so no Python loop needed.
    v_l = alpha * np.power(m_l, beta) * np.power(rho_0 / rho, gamma)
    return (v_l, m_l)
########
# Coefficients for the fall-speed power law v = alpha * m**beta * (rho_0/rho)**gamma
alpha_def = 27.70
beta_def = 0.216
gamma = 1/2
rho_0 = 1.0
rho = 1.0
# Mass grid [kg]
mmin = 0.0
mmax = 5.0e-5
minc = 1.e-8
# Alternative coefficient sets labelled "small" / "large" snow
alpha_snow_s = 29257.1601562500
alpha_snow_l = 305.678619384766
beta_snow_s = 0.528109610080719
beta_snow_l = 0.329863965511322
# Velocity curves over the same mass grid for each coefficient set
v_snow_def, m_l = get_v( alpha=alpha_def, beta=beta_def,
                         mmin=mmin, mmax=mmax, minc=minc )
v_snow_s, _ = get_v( alpha=alpha_snow_s, beta=beta_snow_s,
                     mmin=mmin, mmax=mmax, minc=minc )
v_snow_l, _ = get_v( alpha=alpha_snow_l, beta=beta_snow_l,
                     mmin=mmin, mmax=mmax, minc=minc )
# Plot axis limits
ymin = 0.0
ymax = 10.0
fig, ax1 = plt.subplots( 1, 1, figsize=( 8, 6.5 ) )
dmin = 0.0
dmax = 1.e-5
ax1.set_xlim( dmin*1.e3, dmax*1.e3 )
ax1.set_ylim( ymin, ymax )
data_l = [ v_snow_def, v_snow_s, v_snow_l ]
lab_l = ["default", "small", "large" ]
c_l = [ 'k', 'b', 'r' ]
# Mass -> maximum dimension (identity with m2d's default a = b = 1)
d_l = m2d( m_l )
for i, data, in enumerate( data_l ):
    # x-axis scaled by 1e3 (dimension in mm)
    ax1.plot( d_l*1.e3, data, label=lab_l[i], color=c_l[i] )
ax1.legend( loc='upper left', fontsize=12, )
#ax1.set_xlabel( 'Mass (g)')
ax1.set_xlabel( 'Maximum dimension (mm)')
ax1.set_ylabel( 'Terminal velocity (m/s)')
plt.show()
| [
"numpy.power",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1105, 1141), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6.5)'}), '(1, 1, figsize=(8, 6.5))\n', (1117, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1623, 1633), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1631, 1633), True, 'import matplotlib.pyplot as plt\n'), ((253, 287), 'numpy.arange', 'np.arange', (['mmin', '(mmax + minc)', 'minc'], {}), '(mmin, mmax + minc, minc)\n', (262, 287), True, 'import numpy as np\n'), ((299, 318), 'numpy.zeros', 'np.zeros', (['m_l.shape'], {}), '(m_l.shape)\n', (307, 318), True, 'import numpy as np\n'), ((100, 114), 'numpy.power', 'np.power', (['m', 'b'], {}), '(m, b)\n', (108, 114), True, 'import numpy as np\n'), ((399, 427), 'numpy.power', 'np.power', (['(rho_0 / rho)', 'gamma'], {}), '(rho_0 / rho, gamma)\n', (407, 427), True, 'import numpy as np\n'), ((376, 394), 'numpy.power', 'np.power', (['m_', 'beta'], {}), '(m_, beta)\n', (384, 394), True, 'import numpy as np\n')] |
import numpy as np

n1 = np.array([10, 20])
n2 = np.array([30, 40])
print("Sum of n1 and n2:-")
new = np.sum([n1, n2])
print(new)
print("Sum of n1 and n2 row wise :-")
# axis=0 adds row-wise (vertically); axis=1 would add column-wise (horizontally).
new_2 = np.sum([n1, n2], axis=0)
print(new_2)
# Basic adding, substracting, multiplication and division.
n3 = np.array([10, 20, 30])
n3 = n3 + 1
print("Adding 1 in n3:-")
print(n3)
n4 = n3 - 1
print("Substracting 1 in n3:-")
print(n4)
n5 = n3 * 2
print("Multipling 2 in n3:-")
print(n5)
n6 = n3 / 2
print("Dividing 2 in n3:-")
print(n6)
# mean
n7 = np.array([10, 20, 30, 40, 50, 60])
# BUG FIX: previously computed np.mean(n1); the freshly created n7 was
# clearly the intended input for this "mean" section.
new_3 = np.mean(n7)
print("Printing the mean value:-")
print(new_3)
# Standard deviation.
print("Printing standard division value.")
new_4 = np.std(n7)
print(new_4)
print(new_4) | [
"numpy.array",
"numpy.mean",
"numpy.sum",
"numpy.std"
] | [((25, 43), 'numpy.array', 'np.array', (['[10, 20]'], {}), '([10, 20])\n', (33, 43), True, 'import numpy as np\n'), ((48, 66), 'numpy.array', 'np.array', (['[30, 40]'], {}), '([30, 40])\n', (56, 66), True, 'import numpy as np\n'), ((101, 117), 'numpy.sum', 'np.sum', (['[n1, n2]'], {}), '([n1, n2])\n', (107, 117), True, 'import numpy as np\n'), ((175, 199), 'numpy.sum', 'np.sum', (['[n1, n2]'], {'axis': '(0)'}), '([n1, n2], axis=0)\n', (181, 199), True, 'import numpy as np\n'), ((353, 375), 'numpy.array', 'np.array', (['[10, 20, 30]'], {}), '([10, 20, 30])\n', (361, 375), True, 'import numpy as np\n'), ((586, 620), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60]'], {}), '([10, 20, 30, 40, 50, 60])\n', (594, 620), True, 'import numpy as np\n'), ((624, 635), 'numpy.mean', 'np.mean', (['n1'], {}), '(n1)\n', (631, 635), True, 'import numpy as np\n'), ((756, 766), 'numpy.std', 'np.std', (['n7'], {}), '(n7)\n', (762, 766), True, 'import numpy as np\n')] |
# Copyright 2020 Open Climate Tech Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Simple utility to break up rectangle into squares
"""
import os
import pathlib
import math
import logging
import numpy as np
def getSegmentRanges(fullSize, segmentSize):
    """Break the range (0, fullSize) into overlapping segments.

    Produces equally spaced (start, end) windows of exactly segmentSize
    with approximately 15% overlap between neighbors (overlapRatio).

    Args:
        fullSize (int): size of the full range (0, fullSize)
        segmentSize (int): size of each segment

    Returns:
        (list): list of (start, end) tuples, one per segment; empty when
        fullSize < segmentSize
    """
    overlapRatio = 1.15
    if fullSize < segmentSize:
        return []  # all segments must be exactly segmentSize
    if fullSize == segmentSize:
        return [(0, segmentSize)]
    half = int(segmentSize / 2)
    firstCenter = half
    lastCenter = fullSize - half
    assert lastCenter > firstCenter
    flexSize = lastCenter - firstCenter
    numSegments = math.ceil(flexSize / (segmentSize / overlapRatio))
    offset = flexSize / numSegments
    ranges = []
    for i in range(numSegments):
        start = firstCenter + round(i * offset) - half
        if (start + segmentSize) > fullSize:
            break
        ranges.append((start, start + segmentSize))
    # final segment is always pinned to the right edge
    ranges.append((fullSize - segmentSize, fullSize))
    return ranges
def getRangeFromCenter(center, size, minLimit, maxLimit):
    """Linear range of the given size, ideally centered at `center`,
    clamped to (minLimit, maxLimit).

    Args:
        center (int): desired center
        size (int): size of the output range
        minLimit (int): absolute minimum value of the output range
        maxLimit (int): absolute maximum value of the output range

    Returns:
        (int, int): start, end of the range
    """
    half = int(size / 2)
    if center - half <= minLimit:
        # pinned to the left edge
        start = minLimit
        end = min(start + size, maxLimit)
    elif center + half >= maxLimit:
        # pinned to the right edge
        end = maxLimit
        start = max(end - size, minLimit)
    else:
        # unconstrained: center the range
        start = center - half
        end = min(start + size, maxLimit)
    return (start, end)
def cutBoxesFiles(imgOrig, outputDirectory, imageFileName, callBackFn=None):
    """Cut the given image into fixed size boxes and store them as files.

    Splits imgOrig into 299x299 squares (the input size of the InceptionV3
    image classification model) using getSegmentRanges() for the horizontal
    and vertical spans, writes each square as a JPEG into outputDirectory,
    and returns metadata describing each square.

    Args:
        imgOrig (Image): Image object of the original image
        outputDirectory (str): name of directory to store the segments
        imageFileName (str): name of image file (used as segment file prefix)
        callBackFn (function): optional; called with each square's coords,
            a truthy return value skips that square

    Returns:
        (list): dicts with 'imgPath' and the MinX/MinY/MaxX/MaxY bounds
    """
    segmentSize = 299
    baseName = pathlib.PurePath(imageFileName).name
    namePrefix = str(os.path.splitext(baseName)[0])
    xRanges = getSegmentRanges(imgOrig.size[0], segmentSize)
    yRanges = getSegmentRanges(imgOrig.size[1], segmentSize)
    segments = []
    for (top, bottom) in yRanges:
        for (left, right) in xRanges:
            coords = (left, top, right, bottom)
            if callBackFn is not None and callBackFn(coords):
                continue
            # output cropped image
            cropName = namePrefix + '_Crop_' + 'x'.join(str(c) for c in coords) + '.jpg'
            cropPath = os.path.join(outputDirectory, cropName)
            square = imgOrig.crop(coords)
            square.save(cropPath, format='JPEG', quality=95)
            square.close()
            segments.append({
                'imgPath': cropPath,
                'MinX': coords[0],
                'MinY': coords[1],
                'MaxX': coords[2],
                'MaxY': coords[3]
            })
    return segments
def cutBoxesArray(imgOrig, startX=0, endX=None, startY=0, endY=None):
    """Cut the given image into fixed size boxes, normalize, return np arrays.

    Splits the requested window of imgOrig into 299x299 squares (the input
    size of the InceptionV3 model) using getSegmentRanges(), normalizes
    pixel values via (v - 128) / 128, and returns the stacked crops along
    with per-crop boundary metadata.

    Args:
        imgOrig (Image): Image object of the original image
        startX, endX, startY, endY (int): optional crop window; None means
            the full extent, negative end values count back from the edge

    Returns:
        (ndarray, list): stacked normalized crops and metadata dicts
    """
    segmentSize = 299

    def _shiftedRanges(start, end, limit):
        # Clamp the requested window to the image and shift the segment
        # ranges back into absolute image coordinates.
        if end is None:
            end = limit
        elif end < 0:
            end = limit + end
        start = max(0, start)
        end = min(end, limit)
        return [(a + start, b + start)
                for (a, b) in getSegmentRanges(end - start, segmentSize)]

    xRanges = _shiftedRanges(startX, endX, imgOrig.size[0])
    yRanges = _shiftedRanges(startY, endY, imgOrig.size[1])

    imgNormalized = np.divide(np.subtract(np.asarray(imgOrig, dtype=np.float32), 128), 128)
    crops = []
    segments = []
    for (top, bottom) in yRanges:
        for (left, right) in xRanges:
            crops.append(imgNormalized[top:bottom, left:right])
            coords = (left, top, right, bottom)
            segments.append({
                'coords': coords,
                'coordStr': 'x'.join(str(c) for c in coords),
                'MinX': coords[0],
                'MinY': coords[1],
                'MaxX': coords[2],
                'MaxY': coords[3]
            })
    return np.array(crops), segments
| [
"math.ceil",
"numpy.asarray",
"os.path.splitext",
"numpy.subtract",
"os.path.join",
"pathlib.PurePath",
"numpy.array"
] | [((1653, 1703), 'math.ceil', 'math.ceil', (['(flexSize / (segmentSize / overlapRatio))'], {}), '(flexSize / (segmentSize / overlapRatio))\n', (1662, 1703), False, 'import math\n'), ((6639, 6676), 'numpy.asarray', 'np.asarray', (['imgOrig'], {'dtype': 'np.float32'}), '(imgOrig, dtype=np.float32)\n', (6649, 6676), True, 'import numpy as np\n'), ((7284, 7299), 'numpy.array', 'np.array', (['crops'], {}), '(crops)\n', (7292, 7299), True, 'import numpy as np\n'), ((4237, 4268), 'pathlib.PurePath', 'pathlib.PurePath', (['imageFileName'], {}), '(imageFileName)\n', (4253, 4268), False, 'import pathlib\n'), ((6707, 6735), 'numpy.subtract', 'np.subtract', (['imgNpArray', '(128)'], {}), '(imgNpArray, 128)\n', (6718, 6735), True, 'import numpy as np\n'), ((4297, 4322), 'os.path.splitext', 'os.path.splitext', (['imgName'], {}), '(imgName)\n', (4313, 4322), False, 'import os\n'), ((4873, 4915), 'os.path.join', 'os.path.join', (['outputDirectory', 'cropImgName'], {}), '(outputDirectory, cropImgName)\n', (4885, 4915), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from nltk.stem import PorterStemmer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from baseline.config import STOP_WORDS
# In[2]:
# Load PR Newswire Annotated Dataset (used below via its "data" and
# "target" columns)
pr_newswire = pd.read_csv("../data/pr-newswire.csv")
# In[3]:
def text_cleaning(text: str):
    """Cleans raw text input for Doc2Vec."""
    stemmer = PorterStemmer()
    # Strip punctuation and special chars
    letters_only = re.sub(r"[^\w]", " ", text)
    # Tokenize, drop empty tokens and stop words, stem the rest
    return [
        stemmer.stem(word.lower())
        for word in letters_only.split(" ")
        if word.strip() and word.lower() not in STOP_WORDS
    ]
# In[4]:
raw_news_stories = pr_newswire["data"]
# Establish data and target for vectorization
stories = list(map(text_cleaning, raw_news_stories))
classifications = list(pr_newswire["target"])
# In[5]:
# Build Doc2Vec `TaggedDocument` array (tokenized story + class label tag)
documents = [
    TaggedDocument(story, classifications[idx]) for idx, story in enumerate(stories)
]
# In[6]:
# Build Doc2Vec model
d2v = Doc2Vec(vector_size=40, min_count=2, epochs=30)
# Build vocabulary
d2v.build_vocab(documents)
# In[7]:
# Train doc2vec
d2v.train(documents, total_examples=d2v.corpus_count, epochs=d2v.epochs)
# In[8]:
# Destructure words and tags from TaggedDocument
words = [doc.words for doc in documents]
tags = [doc.tags for doc in documents]
# Train/test split (80/20)
x_train, x_test, y_train, y_test = train_test_split(words, tags, test_size=0.20)
# In[9]:
# Build vectors for training
x_train_vectors = [
    d2v.infer_vector(instance) for instance in x_train
]
# Build LabelEncoder for training
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
# Encode training labels
y_train_labels = label_encoder.transform(np.asarray(y_train))
# In[10]:
# Fit Logistic Regression on inferred vectors
logreg = LogisticRegression(max_iter=1000, multi_class="multinomial")
logreg.fit(x_train_vectors, y_train_labels)
# In[11]:
# Build vectors for testing
x_test_vectors = [
    d2v.infer_vector(instance) for instance in x_test
]
# In[12]:
# Predictions
y_pred = logreg.predict(x_test_vectors)
# In[13]:
# Encode test labels
y_test_labels = label_encoder.transform(y_test)
# In[14]:
print(f"Classes: {logreg.classes_}")
print(f"Intercepts: {logreg.intercept_}")
print(f"Coefficient: {logreg.coef_}")
# In[15]:
import matplotlib.pyplot as plt
from sklearn.metrics import (
    classification_report,
    confusion_matrix,
    f1_score,
    recall_score,
    precision_score,
)
# In[16]:
# Confusion matrix on the encoded test labels
c_matrix = confusion_matrix(y_test_labels, logreg.predict(x_test_vectors))
# In[17]:
# Plot confusion matrix
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(c_matrix)
ax.set_ylabel("Actual Label")
ax.set_xlabel("Predicted Label")
# NOTE(review): assumes the dataset has exactly 5 classes — confirm
labels = tuple(label_encoder.inverse_transform([0, 1, 2, 3, 4]))
ax.xaxis.set(ticks=(0, 1, 2, 3, 4), ticklabels=labels)
ax.yaxis.set(ticks=(0, 1, 2, 3, 4), ticklabels=labels)
plt.title("Doc2Vec - Logistic Regression")
# Annotate each matrix cell with its count
for i in range(len(labels)):  # ref: (https://realpython.com/logistic-regression-python/)
    for j in range(len(labels)):
        ax.text(j, i, c_matrix[i, j], ha='center', va='center', color='red')
plt.savefig("doc2vec-logistic-regression")
# In[18]:
# Calculate key metrics
precision = precision_score(y_test_labels, y_pred, average="weighted")
recall = recall_score(y_test_labels, y_pred, average="weighted")
f1 = f1_score(y_test_labels, y_pred, average="weighted")
print(f"Precision Score: {precision}")
print(f"Recall Score: {recall}")
print(f"F1 Score: {f1}")
# In[19]:
# Classification Report
print(classification_report(y_test, label_encoder.inverse_transform(y_pred)))
# In[20]:
from sklearn.decomposition import TruncatedSVD
# In[21]:
# Decompose sparse matrix
tsvd = TruncatedSVD()
x_decomposed = tsvd.fit_transform(x_train_vectors)
# Make dimensions
x = x_decomposed[:, 0]
y = x_decomposed[:, 1]
# In[22]:
# 2D Plot
fig2d = plt.figure(figsize=(12, 12))
ax = plt.axes()
scatter = plt.scatter(x, y, c=label_encoder.transform(y_train))
# Build Legend
legend = ax.legend(*scatter.legend_elements())
ax.add_artist(legend)
handles, labels = scatter.legend_elements()
# NOTE(review): relies on the legend label strings carrying the encoded
# class digit at index -3 — confirm against matplotlib's
# legend_elements() output format.
ax.legend(
    handles,
    label_encoder.inverse_transform([int(as_int[-3]) for as_int in labels]),
    loc=1
)
# Show Plot
plt.title("2 Feature Decomposed (2D): Doc2Vec - Logistic Regression")
plt.show()
fig2d.savefig("doc2vec-logistic-regression-2d-scatter")
# In[23]:
# Make 3 dimensions
x = x_decomposed[:, 0]
y = x_decomposed[:, 1]
z = x**1 * y**1
# 3D Plot
fig3d = plt.figure(figsize=(12, 12))
ax = plt.axes(projection ="3d")
ax.view_init(1)
ax.scatter3D(x, y, z, c=label_encoder.transform(y_train))
# Build Legend
legend_elements = ax.legend(*scatter.legend_elements())
ax.add_artist(legend_elements)
handles, labels = scatter.legend_elements()
ax.legend(
    handles,
    label_encoder.inverse_transform([int(as_int[-3]) for as_int in labels]),
    loc=2
)
ax.view_init(30)
# Show Plot
plt.title("2 Feature Decomposed (3D): Doc2Vec - Logistic Regression")
plt.show()
fig3d.savefig("doc2vec-logistic-regression-3d-scatter")
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"gensim.models.doc2vec.TaggedDocument",
"numpy.asarray",
"nltk.stem.PorterStemmer",
"gensim.models.doc2vec.Doc2Vec",
"matplotlib.pyplot.savefig",
"sklearn.model_selection.tra... | [((451, 489), 'pandas.read_csv', 'pd.read_csv', (['"""../data/pr-newswire.csv"""'], {}), "('../data/pr-newswire.csv')\n", (462, 489), True, 'import pandas as pd\n'), ((1295, 1342), 'gensim.models.doc2vec.Doc2Vec', 'Doc2Vec', ([], {'vector_size': '(40)', 'min_count': '(2)', 'epochs': '(30)'}), '(vector_size=40, min_count=2, epochs=30)\n', (1302, 1342), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((1688, 1732), 'sklearn.model_selection.train_test_split', 'train_test_split', (['words', 'tags'], {'test_size': '(0.2)'}), '(words, tags, test_size=0.2)\n', (1704, 1732), False, 'from sklearn.model_selection import train_test_split\n'), ((1904, 1918), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1916, 1918), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2102, 2162), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)', 'multi_class': '"""multinomial"""'}), "(max_iter=1000, multi_class='multinomial')\n", (2120, 2162), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2923, 2953), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2935, 2953), True, 'import matplotlib.pyplot as plt\n'), ((3213, 3255), 'matplotlib.pyplot.title', 'plt.title', (['"""Doc2Vec - Logistic Regression"""'], {}), "('Doc2Vec - Logistic Regression')\n", (3222, 3255), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3498), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""doc2vec-logistic-regression"""'], {}), "('doc2vec-logistic-regression')\n", (3467, 3498), True, 'import matplotlib.pyplot as plt\n'), ((3549, 3607), 'sklearn.metrics.precision_score', 'precision_score', (['y_test_labels', 'y_pred'], {'average': '"""weighted"""'}), "(y_test_labels, y_pred, average='weighted')\n", (3564, 3607), False, 'from sklearn.metrics import classification_report, confusion_matrix, f1_score, recall_score, 
precision_score\n'), ((3617, 3672), 'sklearn.metrics.recall_score', 'recall_score', (['y_test_labels', 'y_pred'], {'average': '"""weighted"""'}), "(y_test_labels, y_pred, average='weighted')\n", (3629, 3672), False, 'from sklearn.metrics import classification_report, confusion_matrix, f1_score, recall_score, precision_score\n'), ((3678, 3729), 'sklearn.metrics.f1_score', 'f1_score', (['y_test_labels', 'y_pred'], {'average': '"""weighted"""'}), "(y_test_labels, y_pred, average='weighted')\n", (3686, 3729), False, 'from sklearn.metrics import classification_report, confusion_matrix, f1_score, recall_score, precision_score\n'), ((4052, 4066), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {}), '()\n', (4064, 4066), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((4215, 4243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (4225, 4243), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4260), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (4258, 4260), True, 'import matplotlib.pyplot as plt\n'), ((4580, 4649), 'matplotlib.pyplot.title', 'plt.title', (['"""2 Feature Decomposed (2D): Doc2Vec - Logistic Regression"""'], {}), "('2 Feature Decomposed (2D): Doc2Vec - Logistic Regression')\n", (4589, 4649), True, 'import matplotlib.pyplot as plt\n'), ((4650, 4660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4658, 4660), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4860), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (4842, 4860), True, 'import matplotlib.pyplot as plt\n'), ((4867, 4892), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (4875, 4892), True, 'import matplotlib.pyplot as plt\n'), ((5258, 5327), 'matplotlib.pyplot.title', 'plt.title', (['"""2 Feature Decomposed (3D): Doc2Vec - Logistic Regression"""'], {}), "('2 Feature Decomposed (3D): Doc2Vec - Logistic 
Regression')\n", (5267, 5327), True, 'import matplotlib.pyplot as plt\n'), ((5328, 5338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5336, 5338), True, 'import matplotlib.pyplot as plt\n'), ((587, 602), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (600, 602), False, 'from nltk.stem import PorterStemmer\n'), ((665, 692), 're.sub', 're.sub', (['"""[^\\\\w]"""', '""" """', 'text'], {}), "('[^\\\\w]', ' ', text)\n", (671, 692), False, 'import re\n'), ((1171, 1214), 'gensim.models.doc2vec.TaggedDocument', 'TaggedDocument', (['story', 'classifications[idx]'], {}), '(story, classifications[idx])\n', (1185, 1214), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((2013, 2032), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (2023, 2032), True, 'import numpy as np\n')] |
import numpy as np
import scipy.special
def powder_isotropic(omega, pas):
    """
    Frequency domain calculation over an isotropic powder for a CSA tensor.

    The two spectral regions between the sorted principal values are
    evaluated with complete elliptic integrals of the first kind.

    Parameters
    ----------
    omega : array
        frequency axis
    pas : array
        principal values [omega_11, omega_22, omega_33] (any order)

    Returns
    -------
    array
        intensity(omega) for an isotropic powder; zero outside
        [omega_11, omega_33] and at the singular point omega_22
    """
    # Sort so that w1 <= w2 <= w3
    w1, w2, w3 = sorted(pas)
    intensity = np.zeros_like(omega)

    # Region w2 < omega <= w3
    hi = (w3 >= omega) & (omega > w2)
    m = ((w2 - w1) * (w3 - omega[hi])) / ((w3 - w2) * (omega[hi] - w1))
    norm = np.pi * np.sqrt((omega[hi] - w1) * (w3 - w2))
    intensity[hi] = scipy.special.ellipk(m) / norm

    # Region w1 <= omega < w2
    lo = (w2 > omega) & (omega >= w1)
    m = ((omega[lo] - w1) * (w3 - w2)) / ((w3 - omega[lo]) * (w2 - w1))
    norm = np.pi * np.sqrt((w3 - omega[lo]) * (w2 - w1))
    intensity[lo] = scipy.special.ellipk(m) / norm

    return intensity
# Line shape simulations.
def sim_gauss_fwhm(x, x0, fwhm):
    """Unit-amplitude Gaussian centered at x0 with the given FWHM."""
    arg = (x - x0) ** 2 * 4 * np.log(2) / (fwhm ** 2)
    return np.exp(-arg)
def sim_lorentz_fwhm(x, x0, fwhm):
    """Unit-amplitude Lorentzian centered at x0 with the given FWHM."""
    hwhm_sq = (0.5 * fwhm) ** 2
    return hwhm_sq / (hwhm_sq + (x - x0) ** 2)
def lorentz_kernel(x, fwhm, kernel_length=10):
    """Lorentzian convolution kernel sampled on the step size of x.

    :param: fwhm : Full-Width-Half-Max of Lorentzian.
    """
    step = abs(x[1] - x[0])
    if step >= fwhm:
        # the kernel would be under-sampled on this grid
        raise ValueError("FWHM can't be < step size of input.")
    half_width = round(kernel_length * fwhm) * step
    grid = np.arange(-half_width, half_width + step, step)
    return sim_lorentz_fwhm(grid, 0, fwhm)
# Generate a kernel from the line-shapes
def gauss_kernel(x, fwhm, kernel_length=10):
    """Gaussian convolution kernel sampled on the step size of x.

    :param: fwhm : Full-Width-Half-Max of Gaussian.
    """
    step = abs(x[1] - x[0])
    if step >= fwhm:
        # the kernel would be under-sampled on this grid
        raise ValueError("FWHM can't be < step size of input.")
    half_width = round(kernel_length * fwhm) * step
    grid = np.arange(-half_width, half_width + step, step)
    return sim_gauss_fwhm(grid, 0, fwhm)
def filter1d(x, y, filter_type='gauss', **kwargs):
    """
    Convolve y with a normalized line-shape kernel defined on the x grid.

    Parameters
    ----------
    :param x: array of length n, position
    :param y: array of length n, intensity
    :param filter_type: 'gauss' or 'lorentz' (the implemented kernels)
    :param kwargs: forwarded to the kernel builder, e.g. fwhm
        (Full-Width-Half-Max in units of x) and kernel_length

    Returns
    -------
    :return array of length n ('same'-mode convolution with the kernel)
    """
    builders = {'gauss': gauss_kernel,
                'lorentz': lorentz_kernel}
    kernel = builders[filter_type](x, **kwargs)
    # normalize so the convolution preserves total intensity
    return np.convolve(y, kernel / kernel.sum(), mode='same')
| [
"numpy.convolve",
"numpy.sqrt",
"numpy.log",
"numpy.zeros_like",
"numpy.arange"
] | [((592, 612), 'numpy.zeros_like', 'np.zeros_like', (['omega'], {}), '(omega)\n', (605, 612), True, 'import numpy as np\n'), ((3408, 3443), 'numpy.convolve', 'np.convolve', (['y', 'kernel'], {'mode': '"""same"""'}), "(y, kernel, mode='same')\n", (3419, 3443), True, 'import numpy as np\n'), ((837, 889), 'numpy.sqrt', 'np.sqrt', (['((omega[where] - pas[0]) * (pas[2] - pas[1]))'], {}), '((omega[where] - pas[0]) * (pas[2] - pas[1]))\n', (844, 889), True, 'import numpy as np\n'), ((1207, 1259), 'numpy.sqrt', 'np.sqrt', (['((pas[2] - omega[where]) * (pas[1] - pas[0]))'], {}), '((pas[2] - omega[where]) * (pas[1] - pas[0]))\n', (1214, 1259), True, 'import numpy as np\n'), ((1776, 1813), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + step)', 'step'], {}), '(-limit, limit + step, step)\n', (1785, 1813), True, 'import numpy as np\n'), ((2214, 2251), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + step)', 'step'], {}), '(-limit, limit + step, step)\n', (2223, 2251), True, 'import numpy as np\n'), ((1410, 1419), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1416, 1419), True, 'import numpy as np\n')] |
#
# radarbeam.py
#
# module for calculating geometry parameters and magnetic aspect
# angle of radar targets monitored by any radar
#
# use aspect_elaz or aspect_txty to calculate aspect angles of targets
# specified by (el,az) or (tx,ty) angles
#
# Created by <NAME> on 11/29/08 as jrobeam.py
# Copyright (c) 2008 ECE, UIUC. All rights reserved.
# history
# - Aug29,2013 by <NAME>
# -Generate a module that accepts the lon,lat,h coordinates for the location
# of any radar.
# -flattening has been changed from 1/298.257 to 1./298.257223563
# using the WGS84 reference in:
# http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf
# - A new routine called enu2xyz to move a point from xr,yr,zr to some
# direction east, north, up
def llh2xyz(latg, lon, h):
    """Geocentric ECEF coordinates (km) of a point at geodetic latitude
    latg (rad), longitude lon (rad) and height h (km above the local
    ellipsoid). Uses module-level WGS84 constants."""
    sin_lat = np.sin(latg)
    cos_lat = np.cos(latg)
    # prime-vertical radius of curvature of the ellipsoid
    n = a_WGS / np.sqrt(1. - flatness * (2. - flatness) * sin_lat ** 2.)
    x = (n + h) * cos_lat * np.cos(lon)
    y = (n + h) * cos_lat * np.sin(lon)
    z = (n * (1. - eccentricity ** 2.) + h) * sin_lat
    return x, y, z
def xyz2llh(x,y,z):
    # returns longitude 'lon', geodetic latitude 'lat', and height 'h'
    # of position (x,y,z) defined in geocentric coordinate system (ECEF)
    # on Oct23,2013 by <NAME>, adding the .all() in order to support
    # arrays
    # Iterative inversion of llh2xyz: fixed-point iteration on the
    # geodetic latitude. NOTE(review): inputs are assumed to support
    # numpy array semantics (.copy(), elementwise abs) — scalars may
    # need wrapping in np.array; confirm with callers.
    p=np.sqrt(x**2.+y**2.)  # distance from the Earth's rotation axis
    lon=np.arctan2(y,x)
    lat=np.arctan2(z,p)  # geocentric latitude as the first guess
    latp=lat.copy()  # latitude from the previous iteration
    for i in range(10):  # at most 10 fixed-point iterations
        n=a_WGS/np.sqrt(1.-flatness*(2-flatness)*np.sin(latp)**2.)  # prime-vertical radius
        h=p/np.cos(latp)-n
        lat=np.arctan(z/(p*(1.-n*eccentricity**2./(n+h))))
        if (abs(lat-latp)<3.*eps).all():  # converged at every element
            # recompute n and h consistently with the converged latitude
            n=a_WGS/np.sqrt(1.-flatness*(2.-flatness)*np.sin(lat)**2.)
            h=p/np.cos(lat)-n
            break
        latp=lat.copy()
    return lat,lon,h
def enu2xyz(xr,yr,zr,east,north,up):
    # moves a point from xr,yr,zr to x,y,z by moving into the direction
    # specified by east,north,up (enu) coordinates in km
    # A is the rotation matrix mapping the local ENU basis at the
    # geodetic position of (xr,yr,zr) into the ECEF frame.
    latg,lon,h = xyz2llh(xr,yr,zr)
    A = np.array([[-np.sin(lon),-np.sin(latg)*np.cos(lon),np.cos(latg)*np.cos(lon)],
        [ np.cos(lon),-np.sin(latg)*np.sin(lon),np.cos(latg)*np.sin(lon)],
        [ 0 , np.cos(latg) ,np.sin(latg)]])
    # rotate the ENU displacement and translate by the start point
    x,y,z = np.dot(A,np.array([east,north,up]))+np.array([xr,yr,zr])
    return x,y,z
def cosBs(year, rr, el, az):
    """Decompose the radial unit vector to a target into direction cosines
    of magnetic North, East, and Up.

    BUG FIX: the original used unqualified numpy names (cos, sin, dot,
    sqrt, array, cross, arccos) although only `import numpy as np` exists,
    and returned the undefined name `aspect` while the variable computed
    was `aspect_angle` — both raised NameError when called.

    NOTE(review): xyz0, east0, north0 and zenith0 are referenced as module
    globals but are only created as radarspecs instance attributes —
    confirm how this function is meant to be called. -- TODO

    Args:
        year: epoch for the IGRF field evaluation
        rr: range to the target (km)
        el, az: elevation and azimuth of the beam (rad)

    Returns:
        (r, lat, lon, h, xyz, B, aspect, cosBn, cosBe, cosBu)
    """
    tx = np.cos(el) * np.sin(az)  # direction cosines wrt east and north
    ty = np.cos(el) * np.cos(az)
    tz = np.sin(el)
    xyz = xyz0 + rr * (tx * east0 + ty * north0 + tz * zenith0)  # target vector
    r = np.sqrt(np.dot(xyz, xyz))
    lat, lon, h = xyz2llh(xyz[0], xyz[1], xyz[2])  # target lat, lon, height
    radial = xyz / r  # unit vector to target
    p = np.sqrt(xyz[0] ** 2 + xyz[1] ** 2)
    east = np.array([-xyz[1], xyz[0], 0]) / p  # unit vector to east from target
    north = -np.cross(east, radial)  # unit vector to north from target
    rr_ = xyz - xyz0  # vector from radar to target
    rr_u = rr_ / np.sqrt(np.dot(rr_, rr_))  # unit vector from radar to target
    [bX, bY, bZ, bB] = igrf.igrf_B(year, r - a_igrf, lon / deg, lat / deg)
    bfield = np.array([bX, bY, bZ])
    B = bX * north + bY * east - bZ * radial  # magnetic field vector B
    bn = B / np.sqrt(np.dot(B, B))  # "magnetic north" unit vector (B direction)
    be = np.cross(bn, radial)
    be = be / np.sqrt(np.dot(be, be))  # magnetic east unit vector
    bu = np.cross(be, bn)  # magnetic up unit vector
    cosBn = np.dot(bn, rr_u)  # magnetic north direction-cosine of rr_u
    aspect = np.arccos(cosBn)
    cosBe = np.dot(be, rr_u)  # magnetic east direction-cosine of rr_u
    cosBu = np.dot(bu, rr_u)  # magnetic up direction-cosine of rr_u
    # uLOS = cosBe*U(h) + cosBn*V(h) + cosBu*W(h) ... LOS wind model in terms
    # of wind components and these direction cosines (kept as a reference).
    return r, lat, lon, h, xyz, B, aspect, cosBn, cosBe, cosBu
# --------------------------------------------------------------
import numpy as np
from pyigrf import igrf
eps=np.finfo(float).eps # float resolution
deg=np.pi/180. # to express angles in degree values
a_igrf=6371.2 # mean earth radius (km)
# WGS84 constants
# reference:
# http://earth-info.nga.mil/GandG/publications/tr8350.2/wgs84fin.pdf
a_WGS=6378.137 # equatorial radius WGS 84 (semi-major axis) in km
#flatness=1/298.257
flatness = 1./298.257223563 # flatenning
b_WGS=a_WGS*(1.-flatness) # WGS polar radius (semi-minor axis) in km
eccentricity=np.sqrt(a_WGS**2-b_WGS**2)/a_WGS # first eccentricity of the ellipsoid
# ------------ radar specifications -------------------------
class radarspecs:
"""Will contain radar coordinates and coordinate conversions
saved locations:
JRO : lat: -11.947917 , lon: -76.872306, h0: 0.463 km
JRO_GE : as zoom in with GoogleEarth to the center of the antenna.
IRIS@ROI
ALTAIR
IRIS@URBANA
"""
    def __init__(self,lat0=None,lon0=None,h0=None,location=None):
        """Store the radar site coordinates and build the local basis vectors.

        lat0, lon0 : geodetic latitude/longitude of the radar in degrees
        h0         : height above the reference ellipsoid in km
        location   : name of a saved site (see locations()); when given
                     it takes precedence over lat0/lon0/h0.

        NOTE(review): an unrecognized `location` string leaves
        self.lat0/lon0/h0 unset, so llh2xyz() below raises
        AttributeError — confirm whether that is intended.
        """
        if location!=None:
            if location.upper() == "JRO":
                # geodetic, the usual map or GPS latitude
                self.lat0 = -11.947917 * deg
                self.lon0 = -76.872306 * deg
                self.h0 = 0.463 # local height in km above reference ellipsoid
            elif location.upper() == "JRO_GE":
                # using google earth to the center of the Antenna
                # -11.9514944444 = -(11.+57./60.+5.38/3600.) # 11deg57'5.38"S
                self.lat0 = -11.9514944444 * deg
                # -76.8743916667#-(76.+52./60.+27.81/3600.) # 76deg52'27.81"W
                self.lon0 = -76.8743916667 * deg
                self.h0 = 0.463 # local height in km above reference ellipsoid
            elif location.upper() == "IRIS@ROI":
                # 9.39794444444 = (9.+23./60.+52.6/3600.) # 9deg23'52.60"N
                self.lat0 = 9.39794444444 * deg
                # 167.469166667 = (167.+28./60.+9./3600.) # 167deg28'9.00"E
                self.lon0 = 167.469166667 * deg
                self.h0 = 0.012
            elif location.upper() == "ALTAIR":
                # 9.39541666667 = (9.+23./60.+43.5/3600.) # 9deg23'43.50"N
                self.lat0 = 9.39541666667 * deg
                # 167.479333333 = (167.+28./60.+45.6/3600.) # 167deg28'45.60"E
                self.lon0 = 167.479333333 * deg
                self.h0 = 0.012
            elif location.upper() == "IRIS@URBANA":
                # 40.16683888888889 = (40.+10./60.+0.62/3600.) #40deg10'0.62"N
                self.lat0 = 40.16683888888889 * deg
                #-88.1586 = -(88.+9./60.+30.96/3600.) #88deg9'30.96"W
                self.lon0 = (360. -88.1586) * deg
                self.h0 = 0.221
        elif lat0==None or lon0==None or h0==None:
            # By default: JRO center of antenna with google earth
            # -11.9514944444 = -(11.+57./60.+5.38/3600.) # 11deg57'5.38"S
            self.lat0 = -11.9514944444 * deg
            # -76.8743916667#-(76.+52./60.+27.81/3600.) # 76deg52'27.81"W
            self.lon0 = -76.8743916667 * deg
            self.h0 = 0.463 # local height in km above reference ellipsoid
        else:
            self.lat0 = lat0 * deg
            self.lon0 = lon0* deg
            self.h0 = h0 # local height in km above reference ellipsoid
        x0,y0,z0 = llh2xyz(self.lat0,self.lon0,self.h0)
        self.xyz0 = np.array([x0,y0,z0])
        xy0 = np.array([x0,y0])
        p0 = np.sqrt(np.dot(xy0,xy0))
        # unit vectors from jro
        self.east0 = np.array([-y0,x0,0])/p0
        # zenith and north directions wrt local ellipsoid
        self.zenith0 = np.array([np.cos(self.lat0) * np.cos(self.lon0),
            np.cos(self.lat0) * np.sin(self.lon0),
            np.sin(self.lat0)])
        self.north0 = np.cross(self.zenith0,self.east0)
        # orthonormal basis vectors including the jro on-axis direction
        dec=-12.88*deg
        ha=-(4.+37./60.)*deg # on-axis direction at JRO
        self.uo = np.array([np.cos(dec) * np.cos(ha/4. + self.lon0), # on axis
            np.cos(dec) * np.sin(ha/4. + self.lon0), np.sin(dec)])
        self.ux = np.cross(self.zenith0,self.uo)
        # along the building to the right
        self.ux = self.ux / np.sqrt(np.dot(self.ux,self.ux))
        # away from the building into the valley
        self.uy = np.cross(self.uo,self.ux)
def locations(self):
return ["JRO","JRO_GE","IRIS@ROI","ALTAIR","IR<EMAIL>"]
def dec_ha2el_az(dec,ha):
# returns elevation and azimuth angles of a radar beam
# with respect to local tangent plane.
# the beam is specified by:
# declination dec (deg)
# hour angle ha (min)
# with respect to radar location at longitude lon0 and height h0
# above reference ellipsiod at geodetic latitude lat0
lat=dec*deg # on celestial sphere
lon=2.*pi*(ha/(24.*60.))
lon=lon+lon0 # on celestial sphere
vec=array([cos(lat)*cos(lon),cos(lat)*sin(lon),sin(lat)])
hor=vec-dot(vec,zenith0)*zenith0
hor=hor/sqrt(dot(hor,hor))
el=arccos(dot(hor,vec))/deg
north=dot(hor,north0)
east=dot(hor,east0)
az=arctan2(east,north)/deg
return el,az
def xyz2dec_ha(self,vec):
# declination and hour angle in target direction used to describe radar
# beam direction at JRO, corresponding to latitude and relative
# longitude of the beam-spot on the celestial sphere, corresponds to
# rr->\infty, in which case:
vec = vec/np.sqrt(np.dot(vec,vec))
p = np.sqrt(vec[0]**2.+vec[1]**2.)
dec = np.arctan2(vec[2],p)/deg # in degrees
ha = (np.arctan2(vec[1],vec[0]) - self.lon0)*(24./(2.*np.pi))*60. # in minutes
return dec,ha
def aspect_angle(self,year,xyz):
# returns the magnetic aspect angle (rad) of a target with
# geocentric vector xyz defined in geocentric coordinates
r = np.sqrt(np.dot(xyz,xyz))
p = np.sqrt(xyz[0]**2. + xyz[1]**2.)
lat = np.arctan2(xyz[2],p)
lon = np.arctan2(xyz[1],xyz[0])
radial = xyz/r; # directions from target
east = np.array([-xyz[1],xyz[0],0.])/p
north = -np.cross(east,radial)
rr = xyz - self.xyz0
u_rr = rr / np.sqrt(np.dot(rr,rr)) # unit vector from radar to target
[bX,bY,bZ,bB] = igrf.igrf_B(year, r - a_igrf, lon/deg, lat/deg)
bfield = np.array([bX,bY,bZ])
B = bX*north + bY*east - bZ*radial
u_B = B / np.sqrt(np.dot(B,B))
aspect = np.arccos(np.dot(u_B, u_rr))
return r,lat,lon,aspect
def aspect_txty(self,year,rr,tx,ty):
# returns magnetic aspect angle and geocentric coordinates of a target
# tracked by jro at
# range rr (km)
# tx along jro building
# ty into the building
tz = np.sqrt(1.-tx**2.-ty**2.)
#geocentric coordinates of target
xyz = self.xyz0 + rr*(tx*self.ux + ty*self.uy + tz*self.uo)
[r,lat,lon,aspect] = self.aspect_angle(year,xyz)
[dec,ha] = self.xyz2dec_ha(xyz - self.xyz0)
return r,lon,lat,dec,ha,aspect
def aspect_elaz(self,year,rr,el,az):
# returns magnetic aspect angle and geocentric coordinates of a target
# tracked by jro at
# range rr (km)
# elevation el (rad above local tangent plane to ellipsoid)
# azimuth az (rad east of local north)
tx = np.cos(el) * np.sin(az) # direction cosines wrt east and north
ty = np.cos(el) * np.cos(az)
tz = np.sin(el)
#geocentric coordinates of target :
xyz = self.xyz0 + rr*(tx * self.east0 + ty*self.north0+tz*self.zenith0)
[r,lat,lon,aspect] = self.aspect_angle(year,xyz)
[dec,ha] = xyz2dec_ha(xyz - self.xyz0)
return r,lon,lat,dec,ha,aspect
| [
"numpy.sqrt",
"numpy.cross",
"numpy.sin",
"pyigrf.igrf.igrf_B",
"numpy.array",
"numpy.dot",
"numpy.arctan2",
"numpy.cos",
"numpy.finfo",
"numpy.arctan"
] | [((1506, 1534), 'numpy.sqrt', 'np.sqrt', (['(x ** 2.0 + y ** 2.0)'], {}), '(x ** 2.0 + y ** 2.0)\n', (1513, 1534), True, 'import numpy as np\n'), ((1535, 1551), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (1545, 1551), True, 'import numpy as np\n'), ((1559, 1575), 'numpy.arctan2', 'np.arctan2', (['z', 'p'], {}), '(z, p)\n', (1569, 1575), True, 'import numpy as np\n'), ((3290, 3341), 'pyigrf.igrf.igrf_B', 'igrf.igrf_B', (['year', '(r - a_igrf)', '(lon / deg)', '(lat / deg)'], {}), '(year, r - a_igrf, lon / deg, lat / deg)\n', (3301, 3341), False, 'from pyigrf import igrf\n'), ((4193, 4208), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (4201, 4208), True, 'import numpy as np\n'), ((4689, 4721), 'numpy.sqrt', 'np.sqrt', (['(a_WGS ** 2 - b_WGS ** 2)'], {}), '(a_WGS ** 2 - b_WGS ** 2)\n', (4696, 4721), True, 'import numpy as np\n'), ((1138, 1149), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (1144, 1149), True, 'import numpy as np\n'), ((1175, 1186), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (1181, 1186), True, 'import numpy as np\n'), ((1221, 1233), 'numpy.sin', 'np.sin', (['latg'], {}), '(latg)\n', (1227, 1233), True, 'import numpy as np\n'), ((1725, 1787), 'numpy.arctan', 'np.arctan', (['(z / (p * (1.0 - n * eccentricity ** 2.0 / (n + h))))'], {}), '(z / (p * (1.0 - n * eccentricity ** 2.0 / (n + h))))\n', (1734, 1787), True, 'import numpy as np\n'), ((2473, 2495), 'numpy.array', 'np.array', (['[xr, yr, zr]'], {}), '([xr, yr, zr])\n', (2481, 2495), True, 'import numpy as np\n'), ((7534, 7556), 'numpy.array', 'np.array', (['[x0, y0, z0]'], {}), '([x0, y0, z0])\n', (7542, 7556), True, 'import numpy as np\n'), ((7569, 7587), 'numpy.array', 'np.array', (['[x0, y0]'], {}), '([x0, y0])\n', (7577, 7587), True, 'import numpy as np\n'), ((7970, 8004), 'numpy.cross', 'np.cross', (['self.zenith0', 'self.east0'], {}), '(self.zenith0, self.east0)\n', (7978, 8004), True, 'import numpy as np\n'), ((8328, 8359), 'numpy.cross', 'np.cross', 
(['self.zenith0', 'self.uo'], {}), '(self.zenith0, self.uo)\n', (8336, 8359), True, 'import numpy as np\n'), ((8529, 8555), 'numpy.cross', 'np.cross', (['self.uo', 'self.ux'], {}), '(self.uo, self.ux)\n', (8537, 8555), True, 'import numpy as np\n'), ((9817, 9855), 'numpy.sqrt', 'np.sqrt', (['(vec[0] ** 2.0 + vec[1] ** 2.0)'], {}), '(vec[0] ** 2.0 + vec[1] ** 2.0)\n', (9824, 9855), True, 'import numpy as np\n'), ((10263, 10301), 'numpy.sqrt', 'np.sqrt', (['(xyz[0] ** 2.0 + xyz[1] ** 2.0)'], {}), '(xyz[0] ** 2.0 + xyz[1] ** 2.0)\n', (10270, 10301), True, 'import numpy as np\n'), ((10310, 10331), 'numpy.arctan2', 'np.arctan2', (['xyz[2]', 'p'], {}), '(xyz[2], p)\n', (10320, 10331), True, 'import numpy as np\n'), ((10345, 10371), 'numpy.arctan2', 'np.arctan2', (['xyz[1]', 'xyz[0]'], {}), '(xyz[1], xyz[0])\n', (10355, 10371), True, 'import numpy as np\n'), ((10648, 10699), 'pyigrf.igrf.igrf_B', 'igrf.igrf_B', (['year', '(r - a_igrf)', '(lon / deg)', '(lat / deg)'], {}), '(year, r - a_igrf, lon / deg, lat / deg)\n', (10659, 10699), False, 'from pyigrf import igrf\n'), ((10713, 10735), 'numpy.array', 'np.array', (['[bX, bY, bZ]'], {}), '([bX, bY, bZ])\n', (10721, 10735), True, 'import numpy as np\n'), ((11144, 11180), 'numpy.sqrt', 'np.sqrt', (['(1.0 - tx ** 2.0 - ty ** 2.0)'], {}), '(1.0 - tx ** 2.0 - ty ** 2.0)\n', (11151, 11180), True, 'import numpy as np\n'), ((11857, 11867), 'numpy.sin', 'np.sin', (['el'], {}), '(el)\n', (11863, 11867), True, 'import numpy as np\n'), ((1125, 1137), 'numpy.cos', 'np.cos', (['latg'], {}), '(latg)\n', (1131, 1137), True, 'import numpy as np\n'), ((1162, 1174), 'numpy.cos', 'np.cos', (['latg'], {}), '(latg)\n', (1168, 1174), True, 'import numpy as np\n'), ((2446, 2473), 'numpy.array', 'np.array', (['[east, north, up]'], {}), '([east, north, up])\n', (2454, 2473), True, 'import numpy as np\n'), ((7608, 7624), 'numpy.dot', 'np.dot', (['xy0', 'xy0'], {}), '(xy0, xy0)\n', (7614, 7624), True, 'import numpy as np\n'), ((7679, 7701), 
'numpy.array', 'np.array', (['[-y0, x0, 0]'], {}), '([-y0, x0, 0])\n', (7687, 7701), True, 'import numpy as np\n'), ((9862, 9883), 'numpy.arctan2', 'np.arctan2', (['vec[2]', 'p'], {}), '(vec[2], p)\n', (9872, 9883), True, 'import numpy as np\n'), ((10232, 10248), 'numpy.dot', 'np.dot', (['xyz', 'xyz'], {}), '(xyz, xyz)\n', (10238, 10248), True, 'import numpy as np\n'), ((10442, 10474), 'numpy.array', 'np.array', (['[-xyz[1], xyz[0], 0.0]'], {}), '([-xyz[1], xyz[0], 0.0])\n', (10450, 10474), True, 'import numpy as np\n'), ((10492, 10514), 'numpy.cross', 'np.cross', (['east', 'radial'], {}), '(east, radial)\n', (10500, 10514), True, 'import numpy as np\n'), ((10843, 10860), 'numpy.dot', 'np.dot', (['u_B', 'u_rr'], {}), '(u_B, u_rr)\n', (10849, 10860), True, 'import numpy as np\n'), ((11743, 11753), 'numpy.cos', 'np.cos', (['el'], {}), '(el)\n', (11749, 11753), True, 'import numpy as np\n'), ((11756, 11766), 'numpy.sin', 'np.sin', (['az'], {}), '(az)\n', (11762, 11766), True, 'import numpy as np\n'), ((11820, 11830), 'numpy.cos', 'np.cos', (['el'], {}), '(el)\n', (11826, 11830), True, 'import numpy as np\n'), ((11833, 11843), 'numpy.cos', 'np.cos', (['az'], {}), '(az)\n', (11839, 11843), True, 'import numpy as np\n'), ((1698, 1710), 'numpy.cos', 'np.cos', (['latp'], {}), '(latp)\n', (1704, 1710), True, 'import numpy as np\n'), ((2284, 2295), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (2290, 2295), True, 'import numpy as np\n'), ((2383, 2395), 'numpy.cos', 'np.cos', (['latg'], {}), '(latg)\n', (2389, 2395), True, 'import numpy as np\n'), ((2409, 2421), 'numpy.sin', 'np.sin', (['latg'], {}), '(latg)\n', (2415, 2421), True, 'import numpy as np\n'), ((7928, 7945), 'numpy.sin', 'np.sin', (['self.lat0'], {}), '(self.lat0)\n', (7934, 7945), True, 'import numpy as np\n'), ((8296, 8307), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (8302, 8307), True, 'import numpy as np\n'), ((8437, 8461), 'numpy.dot', 'np.dot', (['self.ux', 'self.ux'], {}), '(self.ux, self.ux)\n', 
(8443, 8461), True, 'import numpy as np\n'), ((9788, 9804), 'numpy.dot', 'np.dot', (['vec', 'vec'], {}), '(vec, vec)\n', (9794, 9804), True, 'import numpy as np\n'), ((10571, 10585), 'numpy.dot', 'np.dot', (['rr', 'rr'], {}), '(rr, rr)\n', (10577, 10585), True, 'import numpy as np\n'), ((10803, 10815), 'numpy.dot', 'np.dot', (['B', 'B'], {}), '(B, B)\n', (10809, 10815), True, 'import numpy as np\n'), ((1900, 1911), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (1906, 1911), True, 'import numpy as np\n'), ((2199, 2210), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (2205, 2210), True, 'import numpy as np\n'), ((2225, 2236), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (2231, 2236), True, 'import numpy as np\n'), ((2237, 2249), 'numpy.cos', 'np.cos', (['latg'], {}), '(latg)\n', (2243, 2249), True, 'import numpy as np\n'), ((2250, 2261), 'numpy.cos', 'np.cos', (['lon'], {}), '(lon)\n', (2256, 2261), True, 'import numpy as np\n'), ((2310, 2321), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (2316, 2321), True, 'import numpy as np\n'), ((2322, 2334), 'numpy.cos', 'np.cos', (['latg'], {}), '(latg)\n', (2328, 2334), True, 'import numpy as np\n'), ((2335, 2346), 'numpy.sin', 'np.sin', (['lon'], {}), '(lon)\n', (2341, 2346), True, 'import numpy as np\n'), ((7794, 7811), 'numpy.cos', 'np.cos', (['self.lat0'], {}), '(self.lat0)\n', (7800, 7811), True, 'import numpy as np\n'), ((7814, 7831), 'numpy.cos', 'np.cos', (['self.lon0'], {}), '(self.lon0)\n', (7820, 7831), True, 'import numpy as np\n'), ((7861, 7878), 'numpy.cos', 'np.cos', (['self.lat0'], {}), '(self.lat0)\n', (7867, 7878), True, 'import numpy as np\n'), ((7881, 7898), 'numpy.sin', 'np.sin', (['self.lon0'], {}), '(self.lon0)\n', (7887, 7898), True, 'import numpy as np\n'), ((8184, 8195), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (8190, 8195), True, 'import numpy as np\n'), ((8198, 8226), 'numpy.cos', 'np.cos', (['(ha / 4.0 + self.lon0)'], {}), '(ha / 4.0 + self.lon0)\n', (8204, 8226), True, 
'import numpy as np\n'), ((8255, 8266), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (8261, 8266), True, 'import numpy as np\n'), ((8269, 8297), 'numpy.sin', 'np.sin', (['(ha / 4.0 + self.lon0)'], {}), '(ha / 4.0 + self.lon0)\n', (8275, 8297), True, 'import numpy as np\n'), ((9940, 9966), 'numpy.arctan2', 'np.arctan2', (['vec[1]', 'vec[0]'], {}), '(vec[1], vec[0])\n', (9950, 9966), True, 'import numpy as np\n'), ((1042, 1054), 'numpy.sin', 'np.sin', (['latg'], {}), '(latg)\n', (1048, 1054), True, 'import numpy as np\n'), ((2212, 2224), 'numpy.sin', 'np.sin', (['latg'], {}), '(latg)\n', (2218, 2224), True, 'import numpy as np\n'), ((2297, 2309), 'numpy.sin', 'np.sin', (['latg'], {}), '(latg)\n', (2303, 2309), True, 'import numpy as np\n'), ((1668, 1680), 'numpy.sin', 'np.sin', (['latp'], {}), '(latp)\n', (1674, 1680), True, 'import numpy as np\n'), ((1867, 1878), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (1873, 1878), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import logging
import numpy as np
import pandas as pd
from library import Imputation as iptt
SAMPLING = np.array([2, 4, 4, 4, 5, 5, 7, 9])
BENIGN = np.array([4., 28., 1., 1., 3.])
MALIGNANT = np.array([5., 60., 2., 4., 3.])
BENIGN_COUNT = np.array([1., 1., 1., 1., 1.])
MALIGNANT_COUNT = np.array([4., 4., 4., 4., 2.])
PATH = ("dataset/mammographic/source.csv")
HEADER = ["BI-RADS","AGE","SHAPE","MARGIN","DENSITY","SEVERITY"]
try:
df = (pd.read_csv(PATH, names = HEADER)).head()
except:
logging.warning("pandas did not find the dataset source")
finally:
del PATH
del HEADER
def test_benign_avg():
assert np.array_equal(BENIGN, ((iptt(df,2).avg())[0]))
def test_malignant_avg():
assert np.array_equal(MALIGNANT, ((iptt(df,2).avg())[1]))
def test_standard_deviation():
avg = 0
std = 0
size = len(SAMPLING)
for i in range(size):
avg += SAMPLING[i] / size
for i in range(size):
std += (SAMPLING[i] - avg) ** 2
std = ((std / size) ** .5)
assert std == 2
| [
"numpy.array",
"logging.warning",
"pandas.read_csv",
"library.Imputation"
] | [((131, 165), 'numpy.array', 'np.array', (['[2, 4, 4, 4, 5, 5, 7, 9]'], {}), '([2, 4, 4, 4, 5, 5, 7, 9])\n', (139, 165), True, 'import numpy as np\n'), ((176, 212), 'numpy.array', 'np.array', (['[4.0, 28.0, 1.0, 1.0, 3.0]'], {}), '([4.0, 28.0, 1.0, 1.0, 3.0])\n', (184, 212), True, 'import numpy as np\n'), ((220, 256), 'numpy.array', 'np.array', (['[5.0, 60.0, 2.0, 4.0, 3.0]'], {}), '([5.0, 60.0, 2.0, 4.0, 3.0])\n', (228, 256), True, 'import numpy as np\n'), ((267, 302), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0])\n', (275, 302), True, 'import numpy as np\n'), ((317, 352), 'numpy.array', 'np.array', (['[4.0, 4.0, 4.0, 4.0, 2.0]'], {}), '([4.0, 4.0, 4.0, 4.0, 2.0])\n', (325, 352), True, 'import numpy as np\n'), ((528, 585), 'logging.warning', 'logging.warning', (['"""pandas did not find the dataset source"""'], {}), "('pandas did not find the dataset source')\n", (543, 585), False, 'import logging\n'), ((474, 505), 'pandas.read_csv', 'pd.read_csv', (['PATH'], {'names': 'HEADER'}), '(PATH, names=HEADER)\n', (485, 505), True, 'import pandas as pd\n'), ((686, 697), 'library.Imputation', 'iptt', (['df', '(2)'], {}), '(df, 2)\n', (690, 697), True, 'from library import Imputation as iptt\n'), ((776, 787), 'library.Imputation', 'iptt', (['df', '(2)'], {}), '(df, 2)\n', (780, 787), True, 'from library import Imputation as iptt\n')] |
""" Posterior predictions
1. Load data
2. Run simulations
- First experiment:
- With perseveration (3 cycles)
- Without perseveration (3 cycles)
- With perseveration (1 cycle) to plot single-trial updates and predictions
- Follow-up experiment:
- With perseveration (3 cycles)
- With perseveration (1 cycle) to plot single-trial updates and predictions
"""
import numpy as np
import pandas as pd
from al_simulation import simulation_loop
# Set random number generator for reproducible results
np.random.seed(123)
# ------------
# 1. Load data
# ------------
# Data first experiment
df_exp1 = pd.read_pickle('al_data/data_prepr_1.pkl')
# Data follow-up experiment
df_exp2 = pd.read_pickle('al_data/data_prepr_2.pkl')
# Parameter estimates first experiment
model_exp1 = pd.read_pickle('al_data/estimates_first_exp_25_sp.pkl')
# Parameter estimates second experiment
model_exp2 = pd.read_pickle('al_data/estimates_follow_up_exp_25_sp.pkl')
# ------------------
# 2. Run simulations
# ------------------
# First experiment
# ----------------
# Extract ID's and number of participants
sub_sel = df_exp1['subj_num'] # ID for each trial
n_subj = len(list(set(sub_sel))) # Number of participants
# First experiment with perseveration
n_sim = 3 # determine number of simulation cycles
sim_pers = True
all_pers, all_est_errs = simulation_loop(df_exp1, model_exp1, n_subj, sim_pers,
which_exp=1, sim_bucket_bias=False, n_sim=n_sim)
all_pers.to_pickle('al_data/postpred_exp1_pers.pkl')
all_est_errs.to_pickle('al_data/postpred_exp1_est_err.pkl')
# First experiment without perseveration
sim_pers = False
_, all_est_errs = simulation_loop(df_exp1, model_exp1, n_subj, sim_pers,
which_exp=1, sim_bucket_bias=False, n_sim=n_sim)
all_est_errs.to_pickle('al_data/hyp_est_errs_exp1_no_pers.pkl')
# First experiment, one cycle with perseveration to plot actual and predicted single-trial updates and predictions
n_sim = 1
sim_pers = True
_, _ = simulation_loop(df_exp1, model_exp1, n_subj, sim_pers, which_exp=1,
sim_bucket_bias=False, n_sim=n_sim, plot_data=True)
# Second experiment
# -----------------
# Extract ID's and number of participants
sub_sel = df_exp2['subj_num'] # ID for each trial
n_subj = len(list(set(sub_sel))) # number of participants
# Second experiment with perseveration
n_sim = 3
sim_pers = True
all_pers, all_est_errs = simulation_loop(df_exp2, model_exp2, n_subj, sim_pers,
which_exp=2, sim_bucket_bias=True, n_sim=n_sim)
all_pers.to_pickle('al_data/postpred_exp2_pers.pkl')
all_est_errs.to_pickle('al_data/postpred_exp2_est_err.pkl')
# Second experiment, one cycle with perseveration to plot actual and predicted single-trial updates and predictions
n_sim = 1
sim_pers = True
all_pers, all_est_errs = simulation_loop(df_exp2, model_exp2, n_subj, sim_pers,
which_exp=2, sim_bucket_bias=True, n_sim=n_sim, plot_data=True)
| [
"pandas.read_pickle",
"al_simulation.simulation_loop",
"numpy.random.seed"
] | [((544, 563), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (558, 563), True, 'import numpy as np\n'), ((645, 687), 'pandas.read_pickle', 'pd.read_pickle', (['"""al_data/data_prepr_1.pkl"""'], {}), "('al_data/data_prepr_1.pkl')\n", (659, 687), True, 'import pandas as pd\n'), ((727, 769), 'pandas.read_pickle', 'pd.read_pickle', (['"""al_data/data_prepr_2.pkl"""'], {}), "('al_data/data_prepr_2.pkl')\n", (741, 769), True, 'import pandas as pd\n'), ((823, 878), 'pandas.read_pickle', 'pd.read_pickle', (['"""al_data/estimates_first_exp_25_sp.pkl"""'], {}), "('al_data/estimates_first_exp_25_sp.pkl')\n", (837, 878), True, 'import pandas as pd\n'), ((933, 992), 'pandas.read_pickle', 'pd.read_pickle', (['"""al_data/estimates_follow_up_exp_25_sp.pkl"""'], {}), "('al_data/estimates_follow_up_exp_25_sp.pkl')\n", (947, 992), True, 'import pandas as pd\n'), ((1379, 1486), 'al_simulation.simulation_loop', 'simulation_loop', (['df_exp1', 'model_exp1', 'n_subj', 'sim_pers'], {'which_exp': '(1)', 'sim_bucket_bias': '(False)', 'n_sim': 'n_sim'}), '(df_exp1, model_exp1, n_subj, sim_pers, which_exp=1,\n sim_bucket_bias=False, n_sim=n_sim)\n', (1394, 1486), False, 'from al_simulation import simulation_loop\n'), ((1714, 1821), 'al_simulation.simulation_loop', 'simulation_loop', (['df_exp1', 'model_exp1', 'n_subj', 'sim_pers'], {'which_exp': '(1)', 'sim_bucket_bias': '(False)', 'n_sim': 'n_sim'}), '(df_exp1, model_exp1, n_subj, sim_pers, which_exp=1,\n sim_bucket_bias=False, n_sim=n_sim)\n', (1729, 1821), False, 'from al_simulation import simulation_loop\n'), ((2065, 2188), 'al_simulation.simulation_loop', 'simulation_loop', (['df_exp1', 'model_exp1', 'n_subj', 'sim_pers'], {'which_exp': '(1)', 'sim_bucket_bias': '(False)', 'n_sim': 'n_sim', 'plot_data': '(True)'}), '(df_exp1, model_exp1, n_subj, sim_pers, which_exp=1,\n sim_bucket_bias=False, n_sim=n_sim, plot_data=True)\n', (2080, 2188), False, 'from al_simulation import simulation_loop\n'), ((2493, 2599), 
'al_simulation.simulation_loop', 'simulation_loop', (['df_exp2', 'model_exp2', 'n_subj', 'sim_pers'], {'which_exp': '(2)', 'sim_bucket_bias': '(True)', 'n_sim': 'n_sim'}), '(df_exp2, model_exp2, n_subj, sim_pers, which_exp=2,\n sim_bucket_bias=True, n_sim=n_sim)\n', (2508, 2599), False, 'from al_simulation import simulation_loop\n'), ((2918, 3040), 'al_simulation.simulation_loop', 'simulation_loop', (['df_exp2', 'model_exp2', 'n_subj', 'sim_pers'], {'which_exp': '(2)', 'sim_bucket_bias': '(True)', 'n_sim': 'n_sim', 'plot_data': '(True)'}), '(df_exp2, model_exp2, n_subj, sim_pers, which_exp=2,\n sim_bucket_bias=True, n_sim=n_sim, plot_data=True)\n', (2933, 3040), False, 'from al_simulation import simulation_loop\n')] |
# coding: utf-8
#
# Project: X-ray image reader
# https://github.com/silx-kit/fabio
#
# Copyright (C) 2016 Univeristy Köln, Germany
#
# Principal author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Princeton instrument SPE image reader for FabIO
"""
# Get ready for python3:
from __future__ import with_statement, print_function, division
__authors__ = ["<NAME>"]
__contact__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Clemens Prescher"
__date__ = "24/07/2017"
import logging
logger = logging.getLogger(__name__)
import datetime
from xml.dom.minidom import parseString
import numpy as np
from numpy.polynomial.polynomial import polyval
from .fabioimage import FabioImage
class SpeImage(FabioImage):
"""FabIO image class for Images for Princeton/SPE detector
Put some documentation here
"""
DATA_TYPES = {0: np.float32,
1: np.int32,
2: np.int16,
3: np.uint16}
DESCRIPTION = "Princeton instrument SPE file format"
DEFAULT_EXTENTIONS = ["spe"]
def _readheader(self, infile):
"""
Read and decode the header of an image:
:param infile: Opened python file (can be stringIO or bipped file)
"""
self.header['version'] = self._get_version(infile)
self.header['data_type'] = self._read_at(infile, 108, 1, np.uint16)[0]
self.header['x_dim'] = int(self._read_at(infile, 42, 1, np.int16)[0])
self.header['y_dim'] = int(self._read_at(infile, 656, 1, np.int16)[0])
self.header['num_frames'] = self._read_at(infile, 1446, 1, np.int32)[0]
if self.header['version'] == 2:
self.header['time'] = self._read_date_time_from_header(infile)
self.header['x_calibration'] = self._read_calibration_from_header(infile)
self.header['exposure_time'] = self._read_at(infile, 10, 1, np.float32)[0]
self.header['detector'] = 'unspecified'
self.header['grating'] = str(self._read_at(infile, 650, 1, np.float32)[0])
self.header['center_wavelength'] = float(self._read_at(infile, 72, 1, np.float32)[0])
# # self._read_roi_from_header()
# self._read_num_frames_from_header()
# self._read_num_combined_frames_from_header()
elif self.header['version'] == 3:
xml_string = self._get_xml_string(infile)
dom = self._create_dom_from_xml(xml_string)
self.header['time'] = self._read_date_time_from_dom(dom)
self.header['roi'] = self._read_roi_from_dom(dom)
self.header['x_calibration'] = self._read_calibration_from_dom(dom)
self.header['exposure_time'] = self._read_exposure_from_dom(dom)
self.header['detector'] = self._read_detector_from_dom(dom)
self.header['grating'] = self._read_grating_from_dom(dom, infile)
self.header['center_wavelength'] = self._read_center_wavelength_from_dom(dom, infile)
self.header = self.check_header(self.header)
def read(self, fname, frame=None):
"""
try to read image
:param fname: name of the file
:param frame:
"""
self.resetvals()
with self._open(fname, 'rb') as infile:
self._readheader(infile)
# read the image data and declare
self.data = self._read_data(infile, frame)
return self
def _get_version(self, infile):
self.xml_offset = self._read_at(infile, 678, 1, np.long)[0]
if self.xml_offset == 0:
return 2
else:
return 3
def _read_date_time_from_header(self, infile):
"""Reads the collection time from the header into the date_time field"""
raw_date = self._read_at(infile, 20, 9, np.int8)
raw_time = self._read_at(infile, 172, 6, np.int8)
str_date = ''.join([chr(i) for i in raw_date])
str_date += ''.join([chr(i) for i in raw_time])
date_time = datetime.datetime.strptime(str_date, "%d%b%Y%H%M%S")
return date_time.strftime("%m/%d/%Y %H:%M:%S")
def _read_date_time_from_dom(self, dom):
"""Reads the time of collection and saves it date_time field"""
date_time_str = dom.getElementsByTagName('Origin')[0].getAttribute('created')
try:
date_time = datetime.datetime.strptime(date_time_str[:-7], "%Y-%m-%dT%H:%M:%S.%f")
return date_time.strftime("%m/%d/%Y %H:%M:%S.%f")
except ValueError:
date_time = datetime.datetime.strptime(date_time_str[:-6], "%Y-%m-%dT%H:%M:%S")
return date_time.strftime("%m/%d/%Y %H:%M:%S")
def _read_calibration_from_header(self, infile):
"""Reads the calibration from the header into the x_calibration field"""
x_polynocoeff = self._read_at(infile, 3263, 6, np.double)
x_val = np.arange(self.header['x_dim']) + 1
return np.array(polyval(x_val, x_polynocoeff))
def _read_calibration_from_dom(self, dom):
"""Reads the x calibration of the image from the xml footer and saves
it in the x_calibration field"""
spe_format = dom.childNodes[0]
calibrations = spe_format.getElementsByTagName('Calibrations')[0]
wavelengthmapping = calibrations.getElementsByTagName('WavelengthMapping')[0]
wavelengths = wavelengthmapping.getElementsByTagName('Wavelength')[0]
wavelength_values = wavelengths.childNodes[0]
x_calibration = np.array([float(i) for i in wavelength_values.toxml().split(',')])
return x_calibration[self.header['roi'][0]:self.header['roi'][1]]
def _read_num_frames_from_header(self, infile):
self.num_frames = self._read_at(infile, 1446, 1, np.int32)[0]
def _get_xml_string(self, infile):
"""Reads out the xml string from the file end"""
if "size" in dir(infile):
size = infile.size
elif "measure_size" in dir(infile):
size = infile.measure_size()
else:
raise RuntimeError("Unable to guess the actual size of the file")
xml_size = size - self.xml_offset
xml = self._read_at(infile, self.xml_offset, xml_size, np.byte)
return ''.join([chr(i) for i in xml])
# if self.debug:
# fid = open(self.filename + '.xml', 'w')
# for line in self.xml_string:
# fid.write(line)
# fid.close()
def _create_dom_from_xml(self, xml_string):
"""Creates a DOM representation of the xml footer and saves it in the
dom field"""
return parseString(xml_string)
def _read_exposure_from_dom(self, dom):
"""Reads th exposure time of the experiment into the exposure_time field"""
if len(dom.getElementsByTagName('Experiment')) != 1: # check if it is a real v3.0 file
if len(dom.getElementsByTagName('ShutterTiming')) == 1: # check if it is a pixis detector
exposure_time = dom.getElementsByTagName('ExposureTime')[0].childNodes[0]
return np.float(exposure_time.toxml()) / 1000.0
else:
exposure_time = dom.getElementsByTagName('ReadoutControl')[0]. \
getElementsByTagName('Time')[0].childNodes[0].nodeValue
self.header['accumulations'] = dom.getElementsByTagName('Accumulations')[0].childNodes[0].nodeValue
return np.float(exposure_time) * np.float(self.header['accumulations'])
else: # this is searching for legacy experiment:
self._exposure_time = dom.getElementsByTagName('LegacyExperiment')[0]. \
getElementsByTagName('Experiment')[0]. \
getElementsByTagName('CollectionParameters')[0]. \
getElementsByTagName('Exposure')[0].attributes["value"].value
return np.float(self._exposure_time.split()[0])
def _read_detector_from_dom(self, dom):
"""Reads the detector information from the dom object"""
self._camera = dom.getElementsByTagName('Camera')
if len(self._camera) >= 1:
return self._camera[0].getAttribute('model')
else:
return 'unspecified'
def _read_grating_from_dom(self, dom, infile):
"""Reads the type of grating from the dom Model"""
try:
grating = dom.getElementsByTagName('Devices')[0]. \
getElementsByTagName('Spectrometer')[0]. \
getElementsByTagName('Grating')[0]. \
getElementsByTagName('Selected')[0].childNodes[0].toxml()
return grating.split('[')[1].split(']')[0].replace(',', ' ')
except IndexError:
# try from header:
return str(self._read_at(infile, 650, 1, np.float32)[0])
def _read_center_wavelength_from_dom(self, dom, infile):
"""Reads the center wavelength from the dom Model and saves it center_wavelength field"""
try:
center_wavelength = dom.getElementsByTagName('Devices')[0]. \
getElementsByTagName('Spectrometer')[0]. \
getElementsByTagName('Grating')[0]. \
getElementsByTagName('CenterWavelength')[0]. \
childNodes[0].toxml()
return float(center_wavelength)
except IndexError:
# try from header
return float(self._read_at(infile, 72, 1, np.float32)[0])
def _read_roi_from_dom(self, dom):
"""Reads the ROIs information defined in the SPE file.
Depending on the modus it will read out:
For CustomRegions
roi_x, roi_y, roi_width, roi_height, roi_x_binning, roi_y_binning
For FullSensor
roi_x,roi_y, roi_width, roi_height"""
try:
roi_modus = str(dom.getElementsByTagName('ReadoutControl')[0].
getElementsByTagName('RegionsOfInterest')[0].
getElementsByTagName('Selection')[0].
childNodes[0].toxml())
if roi_modus == 'CustomRegions':
roi_dom = dom.getElementsByTagName('ReadoutControl')[0]. \
getElementsByTagName('RegionsOfInterest')[0]. \
getElementsByTagName('CustomRegions')[0]. \
getElementsByTagName('RegionOfInterest')[0]
roi_x = int(roi_dom.attributes['x'].value)
roi_y = int(roi_dom.attributes['y'].value)
roi_width = int(roi_dom.attributes['width'].value)
roi_height = int(roi_dom.attributes['height'].value)
else:
roi_x = 0
roi_y = 0
roi_width = self.header['x_dim']
roi_height = self.header['y_dim']
except IndexError:
roi_x = 0
roi_y = 0
roi_width = self.header['x_dim']
roi_height = self.header['y_dim']
return roi_x, roi_x + roi_width, roi_y, roi_y + roi_height
def _read_at(self, infile, pos, size, ntype):
infile.seek(pos)
dtype = np.dtype(ntype)
bp = dtype.itemsize
data = infile.read(size * bp)
return np.fromstring(data, dtype)
def _read_data(self, infile, frame=None):
if frame is None:
frame = 0
dtype = self.DATA_TYPES.get(self.header['data_type'])
if dtype is None:
raise RuntimeError("Unsuported data type: %s" % self.header['data_type'])
number_size = np.dtype(dtype).itemsize
frame_size = self.header['x_dim'] * self.header['y_dim'] * number_size
return self._read_frame(infile, 4100 + frame * frame_size)
def _read_frame(self, infile, pos=None):
"""Reads in a frame at a specific binary position. The following header parameters have to
be predefined before calling this function:
datatype - either 0,1,2,3 for float32, int32, int16 or uint16
x_dim, y_dim - being the dimensions.
"""
if pos is None:
pos = infile.tell()
dtype = self.DATA_TYPES.get(self.header['data_type'])
if dtype is None:
return None
data = self._read_at(infile, pos, self.header['x_dim'] * self.header['y_dim'], dtype)
return data.reshape((self.header['y_dim'], self.header['x_dim']))
# this is not compatibility with old code:
speimage = SpeImage
| [
"logging.getLogger",
"numpy.float",
"datetime.datetime.strptime",
"xml.dom.minidom.parseString",
"numpy.polynomial.polynomial.polyval",
"numpy.dtype",
"numpy.fromstring",
"numpy.arange"
] | [((1579, 1606), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1596, 1606), False, 'import logging\n'), ((5062, 5114), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['str_date', '"""%d%b%Y%H%M%S"""'], {}), "(str_date, '%d%b%Y%H%M%S')\n", (5088, 5114), False, 'import datetime\n'), ((7660, 7683), 'xml.dom.minidom.parseString', 'parseString', (['xml_string'], {}), '(xml_string)\n', (7671, 7683), False, 'from xml.dom.minidom import parseString\n'), ((12126, 12141), 'numpy.dtype', 'np.dtype', (['ntype'], {}), '(ntype)\n', (12134, 12141), True, 'import numpy as np\n'), ((12223, 12249), 'numpy.fromstring', 'np.fromstring', (['data', 'dtype'], {}), '(data, dtype)\n', (12236, 12249), True, 'import numpy as np\n'), ((5411, 5481), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_time_str[:-7]', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(date_time_str[:-7], '%Y-%m-%dT%H:%M:%S.%f')\n", (5437, 5481), False, 'import datetime\n'), ((5939, 5970), 'numpy.arange', 'np.arange', (["self.header['x_dim']"], {}), "(self.header['x_dim'])\n", (5948, 5970), True, 'import numpy as np\n'), ((5999, 6028), 'numpy.polynomial.polynomial.polyval', 'polyval', (['x_val', 'x_polynocoeff'], {}), '(x_val, x_polynocoeff)\n', (6006, 6028), False, 'from numpy.polynomial.polynomial import polyval\n'), ((12541, 12556), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (12549, 12556), True, 'import numpy as np\n'), ((5595, 5662), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_time_str[:-6]', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(date_time_str[:-6], '%Y-%m-%dT%H:%M:%S')\n", (5621, 5662), False, 'import datetime\n'), ((8480, 8503), 'numpy.float', 'np.float', (['exposure_time'], {}), '(exposure_time)\n', (8488, 8503), True, 'import numpy as np\n'), ((8506, 8544), 'numpy.float', 'np.float', (["self.header['accumulations']"], {}), "(self.header['accumulations'])\n", (8514, 8544), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Cartopy implementation of TEC plotting in polar coordinates
# author: @mrinalghosh
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import numpy as np
import cartopy.feature as cfeature
import cartopy.crs as ccrs
import h5py
from argparse import ArgumentParser
from datetime import datetime
import os
from glob import glob
from sys import platform
# Month number -> short lowercase month name, used to build per-day output folders.
months = {1: 'jan', 2: 'feb', 3: 'mar', 4: 'apr', 5: 'may', 6: 'jun',
          7: 'jul', 8: 'aug', 9: 'sep', 10: 'oct', 11: 'nov', 12: 'dec'}
# CLI projection key -> [cartopy CRS instance, human-readable plot title].
projections = {'plate': [ccrs.PlateCarree(), 'Plate Carree'],
               'near': [ccrs.NearsidePerspective(), 'Nearside Perspective'],
               'polar': [ccrs.NorthPolarStereo(), 'Polar Stereo'],
               'mercator': [ccrs.Mercator(), 'Mercator'],
               'geostat': [ccrs.Geostationary(), 'Geostationary']}
# Names of all registered matplotlib colormaps (used to validate --cmap).
cmaps = plt.colormaps()
def _add_map_features(ax):
    """Draw the standard basemap layers: water/land below the data (zorder 1),
    borders/coastlines above it (zorder 3), plus gridlines."""
    ax.add_feature(cfeature.OCEAN, zorder=1)
    ax.add_feature(cfeature.LAKES, zorder=1)
    ax.add_feature(cfeature.RIVERS, zorder=1)
    ax.add_feature(cfeature.LAND, zorder=1)
    ax.add_feature(cfeature.BORDERS, zorder=3)
    ax.add_feature(cfeature.COASTLINE, zorder=3)
    ax.gridlines()


def _polar_boundary():
    """Return a circular matplotlib Path used to clip polar-stereo axes."""
    theta = np.linspace(0, 2 * np.pi, 100)
    center, radius = [0.5, 0.5], 0.5
    verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    return mpath.Path(verts * radius + center)


def _plot_polar_pair(lon, lat, im, cmin, cmax, cmap, circle):
    """Two-panel south/north polar-stereographic TEC figure with a shared
    colorbar; returns the figure."""
    fig = plt.figure()
    ax1 = plt.subplot(1, 2, 1, projection=ccrs.SouthPolarStereo())
    ax1.set_extent([-180, 180, -90, 0], ccrs.PlateCarree())
    ax1.title.set_text('South Polar Stereographic')
    _add_map_features(ax1)
    ax1.set_boundary(circle, transform=ax1.transAxes)
    ax1.pcolormesh(lon, lat, im, transform=ccrs.PlateCarree(),
                   vmin=cmin, vmax=cmax, cmap=cmap, zorder=2)
    ax2 = plt.subplot(1, 2, 2, projection=ccrs.NorthPolarStereo())
    ax2.set_extent([-180, 180, 90, 0], ccrs.PlateCarree())
    ax2.title.set_text('North Polar Stereographic')
    _add_map_features(ax2)
    ax2.set_boundary(circle, transform=ax2.transAxes)
    mesh = ax2.pcolormesh(lon, lat, im, transform=ccrs.PlateCarree(),
                          vmin=cmin, vmax=cmax, cmap=cmap, zorder=2)
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.7])
    fig.colorbar(mesh, cax=cbar_ax, label='Total Electron Concentration [TECu]')
    return fig


def _plot_flat(fig, lon, lat, im, cmin, cmax, cmap, proj):
    """Single-panel TEC map drawn into `fig` using `projections[proj]`."""
    ax = plt.subplot(1, 1, 1, projection=projections[proj][0])
    ax.title.set_text(projections[proj][1])
    _add_map_features(ax)
    mesh = ax.pcolormesh(lon, lat, im, transform=ccrs.PlateCarree(),
                         vmin=cmin, vmax=cmax, cmap=cmap, zorder=2)
    cb = fig.colorbar(mesh, shrink=0.5)
    cb.set_label('Total Electron Content [TECu]')


def _save_frame(fig, folder, stem, title=None):
    """Resize, optionally title, save `fig` as <stem>.png (200 dpi), close it."""
    if title is not None:
        fig.suptitle(title)
    fig.set_size_inches((10, 5), forward=False)
    fig.savefig(os.path.join(folder, '{}.png'.format(stem)), dpi=200)
    plt.close(fig)


def save(root: str = None,
         n: int = None,
         overlap: bool = False,
         slide: str = None,
         proj: str = None,
         lim: float = None,
         cmap: str = None):
    """Plot TEC maps from an HDF5 file and save them as PNG frames.

    Arguments:
        root {str} -- Path to the HDF5 file (must contain a 'GPSTEC' group
            with 'lat', 'lon', 'time' and 'im' datasets).
        n {int} -- Number of consecutive time slides averaged per frame.
        overlap {bool} -- Allow the averaging windows to overlap.
        slide {str} -- If given, plot only this single (0-based) slide index.
        proj {str} -- Projection key from `projections`; 'polar' produces a
            two-panel south/north stereographic figure.
        lim {float} -- Colour-scale maximum in TECu; 0 means auto-scale.
        cmap {str} -- Matplotlib colormap name (falls back to 'gist_ncar').
    """
    f = h5py.File(root, 'r')
    lat = f['GPSTEC']['lat']
    lon = f['GPSTEC']['lon']
    t = f['GPSTEC']['time']
    if cmap not in cmaps:
        cmap = 'gist_ncar'
    if slide is not None:
        slide = int(slide)
        if slide+n-1 <= len(t):
            # NOTE(review): the slice spans n+1 slides, not n, and the guard
            # above suggests slide+n-1 is meant to be the last index used --
            # confirm the intended window before changing the numerics.
            im = np.transpose(
                np.nanmean(f['GPSTEC']['im'][slide:slide+n+1], axis=0))
            time = datetime.fromtimestamp(t[slide])
            # Auto-scale the colormap to the data range.
            # NOTE(review): `lim` is not applied in the single-slide branch
            # in the original either -- confirm whether that is intended.
            cmax = np.nanmax(im)
            cmin = np.nanmin(im)
            if proj == 'polar':
                fig = _plot_polar_pair(lon, lat, im, cmin, cmax, cmap,
                                       _polar_boundary())
            else:
                # Was `elif proj is not 'polar'`: an identity comparison with
                # a str literal (implementation-dependent); `else` is the
                # intended meaning.
                fig = plt.figure(
                    'TEC ({})'.format(datetime.fromtimestamp(t[slide])))
                _plot_flat(fig, lon, lat, im, cmin, cmax, cmap, proj)
            print('Saving slide {}'.format(slide))
            # os.path.join is portable (replaces per-platform '\\'/'/' string
            # building) and makedirs(exist_ok=True) tolerates reruns, where
            # os.mkdir raised FileExistsError.
            folder = os.path.join(os.path.split(root)[0],
                                  '{}{}'.format(months[time.month], time.day))
            os.makedirs(folder, exist_ok=True)
            print(folder)
            _save_frame(fig, folder, str(slide).zfill(3),
                        title='{}'.format(time))
    else:
        t0 = datetime.fromtimestamp(t[0])
        folder = os.path.join(os.path.split(root)[0],
                              '{}{}'.format(months[t0.month], t0.day))
        os.makedirs(folder, exist_ok=True)
        if not overlap:
            # Non-overlapping windows: one frame per block of n slides.
            slides = [k * n for k in range(int(len(t) / n) - 1)]
        else:
            # Sliding window advancing one slide at a time.
            slides = list(range(len(t) - n + 1))
        circle = _polar_boundary() if proj == 'polar' else None
        for slide in slides:
            time = datetime.fromtimestamp(t[slide])
            im = np.transpose(
                np.nanmean(f['GPSTEC']['im'][slide:slide+n+1], axis=0))
            # `lim != 0` fixes the original `lim is not 0`, which is always
            # True for a float (so --lim 0 never enabled auto-scaling and, in
            # the polar branch, the auto-scale path read `im` before it was
            # assigned on the first iteration).
            if lim != 0:
                cmax = lim
                cmin = 0.0
            else:
                cmax = np.nanmax(im)
                cmin = np.nanmin(im)
            if proj == 'polar':
                fig = _plot_polar_pair(lon, lat, im, cmin, cmax, cmap, circle)
                title = '{}'.format(time)
            else:
                fig = plt.figure()
                _plot_flat(fig, lon, lat, im, cmin, cmax, cmap, proj)
                title = None
            print('Saving slide {}/{}...'.format(slide+1, len(t)))
            # zfill(3) keeps frame files lexically ordered in every branch
            # (the flat branch previously wrote unpadded names).
            _save_frame(fig, folder, str(slide).zfill(3), title=title)
            print(folder)
if __name__ == '__main__':
    # Command-line entry point: plot a single HDF5 file, or every conv*.h5
    # sitting next to the given path.
    parser = ArgumentParser()
    parser.add_argument('root', type=str, help='local address')
    parser.add_argument('-n', '--naverage', type=int, help='number of slides to include in average', default=1)
    parser.add_argument('--overlap', help='allow overlap of slides', action='store_true')
    parser.add_argument('-s', '--slide', type=str, help='slide number [0,239]')
    parser.add_argument('-p', '--proj', type=str, help='map projection - plate or polar', default='polar')
    parser.add_argument('-l', '--lim', type=float, help='absolute limit of colorbar - 0 for no absolute', default=70)
    parser.add_argument('-c', '--cmap', type=str, help='colormap', default=None)
    opts = parser.parse_args()
    root = opts.root
    if os.path.splitext(root)[1] in ('.h5', '.hdf5'):
        # A direct HDF5 path: plot just that file.
        save(root=opts.root, n=opts.naverage, overlap=opts.overlap,
             slide=opts.slide, proj=opts.proj, lim=opts.lim, cmap=opts.cmap)
    else:
        basedir = os.path.split(root)[0]
        if platform == 'win32':
            flist = sorted(glob(basedir + '\\conv*.h5'))
        elif platform in ['linux', 'linux2']:
            flist = sorted(glob(basedir + '/conv*.h5'))
        if len(flist) > 0:
            for file in flist:
                save(file, n=opts.naverage, overlap=opts.overlap,
                     slide=opts.slide, proj=opts.proj, lim=opts.lim,
                     cmap=opts.cmap)
| [
"numpy.nanmean",
"numpy.sin",
"cartopy.crs.NorthPolarStereo",
"matplotlib.path.Path",
"numpy.reshape",
"argparse.ArgumentParser",
"os.path.split",
"matplotlib.pyplot.close",
"numpy.linspace",
"cartopy.crs.Mercator",
"cartopy.crs.NearsidePerspective",
"matplotlib.pyplot.gcf",
"os.path.splitex... | [((874, 889), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (887, 889), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1115), 'h5py.File', 'h5py.File', (['root', '"""r"""'], {}), "(root, 'r')\n", (1104, 1115), False, 'import h5py\n'), ((11042, 11058), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (11056, 11058), False, 'from argparse import ArgumentParser\n'), ((559, 577), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (575, 577), True, 'import cartopy.crs as ccrs\n'), ((620, 646), 'cartopy.crs.NearsidePerspective', 'ccrs.NearsidePerspective', ([], {}), '()\n', (644, 646), True, 'import cartopy.crs as ccrs\n'), ((698, 721), 'cartopy.crs.NorthPolarStereo', 'ccrs.NorthPolarStereo', ([], {}), '()\n', (719, 721), True, 'import cartopy.crs as ccrs\n'), ((768, 783), 'cartopy.crs.Mercator', 'ccrs.Mercator', ([], {}), '()\n', (781, 783), True, 'import cartopy.crs as ccrs\n'), ((825, 845), 'cartopy.crs.Geostationary', 'ccrs.Geostationary', ([], {}), '()\n', (843, 845), True, 'import cartopy.crs as ccrs\n'), ((1435, 1451), 'numpy.transpose', 'np.transpose', (['im'], {}), '(im)\n', (1447, 1451), True, 'import numpy as np\n'), ((1467, 1499), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t[slide]'], {}), '(t[slide])\n', (1489, 1499), False, 'from datetime import datetime\n'), ((5182, 5191), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5189, 5191), True, 'import matplotlib.pyplot as plt\n'), ((5390, 5404), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5399, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5413, 5430), 'matplotlib.pyplot.close', 'plt.close', (['figsav'], {}), '(figsav)\n', (5422, 5430), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5482), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t[0]'], {}), '(t[0])\n', (5476, 5482), False, 'from datetime import datetime\n'), ((1359, 1425), 'numpy.nanmean', 
'np.nanmean', (["f['GPSTEC']['im'][0:][0:][slide:slide + n + 1]"], {'axis': '(0)'}), "(f['GPSTEC']['im'][0:][0:][slide:slide + n + 1], axis=0)\n", (1369, 1425), True, 'import numpy as np\n'), ((1783, 1795), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1793, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1847), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (1828, 1847), True, 'import numpy as np\n'), ((1978, 2013), 'matplotlib.path.Path', 'mpath.Path', (['(verts * radius + center)'], {}), '(verts * radius + center)\n', (1988, 2013), True, 'import matplotlib.path as mpath\n'), ((5884, 5914), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(100)'], {}), '(0, 2 * np.pi, 100)\n', (5895, 5914), True, 'import numpy as np\n'), ((6045, 6080), 'matplotlib.path.Path', 'mpath.Path', (['(verts * radius + center)'], {}), '(verts * radius + center)\n', (6055, 6080), True, 'import matplotlib.path as mpath\n'), ((11807, 11829), 'os.path.splitext', 'os.path.splitext', (['root'], {}), '(root)\n', (11823, 11829), False, 'import os\n'), ((2138, 2156), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2154, 2156), True, 'import cartopy.crs as ccrs\n'), ((2933, 2951), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2949, 2951), True, 'import cartopy.crs as ccrs\n'), ((3929, 3982), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {'projection': 'projections[proj][0]'}), '(1, 1, 1, projection=projections[proj][0])\n', (3940, 3982), True, 'import matplotlib.pyplot as plt\n'), ((5005, 5024), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (5018, 5024), False, 'import os\n'), ((5769, 5788), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (5782, 5788), False, 'import os\n'), ((6314, 6346), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t[slide]'], {}), '(t[slide])\n', (6336, 6346), False, 'from datetime import 
datetime\n'), ((6677, 6689), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6687, 6689), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6777), 'numpy.nanmean', 'np.nanmean', (["f['GPSTEC']['im'][0:][0:][slide:slide + n + 1]"], {'axis': '(0)'}), "(f['GPSTEC']['im'][0:][0:][slide:slide + n + 1], axis=0)\n", (6721, 6777), True, 'import numpy as np\n'), ((6795, 6811), 'numpy.transpose', 'np.transpose', (['im'], {}), '(im)\n', (6807, 6811), True, 'import numpy as np\n'), ((8761, 8770), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8768, 8770), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9015), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9010, 9015), True, 'import matplotlib.pyplot as plt\n'), ((9032, 9049), 'matplotlib.pyplot.close', 'plt.close', (['figsav'], {}), '(figsav)\n', (9041, 9049), True, 'import matplotlib.pyplot as plt\n'), ((9322, 9334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9332, 9334), True, 'import matplotlib.pyplot as plt\n'), ((9356, 9422), 'numpy.nanmean', 'np.nanmean', (["f['GPSTEC']['im'][0:][0:][slide:slide + n + 1]"], {'axis': '(0)'}), "(f['GPSTEC']['im'][0:][0:][slide:slide + n + 1], axis=0)\n", (9366, 9422), True, 'import numpy as np\n'), ((9440, 9456), 'numpy.transpose', 'np.transpose', (['im'], {}), '(im)\n', (9452, 9456), True, 'import numpy as np\n'), ((9786, 9839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {'projection': 'projections[proj][0]'}), '(1, 1, 1, projection=projections[proj][0])\n', (9797, 9839), True, 'import matplotlib.pyplot as plt\n'), ((10754, 10763), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10761, 10763), True, 'import matplotlib.pyplot as plt\n'), ((10929, 10943), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10938, 10943), True, 'import matplotlib.pyplot as plt\n'), ((10960, 10977), 'matplotlib.pyplot.close', 'plt.close', (['figsav'], {}), '(figsav)\n', (10969, 10977), True, 'import 
matplotlib.pyplot as plt\n'), ((1580, 1601), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (1590, 1601), True, 'import numpy as np\n'), ((1663, 1684), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (1673, 1684), True, 'import numpy as np\n'), ((2065, 2088), 'cartopy.crs.SouthPolarStereo', 'ccrs.SouthPolarStereo', ([], {}), '()\n', (2086, 2088), True, 'import cartopy.crs as ccrs\n'), ((2696, 2714), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2712, 2714), True, 'import cartopy.crs as ccrs\n'), ((2861, 2884), 'cartopy.crs.NorthPolarStereo', 'ccrs.NorthPolarStereo', ([], {}), '()\n', (2882, 2884), True, 'import cartopy.crs as ccrs\n'), ((3490, 3508), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3506, 3508), True, 'import cartopy.crs as ccrs\n'), ((6944, 6962), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6960, 6962), True, 'import cartopy.crs as ccrs\n'), ((7761, 7779), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7777, 7779), True, 'import cartopy.crs as ccrs\n'), ((1924, 1937), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1930, 1937), True, 'import numpy as np\n'), ((1939, 1952), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1945, 1952), True, 'import numpy as np\n'), ((3877, 3909), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['t[slide]'], {}), '(t[slide])\n', (3899, 3909), False, 'from datetime import datetime\n'), ((4443, 4461), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4459, 4461), True, 'import cartopy.crs as ccrs\n'), ((4764, 4783), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (4777, 4783), False, 'import os\n'), ((5537, 5556), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (5550, 5556), False, 'import os\n'), ((5991, 6004), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5997, 6004), True, 'import numpy as np\n'), ((6006, 6019), 
'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6012, 6019), True, 'import numpy as np\n'), ((6867, 6890), 'cartopy.crs.SouthPolarStereo', 'ccrs.SouthPolarStereo', ([], {}), '()\n', (6888, 6890), True, 'import cartopy.crs as ccrs\n'), ((7536, 7554), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7552, 7554), True, 'import cartopy.crs as ccrs\n'), ((7685, 7708), 'cartopy.crs.NorthPolarStereo', 'ccrs.NorthPolarStereo', ([], {}), '()\n', (7706, 7708), True, 'import cartopy.crs as ccrs\n'), ((8359, 8377), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8375, 8377), True, 'import cartopy.crs as ccrs\n'), ((10334, 10352), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (10350, 10352), True, 'import cartopy.crs as ccrs\n'), ((1567, 1578), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1575, 1578), True, 'import numpy as np\n'), ((1650, 1661), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1658, 1661), True, 'import numpy as np\n'), ((4903, 4922), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (4916, 4922), False, 'import os\n'), ((5672, 5691), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (5685, 5691), False, 'import os\n'), ((12040, 12059), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (12053, 12059), False, 'import os\n'), ((6534, 6555), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (6544, 6555), True, 'import numpy as np\n'), ((6629, 6650), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (6639, 6650), True, 'import numpy as np\n'), ((9644, 9665), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (9654, 9665), True, 'import numpy as np\n'), ((9739, 9760), 'numpy.reshape', 'np.reshape', (['im', '(64800)'], {}), '(im, 64800)\n', (9749, 9760), True, 'import numpy as np\n'), ((12158, 12177), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (12171, 12177), False, 'import 
os\n'), ((6521, 6532), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6529, 6532), True, 'import numpy as np\n'), ((6616, 6627), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6624, 6627), True, 'import numpy as np\n'), ((9631, 9642), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (9639, 9642), True, 'import numpy as np\n'), ((9726, 9737), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (9734, 9737), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Make big QUOCKA cubes"""
from IPython import embed
import schwimmbad
import sys
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from radio_beam import Beam, Beams
from radio_beam.utils import BeamError
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
import au2
import scipy.signal
import numpy as np
from functools import partial
import reproject as rpj
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
def _version_tuple(version_string):
    """Return the leading numeric components of a version string as a tuple
    of ints (e.g. '0.10.1' -> (0, 10, 1)) so versions compare correctly."""
    parts = []
    for chunk in version_string.split('.'):
        if not chunk.isdigit():
            break
        parts.append(int(chunk))
    return tuple(parts)


# Require reproject >= 0.7. The previous check, float(version[0:3]) >= 0.7,
# misparsed two-digit components ('0.10' -> 0.1) and would wrongly reject
# newer releases; comparing numeric tuples handles them, and avoids using
# `assert` (which is stripped under -O) for validation.
if _version_tuple(rpj.__version__) < (0, 7):
    print('We require reproject version > 0.7')
    print(f'Current version is {rpj.__version__}')
    print('Please update reproject!')
    quit()
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass
class GridError(Error):
    """Raised when the pixel grid is too coarse for the convolving beam."""
    pass
def round_up(n, decimals=0):
    """Round *n* up (towards +inf) at the given number of decimal places."""
    scale = 10 ** decimals
    return np.ceil(n * scale) / scale
def my_ceil(a, precision=0):
    """Ceiling of *a* at the given decimal precision, computed by shifting
    up half a unit in the last place and rounding."""
    bump = 0.5 * 10 ** (-precision)
    return np.round(a + bump, precision)
def getmaxbeam(file_dict, tolerance=0.0001, nsamps=200, epsilon=0.0005, verbose=False):
    """Find common beam
    Arguments:
        file_dict {dict} -- Filenames for each bandcube.
    Keyword Arguments:
        tolerance {float} -- See common_beam (default: {0.0001})
        nsamps {int} -- See common_beam (default: {200})
        epsilon {float} -- See common_beam (default: {0.0005})
        verbose {bool} -- Verbose output (default: {False})
    Returns:
        cmn_beam {Beam} -- Common beam
    """
    if verbose:
        print('Finding common beam...')
    stokes = ['i', 'q', 'u', 'v']
    # NOTE(review): beam_dict is never used in this function.
    beam_dict = {}
    beams = []
    # Read one Beam per input FITS file; the first Stokes I header is kept
    # as the reference for the pixel grid.
    for stoke in stokes:
        for i, file in enumerate(file_dict[stoke]):
            header = fits.getheader(file, memmap=True)
            if stoke == 'i' and i == 0:
                target_header = header
            beam = Beam.from_fits_header(header)
            beams.append(beam)
    # Collapse the individual Beam objects into a radio_beam.Beams table
    # (major/minor/pa in degrees).
    beams = Beams(
        [beam.major.value for beam in beams]*u.deg,
        [beam.minor.value for beam in beams]*u.deg,
        [beam.pa.value for beam in beams]*u.deg
    )
    # common_beam can fail to converge; retry with a 10x tighter tolerance.
    try:
        cmn_beam = beams.common_beam(
            tolerance=tolerance, epsilon=epsilon, nsamps=nsamps)
    except BeamError:
        if verbose:
            print("Couldn't find common beam with defaults")
            print("Trying again with smaller tolerance")
        cmn_beam = beams.common_beam(
            tolerance=tolerance*0.1, epsilon=epsilon, nsamps=nsamps)
    # Round the common beam up (whole arcsec in size, 0.01 deg in PA) so it
    # is safely at least as large as every input beam.
    cmn_beam = Beam(
        major=my_ceil(cmn_beam.major.to(u.arcsec).value, precision=0)*u.arcsec,
        minor=my_ceil(cmn_beam.minor.to(u.arcsec).value, precision=0)*u.arcsec,
        pa=round_up(cmn_beam.pa.to(u.deg), decimals=2)
    )
    # Pixel scale of the reference image; the grid must be square.
    dx = target_header['CDELT1']*-1*u.deg
    dy = target_header['CDELT2']*u.deg
    assert abs(dx) == abs(dy)
    grid = dy
    # Per-input convolving beams needed to reach the common beam.
    conbeams = [cmn_beam.deconvolve(beam) for beam in beams]
    # Check that convolving beam will be nyquist sampled
    min_samps = []
    for b_idx, conbeam in enumerate(conbeams):
        # Get maj, min, pa
        samp = conbeam.minor / grid.to(u.arcsec)
        if samp < 2:
            min_samps.append([samp, b_idx])
    if len(min_samps) > 0:
        print('Adjusting common beam to be sampled by grid!')
        # Enlarge the worst-sampled convolving beam so its minor axis spans
        # at least 2 pixels, then rebuild the common beam from it.
        worst_idx = np.argmin([samp[0] for samp in min_samps], axis=0)
        samp_cor_fac, idx = 2 / \
            min_samps[worst_idx][0], int(
                min_samps[worst_idx][1])
        conbeam = conbeams[idx]
        major = conbeam.major
        minor = conbeam.minor*samp_cor_fac
        pa = conbeam.pa
        # Check for small major!
        if major < minor:
            major = minor
            pa = 0*u.deg
        cor_beam = Beam(major, minor, pa)
        if verbose:
            print('Smallest common beam is:', cmn_beam)
        cmn_beam = beams[idx].convolve(cor_beam)
        # Round the adjusted beam up again (0.1 arcsec here).
        cmn_beam = Beam(
            major=my_ceil(cmn_beam.major.to(u.arcsec).value, precision=1)*u.arcsec,
            minor=my_ceil(cmn_beam.minor.to(u.arcsec).value, precision=1)*u.arcsec,
            pa=round_up(cmn_beam.pa.to(u.deg), decimals=2)
        )
    if verbose:
        print('Smallest common Nyquist sampled beam is:', cmn_beam)
    return cmn_beam
def writecube(data, beam, stoke, field, outdir, verbose=False):
    """Save one combined Stokes cube to FITS; for Stokes I also dump the
    channel frequencies to a text file.

    Arguments:
        data {dict} -- Holds 'cube', 'freqs' and 'target header'.
        beam {Beam} -- Common beam recorded in the output header.
        stoke {str} -- Stokes parameter ('i', 'q', 'u' or 'v').
        field {str} -- Field name used to build the output filenames.
        outdir {str} -- Directory the files are written into.

    Keyword Arguments:
        verbose {bool} -- Report the written paths (default: {False})
    """
    cube_name = f"{field}.{stoke}.cutout.bigcube.fits"
    # Spectral axis: first frequency as reference value, median frequency
    # step as the channel width.
    chan_width = np.nanmedian(np.diff(data['freqs']))
    hdr = beam.attach_to_header(data['target header'])
    hdr['CRVAL3'] = data['freqs'][0].to_value()
    hdr['CDELT3'] = chan_width.to_value()
    fits.writeto(f'{outdir}/{cube_name}', data['cube'],
                 header=hdr, overwrite=True)
    if verbose:
        print("Saved cube to", f'{outdir}/{cube_name}')
    # Frequencies are identical across Stokes, so write them only once.
    if stoke == 'i':
        freq_name = f"{field}.bigcube.frequencies.txt"
        np.savetxt(f"{outdir}/{freq_name}", data['freqs'].to_value())
        if verbose:
            print("Saved frequencies to", f"{outdir}/{freq_name}")
def main(pool, args, verbose=False):
    """Combine the per-band QUOCKA cubes of one field into single
    common-resolution cubes (one per Stokes parameter).

    Arguments:
        pool -- A schwimmbad-style pool providing `imap` for parallel maps.
        args -- Parsed command-line namespace (see `cli`).

    Keyword Arguments:
        verbose {bool} -- Verbose output (default: {False})
    """
    # Set up variables
    bands = [2100, 5500, 7500]
    stokes = ['i', 'q', 'u', 'v']
    datadir = args.datadir
    field = args.field
    if datadir is not None:
        if datadir[-1] == '/':
            datadir = datadir[:-1]
    outdir = args.outdir
    if outdir is not None:
        if outdir[-1] == '/':
            outdir = outdir[:-1]
    else:
        # Default to writing next to the input data.
        outdir = datadir
    # Glob out files
    file_dict = {}
    for stoke in stokes:
        file_dict.update(
            {
                stoke: sorted(
                    glob(f'{datadir}/{field}.*.{stoke}.cutout.bandcube.fits')
                )
            }
        )
    file_dict.update(
        {
            'freqs': sorted(
                glob(f'{datadir}/{field}.*.bandcube.frequencies.txt')
            )
        }
    )
    # Check files were found
    for stoke in stokes:
        if len(file_dict[stoke]) == 0:
            raise Exception(f'No Stokes {stoke} files found!')
    # Get common beam
    big_beam = getmaxbeam(file_dict,
                          tolerance=args.tolerance,
                          nsamps=args.nsamps,
                          epsilon=args.epsilon,
                          verbose=verbose)
    bmaj = args.bmaj
    bmin = args.bmin
    bpa = args.bpa
    # Position angle: use the common beam's PA when no beam parameters were
    # given at all, 0 deg when a size was given without a PA, and otherwise
    # honour the user's value. (Previously a user-supplied --bpa was
    # silently overwritten with 0.)
    if bpa is None and bmin is None and bmaj is None:
        bpa = big_beam.pa.to(u.deg)
    elif bpa is None:
        bpa = 0*u.deg
    else:
        bpa *= u.deg
    # Axis sizes: default to the common beam, otherwise require the user's
    # value to be at least the (rounded-up) common beam size.
    if bmaj is None:
        bmaj = big_beam.major.to(u.arcsec)
    elif bmaj*u.arcsec < round_up(big_beam.major.to(u.arcsec)):
        raise Exception('Selected BMAJ is too small!')
    else:
        bmaj *= u.arcsec
    if bmin is None:
        bmin = big_beam.minor.to(u.arcsec)
    elif bmin*u.arcsec < round_up(big_beam.minor.to(u.arcsec)):
        raise Exception('Selected BMIN is too small!')
    else:
        bmin *= u.arcsec
    new_beam = Beam(
        bmaj,
        bmin,
        bpa
    )
    if verbose:
        print('Common beam is', new_beam)
    # Start computation - work on each Stokes
    stoke_dict = {}
    for stoke in stokes:
        print(f'Working on Stokes {stoke}...')
        datadict = {}
        # Get data from files
        for band in tqdm(bands, desc='Reading data', disable=(not verbose)):
            with fits.open(f'{datadir}/{field}.{band}.{stoke}.cutout.bandcube.fits',
                           memmap=True,
                           mode='denywrite') as hdulist:
                data = hdulist[0].data
                head = hdulist[0].header
            freq = np.loadtxt(
                f'{datadir}/{field}.{band}.bandcube.frequencies.txt')
            datadict.update(
                {
                    band: {
                        'data': data,
                        'head': head,
                        'wcs': WCS(head),
                        'freq': freq,
                        'beam': Beam.from_fits_header(head)
                    }
                }
            )
        # The 2100 MHz image defines the target pixel grid.
        target_wcs = datadict[2100]['wcs']
        target_header = datadict[2100]['head']
        # Regrid every band's channels onto the target grid.
        for band in tqdm(bands, desc='Regridding data', disable=(not verbose)):
            worker = partial(
                rpj.reproject_exact,
                output_projection=target_wcs.celestial,
                shape_out=datadict[2100]['data'][0].shape,
                parallel=False,
                return_footprint=False
            )
            input_wcs = datadict[band]['wcs'].celestial
            inputs = [(image, input_wcs) for image in datadict[band]['data']]
            newcube = np.zeros_like(datadict[band]['data'])*np.nan
            out = list(
                tqdm(
                    pool.imap(
                        worker, inputs
                    ),
                    total=len(datadict[band]['data']),
                    desc='Regridding channels',
                    disable=(not verbose)
                )
            )
            newcube[:] = out[:]
            datadict[band].update(
                {
                    "newdata": newcube
                }
            )
        # Get scaling factors and convolution kernels
        for band in tqdm(bands, desc='Computing scaling factors', disable=(not verbose)):
            con_beam = new_beam.deconvolve(datadict[band]['beam'])
            dx = target_header['CDELT1']*-1*u.deg
            dy = target_header['CDELT2']*u.deg
            fac, amp, outbmaj, outbmin, outbpa = au2.gauss_factor(
                [
                    con_beam.major.to(u.arcsec).value,
                    con_beam.minor.to(u.arcsec).value,
                    con_beam.pa.to(u.deg).value
                ],
                beamOrig=[
                    datadict[band]['beam'].major.to(u.arcsec).value,
                    datadict[band]['beam'].minor.to(u.arcsec).value,
                    datadict[band]['beam'].pa.to(u.deg).value
                ],
                dx1=dx.to(u.arcsec).value,
                dy1=dy.to(u.arcsec).value
            )
            pix_scale = dy
            gauss_kern = con_beam.as_kernel(pix_scale)
            # Peak-normalised convolution kernel; `fac` rescales to Jy/beam
            # in the new beam.
            conbm = gauss_kern.array/gauss_kern.array.max()
            datadict[band].update(
                {
                    'conbeam': conbm,
                    'fac': fac,
                    'target header': target_header
                }
            )
        datadict.update(
            {
                'target header': target_header
            }
        )
        # Convolve data
        for band in tqdm(bands, desc='Smoothing data', disable=(not verbose)):
            smooth = partial(
                scipy.signal.convolve,
                in2=datadict[band]['conbeam'],
                mode='same'
            )
            sm_data = np.zeros_like(datadict[band]['newdata'])*np.nan
            # Zero out non-finite pixels before convolving, then restore
            # NaNs afterwards so blanks stay blank.
            cube = np.copy(datadict[band]['newdata'])
            cube[~np.isfinite(cube)] = 0
            out = list(tqdm(
                pool.imap(
                    smooth, cube
                ),
                total=len(datadict[band]['newdata']),
                desc='Smoothing channels',
                disable=(not verbose)
            ))
            sm_data[:] = out[:]
            sm_data[~np.isfinite(cube)] = np.nan
            datadict[band].update(
                {
                    'smdata': sm_data,
                }
            )
        stoke_dict.update(
            {
                stoke: datadict
            }
        )
        # Show debug plots: spectrum at the brightest pixel, per band.
        if args.debug:
            plt.figure()
            i_mom = np.nansum(datadict[2100]['smdata'], axis=0)
            idx = np.unravel_index(np.argmax(i_mom), i_mom.shape)
            for band in bands:
                x = datadict[band]['freq']
                y = datadict[band]['fac'] * \
                    datadict[band]['smdata'][:, idx[0], idx[1]]
                plt.plot(x, y, '.', label=f'Stokes {stoke} -- band {band}')
            if stoke == 'i':
                plt.xscale('log')
                plt.yscale('log')
            plt.xlabel('Frequency [Hz]')
            plt.ylabel('Flux density [Jy/beam]')
            plt.legend()
            plt.show()
    # Make cubes
    for stoke in tqdm(stokes, desc='Making cubes', disable=(not verbose)):
        cube = np.vstack([stoke_dict[stoke][band]['smdata']
                          * stoke_dict[stoke][band]['fac'] for band in bands])
        freq_cube = np.concatenate(
            [stoke_dict[stoke][band]['freq'] for band in bands]) * u.Hz
        stoke_dict[stoke].update(
            {
                'cube': cube,
                'freqs': freq_cube
            }
        )
    # Show debug plots: combined spectrum at the brightest pixel.
    if args.debug:
        i_mom = np.nansum(stoke_dict['i']['cube'], axis=0)
        idx = np.unravel_index(np.argmax(i_mom), i_mom.shape)
        plt.figure()
        for stoke in stokes:
            x = stoke_dict[stoke]['freqs']
            y = stoke_dict[stoke]['cube'][:, idx[0], idx[1]]
            plt.plot(x, y, '.', label=f'Stokes {stoke}')
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Flux density [Jy/beam]')
        plt.legend()
        plt.show()
        plt.figure()
        for stoke in stokes:
            x = (299792458 / stoke_dict[stoke]['freqs'])**2
            y = stoke_dict[stoke]['cube'][:, idx[0], idx[1]]
            plt.plot(x, y, '.', label=f'Stokes {stoke}')
        # Raw string avoids the invalid '\l' escape in the original literal.
        plt.xlabel(r'$\lambda^2$ [m$^2$]')
        plt.ylabel('Flux density [Jy/beam]')
        plt.legend()
        plt.show()
    if not args.dryrun:
        # Save the cubes
        for stoke in tqdm(stokes, desc='Writing cubes', disable=(not verbose)):
            writecube(stoke_dict[stoke],
                      new_beam,
                      stoke,
                      field,
                      outdir,
                      verbose=verbose)
    if verbose:
        print('Done!')
def cli():
    """Command-line interface for the QUOCKA cube-combination script.

    Parses the command-line options, builds the worker pool (multiprocessing
    or MPI via schwimmbad) and hands control to ``main``.
    """
    import argparse

    # Help string to be shown using the -h option
    descStr = """
    Produce common resolution cubes for QUOCKA data.
    Combines separate cubes per band into single cube.
    Make sure to run makecube.py first!
    """

    # Parse the command line options
    parser = argparse.ArgumentParser(description=descStr,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'datadir',
        metavar='datadir',
        type=str,
        help='Directory containing a single QUOCKA field images.')
    parser.add_argument(
        'field',
        metavar='field',
        type=str,
        help='QUOCKA field name.')
    parser.add_argument(
        '-o',
        '--outdir',
        dest='outdir',
        type=str,
        default=None,
        help='(Optional) Save cubes to different directory [datadir].')
    parser.add_argument(
        "--bmaj",
        dest="bmaj",
        type=float,
        default=None,
        help="BMAJ (arcsec) to convolve to [max BMAJ from given image(s)].")
    parser.add_argument(
        "--bmin",
        dest="bmin",
        type=float,
        default=None,
        # BUGFIX: help text previously said "max BMAJ" (copy-paste from --bmaj)
        help="BMIN (arcsec) to convolve to [max BMIN from given image(s)].")
    parser.add_argument(
        "--bpa",
        dest="bpa",
        type=float,
        default=None,
        help="BPA (deg) to convolve to [0].")
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="verbose output [False].")
    parser.add_argument(
        "-d",
        "--dryrun",
        dest="dryrun",
        action="store_true",
        help="Compute common beam and stop [False].")
    parser.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        help="Show debugging plots [False].")
    parser.add_argument(
        "-t",
        "--tolerance",
        dest="tolerance",
        type=float,
        default=0.0001,
        help="tolerance for radio_beam.commonbeam.")
    parser.add_argument(
        "-e",
        "--epsilon",
        dest="epsilon",
        type=float,
        default=0.0005,
        help="epsilon for radio_beam.commonbeam.")
    parser.add_argument(
        "-n",
        "--nsamps",
        dest="nsamps",
        type=int,
        default=200,
        help="nsamps for radio_beam.commonbeam.")

    # The two pool flavours are mutually exclusive: local processes vs. MPI.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--ncores", dest="n_cores", default=1,
                       type=int, help="Number of processes (uses multiprocessing).")
    group.add_argument("--mpi", dest="mpi", default=False,
                       action="store_true", help="Run with MPI.")

    args = parser.parse_args()

    pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.n_cores)
    if args.mpi:
        if not pool.is_master():
            # Worker ranks just serve tasks until the master closes the pool.
            pool.wait()
            sys.exit(0)

    # make it so we can use imap in serial and mpi mode
    if not isinstance(pool, schwimmbad.MultiPool):
        pool.imap = pool.map

    verbose = args.verbose

    main(pool, args, verbose=verbose)
    pool.close()
# Entry point when the module is executed as a script.
if __name__ == "__main__":
    cli()
| [
"matplotlib.pyplot.ylabel",
"radio_beam.Beam.from_fits_header",
"numpy.isfinite",
"sys.exit",
"astropy.io.fits.open",
"schwimmbad.choose_pool",
"argparse.ArgumentParser",
"radio_beam.Beams",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.vstack",
"numpy.concatenat... | [((515, 571), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'AstropyWarning'}), "('ignore', category=AstropyWarning)\n", (536, 571), False, 'import warnings\n'), ((1153, 1200), 'numpy.round', 'np.round', (['(a + 0.5 * 10 ** -precision)', 'precision'], {}), '(a + 0.5 * 10 ** -precision, precision)\n', (1161, 1200), True, 'import numpy as np\n'), ((2145, 2289), 'radio_beam.Beams', 'Beams', (['([beam.major.value for beam in beams] * u.deg)', '([beam.minor.value for beam in beams] * u.deg)', '([beam.pa.value for beam in beams] * u.deg)'], {}), '([beam.major.value for beam in beams] * u.deg, [beam.minor.value for\n beam in beams] * u.deg, [beam.pa.value for beam in beams] * u.deg)\n', (2150, 2289), False, 'from radio_beam import Beam, Beams\n'), ((5201, 5286), 'astropy.io.fits.writeto', 'fits.writeto', (['f"""{outdir}/{outfile}"""', "data['cube']"], {'header': 'header', 'overwrite': '(True)'}), "(f'{outdir}/{outfile}', data['cube'], header=header, overwrite=True\n )\n", (5213, 5286), False, 'from astropy.io import fits\n'), ((7652, 7673), 'radio_beam.Beam', 'Beam', (['bmaj', 'bmin', 'bpa'], {}), '(bmaj, bmin, bpa)\n', (7656, 7673), False, 'from radio_beam import Beam, Beams\n'), ((13033, 13087), 'tqdm.tqdm', 'tqdm', (['stokes'], {'desc': '"""Making cubes"""', 'disable': '(not verbose)'}), "(stokes, desc='Making cubes', disable=not verbose)\n", (13037, 13087), False, 'from tqdm import tqdm\n'), ((15042, 15138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'descStr', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=descStr, formatter_class=argparse.\n RawTextHelpFormatter)\n', (15065, 15138), False, 'import argparse\n'), ((17531, 17591), 'schwimmbad.choose_pool', 'schwimmbad.choose_pool', ([], {'mpi': 'args.mpi', 'processes': 'args.n_cores'}), '(mpi=args.mpi, processes=args.n_cores)\n', (17553, 17591), False, 'import schwimmbad\n'), ((1074, 1097), 'numpy.ceil', 
'np.ceil', (['(n * multiplier)'], {}), '(n * multiplier)\n', (1081, 1097), True, 'import numpy as np\n'), ((3493, 3543), 'numpy.argmin', 'np.argmin', (['[samp[0] for samp in min_samps]'], {'axis': '(0)'}), '([samp[0] for samp in min_samps], axis=0)\n', (3502, 3543), True, 'import numpy as np\n'), ((3920, 3942), 'radio_beam.Beam', 'Beam', (['major', 'minor', 'pa'], {}), '(major, minor, pa)\n', (3924, 3942), False, 'from radio_beam import Beam, Beams\n'), ((4982, 5004), 'numpy.diff', 'np.diff', (["data['freqs']"], {}), "(data['freqs'])\n", (4989, 5004), True, 'import numpy as np\n'), ((7975, 8028), 'tqdm.tqdm', 'tqdm', (['bands'], {'desc': '"""Reading data"""', 'disable': '(not verbose)'}), "(bands, desc='Reading data', disable=not verbose)\n", (7979, 8028), False, 'from tqdm import tqdm\n'), ((8921, 8977), 'tqdm.tqdm', 'tqdm', (['bands'], {'desc': '"""Regridding data"""', 'disable': '(not verbose)'}), "(bands, desc='Regridding data', disable=not verbose)\n", (8925, 8977), False, 'from tqdm import tqdm\n'), ((9996, 10062), 'tqdm.tqdm', 'tqdm', (['bands'], {'desc': '"""Computing scaling factors"""', 'disable': '(not verbose)'}), "(bands, desc='Computing scaling factors', disable=not verbose)\n", (10000, 10062), False, 'from tqdm import tqdm\n'), ((11361, 11416), 'tqdm.tqdm', 'tqdm', (['bands'], {'desc': '"""Smoothing data"""', 'disable': '(not verbose)'}), "(bands, desc='Smoothing data', disable=not verbose)\n", (11365, 11416), False, 'from tqdm import tqdm\n'), ((13106, 13210), 'numpy.vstack', 'np.vstack', (["[(stoke_dict[stoke][band]['smdata'] * stoke_dict[stoke][band]['fac']) for\n band in bands]"], {}), "([(stoke_dict[stoke][band]['smdata'] * stoke_dict[stoke][band][\n 'fac']) for band in bands])\n", (13115, 13210), True, 'import numpy as np\n'), ((13528, 13570), 'numpy.nansum', 'np.nansum', (["stoke_dict['i']['cube']"], {'axis': '(0)'}), "(stoke_dict['i']['cube'], axis=0)\n", (13537, 13570), True, 'import numpy as np\n'), ((13641, 13653), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13651, 13653), True, 'import matplotlib.pyplot as plt\n'), ((13853, 13881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (13863, 13881), True, 'import matplotlib.pyplot as plt\n'), ((13890, 13926), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux density [Jy/beam]"""'], {}), "('Flux density [Jy/beam]')\n", (13900, 13926), True, 'import matplotlib.pyplot as plt\n'), ((13935, 13947), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13945, 13947), True, 'import matplotlib.pyplot as plt\n'), ((13956, 13966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13964, 13966), True, 'import matplotlib.pyplot as plt\n'), ((13976, 13988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13986, 13988), True, 'import matplotlib.pyplot as plt\n'), ((14204, 14238), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda^2$ [m$^2$]"""'], {}), "('$\\\\lambda^2$ [m$^2$]')\n", (14214, 14238), True, 'import matplotlib.pyplot as plt\n'), ((14246, 14282), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux density [Jy/beam]"""'], {}), "('Flux density [Jy/beam]')\n", (14256, 14282), True, 'import matplotlib.pyplot as plt\n'), ((14291, 14303), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14301, 14303), True, 'import matplotlib.pyplot as plt\n'), ((14312, 14322), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14320, 14322), True, 'import matplotlib.pyplot as plt\n'), ((14394, 14449), 'tqdm.tqdm', 'tqdm', (['stokes'], {'desc': '"""Writing cubes"""', 'disable': '(not verbose)'}), "(stokes, desc='Writing cubes', disable=not verbose)\n", (14398, 14449), False, 'from tqdm import tqdm\n'), ((1940, 1973), 'astropy.io.fits.getheader', 'fits.getheader', (['file'], {'memmap': '(True)'}), '(file, memmap=True)\n', (1954, 1973), False, 'from astropy.io import fits\n'), ((2072, 2101), 'radio_beam.Beam.from_fits_header', 
'Beam.from_fits_header', (['header'], {}), '(header)\n', (2093, 2101), False, 'from radio_beam import Beam, Beams\n'), ((9002, 9161), 'functools.partial', 'partial', (['rpj.reproject_exact'], {'output_projection': 'target_wcs.celestial', 'shape_out': "datadict[2100]['data'][0].shape", 'parallel': '(False)', 'return_footprint': '(False)'}), "(rpj.reproject_exact, output_projection=target_wcs.celestial,\n shape_out=datadict[2100]['data'][0].shape, parallel=False,\n return_footprint=False)\n", (9009, 9161), False, 'from functools import partial\n'), ((11441, 11515), 'functools.partial', 'partial', (['scipy.signal.convolve'], {'in2': "datadict[band]['conbeam']", 'mode': '"""same"""'}), "(scipy.signal.convolve, in2=datadict[band]['conbeam'], mode='same')\n", (11448, 11515), False, 'from functools import partial\n'), ((11667, 11701), 'numpy.copy', 'np.copy', (["datadict[band]['newdata']"], {}), "(datadict[band]['newdata'])\n", (11674, 11701), True, 'import numpy as np\n'), ((12360, 12372), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12370, 12372), True, 'import matplotlib.pyplot as plt\n'), ((12393, 12436), 'numpy.nansum', 'np.nansum', (["datadict[2100]['smdata']"], {'axis': '(0)'}), "(datadict[2100]['smdata'], axis=0)\n", (12402, 12436), True, 'import numpy as np\n'), ((12872, 12900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (12882, 12900), True, 'import matplotlib.pyplot as plt\n'), ((12913, 12949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux density [Jy/beam]"""'], {}), "('Flux density [Jy/beam]')\n", (12923, 12949), True, 'import matplotlib.pyplot as plt\n'), ((12962, 12974), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12972, 12974), True, 'import matplotlib.pyplot as plt\n'), ((12987, 12997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12995, 12997), True, 'import matplotlib.pyplot as plt\n'), ((13250, 13317), 'numpy.concatenate', 'np.concatenate', 
(["[stoke_dict[stoke][band]['freq'] for band in bands]"], {}), "([stoke_dict[stoke][band]['freq'] for band in bands])\n", (13264, 13317), True, 'import numpy as np\n'), ((13602, 13618), 'numpy.argmax', 'np.argmax', (['i_mom'], {}), '(i_mom)\n', (13611, 13618), True, 'import numpy as np\n'), ((13799, 13843), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'label': 'f"""Stokes {stoke}"""'}), "(x, y, '.', label=f'Stokes {stoke}')\n", (13807, 13843), True, 'import matplotlib.pyplot as plt\n'), ((14151, 14195), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'label': 'f"""Stokes {stoke}"""'}), "(x, y, '.', label=f'Stokes {stoke}')\n", (14159, 14195), True, 'import matplotlib.pyplot as plt\n'), ((17678, 17689), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (17686, 17689), False, 'import sys\n'), ((6399, 6452), 'glob.glob', 'glob', (['f"""{datadir}/{field}.*.bandcube.frequencies.txt"""'], {}), "(f'{datadir}/{field}.*.bandcube.frequencies.txt')\n", (6403, 6452), False, 'from glob import glob\n'), ((8049, 8152), 'astropy.io.fits.open', 'fits.open', (['f"""{datadir}/{field}.{band}.{stoke}.cutout.bandcube.fits"""'], {'memmap': '(True)', 'mode': '"""denywrite"""'}), "(f'{datadir}/{field}.{band}.{stoke}.cutout.bandcube.fits', memmap=\n True, mode='denywrite')\n", (8058, 8152), False, 'from astropy.io import fits\n'), ((8317, 8381), 'numpy.loadtxt', 'np.loadtxt', (['f"""{datadir}/{field}.{band}.bandcube.frequencies.txt"""'], {}), "(f'{datadir}/{field}.{band}.bandcube.frequencies.txt')\n", (8327, 8381), True, 'import numpy as np\n'), ((9404, 9441), 'numpy.zeros_like', 'np.zeros_like', (["datadict[band]['data']"], {}), "(datadict[band]['data'])\n", (9417, 9441), True, 'import numpy as np\n'), ((11600, 11640), 'numpy.zeros_like', 'np.zeros_like', (["datadict[band]['newdata']"], {}), "(datadict[band]['newdata'])\n", (11613, 11640), True, 'import numpy as np\n'), ((12472, 12488), 'numpy.argmax', 'np.argmax', (['i_mom'], {}), '(i_mom)\n', (12481, 
12488), True, 'import numpy as np\n'), ((12703, 12762), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'label': 'f"""Stokes {stoke} -- band {band}"""'}), "(x, y, '.', label=f'Stokes {stoke} -- band {band}')\n", (12711, 12762), True, 'import matplotlib.pyplot as plt\n'), ((12808, 12825), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (12818, 12825), True, 'import matplotlib.pyplot as plt\n'), ((12842, 12859), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (12852, 12859), True, 'import matplotlib.pyplot as plt\n'), ((6222, 6279), 'glob.glob', 'glob', (['f"""{datadir}/{field}.*.{stoke}.cutout.bandcube.fits"""'], {}), "(f'{datadir}/{field}.*.{stoke}.cutout.bandcube.fits')\n", (6226, 6279), False, 'from glob import glob\n'), ((11720, 11737), 'numpy.isfinite', 'np.isfinite', (['cube'], {}), '(cube)\n', (11731, 11737), True, 'import numpy as np\n'), ((12054, 12071), 'numpy.isfinite', 'np.isfinite', (['cube'], {}), '(cube)\n', (12065, 12071), True, 'import numpy as np\n'), ((8609, 8618), 'astropy.wcs.WCS', 'WCS', (['head'], {}), '(head)\n', (8612, 8618), False, 'from astropy.wcs import WCS\n'), ((8698, 8725), 'radio_beam.Beam.from_fits_header', 'Beam.from_fits_header', (['head'], {}), '(head)\n', (8719, 8725), False, 'from radio_beam import Beam, Beams\n')] |
from argparse import ArgumentParser
from collections import defaultdict
import numpy as np
import torch
import h5py
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
# BERT-base dimensions: number of transformer layers and hidden-state size.
LAYER_NUM = 12
FEATURE_DIM = 768
def match_tokenized_to_untokenized(tokenized_sent, untokenized_sent):
    '''Aligns a subword-tokenized sentence with its whitespace-tokenized form.

    Subword tokens that continue a word are assumed to carry a "##" prefix.
    Walks both sequences in lockstep, collecting for every whitespace token
    the indices of all subword pieces that make it up.  The subword sequence
    is expected to start with a special token (e.g. [CLS]) at index 0, which
    is why the walk begins at subword index 1.

    Args:
        tokenized_sent: list of strings, the subword-tokenized sentence
        untokenized_sent: list of strings, the whitespace-tokenized sentence

    Returns:
        A {int: list(int)} mapping from each untokenized index to the list
        of subword-tokenized indices it aligns to
    '''
    alignment = defaultdict(list)
    n_words = len(untokenized_sent)
    n_pieces = len(tokenized_sent)
    word_idx, piece_idx = 0, 1  # piece 0 is the leading special token
    while word_idx < n_words and piece_idx < n_pieces:
        # Absorb every "##"-continuation piece belonging to the current word.
        while (piece_idx + 1 < n_pieces
               and tokenized_sent[piece_idx + 1].startswith('##')):
            alignment[word_idx].append(piece_idx)
            piece_idx += 1
        alignment[word_idx].append(piece_idx)
        word_idx += 1
        piece_idx += 1
    return alignment
if __name__ == '__main__':
    argp = ArgumentParser()
    argp.add_argument('hdf5_file')        # HDF5 file of per-sentence BERT features, keyed by sentence index
    argp.add_argument('sentences_raw')    # raw sentences, one per line, assumed aligned with the HDF5 keys
    argp.add_argument('translation_npz')  # output path for the computed per-layer translation vectors
    args = argp.parse_args()

    subword_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
    with open(args.sentences_raw, 'r') as in_sents:
        sentences = in_sents.readlines()
    hf = h5py.File(args.hdf5_file, 'r')
    indices = list(hf.keys())
    # One vector per BERT layer, accumulated as a NEGATIVE running sum of
    # per-sentence mean vectors; divided by the sentence count at the end, so
    # each row ends up as minus the corpus-mean feature vector for that layer.
    translation = np.zeros((LAYER_NUM, FEATURE_DIM))
    # HDF5 keys are sorted numerically so sentence i pairs with line i of the raw file.
    for index, sent in tqdm(zip(sorted([int(x) for x in indices]), sentences), desc='Averaging sentence vectors'):
        sent = sent.strip()
        tokenized_sent = subword_tokenizer.wordpiece_tokenizer.tokenize('[CLS] ' + sent + ' [SEP]')
        untokenized_sent = sent.split(' ')
        untok_tok_mapping = match_tokenized_to_untokenized(tokenized_sent, untokenized_sent)
        # assumed shape (LAYER_NUM, n_subwords, FEATURE_DIM) -- enforced by the asserts below
        feature_stack = hf[str(index)]
        for layer_idx in range(LAYER_NUM):
            single_layer_features = feature_stack[layer_idx]
            assert single_layer_features.shape[0] == len(tokenized_sent)
            # Average the subword vectors belonging to each whitespace token.
            single_layer_features = torch.tensor(
                [np.mean(single_layer_features[untok_tok_mapping[i][0]:untok_tok_mapping[i][-1] + 1, :], axis=0)
                 for i in range(len(untokenized_sent))])
            assert single_layer_features.shape[0] == len(sent.split(' '))
            translation[layer_idx, :] -= single_layer_features.numpy().mean(axis=0)
    translation /= len(sentences)
    np.savez(args.translation_npz, translation)
| [
"numpy.mean",
"pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"numpy.savez",
"argparse.ArgumentParser",
"h5py.File",
"numpy.zeros",
"collections.defaultdict"
] | [((915, 932), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (926, 932), False, 'from collections import defaultdict\n'), ((1561, 1577), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1575, 1577), False, 'from argparse import ArgumentParser\n'), ((1747, 1808), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-multilingual-cased"""'], {}), "('bert-base-multilingual-cased')\n", (1776, 1808), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((1913, 1943), 'h5py.File', 'h5py.File', (['args.hdf5_file', '"""r"""'], {}), "(args.hdf5_file, 'r')\n", (1922, 1943), False, 'import h5py\n'), ((1993, 2027), 'numpy.zeros', 'np.zeros', (['(LAYER_NUM, FEATURE_DIM)'], {}), '((LAYER_NUM, FEATURE_DIM))\n', (2001, 2027), True, 'import numpy as np\n'), ((3046, 3089), 'numpy.savez', 'np.savez', (['args.translation_npz', 'translation'], {}), '(args.translation_npz, translation)\n', (3054, 3089), True, 'import numpy as np\n'), ((2693, 2793), 'numpy.mean', 'np.mean', (['single_layer_features[untok_tok_mapping[i][0]:untok_tok_mapping[i][-1] + 1, :]'], {'axis': '(0)'}), '(single_layer_features[untok_tok_mapping[i][0]:untok_tok_mapping[i][\n -1] + 1, :], axis=0)\n', (2700, 2793), True, 'import numpy as np\n')] |
import numpy as np
from optlang import Constraint
from scipy import stats
from ..util.constraints import *
from ..util.linalg_fun import *
from ..util.thermo_constants import *
def generate_n_sphere_sample(n_variables):
    """Draw a sample from the surface of the unit n-sphere.

    A standard-normal vector is direction-uniform, so dividing it by its
    Euclidean radius places the point on the surface of the unit sphere.

    Parameters
    ----------
    n_variables : int
        number of variables, dimension of the required sphere

    Returns
    -------
    np.array
        unit-norm n-sphere sample of length ``n_variables``
    """
    # draw from N(0, 1) and project onto the sphere surface
    gaussian_draw = np.random.normal(loc=0, scale=1.0, size=(n_variables))
    radius = np.sqrt(np.sum(np.square(gaussian_draw)))
    return gaussian_draw / radius
def generate_ellipsoid_sample(cholesky):
    """Sample a point on the surface of an n-dimensional ellipsoid.

    A sample on the n-ellipsoid is a linear transformation of a unit n-sphere
    sample: N(mu, var) = mu + A @ N(0, 1), where A is the Cholesky factor of
    the covariance (A @ A' = var).  The result is scaled by the square root of
    the chi-square critical value so the ellipsoid surface maps the 95%
    confidence region.

    Parameters
    ----------
    cholesky : np.ndarray
        cholesky matrix

    Returns
    -------
    np.array
        numpy array containing ellipsoid sample with cholesky matrix length
    """
    n_dimensions = len(cholesky)
    # Use the explicit scipy.stats namespace (consistent with the
    # stats.chi2.isf calls in preprocess_model) instead of relying on a bare
    # `chi2` name pulled in through a star import.
    chi_crit_val = stats.chi2.isf(q=0.05, df=n_dimensions)
    n_sphere_sample = generate_n_sphere_sample(n_dimensions)
    ellipsoid_sample = np.sqrt(chi_crit_val) * cholesky @ n_sphere_sample

    return ellipsoid_sample
def preprocess_model(model):
    """Prepare a multitfa model for sampling on the surface of an ellipsoid.

    The existing dG constraints and their associated error variables are
    removed and replaced by "sphere" variables.  Depending on the variance
    range of the covariance matrix, the sphere variables are split across two
    ellipsoids (low- and high-variance components) to avoid numerical issues.

    Parameters
    ----------
    model : multitfa.core.tmodel
        multitfa model, updated

    Returns
    -------
    multitfa.core.tmodel
        preprocessed model with modified thermo constraints to work on
        sampling approach
    """
    # First remove the delG constraint and associated variables, we will add them later
    remove_vars = [
        var
        for var in model.variables
        if var.name.startswith("component_")
        or var.name.startswith("dG_err_")
        or var.name.startswith("Sphere_")
    ]
    remove_cons = [
        cons
        for cons in model.constraints
        if cons.name.startswith("delG_") or cons.name.startswith("std_dev_")
    ]

    # Remove the variables and constraints from the model
    model.remove_cons_vars(remove_cons + remove_vars)

    # Pick indices of components present in the current model (columns with
    # at least one non-zero entry in the compound vector matrix)
    model_component_indices = [
        i
        for i in range(model.compound_vector_matrix.shape[1])
        if np.any(model.compound_vector_matrix[:, i])
    ]

    # Reduced the compound_vector to contain only the non zero entries
    model_compound_vector = model.compound_vector_matrix[:, model_component_indices]

    # Now extract the sub covariance matrix containing only the components present in the model
    # NOTE(review): `covariance` is pulled in via a star import (presumably the
    # full component covariance matrix) -- confirm against the util modules.
    component_model_covariance = covariance[:, model_component_indices][
        model_component_indices, :
    ]

    # Now separate the compounds that have variance > 1000 and others to avoid numerical issues
    high_variance_indices = np.where(np.diag(component_model_covariance) > 1000)[0]
    low_variance_indices = np.where(np.diag(component_model_covariance) < 1000)[0]

    # Calculate cholesky matrix for two different covariance matrices
    if len(low_variance_indices) > 0:
        small_component_covariance = component_model_covariance[
            :, low_variance_indices
        ][low_variance_indices, :]
        cholesky_small_variance = matrix_decomposition(small_component_covariance)
        chi2_value_small = stats.chi2.isf(
            q=0.05, df=cholesky_small_variance.shape[1]
        )  # Chi-square value to map confidence interval
        sphere_s_vars = np.array(
            [
                model.problem.Variable("Sphere_s_{}".format(i), lb=-1, ub=1)
                for i in range(cholesky_small_variance.shape[1])
            ]
        )  # adding sphere variables for low variance compounds
        model.add_cons_vars(sphere_s_vars.tolist())

        # Insert zero rows at the high-variance positions so the cholesky
        # matrix lines up with the full component index space again.
        for i in high_variance_indices:
            zeros_axis = np.zeros((cholesky_small_variance.shape[1],))
            cholesky_small_variance = np.insert(
                cholesky_small_variance, i, zeros_axis, axis=0
            )
        metabolite_sphere_small = (
            model_compound_vector @ cholesky_small_variance
        )  # This is a fixed term compound_vector @ cholesky

    if len(high_variance_indices) > 0:
        large_component_covariance = component_model_covariance[
            :, high_variance_indices
        ][
            high_variance_indices, :
        ]  # Covariance matrix for the high variance components
        cholesky_large_variance = matrix_decomposition(large_component_covariance)
        chi2_value_high = stats.chi2.isf(q=0.05, df=cholesky_large_variance.shape[1])
        sphere_l_vars = np.array(
            [
                model.problem.Variable("Sphere_l_{}".format(i), lb=-1, ub=1)
                for i in range(cholesky_large_variance.shape[1])
            ]
        )  # adding sphere variables for high variance compounds
        model.add_cons_vars(sphere_l_vars.tolist())

        # Insert empty rows for the low_variance_components
        for i in low_variance_indices:
            zeros_axis = np.zeros((cholesky_large_variance.shape[1],))
            cholesky_large_variance = np.insert(
                cholesky_large_variance, i, zeros_axis, axis=0
            )
        metabolite_sphere_large = (
            model_compound_vector @ cholesky_large_variance
        )  # This is a fixed term compound_vector @ cholesky

    # Collect the sphere variables just added to the model.
    small_sphere_vars = np.array(
        [var for var in model.variables if var.name.startswith("Sphere_s_")]
    )
    large_sphere_vars = np.array(
        [var for var in model.variables if var.name.startswith("Sphere_l_")]
    )

    # Rebuild the delG constraints per reaction in terms of the sphere variables.
    delG_constraints = []
    for rxn in model.reactions:
        if rxn.id in model.Exclude_reactions:
            continue
        S_vector = rxn.cal_stoichiometric_matrix()
        # Concentration contribution, excluding protons.
        concentration_term = sum(
            stoic * metabolite.concentration_variable
            for metabolite, stoic in iteritems(rxn.metabolites)
            if metabolite.equilibrator_accession.inchi_key != PROTON_INCHI_KEY
        )

        # Error term from the high-variance ellipsoid (only non-zero coefficients
        # enter the expression, keeping it compact for the solver).
        if len(high_variance_indices) > 0:
            coefficients_high_var = (
                np.sqrt(chi2_value_high) * S_vector @ metabolite_sphere_large
            )
            err_expression_large = (
                coefficients_high_var[np.nonzero(coefficients_high_var)]
                @ large_sphere_vars[np.nonzero(coefficients_high_var)]
            )
        else:
            err_expression_large = 0

        # Error term from the low-variance ellipsoid.
        if len(low_variance_indices) > 0:
            coefficients_small_var = (
                np.sqrt(chi2_value_small) * S_vector @ metabolite_sphere_small
            )
            err_expression_small = (
                coefficients_small_var[np.nonzero(coefficients_small_var)]
                @ small_sphere_vars[np.nonzero(coefficients_small_var)]
            )
        else:
            err_expression_small = 0

        lhs_forward = (
            rxn.delG_forward
            - RT * concentration_term
            - err_expression_small
            - err_expression_large
        )
        lhs_reverse = (
            rxn.delG_reverse
            + RT * concentration_term
            + err_expression_small
            + err_expression_large
        )
        rhs = rxn.delG_prime + rxn.delG_transport

        delG_f = model.problem.Constraint(
            lhs_forward,
            lb=rhs,
            ub=rhs,
            name="delG_{}".format(rxn.forward_variable.name),
        )
        delG_r = model.problem.Constraint(
            lhs_reverse,
            lb=-rhs,
            ub=-rhs,
            name="delG_{}".format(rxn.reverse_variable.name),
        )
        delG_constraints.extend([delG_f, delG_r])
    model.add_cons_vars(delG_constraints)

    return model
def compare_dataframes(df1, df2):
    """Flag rows whose range in *df2* noticeably exceeds the range in *df1*.

    For each row index, the span ``maximum - minimum`` is computed in both
    frames; the flag is "Y" when the second span exceeds the first by more
    than 5%, otherwise "N".  Both inputs must expose "minimum" and "maximum"
    columns of the same length.

    Returns a list of "Y"/"N" strings, one per row.
    """
    flags = []
    for row in range(len(df1)):
        span_first = df1["maximum"][row] - df1["minimum"][row]
        span_second = df2["maximum"][row] - df2["minimum"][row]
        flags.append("Y" if span_second > span_first * 1.05 else "N")
    return flags
def extreme_value_distribution(data_set):
    """Fit a generalized extreme value (GEV) distribution and return its 99% interval.

    Uses ``scipy.stats.genextreme`` to fit the supplied Gibbs free energy
    range values and reports the lower and upper bounds of the fitted
    distribution at 99% confidence.

    Arguments:
        data_set [list] -- The max or min range of Gibbs free energy values

    Returns:
        tuple -- (min, max) extreme values predicted from the GEV at 99% confidence
    """
    shape_param, location, scale_param = stats.genextreme.fit(data_set)
    return stats.genextreme.interval(0.99, shape_param, location, scale_param)
| [
"numpy.random.normal",
"scipy.stats.genextreme.fit",
"numpy.insert",
"numpy.sqrt",
"scipy.stats.genextreme.interval",
"numpy.any",
"numpy.square",
"numpy.diag",
"numpy.zeros",
"numpy.nonzero",
"scipy.stats.chi2.isf"
] | [((608, 660), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1.0)', 'size': 'n_variables'}), '(loc=0, scale=1.0, size=n_variables)\n', (624, 660), True, 'import numpy as np\n'), ((8997, 9027), 'scipy.stats.genextreme.fit', 'stats.genextreme.fit', (['data_set'], {}), '(data_set)\n', (9017, 9027), False, 'from scipy import stats\n'), ((9059, 9105), 'scipy.stats.genextreme.interval', 'stats.genextreme.interval', (['(0.99)', 'c', 'loc', 'scale'], {}), '(0.99, c, loc, scale)\n', (9084, 9105), False, 'from scipy import stats\n'), ((3856, 3915), 'scipy.stats.chi2.isf', 'stats.chi2.isf', ([], {'q': '(0.05)', 'df': 'cholesky_small_variance.shape[1]'}), '(q=0.05, df=cholesky_small_variance.shape[1])\n', (3870, 3915), False, 'from scipy import stats\n'), ((5066, 5125), 'scipy.stats.chi2.isf', 'stats.chi2.isf', ([], {'q': '(0.05)', 'df': 'cholesky_large_variance.shape[1]'}), '(q=0.05, df=cholesky_large_variance.shape[1])\n', (5080, 5125), False, 'from scipy import stats\n'), ((698, 722), 'numpy.square', 'np.square', (['random_sample'], {}), '(random_sample)\n', (707, 722), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.sqrt', 'np.sqrt', (['chi_crit_val'], {}), '(chi_crit_val)\n', (1429, 1443), True, 'import numpy as np\n'), ((2819, 2861), 'numpy.any', 'np.any', (['model.compound_vector_matrix[:, i]'], {}), '(model.compound_vector_matrix[:, i])\n', (2825, 2861), True, 'import numpy as np\n'), ((4372, 4417), 'numpy.zeros', 'np.zeros', (['(cholesky_small_variance.shape[1],)'], {}), '((cholesky_small_variance.shape[1],))\n', (4380, 4417), True, 'import numpy as np\n'), ((4456, 4513), 'numpy.insert', 'np.insert', (['cholesky_small_variance', 'i', 'zeros_axis'], {'axis': '(0)'}), '(cholesky_small_variance, i, zeros_axis, axis=0)\n', (4465, 4513), True, 'import numpy as np\n'), ((5573, 5618), 'numpy.zeros', 'np.zeros', (['(cholesky_large_variance.shape[1],)'], {}), '((cholesky_large_variance.shape[1],))\n', (5581, 5618), True, 'import numpy as 
np\n'), ((5657, 5714), 'numpy.insert', 'np.insert', (['cholesky_large_variance', 'i', 'zeros_axis'], {'axis': '(0)'}), '(cholesky_large_variance, i, zeros_axis, axis=0)\n', (5666, 5714), True, 'import numpy as np\n'), ((3370, 3405), 'numpy.diag', 'np.diag', (['component_model_covariance'], {}), '(component_model_covariance)\n', (3377, 3405), True, 'import numpy as np\n'), ((3453, 3488), 'numpy.diag', 'np.diag', (['component_model_covariance'], {}), '(component_model_covariance)\n', (3460, 3488), True, 'import numpy as np\n'), ((6653, 6677), 'numpy.sqrt', 'np.sqrt', (['chi2_value_high'], {}), '(chi2_value_high)\n', (6660, 6677), True, 'import numpy as np\n'), ((6804, 6837), 'numpy.nonzero', 'np.nonzero', (['coefficients_high_var'], {}), '(coefficients_high_var)\n', (6814, 6837), True, 'import numpy as np\n'), ((6875, 6908), 'numpy.nonzero', 'np.nonzero', (['coefficients_high_var'], {}), '(coefficients_high_var)\n', (6885, 6908), True, 'import numpy as np\n'), ((7073, 7098), 'numpy.sqrt', 'np.sqrt', (['chi2_value_small'], {}), '(chi2_value_small)\n', (7080, 7098), True, 'import numpy as np\n'), ((7226, 7260), 'numpy.nonzero', 'np.nonzero', (['coefficients_small_var'], {}), '(coefficients_small_var)\n', (7236, 7260), True, 'import numpy as np\n'), ((7298, 7332), 'numpy.nonzero', 'np.nonzero', (['coefficients_small_var'], {}), '(coefficients_small_var)\n', (7308, 7332), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import xarray as xr
from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, \
get_vocabulary, dt64_epoch, update_dataset, ENCODINGS
from ooi_data_explorations.uncabled.process_phsen import ATTRS, quality_checks
def phsen_streamed(ds):
    """
    Takes PHSEN data streamed from instruments deployed by the Regional Cabled
    Array and cleans up the data set to make it more user-friendly. Primary
    task is renaming parameters and dropping some that are of limited use.
    Additionally, re-organize some of the variables to permit better assessments
    of the data.

    :param ds: initial PHSEN data set recorded by the data logger system and
        downloaded from OOI via the M2M system
    :return: cleaned up and reorganized data set
    """
    # drop some of the variables:
    #   checksum == not used
    #   record_type == not used
    #   record_length == not used
    #   signal_intensity_434, part of the light measurements array, redundant so can remove
    #   signal_intensity_578, part of the light measurements array, redundant so can remove
    ds = ds.reset_coords()
    ds = ds.drop(['checksum', 'record_type', 'record_length', 'signal_intensity_434',
                  'signal_intensity_578'])

    # convert the internal_timestamp values from a datetime64[ns] object to a floating point number with the time in
    # seconds, replacing the internal_timestamp with the record_time (the internal_timestamp is incorrectly set in the
    # NetCDF file).
    ds['internal_timestamp'] = ('time', dt64_epoch(ds.record_time))
    ds['internal_timestamp'].attrs = dict({
        'long_name': 'Internal SAMI-pH Clock Time',
        'standard_name': 'time',
        'units': 'seconds since 1970-01-01 00:00:00 0:00',
        'calendar': 'gregorian',
        'comment': ('Comparing the instrument internal clock versus the GPS referenced sampling time will allow for ' +
                    'calculations of the instrument clock offset and drift. Useful when working with the ' +
                    'recovered instrument data where no external GPS referenced clock is available.')
    })
    ds = ds.drop(['record_time'])

    # rename some of the variables for better clarity
    rename = {
        'voltage_battery': 'raw_battery_voltage',
        'thermistor_start': 'raw_thermistor_start',
        'thermistor_end': 'raw_thermistor_end',
        'phsen_thermistor_temperature': 'thermistor_temperature',
        'phsen_battery_volts': 'battery_voltage',
        'ph_seawater': 'seawater_ph',
        'ph_seawater_qc_executed': 'seawater_ph_qc_executed',
        'ph_seawater_qc_results': 'seawater_ph_qc_results'
    }
    ds = ds.rename(rename)

    # now we need to reset the light and reference arrays to named variables that will be more meaningful and useful in
    # the final data files
    nrec = len(ds['time'].values)
    light = np.array(np.vstack(ds['ph_light_measurements'].values), dtype='int32')
    light = np.atleast_3d(light)
    light = np.reshape(light, (nrec, 23, 4))  # 4 sets of 23 seawater measurements
    reference_434 = light[:, :, 0]  # reference signal, 434 nm
    signal_434 = light[:, :, 1]  # signal intensity, 434 nm (PH434SI_L0)
    reference_578 = light[:, :, 2]  # reference signal, 578 nm
    signal_578 = light[:, :, 3]  # signal intensity, 578 nm (PH578SI_L0)

    refnc = np.array(np.vstack(ds['reference_light_measurements'].values), dtype='int32')
    refnc = np.atleast_3d(refnc)
    refnc = np.reshape(refnc, (nrec, 4, 4))  # 4 sets of 4 DI water measurements (blanks)
    blank_refrnc_434 = refnc[:, :, 0]  # DI blank reference, 434 nm
    blank_signal_434 = refnc[:, :, 1]  # DI blank signal, 434 nm
    blank_refrnc_578 = refnc[:, :, 2]  # DI blank reference, 578 nm
    blank_signal_578 = refnc[:, :, 3]  # DI blank signal, 578 nm

    # create a data set with the reference and light measurements
    ph = xr.Dataset({
        'blank_refrnc_434': (['time', 'blanks'], blank_refrnc_434.astype('int32')),
        'blank_signal_434': (['time', 'blanks'], blank_signal_434.astype('int32')),
        'blank_refrnc_578': (['time', 'blanks'], blank_refrnc_578.astype('int32')),
        'blank_signal_578': (['time', 'blanks'], blank_signal_578.astype('int32')),
        'reference_434': (['time', 'measurements'], reference_434.astype('int32')),
        'signal_434': (['time', 'measurements'], signal_434.astype('int32')),
        'reference_578': (['time', 'measurements'], reference_578.astype('int32')),
        'signal_578': (['time', 'measurements'], signal_578.astype('int32'))
    }, coords={'time': ds['time'], 'measurements': np.arange(0, 23).astype('int32'),
               'blanks': np.arange(0, 4).astype('int32')
               })
    # drop the original 2-D arrays (now represented by the named variables above)
    ds = ds.drop(['ph_light_measurements', 'reference_light_measurements',
                  'ph_light_measurements_dim_0', 'reference_light_measurements_dim_0'])

    # merge the data sets back together
    ds = ds.merge(ph)

    # test data quality
    ds['seawater_ph_quality_flag'] = quality_checks(ds)

    # reset some attributes
    for key, value in ATTRS.items():
        for atk, atv in value.items():
            ds[key].attrs[atk] = atv

    # add the original variable name as an attribute, if renamed
    for key, value in rename.items():
        ds[value].attrs['ooinet_variable_name'] = key

    # and reset some of the data types
    data_types = ['deployment', 'raw_thermistor_end', 'raw_thermistor_start', 'unique_id', 'raw_battery_voltage']
    for v in data_types:
        ds[v] = ds[v].astype('int32')

    return ds
def main(argv=None):
    """Download streamed PHSEN data via M2M, reprocess it and save to disk.

    Command-line arguments (parsed by ``inputs``) select the site/node/sensor,
    delivery method and stream, plus either a deployment number or an explicit
    start/stop date range.

    Raises:
        SyntaxError: if neither a deployment number nor a date range is given.
        SystemExit: if deployment dates or the requested data are unavailable.
    """
    # setup the input arguments
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop

    # determine the start and stop times for the data request based on either the deployment number
    # or user entered beginning and ending dates.
    # bug fix: the original used `if not deploy or (start and stop)` and *returned*
    # (rather than raised) the SyntaxError, which both rejected valid date-range-only
    # requests and silently discarded the error object.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # Determine start and end dates based on the deployment number
        start, stop = get_deployment_dates(site, node, sensor, deploy)
        if not start or not stop:
            exit_text = ('Deployment dates are unavailable for %s-%s-%s, deployment %02d.' % (site, node, sensor,
                                                                                             deploy))
            raise SystemExit(exit_text)

    # Request the data
    r = m2m_request(site, node, sensor, method, stream, start, stop)
    if not r:
        exit_text = ('Data unavailable for %s-%s-%s, deployment %02d. Check request.' % (site, node, sensor, deploy))
        raise SystemExit(exit_text)

    # Valid request, start downloading the data
    if deploy:
        phsen = m2m_collect(r, ('.*deployment%04d.*PHSEN.*\\.nc$' % deploy))
    else:
        phsen = m2m_collect(r, '.*PHSEN.*\\.nc$')

    if not phsen:
        exit_text = ('Data unavailable for %s-%s-%s. Check request.' % (site, node, sensor))
        raise SystemExit(exit_text)

    # clean-up and reorganize
    phsen = phsen_streamed(phsen)
    vocab = get_vocabulary(site, node, sensor)[0]
    phsen = update_dataset(phsen, vocab['maxdepth'])

    # save the data to disk, creating the output directory if needed
    out_file = os.path.abspath(args.outfile)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
    phsen.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)


if __name__ == '__main__':
    main()
| [
"ooi_data_explorations.common.get_vocabulary",
"ooi_data_explorations.common.get_deployment_dates",
"numpy.reshape",
"ooi_data_explorations.common.inputs",
"ooi_data_explorations.uncabled.process_phsen.ATTRS.items",
"ooi_data_explorations.common.dt64_epoch",
"ooi_data_explorations.common.m2m_collect",
... | [((3068, 3088), 'numpy.atleast_3d', 'np.atleast_3d', (['light'], {}), '(light)\n', (3081, 3088), True, 'import numpy as np\n'), ((3101, 3133), 'numpy.reshape', 'np.reshape', (['light', '(nrec, 23, 4)'], {}), '(light, (nrec, 23, 4))\n', (3111, 3133), True, 'import numpy as np\n'), ((3593, 3613), 'numpy.atleast_3d', 'np.atleast_3d', (['refnc'], {}), '(refnc)\n', (3606, 3613), True, 'import numpy as np\n'), ((3626, 3657), 'numpy.reshape', 'np.reshape', (['refnc', '(nrec, 4, 4)'], {}), '(refnc, (nrec, 4, 4))\n', (3636, 3657), True, 'import numpy as np\n'), ((5167, 5185), 'ooi_data_explorations.uncabled.process_phsen.quality_checks', 'quality_checks', (['ds'], {}), '(ds)\n', (5181, 5185), False, 'from ooi_data_explorations.uncabled.process_phsen import ATTRS, quality_checks\n'), ((5237, 5250), 'ooi_data_explorations.uncabled.process_phsen.ATTRS.items', 'ATTRS.items', ([], {}), '()\n', (5248, 5250), False, 'from ooi_data_explorations.uncabled.process_phsen import ATTRS, quality_checks\n'), ((5784, 5796), 'ooi_data_explorations.common.inputs', 'inputs', (['argv'], {}), '(argv)\n', (5790, 5796), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((6805, 6865), 'ooi_data_explorations.common.m2m_request', 'm2m_request', (['site', 'node', 'sensor', 'method', 'stream', 'start', 'stop'], {}), '(site, node, sensor, method, stream, start, stop)\n', (6816, 6865), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((7511, 7551), 'ooi_data_explorations.common.update_dataset', 'update_dataset', (['phsen', "vocab['maxdepth']"], {}), "(phsen, vocab['maxdepth'])\n", (7525, 7551), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((7596, 7625), 
'os.path.abspath', 'os.path.abspath', (['args.outfile'], {}), '(args.outfile)\n', (7611, 7625), False, 'import os\n'), ((1642, 1668), 'ooi_data_explorations.common.dt64_epoch', 'dt64_epoch', (['ds.record_time'], {}), '(ds.record_time)\n', (1652, 1668), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((2994, 3039), 'numpy.vstack', 'np.vstack', (["ds['ph_light_measurements'].values"], {}), "(ds['ph_light_measurements'].values)\n", (3003, 3039), True, 'import numpy as np\n'), ((3512, 3564), 'numpy.vstack', 'np.vstack', (["ds['reference_light_measurements'].values"], {}), "(ds['reference_light_measurements'].values)\n", (3521, 3564), True, 'import numpy as np\n'), ((7114, 7172), 'ooi_data_explorations.common.m2m_collect', 'm2m_collect', (['r', "('.*deployment%04d.*PHSEN.*\\\\.nc$' % deploy)"], {}), "(r, '.*deployment%04d.*PHSEN.*\\\\.nc$' % deploy)\n", (7125, 7172), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((7201, 7234), 'ooi_data_explorations.common.m2m_collect', 'm2m_collect', (['r', '""".*PHSEN.*\\\\.nc$"""'], {}), "(r, '.*PHSEN.*\\\\.nc$')\n", (7212, 7234), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((7461, 7495), 'ooi_data_explorations.common.get_vocabulary', 'get_vocabulary', (['site', 'node', 'sensor'], {}), '(site, node, sensor)\n', (7475, 7495), False, 'from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((6417, 6465), 'ooi_data_explorations.common.get_deployment_dates', 'get_deployment_dates', (['site', 'node', 'sensor', 'deploy'], {}), '(site, node, sensor, deploy)\n', (6437, 6465), False, 'from 
ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, get_vocabulary, dt64_epoch, update_dataset, ENCODINGS\n'), ((7652, 7677), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (7667, 7677), False, 'import os\n'), ((7700, 7725), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (7715, 7725), False, 'import os\n'), ((4770, 4786), 'numpy.arange', 'np.arange', (['(0)', '(23)'], {}), '(0, 23)\n', (4779, 4786), True, 'import numpy as np\n'), ((4829, 4844), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (4838, 4844), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = """Prof. <NAME>, Ph.D. <<EMAIL>>"""
import os
# Clear the terminal before printing the banner (POSIX `clear`; no-op elsewhere).
os.system('clear')
print('.-------------------------------.')
print('| |#')
print('| By.: Prof. <NAME> |#')
print('| |#')
print('| 2021 |#')
print('\'-------------------------------\'#')
print(' ################################')
print('')
print('Importing Libraries:')
import numpy as np
import random as rd
import matplotlib.pyplot as pl
from math import isnan
def neig(i, j, N):
    """Return the 4-connected neighbours of cell (i, j) on an N x N lattice.

    Neighbours outside the lattice are omitted; the result is an array of
    [row, col] pairs in the order up, down, left, right.
    """
    neighbours = []
    if i > 0:
        neighbours.append([i - 1, j])
    if i < N - 1:
        neighbours.append([i + 1, j])
    if j > 0:
        neighbours.append([i, j - 1])
    if j < N - 1:
        neighbours.append([i, j + 1])
    return np.array(neighbours)
# Lattice simulation parameters.  Looks like an Ising-style spin model with a
# companion lattice C coupled through the magnetization -- TODO confirm model.
N = 32
J = 1.0
beta = 1.0/1.5
alpha = 4.0
# S: spin lattice, C: companion lattice, both random +/-1 initial states.
S = np.array([rd.choices([1,-1],k=N) for i in range(N)])
C = np.array([rd.choices([1,-1],k=N) for i in range(N)])
# Buffers for the synchronously updated next states.
Sn = np.zeros((N,N))
Cn = np.zeros((N,N))
# r: per-step log-magnetization increments; cauda: the positive increments only.
r = []
cauda = []
M = 1
Steps = 100
for it in range(Steps):
# Keep last step's magnetization to form the log-ratio below.
Ml = M
M = np.average(S)
for i in range(N):
for j in range(N):
# Sum of the 4-neighbour spins of (i, j).
sm = 0
for x in neig(i,j,N):
sm = sm + S[x[0],x[1]]
# Local field: neighbour coupling minus a global-magnetization term.
h = J*sm - alpha*C[i,j]*M
# Heat-bath style flip probability.
p = 1.0/(1+np.exp(-2*beta*h))
if (rd.random() < p):
Sn[i,j] = 1
else:
Sn[i,j] = -1
# Companion spin flips when its coupling term is unfavourable.
if (alpha*S[i,j]*C[i,j]*sm < 0):
Cn[i,j] = -C[i,j]
else:
Cn[i,j] = C[i,j]
# Synchronous update: swap in the freshly computed lattices.
S = Sn
C = Cn
# Log-ratio of successive |magnetization| values; NaN/inf (M == 0) are
# filtered by the abs(rt) < 100 guard below.
rt = np.log10(abs(M))-np.log10(abs(Ml))
if (abs(rt) < 100):
r = np.append(r,rt)
if (rt > 0):
cauda.append(rt)
mi = min(r)
ma = max(r)
#pl.figure(1)
#pl.plot(r)
#pl.axis([0,Steps,-0.1,0.1])
#pl.savefig('../chapters/Chapter_8/figs/src/Bornts.svg')
# Empirical complementary CDF of the positive increments over [mi, ma].
x = np.linspace(mi,ma,100)
y = []
for ex in x:
y.append(1.0-float(sum(cauda<ex))/len(cauda))
#pl.figure(2)
#pl.loglog(x,y,'.')
#pl.savefig('../chapters/Chapter_8/figs/src/Bornts.svg')
#pl.show()
pl.imshow(S,cmap='gray')
pl.show()
| [
"matplotlib.pyplot.imshow",
"numpy.average",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"random.choices",
"numpy.exp",
"os.system",
"random.random",
"matplotlib.pyplot.show"
] | [((108, 126), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (117, 126), False, 'import os\n'), ((961, 977), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (969, 977), True, 'import numpy as np\n'), ((982, 998), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (990, 998), True, 'import numpy as np\n'), ((1912, 1936), 'numpy.linspace', 'np.linspace', (['mi', 'ma', '(100)'], {}), '(mi, ma, 100)\n', (1923, 1936), True, 'import numpy as np\n'), ((2114, 2139), 'matplotlib.pyplot.imshow', 'pl.imshow', (['S'], {'cmap': '"""gray"""'}), "(S, cmap='gray')\n", (2123, 2139), True, 'import matplotlib.pyplot as pl\n'), ((2139, 2148), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2146, 2148), True, 'import matplotlib.pyplot as pl\n'), ((780, 791), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (788, 791), True, 'import numpy as np\n'), ((1078, 1091), 'numpy.average', 'np.average', (['S'], {}), '(S)\n', (1088, 1091), True, 'import numpy as np\n'), ((856, 880), 'random.choices', 'rd.choices', (['[1, -1]'], {'k': 'N'}), '([1, -1], k=N)\n', (866, 880), True, 'import random as rd\n'), ((913, 937), 'random.choices', 'rd.choices', (['[1, -1]'], {'k': 'N'}), '([1, -1], k=N)\n', (923, 937), True, 'import random as rd\n'), ((1703, 1719), 'numpy.append', 'np.append', (['r', 'rt'], {}), '(r, rt)\n', (1712, 1719), True, 'import numpy as np\n'), ((1360, 1371), 'random.random', 'rd.random', ([], {}), '()\n', (1369, 1371), True, 'import random as rd\n'), ((1312, 1333), 'numpy.exp', 'np.exp', (['(-2 * beta * h)'], {}), '(-2 * beta * h)\n', (1318, 1333), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Imports
import numpy as np
import torch
import gpytorch
from gpytorch.priors import GammaPrior
from copy import deepcopy
# Dictionary of distributions for random initialization
def build_dist_dict(noise_prior, outputscale_prior, lengthscale_prior):
    """
    Build a dictionary of distributions to sample for random restarts.

    Each argument is either ``None`` (fall back to a default ``GammaPrior``)
    or a sequence whose first element is the distribution to use.

    Returns:
        dict mapping raw GPyTorch parameter names to sampling distributions.
    """
    # idiom fix: compare with `is None`, not `== None` (PEP 8); `==` may
    # invoke arbitrary __eq__ overloads on distribution objects.
    if noise_prior is None:
        noise_dist = GammaPrior(1.5, 0.5)
    else:
        noise_dist = noise_prior[0]

    if outputscale_prior is None:
        output_dist = GammaPrior(3, 0.5)
    else:
        output_dist = outputscale_prior[0]

    if lengthscale_prior is None:
        lengthscale_dist = GammaPrior(3, 0.5)
    else:
        lengthscale_dist = lengthscale_prior[0]

    distributions = {'likelihood.noise_covar.raw_noise': noise_dist,
                     'covar_module.raw_outputscale': output_dist,
                     'covar_module.base_kernel.raw_lengthscale': lengthscale_dist}
    return distributions
# Randomly set parameters for model based on distributions
def set_init_params(dictionary, distributions, seed=0):
    """Return a copy of `dictionary` with the entries named in `distributions`
    replaced by log-transformed samples drawn from the matching distribution.

    The draw is seeded per key so results are reproducible; the input state
    dictionary itself is never mutated.
    """
    updated = deepcopy(dictionary)
    for name, dist in distributions.items():
        # Re-seed before each draw so every parameter's sample depends only
        # on `seed`, not on dict iteration order up to that point.
        torch.manual_seed(seed)
        updated[name] = dist.expand(dictionary[name].shape).sample().log()
    return updated
# Optimize a model via MLE
def optimize_mll(model, likelihood, X, y, learning_rate=0.1,
n_restarts=0, training_iters=100, noise_prior=None,
outputscale_prior=None, lengthscale_prior=None):
"""Fit a GPyTorch exact-GP model by maximizing the marginal log likelihood,
with optional random restarts drawn from the (hyper)prior distributions.
Returns the per-restart lists of step losses; the model is left loaded
with the state of the best (lowest final loss) restart."""
# Model and likelihood in training mode
model.train()
likelihood.train()
# Use ADAM
optimizer = torch.optim.Adam(
[{'params': model.parameters()}, ],
lr=learning_rate
)
# Marginal log likelihood loss
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
# Dictionary of distributions to draw random restarts from
dist_dict = build_dist_dict(noise_prior, outputscale_prior, lengthscale_prior)
# Restart optimizer with random inits drawn from priors
states = []
loss_list = []
min_loss_list = []
for restart in range(n_restarts + 1):
step_losses = []
# Optimization
for i in range(training_iters):
optimizer.zero_grad()
output = model(X)
loss = -mll(output, y)
step_losses.append(loss.item())
loss.backward()
optimizer.step()
states.append(deepcopy(mll.model.state_dict()))
loss_list.append(step_losses)
min_loss_list.append(loss.item())
# NOTE(review): restarts always re-randomize from states[0] (the first fit's
# state as template) and the Adam optimizer state is not reset between
# restarts -- confirm this is intended.
new_state = set_init_params(states[0], dist_dict, seed=restart)
mll.model.load_state_dict(new_state)
# Set to best state (lowest final-iteration loss across restarts)
mll.model.load_state_dict(states[np.argmin(min_loss_list)])
return loss_list | [
"torch.manual_seed",
"gpytorch.mlls.ExactMarginalLogLikelihood",
"gpytorch.priors.GammaPrior",
"copy.deepcopy",
"numpy.argmin"
] | [((1249, 1269), 'copy.deepcopy', 'deepcopy', (['dictionary'], {}), '(dictionary)\n', (1257, 1269), False, 'from copy import deepcopy\n'), ((2266, 2325), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['likelihood', 'model'], {}), '(likelihood, model)\n', (2306, 2325), False, 'import gpytorch\n'), ((419, 439), 'gpytorch.priors.GammaPrior', 'GammaPrior', (['(1.5)', '(0.5)'], {}), '(1.5, 0.5)\n', (429, 439), False, 'from gpytorch.priors import GammaPrior\n'), ((542, 560), 'gpytorch.priors.GammaPrior', 'GammaPrior', (['(3)', '(0.5)'], {}), '(3, 0.5)\n', (552, 560), False, 'from gpytorch.priors import GammaPrior\n'), ((680, 698), 'gpytorch.priors.GammaPrior', 'GammaPrior', (['(3)', '(0.5)'], {}), '(3, 0.5)\n', (690, 698), False, 'from gpytorch.priors import GammaPrior\n'), ((1531, 1554), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1548, 1554), False, 'import torch\n'), ((3260, 3284), 'numpy.argmin', 'np.argmin', (['min_loss_list'], {}), '(min_loss_list)\n', (3269, 3284), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 23:04:56 2020
@author: dhruv
"""
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
from mrcnn.utils import Dataset
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
class helmetDataset(Dataset):
"""Mask R-CNN dataset for helmet detection with two classes: 'helmet' and
'head'. Expects `dataset_dir/images/` and `dataset_dir/annots/` holding
numerically named images with matching Pascal-VOC style XML annotations."""
# load the dataset definitions
def load_dataset(self, dataset_dir, is_train=True):
"""Register classes and images; images with id >= 80 go to the test/val
split, the rest to the train split."""
# define two class, can add more classes, just add the index number
self.add_class("dataset", 1, "helmet") #Change required
self.add_class("dataset", 2, "head") #Change required
# define data locations, all images of classes must be in a single folder
# named images and all annotations must be in annots can be changed accordingly though
images_dir = dataset_dir + '/images/'
annotations_dir = dataset_dir + '/annots/'
# find all images
for filename in listdir(images_dir):
# extract image id (filename without its 4-char extension, e.g. '.jpg')
image_id = filename[:-4]
#print('IMAGE ID: ', image_id)
# skip all images after 80 if we are building the train set
if is_train and int(image_id) >= 80: #set limit for your train and test set
continue
# skip all images before 80 if we are building the test/val set
if not is_train and int(image_id) < 81:
continue
img_path = images_dir + filename
ann_path = annotations_dir + image_id + '.xml'
# add to dataset
self.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path, class_ids = [0,1,2]) # for your case it is 0:BG, 1:PerWithHel.., 2:PersonWithoutHel… #Change required
# extract bounding boxes from an annotation file
def extract_boxes(self, filename):
"""Parse a VOC-style XML file; return ([xmin, ymin, xmax, ymax, name] per
object, image width, image height)."""
# load and parse the file
tree = ElementTree.parse(filename)
# get the root of the document
root = tree.getroot()
# extract each bounding box
boxes = list()
#for box in root.findall('.//bndbox'):
for box in root.findall('.//object'):
name = box.find('name').text #Change required
xmin = int(box.find('./bndbox/xmin').text)
ymin = int(box.find('./bndbox/ymin').text)
xmax = int(box.find('./bndbox/xmax').text)
ymax = int(box.find('./bndbox/ymax').text)
#coors = [xmin, ymin, xmax, ymax, name]
coors = [xmin, ymin, xmax, ymax, name] #Change required
boxes.append(coors)
# extract image dimensions
width = int(root.find('.//size/width').text)
height = int(root.find('.//size/height').text)
return boxes, width, height
# load the masks for an image
def load_mask(self, image_id):
"""Build one rectangular (box-shaped) binary mask channel per annotated
object; returns (masks array of shape [h, w, n], class id array)."""
# get details of image
info = self.image_info[image_id]
# define box file location
path = info['annotation']
# load XML
boxes, w, h = self.extract_boxes(path)
# create one array for all masks, each on a different channel
masks = zeros([h, w, len(boxes)], dtype='uint8')
# create masks
class_ids = list()
for i in range(len(boxes)):
box = boxes[i]
row_s, row_e = box[1], box[3]
col_s, col_e = box[0], box[2]
if (box[4] == 'helmet'):#Change required #change this to your .XML file
masks[row_s:row_e, col_s:col_e, i] = 1 #Change required #assign number to your class_id
class_ids.append(self.class_names.index('helmet')) #Change required
else:
masks[row_s:row_e, col_s:col_e, i] = 2 #Change required
class_ids.append(self.class_names.index('head')) #Change required
return masks, asarray(class_ids, dtype='int32')
# load an image reference
def image_reference(self, image_id):
"""Return the path of the source image for `image_id`."""
info = self.image_info[image_id]
return info['path']
# train set: images with id < 80 (see helmetDataset.load_dataset)
train_set = helmetDataset()
train_set.load_dataset('helmet', is_train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# test/val set: images with id >= 81
test_set = helmetDataset()
test_set.load_dataset('helmet', is_train=False)
test_set.prepare()
print('Test: %d' % len(test_set.image_ids))
# define a configuration for the model (overrides mrcnn.config.Config defaults)
class helmetConfig(Config):
# define the name of the configuration
NAME = "helmet_cfg"
# number of classes (background + helmet + head)
NUM_CLASSES = 1 + 2 #Change required
# number of training steps per epoch
STEPS_PER_EPOCH = 1
config = helmetConfig()
config.display()
# define the model in training mode, checkpoints written to the current directory
model = MaskRCNN(mode='training', model_dir='./', config=config)
# load weights (mscoco) and exclude the output layers (class count differs from COCO)
model.load_weights('mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
# train weights (output layers or 'heads')
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=1, layers='heads')
"mrcnn.model.MaskRCNN",
"numpy.asarray",
"os.listdir",
"xml.etree.ElementTree.parse"
] | [((4429, 4485), 'mrcnn.model.MaskRCNN', 'MaskRCNN', ([], {'mode': '"""training"""', 'model_dir': '"""./"""', 'config': 'config'}), "(mode='training', model_dir='./', config=config)\n", (4437, 4485), False, 'from mrcnn.model import MaskRCNN\n'), ((1005, 1024), 'os.listdir', 'listdir', (['images_dir'], {}), '(images_dir)\n', (1012, 1024), False, 'from os import listdir\n'), ((1848, 1875), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['filename'], {}), '(filename)\n', (1865, 1875), False, 'from xml.etree import ElementTree\n'), ((3566, 3599), 'numpy.asarray', 'asarray', (['class_ids'], {'dtype': '"""int32"""'}), "(class_ids, dtype='int32')\n", (3573, 3599), False, 'from numpy import asarray\n')] |
import aoc
import numpy as np
# Advent of Code day 13: dot coordinates and fold instructions.
coorddata, folddata = aoc.get_data(13)
coordlist = []
for e in coorddata:
# each line is "x,y" -> integer pair
coordlist.append(np.array(e.split(",")).astype(int))
coords = np.array(coordlist)
# board size: (max y + 1, max x + 1) -- note rows are y, columns are x
size = (np.max(coords[:, 1]) + 1, np.max(coords[:, 0]) + 1)
print(f"{size}")
board = np.zeros(shape=size, dtype=np.uint8)
for c in coordlist:
board[c[1], c[0]] = 8
print(f"{board}")
def fold_horizontal(board, value):
for i in range(1, board.shape[0] - value):
for j in range(0, board.shape[1]):
board[value - i, j] |= board[value + i, j]
return board[0:value, :]
def fold_vertical(board, value):
    """Fold `board` leftward along vertical line `value` (in place).

    Each column right of the fold is OR-ed into its mirror column on the
    left; the region left of the line (cols 0..value-1) is returned as a view.
    """
    cols_right = board.shape[1] - value
    for offset in range(1, cols_right):
        board[:, value - offset] |= board[:, value + offset]
    return board[:, :value]
np.set_printoptions(linewidth=100)
for fold in folddata:
# instruction looks like "fold along y=7"; split after "along " then on "="
axis, value = fold.split("g ")[1].split("=")
value = int(value)
if axis == "y":
board = fold_horizontal(board, value)
else:
board = fold_vertical(board, value)
print(f"{board}")
# part 1 answer after the first fold / final dot count after all folds
print(f"{np.count_nonzero(board)}")
| [
"aoc.get_data",
"numpy.max",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.set_printoptions"
] | [((53, 69), 'aoc.get_data', 'aoc.get_data', (['(13)'], {}), '(13)\n', (65, 69), False, 'import aoc\n'), ((172, 191), 'numpy.array', 'np.array', (['coordlist'], {}), '(coordlist)\n', (180, 191), True, 'import numpy as np\n'), ((279, 315), 'numpy.zeros', 'np.zeros', ([], {'shape': 'size', 'dtype': 'np.uint8'}), '(shape=size, dtype=np.uint8)\n', (287, 315), True, 'import numpy as np\n'), ((803, 837), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(100)'}), '(linewidth=100)\n', (822, 837), True, 'import numpy as np\n'), ((201, 221), 'numpy.max', 'np.max', (['coords[:, 1]'], {}), '(coords[:, 1])\n', (207, 221), True, 'import numpy as np\n'), ((227, 247), 'numpy.max', 'np.max', (['coords[:, 0]'], {}), '(coords[:, 0])\n', (233, 247), True, 'import numpy as np\n'), ((1087, 1110), 'numpy.count_nonzero', 'np.count_nonzero', (['board'], {}), '(board)\n', (1103, 1110), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
from pycocotools import mask as maskUtils
import imgaug
import skimage
from matplotlib import pyplot as plt
import cv2
import time
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
# Root directory of the project (one level up from this script)
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)
print(ROOT_DIR)
# Path to pre-trained COCO weights used when --model coco is requested
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
from angiodataset import AngioDataset
import json
import tensorflow as tf
# Enable memory growth so TensorFlow does not grab all GPU memory up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "angio2020"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
"""Runs official COCO evaluation.
dataset: A Dataset object with valiadtion data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
def evaluate_coco(model, dataset, data, eval_type="bbox", limit=0, image_ids=None):
# Pick images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
class_names = ['BG', 'lad', 'diagonal', 'lcx1', 'lcx2', 'distal']
# visualize.display_instances(, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'])
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
results = data.loadRes(results)
# Evaluate
cocoEval = COCOeval(data, results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
# Training configuration for the angiogram dataset (overrides mrcnn defaults)
class AngioConfig(Config):
NAME = 'angio2020'
IMAGES_PER_GPU = 1
# single-channel (grayscale) input images
IMAGE_CHANNEL_COUNT = 1
MEAN_PIXEL = np.array([129.8])
NUM_CLASSES = 1 + 5 # Background + rest
STEPS_PER_EPOCH = 16432
VALIDATION_STEPS = 317
DETECTION_MIN_CONFIDENCE = 0.7
BACKBONE = 'resnet101'
IMAGE_MAX_DIM = 512
IMAGE_MIN_DIM = 512
RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
MINI_MASK_SHAPE = (56, 56)
LEARNING_RATE = 0.0001
#Constants
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_INFERENCE_IMAGE_DIR = 'A:/val_images'
# Entry point: dispatches on the `command` argument to train, evaluate or run
# inference with Mask R-CNN on the angiogram dataset.
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on Angio Dataset.')
parser.add_argument("command",
metavar="<command>",
help="'train' 'eval' or 'inference on angio dataset")
parser.add_argument('--dataset', required=False,
default='A:/',
metavar="/path/to/angio/",
help="Directory of the dataset (default=A:/)")
parser.add_argument('--model', required=False,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'imagenet'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--inference_datapath', required=False,
default=DEFAULT_INFERENCE_IMAGE_DIR,
metavar="/path/to/inference_images/",
help="Path to folder containing images to run prediction (default=A:/val_images)"
)
parser.add_argument('--eval_data', required=False,
default='val',
metavar="<eval_data>",
help="which set of data to evaluate model on 'val' or 'train' or 'test' (default=val)"
)
parser.add_argument('--inference_save_path', required=False,
default='C:/Users/damo/OneDrive/Documents/mrcnn_inferences',
metavar="/path/to/inference/save",
help="path to save inference images"
)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
IMAGE_DIR = args.inference_datapath
mode = args.command
datasetdir = args.dataset
eval_data = args.eval_data
inference_save_path = args.inference_save_path
# Select training vs. inference configuration based on the command.
if mode == 'train':
config = AngioConfig()
elif mode == 'inference' or mode == 'eval':
class InferenceConfig(AngioConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.05
BACKBONE = 'resnet101'
# BACKBONE = modellib.mobilenet
# COMPUTE_BACKBONE_SHAPE = modellib.ompute_mobilenet_shapes
config = InferenceConfig()
config.display()
exclude = []
# create model and load weights
if mode =='train':
model = modellib.MaskRCNN(mode="training", config=config, model_dir=args.logs)
if not args.model:
weights_path = model.find_last()
elif args.model == 'imagenet':
# conv1 is excluded because IMAGE_CHANNEL_COUNT differs from ImageNet's 3
exclude = ['conv1']
weights_path = model.get_imagenet_weights()
elif args.model == 'coco':
# output heads are excluded because NUM_CLASSES differs from COCO
exclude = ['mrcnn_bbox_fc', 'mrcnn_class_logits', 'mrcnn_mask']
weights_path = COCO_MODEL_PATH
else:
weights_path = args.model
elif mode == 'inference' or mode == 'eval':
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs)
if not args.model:
weights_path = model.find_last()
else:
weights_path = args.model
print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True, exclude=exclude)
# Inference: run detection over every .jpeg in IMAGE_DIR and save the masks.
if mode == 'inference':
class_names = ['BG', 'lad', 'diagonal', 'lcx1', 'lcx2', 'distal']
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
for name in file_names:
if name.split('.')[-1] == 'jpeg':
# load image
pred_image = skimage.io.imread(os.path.join(IMAGE_DIR, name))
image = skimage.color.gray2rgb(pred_image)
# the model expects a single-channel image (IMAGE_CHANNEL_COUNT = 1)
if pred_image.ndim != 3:
pred_image = np.expand_dims(pred_image, -1)
else:
pred_image = skimage.color.rgb2gray(pred_image)
pred_image = np.expand_dims(pred_image, -1)
# Run detection
results = model.detect([pred_image], verbose=1)
# Visualize results
r = results[0]
# visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
# class_names, r['scores'], title=name.split('.')[0], path=inference_save_path)
visualize.save_masks(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], path=inference_save_path, image_id=name.split('.')[0])
# print(mean / cnt)
# Evaluation: compute official COCO metrics on the chosen split.
elif mode == 'eval':
dataset_val = AngioDataset()
dataset_val.load_angio(datasetdir, eval_data)
dataset_val.prepare()
#load angio data in coco format as coco object
data = COCO(datasetdir + f'data_{eval_data}.json')
evaluate_coco(model, dataset_val, data, "segm")
# Training: staged fine-tuning with heavy augmentation.
elif mode == 'train':
# train dataset
dataset_train = AngioDataset()
dataset_train.load_angio(datasetdir, 'train')
dataset_train.prepare()
# val dataset
dataset_val = AngioDataset()
dataset_val.load_angio(datasetdir, 'val')
dataset_val.prepare()
# augmentation: each augmenter applied with probability 0.7
augmentation = imgaug.augmenters.Sometimes(0.7, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.Flipud(0.5),
imgaug.augmenters.Rotate((-180, 180)),
imgaug.augmenters.ShearX((-20, 20)),
imgaug.augmenters.ShearY((-20, 20)),
imgaug.augmenters.ElasticTransformation(alpha=(0, 70.0), sigma=(4.0, 6.0)),
imgaug.augmenters.GammaContrast((0.5, 2.0)),
imgaug.augmenters.PiecewiseAffine(scale=(0.01, 0.05)),
imgaug.augmenters.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
])
# train heads
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune 4+")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE/5,
epochs=120,
layers='4+',
augmentation=augmentation)
print("Fine tune 3+")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=160,
layers='3+',
augmentation=augmentation)
# Training - Stage 4
# Finetune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE * 3 / 100,
epochs=200,
layers='all',
augmentation=augmentation)
| [
"mrcnn.model.MaskRCNN",
"imgaug.augmenters.PiecewiseAffine",
"pycocotools.cocoeval.COCOeval",
"angiodataset.AngioDataset",
"tensorflow.config.experimental.list_logical_devices",
"numpy.array",
"imgaug.augmenters.Fliplr",
"sys.path.append",
"os.walk",
"argparse.ArgumentParser",
"imgaug.augmenters... | [((261, 283), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (276, 283), False, 'import os\n'), ((304, 329), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (319, 329), False, 'import sys\n'), ((366, 409), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (378, 409), False, 'import os\n'), ((597, 648), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (641, 648), True, 'import tensorflow as tf\n'), ((4508, 4538), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (4520, 4538), False, 'import os\n'), ((2607, 2618), 'time.time', 'time.time', ([], {}), '()\n', (2616, 2618), False, 'import time\n'), ((3659, 3693), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['data', 'results', 'eval_type'], {}), '(data, results, eval_type)\n', (3667, 3693), False, 'from pycocotools.cocoeval import COCOeval\n'), ((4099, 4116), 'numpy.array', 'np.array', (['[129.8]'], {}), '([129.8])\n', (4107, 4116), True, 'import numpy as np\n'), ((4682, 4755), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN on Angio Dataset."""'}), "(description='Train Mask R-CNN on Angio Dataset.')\n", (4705, 4755), False, 'import argparse\n'), ((847, 897), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (890, 897), True, 'import tensorflow as tf\n'), ((2785, 2796), 'time.time', 'time.time', ([], {}), '()\n', (2794, 2796), False, 'import time\n'), ((7701, 7771), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='training', config=config, model_dir=args.logs)\n", (7718, 7771), True, 'from mrcnn import model as modellib, utils\n'), ((772, 
823), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (812, 823), True, 'import tensorflow as tf\n'), ((1535, 1556), 'numpy.around', 'np.around', (['rois[i]', '(1)'], {}), '(rois[i], 1)\n', (1544, 1556), True, 'import numpy as np\n'), ((2870, 2881), 'time.time', 'time.time', ([], {}), '()\n', (2879, 2881), False, 'import time\n'), ((3952, 3963), 'time.time', 'time.time', ([], {}), '()\n', (3961, 3963), False, 'import time\n'), ((8243, 8314), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='inference', config=config, model_dir=args.logs)\n", (8260, 8314), True, 'from mrcnn import model as modellib, utils\n'), ((10075, 10089), 'angiodataset.AngioDataset', 'AngioDataset', ([], {}), '()\n', (10087, 10089), False, 'from angiodataset import AngioDataset\n'), ((10245, 10288), 'pycocotools.coco.COCO', 'COCO', (["(datasetdir + f'data_{eval_data}.json')"], {}), "(datasetdir + f'data_{eval_data}.json')\n", (10249, 10288), False, 'from pycocotools.coco import COCO\n'), ((8736, 8754), 'os.walk', 'os.walk', (['IMAGE_DIR'], {}), '(IMAGE_DIR)\n', (8743, 8754), False, 'import os\n'), ((8986, 9020), 'skimage.color.gray2rgb', 'skimage.color.gray2rgb', (['pred_image'], {}), '(pred_image)\n', (9008, 9020), False, 'import skimage\n'), ((10422, 10436), 'angiodataset.AngioDataset', 'AngioDataset', ([], {}), '()\n', (10434, 10436), False, 'from angiodataset import AngioDataset\n'), ((10568, 10582), 'angiodataset.AngioDataset', 'AngioDataset', ([], {}), '()\n', (10580, 10582), False, 'from angiodataset import AngioDataset\n'), ((1899, 1922), 'numpy.asfortranarray', 'np.asfortranarray', (['mask'], {}), '(mask)\n', (1916, 1922), True, 'import numpy as np\n'), ((8931, 8960), 'os.path.join', 'os.path.join', (['IMAGE_DIR', 'name'], {}), '(IMAGE_DIR, name)\n', (8943, 8960), False, 'import os\n'), ((9155, 9185), 
'numpy.expand_dims', 'np.expand_dims', (['pred_image', '(-1)'], {}), '(pred_image, -1)\n', (9169, 9185), True, 'import numpy as np\n'), ((9241, 9275), 'skimage.color.rgb2gray', 'skimage.color.rgb2gray', (['pred_image'], {}), '(pred_image)\n', (9263, 9275), False, 'import skimage\n'), ((9309, 9339), 'numpy.expand_dims', 'np.expand_dims', (['pred_image', '(-1)'], {}), '(pred_image, -1)\n', (9323, 9339), True, 'import numpy as np\n'), ((10757, 10786), 'imgaug.augmenters.Fliplr', 'imgaug.augmenters.Fliplr', (['(0.5)'], {}), '(0.5)\n', (10781, 10786), False, 'import imgaug\n'), ((10800, 10829), 'imgaug.augmenters.Flipud', 'imgaug.augmenters.Flipud', (['(0.5)'], {}), '(0.5)\n', (10824, 10829), False, 'import imgaug\n'), ((10843, 10880), 'imgaug.augmenters.Rotate', 'imgaug.augmenters.Rotate', (['(-180, 180)'], {}), '((-180, 180))\n', (10867, 10880), False, 'import imgaug\n'), ((10894, 10929), 'imgaug.augmenters.ShearX', 'imgaug.augmenters.ShearX', (['(-20, 20)'], {}), '((-20, 20))\n', (10918, 10929), False, 'import imgaug\n'), ((10943, 10978), 'imgaug.augmenters.ShearY', 'imgaug.augmenters.ShearY', (['(-20, 20)'], {}), '((-20, 20))\n', (10967, 10978), False, 'import imgaug\n'), ((10992, 11066), 'imgaug.augmenters.ElasticTransformation', 'imgaug.augmenters.ElasticTransformation', ([], {'alpha': '(0, 70.0)', 'sigma': '(4.0, 6.0)'}), '(alpha=(0, 70.0), sigma=(4.0, 6.0))\n', (11031, 11066), False, 'import imgaug\n'), ((11080, 11123), 'imgaug.augmenters.GammaContrast', 'imgaug.augmenters.GammaContrast', (['(0.5, 2.0)'], {}), '((0.5, 2.0))\n', (11111, 11123), False, 'import imgaug\n'), ((11137, 11190), 'imgaug.augmenters.PiecewiseAffine', 'imgaug.augmenters.PiecewiseAffine', ([], {'scale': '(0.01, 0.05)'}), '(scale=(0.01, 0.05))\n', (11170, 11190), False, 'import imgaug\n'), ((11204, 11270), 'imgaug.augmenters.Sharpen', 'imgaug.augmenters.Sharpen', ([], {'alpha': '(0.0, 1.0)', 'lightness': '(0.75, 2.0)'}), '(alpha=(0.0, 1.0), lightness=(0.75, 2.0))\n', (11229, 11270), False, 
'import imgaug\n')] |
import numpy as np
import SimpleITK as sitk
from napari_imsmicrolink.data.image_transform import ImageTransform
def test_ImageTransform_add_points():
    """Check add_points scaling/rounding behaviour for the source and target slots."""
    pts = np.array([[50.75, 100.0], [20.0, 10.0], [10.0, 50.0], [60.0, 20.0]])
    transform = ImageTransform()
    transform.output_spacing = (1, 1)

    # round=True: rounding is applied before the x100 scaling (50.75 -> 51 -> 5100)
    transform.add_points(pts, round=True, src_or_tgt="source", scaling=100)
    assert transform.source_pts is not None
    assert transform.source_pts[0, 0] == 5100.0
    assert transform.source_pts[3, 0] == 6000.0

    # round=False: raw coordinates are simply multiplied by the scaling factor
    transform.add_points(pts, round=False, src_or_tgt="source", scaling=100)
    assert transform.source_pts[0, 0] == 50.75 * 100
    assert transform.source_pts[3, 0] == 60 * 100

    # the same call with src_or_tgt="target" populates the target point slot
    transform.add_points(pts, round=False, src_or_tgt="target", scaling=100)
    assert transform.target_pts is not None
def test_ImageTransform_compute_transform():
    """Adding matched source/target point sets should populate every affine attribute."""
    moving = np.array(
        [
            [356.93356879, 6713.16214535],
            [6285.96351516, 11137.0624842],
            [15154.40947051, 7596.13949593],
            [6905.28155271, 936.74985065],
        ]
    )
    fixed = np.array(
        [[500.0, 6250.0], [6400.0, 10700.0], [15300.0, 7200.0], [7100.0, 500.0]]
    )
    transform = ImageTransform()
    transform.output_spacing = (0.92, 0.92)
    transform.add_points(moving, round=False, src_or_tgt="source", scaling=1)
    transform.add_points(fixed, round=True, src_or_tgt="target", scaling=1)

    # forward and inverse transforms plus all matrix representations must be set
    for attr in (
        "affine_transform",
        "affine_np_mat_xy_um",
        "affine_np_mat_yx_um",
        "affine_np_mat_xy_px",
        "affine_np_mat_yx_px",
        "inverse_affine_transform",
        "inverse_affine_np_mat_xy_um",
        "inverse_affine_np_mat_yx_um",
        "inverse_affine_np_mat_xy_px",
        "inverse_affine_np_mat_yx_px",
    ):
        assert getattr(transform, attr) is not None

    # the registration residual should be small for this well-matched point set
    assert transform.point_reg_error < 10
def test_ImageTransform_apply_transform_pts():
    """A uniform 2x-scaling affine should double every point coordinate."""
    scaling = sitk.AffineTransform(2)
    scaling.SetMatrix([2, 0, 0, 2])
    points = np.array([[1, 1], [2, 2]]).astype(float)
    result = ImageTransform.apply_transform_to_pts(points, scaling)
    expected = np.array([[2, 2], [4, 4]]).astype(np.double)
    np.testing.assert_array_equal(result, expected)
| [
"SimpleITK.AffineTransform",
"napari_imsmicrolink.data.image_transform.ImageTransform.apply_transform_to_pts",
"numpy.array",
"napari_imsmicrolink.data.image_transform.ImageTransform",
"numpy.testing.assert_array_equal"
] | [((168, 236), 'numpy.array', 'np.array', (['[[50.75, 100.0], [20.0, 10.0], [10.0, 50.0], [60.0, 20.0]]'], {}), '([[50.75, 100.0], [20.0, 10.0], [10.0, 50.0], [60.0, 20.0]])\n', (176, 236), True, 'import numpy as np\n'), ((249, 265), 'napari_imsmicrolink.data.image_transform.ImageTransform', 'ImageTransform', ([], {}), '()\n', (263, 265), False, 'from napari_imsmicrolink.data.image_transform import ImageTransform\n'), ((855, 997), 'numpy.array', 'np.array', (['[[356.93356879, 6713.16214535], [6285.96351516, 11137.0624842], [\n 15154.40947051, 7596.13949593], [6905.28155271, 936.74985065]]'], {}), '([[356.93356879, 6713.16214535], [6285.96351516, 11137.0624842], [\n 15154.40947051, 7596.13949593], [6905.28155271, 936.74985065]])\n', (863, 997), True, 'import numpy as np\n'), ((1084, 1171), 'numpy.array', 'np.array', (['[[500.0, 6250.0], [6400.0, 10700.0], [15300.0, 7200.0], [7100.0, 500.0]]'], {}), '([[500.0, 6250.0], [6400.0, 10700.0], [15300.0, 7200.0], [7100.0, \n 500.0]])\n', (1092, 1171), True, 'import numpy as np\n'), ((1193, 1209), 'napari_imsmicrolink.data.image_transform.ImageTransform', 'ImageTransform', ([], {}), '()\n', (1207, 1209), False, 'from napari_imsmicrolink.data.image_transform import ImageTransform\n'), ((2019, 2042), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['(2)'], {}), '(2)\n', (2039, 2042), True, 'import SimpleITK as sitk\n'), ((2151, 2204), 'napari_imsmicrolink.data.image_transform.ImageTransform.apply_transform_to_pts', 'ImageTransform.apply_transform_to_pts', (['pts', 'aff_tform'], {}), '(pts, aff_tform)\n', (2188, 2204), False, 'from napari_imsmicrolink.data.image_transform import ImageTransform\n'), ((2271, 2325), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['tformed_pts', 'scaled_pts'], {}), '(tformed_pts, scaled_pts)\n', (2300, 2325), True, 'import numpy as np\n'), ((2091, 2117), 'numpy.array', 'np.array', (['[[1, 1], [2, 2]]'], {}), '([[1, 1], [2, 2]])\n', (2099, 2117), True, 'import numpy 
as np\n'), ((2222, 2248), 'numpy.array', 'np.array', (['[[2, 2], [4, 4]]'], {}), '([[2, 2], [4, 4]])\n', (2230, 2248), True, 'import numpy as np\n')] |
#!/usr/bin/python
from __future__ import division
from __future__ import print_function
"""
This file serves as an example of how to
a) select a problem to be solved
b) select a network type
c) train the network to minimize recovery MSE
"""
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # BE QUIET!!!!
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import tensorflow as tf
np.random.seed(1) # numpy is good about making repeatable output
tf.set_random_seed(1) # on the other hand, this is basically useless (see issue 9171)
# import our problems, networks and training modules
from tools import problems,networks,train
# problem / network hyper-parameters
L=10000 # number of sample vectors (also normalizes the MSE below as L*N)
M=250 # presumably the measurement dimension (rows of A) -- TODO confirm
N=500 # presumably the signal dimension (columns of A) -- TODO confirm
SNR=20 # signal-to-noise ratio passed to the problem generator
pnz=.1 # probability of a non-zero entry (Bernoulli-Gaussian sparsity)
untied=False # untied-weights flag forwarded to networks.build_LAMP
T=8 # number of LAMP layers/iterations forwarded to networks.build_LAMP
shrink='bg' # shrinkage family forwarded to build_LAMP ('bg' presumably Bernoulli-Gaussian)
# Create the basic problem structure.
prob = problems.bernoulli_gaussian_trial(kappa=None,M=M,N=N,L=L,pnz=pnz,SNR=SNR) #a Bernoulli-Gaussian x, noisily observed through a random matrix
#prob = problems.random_access_problem(2) # 1 or 2 for compressive random access or massive MIMO
print('Problem created ...')
print('A is:')
print(prob.A)
# from scipy.io import savemat
# # W = np.load(config.W)
# dict = dict(D=prob.A)
# savemat( 'D.mat', dict, oned_as='column' )
# build a LAMP network to solve the problem and get the intermediate results so we can greedily extend and then refine(fine-tune)
layers = networks.build_LAMP(prob,T=T,shrink=shrink,untied=untied)
print('Building layers ... done')
# per-layer metrics accumulated for the final summary printout
nmse_arrray = []
mse_arrray = []
sigma2_array = []
# Evaluating (fixed-)GAMP in TensorFlow
# For debugging purposes I initialize a session here - Intialize the Session
sess = tf.Session()
y,x_true = prob(sess)
sess.run(tf.global_variables_initializer())
for name, xhat_, rvar_, var_list in layers:
    # build TF ops for this layer's reconstruction metrics
    nmse_denom_ = tf.nn.l2_loss(prob.x_)
    nmse_ = tf.nn.l2_loss( xhat_ - prob.x_) / nmse_denom_
    mse_ = 2* tf.nn.l2_loss(xhat_ - prob.x_) / (L*N)
    rvar_mean_ = tf.reduce_mean(rvar_)
    # evaluate the metric ops on the sampled (y, x_true) pair
    x_hat, nmse, mse, rvar_mean = sess.run([xhat_, nmse_, mse_, rvar_mean_], feed_dict={prob.y_: y, prob.x_: x_true})
    # only the non-linear (shrinkage) layers are recorded in the summary arrays
    if "non-linear T=" in name:
        nmse_arrray.append(nmse)
        mse_arrray.append(mse)
        sigma2_array.append(rvar_mean)
    print(name, '\tnmse=', nmse,'\tNMSE/dB=',10*np.log10(nmse),'\tMSE/dB=',10*np.log10(mse), '\t sigma2/dB=',10*np.log10(rvar_mean))
sess.close()
# summary: per-layer metrics converted to dB
print('nmse/dB=', 10*np.log10(nmse_arrray))
print('mse/dB=', 10*np.log10(mse_arrray))
print('sigma2/dB=', 10*np.log10(sigma2_array)) | [
"numpy.log10",
"tools.problems.bernoulli_gaussian_trial",
"tools.networks.build_LAMP",
"tensorflow.Session",
"tensorflow.nn.l2_loss",
"tensorflow.global_variables_initializer",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.set_random_seed"
] | [((397, 414), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (411, 414), True, 'import numpy as np\n'), ((462, 483), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (480, 483), True, 'import tensorflow as tf\n'), ((754, 832), 'tools.problems.bernoulli_gaussian_trial', 'problems.bernoulli_gaussian_trial', ([], {'kappa': 'None', 'M': 'M', 'N': 'N', 'L': 'L', 'pnz': 'pnz', 'SNR': 'SNR'}), '(kappa=None, M=M, N=N, L=L, pnz=pnz, SNR=SNR)\n', (787, 832), False, 'from tools import problems, networks, train\n'), ((1317, 1377), 'tools.networks.build_LAMP', 'networks.build_LAMP', (['prob'], {'T': 'T', 'shrink': 'shrink', 'untied': 'untied'}), '(prob, T=T, shrink=shrink, untied=untied)\n', (1336, 1377), False, 'from tools import problems, networks, train\n'), ((1587, 1599), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1597, 1599), True, 'import tensorflow as tf\n'), ((1633, 1666), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1664, 1666), True, 'import tensorflow as tf\n'), ((1732, 1754), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['prob.x_'], {}), '(prob.x_)\n', (1745, 1754), True, 'import tensorflow as tf\n'), ((1885, 1906), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['rvar_'], {}), '(rvar_)\n', (1899, 1906), True, 'import tensorflow as tf\n'), ((1767, 1797), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(xhat_ - prob.x_)'], {}), '(xhat_ - prob.x_)\n', (1780, 1797), True, 'import tensorflow as tf\n'), ((2333, 2354), 'numpy.log10', 'np.log10', (['nmse_arrray'], {}), '(nmse_arrray)\n', (2341, 2354), True, 'import numpy as np\n'), ((2376, 2396), 'numpy.log10', 'np.log10', (['mse_arrray'], {}), '(mse_arrray)\n', (2384, 2396), True, 'import numpy as np\n'), ((2421, 2443), 'numpy.log10', 'np.log10', (['sigma2_array'], {}), '(sigma2_array)\n', (2429, 2443), True, 'import numpy as np\n'), ((1828, 1858), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(xhat_ - prob.x_)'], {}), 
'(xhat_ - prob.x_)\n', (1841, 1858), True, 'import tensorflow as tf\n'), ((2211, 2225), 'numpy.log10', 'np.log10', (['nmse'], {}), '(nmse)\n', (2219, 2225), True, 'import numpy as np\n'), ((2241, 2254), 'numpy.log10', 'np.log10', (['mse'], {}), '(mse)\n', (2249, 2254), True, 'import numpy as np\n'), ((2275, 2294), 'numpy.log10', 'np.log10', (['rvar_mean'], {}), '(rvar_mean)\n', (2283, 2294), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys
import numpy as np
from config import Config
from base import Connect4Base
from random_agent import RandomAgent
from simple_agent import SimpleAgent
from one_step_lookahead_agent import OneStepLookaheadAgent
from n_steps_lookahead_agent import NStepsLookaheadAgent
from cnn_agent import CNNAgent
from network_128x4_64_64 import Network1
class Tournament(Connect4Base):
    """Plays Connect-4 games between two agents on a shared board configuration."""

    def __init__(self, config, agent1, agent2):
        """Store both agents and notify each of its piece id.

        agent1 plays piece 1 and always moves first; agent2 plays piece 2.
        """
        super().__init__(config)
        self.agent1 = agent1
        self.agent2 = agent2
        self.agent1.setup(1)
        self.agent2.setup(2)
        print("Player 1 - {}".format(agent1.name()))
        print("Player 2 - {}".format(agent2.name()))

    def run(self):
        """Play one full game; return the winning piece (1 or 2), or 0 for a draw."""
        # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24; it was
        # an alias for the builtin int, so plain int is the exact replacement.
        board = np.full((self.config.rows, self.config.columns), 0, int)
        piece = 1  # starts first
        winner = 0
        while len(self.valid_moves(board)) > 0:
            agent = self.agent1 if piece == 1 else self.agent2
            col = agent.move(board)
            board = self.drop_piece(board, col, piece)
            if self.check_if_winning(board, piece):
                winner = piece
                break
            piece = piece % 2 + 1  # alternate turns: 1 -> 2 -> 1 -> ...
        # let both agents observe the result (winner == 0 means a draw)
        self.agent1.game_over(winner)
        self.agent2.game_over(winner)
        return winner

    def end(self):
        """Tear down both agents once all games have been played."""
        self.agent1.teardown()
        self.agent2.teardown()
# run agents
nruns = 100 if len(sys.argv) < 2 else int(sys.argv[1])  # number of games; optionally overridden via argv[1]
print("Number of runs", nruns)
winners = list()
config = Config(6, 7, 4)  # presumably (rows, columns, pieces-in-a-row-to-win) -- TODO confirm against Config
# alternative agent pairings kept for convenience; uncomment one to use it
#tournament = Tournament(config, RandomAgent(config), SimpleAgent(config))
#tournament = Tournament(config, SimpleAgent(config), NStepsLookaheadAgent(config, 1))
#tournament = Tournament(config, RandomAgent(config), CNNAgent(config, Network1(), 'rnd'))
#tournament = Tournament(config, OneStepLookaheadAgent(config), CNNAgent(config, Network1(), '1sla'))
tournament = Tournament(config, NStepsLookaheadAgent(config, 3), CNNAgent(config, Network1(), '3sla'))
#tournament = Tournament(config, CNNAgent(config, Network1(), 'cnn'), CNNAgent(config, Network1(), 'cnn'))
for n in range(nruns):
    winner = tournament.run()
    winners.append(winner)
    print("Game", n, ", player", winner, "wins")
tournament.end()
# tally results: 0 = draw, 1 = player 1 won, 2 = player 2 won
draw = len([n for n in winners if n == 0])
won_1 = len([n for n in winners if n == 1])
won_2 = len([n for n in winners if n == 2])
print("player1:", won_1, ", player2:", won_2, ", draw:", draw)
| [
"n_steps_lookahead_agent.NStepsLookaheadAgent",
"config.Config",
"numpy.full",
"network_128x4_64_64.Network1"
] | [((1325, 1340), 'config.Config', 'Config', (['(6)', '(7)', '(4)'], {}), '(6, 7, 4)\n', (1331, 1340), False, 'from config import Config\n'), ((1728, 1759), 'n_steps_lookahead_agent.NStepsLookaheadAgent', 'NStepsLookaheadAgent', (['config', '(3)'], {}), '(config, 3)\n', (1748, 1759), False, 'from n_steps_lookahead_agent import NStepsLookaheadAgent\n'), ((690, 749), 'numpy.full', 'np.full', (['(self.config.rows, self.config.columns)', '(0)', 'np.int'], {}), '((self.config.rows, self.config.columns), 0, np.int)\n', (697, 749), True, 'import numpy as np\n'), ((1778, 1788), 'network_128x4_64_64.Network1', 'Network1', ([], {}), '()\n', (1786, 1788), False, 'from network_128x4_64_64 import Network1\n')] |
#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster-new/bin/python
import os
import json
from concurrent import futures
import numpy as np
import luigi
import z5py
import skeletor.io
from cluster_tools.skeletons import SkeletonWorkflow
def check_scale(scale):
    """Print the shape of the multicut segmentation dataset at the given scale level."""
    data_path = '/g/kreshuk/data/FIB25/data.n5'
    dataset_key = 'volumes/paintera/multicut/data/s%i' % scale
    dataset = z5py.File(data_path)[dataset_key]
    print(dataset.shape)
def skeletonize(scale, target, max_jobs):
    """Run the cluster_tools SkeletonWorkflow on the multicut segmentation at `scale`.

    Writes the per-job configs to ./configs, keeps temporary job state in
    ./tmp_skeletons_<scale>, and raises an AssertionError if the luigi
    workflow does not finish successfully.
    """
    path = '/g/kreshuk/data/FIB25/data.n5'
    input_key = 'volumes/paintera/multicut/data/s%i' % scale
    output_key = 'skeletons/s%i' % scale
    config_dir = './configs'
    tmp_folder = './tmp_skeletons_%i' % scale
    os.makedirs(config_dir, exist_ok=True)
    # write the global workflow config; the shebang selects the python env
    # that the scheduled jobs will run with
    config = SkeletonWorkflow.get_config()
    global_config = config['global']
    shebang = '#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster-new/bin/python'
    global_config.update({'shebang': shebang})
    with open(os.path.join(config_dir, 'global.config'), 'w') as f:
        json.dump(global_config, f)
    # write the task-specific config (time/memory limits for the cluster jobs;
    # units follow the cluster_tools convention -- TODO confirm)
    config = config['skeletonize']
    config.update({'time_limit': 600, 'mem_limit': 16})
    with open(os.path.join(config_dir, 'skeletonize.config'), 'w') as f:
        json.dump(config, f)
    resolution = [8, 8, 8]  # voxel size passed to the workflow (presumably nm -- TODO confirm)
    size_threshold = 2000   # segments below this size are skipped by the workflow
    # the workflow needs the maximum segment id, stored as an attribute on the dataset
    max_id = z5py.File(path)['volumes/paintera/multicut/data'].attrs['maxId']
    task = SkeletonWorkflow(tmp_folder=tmp_folder, config_dir=config_dir,
                            max_jobs=max_jobs, target=target,
                            input_path=path, input_key=input_key,
                            output_path=path, output_key=output_key,
                            resolution=resolution, size_threshold=size_threshold,
                            max_id=max_id)
    success = luigi.build([task], local_scheduler=True)
    assert success
def skeletons_to_volume(scale, n_threads):
    """Rasterize the extracted skeletons into a uint64 label volume.

    For every segment id, reads its skeleton nodes and writes the segment id
    at the node coordinates into a new volume with the same shape and chunks
    as the segmentation at this scale; the result is stored under
    'skeletons/volumes/s<scale>'.
    """
    path = '/g/kreshuk/data/FIB25/data.n5'
    f = z5py.File(path)
    seg_key = 'volumes/paintera/multicut/data/s%i' % scale
    in_key = 'skeletons/s%i' % scale
    out_key = 'skeletons/volumes/s%i' % scale
    shape = f[seg_key].shape
    chunks = f[seg_key].chunks
    seg = np.zeros(shape, dtype='uint64')
    ds_in = f[in_key]
    def seg_to_vol(seg_id):
        # paint this segment's skeleton nodes into the shared output array
        nodes, _ = skeletor.io.read_n5(ds_in, seg_id)
        if nodes is None:
            # no skeleton stored for this id
            return
        print(seg_id, '/', ds_in.shape[0])
        # split node records into per-axis coordinate arrays for fancy indexing
        coords = tuple(np.array([node[i] for node in nodes]) for i in range(3))
        seg[coords] = seg_id
    with futures.ThreadPoolExecutor(n_threads) as tp:
        tasks = [tp.submit(seg_to_vol, seg_id) for seg_id in range(ds_in.shape[0])]
        [t.result() for t in tasks]  # re-raises any exception from the workers
    ds_out = f.require_dataset(out_key, shape=shape, chunks=chunks,
                              compression='gzip', dtype='uint64')
    ds_out.n_threads = n_threads
    ds_out[:] = seg
if __name__ == '__main__':
    # TODO which scale ?
    scale = 2
    # check_scale(scale)
    target = 'local'  # execution target for skeletonize (presumably 'local' vs. a cluster backend)
    max_jobs = 48  # job count for skeletonize; also the thread count for the rasterization
    # skeletonize(scale, target, max_jobs)
    skeletons_to_volume(scale, max_jobs)
| [
"luigi.build",
"os.makedirs",
"cluster_tools.skeletons.SkeletonWorkflow",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
"cluster_tools.skeletons.SkeletonWorkflow.get_config",
"z5py.File",
"numpy.array",
"numpy.zeros",
"json.dump"
] | [((388, 403), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (397, 403), False, 'import z5py\n'), ((734, 772), 'os.makedirs', 'os.makedirs', (['config_dir'], {'exist_ok': '(True)'}), '(config_dir, exist_ok=True)\n', (745, 772), False, 'import os\n'), ((787, 816), 'cluster_tools.skeletons.SkeletonWorkflow.get_config', 'SkeletonWorkflow.get_config', ([], {}), '()\n', (814, 816), False, 'from cluster_tools.skeletons import SkeletonWorkflow\n'), ((1437, 1694), 'cluster_tools.skeletons.SkeletonWorkflow', 'SkeletonWorkflow', ([], {'tmp_folder': 'tmp_folder', 'config_dir': 'config_dir', 'max_jobs': 'max_jobs', 'target': 'target', 'input_path': 'path', 'input_key': 'input_key', 'output_path': 'path', 'output_key': 'output_key', 'resolution': 'resolution', 'size_threshold': 'size_threshold', 'max_id': 'max_id'}), '(tmp_folder=tmp_folder, config_dir=config_dir, max_jobs=\n max_jobs, target=target, input_path=path, input_key=input_key,\n output_path=path, output_key=output_key, resolution=resolution,\n size_threshold=size_threshold, max_id=max_id)\n', (1453, 1694), False, 'from cluster_tools.skeletons import SkeletonWorkflow\n'), ((1836, 1877), 'luigi.build', 'luigi.build', (['[task]'], {'local_scheduler': '(True)'}), '([task], local_scheduler=True)\n', (1847, 1877), False, 'import luigi\n'), ((1993, 2008), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (2002, 2008), False, 'import z5py\n'), ((2222, 2253), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""uint64"""'}), "(shape, dtype='uint64')\n", (2230, 2253), True, 'import numpy as np\n'), ((1071, 1098), 'json.dump', 'json.dump', (['global_config', 'f'], {}), '(global_config, f)\n', (1080, 1098), False, 'import json\n'), ((1272, 1292), 'json.dump', 'json.dump', (['config', 'f'], {}), '(config, f)\n', (1281, 1292), False, 'import json\n'), ((2566, 2603), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['n_threads'], {}), '(n_threads)\n', (2592, 2603), False, 'from concurrent 
import futures\n'), ((1009, 1050), 'os.path.join', 'os.path.join', (['config_dir', '"""global.config"""'], {}), "(config_dir, 'global.config')\n", (1021, 1050), False, 'import os\n'), ((1205, 1251), 'os.path.join', 'os.path.join', (['config_dir', '"""skeletonize.config"""'], {}), "(config_dir, 'skeletonize.config')\n", (1217, 1251), False, 'import os\n'), ((1360, 1375), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (1369, 1375), False, 'import z5py\n'), ((2470, 2507), 'numpy.array', 'np.array', (['[node[i] for node in nodes]'], {}), '([node[i] for node in nodes])\n', (2478, 2507), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Sample sin(x) at 50 evenly spaced points over one full period.
x = np.linspace(0, 2 * np.pi, 50)
y = np.sin(x)
# plt.plot(x, y)
# plt.show()
# plt.plot(x, y)
# plt.plot(x, y * 2)
# plt.title("sin(x) & 2sin(x)")
# plt.show()
plt.plot(x, y, label="sin(x)")
plt.plot(x, y * 2, label="2sin(x)")
# plt.legend()
# 'best' lets matplotlib pick the least-obstructed corner; the previous
# value 'best bbbbbbbbbb' is not a recognized legend location.
plt.legend(loc='best')
plt.show()
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((57, 86), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (68, 86), True, 'import numpy as np\n'), ((91, 100), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (97, 100), True, 'import numpy as np\n'), ((216, 246), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""sin(x)"""'}), "(x, y, label='sin(x)')\n", (224, 246), True, 'import matplotlib.pyplot as plt\n'), ((247, 282), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y * 2)'], {'label': '"""2sin(x)"""'}), "(x, y * 2, label='2sin(x)')\n", (255, 282), True, 'import matplotlib.pyplot as plt\n'), ((298, 331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best bbbbbbbbbb"""'}), "(loc='best bbbbbbbbbb')\n", (308, 331), True, 'import matplotlib.pyplot as plt\n'), ((332, 342), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (340, 342), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import time
import torch
import torch.nn as nn
import os
from tqdm import tqdm as tqdm
import sys;
import math
import skimage.io as io
import cv2
from skimage import filters
from skimage.measure import label, moments
import glob
from model_arch import UnetVggMultihead
from my_dataloader_w_kfunc import CellsDataset
from my_dataloader import CellsDataset as CellsDataset_simple
from cluster_helper import *
checkpoints_root_dir = '../MCSpatNet_checkpoints' # The root directory for all training output.
checkpoints_folder_name = 'mcspatnet_consep_1' # The name of the folder that will be created under <checkpoints_root_dir> to hold output from current training instance.
model_param_path = None; # path of a previous checkpoint to continue training
clustering_pseudo_gt_root = '../MCSpatNet_epoch_subclasses'
train_data_root = '../MCSpatNet_datasets/CoNSeP_train'
test_data_root = '../MCSpatNet_datasets/CoNSeP_train'
train_split_filepath = './data_splits/consep/train_split.txt'
test_split_filepath = './data_splits/consep/val_split.txt'
epochs = 300 # number of training epochs. Use 300 for CoNSeP dataset.
cell_code = {1:'lymphocyte', 2:'tumor', 3:'stromal'}
feature_code = {'decoder':0, 'cell-detect':1, 'class':2, 'subclass':3, 'k-cell':4}
if __name__=="__main__":
# checkpoints_save_path: path to save checkpoints
checkpoints_save_path = os.path.join(checkpoints_root_dir, checkpoints_folder_name)
cluster_tmp_out = os.path.join(clustering_pseudo_gt_root, checkpoints_folder_name)
if not os.path.exists(checkpoints_root_dir):
os.mkdir(checkpoints_root_dir)
if not os.path.exists(checkpoints_save_path):
os.mkdir(checkpoints_save_path)
if not os.path.exists(clustering_pseudo_gt_root):
os.mkdir(clustering_pseudo_gt_root)
if not os.path.exists(cluster_tmp_out):
os.mkdir(cluster_tmp_out)
# log_file_path: path to save log file
i=1
while(True):
log_file_path = os.path.join(checkpoints_root_dir, checkpoints_folder_name, f'train_log_{i}.txt')
if(not os.path.exists(log_file_path)):
break
i +=1
start_epoch = 0 # To use if continuing training from a previous epoch loaded from model_param_path
epoch_start_eval_prec = 1 # After epoch_start_eval_prec epochs start to evaluate F-score of predictions on the validation set.
restart_epochs_freq = 50 # reset frequency for optimizer
next_restart_epoch = restart_epochs_freq + start_epoch
gpu_or_cpu ='cuda' # use cuda or cpu
device=torch.device(gpu_or_cpu)
seed = time.time()
# print_frequency = 1 # print frequency per epoch
# Initialize log file
log_file = open(log_file_path, 'a+')
# Configure training dataset
train_image_root = os.path.join(train_data_root, 'images')
train_dmap_root = os.path.join(train_data_root, 'gt_custom')
train_dots_root = os.path.join(train_data_root, 'gt_custom')
train_dmap_subclasses_root = cluster_tmp_out
train_dots_subclasses_root = train_dmap_subclasses_root
train_kmap_root = os.path.join(train_data_root, 'k_func_maps')
# Configure validation dataset
test_image_root = os.path.join(test_data_root, 'images')
test_dmap_root = os.path.join(test_data_root, 'gt_custom')
test_dots_root = os.path.join(test_data_root, 'gt_custom')
test_dmap_subclasses_root = cluster_tmp_out
test_dots_subclasses_root = test_dmap_subclasses_root
test_kmap_root = os.path.join(test_data_root, 'k_func_maps')
dropout_prob = 0.2
initial_pad = 126 # We add padding so that final output has same size as input since we do not use same padding conv.
interpolate = 'False'
conv_init = 'he'
n_channels = 3
n_classes = 3 # number of cell classes (lymphocytes, tumor, stromal)
n_classes_out = n_classes + 1 # number of output classes = number of cell classes (lymphocytes, tumor, stromal) + 1 (for cell detection channel)
class_indx = '1,2,3' # the index of the classes channels in the ground truth
n_clusters = 5 # number of clusters per class
n_classes2 = n_clusters * (n_classes) # number of output classes for the cell cluster classification
lr = 0.00005 # learning rate
batch_size = 1
prints_per_epoch=1 # print frequency per epoch
# Initialize the range of the radii for the K function for each class
r_step = 15
r_range = range(0, 100, r_step)
r_arr = np.array([*r_range])
r_classes = len(r_range) # number of output channels for the K function for a single class
r_classes_all = r_classes * (n_classes ) # number of output channels for the K function over all classes
k_norm_factor = 100 # the maximum K-value (i.e. number of nearby cells at radius r) to normalize the K-func to [0,1]
lamda_dice = 1; # weight for dice loss for main output channels (cell detection + cell classification)
lamda_subclasses = 1 # weight for dice loss for secondary output channels (cell cluster classification)
lamda_k = 1 # weight for L1 loss for K function regression
torch.cuda.manual_seed(seed)
model=UnetVggMultihead(kwargs={'dropout_prob':dropout_prob, 'initial_pad':initial_pad, 'interpolate':interpolate, 'conv_init':conv_init, 'n_classes':n_classes, 'n_channels':n_channels, 'n_heads':4, 'head_classes':[1,n_classes,n_classes2, r_classes_all]})
if(not (model_param_path is None)):
model.load_state_dict(torch.load(model_param_path), strict=False);
log_file.write('model loaded \n')
log_file.flush()
model.to(device)
# Initialize sigmoid layer for cell detection
criterion_sig = nn.Sigmoid()
# Initialize softmax layer for cell classification
criterion_softmax = nn.Softmax(dim=1)
# Initialize L1 loss for K function regression
criterion_l1_sum = nn.L1Loss(reduction='sum')
# Initialize Optimizer
optimizer=torch.optim.Adam(list(model.final_layers_lst.parameters())+list(model.decoder.parameters())+list(model.bottleneck.parameters())+list(model.encoder.parameters()),lr)
# Initialize training dataset loader
train_dataset=CellsDataset(train_image_root,train_dmap_root,train_dots_root,class_indx,train_dmap_subclasses_root, train_dots_subclasses_root, train_kmap_root, split_filepath=train_split_filepath, phase='train', fixed_size=448, max_scale=16)
train_loader=torch.utils.data.DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
# Initialize validation dataset loader
test_dataset=CellsDataset(test_image_root,test_dmap_root,test_dots_root,class_indx,test_dmap_subclasses_root, test_dots_subclasses_root, test_kmap_root, split_filepath=test_split_filepath,phase='test', fixed_size=-1, max_scale=16)
test_loader=torch.utils.data.DataLoader(test_dataset,batch_size=1,shuffle=False)
# Initialize training dataset loader for clustering phase
simple_train_dataset=CellsDataset_simple(train_image_root,train_dmap_root,train_dots_root,class_indx, phase='test', fixed_size=-1, max_scale=16, return_padding=True)
simple_train_loader=torch.utils.data.DataLoader(simple_train_dataset,batch_size=batch_size,shuffle=False)
# Use prints_per_epoch to get iteration number to generate sample output
# print_frequency = len(train_loader)//prints_per_epoch;
print_frequency_test = len(test_loader) // prints_per_epoch;
best_epoch_filepath=None
best_epoch=None
best_f1_mean = 0
best_prec_recall_diff = math.inf
centroids = None
for epoch in range(start_epoch,epochs):
# If epoch already exists then skip
epoch_files = glob.glob(os.path.join(checkpoints_save_path, 'mcspat_epoch_'+str(epoch)+"_*.pth"))
if len(epoch_files) > 0:
continue;
# Cluster features at the beginning of each epoch
print('epoch', epoch, 'start clustering')
centroids = perform_clustering(model, simple_train_loader, n_clusters, n_classes, [feature_code['k-cell'], feature_code['subclass']], train_dmap_subclasses_root, centroids)
print('epoch', epoch, 'end clustering')
# Training phase
model.train()
log_file.write('epoch= ' + str(epoch) + '\n')
log_file.flush()
# Initialize variables for accumulating loss over the epoch
epoch_loss=0
train_count = 0
# train_loss_k = 0
# train_loss_dice = 0
# train_count_k = 0
for i,(img,gt_dmap,gt_dots,gt_dmap_subclasses,gt_dots_subclasses, gt_kmap,img_name) in enumerate(tqdm(train_loader)):
'''
img: input image
gt_dmap: ground truth map for cell classes (lymphocytes, epithelial/tumor, stromal) with dilated dots. This can be a binary mask or a density map ( in which case it will be converted to a binary mask)
gt_dots: ground truth binary dot map for cell classes (lymphocytes, epithelial/tumor, stromal).
gt_dmap_subclasses: ground truth map for cell clustering sub-classes with dilated dots. This can be a binary mask or a density map ( in which case it will be converted to a binary mask)
gt_dots_subclasses: ground truth binary dot map for cell clustering sub-classes.
gt_kmap: ground truth k-function map. At each cell center contains the cross k-functions centered at that cell.
img_name: img filename
'''
gt_kmap /= k_norm_factor # Normalize K functions ground truth
img_name=img_name[0]
train_count += 1
img=img.to(device)
# Convert ground truth maps to binary mask (in case they were density maps)
gt_dmap = gt_dmap > 0
gt_dmap_subclasses = gt_dmap_subclasses > 0
# Get the detection ground truth maps from the classes ground truth maps
gt_dmap_all = gt_dmap.max(1)[0]
gt_dots_all = gt_dots.max(1)[0]
# Set datatype and move to GPU
gt_dmap = gt_dmap.type(torch.FloatTensor)
gt_dmap_all = gt_dmap_all.type(torch.FloatTensor)
gt_dmap_subclasses = gt_dmap_subclasses.type(torch.FloatTensor)
gt_kmap = gt_kmap.type(torch.FloatTensor)
gt_dmap=gt_dmap.to(device)
gt_dmap_all=gt_dmap_all.to(device)
gt_dmap_subclasses=gt_dmap_subclasses.to(device)
gt_kmap=gt_kmap.to(device)
# forward propagation
et_dmap_lst=model(img)
et_dmap_all=et_dmap_lst[0][:,:,2:-2,2:-2] # The cell detection prediction
et_dmap_class=et_dmap_lst[1][:,:,2:-2,2:-2] # The cell classification prediction
et_dmap_subclasses= et_dmap_lst[2][:,:,2:-2,2:-2] # The cell clustering sub-class prediction
et_kmap=et_dmap_lst[3][:,:,2:-2,2:-2]**2 # The cross K-functions estimation
# Apply K function loss only on the detection mask regions
k_loss_mask = gt_dmap_all.clone()
loss_l1_k = criterion_l1_sum(et_kmap*(k_loss_mask), gt_kmap*(k_loss_mask)) / (k_loss_mask.sum()*r_classes_all)
# Apply Sigmoid and Softmax activations to the detection and classification predictions, respectively.
et_all_sig = criterion_sig(et_dmap_all)
et_class_sig = criterion_softmax(et_dmap_class)
et_subclasses_sig = criterion_softmax(et_dmap_subclasses)
# Compute Dice loss on the detection and classification predictions
intersection = (et_class_sig * gt_dmap ).sum()
union = (et_class_sig**2).sum() + (gt_dmap**2).sum()
loss_dice_class = 1 - ((2 * intersection + 1) / (union + 1))
intersection = (et_all_sig * gt_dmap_all.unsqueeze(0) ).sum()
union = (et_all_sig**2).sum() + (gt_dmap_all.unsqueeze(0)**2).sum()
loss_dice_all = 1 - ((2 * intersection + 1) / (union + 1))
intersection = (et_subclasses_sig * gt_dmap_subclasses ).sum()
union = (et_subclasses_sig**2).sum() + (gt_dmap_subclasses**2).sum()
loss_dice_subclass = 1 - ((2 * intersection + 1) / (union + 1))
loss_dice = loss_dice_class + loss_dice_all + lamda_subclasses * loss_dice_subclass
# train_loss_dice += loss_dice.item()
# Add up the dice loss and the K function L1 loss. The K function can be NAN especially in the beginning of training. Do not add to loss if it is NAN.
loss = (lamda_dice * loss_dice )
if(not math.isnan(loss_l1_k.item())):
loss += loss_l1_k * lamda_k
# train_count_k += 1
# train_loss_k += loss_l1_k.item()
# Backpropagate loss
epoch_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
log_file.write("epoch: "+str(epoch)+ " i: "+str(i)+" loss_dice: "+str(loss_dice.item()) + " loss_l1_k:" + str(loss_l1_k.item()) + '\n')
log_file.flush()
log_file.write("epoch: " + str(epoch) + " train loss: "+ str(epoch_loss/train_count)+ '\n')
log_file.flush()
epoch_loss = epoch_loss/train_count
#break
# Testing phase on Validation Set
model.eval()
err=np.array([0 for s in range(n_classes_out)])
loss_val = 0
loss_val_k_wo_nan = 0
loss_val_k = 0
loss_val_dice = 0
loss_val_dice2 = 0
tp_count_all = np.zeros((n_classes_out))
fp_count_all = np.zeros((n_classes_out))
fn_count_all = np.zeros((n_classes_out))
test_count_k = 0
for i,(img,gt_dmap,gt_dots,gt_dmap_subclasses,gt_dots_subclasses, gt_kmap,img_name) in enumerate(tqdm(test_loader)):
'''
img: input image
gt_dmap: ground truth map for cell classes (lymphocytes, epithelial/tumor, stromal) with dilated dots. This can be a binary mask or a density map ( in which case it will be converted to a binary mask)
gt_dots: ground truth binary dot map for cell classes (lymphocytes, epithelial/tumor, stromal).
gt_dmap_subclasses: ground truth map for cell clustering sub-classes with dilated dots. This can be a binary mask or a density map ( in which case it will be converted to a binary mask)
gt_dots_subclasses: ground truth binary dot map for cell clustering sub-classes.
gt_kmap: ground truth k-function map. At each cell center contains the cross k-functions centered at that cell.
img_name: img filename
'''
gt_kmap /= k_norm_factor # Normalize K functions ground truth
img_name=img_name[0]
img=img.to(device)
# Convert ground truth maps to binary masks (in case they were density maps)
gt_dmap = gt_dmap > 0
# Get the detection ground truth maps from the classes ground truth maps
gt_dmap_all = gt_dmap.max(1)[0]
gt_dots_all = gt_dots.max(1)[0]
# Set datatype and move to GPU
gt_dmap = gt_dmap.type(torch.FloatTensor)
gt_dmap_all = gt_dmap_all.type(torch.FloatTensor)
gt_kmap = gt_kmap.type(torch.FloatTensor)
gt_kmap=gt_kmap.to(device)
k_loss_mask = gt_dmap_all.clone().to(device) # Apply K-function loss only on the dilated dots mask
# Convert ground truth maps to numpy arrays
gt_dots = gt_dots.detach().cpu().numpy()
gt_dots_all = gt_dots_all.detach().cpu().numpy()
gt_dmap = gt_dmap.detach().cpu().numpy()
gt_dmap_all = gt_dmap_all.detach().cpu().numpy()
# forward Propagation
et_dmap_lst=model(img)
et_dmap_all=et_dmap_lst[0][:,:,2:-2,2:-2] # The cell detection prediction
et_dmap_class=et_dmap_lst[1][:,:,2:-2,2:-2] # The cell classification prediction
et_dmap_subclasses= et_dmap_lst[2][:,:,2:-2,2:-2] # The cell clustering sub-class prediction
et_kmap=et_dmap_lst[3][:,:,2:-2,2:-2]**2 # The cross K-functions estimation
# Apply Sigmoid and Softmax activations to the detection and classification predictions, respectively.
et_all_sig = criterion_sig(et_dmap_all).detach().cpu().numpy()
et_class_sig = criterion_softmax(et_dmap_class).detach().cpu().numpy()
# Apply K function loss only on the detection mask regions
loss_l1_k = criterion_l1_sum(et_kmap*(k_loss_mask), gt_kmap*(k_loss_mask)) / (k_loss_mask.sum()*r_classes_all)
# Save sample output predictions
if(i % print_frequency_test == 0):
io.imsave(os.path.join(checkpoints_save_path, 'test'+ '_indx'+str(i)+'_img'+'.png'), (img.squeeze().detach().cpu().numpy()*255).transpose(1,2,0).astype(np.uint8));
for s in range(n_classes):
io.imsave(os.path.join(checkpoints_save_path, 'epoch'+str(epoch)+ '_test'+ '_indx'+str(i)+'_likelihood'+'_s'+str(s)+'.png'), (et_class_sig[:,s,:,:]*255).squeeze().astype(np.uint8));
io.imsave(os.path.join(checkpoints_save_path, 'test'+ '_indx'+str(i)+'_gt'+'_s'+str(s)+'.png'), (gt_dmap[:,s,:,:]*255).squeeze().astype(np.uint8));
io.imsave(os.path.join(checkpoints_save_path, 'epoch'+str(epoch)+ '_test'+ '_indx'+str(i)+'_likelihood'+'_all'+'.png'), (et_all_sig*255).squeeze().astype(np.uint8));
io.imsave(os.path.join(checkpoints_save_path, 'test'+ '_indx'+str(i)+'_gt'+'_all'+'.png'), (gt_dmap_all*255).squeeze().astype(np.uint8));
# Accumulate K-function test losses
loss_val_k += loss_l1_k.item()
if(not math.isnan(loss_l1_k.item())):
loss_val_k_wo_nan += loss_l1_k.item()
test_count_k += 1
# Compute Dice loss on the detection and classification predictions
intersection = (et_class_sig * gt_dmap ).sum()
union = (et_class_sig**2).sum() + (gt_dmap**2).sum()
loss_dice_class = 1 - ((2 * intersection + 1) / (union + 1))
intersection = (et_all_sig * gt_dmap_all ).sum()
union = (et_all_sig**2).sum() + (gt_dmap_all**2).sum()
loss_dice_all = 1 - ((2 * intersection + 1) / (union + 1))
loss_dice = (loss_dice_class + loss_dice_all).item()
loss_val_dice += loss_dice
print('epoch', epoch, 'test', i, 'loss_l1_k', str(loss_l1_k.item()), 'loss_dice', str(loss_dice))
# Calculate F-score if epoch >= epoch_start_eval_prec
if(epoch >= epoch_start_eval_prec):
# Apply a 0.5 threshold on detection output and convert to binary mask
e_hard = filters.apply_hysteresis_threshold(et_all_sig.squeeze(), 0.5, 0.5)
e_hard2 = (e_hard > 0).astype(np.uint8)
e_hard2_all = e_hard2.copy()
# Get predicted cell centers by finding center of contours in binary mask
e_dot = np.zeros((img.shape[-2], img.shape[-1]))
contours, hierarchy = cv2.findContours(e_hard2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for idx in range(len(contours)):
contour_i = contours[idx]
M = cv2.moments(contour_i)
if(M['m00'] == 0):
continue;
cx = round(M['m10'] / M['m00'])
cy = round(M['m01'] / M['m00'])
e_dot[cy, cx] = 1
e_dot_all = e_dot.copy()
tp_count = 0 # initialize number of true positives
fp_count = 0 # initialize number of false positives
fn_count = 0 # initialize number of false negatives
# Init g_dot_vis to contain all cell detections ground truth dots
g_dot_vis = gt_dots_all.copy().squeeze()
# Get connected components in the predicted detection binary map
e_hard2_comp = label(e_hard2)
e_hard2_comp_all = e_hard2_comp.copy()
# For each connected component, if it interests with a grount truth dot then it is a TP, otherwise it is a FP.
# If it is a TP, remove it from g_dot_vis.
# Note: if more than one ground truth dot interests, then only one is a TP.
for l in range(1, e_hard2_comp.max()+1):
e_hard2_comp_l = (e_hard2_comp == l)
M = moments(e_hard2_comp_l)
(y,x) = int(M[1, 0] / M[0, 0]), int(M[0, 1] / M[0, 0])
if ((e_hard2_comp_l * g_dot_vis).sum()>0): # true pos
tp_count += 1
(yg,xg) = np.where((e_hard2_comp_l * g_dot_vis) > 0)
yg = yg[0]
xg = xg[0]
g_dot_vis[yg,xg] = 0
else: #((e_hard2_comp_l * g_dot_vis).sum()==0): # false pos
fp_count += 1
# Remaining cells in g_dot_vis are False Negatives.
fn_points = np.where(g_dot_vis > 0)
fn_count = len(fn_points[0])
# Update TP, FP, FN counts for detection with counts from current image predictions
tp_count_all[-1] = tp_count_all[-1] + tp_count
fp_count_all[-1] = fp_count_all[-1] + fp_count
fn_count_all[-1] = fn_count_all[-1] + fn_count
# Get predicted cell classes
et_class_argmax = et_class_sig.squeeze().argmax(axis=0)
e_hard2_all = e_hard2.copy()
# For each class get the TP, FP, FN counts similar to previous detection code.
for s in range(n_classes):
g_count = gt_dots[0,s,:,:].sum()
e_hard2 = (et_class_argmax == s)
e_dot = e_hard2 * e_dot_all
g_dot = gt_dots[0,s,:,:].squeeze()
tp_count = 0
fp_count = 0
fn_count = 0
g_dot_vis = g_dot.copy()
e_dots_tuple = np.where(e_dot > 0)
for idx in range(len(e_dots_tuple[0])):
cy=e_dots_tuple[0][idx]
cx=e_dots_tuple[1][idx]
l = e_hard2_comp_all[cy, cx]
e_hard2_comp_l = (e_hard2_comp == l)
if ((e_hard2_comp_l * g_dot_vis).sum()>0): # true pos
tp_count += 1
(yg,xg) = np.where((e_hard2_comp_l * g_dot_vis) > 0)
yg = yg[0]
xg = xg[0]
g_dot_vis[yg,xg] = 0
else: #((e_hard2_comp_l * g_dot_vis).sum()==0): # false pos
fp_count += 1
fn_points = np.where(g_dot_vis > 0)
fn_count = len(fn_points[0])
tp_count_all[s] = tp_count_all[s] + tp_count
fp_count_all[s] = fp_count_all[s] + fp_count
fn_count_all[s] = fn_count_all[s] + fn_count
del img,gt_dmap,gt_dmap_all,gt_dmap_subclasses, gt_kmap, et_dmap_all, et_dmap_class, et_kmap,gt_dots
saved = False
precision_all = np.zeros((n_classes_out))
recall_all = np.zeros((n_classes_out))
f1_all = np.zeros((n_classes_out))
if(epoch >= epoch_start_eval_prec):
count_all = tp_count_all.sum() + fn_count_all.sum()
for s in range(n_classes_out):
if(tp_count_all[s] + fp_count_all[s] == 0):
precision_all[s] = 1
else:
precision_all[s] = tp_count_all[s]/(tp_count_all[s] + fp_count_all[s])
if(tp_count_all[s] + fn_count_all[s] == 0):
recall_all[s] = 1
else:
recall_all[s] = tp_count_all[s]/(tp_count_all[s] + fn_count_all[s])
if(precision_all[s]+recall_all[s] == 0):
f1_all[s] = 0
else:
f1_all[s] = 2*(precision_all[s] *recall_all[s])/(precision_all[s]+recall_all[s])
print_msg = f'epoch {epoch} s {s} precision_all {precision_all[s]} recall_all {recall_all[s]} f1_all {f1_all[s]}'
print(print_msg)
log_file.write(print_msg+'\n')
log_file.flush()
print_msg = f'epoch {epoch} all precision_all {precision_all.mean()} recall_all {recall_all.mean()} f1_all {f1_all.mean()}'
print(print_msg)
log_file.write(print_msg+'\n')
log_file.flush()
print_msg = f'epoch {epoch} classes precision_all {precision_all[:-1].mean()} recall_all {recall_all[:-1].mean()} f1_all {f1_all[:-1].mean()}'
print(print_msg)
log_file.write(print_msg+'\n')
log_file.flush()
# Check if this is the best epoch so far based on fscore on validation set
model_save_postfix = ''
is_best_epoch = False
# if (f1_all.mean() > best_f1_mean):
if (f1_all.mean() - best_f1_mean >= 0.005):
model_save_postfix += '_f1'
best_f1_mean = f1_all.mean()
best_prec_recall_diff = abs(recall_all.mean()-precision_all.mean())
is_best_epoch = True
elif ((abs(f1_all.mean() - best_f1_mean) < 0.005) # a slightly lower f score but smaller gap between precision and recall
and abs(recall_all.mean()-precision_all.mean()) < best_prec_recall_diff):
model_save_postfix += '_pr-diff'
best_f1_mean = f1_all.mean()
best_prec_recall_diff = abs(recall_all.mean()-precision_all.mean())
is_best_epoch = True
# if (recall_all.mean() > best_recall_mean):
# model_save_postfix += '_rec'
# best_recall_mean = recall_all.mean()
# is_best_epoch = True
# Save checkpoint if it is best so far
if((saved == False) and (model_save_postfix != '')):
print('epoch', epoch, 'saving')
new_epoch_filepath = os.path.join(checkpoints_save_path, 'mcspat_epoch_'+str(epoch)+model_save_postfix+".pth")
torch.save(model.state_dict(), new_epoch_filepath ) # save only if get better error
centroids.dump(os.path.join(checkpoints_save_path, 'epoch{}_centroids.npy'.format(epoch)))
saved = True
print_msg = f'epoch {epoch} saved.'
print(print_msg)
log_file.write(print_msg+'\n')
log_file.flush()
if(is_best_epoch):
best_epoch_filepath = new_epoch_filepath
best_epoch = epoch
# Adam optimizer needs resetting to avoid parameters learning rates dying
sys.stdout.flush();
if((epoch >= next_restart_epoch) and not(best_epoch_filepath is None)):
next_restart_epoch = epoch + restart_epochs_freq
model.load_state_dict(torch.load(best_epoch_filepath), strict=False);
model.to(device)
optimizer=torch.optim.Adam(list(model.final_layers_lst.parameters())+list(model.decoder.parameters())+list(model.bottleneck.parameters())+list(model.encoder.parameters()),lr)
log_file.close()
| [
"torch.nn.L1Loss",
"numpy.array",
"torch.nn.Sigmoid",
"os.path.exists",
"numpy.where",
"os.mkdir",
"skimage.measure.moments",
"sys.stdout.flush",
"my_dataloader.CellsDataset",
"my_dataloader_w_kfunc.CellsDataset",
"cv2.moments",
"time.time",
"model_arch.UnetVggMultihead",
"torch.device",
... | [((1393, 1452), 'os.path.join', 'os.path.join', (['checkpoints_root_dir', 'checkpoints_folder_name'], {}), '(checkpoints_root_dir, checkpoints_folder_name)\n', (1405, 1452), False, 'import os\n'), ((1483, 1547), 'os.path.join', 'os.path.join', (['clustering_pseudo_gt_root', 'checkpoints_folder_name'], {}), '(clustering_pseudo_gt_root, checkpoints_folder_name)\n', (1495, 1547), False, 'import os\n'), ((2606, 2630), 'torch.device', 'torch.device', (['gpu_or_cpu'], {}), '(gpu_or_cpu)\n', (2618, 2630), False, 'import torch\n'), ((2661, 2672), 'time.time', 'time.time', ([], {}), '()\n', (2670, 2672), False, 'import time\n'), ((2866, 2905), 'os.path.join', 'os.path.join', (['train_data_root', '"""images"""'], {}), "(train_data_root, 'images')\n", (2878, 2905), False, 'import os\n'), ((2928, 2970), 'os.path.join', 'os.path.join', (['train_data_root', '"""gt_custom"""'], {}), "(train_data_root, 'gt_custom')\n", (2940, 2970), False, 'import os\n'), ((2994, 3036), 'os.path.join', 'os.path.join', (['train_data_root', '"""gt_custom"""'], {}), "(train_data_root, 'gt_custom')\n", (3006, 3036), False, 'import os\n'), ((3169, 3213), 'os.path.join', 'os.path.join', (['train_data_root', '"""k_func_maps"""'], {}), "(train_data_root, 'k_func_maps')\n", (3181, 3213), False, 'import os\n'), ((3273, 3311), 'os.path.join', 'os.path.join', (['test_data_root', '"""images"""'], {}), "(test_data_root, 'images')\n", (3285, 3311), False, 'import os\n'), ((3333, 3374), 'os.path.join', 'os.path.join', (['test_data_root', '"""gt_custom"""'], {}), "(test_data_root, 'gt_custom')\n", (3345, 3374), False, 'import os\n'), ((3396, 3437), 'os.path.join', 'os.path.join', (['test_data_root', '"""gt_custom"""'], {}), "(test_data_root, 'gt_custom')\n", (3408, 3437), False, 'import os\n'), ((3565, 3608), 'os.path.join', 'os.path.join', (['test_data_root', '"""k_func_maps"""'], {}), "(test_data_root, 'k_func_maps')\n", (3577, 3608), False, 'import os\n'), ((4530, 4550), 'numpy.array', 'np.array', 
(['[*r_range]'], {}), '([*r_range])\n', (4538, 4550), True, 'import numpy as np\n'), ((5162, 5190), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (5184, 5190), False, 'import torch\n'), ((5201, 5471), 'model_arch.UnetVggMultihead', 'UnetVggMultihead', ([], {'kwargs': "{'dropout_prob': dropout_prob, 'initial_pad': initial_pad, 'interpolate':\n interpolate, 'conv_init': conv_init, 'n_classes': n_classes,\n 'n_channels': n_channels, 'n_heads': 4, 'head_classes': [1, n_classes,\n n_classes2, r_classes_all]}"}), "(kwargs={'dropout_prob': dropout_prob, 'initial_pad':\n initial_pad, 'interpolate': interpolate, 'conv_init': conv_init,\n 'n_classes': n_classes, 'n_channels': n_channels, 'n_heads': 4,\n 'head_classes': [1, n_classes, n_classes2, r_classes_all]})\n", (5217, 5471), False, 'from model_arch import UnetVggMultihead\n'), ((5732, 5744), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5742, 5744), True, 'import torch.nn as nn\n'), ((5824, 5841), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5834, 5841), True, 'import torch.nn as nn\n'), ((5916, 5942), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (5925, 5942), True, 'import torch.nn as nn\n'), ((6210, 6453), 'my_dataloader_w_kfunc.CellsDataset', 'CellsDataset', (['train_image_root', 'train_dmap_root', 'train_dots_root', 'class_indx', 'train_dmap_subclasses_root', 'train_dots_subclasses_root', 'train_kmap_root'], {'split_filepath': 'train_split_filepath', 'phase': '"""train"""', 'fixed_size': '(448)', 'max_scale': '(16)'}), "(train_image_root, train_dmap_root, train_dots_root, class_indx,\n train_dmap_subclasses_root, train_dots_subclasses_root, train_kmap_root,\n split_filepath=train_split_filepath, phase='train', fixed_size=448,\n max_scale=16)\n", (6222, 6453), False, 'from my_dataloader_w_kfunc import CellsDataset\n'), ((6455, 6534), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', 
(['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (6482, 6534), False, 'import torch\n'), ((6594, 6828), 'my_dataloader_w_kfunc.CellsDataset', 'CellsDataset', (['test_image_root', 'test_dmap_root', 'test_dots_root', 'class_indx', 'test_dmap_subclasses_root', 'test_dots_subclasses_root', 'test_kmap_root'], {'split_filepath': 'test_split_filepath', 'phase': '"""test"""', 'fixed_size': '(-1)', 'max_scale': '(16)'}), "(test_image_root, test_dmap_root, test_dots_root, class_indx,\n test_dmap_subclasses_root, test_dots_subclasses_root, test_kmap_root,\n split_filepath=test_split_filepath, phase='test', fixed_size=-1,\n max_scale=16)\n", (6606, 6828), False, 'from my_dataloader_w_kfunc import CellsDataset\n'), ((6828, 6898), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_dataset, batch_size=1, shuffle=False)\n', (6855, 6898), False, 'import torch\n'), ((6985, 7136), 'my_dataloader.CellsDataset', 'CellsDataset_simple', (['train_image_root', 'train_dmap_root', 'train_dots_root', 'class_indx'], {'phase': '"""test"""', 'fixed_size': '(-1)', 'max_scale': '(16)', 'return_padding': '(True)'}), "(train_image_root, train_dmap_root, train_dots_root,\n class_indx, phase='test', fixed_size=-1, max_scale=16, return_padding=True)\n", (7004, 7136), True, 'from my_dataloader import CellsDataset as CellsDataset_simple\n'), ((7154, 7245), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['simple_train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(simple_train_dataset, batch_size=batch_size,\n shuffle=False)\n', (7181, 7245), False, 'import torch\n'), ((1560, 1596), 'os.path.exists', 'os.path.exists', (['checkpoints_root_dir'], {}), '(checkpoints_root_dir)\n', (1574, 1596), False, 'import os\n'), ((1606, 1636), 'os.mkdir', 'os.mkdir', (['checkpoints_root_dir'], {}), '(checkpoints_root_dir)\n', 
(1614, 1636), False, 'import os\n'), ((1649, 1686), 'os.path.exists', 'os.path.exists', (['checkpoints_save_path'], {}), '(checkpoints_save_path)\n', (1663, 1686), False, 'import os\n'), ((1696, 1727), 'os.mkdir', 'os.mkdir', (['checkpoints_save_path'], {}), '(checkpoints_save_path)\n', (1704, 1727), False, 'import os\n'), ((1740, 1781), 'os.path.exists', 'os.path.exists', (['clustering_pseudo_gt_root'], {}), '(clustering_pseudo_gt_root)\n', (1754, 1781), False, 'import os\n'), ((1791, 1826), 'os.mkdir', 'os.mkdir', (['clustering_pseudo_gt_root'], {}), '(clustering_pseudo_gt_root)\n', (1799, 1826), False, 'import os\n'), ((1839, 1870), 'os.path.exists', 'os.path.exists', (['cluster_tmp_out'], {}), '(cluster_tmp_out)\n', (1853, 1870), False, 'import os\n'), ((1880, 1905), 'os.mkdir', 'os.mkdir', (['cluster_tmp_out'], {}), '(cluster_tmp_out)\n', (1888, 1905), False, 'import os\n'), ((2000, 2085), 'os.path.join', 'os.path.join', (['checkpoints_root_dir', 'checkpoints_folder_name', 'f"""train_log_{i}.txt"""'], {}), "(checkpoints_root_dir, checkpoints_folder_name,\n f'train_log_{i}.txt')\n", (2012, 2085), False, 'import os\n'), ((13566, 13589), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (13574, 13589), True, 'import numpy as np\n'), ((13615, 13638), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (13623, 13638), True, 'import numpy as np\n'), ((13664, 13687), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (13672, 13687), True, 'import numpy as np\n'), ((23565, 23588), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (23573, 23588), True, 'import numpy as np\n'), ((23612, 23635), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (23620, 23635), True, 'import numpy as np\n'), ((23655, 23678), 'numpy.zeros', 'np.zeros', (['n_classes_out'], {}), '(n_classes_out)\n', (23663, 23678), True, 'import numpy as np\n'), ((27107, 27125), 'sys.stdout.flush', 
'sys.stdout.flush', ([], {}), '()\n', (27123, 27125), False, 'import sys\n'), ((2098, 2127), 'os.path.exists', 'os.path.exists', (['log_file_path'], {}), '(log_file_path)\n', (2112, 2127), False, 'import os\n'), ((5520, 5548), 'torch.load', 'torch.load', (['model_param_path'], {}), '(model_param_path)\n', (5530, 5548), False, 'import torch\n'), ((8610, 8628), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (8614, 8628), True, 'from tqdm import tqdm as tqdm\n'), ((13820, 13837), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (13824, 13837), True, 'from tqdm import tqdm as tqdm\n'), ((19178, 19218), 'numpy.zeros', 'np.zeros', (['(img.shape[-2], img.shape[-1])'], {}), '((img.shape[-2], img.shape[-1]))\n', (19186, 19218), True, 'import numpy as np\n'), ((19257, 19324), 'cv2.findContours', 'cv2.findContours', (['e_hard2', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(e_hard2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (19273, 19324), False, 'import cv2\n'), ((20178, 20192), 'skimage.measure.label', 'label', (['e_hard2'], {}), '(e_hard2)\n', (20183, 20192), False, 'from skimage.measure import label, moments\n'), ((21282, 21305), 'numpy.where', 'np.where', (['(g_dot_vis > 0)'], {}), '(g_dot_vis > 0)\n', (21290, 21305), True, 'import numpy as np\n'), ((27302, 27333), 'torch.load', 'torch.load', (['best_epoch_filepath'], {}), '(best_epoch_filepath)\n', (27312, 27333), False, 'import torch\n'), ((19444, 19466), 'cv2.moments', 'cv2.moments', (['contour_i'], {}), '(contour_i)\n', (19455, 19466), False, 'import cv2\n'), ((20664, 20687), 'skimage.measure.moments', 'moments', (['e_hard2_comp_l'], {}), '(e_hard2_comp_l)\n', (20671, 20687), False, 'from skimage.measure import label, moments\n'), ((22354, 22373), 'numpy.where', 'np.where', (['(e_dot > 0)'], {}), '(e_dot > 0)\n', (22362, 22373), True, 'import numpy as np\n'), ((23131, 23154), 'numpy.where', 'np.where', (['(g_dot_vis > 0)'], {}), '(g_dot_vis > 0)\n', (23139, 23154), True, 
'import numpy as np\n'), ((20909, 20949), 'numpy.where', 'np.where', (['(e_hard2_comp_l * g_dot_vis > 0)'], {}), '(e_hard2_comp_l * g_dot_vis > 0)\n', (20917, 20949), True, 'import numpy as np\n'), ((22802, 22842), 'numpy.where', 'np.where', (['(e_hard2_comp_l * g_dot_vis > 0)'], {}), '(e_hard2_comp_l * g_dot_vis > 0)\n', (22810, 22842), True, 'import numpy as np\n')] |
import sys
import warnings
from copy import deepcopy
import itertools
import numpy as np
from scipy.stats import entropy, multivariate_normal
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky,\
_check_precision_matrix
import pandas as pd
from .utilities import ranking_based_roulette
def _diag_only(x):
return np.diag(np.diag(x))
def _is_not_singular(a):
return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]
def is_singular(a):
    """Return True when *a* fails sklearn's precision-matrix check or lacks full rank."""
    passes_precision_check = True
    try:
        _check_precision_matrix(a, 'full')
    except Exception:
        # sklearn rejects matrices that are not symmetric positive definite.
        passes_precision_check = False
    # Short-circuits: rank is only checked when the precision check passed.
    return (not passes_precision_check) or (not _is_not_singular(a))
def _singular_prevent(c):
    """Return *c* unchanged if non-singular, otherwise a regularized diagonal copy.

    Regularization keeps only the diagonal of *c* and repeatedly adds the
    identity until the matrix passes the singularity check.
    """
    if not is_singular(c):
        return c
    fixed = _diag_only(c)
    bump = np.identity(fixed.shape[0])
    while is_singular(fixed):
        fixed = fixed + bump
    return fixed
def _singular_prevent_multiple(covs):
    """Regularize covariance matrices so that none of them is singular.

    Parameters
    ----------
    covs : array-like
        Either a stack of covariance matrices with shape ``(k, d, d)`` or,
        as a generalization, a single 2-D matrix of shape ``(d, d)``.

    Returns
    -------
    numpy.ndarray
        Array of the same shape where every matrix has been passed through
        ``_singular_prevent``.

    Raises
    ------
    ValueError
        If *covs* is neither 2-D nor 3-D.

    Note: the previous implementation silently returned ``None`` for any
    non-3-D input because it only returned inside the ``ndim == 3`` branch.
    """
    covs = np.asarray(covs)
    if covs.ndim == 3:
        new_covs = np.zeros(covs.shape)
        for i, c in enumerate(covs):
            new_covs[i] = _singular_prevent(c)
        return new_covs
    if covs.ndim == 2:
        # Single matrix: regularize it directly instead of returning None.
        return np.asarray(_singular_prevent(covs))
    raise ValueError("covs must be a 2-D matrix or a 3-D stack of matrices")
def _create_gmm(k, means, weights, precisions=None, covariances=None):
    """Build a warm-started sklearn GaussianMixture from explicit parameters.

    Exactly one of *precisions* / *covariances* may be omitted; the missing
    one is derived from the other via the pseudo-inverse. The returned model
    has its fitted attributes (``weights_``, ``means_``, ``covariances_``,
    ``precisions_``, ``precisions_cholesky_``) set directly so it can predict
    without calling ``fit`` first.
    """
    if covariances is None:
        precisions = np.array(precisions)
        covariances = np.linalg.pinv(precisions)
    elif precisions is None:
        covariances = np.array(covariances)
        precisions = np.linalg.pinv(covariances)
    # max_iter=1 + warm_start=True: any later fit() performs a single EM step
    # starting from the parameters injected below.
    gmm = GaussianMixture(n_components=k,
                          weights_init=weights,
                          means_init=means,
                          reg_covar=1e-2,
                          precisions_init=precisions,
                          max_iter=1,
                          warm_start=True)
    try:
        gmm.precisions_cholesky_ = _compute_precision_cholesky(covariances,
                                                               'full')
    except Exception:
        # Cholesky failed: regularize the covariances and retry once.
        c2 = covariances.copy()
        covariances = _singular_prevent_multiple(covariances)
        precisions = np.linalg.pinv(covariances)
        try:
            gmm.precisions_cholesky_ = _compute_precision_cholesky(covariances,
                                                                   'full')
        except Exception:
            # Dump the offending matrices for post-mortem before raising.
            # (Message is Portuguese for "Problem in the matrix! Dumped to
            # file cov.npy".)
            c2.dump('cov.npy')
            raise Exception('Problema na matriz! Dump no arquivo cov.npy')
    gmm.weights_ = weights
    gmm.means_ = means
    gmm.covariances_ = covariances
    gmm.precisions_ = precisions
    return gmm
gmm.precisions_cholesky_ = _compute_precision_cholesky(covariances,
'full')
except Exception:
c2.dump('cov.npy')
raise Exception('Problema na matriz! Dump no arquivo cov.npy')
gmm.weights_ = weights
gmm.means_ = means
gmm.covariances_ = covariances
gmm.precisions_ = precisions
return gmm
def partial_log_likelihood(gmm, X):
respons = gmm.predict_proba(X)
pis = gmm.weights_
log_pi_mat = np.tile(np.log(pis), (respons.shape[0], 1))
try:
N = np.array([multivariate_normal.pdf(X, gmm.means_[g],
gmm.covariances_[g],
allow_singular=True)
for g in range(gmm.n_components)]
).T
except Exception:
N = np.array([multivariate_normal.pdf(X, gmm.means_[g],
_singular_prevent(
gmm.covariances_[g]))
for g in range(gmm.n_components)]
).T
N += sys.float_info.min
log_N = np.log(N)
plls = np.sum((log_N + log_pi_mat) * respons, axis=0)
return plls
def _responsability_entropy(gmm, X):
respons = entropy(gmm.predict_proba(X).T)
return respons
def _closest_chunklet(obj, chunklets, X):
return np.argmin([np.min([np.linalg.norm(X[idx] - obj)
for idx in c]) for c in chunklets])
class Individual:
@staticmethod
def covariance_matrices(X, y, n_clusters):
X = np.array(X)
y = np.array(y)
clusters = (X[y == i] for i in range(n_clusters))
n_attrs = len(X[0])
return [(np.cov(g, rowvar=False)
if len(g) > 1 else np.zeros((n_attrs, n_attrs)))
for g in clusters]
@staticmethod
def random_mapping_(chunklets, k, X):
nc = len(chunklets)
n = len(X)
# Map each chunklet to a cluster
mapping = list(range(nc))
seed_indexes = [np.random.choice(chunklets[i]) for i in mapping]
# Fill remaining with random
for _ in range(k - nc):
random_index = np.random.choice(n)
while random_index in seed_indexes:
random_index = np.random.choice(n)
mapping.append(_closest_chunklet(X[random_index], chunklets, X))
seed_indexes.append(random_index)
return seed_indexes, mapping
@staticmethod
def generate_from_kmeans(X, k, r, km_method, max_iter):
chunklets = r.get_chunklets()
seed_indexes, mapping = Individual.random_mapping_(chunklets, k, X)
if not chunklets:
km_method = 'random'
if km_method == 'chunklets':
seeds = X[seed_indexes]
km = KMeans(n_clusters=k, init=seeds, n_init=1,
random_state=0, max_iter=max_iter + 1)
else:
km = KMeans(n_clusters=k, init=km_method, n_init=1,
random_state=0, max_iter=max_iter + 1)
km.fit(X)
y = km.predict(X)
pis = np.bincount(y, minlength=k) / len(X)
mus = km.cluster_centers_
covs = Individual.covariance_matrices(X, y, km.n_clusters)
return Individual(_create_gmm(k, mus, pis, covariances=covs), mapping)
@staticmethod
def generate_feasible(X, k, r, k_means_max_iter=2):
return Individual.generate_from_kmeans(
X, k, r, 'chunklets', k_means_max_iter)
@staticmethod
def generate_infeasible(X, k, r, k_means_max_iter=2):
return Individual.generate_from_kmeans(
X, k, r, 'random', k_means_max_iter)
def reset_caches(self):
self._infeasible_fitness = None
self._ff_and_llk = None
self._is_feasible = None
def __init__(self, gmm, clusters_to_chunklets):
if len(clusters_to_chunklets) != gmm.n_components:
raise ValueError(
"Clusters to chunklets should be of length n_components")
self.gmm = deepcopy(gmm)
self.clusters_to_chunklets = np.array(
clusters_to_chunklets).astype(int)
self.reset_caches()
def em(self, number_iterations, X):
self.gmm.set_params(max_iter=number_iterations)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
try:
self.gmm.fit(X)
except Exception:
pass
self.reset_caches()
def is_feasible(self, constraints, X):
if self._is_feasible is None:
self._is_feasible = constraints.is_feasible(self.predict(X))
return self._is_feasible
def is_infeasible(self, constraints, X):
return not self.is_feasible(constraints, X)
def infeasible_fitness(self, constraints, X):
if self._infeasible_fitness is None:
cost = 0
for chunklet, indexes in enumerate(constraints.get_chunklets()):
allowed_clusters = self.clusters_to_chunklets == chunklet
objects = X[indexes]
predicted_chunklet =\
self.clusters_to_chunklets[self.gmm.predict(objects)]
for idx, pred_chunk in zip(indexes, predicted_chunklet):
if pred_chunk != chunklet:
if not np.any(allowed_clusters):
cost += 1
else:
cost += 1 - \
np.max(self.gmm.predict_proba(
[X[idx]])[0, allowed_clusters])
self._infeasible_fitness = cost
return self._infeasible_fitness
def llk(self, X):
return self.gmm.score(X) * X.shape[0]
def ff_and_llk(self, X):
if self._ff_and_llk is None:
n, m = X.shape
llk = self.llk(X)
k = self.gmm.n_components
# Minumim description Length
penalization = k / 2 * (1 + m + (m * (m + 1) / 2)) * np.log(n)
self._ff_and_llk = ((penalization - llk), llk)
return self._ff_and_llk
def feasible_fitness(self, X):
return self.ff_and_llk(X)[0]
def mutate_create(self, constraints, X, seeds_indexes):
seeds = X[seeds_indexes]
w, m, p, k = self.gmm.weights_.copy(), self.gmm.means_.copy(
), self.gmm.precisions_.copy(), self.gmm.n_components
c = _singular_prevent_multiple(self.gmm.covariances_.copy())
cov = _singular_prevent(np.diag(np.var(X, axis=0)) / 10)
prec = np.linalg.inv(cov)
mapping = self.clusters_to_chunklets.copy()
chunklets = constraints.get_chunklets()
slen = len(seeds)
try:
normals = [wi * multivariate_normal.pdf(seeds, mean=mi,
cov=ci,
allow_singular=True)
for mi, ci, wi in zip(m, c, w)]
predicts = np.reshape(
np.argmax(normals, axis=0),
slen)
except np.linalg.LinAlgError as e:
print(e)
np.save('./singular.npy', self.gmm.covariances_)
raise Exception("Matriz singular salva no arquivo 'singular.npy'")
closest_chunklets = []
pis = []
for s, cluster_to_split in zip(seeds, predicts):
half_pi = w[cluster_to_split] / 2
w[cluster_to_split] = half_pi
pis.append(half_pi)
closest_chunklets.append(_closest_chunklet(s, chunklets, X))
k += slen
w = np.append(w, pis)
m = np.append(m, seeds, axis=0)
p = np.append(p, [prec] * slen, axis=0)
c = np.append(c, [cov] * slen, axis=0)
mapping = np.append(mapping, closest_chunklets)
gmm = _create_gmm(k, m, w, p, covariances=c)
return Individual(gmm, mapping)
def mutate_remove(self, clusters_indexes):
w, m, p, k = self.gmm.weights_.copy(), self.gmm.means_.copy(
), self.gmm.precisions_.copy(), self.gmm.n_components.copy()
mapping = self.clusters_to_chunklets.copy()
should_really_remove = []
_, counts = np.unique(mapping, return_counts=True)
counts = list(counts)
for g in clusters_indexes:
if counts[mapping[g]] > 1:
should_really_remove.append(g)
counts[mapping[g]] -= 1
if len(should_really_remove) > 0:
w = np.delete(w, should_really_remove, axis=0)
m = np.delete(m, should_really_remove, axis=0)
p = np.delete(p, should_really_remove, axis=0)
mapping = np.delete(mapping, should_really_remove, axis=0)
k -= len(should_really_remove)
w = w / np.sum(w)
return Individual(_create_gmm(k, m, w, p), mapping)
def _decide_clusters_to_remove(self, kmin, X):
k = self.gmm.n_components
n_clusters = np.random.randint(1, (k - kmin) + 1)
pll = partial_log_likelihood(self.gmm, X)
_, counts = np.unique(self.clusters_to_chunklets, return_counts=True)
counts = list(counts)
to_remove = []
roulette = ranking_based_roulette(
pll, descending=True, reposition=False)
while len(to_remove) < n_clusters:
try:
g = next(roulette)
chunk = self.clusters_to_chunklets[g]
# prevents removing only cluster of chunklet
if counts[chunk] > 1:
to_remove.append(g)
counts[chunk] -= 1
except StopIteration:
break
return to_remove
def mutate_feasible(self, kmin, kmax, constraints, X):
k = self.gmm.n_components
prob = (k - kmin) / (kmax - kmin)
if np.random.rand() > prob: # create
n_clusters = np.random.randint(1, (kmax - k) + 1)
ent = _responsability_entropy(self.gmm, X)
roulette = itertools.islice(ranking_based_roulette(
ent,
descending=True,
reposition=False),
n_clusters)
to_create = list(roulette)
return self.mutate_create(constraints, X, to_create)
# remove
to_remove = self._decide_clusters_to_remove(kmin, X)
return self.mutate_remove(to_remove)
def _find_objects_out_of_chunklet(self, constraints, X):
objects_out_of_chunklet = []
costs = []
for chunk, idxs in enumerate(constraints.get_chunklets()):
idxs = np.array(idxs)
wrong = idxs[self.clusters_to_chunklets[self.gmm.predict(
X[idxs])] != chunk]
if wrong.size:
wrong_costs = 1 - self.gmm.predict_proba(X[wrong]).max(axis=1)
objects_out_of_chunklet.extend(wrong)
costs.extend(wrong_costs)
return objects_out_of_chunklet, costs
def mutate_infeasible(self, kmin, kmax, constraints, X):
k = self.gmm.n_components
if k < kmax: # create
n_clusters = np.random.randint(1, (kmax - k) + 1)
objects_out_of_chunklet, costs = \
self._find_objects_out_of_chunklet(constraints, X)
roulette = itertools.islice(ranking_based_roulette(
costs,
descending=True,
reposition=False),
n_clusters)
seeds_indexes = [objects_out_of_chunklet[i] for i in roulette]
return self.mutate_create(constraints, X, seeds_indexes)
# remove
to_remove = self._decide_clusters_to_remove(kmin, X)
return self.mutate_remove(to_remove)
def mutate(self, kmin, kmax, constraints, X):
if self.is_feasible(constraints, X):
return self.mutate_feasible(kmin, kmax, constraints, X)
return self.mutate_infeasible(kmin, kmax, constraints, X)
def remove_empty_clusters(self, X):
k = self.gmm.n_components
predictions = np.unique(self.gmm.predict(X))
all_clusters = set(range(k))
to_remove = [x for x in all_clusters if x not in predictions]
return self.mutate_remove(list(to_remove))
    def update_mapping(self, r, X):
        """Re-point clusters that win no constrained object.

        For each cluster that (a) is not the sole representative of its
        chunklet and (b) has the argmax responsibility for no constrained
        object, remap it to the chunklet of the object it responds to most.

        :param r: constraint object exposing get_chunklet_dict()
                  (presumably object index -> chunklet label; verify caller)
        :param X: data matrix indexable by object index
        """
        d = r.get_chunklet_dict()
        objs_in_chunks = list(d.keys())
        # responsibilities of every cluster, restricted to constrained objects
        respons = self.gmm.predict_proba(X[objs_in_chunks])
        # clusters that win at least one constrained object
        predicts = np.unique(respons.argmax(axis=1))
        # counts[c] = number of clusters currently mapped to chunklet c
        # NOTE(review): indexing counts by chunklet label assumes labels are
        # contiguous 0..m-1 and all present in the mapping — confirm upstream.
        _, counts = np.unique(self.clusters_to_chunklets, return_counts=True)
        for cluster, resp in enumerate(respons.T):
            mapped_chunklet = self.clusters_to_chunklets[cluster]
            not_only_representant = counts[mapped_chunklet] > 1
            if not_only_representant and cluster not in predicts:
                # remap to the chunklet of the object this cluster likes most
                closest_object = objs_in_chunks[np.argmax(resp)]
                self.clusters_to_chunklets[cluster] = d[closest_object]
                # refresh counts: the mapping just changed
                _, counts = np.unique(
                    self.clusters_to_chunklets, return_counts=True)
        self.reset_caches()
def predict_cluster(self, X):
return self.gmm.predict(X)
def predict(self, X):
return self.clusters_to_chunklets[self.gmm.predict(X)]
def _predict_proba_fn(self, fn, X):
clusters_proba = self.gmm.predict_proba(X)
class_proba = np.vstack([fn(clusters_proba[:, df.index], axis=1)
for _, df in pd.DataFrame(
self.clusters_to_chunklets
).groupby(0)]).T
return class_proba
def predict_proba(self, X):
prob = self._predict_proba_fn(np.max, X)
return (prob.T / prob.sum(axis=1)).T
    def predict_proba_sum(self, X):
        # Unnormalized chunklet scores: the sum (rather than the max) of the
        # responsibilities of the clusters mapped to each chunklet.
        return self._predict_proba_fn(np.sum, X)
| [
"numpy.linalg.matrix_rank",
"numpy.linalg.pinv",
"numpy.random.rand",
"numpy.log",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.cov",
"numpy.save",
"sklearn.mixture.gaussian_mixture._check_precision_matrix",
"numpy.delete",
"pandas.DataFrame",
"numpy.identity",
"sklearn.mixt... | [((932, 952), 'numpy.zeros', 'np.zeros', (['covs.shape'], {}), '(covs.shape)\n', (940, 952), True, 'import numpy as np\n'), ((1411, 1559), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'k', 'weights_init': 'weights', 'means_init': 'means', 'reg_covar': '(0.01)', 'precisions_init': 'precisions', 'max_iter': '(1)', 'warm_start': '(True)'}), '(n_components=k, weights_init=weights, means_init=means,\n reg_covar=0.01, precisions_init=precisions, max_iter=1, warm_start=True)\n', (1426, 1559), False, 'from sklearn.mixture import GaussianMixture\n'), ((3198, 3207), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (3204, 3207), True, 'import numpy as np\n'), ((3220, 3266), 'numpy.sum', 'np.sum', (['((log_N + log_pi_mat) * respons)'], {'axis': '(0)'}), '((log_N + log_pi_mat) * respons, axis=0)\n', (3226, 3266), True, 'import numpy as np\n'), ((431, 441), 'numpy.diag', 'np.diag', (['x'], {}), '(x)\n', (438, 441), True, 'import numpy as np\n'), ((588, 622), 'sklearn.mixture.gaussian_mixture._check_precision_matrix', '_check_precision_matrix', (['a', '"""full"""'], {}), "(a, 'full')\n", (611, 622), False, 'from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky, _check_precision_matrix\n'), ((790, 813), 'numpy.identity', 'np.identity', (['c.shape[0]'], {}), '(c.shape[0])\n', (801, 813), True, 'import numpy as np\n'), ((1208, 1228), 'numpy.array', 'np.array', (['precisions'], {}), '(precisions)\n', (1216, 1228), True, 'import numpy as np\n'), ((1251, 1277), 'numpy.linalg.pinv', 'np.linalg.pinv', (['precisions'], {}), '(precisions)\n', (1265, 1277), True, 'import numpy as np\n'), ((1757, 1805), 'sklearn.mixture.gaussian_mixture._compute_precision_cholesky', '_compute_precision_cholesky', (['covariances', '"""full"""'], {}), "(covariances, 'full')\n", (1784, 1805), False, 'from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky, _check_precision_matrix\n'), ((2590, 2601), 'numpy.log', 'np.log', 
(['pis'], {}), '(pis)\n', (2596, 2601), True, 'import numpy as np\n'), ((3653, 3664), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3661, 3664), True, 'import numpy as np\n'), ((3677, 3688), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3685, 3688), True, 'import numpy as np\n'), ((6140, 6153), 'copy.deepcopy', 'deepcopy', (['gmm'], {}), '(gmm)\n', (6148, 6153), False, 'from copy import deepcopy\n'), ((8678, 8696), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (8691, 8696), True, 'import numpy as np\n'), ((9728, 9745), 'numpy.append', 'np.append', (['w', 'pis'], {}), '(w, pis)\n', (9737, 9745), True, 'import numpy as np\n'), ((9758, 9785), 'numpy.append', 'np.append', (['m', 'seeds'], {'axis': '(0)'}), '(m, seeds, axis=0)\n', (9767, 9785), True, 'import numpy as np\n'), ((9798, 9833), 'numpy.append', 'np.append', (['p', '([prec] * slen)'], {'axis': '(0)'}), '(p, [prec] * slen, axis=0)\n', (9807, 9833), True, 'import numpy as np\n'), ((9846, 9880), 'numpy.append', 'np.append', (['c', '([cov] * slen)'], {'axis': '(0)'}), '(c, [cov] * slen, axis=0)\n', (9855, 9880), True, 'import numpy as np\n'), ((9899, 9936), 'numpy.append', 'np.append', (['mapping', 'closest_chunklets'], {}), '(mapping, closest_chunklets)\n', (9908, 9936), True, 'import numpy as np\n'), ((10323, 10361), 'numpy.unique', 'np.unique', (['mapping'], {'return_counts': '(True)'}), '(mapping, return_counts=True)\n', (10332, 10361), True, 'import numpy as np\n'), ((11086, 11120), 'numpy.random.randint', 'np.random.randint', (['(1)', '(k - kmin + 1)'], {}), '(1, k - kmin + 1)\n', (11103, 11120), True, 'import numpy as np\n'), ((11193, 11250), 'numpy.unique', 'np.unique', (['self.clusters_to_chunklets'], {'return_counts': '(True)'}), '(self.clusters_to_chunklets, return_counts=True)\n', (11202, 11250), True, 'import numpy as np\n'), ((14607, 14664), 'numpy.unique', 'np.unique', (['self.clusters_to_chunklets'], {'return_counts': '(True)'}), '(self.clusters_to_chunklets, 
return_counts=True)\n', (14616, 14664), True, 'import numpy as np\n'), ((510, 534), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['a'], {}), '(a)\n', (531, 534), True, 'import numpy as np\n'), ((1329, 1350), 'numpy.array', 'np.array', (['covariances'], {}), '(covariances)\n', (1337, 1350), True, 'import numpy as np\n'), ((1372, 1399), 'numpy.linalg.pinv', 'np.linalg.pinv', (['covariances'], {}), '(covariances)\n', (1386, 1399), True, 'import numpy as np\n'), ((2006, 2033), 'numpy.linalg.pinv', 'np.linalg.pinv', (['covariances'], {}), '(covariances)\n', (2020, 2033), True, 'import numpy as np\n'), ((4125, 4155), 'numpy.random.choice', 'np.random.choice', (['chunklets[i]'], {}), '(chunklets[i])\n', (4141, 4155), True, 'import numpy as np\n'), ((4271, 4290), 'numpy.random.choice', 'np.random.choice', (['n'], {}), '(n)\n', (4287, 4290), True, 'import numpy as np\n'), ((4896, 4982), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': 'seeds', 'n_init': '(1)', 'random_state': '(0)', 'max_iter': '(max_iter + 1)'}), '(n_clusters=k, init=seeds, n_init=1, random_state=0, max_iter=\n max_iter + 1)\n', (4902, 4982), False, 'from sklearn.cluster import KMeans\n'), ((5033, 5123), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'init': 'km_method', 'n_init': '(1)', 'random_state': '(0)', 'max_iter': '(max_iter + 1)'}), '(n_clusters=k, init=km_method, n_init=1, random_state=0, max_iter=\n max_iter + 1)\n', (5039, 5123), False, 'from sklearn.cluster import KMeans\n'), ((5203, 5230), 'numpy.bincount', 'np.bincount', (['y'], {'minlength': 'k'}), '(y, minlength=k)\n', (5214, 5230), True, 'import numpy as np\n'), ((6386, 6411), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (6409, 6411), False, 'import warnings\n'), ((6425, 6458), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (6448, 6458), False, 'import warnings\n'), ((10612, 10654), 'numpy.delete', 'np.delete', (['w', 
'should_really_remove'], {'axis': '(0)'}), '(w, should_really_remove, axis=0)\n', (10621, 10654), True, 'import numpy as np\n'), ((10671, 10713), 'numpy.delete', 'np.delete', (['m', 'should_really_remove'], {'axis': '(0)'}), '(m, should_really_remove, axis=0)\n', (10680, 10713), True, 'import numpy as np\n'), ((10730, 10772), 'numpy.delete', 'np.delete', (['p', 'should_really_remove'], {'axis': '(0)'}), '(p, should_really_remove, axis=0)\n', (10739, 10772), True, 'import numpy as np\n'), ((10795, 10843), 'numpy.delete', 'np.delete', (['mapping', 'should_really_remove'], {'axis': '(0)'}), '(mapping, should_really_remove, axis=0)\n', (10804, 10843), True, 'import numpy as np\n'), ((11956, 11972), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (11970, 11972), True, 'import numpy as np\n'), ((12016, 12050), 'numpy.random.randint', 'np.random.randint', (['(1)', '(kmax - k + 1)'], {}), '(1, kmax - k + 1)\n', (12033, 12050), True, 'import numpy as np\n'), ((12721, 12735), 'numpy.array', 'np.array', (['idxs'], {}), '(idxs)\n', (12729, 12735), True, 'import numpy as np\n'), ((13243, 13277), 'numpy.random.randint', 'np.random.randint', (['(1)', '(kmax - k + 1)'], {}), '(1, kmax - k + 1)\n', (13260, 13277), True, 'import numpy as np\n'), ((2086, 2134), 'sklearn.mixture.gaussian_mixture._compute_precision_cholesky', '_compute_precision_cholesky', (['covariances', '"""full"""'], {}), "(covariances, 'full')\n", (2113, 2134), False, 'from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky, _check_precision_matrix\n'), ((3793, 3816), 'numpy.cov', 'np.cov', (['g'], {'rowvar': '(False)'}), '(g, rowvar=False)\n', (3799, 3816), True, 'import numpy as np\n'), ((3852, 3880), 'numpy.zeros', 'np.zeros', (['(n_attrs, n_attrs)'], {}), '((n_attrs, n_attrs))\n', (3860, 3880), True, 'import numpy as np\n'), ((4370, 4389), 'numpy.random.choice', 'np.random.choice', (['n'], {}), '(n)\n', (4386, 4389), True, 'import numpy as np\n'), ((6191, 6222), 'numpy.array', 
'np.array', (['clusters_to_chunklets'], {}), '(clusters_to_chunklets)\n', (6199, 6222), True, 'import numpy as np\n'), ((8129, 8138), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (8135, 8138), True, 'import numpy as np\n'), ((9144, 9170), 'numpy.argmax', 'np.argmax', (['normals'], {'axis': '(0)'}), '(normals, axis=0)\n', (9153, 9170), True, 'import numpy as np\n'), ((9270, 9318), 'numpy.save', 'np.save', (['"""./singular.npy"""', 'self.gmm.covariances_'], {}), "('./singular.npy', self.gmm.covariances_)\n", (9277, 9318), True, 'import numpy as np\n'), ((10908, 10917), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (10914, 10917), True, 'import numpy as np\n'), ((15078, 15135), 'numpy.unique', 'np.unique', (['self.clusters_to_chunklets'], {'return_counts': '(True)'}), '(self.clusters_to_chunklets, return_counts=True)\n', (15087, 15135), True, 'import numpy as np\n'), ((2658, 2745), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['X', 'gmm.means_[g]', 'gmm.covariances_[g]'], {'allow_singular': '(True)'}), '(X, gmm.means_[g], gmm.covariances_[g],\n allow_singular=True)\n', (2681, 2745), False, 'from scipy.stats import entropy, multivariate_normal\n'), ((3461, 3489), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[idx] - obj)'], {}), '(X[idx] - obj)\n', (3475, 3489), True, 'import numpy as np\n'), ((8638, 8655), 'numpy.var', 'np.var', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (8644, 8655), True, 'import numpy as np\n'), ((8865, 8933), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['seeds'], {'mean': 'mi', 'cov': 'ci', 'allow_singular': '(True)'}), '(seeds, mean=mi, cov=ci, allow_singular=True)\n', (8888, 8933), False, 'from scipy.stats import entropy, multivariate_normal\n'), ((14961, 14976), 'numpy.argmax', 'np.argmax', (['resp'], {}), '(resp)\n', (14970, 14976), True, 'import numpy as np\n'), ((7441, 7465), 'numpy.any', 'np.any', (['allowed_clusters'], {}), '(allowed_clusters)\n', (7447, 7465), True, 'import numpy as np\n'), 
((15557, 15597), 'pandas.DataFrame', 'pd.DataFrame', (['self.clusters_to_chunklets'], {}), '(self.clusters_to_chunklets)\n', (15569, 15597), True, 'import pandas as pd\n')] |
import requests
from urllib import request, parse
import io
import dnnlib
import dnnlib.tflib as tflib
import pickle
import numpy as np
from projector import Projector
from PIL import Image
import re
import base64
from io import BytesIO
import boto3
import uuid
img_res = 256
def find_nearest(arr, val):
    """Return the element of nd array `arr` closest to the scalar `val`."""
    distances = np.abs(arr - val)
    return arr.flat[distances.argmin()]
def url_to_b64(url):
    """Fetch `url` and return its body as base64-encoded bytes."""
    response = requests.get(url)
    return base64.b64encode(response.content)
def url_to_image(url):
    """Download an image, flatten any transparency onto white, and
    letterbox-fit it to img_res x img_res, returning an RGB PIL image.
    """
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    white_bg_image = Image.new("RGBA", img.size, "WHITE")  # white rgba background
    white_bg_image.paste(img, (0, 0), img)
    # Fix: `resizeimage` was never imported (NameError at runtime); use this
    # module's own letterboxing helper, which already returns an RGB image.
    return resize(white_bg_image, img_res, img_res)
def base64_to_image(base64_str):
    """Decode a (possibly data-URI prefixed) base64 string into a PIL image."""
    payload = re.sub('^data:image/.+;base64,', '', base64_str)
    raw = base64.b64decode(payload)
    return Image.open(BytesIO(raw))
def pil_image_to_base64(img):
    """Serialize a PIL image to a base64-encoded JPEG string."""
    buffer = BytesIO()
    img.save(buffer, format='JPEG')
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
def resize(image_pil, width, height):
    '''
    Fit the image into a width x height canvas, preserving aspect ratio
    and centering it on a white background. Returns an RGB image.
    '''
    scale_w = width / image_pil.width
    scale_h = height / image_pil.height
    if scale_w < scale_h:
        # width is the limiting dimension
        new_w, new_h = width, round(scale_w * image_pil.height)
    else:
        # height is the limiting dimension
        new_w, new_h = round(scale_h * image_pil.width), height
    rgba = image_pil.convert('RGBA')
    shrunk = rgba.resize((new_w, new_h), Image.ANTIALIAS)
    canvas = Image.new('RGB', (width, height), "WHITE")
    offset = (round((width - new_w) / 2), round((height - new_h) / 2))
    # paste with the image itself as mask so transparency lands on white
    canvas.paste(shrunk, offset, shrunk)
    return canvas.convert('RGB')
def cropAndRezieBase64Img(b64Img):
    """Remove the background of a base64 image via the remove.bg API, then
    letterbox it to img_res x img_res and return it as a base64 JPEG string.

    :param b64Img: base64-encoded image, as str or bytes
    """
    data = parse.urlencode(
        {'image_file_b64': b64Img, 'crop': 'true', 'crop_margin': '10px', 'type': 'product',
         'format': 'jpg'}).encode()
    req = request.Request('https://api.remove.bg/v1.0/removebg', data=data)  # this will make the method "POST"
    # NOTE(review): API key should come from env/config, not source control.
    req.add_header('X-Api-Key', '<KEY>')
    # close the HTTP response deterministically (was leaked before)
    with request.urlopen(req) as resp:
        payload = resp.read()
    baseImage = Image.open(io.BytesIO(payload))
    baseImage = resize(baseImage, img_res, img_res)
    img = pil_image_to_base64(baseImage)
    return img
def mixLatents(network_pkl, model_name, imageId, inputs):
    """Style-mix a projected furniture image with pre-rendered base latents.

    Projects the user-supplied style image into StyleGAN latent space, then,
    for each camera angle, downloads the matching base latent from S3, swaps
    its style layers for the projected ones, synthesizes the mixed image and
    uploads it to S3 as `<imageId>-NNN.jpg`.

    :param network_pkl: URL/path of the StyleGAN pickle used for synthesis
    :param model_name: prefix of the base-latent files stored on S3
    :param imageId: id used to name the uploaded renders
    :param inputs: [style image (base64), width, length, height] in inches
    """
    thetaAngles = [40, 30, 20, 10, 0, 350, 340, 330, 320];
    # catalogs of supported furniture dimensions (inches)
    widthInches = np.array([52, 56, 60, 64, 68, 72, 74, 78, 82, 86, 90]);
    lengthInches = np.array([36]);
    heightInches = np.array([32]);
    styleB64img = inputs[0]
    # snap the requested dimensions to the nearest supported value
    widthInches = find_nearest(widthInches, inputs[1])
    lengthInches = find_nearest(lengthInches, inputs[2])
    heightInches = find_nearest(heightInches, inputs[3])
    angle = 15  # NOTE(review): unused — immediately shadowed by the loop below
    proj = Projector()
    angleLatentPath = 'out/base'
    styleLatentPath = 'out/style'
    # inch -> meter conversions (not used further in this function)
    widthInMeter = widthInches * 0.0254
    depthInMeter = lengthInches * 0.0254
    heightInMeter = heightInches * 0.0254
    # NOTE(review): hard-coded AWS credentials — move to env/config/IAM role.
    aws_key = 'SOME_KEY'
    aws_secret = 'SOME_SECRET'
    session = boto3.Session(
        aws_access_key_id=aws_key,
        aws_secret_access_key=aws_secret,
    )
    s3 = boto3.client('s3', aws_access_key_id=aws_key,
                      aws_secret_access_key=aws_secret)
    s3Resource = session.resource('s3')
    # baseB64img = cropAndRezieBase64Img(url_to_b64(baseUrl))
    # background removal + letterboxing, then projection into latent space
    styleB64img = cropAndRezieBase64Img(styleB64img)
    # proj.project2('network-snapshot-009800.pkl', baseB64img, angleLatentPath, False, 1)
    proj.project2('network-snapshot-009800.pkl', styleB64img, styleLatentPath, False, 1)
    Gs_syn_kwargs = {
        'output_transform': dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True),
        'randomize_noise': False,
        'minibatch_size': 4
    }
    tflib.init_tf()
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)
    # model_name = 'armless_sofa'
    rotatedImagesB64 = []
    for i, angle in enumerate(thetaAngles):
        i += 1  # 1-based index used in progress output and render file names
        print(f'{i}/{len(thetaAngles)}')
        baseLatent = f'{model_name}_{angle}_{widthInches}_{lengthInches}_{heightInches}_dlatents.npz'
        # baseLatent = 'armless_sofa_0_56_36_32_dlatents.npz'
        s3.download_file('sofa-latents', baseLatent,
                         baseLatent)
        with np.load(baseLatent) as latent1:
            with np.load(styleLatentPath + '/dlatents.npz') as latent2:
                lat1 = latent1['dlatents']
                lat2 = latent2['dlatents']
                # style layers taken from the projected (user) image
                col_styles = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
                # col_styles = [10,11,12,13,14,15,16,17]
                # col_styles = [10,11,12,13]
                lat1[0][col_styles] = lat2[0][col_styles]
                image = Gs.components.synthesis.run(lat1, **Gs_syn_kwargs)[0]
                mixImg = Image.fromarray(image, 'RGB')
                respB64 = pil_image_to_base64(mixImg)
                rotatedImagesB64.append(respB64)
                s3ImageName = imageId + f'-{i:03}' + '.jpg'
                print(s3ImageName)
                # upload the decoded JPEG to the renders bucket
                obj = s3Resource.Object('homely-demo-renders', s3ImageName)
                obj.put(Body=base64.b64decode(respB64))
    print(imageId)
return {'id': imageId} | [
"boto3.client",
"PIL.Image.new",
"urllib.request.Request",
"base64.b64encode",
"io.BytesIO",
"numpy.array",
"urllib.parse.urlencode",
"urllib.request.urlopen",
"numpy.abs",
"projector.Projector",
"dnnlib.util.open_url",
"boto3.Session",
"pickle.load",
"requests.get",
"dnnlib.tflib.init_t... | [((552, 569), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (564, 569), False, 'import requests\n'), ((640, 676), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'img.size', '"""WHITE"""'], {}), "('RGBA', img.size, 'WHITE')\n", (649, 676), False, 'from PIL import Image\n'), ((960, 1008), 're.sub', 're.sub', (['"""^data:image/.+;base64,"""', '""""""', 'base64_str'], {}), "('^data:image/.+;base64,', '', base64_str)\n", (966, 1008), False, 'import re\n'), ((1025, 1054), 'base64.b64decode', 'base64.b64decode', (['base64_data'], {}), '(base64_data)\n', (1041, 1054), False, 'import base64\n'), ((1072, 1090), 'io.BytesIO', 'BytesIO', (['byte_data'], {}), '(byte_data)\n', (1079, 1090), False, 'from io import BytesIO\n'), ((1101, 1123), 'PIL.Image.open', 'Image.open', (['image_data'], {}), '(image_data)\n', (1111, 1123), False, 'from PIL import Image\n'), ((1191, 1200), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1198, 1200), False, 'from io import BytesIO\n'), ((1980, 2022), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '"""WHITE"""'], {}), "('RGB', (width, height), 'WHITE')\n", (1989, 2022), False, 'from PIL import Image\n'), ((2545, 2610), 'urllib.request.Request', 'request.Request', (['"""https://api.remove.bg/v1.0/removebg"""'], {'data': 'data'}), "('https://api.remove.bg/v1.0/removebg', data=data)\n", (2560, 2610), False, 'from urllib import request, parse\n'), ((3025, 3079), 'numpy.array', 'np.array', (['[52, 56, 60, 64, 68, 72, 74, 78, 82, 86, 90]'], {}), '([52, 56, 60, 64, 68, 72, 74, 78, 82, 86, 90])\n', (3033, 3079), True, 'import numpy as np\n'), ((3100, 3114), 'numpy.array', 'np.array', (['[36]'], {}), '([36])\n', (3108, 3114), True, 'import numpy as np\n'), ((3135, 3149), 'numpy.array', 'np.array', (['[32]'], {}), '([32])\n', (3143, 3149), True, 'import numpy as np\n'), ((3375, 3386), 'projector.Projector', 'Projector', ([], {}), '()\n', (3384, 3386), False, 'from projector import Projector\n'), ((3650, 
3724), 'boto3.Session', 'boto3.Session', ([], {'aws_access_key_id': 'aws_key', 'aws_secret_access_key': 'aws_secret'}), '(aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)\n', (3663, 3724), False, 'import boto3\n'), ((3757, 3836), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': 'aws_key', 'aws_secret_access_key': 'aws_secret'}), "('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)\n", (3769, 3836), False, 'import boto3\n'), ((4378, 4393), 'dnnlib.tflib.init_tf', 'tflib.init_tf', ([], {}), '()\n', (4391, 4393), True, 'import dnnlib.tflib as tflib\n'), ((591, 616), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (598, 616), False, 'from io import BytesIO\n'), ((2758, 2778), 'io.BytesIO', 'io.BytesIO', (['response'], {}), '(response)\n', (2768, 2778), False, 'import io\n'), ((4403, 4436), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['network_pkl'], {}), '(network_pkl)\n', (4423, 4436), False, 'import dnnlib\n'), ((4465, 4480), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4476, 4480), False, 'import pickle\n'), ((382, 399), 'numpy.abs', 'np.abs', (['(arr - val)'], {}), '(arr - val)\n', (388, 399), True, 'import numpy as np\n'), ((485, 502), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (497, 502), False, 'import requests\n'), ((1302, 1329), 'base64.b64encode', 'base64.b64encode', (['byte_data'], {}), '(byte_data)\n', (1318, 1329), False, 'import base64\n'), ((2389, 2511), 'urllib.parse.urlencode', 'parse.urlencode', (["{'image_file_b64': b64Img, 'crop': 'true', 'crop_margin': '10px', 'type':\n 'product', 'format': 'jpg'}"], {}), "({'image_file_b64': b64Img, 'crop': 'true', 'crop_margin':\n '10px', 'type': 'product', 'format': 'jpg'})\n", (2404, 2511), False, 'from urllib import request, parse\n'), ((2703, 2723), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (2718, 2723), False, 'from urllib import request, parse\n'), ((4910, 4929), 
'numpy.load', 'np.load', (['baseLatent'], {}), '(baseLatent)\n', (4917, 4929), True, 'import numpy as np\n'), ((4959, 5001), 'numpy.load', 'np.load', (["(styleLatentPath + '/dlatents.npz')"], {}), "(styleLatentPath + '/dlatents.npz')\n", (4966, 5001), True, 'import numpy as np\n'), ((5430, 5459), 'PIL.Image.fromarray', 'Image.fromarray', (['image', '"""RGB"""'], {}), "(image, 'RGB')\n", (5445, 5459), False, 'from PIL import Image\n'), ((5763, 5788), 'base64.b64decode', 'base64.b64decode', (['respB64'], {}), '(respB64)\n', (5779, 5788), False, 'import base64\n')] |
from distributions.distribution_factory import create_distribution
from objectives.objective_factory import create_objective
from samplers.sampler_factory import create_sampler
from algorithms.algo_factory import create_algorithm
import os
import argparse
import pprint as pp
import json
import tensorflow as tf
import numpy as np
def run_experiments(config):
    """Run one experiment per configured seed and collect the results.

    :param config: experiment configuration dict
    :return: dict with the run configuration under "config" and a
             per-seed results dict under "results"
    """
    dimension = config["dimensions"]
    max_eval_per_run = config["max_eval_per_run"]
    outputs_config_dict = {name: config[name]
                           for name in ("objective", "distribution",
                                        "algo", "sampler")}
    per_seed_results = {}
    for seed in config["seeds"]:
        per_seed_results[seed] = run_one_experiment(
            config, dimension, seed, max_eval_per_run)
    return {"config": outputs_config_dict, "results": per_seed_results}
def run_one_experiment(config, output_size, seed, max_evals):
    """Run a single seeded optimization run.

    Builds the distribution/sampler/algorithm/objective from `config`, then
    alternates sampling and fitting until `max_evals` objective evaluations
    have been spent.

    :param config: experiment configuration dict
    :param output_size: problem dimensionality passed to the factories
    :param seed: RNG seed applied to both numpy and TensorFlow
    :param max_evals: budget of objective evaluations
    :return: dict mapping cumulative evaluation count -> diagnostic dict
    """
    # seed both RNGs and start from a fresh TF1 graph for reproducibility
    np.random.seed(seed)
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    session = tf.Session()
    # NOTE(review): the session is never closed; consider `with tf.Session()`.
    with session.as_default():
        distribution_config = config["distribution"]
        distribution = create_distribution(distribution_config, output_size)
        sampler_config = config["sampler"]
        sampler = create_sampler(sampler_config, distribution)
        algo_config = config["algo"]
        algo = create_algorithm(algo_config, distribution)
        objective_config = config["objective"]
        objective = create_objective(objective_config, output_size)
        session.run(tf.global_variables_initializer())
        results = dict()
        evals = 0
        iters = 0
        while evals<max_evals:
            samples = dict()
            # draw a batch of candidates; new_evals is its evaluation cost
            queries, new_evals = sampler.sample()
            evals += new_evals
            scores = objective.f(queries)
            samples["data"] = queries
            samples["cost"] = scores
            diagnostic = algo.fit(samples)
            diagnostic["evals"] = evals
            print_diagnostics(iters, diagnostic)
            iters += 1
            # keyed by cumulative evals — assumes new_evals > 0 each step,
            # otherwise earlier entries would be overwritten (TODO confirm)
            results[evals] = diagnostic
        return results
def print_diagnostics(iteration, diagnostics):
    """Pretty-print one iteration's diagnostic key/value pairs to stdout."""
    separator = "================="
    print(f"Iteration {iteration}")
    print(separator)
    for key, value in diagnostics.items():
        print(key, ":", value)
    print(separator)
    print("\n")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ES with Invertible Networks')
    parser.add_argument('--config_name',
                        help='Name for the configuration file',
                        type=str)
    parser.add_argument('--output_name',
                        help='Name for the result file',
                        type=str)
    args = vars(parser.parse_args())
    pp.pprint(args)
    config_file_dir = 'config'
    # exist_ok avoids the check-then-create race of isdir() + makedirs()
    os.makedirs(config_file_dir, exist_ok=True)
    output_file_dir = 'logs'
    os.makedirs(output_file_dir, exist_ok=True)
    # load the experiment configuration
    config_file = os.path.join(config_file_dir, args["config_name"])
    with open(config_file, 'r') as f:
        config = json.load(f)
    results = run_experiments(config)
    # persist per-seed diagnostics
    output_file = os.path.join(output_file_dir, args["output_name"])
    with open(output_file, 'w') as f:
        json.dump(results, f)
| [
"samplers.sampler_factory.create_sampler",
"tensorflow.reset_default_graph",
"argparse.ArgumentParser",
"os.makedirs",
"tensorflow.Session",
"json.dump",
"os.path.join",
"algorithms.algo_factory.create_algorithm",
"tensorflow.global_variables_initializer",
"os.path.isdir",
"numpy.random.seed",
... | [((1139, 1159), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1153, 1159), True, 'import numpy as np\n'), ((1164, 1188), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1186, 1188), True, 'import tensorflow as tf\n'), ((1193, 1217), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (1211, 1217), True, 'import tensorflow as tf\n'), ((1232, 1244), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1242, 1244), True, 'import tensorflow as tf\n'), ((2604, 2670), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ES with Invertible Networks"""'}), "(description='ES with Invertible Networks')\n", (2627, 2670), False, 'import argparse\n'), ((2983, 2998), 'pprint.pprint', 'pp.pprint', (['args'], {}), '(args)\n', (2992, 2998), True, 'import pprint as pp\n'), ((3282, 3329), 'os.path.join', 'os.path.join', (['config_file_dir', 'config_file_name'], {}), '(config_file_dir, config_file_name)\n', (3294, 3329), False, 'import os\n'), ((3498, 3545), 'os.path.join', 'os.path.join', (['output_file_dir', 'output_file_name'], {}), '(output_file_dir, output_file_name)\n', (3510, 3545), False, 'import os\n'), ((1353, 1406), 'distributions.distribution_factory.create_distribution', 'create_distribution', (['distribution_config', 'output_size'], {}), '(distribution_config, output_size)\n', (1372, 1406), False, 'from distributions.distribution_factory import create_distribution\n'), ((1469, 1513), 'samplers.sampler_factory.create_sampler', 'create_sampler', (['sampler_config', 'distribution'], {}), '(sampler_config, distribution)\n', (1483, 1513), False, 'from samplers.sampler_factory import create_sampler\n'), ((1571, 1614), 'algorithms.algo_factory.create_algorithm', 'create_algorithm', (['algo_config', 'distribution'], {}), '(algo_config, distribution)\n', (1587, 1614), False, 'from algorithms.algo_factory import create_algorithm\n'), ((1683, 1730), 
'objectives.objective_factory.create_objective', 'create_objective', (['objective_config', 'output_size'], {}), '(objective_config, output_size)\n', (1699, 1730), False, 'from objectives.objective_factory import create_objective\n'), ((3042, 3072), 'os.path.isdir', 'os.path.isdir', (['config_file_dir'], {}), '(config_file_dir)\n', (3055, 3072), False, 'import os\n'), ((3082, 3110), 'os.makedirs', 'os.makedirs', (['config_file_dir'], {}), '(config_file_dir)\n', (3093, 3110), False, 'import os\n'), ((3151, 3181), 'os.path.isdir', 'os.path.isdir', (['output_file_dir'], {}), '(output_file_dir)\n', (3164, 3181), False, 'import os\n'), ((3191, 3219), 'os.makedirs', 'os.makedirs', (['output_file_dir'], {}), '(output_file_dir)\n', (3202, 3219), False, 'import os\n'), ((3385, 3397), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3394, 3397), False, 'import json\n'), ((3592, 3613), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (3601, 3613), False, 'import json\n'), ((1752, 1785), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1783, 1785), True, 'import tensorflow as tf\n')] |
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import os
import argparse
import time
from tqdm import tqdm
from utils.setup import get_model,get_data_loader
# -------- command-line configuration --------
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default='checkpoints', type=str,
                    help="directory of checkpoints")
parser.add_argument('--data_dir', default='data', type=str,
                    help="directory of data")
parser.add_argument('--dataset', default='imagenette', type=str,
                    choices=('imagenette', 'imagenet', 'cifar',
                             'cifar100', 'svhn', 'flower102'),
                    help="dataset")
parser.add_argument("--model", default='vit_base_patch16_224', type=str,
                    help="model name")
parser.add_argument("--num_img", default=-1, type=int,
                    help="number of randomly selected images for this experiment (-1: using the all images)")
args = parser.parse_args()

DATASET = args.dataset
MODEL_DIR = os.path.join('.', args.model_dir)
DATA_DIR = os.path.join(args.data_dir, DATASET)
MODEL_NAME = args.model
NUM_IMG = args.num_img

# get model and data loader
model = get_model(MODEL_NAME, DATASET, MODEL_DIR)
val_loader, NUM_IMG, _ = get_data_loader(DATASET, DATA_DIR, model,
                                       batch_size=16, num_img=NUM_IMG,
                                       train=False)
# -------- inference --------
# Fall back to CPU when no GPU is present (previously hard-coded 'cuda').
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
model.eval()
cudnn.benchmark = True

accuracy_list = []
time_list = []
# no_grad: pure inference — same outputs, no autograd bookkeeping or memory
with torch.no_grad():
    for data, labels in tqdm(val_loader):
        data, labels = data.to(device), labels.to(device)
        # NOTE(review): on GPU this wall-clock timing does not synchronize
        # the device, so per-batch times are approximate.
        start = time.time()
        output_clean = model(data)
        end = time.time()
        time_list.append(end - start)
        # number of correctly classified samples in this batch
        acc_clean = torch.sum(torch.argmax(output_clean, dim=1) == labels).item()
        accuracy_list.append(acc_clean)

print("Test accuracy:", np.sum(accuracy_list) / NUM_IMG)
print('Per-example inference time:', np.sum(time_list) / NUM_IMG)
| [
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"numpy.sum",
"utils.setup.get_model",
"utils.setup.get_data_loader",
"time.time",
"torch.argmax"
] | [((193, 218), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (216, 218), False, 'import argparse\n'), ((847, 880), 'os.path.join', 'os.path.join', (['"""."""', 'args.model_dir'], {}), "('.', args.model_dir)\n", (859, 880), False, 'import os\n'), ((889, 925), 'os.path.join', 'os.path.join', (['args.data_dir', 'DATASET'], {}), '(args.data_dir, DATASET)\n', (901, 925), False, 'import os\n'), ((1008, 1049), 'utils.setup.get_model', 'get_model', (['MODEL_NAME', 'DATASET', 'MODEL_DIR'], {}), '(MODEL_NAME, DATASET, MODEL_DIR)\n', (1017, 1049), False, 'from utils.setup import get_model, get_data_loader\n'), ((1071, 1161), 'utils.setup.get_data_loader', 'get_data_loader', (['DATASET', 'DATA_DIR', 'model'], {'batch_size': '(16)', 'num_img': 'NUM_IMG', 'train': '(False)'}), '(DATASET, DATA_DIR, model, batch_size=16, num_img=NUM_IMG,\n train=False)\n', (1086, 1161), False, 'from utils.setup import get_model, get_data_loader\n'), ((1282, 1298), 'tqdm.tqdm', 'tqdm', (['val_loader'], {}), '(val_loader)\n', (1286, 1298), False, 'from tqdm import tqdm\n'), ((1356, 1367), 'time.time', 'time.time', ([], {}), '()\n', (1365, 1367), False, 'import time\n'), ((1401, 1412), 'time.time', 'time.time', ([], {}), '()\n', (1410, 1412), False, 'import time\n'), ((1596, 1617), 'numpy.sum', 'np.sum', (['accuracy_list'], {}), '(accuracy_list)\n', (1602, 1617), True, 'import numpy as np\n'), ((1663, 1680), 'numpy.sum', 'np.sum', (['time_list'], {}), '(time_list)\n', (1669, 1680), True, 'import numpy as np\n'), ((1463, 1496), 'torch.argmax', 'torch.argmax', (['output_clean'], {'dim': '(1)'}), '(output_clean, dim=1)\n', (1475, 1496), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
def build_generator(latent_dim: int):
    """
    Build generator network
    :param latent_dim: latent vector size
    :return: Keras Model mapping a latent vector z to a 28x28x1 image
    """
    # Fully connected stack widening 128 -> 256 -> 512, each Dense layer
    # followed by LeakyReLU and BatchNormalization
    model = Sequential([
        Dense(128, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(256),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        # tanh output matches the [-1, 1] rescaling applied in train()
        Dense(np.prod((28, 28, 1)), activation='tanh'),
        # reshape to MNIST image size
        Reshape((28, 28, 1))
    ])
    model.summary()
    # the latent input vector z
    z = Input(shape=(latent_dim,))
    generated = model(z)
    # build model from the input and output
    return Model(z, generated)
def build_discriminator():
    """
    Build discriminator network: flattens a 28x28x1 image and classifies
    it as real or generated via a sigmoid output.
    """
    net = Sequential(name='discriminator')
    net.add(Flatten(input_shape=(28, 28, 1)))
    # two shrinking fully connected layers, each with LeakyReLU
    for units in (256, 128):
        net.add(Dense(units))
        net.add(LeakyReLU(alpha=0.2))
    # single-unit sigmoid head: probability that the input is real
    net.add(Dense(1, activation='sigmoid'))
    net.summary()
    image = Input(shape=(28, 28, 1))
    return Model(image, net(image))
def train(generator, discriminator, combined, steps, batch_size):
    """
    Train the GAN system
    :param generator: generator
    :param discriminator: discriminator
    :param combined: stacked generator and discriminator
    we'll use the combined network when we train the generator
    :param steps: number of alternating steps for training
    :param batch_size: size of the minibatch
    """
    # Load the dataset (labels are unused; GAN training is unsupervised)
    (x_train, _), _ = mnist.load_data()
    # Rescale in [-1, 1] interval (matches the generator's tanh output)
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    # add trailing channel axis: (n, 28, 28) -> (n, 28, 28, 1)
    x_train = np.expand_dims(x_train, axis=-1)
    # Discriminator ground truths: 1 for real images, 0 for generated ones
    real = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    latent_dim = generator.input_shape[1]
    for step in range(steps):
        # Train the discriminator
        # Select a random batch of images
        real_images = x_train[np.random.randint(0, x_train.shape[0], batch_size)]
        # Random batch of noise
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        # Generate a batch of new images
        generated_images = generator.predict(noise)
        # Train the discriminator
        discriminator_real_loss = discriminator.train_on_batch(real_images, real)
        discriminator_fake_loss = discriminator.train_on_batch(generated_images, fake)
        # average the [loss, accuracy] pairs of the real and fake half-batches
        discriminator_loss = 0.5 * np.add(discriminator_real_loss, discriminator_fake_loss)
        # Train the generator
        # random latent vector z
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        # Train the generator
        # Note that we use the "valid" labels for the generated images
        # That's because we try to maximize the discriminator loss
        generator_loss = combined.train_on_batch(noise, real)
        # Display progress
        print("%d [Discriminator loss: %.4f%%, acc.: %.2f%%] [Generator loss: %.4f%%]" %
              (step, discriminator_loss[0], 100 * discriminator_loss[1], generator_loss))
def plot_generated_images(generator):
    """
    Display a nxn 2D manifold of generated digits
    :param generator: the generator
    """
    n = 10
    digit_size = 28
    latent_dim = generator.input_shape[1]
    # sample n*n random latent vectors and decode them all at once
    z = np.random.normal(0, 1, (n * n, latent_dim))
    images = generator.predict(z)
    # paste every digit into one big canvas, row-major
    canvas = np.zeros((digit_size * n, digit_size * n))
    for idx in range(n * n):
        row, col = divmod(idx, n)
        tile = np.reshape(images[idx], (28, 28))
        canvas[row * digit_size:(row + 1) * digit_size,
               col * digit_size:(col + 1) * digit_size] = tile
    # render the canvas
    plt.figure(figsize=(6, 5))
    plt.axis('off')
    plt.imshow(canvas, cmap='Greys_r')
    plt.show()
if __name__ == '__main__':
    print("GAN for new MNIST images with Keras")
    latent_dim = 64
    # Build and compile the discriminator
    discriminator = build_discriminator()
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=Adam(lr=0.0002, beta_1=0.5),
                          metrics=['accuracy'])
    # Build the generator (trained only through the combined model,
    # so it is never compiled on its own)
    generator = build_generator(latent_dim)
    # Generator input z
    z = Input(shape=(latent_dim,))
    generated_image = generator(z)
    # Only train the generator for the combined model
    # (flag is set before `combined` is compiled below; the standalone
    # discriminator was already compiled above and keeps training)
    discriminator.trainable = False
    # The discriminator takes generated image as input and determines validity
    real_or_fake = discriminator(generated_image)
    # Stack the generator and discriminator in a combined model
    # Trains the generator to deceive the discriminator
    combined = Model(z, real_or_fake)
    combined.compile(loss='binary_crossentropy',
                     optimizer=Adam(lr=0.0002, beta_1=0.5))
    # train the GAN system
    train(generator=generator,
          discriminator=discriminator,
          combined=combined,
          steps=15000,
          batch_size=128)
    # display some random generated images
    plot_generated_images(generator)
| [
"numpy.prod",
"keras.layers.Dense",
"matplotlib.pyplot.imshow",
"numpy.reshape",
"keras.datasets.mnist.load_data",
"keras.models.Model",
"keras.layers.advanced_activations.LeakyReLU",
"matplotlib.pyplot.axis",
"numpy.random.normal",
"keras.optimizers.Adam",
"numpy.ones",
"keras.layers.Flatten"... | [((939, 965), 'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)'}), '(shape=(latent_dim,))\n', (944, 965), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1047, 1066), 'keras.models.Model', 'Model', (['z', 'generated'], {}), '(z, generated)\n', (1052, 1066), False, 'from keras.models import Sequential, Model\n'), ((1415, 1439), 'keras.layers.Input', 'Input', ([], {'shape': '(28, 28, 1)'}), '(shape=(28, 28, 1))\n', (1420, 1439), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1478, 1498), 'keras.models.Model', 'Model', (['image', 'output'], {}), '(image, output)\n', (1483, 1498), False, 'from keras.models import Sequential, Model\n'), ((1950, 1967), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1965, 1967), False, 'from keras.datasets import mnist\n'), ((2076, 2108), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(-1)'}), '(x_train, axis=-1)\n', (2090, 2108), True, 'import numpy as np\n'), ((2155, 2179), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2162, 2179), True, 'import numpy as np\n'), ((2191, 2216), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2199, 2216), True, 'import numpy as np\n'), ((3720, 3762), 'numpy.zeros', 'np.zeros', (['(digit_size * n, digit_size * n)'], {}), '((digit_size * n, digit_size * n))\n', (3728, 3762), True, 'import numpy as np\n'), ((3857, 3900), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n * n, latent_dim)'], {}), '(0, 1, (n * n, latent_dim))\n', (3873, 3900), True, 'import numpy as np\n'), ((4313, 4339), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (4323, 4339), True, 'import matplotlib.pyplot as plt\n'), ((4344, 4359), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4352, 4359), True, 'import matplotlib.pyplot as 
plt\n'), ((4364, 4398), 'matplotlib.pyplot.imshow', 'plt.imshow', (['figure'], {'cmap': '"""Greys_r"""'}), "(figure, cmap='Greys_r')\n", (4374, 4398), True, 'import matplotlib.pyplot as plt\n'), ((4403, 4413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4411, 4413), True, 'import matplotlib.pyplot as plt\n'), ((4869, 4895), 'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)'}), '(shape=(latent_dim,))\n', (4874, 4895), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((5288, 5310), 'keras.models.Model', 'Model', (['z', 'real_or_fake'], {}), '(z, real_or_fake)\n', (5293, 5310), False, 'from keras.models import Sequential, Model\n'), ((2499, 2547), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, latent_dim)'], {}), '(0, 1, (batch_size, latent_dim))\n', (2515, 2547), True, 'import numpy as np\n'), ((3018, 3066), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, latent_dim)'], {}), '(0, 1, (batch_size, latent_dim))\n', (3034, 3066), True, 'import numpy as np\n'), ((457, 489), 'keras.layers.Dense', 'Dense', (['(128)'], {'input_dim': 'latent_dim'}), '(128, input_dim=latent_dim)\n', (462, 489), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((499, 519), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (508, 519), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((529, 561), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (547, 561), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((571, 581), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (576, 581), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((591, 611), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), 
'(alpha=0.2)\n', (600, 611), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((621, 653), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (639, 653), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((663, 673), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (668, 673), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((683, 703), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (692, 703), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((713, 745), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (731, 745), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((849, 869), 'keras.layers.Reshape', 'Reshape', (['(28, 28, 1)'], {}), '((28, 28, 1))\n', (856, 869), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1178, 1210), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(28, 28, 1)'}), '(input_shape=(28, 28, 1))\n', (1185, 1210), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1220, 1230), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (1225, 1230), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1240, 1260), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1249, 1260), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1270, 1280), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (1275, 1280), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((1290, 1310), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', 
(1299, 1310), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1320, 1350), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1325, 1350), False, 'from keras.layers import BatchNormalization, Input, Dense, Reshape, Flatten\n'), ((2398, 2448), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x_train.shape[0]', 'batch_size'], {}), '(0, x_train.shape[0], batch_size)\n', (2415, 2448), True, 'import numpy as np\n'), ((2881, 2937), 'numpy.add', 'np.add', (['discriminator_real_loss', 'discriminator_fake_loss'], {}), '(discriminator_real_loss, discriminator_fake_loss)\n', (2887, 2937), True, 'import numpy as np\n'), ((4235, 4284), 'numpy.reshape', 'np.reshape', (['generated_images[i * n + j]', '(28, 28)'], {}), '(generated_images[i * n + j], (28, 28))\n', (4245, 4284), True, 'import numpy as np\n'), ((4688, 4715), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (4692, 4715), False, 'from keras.optimizers import Adam\n'), ((5391, 5418), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (5395, 5418), False, 'from keras.optimizers import Adam\n'), ((761, 781), 'numpy.prod', 'np.prod', (['(28, 28, 1)'], {}), '((28, 28, 1))\n', (768, 781), True, 'import numpy as np\n')] |
"""Generate plots for the annotations of the blame verifier."""
import abc
import logging
import typing as tp
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
from matplotlib import style
from sklearn import preprocessing
import varats.paper_mgmt.paper_config as PC
from varats.data.databases.blame_verifier_report_database import (
BlameVerifierReportDatabase,
OptLevel,
)
from varats.mapping.commit_map import get_commit_map
from varats.paper.case_study import CaseStudy
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plots.case_study_overview import SUCCESS_COLOR, FAILED_COLOR
from varats.utils.git_util import FullCommitHash
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
def _get_named_df_for_case_study(
    case_study: CaseStudy, opt_level: OptLevel
) -> tp.Optional[tp.Dict[str, tp.Union[str, pd.DataFrame]]]:
    """
    Load the verifier-report dataframe for one case study.

    :param case_study: case study to load the report data for
    :param opt_level: optimization level the rows are filtered by
    :return: dict with keys ``project_name`` and ``dataframe``; ``None``
        when this project has no data but other case studies may still be
        plotted (multi-case-study plot)
    :raises PlotDataEmpty: when a single-case-study plot has no data
    """
    project_name = case_study.project_name
    commit_map = get_commit_map(project_name)
    verifier_plot_df = BlameVerifierReportDatabase.get_data_for_project(
        project_name, [
            "revision", "time_id", "opt_level", "total", "successful", "failed",
            "undetermined"
        ], commit_map, case_study
    )
    # Filter results for current optimization level
    verifier_plot_df = verifier_plot_df.loc[verifier_plot_df['opt_level'] ==
                                            opt_level.value]
    if verifier_plot_df.empty or len(
        np.unique(verifier_plot_df['revision'])
    ) == 0:
        if _is_multi_cs_plot():
            # Skip this project but keep plotting the remaining ones.
            return None
        # Need more than one data point
        LOG.warning(
            f"No data found for project {project_name} with optimization level "
            f"{opt_level.value}"
        )
        raise PlotDataEmpty
    named_verifier_df = {
        "project_name": project_name,
        "dataframe": verifier_plot_df
    }
    return named_verifier_df
def _extract_data_from_named_dataframe(
named_verifier_plot_df: tp.Dict[str, tp.Union[str, pd.DataFrame]]
) -> tp.Tuple[str, tp.Dict[str, tp.Any]]:
current_verifier_plot_df = tp.cast(
pd.DataFrame, named_verifier_plot_df['dataframe']
)
current_verifier_plot_df.sort_values(by=['time_id'], inplace=True)
revisions = current_verifier_plot_df['revision'].to_numpy()
successes = current_verifier_plot_df['successful'].to_numpy()
failures = current_verifier_plot_df['failed'].to_numpy()
total = current_verifier_plot_df['total'].to_numpy()
success_ratio = successes / total
failure_ratio = failures / total
average_success_ratio = round((success_ratio.sum() / success_ratio.size) *
100, 2)
average_failure_ratio = round((failure_ratio.sum() / failure_ratio.size) *
100, 2)
result_data = named_verifier_plot_df['project_name'], {
"revisions": revisions,
"success_ratio": success_ratio,
"failure_ratio": failure_ratio,
"average_success_ratio": average_success_ratio,
"average_failure_ratio": average_failure_ratio
}
return result_data
def _load_all_named_dataframes(
    current_config: PC.PaperConfig, opt_level: OptLevel
) -> tp.List[tp.Dict[str, tp.Union[str, pd.DataFrame]]]:
    """
    Collect the named verifier dataframes for every case study of the
    paper config, ordered alphabetically by project name; case studies
    without data are silently skipped.
    """
    case_studies = sorted(
        current_config.get_all_case_studies(), key=lambda cs: cs.project_name
    )
    collected: tp.List[tp.Dict[str, tp.Union[str, pd.DataFrame]]] = []
    for case_study in case_studies:
        named_df = _get_named_df_for_case_study(case_study, opt_level)
        if named_df:
            collected.append(named_df)
    return collected
def _verifier_plot(
    opt_level: OptLevel,
    extra_plot_cfg: tp.Optional[tp.Dict[str, tp.Any]] = None
) -> None:
    """
    Draw the verifier success/failure plot for all case studies of the
    currently selected paper config.

    :param opt_level: optimization level the report data is filtered by
    :param extra_plot_cfg: overrides for the default plot configuration
        (keys: ``legend_size``, ``legend_visible``, ``legend_title``,
        ``fig_title``)
    :raises PlotDataEmpty: when no case study provides any data
    """
    current_config = PC.get_paper_config()
    plot_cfg = {
        'legend_size': 8,
        'legend_visible': True,
        'legend_title': 'MISSING legend_title',
        'fig_title': 'MISSING figure title',
    }
    if extra_plot_cfg is not None:
        plot_cfg.update(extra_plot_cfg)
    # The project name of the dataframes is stored to remember the
    # correct title of the subplots
    named_verifier_plot_df_list = _load_all_named_dataframes(
        current_config, opt_level
    )
    final_plot_data: tp.List[tp.Tuple[str, tp.Dict[str, tp.Any]]] = []
    for named_dataframe in named_verifier_plot_df_list:
        final_plot_data.append(
            _extract_data_from_named_dataframe(named_dataframe)
        )
    if not final_plot_data:
        raise PlotDataEmpty
    if _is_multi_cs_plot() and len(final_plot_data) > 1:
        _verifier_plot_multiple(plot_cfg, final_plot_data)
    else:
        # Pass the only list item of the plot data
        _verifier_plot_single(plot_cfg, final_plot_data[0])
def _is_multi_cs_plot() -> bool:
    """Return True if the current paper config has more than one case study."""
    case_studies = PC.get_paper_config().get_all_case_studies()
    return len(case_studies) > 1
def _verifier_plot_single(
    plot_cfg: tp.Dict[str, tp.Any], plot_data: tp.Tuple[str, tp.Dict[str,
                                                                     tp.Any]]
) -> None:
    """
    Plot one project's success/failure ratios per revision as a stacked
    area chart.

    :param plot_cfg: plot configuration (figure title, legend settings)
    :param plot_data: tuple of the project name and the plot-data dict
        produced by :func:`_extract_data_from_named_dataframe`
    """
    fig, main_axis = plt.subplots()
    fig.suptitle(
        str(plot_cfg['fig_title']) + f' - Project {plot_data[0]}', fontsize=8
    )
    main_axis.grid(linestyle='--')
    main_axis.set_xlabel('Revisions')
    main_axis.set_ylabel('Success/Failure rate in %')
    # ratios are in [0, 1]; render the y axis as percentages
    main_axis.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
    fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
    # stacked areas: successes on the bottom, failures on top (sums to 100%)
    main_axis.stackplot(
        plot_data[1]["revisions"],
        plot_data[1]["success_ratio"],
        plot_data[1]["failure_ratio"],
        labels=[
            f"successes(\u2205 {plot_data[1]['average_success_ratio']}%)",
            f"failures(\u2205 {plot_data[1]['average_failure_ratio']}%)"
        ],
        colors=[SUCCESS_COLOR, FAILED_COLOR],
        alpha=0.5
    )
    # slant the revision labels so they stay readable
    plt.setp(
        main_axis.get_xticklabels(), rotation=30, horizontalalignment='right'
    )
    legend = main_axis.legend(
        title=plot_cfg['legend_title'],
        loc='upper left',
        prop={
            'size': plot_cfg['legend_size'],
            'family': 'monospace'
        }
    )
    legend.set_visible(plot_cfg['legend_visible'])
    plt.setp(
        legend.get_title(),
        fontsize=plot_cfg['legend_size'],
        family='monospace'
    )
def _verifier_plot_multiple(
    plot_cfg: tp.Dict[str, tp.Any],
    final_plot_data: tp.List[tp.Tuple[str, tp.Dict[str, tp.Any]]]
) -> None:
    """
    Plot the success ratio of several projects as one line per project,
    with the revision axis of every project normalized to [0, 1] so that
    projects with different revision counts share one x axis.

    :param plot_cfg: plot configuration (figure title, legend settings)
    :param final_plot_data: list of (project name, plot-data dict) tuples
    """
    fig = plt.figure()
    main_axis = fig.subplots()
    main_axis.set_xlim(0, 1)
    project_names: str = "| "
    main_axis.grid(linestyle='--')
    main_axis.set_xlabel('Revisions normalized')
    main_axis.set_ylabel('Success rate in %')
    # ratios are in [0, 1]; render the y axis as percentages
    main_axis.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
    fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
    # incremental mean of the per-project average success ratios
    mean_over_all_project_successes = 0
    for plot_data in final_plot_data:
        project_names += plot_data[0] + " | "
        mean_over_all_project_successes += plot_data[1]["average_success_ratio"
                                                       ] / len(final_plot_data)
        # Save an unique int for each varying revision to prepare the data
        # for the normalization on the x-axis
        revisions_as_numbers = np.array([
            x + 1 for x, y in enumerate(plot_data[1]["revisions"])
        ]).reshape(-1, 1)
        normalized_revisions = preprocessing.minmax_scale(
            revisions_as_numbers, (0, 1), axis=0, copy=False
        )
        main_axis.plot(
            normalized_revisions,
            plot_data[1]["success_ratio"],
            label=
            f"{plot_data[0]}(\u2205 {plot_data[1]['average_success_ratio']}%)"
        )
    main_axis.title.set_text(
        str(plot_cfg['fig_title']) + f' - Project(s): \n{project_names}'
    )
    # slant the revision labels so they stay readable
    plt.setp(
        main_axis.get_xticklabels(), rotation=30, horizontalalignment='right'
    )
    legend = main_axis.legend(
        title=f"{plot_cfg['legend_title']}"
        f"(\u2205 {round(mean_over_all_project_successes, 2)}%):",
        loc='upper left',
        prop={
            'size': plot_cfg['legend_size'],
            'family': 'monospace'
        }
    )
    legend.set_visible(plot_cfg['legend_visible'])
    plt.setp(
        legend.get_title(),
        fontsize=plot_cfg['legend_size'],
        family='monospace'
    )
class BlameVerifierReportPlot(Plot):
    """Base plot for blame verifier plots."""
    @abc.abstractmethod
    def plot(self, view_mode: bool) -> None:
        """Plot the current plot to a file."""
        style.use(self.style)
    def calc_missing_revisions(
        self, boundary_gradient: float
    ) -> tp.Set[FullCommitHash]:
        # NOTE(review): always returns None although the signature promises a
        # set of commit hashes -- confirm callers tolerate this.
        pass
    def plot_file_name(self, filetype: str) -> str:
        """Return the file name of this plot, e.g. ``<plot name>.png``."""
        return f"{self.name}.{filetype}"
class BlameVerifierReportNoOptPlot(BlameVerifierReportPlot):
    """Plot of the successful and failed annotations of verifier reports
    produced without optimization."""

    NAME = 'b_verifier_report_no_opt_plot'

    def __init__(self, **kwargs: tp.Any) -> None:
        super().__init__(self.NAME, **kwargs)

    def plot(self, view_mode: bool) -> None:
        # Multi-project plots show one curve per project, so the legend
        # describes projects instead of annotation types.
        legend_title = (
            "Success rate of projects"
            if _is_multi_cs_plot() else "Annotation types:"
        )
        _verifier_plot(
            opt_level=OptLevel.NO_OPT,
            extra_plot_cfg={
                'fig_title': 'Annotated project revisions without optimization',
                'legend_title': legend_title
            },
        )
class BlameVerifierReportOptPlot(BlameVerifierReportPlot):
    """Plot of the successful and failed annotations of verifier reports
    produced with optimization."""

    NAME = 'b_verifier_report_opt_plot'

    def __init__(self, **kwargs: tp.Any) -> None:
        super().__init__(self.NAME, **kwargs)

    def plot(self, view_mode: bool) -> None:
        # Multi-project plots show one curve per project, so the legend
        # describes projects instead of annotation types.
        legend_title = (
            "Success rate of projects"
            if _is_multi_cs_plot() else "Annotation types:"
        )
        _verifier_plot(
            opt_level=OptLevel.OPT,
            extra_plot_cfg={
                'fig_title': 'Annotated project revisions with optimization',
                'legend_title': legend_title
            },
        )
| [
"logging.getLogger",
"numpy.unique",
"varats.data.databases.blame_verifier_report_database.BlameVerifierReportDatabase.get_data_for_project",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.figure",
"matplotlib.style.use",
"varats.paper_mgmt.paper_config.get_paper_config",
"matplotlib.pyplot.... | [((717, 744), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (734, 744), False, 'import logging\n'), ((949, 977), 'varats.mapping.commit_map.get_commit_map', 'get_commit_map', (['project_name'], {}), '(project_name)\n', (963, 977), False, 'from varats.mapping.commit_map import get_commit_map\n'), ((1002, 1184), 'varats.data.databases.blame_verifier_report_database.BlameVerifierReportDatabase.get_data_for_project', 'BlameVerifierReportDatabase.get_data_for_project', (['project_name', "['revision', 'time_id', 'opt_level', 'total', 'successful', 'failed',\n 'undetermined']", 'commit_map', 'case_study'], {}), "(project_name, ['revision',\n 'time_id', 'opt_level', 'total', 'successful', 'failed', 'undetermined'\n ], commit_map, case_study)\n", (1050, 1184), False, 'from varats.data.databases.blame_verifier_report_database import BlameVerifierReportDatabase, OptLevel\n'), ((2108, 2166), 'typing.cast', 'tp.cast', (['pd.DataFrame', "named_verifier_plot_df['dataframe']"], {}), "(pd.DataFrame, named_verifier_plot_df['dataframe'])\n", (2115, 2166), True, 'import typing as tp\n'), ((3876, 3897), 'varats.paper_mgmt.paper_config.get_paper_config', 'PC.get_paper_config', ([], {}), '()\n', (3895, 3897), True, 'import varats.paper_mgmt.paper_config as PC\n'), ((5232, 5246), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5244, 5246), True, 'import matplotlib.pyplot as plt\n'), ((6631, 6643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6641, 6643), True, 'import matplotlib.pyplot as plt\n'), ((5517, 5544), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (5539, 5544), True, 'import matplotlib.ticker as mtick\n'), ((6904, 6931), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (6926, 6931), True, 'import matplotlib.ticker as mtick\n'), ((7577, 7653), 'sklearn.preprocessing.minmax_scale', 
'preprocessing.minmax_scale', (['revisions_as_numbers', '(0, 1)'], {'axis': '(0)', 'copy': '(False)'}), '(revisions_as_numbers, (0, 1), axis=0, copy=False)\n', (7603, 7653), False, 'from sklearn import preprocessing\n'), ((8752, 8773), 'matplotlib.style.use', 'style.use', (['self.style'], {}), '(self.style)\n', (8761, 8773), False, 'from matplotlib import style\n'), ((1461, 1500), 'numpy.unique', 'np.unique', (["verifier_plot_df['revision']"], {}), "(verifier_plot_df['revision'])\n", (1470, 1500), True, 'import numpy as np\n'), ((4930, 4951), 'varats.paper_mgmt.paper_config.get_paper_config', 'PC.get_paper_config', ([], {}), '()\n', (4949, 4951), True, 'import varats.paper_mgmt.paper_config as PC\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 01 10:00:09 2017
@author: <NAME>
Translation of NS's InterX for Matlab into Python.
Original Available at: https://www.mathworks.com/matlabcentral/fileexchange/22441-curve-intersections/content/InterX.m
Python translation (c) 2017, <NAME>
Original Matlab code Copyright (c) 2009, NS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
InterX Intersection of curves
Input:
-L1x - Pandas dataframe of x-values for curve 1
-L1y - Pandas dataframe of y-values for curve 1
Optional input:
-L2x - Pandas dataframe of x-values for curve 2
-L2y - Pandas dataframe of y-values for curve 2
Returns:
-P - a Pandas dataframe containing two columns - xs, the x-values of
intersection points, and ys, the y-values of intersection points. If no
intersection is found, xs and ys are both None.
P = InterX(L1x,L1y,L2x,L2y) returns the intersection points of two curves L1
and L2. The curves L1,L2 can be either closed or open.
P = InterX(L1x,L1y) returns the self-intersection points of L1. To keep
the code simple, the points at which the curve is tangent to itself are
not included.
Author : NS
Translated from Matlab code Version: 3.0, 21 Sept. 2010
Translator: <NAME>
Translation version: 1.0, Mar 2017
Two words about the algorithm: Most of the code is self-explanatory.
The only trick lies in the calculation of C1 and C2. To be brief, this
is essentially the two-dimensional analog of the condition that needs
to be satisfied by a function F(x) that has a zero in the interval
[a,b], namely
F(a)*F(b) <= 0
C1 and C2 exactly do this for each segment of curves 1 and 2
respectively. If this condition is satisfied simultaneously for two
segments then we know that they will cross at some point.
Each factor of the 'C' arrays is essentially a matrix containing
the numerators of the signed distances between points of one curve
and line segments of the other.
"""
def InterX(L1x,L1y,L2x=None,L2y=None):
    """Return the intersection points of curves L1 and L2.

    :param L1x: single-column pandas DataFrame of x-values for curve 1
    :param L1y: single-column pandas DataFrame of y-values for curve 1
    :param L2x: optional single-column pandas DataFrame of x-values for curve 2
    :param L2y: optional single-column pandas DataFrame of y-values for curve 2
    :return: pandas DataFrame with columns 'xs' and 'ys' holding the
        intersection coordinates; both contain a single None when no
        intersection exists. When L2x/L2y are omitted, the
        self-intersections of curve 1 are returned (tangent points excluded).
    """
    import pandas as pd
    import numpy as np
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0 -- to_numpy() is
    # the documented replacement. np.mat is likewise a deprecated alias of
    # np.asmatrix, which is used here instead.
    #Check to see if second curve exists. If not, set up as identical to first curve.
    if L2x is None:
        L2x = L1x
        L2y = L1y
        #hF will tell us how to choose intersection points later - 'lt' means 'less than,'
        #meaning that only intersection and not shared points will be returned
        hF = 'lt'
        #set up curve 2 as row-vectors
        L2x = np.asmatrix(L2x.to_numpy()).transpose()
        L2y = np.asmatrix(L2y.to_numpy()).transpose()
    else:
        #'le' means less than or equal to - all intersection points are returned
        hF = 'le'
        #set up curve 2 as row-vectors
        L2x = np.asmatrix(L2x.to_numpy()).transpose()
        L2y = np.asmatrix(L2y.to_numpy()).transpose()
    #set up curve 1 as column-vectors
    L1x = np.asmatrix(L1x.to_numpy())
    L1y = np.asmatrix(L1y.to_numpy())
    #combine x and y for each curve to one master matrix. Curve 1 is column-vectors, curve 2 is row-vectors
    L1 = np.hstack([L1x,L1y])
    L2 = np.vstack([L2x,L2y])
    #break back to x and y values (to match naming conventions from Matlab version)
    #curve 1 is column-vectors, curve 2 is row-vectors
    x1 = L1.transpose()[0].transpose()
    y1 = L1.transpose()[1].transpose()
    x2 = L2[0]
    y2 = L2[1]
    #find the distance between adjacent x's and y's
    dx1 = np.diff(x1,axis=0)
    dx2 = np.diff(x2)
    dy1 = np.diff(y1,axis=0)
    dy2 = np.diff(y2)
    #Find 'signed differences'
    S1 = np.multiply(dx1,y1[0:len(y1)-1]) - np.multiply(dy1,x1[0:len(x1)-1])
    S2 = np.multiply(dx2,y2.transpose()[0:len(y2.transpose())-1].transpose()) - np.multiply(dy2,x2.transpose()[0:len(x2.transpose())-1].transpose())
    fact1 = dx1*y2 - dy1*x2
    fact2 = y1*dx2 - x1*dy2
    fact2T = fact2.transpose()
    S2T = S2.transpose()
    #Collected distances between points in one curve and line segments in other
    C1 = np.multiply(fact1[0:int(fact1.shape[0]),0:int(fact1.shape[1])-1]-S1,fact1[0:int(fact1.shape[0]),1:int(fact1.shape[1])]-S1)
    C2 = np.multiply(fact2T[0:int(fact2T.shape[0]),0:int(fact2T.shape[1])-1]-S2T,fact2T[0:int(fact2T.shape[0]),1:int(fact2T.shape[1])]-S2T)
    #if looking for self-intersections, find only points that aren't tangents between curve 1 and 'curve 2'
    if hF == 'lt':
        TF1 = C1<0
        TF2 = C2<0
        TF2 = TF2.transpose()
    #if looking for intersections between two different curves, take tangent points too
    else:
        TF1 = C1<=0
        TF2 = C2<=0
        TF2 = TF2.transpose()
    #keep indicates row and column indices of line segments where intersections between the two curves are expected
    keep = TF1 & TF2
    #collect row and column index values from the keep matrix
    i = []
    j = []
    for row in range(keep.shape[0]):
        for column in range(keep.shape[1]):
            if keep[row,column]:
                i.append(row)
                j.append(column)
    #if no intersection is found, return 'None'
    if not i:
        P = [None]
        P = pd.DataFrame(P,columns=['xs'])
        P['ys']=[None]
        return P
    #transpose to make data handling easier in a few steps down
    dy2 = dy2.transpose()
    dx2 = dx2.transpose()
    S2 = S2.transpose()
    #calculate some values we need to get output
    L = np.multiply(dy2[j], dx1[i])-np.multiply(dy1[i],dx2[j])
    #L will end up in the denominator a later calculation, so check to make sure none of the values are 0.
    Lnew = []
    inew = []
    jnew = []
    for num in range(L.shape[0]):
        if L[num] != 0:
            Lnew.append(L[num,0])
            inew.append(i[num])
            jnew.append(j[num])
    Lnew = np.asmatrix(Lnew).transpose()
    #Set up numerator and denominator to solve system of equations (two line segments) for intersection points
    numerator = np.hstack([np.multiply(dx2[jnew],S1[inew]) - np.multiply(dx1[inew],S2[jnew]),np.multiply(dy2[jnew],S1[inew]) - np.multiply(dy1[inew],S2[jnew])])
    denominator = np.hstack([Lnew,Lnew])
    #Solve
    result = np.divide(numerator,denominator)
    #organize intersection points into dataframe
    points = pd.DataFrame(result,columns=['xs','ys'])
    points.drop_duplicates(inplace=True)
    return points
"numpy.mat",
"numpy.multiply",
"numpy.hstack",
"numpy.diff",
"numpy.vstack",
"pandas.DataFrame",
"numpy.divide"
] | [((4439, 4460), 'numpy.hstack', 'np.hstack', (['[L1x, L1y]'], {}), '([L1x, L1y])\n', (4448, 4460), True, 'import numpy as np\n'), ((4470, 4491), 'numpy.vstack', 'np.vstack', (['[L2x, L2y]'], {}), '([L2x, L2y])\n', (4479, 4491), True, 'import numpy as np\n'), ((4816, 4835), 'numpy.diff', 'np.diff', (['x1'], {'axis': '(0)'}), '(x1, axis=0)\n', (4823, 4835), True, 'import numpy as np\n'), ((4846, 4857), 'numpy.diff', 'np.diff', (['x2'], {}), '(x2)\n', (4853, 4857), True, 'import numpy as np\n'), ((4869, 4888), 'numpy.diff', 'np.diff', (['y1'], {'axis': '(0)'}), '(y1, axis=0)\n', (4876, 4888), True, 'import numpy as np\n'), ((4899, 4910), 'numpy.diff', 'np.diff', (['y2'], {}), '(y2)\n', (4906, 4910), True, 'import numpy as np\n'), ((7541, 7564), 'numpy.hstack', 'np.hstack', (['[Lnew, Lnew]'], {}), '([Lnew, Lnew])\n', (7550, 7564), True, 'import numpy as np\n'), ((7592, 7625), 'numpy.divide', 'np.divide', (['numerator', 'denominator'], {}), '(numerator, denominator)\n', (7601, 7625), True, 'import numpy as np\n'), ((7691, 7733), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': "['xs', 'ys']"}), "(result, columns=['xs', 'ys'])\n", (7703, 7733), True, 'import pandas as pd\n'), ((6552, 6583), 'pandas.DataFrame', 'pd.DataFrame', (['P'], {'columns': "['xs']"}), "(P, columns=['xs'])\n", (6564, 6583), True, 'import pandas as pd\n'), ((6836, 6863), 'numpy.multiply', 'np.multiply', (['dy2[j]', 'dx1[i]'], {}), '(dy2[j], dx1[i])\n', (6847, 6863), True, 'import numpy as np\n'), ((6864, 6891), 'numpy.multiply', 'np.multiply', (['dy1[i]', 'dx2[j]'], {}), '(dy1[i], dx2[j])\n', (6875, 6891), True, 'import numpy as np\n'), ((7221, 7233), 'numpy.mat', 'np.mat', (['Lnew'], {}), '(Lnew)\n', (7227, 7233), True, 'import numpy as np\n'), ((7388, 7420), 'numpy.multiply', 'np.multiply', (['dx2[jnew]', 'S1[inew]'], {}), '(dx2[jnew], S1[inew])\n', (7399, 7420), True, 'import numpy as np\n'), ((7422, 7454), 'numpy.multiply', 'np.multiply', (['dx1[inew]', 'S2[jnew]'], {}), 
'(dx1[inew], S2[jnew])\n', (7433, 7454), True, 'import numpy as np\n'), ((7454, 7486), 'numpy.multiply', 'np.multiply', (['dy2[jnew]', 'S1[inew]'], {}), '(dy2[jnew], S1[inew])\n', (7465, 7486), True, 'import numpy as np\n'), ((7488, 7520), 'numpy.multiply', 'np.multiply', (['dy1[inew]', 'S2[jnew]'], {}), '(dy1[inew], S2[jnew])\n', (7499, 7520), True, 'import numpy as np\n')] |
import setuptools
from numpy.distutils.core import setup
from numpy.distutils.extension import Extension
from setuptools import find_packages
import versioneer
# Read the long description shown on PyPI from the README.
with open("README.rst") as readme_file:
    readme = readme_file.read()
# Fortran build options shared by both extensions (OpenMP enabled).
compile_opts = {
    "extra_f90_compile_args": [
        "-fopenmp",
        "-ffree-line-length-none",
        "-fdiagnostics-color=always",
        "-Wno-tabs",
    ],
    # f2py: do not generate Python wrappers for these internal routines.
    "f2py_options": ["skip:", "map_border", "calc_weights", ":"],
    "extra_link_args": ["-fopenmp"],
}
# Fortran extension: mode-coupling matrix computations.
mcm = Extension(
    name="pspy.mcm_fortran.mcm_fortran",
    sources=["pspy/mcm_fortran/mcm_fortran.f90", "pspy/wigner3j/wigner3j_sub.f"],
    **compile_opts
)
# Fortran extension: covariance computations.
cov = Extension(
    name="pspy.cov_fortran.cov_fortran",
    sources=["pspy/cov_fortran/cov_fortran.f90", "pspy/wigner3j/wigner3j_sub.f"],
    **compile_opts
)
setup(
    name="pspy",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author="Simons Observatory Collaboration Power Spectrum Task Force",
    url="https://github.com/simonsobs/pspy",
    description="Python power spectrum code",
    long_description=readme,
    long_description_content_type="text/x-rst",
    license="BSD license",
    python_requires=">=3.7",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    entry_points={},
    ext_modules=[mcm, cov],
    install_requires=[
        "numpy>=1.20",
        "healpy",
        "pyFFTW",
        "pillow",  # this one should be installed by pixell
        "pixell>=0.7.0",
    ],
    packages=find_packages(),
    package_data={"pspy": ["pspy/tests/data/*.pkl"]},
    include_package_data=True,
    scripts=["scripts/test-pspy"],
)
| [
"versioneer.get_version",
"versioneer.get_cmdclass",
"setuptools.find_packages",
"numpy.distutils.extension.Extension"
] | [((517, 667), 'numpy.distutils.extension.Extension', 'Extension', ([], {'name': '"""pspy.mcm_fortran.mcm_fortran"""', 'sources': "['pspy/mcm_fortran/mcm_fortran.f90', 'pspy/wigner3j/wigner3j_sub.f']"}), "(name='pspy.mcm_fortran.mcm_fortran', sources=[\n 'pspy/mcm_fortran/mcm_fortran.f90', 'pspy/wigner3j/wigner3j_sub.f'], **\n compile_opts)\n", (526, 667), False, 'from numpy.distutils.extension import Extension\n'), ((678, 828), 'numpy.distutils.extension.Extension', 'Extension', ([], {'name': '"""pspy.cov_fortran.cov_fortran"""', 'sources': "['pspy/cov_fortran/cov_fortran.f90', 'pspy/wigner3j/wigner3j_sub.f']"}), "(name='pspy.cov_fortran.cov_fortran', sources=[\n 'pspy/cov_fortran/cov_fortran.f90', 'pspy/wigner3j/wigner3j_sub.f'], **\n compile_opts)\n", (687, 828), False, 'from numpy.distutils.extension import Extension\n'), ((870, 894), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (892, 894), False, 'import versioneer\n'), ((909, 934), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (932, 934), False, 'import versioneer\n'), ((1820, 1835), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1833, 1835), False, 'from setuptools import find_packages\n')] |
import numpy as np
import cv2 as cv
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
# Input EXR frame and output paths for the two warp implementations below.
inputName = "../bin/outlow_000.exr"
outputName1 = "../bin/outlow_001_warpedNumpy.exr"
outputName2 = "../bin/outlow_001_warpedTorch.exr"
# Optical-flow EXR and debug visualisations written during processing.
flowName = "../bin/outlowf_000.exr"
flowTest1 = "../bin/outlowf_000_t1.png"
flowTest2 = "../bin/outlowf_000_t2.png"
def warp_flow(img, flow):
    """Warp *img* with the normalised optical flow *flow* via cv.remap.

    *flow* holds per-pixel offsets as fractions of the image size
    (channel 0 horizontal, channel 1 vertical); the caller's array is
    not modified.
    """
    rows, cols = flow.shape[:2]
    # Build an absolute sampling map: negate the flow (backward warp),
    # scale the normalised offsets to pixels and add the pixel grid.
    map_xy = -flow
    map_xy[:, :, 0] = map_xy[:, :, 0] * cols + np.arange(cols)
    map_xy[:, :, 1] = map_xy[:, :, 1] * rows + np.arange(rows)[:, np.newaxis]
    img_rows, img_cols = img.shape[:2]
    # Clamp the sampling coordinates to the source image bounds.
    map_xy[:, :, 0] = np.clip(map_xy[:, :, 0], 0, img_cols)
    map_xy[:, :, 1] = np.clip(map_xy[:, :, 1], 0, img_rows)
    return cv.remap(img, np.float32(map_xy), None, cv.INTER_LINEAR)
def warp_flow_torch(img, flow):
H, W = flow.shape[:2]
grid_offsetsH = torch.linspace(-1, +1, H)
grid_offsetsW = torch.linspace(-1, +1, W)
grid_offsetsH = torch.unsqueeze(grid_offsetsH, 1)
grid_offsetsW = torch.unsqueeze(grid_offsetsW, 0)
grid_offsets = torch.stack(
torch.broadcast_tensors(grid_offsetsW, grid_offsetsH),
dim=2)
grid_offsets = torch.unsqueeze(grid_offsets, 0) # batch dimension
img_torch = torch.from_numpy(img.transpose((2, 0, 1)))
img_torch = torch.unsqueeze(img_torch, 0)
flow_torch = torch.unsqueeze(torch.from_numpy(flow), 0)
grid = grid_offsets + flow_torch * -2
warped = F.grid_sample(img_torch, grid)
res = warped[0].numpy().transpose((1, 2, 0))
return res
inputImage = imageio.imread(inputName)
# keep only the first two channels of the flow EXR (u, v offsets)
flowImage = imageio.imread(flowName)[:,:,0:2]
print('inputImage:', inputImage.shape)
print('flowImage:', flowImage.shape)
# debug dump: flow shifted by 0.5 so negative offsets are visible as a PNG
imageio.imwrite(flowTest1, np.concatenate((flowImage + 0.5, np.zeros((flowImage.shape[0], flowImage.shape[1], 1))), axis=2))
# debug dump of channel 3 (presumably alpha/coverage -- TODO confirm)
imageio.imwrite("../bin/outlowf_000_t3.png", inputImage[:,:,3])
print()
print(inputImage[:,:,3])
print()
print(flowImage[:,:,0])
print()
print(np.uint8(inputImage[:,:,3]==0))
print()
# VERY IMPORTANT!!
# Fill flow values where channel 3 is zero (holes) by inpainting each
# flow component separately, then restack them.
flowImageInpainted = np.stack((
    cv.inpaint(flowImage[:,:,0], np.uint8(inputImage[:,:,3]==0), 3, cv.INPAINT_NS),
    cv.inpaint(flowImage[:,:,1], np.uint8(inputImage[:,:,3]==0), 3, cv.INPAINT_NS)), axis=2)
imageio.imwrite(flowTest2, np.concatenate((flowImageInpainted + 0.5, np.zeros((flowImage.shape[0], flowImage.shape[1], 1))), axis=2))
print(flowImageInpainted[:,:,0])
print()
# warp with both implementations for comparison
warpedImage1 = warp_flow(inputImage, flowImageInpainted)
imageio.imwrite(outputName1, warpedImage1)
warpedImage2 = warp_flow_torch(inputImage, flowImageInpainted)
imageio.imwrite(outputName2, warpedImage2) | [
"numpy.clip",
"torch.nn.functional.grid_sample",
"numpy.uint8",
"imageio.imwrite",
"numpy.arange",
"torch.unsqueeze",
"torch.broadcast_tensors",
"torch.from_numpy",
"numpy.zeros",
"imageio.imread",
"numpy.float32",
"torch.linspace"
] | [((1515, 1540), 'imageio.imread', 'imageio.imread', (['inputName'], {}), '(inputName)\n', (1529, 1540), False, 'import imageio\n'), ((1789, 1854), 'imageio.imwrite', 'imageio.imwrite', (['"""../bin/outlowf_000_t3.png"""', 'inputImage[:, :, 3]'], {}), "('../bin/outlowf_000_t3.png', inputImage[:, :, 3])\n", (1804, 1854), False, 'import imageio\n'), ((2432, 2474), 'imageio.imwrite', 'imageio.imwrite', (['outputName1', 'warpedImage1'], {}), '(outputName1, warpedImage1)\n', (2447, 2474), False, 'import imageio\n'), ((2539, 2581), 'imageio.imwrite', 'imageio.imwrite', (['outputName2', 'warpedImage2'], {}), '(outputName2, warpedImage2)\n', (2554, 2581), False, 'import imageio\n'), ((633, 662), 'numpy.clip', 'np.clip', (['flow[:, :, 0]', '(0)', 'iw'], {}), '(flow[:, :, 0], 0, iw)\n', (640, 662), True, 'import numpy as np\n'), ((679, 708), 'numpy.clip', 'np.clip', (['flow[:, :, 1]', '(0)', 'ih'], {}), '(flow[:, :, 1], 0, ih)\n', (686, 708), True, 'import numpy as np\n'), ((864, 889), 'torch.linspace', 'torch.linspace', (['(-1)', '(+1)', 'H'], {}), '(-1, +1, H)\n', (878, 889), False, 'import torch\n'), ((907, 932), 'torch.linspace', 'torch.linspace', (['(-1)', '(+1)', 'W'], {}), '(-1, +1, W)\n', (921, 932), False, 'import torch\n'), ((950, 983), 'torch.unsqueeze', 'torch.unsqueeze', (['grid_offsetsH', '(1)'], {}), '(grid_offsetsH, 1)\n', (965, 983), False, 'import torch\n'), ((1001, 1034), 'torch.unsqueeze', 'torch.unsqueeze', (['grid_offsetsW', '(0)'], {}), '(grid_offsetsW, 0)\n', (1016, 1034), False, 'import torch\n'), ((1147, 1179), 'torch.unsqueeze', 'torch.unsqueeze', (['grid_offsets', '(0)'], {}), '(grid_offsets, 0)\n', (1162, 1179), False, 'import torch\n'), ((1269, 1298), 'torch.unsqueeze', 'torch.unsqueeze', (['img_torch', '(0)'], {}), '(img_torch, 0)\n', (1284, 1298), False, 'import torch\n'), ((1409, 1439), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['img_torch', 'grid'], {}), '(img_torch, grid)\n', (1422, 1439), True, 'import torch.nn.functional as 
F\n'), ((1553, 1577), 'imageio.imread', 'imageio.imread', (['flowName'], {}), '(flowName)\n', (1567, 1577), False, 'import imageio\n'), ((1933, 1967), 'numpy.uint8', 'np.uint8', (['(inputImage[:, :, 3] == 0)'], {}), '(inputImage[:, :, 3] == 0)\n', (1941, 1967), True, 'import numpy as np\n'), ((509, 521), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (518, 521), True, 'import numpy as np\n'), ((731, 747), 'numpy.float32', 'np.float32', (['flow'], {}), '(flow)\n', (741, 747), True, 'import numpy as np\n'), ((1066, 1119), 'torch.broadcast_tensors', 'torch.broadcast_tensors', (['grid_offsetsW', 'grid_offsetsH'], {}), '(grid_offsetsW, grid_offsetsH)\n', (1089, 1119), False, 'import torch\n'), ((1331, 1353), 'torch.from_numpy', 'torch.from_numpy', (['flow'], {}), '(flow)\n', (1347, 1353), False, 'import torch\n'), ((556, 568), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (565, 568), True, 'import numpy as np\n'), ((1723, 1776), 'numpy.zeros', 'np.zeros', (['(flowImage.shape[0], flowImage.shape[1], 1)'], {}), '((flowImage.shape[0], flowImage.shape[1], 1))\n', (1731, 1776), True, 'import numpy as np\n'), ((2055, 2089), 'numpy.uint8', 'np.uint8', (['(inputImage[:, :, 3] == 0)'], {}), '(inputImage[:, :, 3] == 0)\n', (2063, 2089), True, 'import numpy as np\n'), ((2136, 2170), 'numpy.uint8', 'np.uint8', (['(inputImage[:, :, 3] == 0)'], {}), '(inputImage[:, :, 3] == 0)\n', (2144, 2170), True, 'import numpy as np\n'), ((2265, 2318), 'numpy.zeros', 'np.zeros', (['(flowImage.shape[0], flowImage.shape[1], 1)'], {}), '((flowImage.shape[0], flowImage.shape[1], 1))\n', (2273, 2318), True, 'import numpy as np\n')] |
import cv2
import os
import numpy as np
import av
from torchvision.transforms import Compose, Resize, ToTensor
from PIL import Image
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from dataset import MaskDataset, get_img_files, get_img_files_eval
from nets.MobileNetV2_unet import MobileNetV2_unet
__author__ = 'roeiherz'
# (errno, strerror) pair create_folder compares against to detect EEXIST.
FILE_EXISTS_ERROR = (17, 'File exists')
N_CV = 5
IMG_SIZE = 224  # square network input resolution
RANDOM_STATE = 1
FPS = 5  # in the main loop only every FPS-th frame is segmented
def get_data_loaders(val_files):
    """Build the validation DataLoader for *val_files*.

    Images are resized to IMG_SIZE x IMG_SIZE and converted to tensors;
    the loader yields single samples in a fixed order.
    """
    val_transform = Compose([
        Resize((IMG_SIZE, IMG_SIZE)),
        ToTensor(),
    ])
    # BUGFIX: `shuffle` was set to the builtin exception class `TabError`,
    # which is truthy, so the validation loader silently shuffled.
    # Validation data must keep a deterministic order.
    val_loader = DataLoader(MaskDataset(val_files, val_transform),
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True,
                            num_workers=4)
    return val_loader
def create_folder(path):
    """
    Checks if the path exists, if not creates it.

    :param path: A valid path that might not exist
    :return: True if the folder was missing (and has been created),
        False if it already existed
    """
    import errno
    folder_missing = not os.path.exists(path)
    if folder_missing:
        # Using makedirs since the path hierarchy might not fully exist.
        try:
            os.makedirs(path)
        except OSError as e:
            # BUGFIX: compare the errno code instead of the tuple
            # (17, 'File exists') -- strerror text is locale-dependent,
            # so the old comparison could re-raise a benign EEXIST race
            # (another process creating the folder between exists() and
            # makedirs()).
            if e.errno == errno.EEXIST:
                print(e)
            else:
                raise
        print('Created folder {0}'.format(path))
    return folder_missing
def rotate_bound(image, angle):
    """Rotate *image* by *angle* degrees and grow the canvas so that no
    pixel of the rotated content is clipped."""
    height, width = image.shape[:2]
    center_x, center_y = width // 2, height // 2
    # Rotation matrix about the image centre. OpenCV angles are
    # counter-clockwise, hence the sign flip on *angle*.
    rot = cv2.getRotationMatrix2D((center_x, center_y), -angle, 1.0)
    cos_a = np.abs(rot[0, 0])
    sin_a = np.abs(rot[0, 1])
    # Dimensions of the bounding box of the rotated image.
    new_w = int((height * sin_a) + (width * cos_a))
    new_h = int((height * cos_a) + (width * sin_a))
    # Translate so the rotated content is centred on the enlarged canvas.
    rot[0, 2] += (new_w / 2) - center_x
    rot[1, 2] += (new_h / 2) - center_y
    return cv2.warpAffine(image, rot, (new_w, new_h))
def video_to_frames(input_video, out_dir, refinment=1, fps=1):
    """
    Decode *input_video* and write frames as zero-padded JPEGs in *out_dir*.

    :param input_video: path for input video
    :param out_dir: output path directory
    :param refinment: keep only every `refinment`-th decoded frame
        (NOTE(review): name is a typo for "refinement", kept for
        compatibility with positional/keyword callers)
    :param fps:
        1: default fps
        -1: automatic default depends differently per video
        any other integer
    :return: None
    """
    # PyAV is used only for metadata: rotation tag and stream statistics.
    video = av.open(input_video)
    rotation = int(video.streams[0].metadata.get('rotate', 0))
    vidcap = cv2.VideoCapture(input_video)
    # Jump using the fps inputs
    if fps == -1:
        # derive an effective fps from total frames / duration
        duration = float(video.streams[0].duration * video.streams[0].time_base)
        frames = video.streams[0].frames
        fps = int(round(frames / duration))
    count = 0
    image_files = []  # NOTE(review): collected but never returned or used
    counter = 0
    index = 0
    while True:
        success, image = vidcap.read()
        if not success:
            print("Finished/Error in video: {}".format(input_video))
            break
        counter += 1
        # skip frames according to the refinement stride
        if ((counter - 1) % refinment) > 0:
            continue
        # undo the container's rotation metadata
        image = rotate_bound(image, rotation)
        outpath = os.path.join(out_dir, "%.6d.jpg" % (index))
        # write only every fps-th kept frame
        if count % fps == 0:
            cv2.imwrite(outpath, image)
            image_files.append(outpath)
            index += 1
        count = count + 1
def images_to_video(outvid_path, input_folder):
    """Assemble frames named frame1.jpg .. frame999.jpg from
    *input_folder* into an MJPG video (5 fps, 224x224).

    :param outvid_path: output video path
    :param input_folder: directory containing the frame JPEGs
    :return: None
    """
    writer = cv2.VideoWriter(outvid_path, cv2.VideoWriter_fourcc(*'MJPG'),
                            5.0, (224, 224))
    for idx in range(1, 1000):
        frame_path = os.path.join(input_folder, 'frame' + str(idx) + '.jpg')
        # frame indices may be sparse: skip missing files instead of stopping
        if os.path.isfile(frame_path):
            writer.write(cv2.imread(frame_path))
    writer.release()
    return
if __name__ == '__main__':
    # Run the segmentation model over one video and write a blended
    # red-mask overlay video next to the outputs directory.
    # input_video = "/home/roei/Datasets/Accidents1K/Videos/0d1f5146-858f-48a5-8c9a-47b87fc8b6a8.mov"
    input_video = "/home/roei/Downloads/incident-865ba5029fb5fefaae91b3e1e354f403.mp4"
    output_video = "/home/roei/mobile-semantic-segmentation/outputs/"
    model_path = "/home/roei/mobile-semantic-segmentation/outputs/UNET_224_weights_100000_days/0-best.pth"
    uuid = os.path.basename(input_video).split('.')[0]
    output_path = os.path.join(output_video, "{}_masked".format(os.path.basename(input_video).split('.')[0]))
    output_shape = (720, 1280)  # (height, width) of the output video
    # Creates frames if they don't exists
    if not os.path.exists(output_path):
        create_folder(output_path)
    # Process the network
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = "cpu"
    # data_loader = get_data_loaders(frames)
    model = MobileNetV2_unet(mode="eval")
    model.load_state_dict(torch.load(model_path))
    model.to(device)
    model.eval()
    transform = Compose([Resize((IMG_SIZE, IMG_SIZE)), ToTensor()])
    # # Process the Video
    video = av.open(input_video)
    rotation = int(video.streams[0].metadata.get('rotate', 0))
    # Video Reader
    vidcap = cv2.VideoCapture(input_video)
    # Jump using the fps inputs
    fps = FPS
    if fps == -1:
        duration = float(video.streams[0].duration * video.streams[0].time_base)
        frames = video.streams[0].frames
        fps = int(round(frames / duration))
    # Video Writer
    outvid = cv2.VideoWriter(os.path.join(output_path, "{}.avi".format(uuid)),
                            cv2.VideoWriter_fourcc(*'MJPG'), float(fps), (output_shape[1], output_shape[0]))
    count = 0
    image_files = []
    counter = 0
    index = 0
    while True:
        success, image = vidcap.read()
        if not success:
            print("Finished/Error in video: {}".format(input_video))
            break
        counter += 1
        # NOTE(review): modulo 1 never skips; the stride is effectively off
        if ((counter - 1) % 1) > 0:
            continue
        # undo the rotation stored in the container metadata
        image = rotate_bound(image, rotation)
        # segment only every FPS-th frame
        if count % FPS == 0:
            with torch.no_grad():
                img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                # Apply transform to img
                img_trf = Image.fromarray(img)
                img_trf = transform(img_trf)
                img_trf = img_trf.unsqueeze(0)
                inputs = img_trf.to(device)
                # Apply model to get output
                outputs = model(inputs)
                # Prepare image input and output mask for blending
                i = inputs[0]
                i = i.cpu().numpy().transpose((1, 2, 0)) * 255
                i = i.astype(np.uint8)
                o = outputs[0]
                # model outputs a half-resolution mask; rescale to 0..255
                o = o.cpu().numpy().reshape(int(IMG_SIZE / 2), int(IMG_SIZE / 2)) * 255
                o = cv2.resize(o.astype(np.uint8), (output_shape[1], output_shape[0]))
                # Red color
                mask = np.zeros((output_shape[0], output_shape[1], 3)).astype(np.uint8)
                mask[:, :, 2] = o
                # Blend both mask and image
                org_resized_img = cv2.resize(image.astype(np.uint8), (output_shape[1], output_shape[0]))
                blend = cv2.addWeighted(mask, 0.3, org_resized_img, 0.7, 0)
                outvid.write(blend)
        index += 1
        count = count + 1
    outvid.release()
    print("Finished to processed video.")
| [
"av.open",
"dataset.MaskDataset",
"torch.cuda.is_available",
"os.path.exists",
"nets.MobileNetV2_unet.MobileNetV2_unet",
"cv2.addWeighted",
"cv2.VideoWriter_fourcc",
"torchvision.transforms.ToTensor",
"numpy.abs",
"cv2.warpAffine",
"cv2.cvtColor",
"torchvision.transforms.Resize",
"cv2.getRot... | [((1816, 1862), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', '(-angle)', '(1.0)'], {}), '((cX, cY), -angle, 1.0)\n', (1839, 1862), False, 'import cv2\n'), ((1873, 1888), 'numpy.abs', 'np.abs', (['M[0, 0]'], {}), '(M[0, 0])\n', (1879, 1888), True, 'import numpy as np\n'), ((1899, 1914), 'numpy.abs', 'np.abs', (['M[0, 1]'], {}), '(M[0, 1])\n', (1905, 1914), True, 'import numpy as np\n'), ((2235, 2269), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(nW, nH)'], {}), '(image, M, (nW, nH))\n', (2249, 2269), False, 'import cv2\n'), ((2624, 2644), 'av.open', 'av.open', (['input_video'], {}), '(input_video)\n', (2631, 2644), False, 'import av\n'), ((2721, 2750), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_video'], {}), '(input_video)\n', (2737, 2750), False, 'import cv2\n'), ((4961, 4990), 'nets.MobileNetV2_unet.MobileNetV2_unet', 'MobileNetV2_unet', ([], {'mode': '"""eval"""'}), "(mode='eval')\n", (4977, 4990), False, 'from nets.MobileNetV2_unet import MobileNetV2_unet\n'), ((5186, 5206), 'av.open', 'av.open', (['input_video'], {}), '(input_video)\n', (5193, 5206), False, 'import av\n'), ((5302, 5331), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_video'], {}), '(input_video)\n', (5318, 5331), False, 'import cv2\n'), ((610, 647), 'dataset.MaskDataset', 'MaskDataset', (['val_files', 'val_transform'], {}), '(val_files, val_transform)\n', (621, 647), False, 'from dataset import MaskDataset, get_img_files, get_img_files_eval\n'), ((1069, 1089), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1083, 1089), False, 'import os\n'), ((3351, 3392), 'os.path.join', 'os.path.join', (['out_dir', "('%.6d.jpg' % index)"], {}), "(out_dir, '%.6d.jpg' % index)\n", (3363, 3392), False, 'import os\n'), ((3767, 3798), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (3789, 3798), False, 'import cv2\n'), ((4718, 4745), 'os.path.exists', 'os.path.exists', (['output_path'], {}), 
'(output_path)\n', (4732, 4745), False, 'import os\n'), ((5017, 5039), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (5027, 5039), False, 'import torch\n'), ((5691, 5722), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (5713, 5722), False, 'import cv2\n'), ((524, 552), 'torchvision.transforms.Resize', 'Resize', (['(IMG_SIZE, IMG_SIZE)'], {}), '((IMG_SIZE, IMG_SIZE))\n', (530, 552), False, 'from torchvision.transforms import Compose, Resize, ToTensor\n'), ((562, 572), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (570, 572), False, 'from torchvision.transforms import Compose, Resize, ToTensor\n'), ((1212, 1229), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1223, 1229), False, 'import os\n'), ((3437, 3464), 'cv2.imwrite', 'cv2.imwrite', (['outpath', 'image'], {}), '(outpath, image)\n', (3448, 3464), False, 'import cv2\n'), ((4845, 4870), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4868, 4870), False, 'import torch\n'), ((5104, 5132), 'torchvision.transforms.Resize', 'Resize', (['(IMG_SIZE, IMG_SIZE)'], {}), '((IMG_SIZE, IMG_SIZE))\n', (5110, 5132), False, 'from torchvision.transforms import Compose, Resize, ToTensor\n'), ((5134, 5144), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (5142, 5144), False, 'from torchvision.transforms import Compose, Resize, ToTensor\n'), ((4479, 4508), 'os.path.basename', 'os.path.basename', (['input_video'], {}), '(input_video)\n', (4495, 4508), False, 'import os\n'), ((6176, 6191), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6189, 6191), False, 'import torch\n'), ((6215, 6253), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (6227, 6253), False, 'import cv2\n'), ((6321, 6341), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6336, 6341), False, 'from PIL import Image\n'), ((7292, 7343), 'cv2.addWeighted', 
'cv2.addWeighted', (['mask', '(0.3)', 'org_resized_img', '(0.7)', '(0)'], {}), '(mask, 0.3, org_resized_img, 0.7, 0)\n', (7307, 7343), False, 'import cv2\n'), ((4587, 4616), 'os.path.basename', 'os.path.basename', (['input_video'], {}), '(input_video)\n', (4603, 4616), False, 'import os\n'), ((7019, 7066), 'numpy.zeros', 'np.zeros', (['(output_shape[0], output_shape[1], 3)'], {}), '((output_shape[0], output_shape[1], 3))\n', (7027, 7066), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Jan 2018
hacer calib extrisneca con pymc3
@author: sebalander
"""
# %%
# import glob
import os
import corner
import time
import seaborn as sns
import scipy as sc
import scipy.stats as sts
import matplotlib.pyplot as plt
from copy import deepcopy as dc
import numpy as np
from importlib import reload
from glob import glob
# %env THEANO_FLAGS='device=cuda, floatX=float32'
import theano
import theano.tensor as T
import pymc3 as pm
import cv2
import scipy.optimize as opt
import sys
sys.path.append("/home/sebalander/Code/sebaPhD")
from calibration import calibrator as cl
from dev import bayesLib as bl
import pickle
from calibration.calibrator import datafull, real, realdete, realbalk, realches
from calibration.calibrator import synt, syntextr, syntches, syntintr
import numdifftools as ndft
from time import sleep
print('libraries imported')
# compute_test_value is 'off' by default, meaning this feature is inactive
theano.config.compute_test_value = 'off' # Use 'warn' to activate this feature
# %%
def radiusStepsNdim(n):
    '''
    Mode, mean and standard deviation of the step radius when sampling
    from an n-dimensional Gaussian of unit sigma.

    Returns np.array([mode, mean, std]).
    '''
    # normalisation: integral_0^inf x^(n-1) exp(-x^2/2) dx
    #              = 2^(n/2 - 1) Gamma(n/2)
    norm = 2**(n / 2 - 1) * sc.special.gamma(n / 2)
    # first moment: integral_0^inf x^n exp(-x^2/2) dx
    #             = 2^((n - 1)/2) Gamma((n + 1)/2)
    meanR = 2**((n - 1) / 2) * sc.special.gamma((n + 1) / 2) / norm
    # second moment: integral_0^inf x^(n+1) exp(-x^2/2) dx
    #              = 2^(n/2) Gamma(n/2 + 1)
    meanR2 = 2**(n / 2) * sc.special.gamma(n / 2 + 1) / norm
    # mode of the chi distribution with n degrees of freedom
    modeR = np.sqrt(n - 1)
    stdR = np.sqrt(meanR2 - meanR**2)
    return np.array([modeR, meanR, stdR])
def extractCaseData(exCase):
    """Return (imagePoints, objpoints, rVecsT, tVecsT) for one synthetic
    extrinsic case.

    :param exCase: [angle index, height index, all-points flag]; when the
        flag is False only the 10-point subset (index10) is used.

    Only points in front of the camera (positive z in the camera frame)
    are returned.
    """
    extr = fullData.Synt.Extr
    objpoints = extr.objPt
    # BUGFIX: use an out-of-place sum. The original `imagePoints += noise`
    # mutated the array stored inside fullData, so every call stacked one
    # more noise realisation onto the shared dataset.
    imagePoints = (extr.imgPt[exCase[0], exCase[1]]
                   + stdPix * extr.imgNse[exCase[0], exCase[1]])
    if not exCase[2]:
        objpoints = objpoints[extr.index10]
        imagePoints = imagePoints[extr.index10]
    # ground-truth rotation and translation for this case
    rVecsT = extr.rVecs[exCase[0]]
    tVecsT = extr.tVecs[exCase[0], exCase[1]]
    # keep only points with positive z coordinate in the camera frame
    xCam = (cv2.Rodrigues(rVecsT)[0][2, :2].dot(objpoints.T)).T + tVecsT[2]
    inFOV = xCam > 0
    return imagePoints[inFOV], objpoints[inFOV], rVecsT, tVecsT
# %%
pi2 = 2 * np.pi
pi2sq = np.sqrt(pi2)
def prob1vs0(t, x, adaptPrior=True):
'''
calculo el logaritmo del cociente de las probabilidades de que una serie
de datos siga
un modelo cnstante o lineal, si < 1 es que es mas probable el modelo
constante
'''
m0, c0 = np.polyfit(t, x, 0, cov=True)
m1, c1 = np.polyfit(t, x, 1, cov=True)
dif0 = x - m0[0]
var0 = np.mean((dif0)**2)
if np.isclose(var0, 0): # si varianza da cero es que sampleo mal
return 1 # devuelve para que se considere que no convergió
dif1 = x - m1[0] * t - m1[1]
var1 = np.mean((dif1)**2)
if np.allclose(var1, 0):
return 1
# defino los priors
if adaptPrior:
pConst0 = 1 / (np.max(dif0) - np.min(dif0)) # prior de la constante
deltaDif1 = np.max(dif1) - np.min(dif1)
pConst1 = 1 / deltaDif1
penDelta = deltaDif1 / (t[-1] - t[0])
pPendi1 = 1 / penDelta / 2 # prior de la pendiente
pWgH0 = np.log(pConst0)
pWgH1 = np.log(pConst1 * pPendi1)
else:
pWgH0 = 1.0
pWgH1 = 1.0
pDagWH0 = sc.stats.multivariate_normal.logpdf(dif0, cov=var0)
pDagWH1 = sc.stats.multivariate_normal.logpdf(dif1, cov=var1)
deltaW0 = np.log(pi2sq * np.sqrt(c0)[0, 0])
deltaW1 = np.log(pi2 * np.sqrt(np.linalg.det(c1)))
prob1_0 = np.sum(pDagWH1 - pDagWH0)
prob1_0 += pWgH1 * deltaW1 - pWgH0 - deltaW0
return prob1_0
def funcExp(x, a, b, c):
    """Shifted exponential decay: a * exp(-x / |b|) + c."""
    decay = np.exp(-x / np.abs(b))
    return a * decay + c
# %%
def logPerror(xAll, case):
    """Per-point quadratic image-plane error for the flattened pose
    *xAll* = [rvec(3), tvec(3)] using the data and covariances held by
    *case*."""
    rotation = xAll[:3]
    translation = xAll[3:]
    return cl.errorCuadraticoImagen(
        case.imagePoints, case.objpoints, rotation, translation,
        case.cameraMatrix, case.distCoeffs, case.model,
        case.Ci, case.Cf, case.Ck, case.Crt, case.Cfk)
def objective(xAll, case):
    """Scalar cost: total quadratic error summed over all points."""
    per_point = logPerror(xAll, case)
    return np.sum(per_point)
# Numerical Hessian of the scalar cost; its inverse at the optimum is
# used as a covariance estimate.
hessNum = ndft.Hessian(objective)
def optimizar(xAll, case):
    '''
    Store in *case* the optimised pose (xAllOpt) and a covariance
    computed as the inverse of the numerical Hessian at the optimum
    (covOpt).
    '''
    ret = opt.minimize(objective, xAll, args=case)
    case.xAllOpt = ret.x
    case.covOpt = np.linalg.inv(hessNum(case.xAllOpt, case))
class casoCalibExtr:
    """Container for one synthetic extrinsic-calibration case: image/object
    points, intrinsic calibration, covariances and MCMC sampler settings."""
    def __init__(self, fullData, intrCalibResults, case, stdPix, allDelta,
                 nCores, nTune, nTuneInter, tuneBool, tune_thr, tallyBool,
                 convChecksBool, rndSeedBool, scaNdim, scaAl, nDraws, nChains,
                 indexSave, pathFiles):
        # sampler configuration (stored verbatim)
        self.case = case
        self.nCores = nCores
        self.nTune = nTune
        self.nTuneInter = nTuneInter
        self.tuneBool = tuneBool
        self.tune_thr = tune_thr
        self.tallyBool = tallyBool
        self.convChecksBool = convChecksBool
        self.rndSeedBool = rndSeedBool
        self.scaNdim = scaNdim
        self.scaAl = scaAl
        self.nDraws = nDraws
        self.nChains = nChains
        self.indexSave = indexSave
        self.pathFiles = pathFiles
        self.allDelta = allDelta
        # camera description taken from the synthetic intrinsic dataset
        self.camera = fullData.Synt.Intr.camera
        self.model = fullData.Synt.Intr.model
        self.imgSize = fullData.Synt.Intr.s
        Ns = [2, 3]
        # intrinsic posterior mean unpacked into camera matrix + distortion
        Xint = intrCalibResults['mean'][:3]
        self.cameraMatrix, self.distCoeffs = bl.flat2int(Xint, Ns, self.model)
        # NOTE(review): this reads the module-level `exCase`, not the
        # `case` argument -- confirm this is intentional.
        caseData = extractCaseData(exCase)
        self.imagePoints = caseData[0]
        self.xi, self.yi = self.imagePoints.T
        self.objpoints = caseData[1]
        self.rVecsT = caseData[2]
        self.tVecsT = caseData[3]
        # ground-truth pose flattened as [rvec, tvec]
        self.xAllT = np.concatenate([self.rVecsT, self.tVecsT])
        self.nPt = self.objpoints.shape[0]
        self.nFree = 6
        # zero target for the whitened projection errors (x then y)
        self.observedNormed = np.zeros((self.nPt * 2))
        self.Crt = False  # no RT error
        # intrinsic covariance blocks from the previous calibration
        self.Cf = np.zeros((4, 4))
        self.Cf[2:, 2:] = intrCalibResults['cov'][:2, :2]
        self.Ck = intrCalibResults['cov'][2, 2]
        self.Cfk = np.zeros(4)
        self.Cfk[2:] = intrCalibResults['cov'][:2, 2]
        # per-point isotropic image noise covariance
        self.Ci = np.array([stdPix**2 * np.eye(2)] * self.nPt)
        # tau of 1/5 for the moving window
        self.weiEsp = np.exp(- np.arange(nDraws) * 5 / nDraws)[::-1]
        self.weiEsp /= np.sum(self.weiEsp)
# %%
'''
funcion arbitraria para theano
https://docs.pymc.io/notebooks/getting_started.html
https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py
'''
from theano.compile.ops import as_op
@as_op(itypes=[T.dvector], otypes=[T.dvector])
def project2diagonalisedError(xAll):
    '''
    Map the flattened pose xAll = [rvec(3), tvec(3)] to the whitened
    (diagonalised) map-plane errors of all points, concatenated as
    [xNorm..., yNorm...].

    Reads `case` from the module globals: @as_op wrappers only receive
    tensor arguments, so the calibration data cannot be passed in.
    '''
    c = case
    # propagate image points to the map plane with full error propagation
    xm, ym, Cm = cl.inverse(c.xi, c.yi, xAll[:3], xAll[3:], c.cameraMatrix,
                            c.distCoeffs, c.model, c.Ci, c.Cf, c.Ck, c.covOpt, c.Cfk)
    # whiten the residuals with the propagated covariances
    xNorm, yNorm = cl.points2linearised(xm - c.objpoints[:, 0],
                                        ym - c.objpoints[:, 1], Cm).T
    return np.concatenate([xNorm, yNorm])
#class project2diagonalisedError(theano.Op):
# # Properties attribute
# __props__ = ("xi", "yi", "cameraMatrix", "distCoeffs", "model", "Ci", "Cf", "Ck", "covOpt", "Cfk", "objpoints")
#
# #itypes and otypes attributes are
# #compulsory if make_node method is not defined.
# #They're the type of input and output respectively
# itypes = [T.dvector]
# otypes = [T.dvector]
#
# def __init__(self, case):
# self.xi, self.yi, self.cameraMatrix, self.distCoeffs, self.model, self.Ci, self.Cf, self.Ck, self.covOpt, self.Cfk, self.objpoints = [case.xi, case.yi, case.cameraMatrix, case.distCoeffs, case.model, case.Ci, case.Cf, case.Ck, case.covOpt, case.Cfk, case.objpoints]
#
#
# # Python implementation:
# def perform(self, node, inputs_storage, output_storage):
# xAll = inputs_storage[0][0]
# xm, ym, Cm = cl.inverse(self.xi, self.yi, xAll[:3], xAll[3:], self.cameraMatrix,
# self.distCoeffs, self.model, self.Ci, self.Cf, self.Ck, self.covOpt, self.Cfk)
#
# xNorm, yNorm = cl.points2linearised(xm - self.objpoints[:, 0],
# ym - self.objpoints[:, 1], Cm).T
#
# output_storage[0] = np.float64(np.concatenate([xNorm, yNorm]))
def getTrace(alMean, Sal, case):
    """
    Run DEMetropolis sampling of the 6-DOF pose.

    :param alMean: centre of the per-chain start points (flattened pose)
    :param Sal: per-parameter scale for the start spread and proposal S
    :param case: casoCalibExtr (sets uniform prior bounds around xAllT)
    :return: pymc3 trace containing the variable 'xAl'

    NOTE(review): nChains, tuneBool, nTuneInter, tune_thr, tallyBool,
    scaAl, nDraws, nTune, nCores and convChecksBool are read from module
    globals rather than from *case* -- confirm this is intentional.
    """
    # prior bounds
    allLow = case.xAllT - case.allDelta
    allUpp = case.xAllT + case.allDelta
    # one Gaussian-perturbed start point per chain
    alSeed = np.random.randn(case.nChains, case.nFree) * Sal  # .reshape((-1, 1))
    alSeed += alMean  # .reshape((-1, 1))
    start = [dict({'xAl': alSeed[i]}) for i in range(nChains)]
    projectionModel = pm.Model()
    with projectionModel:
        # Priors for unknown model parameters
        xAl = pm.Uniform('xAl', lower=allLow, upper=allUpp, shape=allLow.shape,
                         transform=None)
        xAl.tag.test_value= case.xAllT
        # whitened projection errors; modelled as unit-variance noise below
        xyMNor = project2diagonalisedError(xAl)
        Y_obs = pm.Normal('Y_obs', mu=xyMNor, sd=1, observed=case.observedNormed)
        step = pm.DEMetropolis(vars=[xAl], S=Sal, tune=tuneBool,
                               tune_interval=nTuneInter, tune_throughout=tune_thr,
                               tally=tallyBool, scaling=scaAl)
        # force the tuning/scaling attributes in case the constructor
        # arguments are ignored by this pymc3 version
        step.tune = tuneBool
        step.lamb = scaAl
        step.scaling = scaAl
        trace = pm.sample(draws=nDraws, step=step, njobs=nChains,
                          start=start,
                          tune=nTune, chains=nChains, progressbar=True,
                          discard_tuned_samples=False, cores=nCores,
                          compute_convergence_checks=convChecksBool,
                          parallelize=True)
    return trace
#lor = np.array([-100]*6)
#upr = - lor
#xAl = pm.Uniform.dist(lor, upr, shape=lor.shape)
#
#f = theano.function([xAl], project2diagonalisedError(case)(xAl))
#xyMNor = f(xAl)
#getTrace(case.xAllOpt, np.sqrt(np.diag(case.covOpt)), case)
'''
ponele que anda, no upde hacer que la funcion acepte a "case" como argumento
o algo que no sea leerlo desde las variables globales del main.
incluso fallo definir como un objeto mas complicado que pudiera inicializarse
guardando los parametros que necesito
'''
# %%
def getStationaryTrace(exCase):
    """
    Repeatedly sample the pose posterior for *exCase* until the per-chain
    mean and std show no linear trend over the draws (prob1vs0 < 0 for
    every parameter), i.e. the chains look stationary.

    :return: (means, stdes, probList, tracesList) accumulated per attempt.

    NOTE(review): relies on module globals (stdPix, nChains, weiEsp, t,
    nFree) and calls objective/hessNum/getTrace without their `case`
    argument, which does not match the signatures defined above --
    confirm which helper versions this was written against.
    """
    imagePoints, objpoints, rVecsT, tVecsT = extractCaseData(exCase)
    xi, yi = imagePoints.T
    nPt = objpoints.shape[0]  # number of points
    Ci = np.array([stdPix**2 * np.eye(2)] * nPt)
    nFree = 6  # number of free parameters
    # flatten the starting values
    xAllT = np.concatenate([rVecsT, tVecsT])
    # flatten the starting values
    xAllT = np.concatenate([rVecsT, tVecsT])
    print('data loaded and formated')
    ret = opt.minimize(objective, xAllT)
    xAllOpt = ret.x
    covNum = np.linalg.inv(hessNum(xAllOpt))  # shrink it just in case?
    print('initial optimisation and covariance estimated')
    # for proposal distr
    alMean = xAllOpt
    Sal = np.sqrt(np.diag(covNum))
    print('defined parameters')
    means = list()
    stdes = list()
    tracesList = list()
    probList = list()
    for intento in range(50):
        print("\n\n")
        print("============================")
        print('intento nro', intento, ' caso ', exCase)
        trace = getTrace(alMean, Sal)
        sleep(5)  # wait a little while ...
        # reshape to (parameter, draw, chain)
        traceArray = trace['xAl'].reshape((nChains, -1, nFree))
        traceArray = traceArray.transpose((2, 1, 0))
        tracesList.append(traceArray)
        traceMean = np.mean(traceArray, axis=2)
        traceStd = np.std(traceArray, axis=2)
        means.append(traceMean)
        stdes.append(traceStd)
        probMean = np.zeros(6)
        probStd = np.zeros(6)
        # test each parameter's mean/std series for a linear trend
        for i in range(6):
            probMean[i] = prob1vs0(t, traceMean[i], adaptPrior=True)
            probStd[i] = prob1vs0(t, traceStd[i], adaptPrior=True)
        probList.append(np.array([probMean, probStd]))
        print(probList[-1].T)
        convergedBool = np.all([probMean < 0, probStd < 0])
        # recenter the proposal on an exponentially weighted recent average
        alMean = np.average(traceMean, axis=1, weights=weiEsp)
        Sal = np.average(traceStd, axis=1, weights=weiEsp)
        for sa in Sal:  # regularise any scale that is zero
            # NOTE(review): rebinding `sa` does not modify Sal; this
            # regularisation has no effect.
            if np.isclose(sa, 0):
                sa = 1e-6
        if intento > 0 and convergedBool:
            print("parece que convergió")
            break
    return means, stdes, probList, tracesList
# %% LOAD DATA
# input
# import collections as clt
# pickled dataset with synthetic/real intrinsic+extrinsic calibration data
fullDataFile = "./resources/nov16/fullDataIntrExtr.npy"
dataFile = open(fullDataFile, "rb")
fullData = pickle.load(dataFile)
dataFile.close()
# posterior mean/cov of the intrinsic parameters from a previous calibration
intrCalibResults = np.load("./resources/nov16/syntIntrCalib.npy").all()
# image-point noise standard deviation in pixels
stdPix = 1.0
# half-width of the uniform prior around the true pose: 10 deg / 5 units
allDelta = np.concatenate([[np.deg2rad(10)] * 3, [5] * 3])
# cam puede ser ['vca', 'vcaWide', 'ptz'] son los datos que se tienen
#camera = fullData.Synt.Intr.camera
## modelos = ['poly', 'rational', 'fisheye', 'stereographic']
#model = fullData.Synt.Intr.model
#imgSize = fullData.Synt.Intr.s
#Ns = [2, 3]
#Xint = intrCalibResults['mean'][:3]
#cameraMatrix, distCoeffs = bl.flat2int(Xint, Ns, model)
# caso de extrinseca a analizar, indices: [angulos, alturas, 20 puntos]
exCasesList = list()
for aa in range(3):
for hh in range(2):
for nBo in [False, True]:
exCasesList.append([aa, hh, nBo])
exCase = exCasesList[0]
#
## cargo los puntos de calibracion
#imagePoints, objpoints, rVecsT, tVecsT = extractCaseData(exCase)
#xi, yi = imagePoints.T
#nPt = objpoints.shape[0] # cantidad de puntos
#nFree = 6 # nro de parametros libres
## load intrisic calibrated
## 0.1pix as image std
## https://stackoverflow.com/questions/12102318/opencv-findcornersubpix-precision
## increase to 1pix porque la posterior da demasiado rara
#Crt = False # no RT error
#Cf = np.zeros((4, 4))
#Cf[2:, 2:] = intrCalibResults['cov'][:2, :2]
#Ck = intrCalibResults['cov'][2, 2]
#Cfk = np.zeros(4)
#Cfk[2:] = intrCalibResults['cov'][:2, 2]
#stdPix = 1.0
#Ci = np.array([stdPix**2 * np.eye(2)] * nPt)
# pongo en forma flat los valores iniciales
#xAllT = np.concatenate([rVecsT, tVecsT])
print('data loaded and formated')
# %% metropolis desde MAP. a ver si zafo de la proposal dist
'''
http://docs.pymc.io/api/inference.html
aca documentacion copada
https://pymc-devs.github.io/pymc/modelfitting.html
https://github.com/pymc-devs/pymc3/blob/75f64e9517a059ce678c6b4d45b7c64d77242ab6/pymc3/step_methods/metropolis.py
'''
nCores = 1
nTune = 0
nTuneInter = 0
tuneBool = nTune != 0
tune_thr = False
tallyBool = False
convChecksBool = False
rndSeedBool = True
# escalas características del tipical set
nFree = 6
scaNdim = 1 / radiusStepsNdim(nFree)[1]
# determino la escala y el lambda para las propuestas
scaAl = scaNdim / np.sqrt(3)
# lamb = 2.38 / np.sqrt(2 * Sal.size)
# scaIn = scaEx = 1 / radiusStepsNdim(NfreeParams)[1]
nDraws = 100
nChains = 10 * int(1.1 * nFree)
indexSave = 0
#header = "nDraws %d, nChains %d" % (nDraws, nChains)
pathFiles = "/home/sebalander/Code/VisionUNQextra/Videos y Mediciones/"
pathFiles += "extraDataSebaPhD/tracesSynt" + str(indexSave)
# %%
case = casoCalibExtr(fullData, intrCalibResults, exCase, stdPix, allDelta,
nCores, nTune, nTuneInter, tuneBool, tune_thr, tallyBool,
convChecksBool, rndSeedBool, scaNdim, scaAl, nDraws, nChains,
indexSave, pathFiles)
# uso las
optimizar(case.xAllT, case)
getTrace(case.xAllOpt, np.sqrt(np.diag(case.covOpt)), case)
# %% defino la funcion a minimizar
erCuaIm = case.logPerror(case.xAllT)
erCua = objective(case.xAllT)
print(erCuaIm, np.exp(- erCuaIm / 2))
print(erCua, np.exp(- erCua / 2))
covNum = np.linalg.inv(hessNum(xAllT))
# %%
ret = opt.minimize(objective, case.xAllT)
xAllOpt = ret.x
covOpt = np.exp(ret.fun / 2) * ret.hess_inv
sigOpt = np.sqrt(np.diag(covOpt))
crrOpt = covOpt / (sigOpt.reshape((-1, 1)) * sigOpt.reshape((1, -1)))
# plt.matshow(crrOpt, vmin=-1, vmax=1, cmap='coolwarm')
xm, ym, Cm = cl.inverse(xi, yi, xAllOpt[:3], xAllOpt[3:], cameraMatrix,
distCoeffs, model, Ci, Cf, Ck, covOpt, Cfk)
plt.figure()
ax = plt.gca()
ax.scatter(xm, ym)
ax.scatter(objpoints[:, 0], objpoints[:, 1])
for i in range(len(xm)):
cl.plotEllipse(ax, Cm[i], xm[i], ym[i], 'k')
ax.axis('equal')
reload(cl)
xNorm, yNorm = cl.points2linearised(xm - objpoints[:, 0],
ym - objpoints[:, 1], Cm).T
plt.scatter(xNorm, yNorm)
plt.axis('equal')
plt.scatter(fullData.Synt.Extr.imgNse[exCase[0], exCase[1], :, 0],
fullData.Synt.Extr.imgNse[exCase[0], exCase[1], :, 1])
# %%
# chequeo la funcion arbitraria que va ausar theano
xAllTheano = theano.shared(xAllT)
parameters = [xi, yi, cameraMatrix, distCoeffs, model, Ci, Cf, Ck, covOpt,
Cfk, objpoints]
#projObj = diagonalisedError(parameters)
project2diagonalisedError(xAllTheano).eval()
# %%
# 10 grados de error de rotacion
# intervalo de 5 unidades de distancia
allDelta = np.concatenate([[np.deg2rad(10)] * 3, [5] * 3])
observedNormed = np.zeros((nPt * 2))
# calculate estimated radius step
ModeR, ExpectedR, DesvEstR = radiusStepsNdim(nFree)
print("moda, la media y la desv est del radio\n", ModeR, ExpectedR, DesvEstR)
#exCaseList = [[1, 0, 1]]
# %%
saveBool = True
#exCase = [2, 1, True]
for exCase in exCasesList[:2]:
file = pathFiles + "ext-%d-%d-%d" % tuple(exCase)
print("\n\n")
print("========================================================")
print("========================================================")
print(pathFiles)
means, stdes, probList, tracesList = getStationaryTrace(exCase)
intento = -1
for m, s, p in zip(means, stdes, probList):
intento += 1
plt.figure(1)
plt.plot(t + intento * nDraws, m.T - xAllOpt)
plt.figure(2)
plt.plot(t + intento * nDraws, s.T)
if saveBool:
# np.save(pathFiles + "Int", trace['xIn'])
# np.save(pathFiles + "Ext", trace['xEx'])
# trace['docs'] = header
np.save(file, tracesList)
print("saved data to")
print(file)
print("exiting")
# sys.exit()
# %%
loadBool = True
if loadBool:
filesFound = glob(pathFiles + "ext-*.npy")
traces = dict()
for file in filesFound:
print(file)
trace = dict()
trace = np.load(file).all()
key = file[-9:-4]
traces[key] = trace
# %%
traceArray = trace['xAl'].reshape((nChains, -1, nFree)).transpose((2, 1, 0))
# queda con los indices (parametro, iteracion, cadena)
fig, axs = plt.subplots(2, 3)
for i, ax in enumerate(axs.flatten()):
ax.plot(traceArray[i], alpha=0.2)
#corner.corner(trace['xAl'])
difAll = np.diff(traceArray, axis=1)
repeats = np.zeros_like(traceArray)
repeats[:, 1:] = difAll == 0
repsRate = repeats.sum() / np.prod(repeats.shape)
print("tasa de repetidos", repsRate)
# %%
indexCut = 1000
traceCut = traceArray[:, indexCut:]
del traceArray
del repeats
del difAll
# saco la desvest y hago un ajuste
stdIte = np.std(traceCut, axis=2)
itime = np.arange(stdIte.shape[1])
plt.figure()
plt.plot(stdIte.T, alpha=0.2)
plt.xlabel('iteración')
plt.ylabel('std parámetro')
plt.plot(stdIte[0], 'k')
from scipy.optimize import curve_fit
def funcExp(x, a, b, c):
return a * np.exp(- x / b) + c
# ajuste exponencial
linFitList = list()
expFitList = list()
for i in range(nFree):
p0 = [stdIte[i, 0], 1e3, 5 * stdIte[i, 0]]
popt, pcov = curve_fit(funcExp, itime, stdIte[i], p0)
expFitList.append([popt, np.sqrt(np.diag(pcov))])
popt, pcov = np.polyfit(itime, stdIte[i], 1, cov=True)
linFitList.append([popt, np.sqrt(np.diag(pcov))])
linFitList = np.array(linFitList)
expFitList = np.array(expFitList)
Tnext = 3e4
stdNext = Tnext * linFitList[:, 0, 0] + linFitList[:, 0, 1]
plt.figure()
plt.plot(Sal, stdNext, '+')
_, nDraws, nTrac = traceCut.shape
# % proyecto sobre los dos autovectores ppales
traceMean = np.mean(traceCut, axis=(1, 2))
traceDif = traceCut - traceMean.reshape((-1, 1, 1))
U, S, Vh = np.linalg.svd(traceDif.reshape((nFree, -1)).T,
full_matrices=False)
traceCov = np.cov(traceDif.reshape((nFree, -1)))
traceSig = np.sqrt(np.diag(traceCov))
traceCrr = traceCov / (traceSig.reshape((-1, 1)) * traceSig.reshape((1, -1)))
saveIntrBool = False
if saveIntrBool:
np.save("/home/sebalander/Code/sebaPhD/resources/nov16/syntIntrCalib.npy",
{'mean': traceMean,
'cov': traceCov,
"camera": camera,
"model": model,
"trueVals": xAllT,
"datafile": fullDataFile})
plt.matshow(traceCrr, vmin=-1, vmax=1, cmap='coolwarm')
cols = np.array([[1] * 3, [2] * 3]).reshape(-1)
cols = np.concatenate([[0] * 3, cols])
plt.figure()
plt.scatter(np.abs(traceMean), traceSig)
print(traceMean[:3], traceCov[:3,:3])
versors = Vh[:2].T / S[:2]
ppalXY = traceDif.T.dot(versors)
ppalX, ppalY = ppalXY.T
plt.figure()
plt.plot(ppalX, ppalY, linewidth=0.5)
plt.axis('equal')
plt.figure()
plt.plot(ppalX, linewidth=0.5)
plt.plot(ppalX[:,::50], linewidth=2)
plt.figure()
plt.plot(ppalY, linewidth=0.5)
plt.plot(ppalY[:,::50], linewidth=2)
pathFiles = "/home/sebalander/Code/VisionUNQextra/Videos y Mediciones/"
pathFiles += "extraDataSebaPhD/tracesSynt" + str(0)
trace = dict()
trace['xAl'] = np.load(pathFiles + "All.npy")
| [
"numpy.prod",
"calibration.calibrator.errorCuadraticoImagen",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"numpy.log",
"time.sleep",
"numpy.array",
"pymc3.sample",
"sys.path.append",
"numpy.arange",
"calibration.calibrator.points2linearised",
"numpy.save",
"numpy.mean",
"t... | [((549, 597), 'sys.path.append', 'sys.path.append', (['"""/home/sebalander/Code/sebaPhD"""'], {}), "('/home/sebalander/Code/sebaPhD')\n", (564, 597), False, 'import sys\n'), ((2996, 3008), 'numpy.sqrt', 'np.sqrt', (['pi2'], {}), '(pi2)\n', (3003, 3008), True, 'import numpy as np\n'), ((4852, 4875), 'numdifftools.Hessian', 'ndft.Hessian', (['objective'], {}), '(objective)\n', (4864, 4875), True, 'import numdifftools as ndft\n'), ((7392, 7437), 'theano.compile.ops.as_op', 'as_op', ([], {'itypes': '[T.dvector]', 'otypes': '[T.dvector]'}), '(itypes=[T.dvector], otypes=[T.dvector])\n', (7397, 7437), False, 'from theano.compile.ops import as_op\n'), ((13740, 13761), 'pickle.load', 'pickle.load', (['dataFile'], {}), '(dataFile)\n', (13751, 13761), False, 'import pickle\n'), ((16862, 16897), 'scipy.optimize.minimize', 'opt.minimize', (['objective', 'case.xAllT'], {}), '(objective, case.xAllT)\n', (16874, 16897), True, 'import scipy.optimize as opt\n'), ((17138, 17244), 'calibration.calibrator.inverse', 'cl.inverse', (['xi', 'yi', 'xAllOpt[:3]', 'xAllOpt[3:]', 'cameraMatrix', 'distCoeffs', 'model', 'Ci', 'Cf', 'Ck', 'covOpt', 'Cfk'], {}), '(xi, yi, xAllOpt[:3], xAllOpt[3:], cameraMatrix, distCoeffs,\n model, Ci, Cf, Ck, covOpt, Cfk)\n', (17148, 17244), True, 'from calibration import calibrator as cl\n'), ((17266, 17278), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17276, 17278), True, 'import matplotlib.pyplot as plt\n'), ((17284, 17293), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (17291, 17293), True, 'import matplotlib.pyplot as plt\n'), ((17451, 17461), 'importlib.reload', 'reload', (['cl'], {}), '(cl)\n', (17457, 17461), False, 'from importlib import reload\n'), ((17584, 17609), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xNorm', 'yNorm'], {}), '(xNorm, yNorm)\n', (17595, 17609), True, 'import matplotlib.pyplot as plt\n'), ((17610, 17627), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (17618, 17627), 
True, 'import matplotlib.pyplot as plt\n'), ((17630, 17756), 'matplotlib.pyplot.scatter', 'plt.scatter', (['fullData.Synt.Extr.imgNse[exCase[0], exCase[1], :, 0]', 'fullData.Synt.Extr.imgNse[exCase[0], exCase[1], :, 1]'], {}), '(fullData.Synt.Extr.imgNse[exCase[0], exCase[1], :, 0], fullData\n .Synt.Extr.imgNse[exCase[0], exCase[1], :, 1])\n', (17641, 17756), True, 'import matplotlib.pyplot as plt\n'), ((17840, 17860), 'theano.shared', 'theano.shared', (['xAllT'], {}), '(xAllT)\n', (17853, 17860), False, 'import theano\n'), ((18209, 18226), 'numpy.zeros', 'np.zeros', (['(nPt * 2)'], {}), '(nPt * 2)\n', (18217, 18226), True, 'import numpy as np\n'), ((19732, 19750), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {}), '(2, 3)\n', (19744, 19750), True, 'import matplotlib.pyplot as plt\n'), ((19868, 19895), 'numpy.diff', 'np.diff', (['traceArray'], {'axis': '(1)'}), '(traceArray, axis=1)\n', (19875, 19895), True, 'import numpy as np\n'), ((19907, 19932), 'numpy.zeros_like', 'np.zeros_like', (['traceArray'], {}), '(traceArray)\n', (19920, 19932), True, 'import numpy as np\n'), ((20192, 20216), 'numpy.std', 'np.std', (['traceCut'], {'axis': '(2)'}), '(traceCut, axis=2)\n', (20198, 20216), True, 'import numpy as np\n'), ((20225, 20251), 'numpy.arange', 'np.arange', (['stdIte.shape[1]'], {}), '(stdIte.shape[1])\n', (20234, 20251), True, 'import numpy as np\n'), ((20253, 20265), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20263, 20265), True, 'import matplotlib.pyplot as plt\n'), ((20266, 20295), 'matplotlib.pyplot.plot', 'plt.plot', (['stdIte.T'], {'alpha': '(0.2)'}), '(stdIte.T, alpha=0.2)\n', (20274, 20295), True, 'import matplotlib.pyplot as plt\n'), ((20296, 20319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteración"""'], {}), "('iteración')\n", (20306, 20319), True, 'import matplotlib.pyplot as plt\n'), ((20320, 20347), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""std parámetro"""'], {}), "('std parámetro')\n", (20330, 
20347), True, 'import matplotlib.pyplot as plt\n'), ((20348, 20372), 'matplotlib.pyplot.plot', 'plt.plot', (['stdIte[0]', '"""k"""'], {}), "(stdIte[0], 'k')\n", (20356, 20372), True, 'import matplotlib.pyplot as plt\n'), ((20845, 20865), 'numpy.array', 'np.array', (['linFitList'], {}), '(linFitList)\n', (20853, 20865), True, 'import numpy as np\n'), ((20879, 20899), 'numpy.array', 'np.array', (['expFitList'], {}), '(expFitList)\n', (20887, 20899), True, 'import numpy as np\n'), ((20974, 20986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20984, 20986), True, 'import matplotlib.pyplot as plt\n'), ((20987, 21014), 'matplotlib.pyplot.plot', 'plt.plot', (['Sal', 'stdNext', '"""+"""'], {}), "(Sal, stdNext, '+')\n", (20995, 21014), True, 'import matplotlib.pyplot as plt\n'), ((21110, 21140), 'numpy.mean', 'np.mean', (['traceCut'], {'axis': '(1, 2)'}), '(traceCut, axis=(1, 2))\n', (21117, 21140), True, 'import numpy as np\n'), ((21777, 21832), 'matplotlib.pyplot.matshow', 'plt.matshow', (['traceCrr'], {'vmin': '(-1)', 'vmax': '(1)', 'cmap': '"""coolwarm"""'}), "(traceCrr, vmin=-1, vmax=1, cmap='coolwarm')\n", (21788, 21832), True, 'import matplotlib.pyplot as plt\n'), ((21889, 21920), 'numpy.concatenate', 'np.concatenate', (['[[0] * 3, cols]'], {}), '([[0] * 3, cols])\n', (21903, 21920), True, 'import numpy as np\n'), ((21922, 21934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21932, 21934), True, 'import matplotlib.pyplot as plt\n'), ((22102, 22114), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22112, 22114), True, 'import matplotlib.pyplot as plt\n'), ((22115, 22152), 'matplotlib.pyplot.plot', 'plt.plot', (['ppalX', 'ppalY'], {'linewidth': '(0.5)'}), '(ppalX, ppalY, linewidth=0.5)\n', (22123, 22152), True, 'import matplotlib.pyplot as plt\n'), ((22153, 22170), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (22161, 22170), True, 'import matplotlib.pyplot as plt\n'), ((22172, 22184), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22182, 22184), True, 'import matplotlib.pyplot as plt\n'), ((22185, 22215), 'matplotlib.pyplot.plot', 'plt.plot', (['ppalX'], {'linewidth': '(0.5)'}), '(ppalX, linewidth=0.5)\n', (22193, 22215), True, 'import matplotlib.pyplot as plt\n'), ((22216, 22253), 'matplotlib.pyplot.plot', 'plt.plot', (['ppalX[:, ::50]'], {'linewidth': '(2)'}), '(ppalX[:, ::50], linewidth=2)\n', (22224, 22253), True, 'import matplotlib.pyplot as plt\n'), ((22255, 22267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22265, 22267), True, 'import matplotlib.pyplot as plt\n'), ((22268, 22298), 'matplotlib.pyplot.plot', 'plt.plot', (['ppalY'], {'linewidth': '(0.5)'}), '(ppalY, linewidth=0.5)\n', (22276, 22298), True, 'import matplotlib.pyplot as plt\n'), ((22299, 22336), 'matplotlib.pyplot.plot', 'plt.plot', (['ppalY[:, ::50]'], {'linewidth': '(2)'}), '(ppalY[:, ::50], linewidth=2)\n', (22307, 22336), True, 'import matplotlib.pyplot as plt\n'), ((22495, 22525), 'numpy.load', 'np.load', (["(pathFiles + 'All.npy')"], {}), "(pathFiles + 'All.npy')\n", (22502, 22525), True, 'import numpy as np\n'), ((1958, 1972), 'numpy.sqrt', 'np.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (1965, 1972), True, 'import numpy as np\n'), ((2069, 2105), 'numpy.sqrt', 'np.sqrt', (['(ExpectedR2 - ExpectedR ** 2)'], {}), '(ExpectedR2 - ExpectedR ** 2)\n', (2076, 2105), True, 'import numpy as np\n'), ((2116, 2154), 'numpy.array', 'np.array', (['[ModeR, ExpectedR, DesvEstR]'], {}), '([ModeR, ExpectedR, DesvEstR])\n', (2124, 2154), True, 'import numpy as np\n'), ((3259, 3288), 'numpy.polyfit', 'np.polyfit', (['t', 'x', '(0)'], {'cov': '(True)'}), '(t, x, 0, cov=True)\n', (3269, 3288), True, 'import numpy as np\n'), ((3302, 3331), 'numpy.polyfit', 'np.polyfit', (['t', 'x', '(1)'], {'cov': '(True)'}), '(t, x, 1, cov=True)\n', (3312, 3331), True, 'import numpy as np\n'), ((3365, 3383), 'numpy.mean', 'np.mean', (['(dif0 ** 2)'], {}), '(dif0 ** 2)\n', (3372, 
3383), True, 'import numpy as np\n'), ((3391, 3410), 'numpy.isclose', 'np.isclose', (['var0', '(0)'], {}), '(var0, 0)\n', (3401, 3410), True, 'import numpy as np\n'), ((3567, 3585), 'numpy.mean', 'np.mean', (['(dif1 ** 2)'], {}), '(dif1 ** 2)\n', (3574, 3585), True, 'import numpy as np\n'), ((3593, 3613), 'numpy.allclose', 'np.allclose', (['var1', '(0)'], {}), '(var1, 0)\n', (3604, 3613), True, 'import numpy as np\n'), ((4079, 4130), 'scipy.stats.multivariate_normal.logpdf', 'sc.stats.multivariate_normal.logpdf', (['dif0'], {'cov': 'var0'}), '(dif0, cov=var0)\n', (4114, 4130), True, 'import scipy as sc\n'), ((4145, 4196), 'scipy.stats.multivariate_normal.logpdf', 'sc.stats.multivariate_normal.logpdf', (['dif1'], {'cov': 'var1'}), '(dif1, cov=var1)\n', (4180, 4196), True, 'import scipy as sc\n'), ((4316, 4341), 'numpy.sum', 'np.sum', (['(pDagWH1 - pDagWH0)'], {}), '(pDagWH1 - pDagWH0)\n', (4322, 4341), True, 'import numpy as np\n'), ((4568, 4742), 'calibration.calibrator.errorCuadraticoImagen', 'cl.errorCuadraticoImagen', (['case.imagePoints', 'case.objpoints', 'rvec', 'tvec', 'case.cameraMatrix', 'case.distCoeffs', 'case.model', 'case.Ci', 'case.Cf', 'case.Ck', 'case.Crt', 'case.Cfk'], {}), '(case.imagePoints, case.objpoints, rvec, tvec, case\n .cameraMatrix, case.distCoeffs, case.model, case.Ci, case.Cf, case.Ck,\n case.Crt, case.Cfk)\n', (4592, 4742), True, 'from calibration import calibrator as cl\n'), ((5031, 5071), 'scipy.optimize.minimize', 'opt.minimize', (['objective', 'xAll'], {'args': 'case'}), '(objective, xAll, args=case)\n', (5043, 5071), True, 'import scipy.optimize as opt\n'), ((7595, 7716), 'calibration.calibrator.inverse', 'cl.inverse', (['c.xi', 'c.yi', 'xAll[:3]', 'xAll[3:]', 'c.cameraMatrix', 'c.distCoeffs', 'c.model', 'c.Ci', 'c.Cf', 'c.Ck', 'c.covOpt', 'c.Cfk'], {}), '(c.xi, c.yi, xAll[:3], xAll[3:], c.cameraMatrix, c.distCoeffs, c.\n model, c.Ci, c.Cf, c.Ck, c.covOpt, c.Cfk)\n', (7605, 7716), True, 'from calibration import calibrator as 
cl\n'), ((7887, 7917), 'numpy.concatenate', 'np.concatenate', (['[xNorm, yNorm]'], {}), '([xNorm, yNorm])\n', (7901, 7917), True, 'import numpy as np\n'), ((9525, 9535), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (9533, 9535), True, 'import pymc3 as pm\n'), ((11711, 11743), 'numpy.concatenate', 'np.concatenate', (['[rVecsT, tVecsT]'], {}), '([rVecsT, tVecsT])\n', (11725, 11743), True, 'import numpy as np\n'), ((11805, 11837), 'numpy.concatenate', 'np.concatenate', (['[rVecsT, tVecsT]'], {}), '([rVecsT, tVecsT])\n', (11819, 11837), True, 'import numpy as np\n'), ((11887, 11917), 'scipy.optimize.minimize', 'opt.minimize', (['objective', 'xAllT'], {}), '(objective, xAllT)\n', (11899, 11917), True, 'import scipy.optimize as opt\n'), ((15901, 15911), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (15908, 15911), True, 'import numpy as np\n'), ((16751, 16771), 'numpy.exp', 'np.exp', (['(-erCuaIm / 2)'], {}), '(-erCuaIm / 2)\n', (16757, 16771), True, 'import numpy as np\n'), ((16788, 16806), 'numpy.exp', 'np.exp', (['(-erCua / 2)'], {}), '(-erCua / 2)\n', (16794, 16806), True, 'import numpy as np\n'), ((16924, 16943), 'numpy.exp', 'np.exp', (['(ret.fun / 2)'], {}), '(ret.fun / 2)\n', (16930, 16943), True, 'import numpy as np\n'), ((16977, 16992), 'numpy.diag', 'np.diag', (['covOpt'], {}), '(covOpt)\n', (16984, 16992), True, 'import numpy as np\n'), ((17387, 17431), 'calibration.calibrator.plotEllipse', 'cl.plotEllipse', (['ax', 'Cm[i]', 'xm[i]', 'ym[i]', '"""k"""'], {}), "(ax, Cm[i], xm[i], ym[i], 'k')\n", (17401, 17431), True, 'from calibration import calibrator as cl\n'), ((17478, 17546), 'calibration.calibrator.points2linearised', 'cl.points2linearised', (['(xm - objpoints[:, 0])', '(ym - objpoints[:, 1])', 'Cm'], {}), '(xm - objpoints[:, 0], ym - objpoints[:, 1], Cm)\n', (17498, 17546), True, 'from calibration import calibrator as cl\n'), ((19368, 19397), 'glob.glob', 'glob', (["(pathFiles + 'ext-*.npy')"], {}), "(pathFiles + 'ext-*.npy')\n", (19372, 19397), 
False, 'from glob import glob\n'), ((19989, 20011), 'numpy.prod', 'np.prod', (['repeats.shape'], {}), '(repeats.shape)\n', (19996, 20011), True, 'import numpy as np\n'), ((20622, 20662), 'scipy.optimize.curve_fit', 'curve_fit', (['funcExp', 'itime', 'stdIte[i]', 'p0'], {}), '(funcExp, itime, stdIte[i], p0)\n', (20631, 20662), False, 'from scipy.optimize import curve_fit\n'), ((20735, 20776), 'numpy.polyfit', 'np.polyfit', (['itime', 'stdIte[i]', '(1)'], {'cov': '(True)'}), '(itime, stdIte[i], 1, cov=True)\n', (20745, 20776), True, 'import numpy as np\n'), ((21366, 21383), 'numpy.diag', 'np.diag', (['traceCov'], {}), '(traceCov)\n', (21373, 21383), True, 'import numpy as np\n'), ((21506, 21705), 'numpy.save', 'np.save', (['"""/home/sebalander/Code/sebaPhD/resources/nov16/syntIntrCalib.npy"""', "{'mean': traceMean, 'cov': traceCov, 'camera': camera, 'model': model,\n 'trueVals': xAllT, 'datafile': fullDataFile}"], {}), "('/home/sebalander/Code/sebaPhD/resources/nov16/syntIntrCalib.npy',\n {'mean': traceMean, 'cov': traceCov, 'camera': camera, 'model': model,\n 'trueVals': xAllT, 'datafile': fullDataFile})\n", (21513, 21705), True, 'import numpy as np\n'), ((21947, 21964), 'numpy.abs', 'np.abs', (['traceMean'], {}), '(traceMean)\n', (21953, 21964), True, 'import numpy as np\n'), ((1442, 1465), 'scipy.special.gamma', 'sc.special.gamma', (['(n / 2)'], {}), '(n / 2)\n', (1458, 1465), True, 'import scipy as sc\n'), ((1677, 1706), 'scipy.special.gamma', 'sc.special.gamma', (['((n + 1) / 2)'], {}), '((n + 1) / 2)\n', (1693, 1706), True, 'import scipy as sc\n'), ((1917, 1944), 'scipy.special.gamma', 'sc.special.gamma', (['(n / 2 + 1)'], {}), '(n / 2 + 1)\n', (1933, 1944), True, 'import scipy as sc\n'), ((3956, 3971), 'numpy.log', 'np.log', (['pConst0'], {}), '(pConst0)\n', (3962, 3971), True, 'import numpy as np\n'), ((3988, 4013), 'numpy.log', 'np.log', (['(pConst1 * pPendi1)'], {}), '(pConst1 * pPendi1)\n', (3994, 4013), True, 'import numpy as np\n'), ((6222, 6255), 
'dev.bayesLib.flat2int', 'bl.flat2int', (['Xint', 'Ns', 'self.model'], {}), '(Xint, Ns, self.model)\n', (6233, 6255), True, 'from dev import bayesLib as bl\n'), ((6511, 6553), 'numpy.concatenate', 'np.concatenate', (['[self.rVecsT, self.tVecsT]'], {}), '([self.rVecsT, self.tVecsT])\n', (6525, 6553), True, 'import numpy as np\n'), ((6650, 6672), 'numpy.zeros', 'np.zeros', (['(self.nPt * 2)'], {}), '(self.nPt * 2)\n', (6658, 6672), True, 'import numpy as np\n'), ((6734, 6750), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (6742, 6750), True, 'import numpy as np\n'), ((6876, 6887), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (6884, 6887), True, 'import numpy as np\n'), ((7140, 7159), 'numpy.sum', 'np.sum', (['self.weiEsp'], {}), '(self.weiEsp)\n', (7146, 7159), True, 'import numpy as np\n'), ((7760, 7832), 'calibration.calibrator.points2linearised', 'cl.points2linearised', (['(xm - c.objpoints[:, 0])', '(ym - c.objpoints[:, 1])', 'Cm'], {}), '(xm - c.objpoints[:, 0], ym - c.objpoints[:, 1], Cm)\n', (7780, 7832), True, 'from calibration import calibrator as cl\n'), ((9328, 9369), 'numpy.random.randn', 'np.random.randn', (['case.nChains', 'case.nFree'], {}), '(case.nChains, case.nFree)\n', (9343, 9369), True, 'import numpy as np\n'), ((9622, 9708), 'pymc3.Uniform', 'pm.Uniform', (['"""xAl"""'], {'lower': 'allLow', 'upper': 'allUpp', 'shape': 'allLow.shape', 'transform': 'None'}), "('xAl', lower=allLow, upper=allUpp, shape=allLow.shape, transform\n =None)\n", (9632, 9708), True, 'import pymc3 as pm\n'), ((10128, 10193), 'pymc3.Normal', 'pm.Normal', (['"""Y_obs"""'], {'mu': 'xyMNor', 'sd': '(1)', 'observed': 'case.observedNormed'}), "('Y_obs', mu=xyMNor, sd=1, observed=case.observedNormed)\n", (10137, 10193), True, 'import pymc3 as pm\n'), ((10210, 10347), 'pymc3.DEMetropolis', 'pm.DEMetropolis', ([], {'vars': '[xAl]', 'S': 'Sal', 'tune': 'tuneBool', 'tune_interval': 'nTuneInter', 'tune_throughout': 'tune_thr', 'tally': 'tallyBool', 'scaling': 
'scaAl'}), '(vars=[xAl], S=Sal, tune=tuneBool, tune_interval=nTuneInter,\n tune_throughout=tune_thr, tally=tallyBool, scaling=scaAl)\n', (10225, 10347), True, 'import pymc3 as pm\n'), ((10507, 10728), 'pymc3.sample', 'pm.sample', ([], {'draws': 'nDraws', 'step': 'step', 'njobs': 'nChains', 'start': 'start', 'tune': 'nTune', 'chains': 'nChains', 'progressbar': '(True)', 'discard_tuned_samples': '(False)', 'cores': 'nCores', 'compute_convergence_checks': 'convChecksBool', 'parallelize': '(True)'}), '(draws=nDraws, step=step, njobs=nChains, start=start, tune=nTune,\n chains=nChains, progressbar=True, discard_tuned_samples=False, cores=\n nCores, compute_convergence_checks=convChecksBool, parallelize=True)\n', (10516, 10728), True, 'import pymc3 as pm\n'), ((12136, 12151), 'numpy.diag', 'np.diag', (['covNum'], {}), '(covNum)\n', (12143, 12151), True, 'import numpy as np\n'), ((12473, 12481), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (12478, 12481), False, 'from time import sleep\n'), ((12682, 12709), 'numpy.mean', 'np.mean', (['traceArray'], {'axis': '(2)'}), '(traceArray, axis=2)\n', (12689, 12709), True, 'import numpy as np\n'), ((12729, 12755), 'numpy.std', 'np.std', (['traceArray'], {'axis': '(2)'}), '(traceArray, axis=2)\n', (12735, 12755), True, 'import numpy as np\n'), ((12840, 12851), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (12848, 12851), True, 'import numpy as np\n'), ((12870, 12881), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (12878, 12881), True, 'import numpy as np\n'), ((13156, 13191), 'numpy.all', 'np.all', (['[probMean < 0, probStd < 0]'], {}), '([probMean < 0, probStd < 0])\n', (13162, 13191), True, 'import numpy as np\n'), ((13210, 13255), 'numpy.average', 'np.average', (['traceMean'], {'axis': '(1)', 'weights': 'weiEsp'}), '(traceMean, axis=1, weights=weiEsp)\n', (13220, 13255), True, 'import numpy as np\n'), ((13270, 13314), 'numpy.average', 'np.average', (['traceStd'], {'axis': '(1)', 'weights': 'weiEsp'}), '(traceStd, 
axis=1, weights=weiEsp)\n', (13280, 13314), True, 'import numpy as np\n'), ((13799, 13845), 'numpy.load', 'np.load', (['"""./resources/nov16/syntIntrCalib.npy"""'], {}), "('./resources/nov16/syntIntrCalib.npy')\n", (13806, 13845), True, 'import numpy as np\n'), ((16597, 16617), 'numpy.diag', 'np.diag', (['case.covOpt'], {}), '(case.covOpt)\n', (16604, 16617), True, 'import numpy as np\n'), ((18901, 18914), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (18911, 18914), True, 'import matplotlib.pyplot as plt\n'), ((18923, 18968), 'matplotlib.pyplot.plot', 'plt.plot', (['(t + intento * nDraws)', '(m.T - xAllOpt)'], {}), '(t + intento * nDraws, m.T - xAllOpt)\n', (18931, 18968), True, 'import matplotlib.pyplot as plt\n'), ((18977, 18990), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (18987, 18990), True, 'import matplotlib.pyplot as plt\n'), ((18999, 19034), 'matplotlib.pyplot.plot', 'plt.plot', (['(t + intento * nDraws)', 's.T'], {}), '(t + intento * nDraws, s.T)\n', (19007, 19034), True, 'import matplotlib.pyplot as plt\n'), ((19193, 19218), 'numpy.save', 'np.save', (['file', 'tracesList'], {}), '(file, tracesList)\n', (19200, 19218), True, 'import numpy as np\n'), ((21841, 21869), 'numpy.array', 'np.array', (['[[1] * 3, [2] * 3]'], {}), '([[1] * 3, [2] * 3])\n', (21849, 21869), True, 'import numpy as np\n'), ((3773, 3785), 'numpy.max', 'np.max', (['dif1'], {}), '(dif1)\n', (3779, 3785), True, 'import numpy as np\n'), ((3788, 3800), 'numpy.min', 'np.min', (['dif1'], {}), '(dif1)\n', (3794, 3800), True, 'import numpy as np\n'), ((13070, 13099), 'numpy.array', 'np.array', (['[probMean, probStd]'], {}), '([probMean, probStd])\n', (13078, 13099), True, 'import numpy as np\n'), ((13388, 13405), 'numpy.isclose', 'np.isclose', (['sa', '(0)'], {}), '(sa, 0)\n', (13398, 13405), True, 'import numpy as np\n'), ((20453, 20467), 'numpy.exp', 'np.exp', (['(-x / b)'], {}), '(-x / b)\n', (20459, 20467), True, 'import numpy as np\n'), ((3699, 
3711), 'numpy.max', 'np.max', (['dif0'], {}), '(dif0)\n', (3705, 3711), True, 'import numpy as np\n'), ((3714, 3726), 'numpy.min', 'np.min', (['dif0'], {}), '(dif0)\n', (3720, 3726), True, 'import numpy as np\n'), ((4227, 4238), 'numpy.sqrt', 'np.sqrt', (['c0'], {}), '(c0)\n', (4234, 4238), True, 'import numpy as np\n'), ((4281, 4298), 'numpy.linalg.det', 'np.linalg.det', (['c1'], {}), '(c1)\n', (4294, 4298), True, 'import numpy as np\n'), ((13893, 13907), 'numpy.deg2rad', 'np.deg2rad', (['(10)'], {}), '(10)\n', (13903, 13907), True, 'import numpy as np\n'), ((18161, 18175), 'numpy.deg2rad', 'np.deg2rad', (['(10)'], {}), '(10)\n', (18171, 18175), True, 'import numpy as np\n'), ((19506, 19519), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (19513, 19519), True, 'import numpy as np\n'), ((20700, 20713), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (20707, 20713), True, 'import numpy as np\n'), ((20814, 20827), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (20821, 20827), True, 'import numpy as np\n'), ((4466, 4475), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (4472, 4475), True, 'import numpy as np\n'), ((11590, 11599), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (11596, 11599), True, 'import numpy as np\n'), ((6982, 6991), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6988, 6991), True, 'import numpy as np\n'), ((2811, 2832), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rVecsT'], {}), '(rVecsT)\n', (2824, 2832), False, 'import cv2\n'), ((7079, 7096), 'numpy.arange', 'np.arange', (['nDraws'], {}), '(nDraws)\n', (7088, 7096), True, 'import numpy as np\n')] |
import pyqtgraph as pg
from pyqtgraph import GraphicsLayoutWidget
from Utilities.IO import IOHelper
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from Utilities.Helper import settings
from pathlib import Path
import numpy as np
from PIL import Image
import datetime
from queue import Queue
from PyQt5 import QtGui
class ImgAnalysisSetting(QWidget):
abs_img = pyqtSignal(dict)
def __init__(self, parent=None):
super(ImgAnalysisSetting, self).__init__(parent=parent)
self.parent = parent
self.horizontalGroupBox1 = QGroupBox("absorbtion Parameters")
self.horizontalGroupBox2 = QGroupBox("flurence Parameters")
layout1 = QHBoxLayout()
layout2 = QHBoxLayout()
self.mag = QLabel('magnification',self)
self.magValue = QDoubleSpinBox()
self.magValue.setRange(0.01, 10)
self.magValue.setSingleStep(0.1)
layout1.addWidget(self.mag)
layout1.addWidget(self.magValue)
self.dia1 = QDialog() # create a dialog
self.dia2 = QDialog() # create a dialog
self.prefix_label = QLabel('Prefix',self)
self.prefix_text = QLineEdit('Data',self)
self.layoutprefix = QHBoxLayout(self)
self.layoutprefix.addWidget(self.prefix_label)
self.layoutprefix.addWidget(self.prefix_text)
self.dia2.setLayout(self.layoutprefix)
self.layoutv = QHBoxLayout(self)
self.layoutv1 = QVBoxLayout(self)
self.layoutv3 = QHBoxLayout(self)
# win = pg.GraphicsView()
l1 = pg.GraphicsLayout(border=(100, 100, 100))
win1 = pg.GraphicsLayoutWidget()
win1.setCentralItem(l1)
pg.setConfigOptions(imageAxisOrder='row-major')
# pg.setConfigOptions(leftButtonPan=False)
self.viewBox = l1.addPlot()
self.viewBox.hideAxis('left') # hide the left and right
self.viewBox.hideAxis('bottom')
self.img = pg.ImageItem()
self.viewBox.setMouseEnabled(x=False, y=False) # make it can not move
# pg.setConfigOptions(leftButtonPan=False)
self.viewBox.addItem(self.img)
self.layoutv1.addWidget(win1)
self.img_labelt1 = QLabel()
self.img_Push1 = QPushButton("=>", self)
self.img_Push2 = QPushButton('save', self)
self.img_Push1.clicked.connect(self.push_state)
self.img_Push2.clicked.connect(self.push2_state)
# self.img_labelt1.setText()
self.layoutv3.addWidget(self.img_labelt1)
self.layoutv3.addWidget(self.img_Push2)
self.layoutv1.addLayout(self.layoutv3)
# self.img_Push1.setEnabled(False)
self.img_Push2.setEnabled(False)
self.layoutv2 = QVBoxLayout(self)
plot_win1 = PlotWindow()
plot_win1.myserial = 0
plot_win2 = PlotWindow()
plot_win2.myserial = 1
plot_win3 = PlotWindow()
plot_win3.myserial = 2
self.layoutv2.addWidget(plot_win1)
self.layoutv2.addWidget(plot_win2)
self.layoutv2.addWidget(plot_win3)
self.layoutv.addLayout(self.layoutv2)
self.layoutv.addWidget(self.img_Push1)
self.layoutv.addLayout(self.layoutv1)
self.abs_img.connect(self.update_image2)
self.dia1.setLayout(self.layoutv)
screen = QtGui.QDesktopWidget().screenGeometry() # Control window size
self.dia1.setFixedSize(screen.width() * 51/ 100, screen.height() * 50/ 100)
win1.setFixedSize(screen.width() * 30 / 100, screen.height() * 45/ 100)
ToPwrLabel = QLabel('TotPwr_mW')
self.ToPwr = QDoubleSpinBox()
self.ToPwr.setMaximum(999)
self.ToPwr.setMinimum(0)
self.ToPwr.setSingleStep(1)
DetuLabel = QLabel('Detu_MHz')
self.Detu = QDoubleSpinBox()
self.Detu.setMaximum(999)
self.Detu.setMinimum(0)
self.Detu.setSingleStep(1)
DiaLabel = QLabel('Dia_mm')
self.Dia = QDoubleSpinBox()
self.Dia.setMaximum(999)
self.Dia.setMinimum(0)
self.Dia.setSingleStep(1)
# layout1.addWidget(self.roi)
# layout1.addWidget(self.cross_axes)
# layout1.addWidget(self.cross_axes2)
# layout1.addWidget(self.auto_save)
# layout1.addWidget(self.piefix)
# layout1.addWidget(self.absorb_img)
layout2.addWidget(ToPwrLabel)
layout2.addWidget(self.ToPwr)
layout2.addWidget(DetuLabel)
layout2.addWidget(self.Detu)
layout2.addWidget(DiaLabel)
layout2.addWidget(self.Dia)
self.horizontalGroupBox1.setLayout(layout1)
self.horizontalGroupBox2.setLayout(layout2)
self.vertical_layout = QVBoxLayout()
self.vertical_layout.addWidget(self.horizontalGroupBox1)
self.vertical_layout.addWidget(self.horizontalGroupBox2)
self.setLayout(self.vertical_layout)
self.default_setting()
self.magValue.valueChanged.connect(self.change_mag)
self.Detu.valueChanged.connect(self.change_Detu)
self.Dia.valueChanged.connect(self.change_Dia)
self.ToPwr.valueChanged.connect(self.change_ToPwr)
self.setFixedHeight(screen.width()*(9/16)*22/100)
def update_image2(self,img_dict):
self.img.setImage(img_dict['img_data'])
self.img_labelt1.setText(img_dict['img_name'])
# self.data = img_dict['img_data']
# self.data_shape = self.data.shape
def push2_state(self):
fpath = IOHelper.get_config_setting('DATA_PATH')
fpath = Path(fpath)
dir_path = fpath.joinpath(str(datetime.datetime.now())[2:].split('.')[0].replace(' ', '-').replace(':', '_'))
# print("save images to {}".format(dir_path))
if settings.m_path != []:
dir_path = settings.m_path
if not dir_path.exists():
dir_path.mkdir()
img_data = np.array(self.img.image)
# load image name by path
img_name = (self.img_labelt1.text())[0:20].replace(' ', '~').replace(':', '_').replace('-', '')
img_data = img_data[::-1]
import numpy
numpy.savetxt(r"{}\{}.ndata".format(dir_path, img_name), img_data, fmt='%.2e', delimiter=' ', newline='\n',
header='', footer='', comments=' ', encoding=None)
print("save images to {}".format(dir_path))
def push_state(self):
if settings.absimgDatas[0] != [] and settings.absimgDatas[1] != [] and settings.absimgDatas[2] != []:
withatom = np.zeros((settings.absimgDatas[0].shape[0], settings.absimgDatas[0].shape[1]))
withoutatom = np.zeros((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))
totalmat = np.zeros((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))
settings.absimgDatas[3] = np.zeros((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))
# print('In the calculation')
import warnings
warnings.filterwarnings("ignore")
for ii in range(settings.absimgDatas[1].shape[0]):
for jj in range(settings.absimgDatas[1].shape[1]):
withatom[ii, jj] = settings.absimgDatas[0][ii, jj] - settings.absimgDatas[2][ii, jj] ####
withoutatom[ii, jj] = settings.absimgDatas[1][ii, jj] - settings.absimgDatas[2][ii, jj] ###
if withoutatom[ii, jj] != 0:
totalmat[ii, jj] = withatom[ii, jj] / withoutatom[ii, jj] ##########
else:
totalmat[ii, jj] = 1
if totalmat[ii, jj] >= 1 or totalmat[ii, jj] <= 0:
totalmat[ii, jj] = 1
settings.absimgDatas[3][ii, jj] = -np.log(totalmat[ii, jj])
# print(settings.absimgData[3][0:20,0:20])
timestamp = datetime.datetime.now()
self.abs_img.emit({'img_name': str(timestamp)[2:], 'img_data': settings.absimgDatas[3]})
self.img_Push2.setEnabled(True)
else:
print('Please add images')
def absorb_setting(self):
self.dia1.setWindowTitle('absorb image')
self.dia1.setWindowModality(Qt.NonModal)
self.dia1.setWindowFlags(Qt.WindowStaysOnTopHint)
self.dia1.show()
def prefix_setting(self):
self.dia2.setWindowTitle('prefix setting')
self.dia2.setWindowModality(Qt.ApplicationModal)
self.dia2.exec_()
def default_setting(self):
# self.roi.setChecked(False)
# self.cross_axes.setChecked(False)
# self.cross_axes2.setChecked(False)
self.magValue.setValue(settings.widget_params["Analyse Data Setting"]["magValue"])
self.Detu.setValue(settings.widget_params["Analyse Data Setting"]["Detu"])
self.Dia.setValue(settings.widget_params["Analyse Data Setting"]["Dia"])
self.ToPwr.setValue(settings.widget_params["Analyse Data Setting"]["ToPwr"])
def change_mag(self):
settings.widget_params["Analyse Data Setting"]["magValue"] = self.magValue.value()
def change_Detu(self):
settings.widget_params["Analyse Data Setting"]["Detu"] = self.Detu.value()
# print("new Detu is ", settings.widget_params["Analyse Data Setting"]["Detu"])
def change_Dia(self):
settings.widget_params["Analyse Data Setting"]["Dia"] = self.Dia.value()
# print("new Dia is ", settings.widget_params["Analyse Data Setting"]["Dia"])
def change_ToPwr(self):
settings.widget_params["Analyse Data Setting"]["ToPwr"] = self.ToPwr.value()
# print("new toPwr is ", settings.widget_params["Analyse Data Setting"]["ToPwr"])
class PlotWindow(QWidget):
img_dict = pyqtSignal(object)
myserial = 5
def __init__(self):
super(PlotWindow, self).__init__()
self.layout = QHBoxLayout(self)
pg.setConfigOptions(imageAxisOrder='row-major')
self.viewport = GraphicsLayoutWidget()
self.video_view = self.viewport.addViewBox()
self.video = pg.ImageItem()
self.video_view.addItem(self.video)
self.video_view.setMouseEnabled(x=False, y=False)#make it can not move
self.setLayout(self.layout)
self.layout.addWidget(self.viewport)
self.img_label = QLabel()
# self.horizontalLayout = QVBoxLayout()
# self.horizontalLayout.addWidget(self.img_label)
# self.layout.addLayout(self.horizontalLayout)
screen = QtGui.QDesktopWidget().screenGeometry()
self.setFixedSize(screen.width() * 15 / 100, screen.height() * 14.5 / 100)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
try:
fpath = IOHelper.get_config_setting('DATA_PATH')
img_fpath = QFileDialog.getOpenFileName(self, "Open File", fpath) # name path
strimg_fpath = str(img_fpath)
img_file = strimg_fpath[2:len(strimg_fpath) - 19]
img_path = Path(img_file)
file = open(img_path)
linescontent = file.readlines() # Read the file as a behavior unit
rows = len(linescontent) # get the numbers fo line
lines = len(linescontent[0].strip().split(' '))
img_data = np.zeros((rows, lines)) # Initialization matrix
row = 0
for line in linescontent:
line = line.strip().split(' ')
img_data[row, :] = line[:]
row += 1
file.close()
img_data = img_data[::-1]
img_name = img_path.stem
img = {
'img_name': img_name,
'img_data': img_data}
settings.absimgDatas[self.myserial] = img_data
self.img_plot(img)
except TypeError:
return
except PermissionError:
return
def update_console(self, stri):
MAX_LINES = 50
stri = str(stri)
new_text = self.prompt_dock.console_text() + '\n' + stri
line_list = new_text.splitlines()
N_lines = min(MAX_LINES, len(line_list))
# limit output lines
new_text = '\n'.join(line_list[-N_lines:])
self.prompt_dock.console_text(new_text)
self.prompt_dock.automatic_scroll()
def save_image(self):
try:
if self.video.image is None:
print("have no image in window")
return
fpath = IOHelper.get_config_setting('DATA_PATH')
fpath = Path(fpath)
dir_path = fpath.joinpath(str(datetime.datetime.now()).split('.')[0].replace(' ', '-').replace(':', '_'))
# print("save images to {}".format(dir_path))
if not dir_path.exists():
dir_path.mkdir()
img_data = np.array(self.video.image)
# load image name by path
img_name1 = settings.widget_params["Analyse Data Setting"]["Prefix"]
img_name2 = (self.img_label.text())[0:20].replace(' ', '~').replace(':', '').replace('-', '')
img_name = str(img_name1) + str(img_name2)
img_data = img_data[::-1]
img_data = Image.fromarray(img_data)
img_data.save(r"{}\{}.png".format(dir_path, img_name))
print("save images to {}".format(dir_path))
# print("images have saved.")
except OSError:
print('Only new version files can be saved.')
def img_plot(self, img_dict):
self.video.setImage(img_dict['img_data'])
self.img_label.setText(img_dict['img_name'])
def clear_win(self):
self.video.clear()
self.img_label.setText('')
| [
"PIL.Image.fromarray",
"pathlib.Path",
"pyqtgraph.GraphicsLayout",
"pyqtgraph.ImageItem",
"numpy.log",
"pyqtgraph.setConfigOptions",
"PyQt5.QtGui.QDesktopWidget",
"numpy.array",
"numpy.zeros",
"datetime.datetime.now",
"Utilities.IO.IOHelper.get_config_setting",
"pyqtgraph.GraphicsLayoutWidget"... | [((1552, 1593), 'pyqtgraph.GraphicsLayout', 'pg.GraphicsLayout', ([], {'border': '(100, 100, 100)'}), '(border=(100, 100, 100))\n', (1569, 1593), True, 'import pyqtgraph as pg\n'), ((1609, 1634), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (1632, 1634), True, 'import pyqtgraph as pg\n'), ((1675, 1722), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'imageAxisOrder': '"""row-major"""'}), "(imageAxisOrder='row-major')\n", (1694, 1722), True, 'import pyqtgraph as pg\n'), ((1934, 1948), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (1946, 1948), True, 'import pyqtgraph as pg\n'), ((5450, 5490), 'Utilities.IO.IOHelper.get_config_setting', 'IOHelper.get_config_setting', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (5477, 5490), False, 'from Utilities.IO import IOHelper\n'), ((5507, 5518), 'pathlib.Path', 'Path', (['fpath'], {}), '(fpath)\n', (5511, 5518), False, 'from pathlib import Path\n'), ((5846, 5870), 'numpy.array', 'np.array', (['self.img.image'], {}), '(self.img.image)\n', (5854, 5870), True, 'import numpy as np\n'), ((9847, 9894), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'imageAxisOrder': '"""row-major"""'}), "(imageAxisOrder='row-major')\n", (9866, 9894), True, 'import pyqtgraph as pg\n'), ((9919, 9941), 'pyqtgraph.GraphicsLayoutWidget', 'GraphicsLayoutWidget', ([], {}), '()\n', (9939, 9941), False, 'from pyqtgraph import GraphicsLayoutWidget\n'), ((10016, 10030), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (10028, 10030), True, 'import pyqtgraph as pg\n'), ((6466, 6544), 'numpy.zeros', 'np.zeros', (['(settings.absimgDatas[0].shape[0], settings.absimgDatas[0].shape[1])'], {}), '((settings.absimgDatas[0].shape[0], settings.absimgDatas[0].shape[1]))\n', (6474, 6544), True, 'import numpy as np\n'), ((6571, 6649), 'numpy.zeros', 'np.zeros', (['(settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1])'], {}), 
'((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))\n', (6579, 6649), True, 'import numpy as np\n'), ((6673, 6751), 'numpy.zeros', 'np.zeros', (['(settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1])'], {}), '((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))\n', (6681, 6751), True, 'import numpy as np\n'), ((6790, 6868), 'numpy.zeros', 'np.zeros', (['(settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1])'], {}), '((settings.absimgDatas[1].shape[0], settings.absimgDatas[1].shape[1]))\n', (6798, 6868), True, 'import numpy as np\n'), ((6952, 6985), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (6975, 6985), False, 'import warnings\n'), ((7833, 7856), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7854, 7856), False, 'import datetime\n'), ((12531, 12571), 'Utilities.IO.IOHelper.get_config_setting', 'IOHelper.get_config_setting', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (12558, 12571), False, 'from Utilities.IO import IOHelper\n'), ((12592, 12603), 'pathlib.Path', 'Path', (['fpath'], {}), '(fpath)\n', (12596, 12603), False, 'from pathlib import Path\n'), ((3287, 3309), 'PyQt5.QtGui.QDesktopWidget', 'QtGui.QDesktopWidget', ([], {}), '()\n', (3307, 3309), False, 'from PyQt5 import QtGui\n'), ((10450, 10472), 'PyQt5.QtGui.QDesktopWidget', 'QtGui.QDesktopWidget', ([], {}), '()\n', (10470, 10472), False, 'from PyQt5 import QtGui\n'), ((10698, 10738), 'Utilities.IO.IOHelper.get_config_setting', 'IOHelper.get_config_setting', (['"""DATA_PATH"""'], {}), "('DATA_PATH')\n", (10725, 10738), False, 'from Utilities.IO import IOHelper\n'), ((10974, 10988), 'pathlib.Path', 'Path', (['img_file'], {}), '(img_file)\n', (10978, 10988), False, 'from pathlib import Path\n'), ((11271, 11294), 'numpy.zeros', 'np.zeros', (['(rows, lines)'], {}), '((rows, lines))\n', (11279, 11294), True, 'import numpy as np\n'), ((12878, 12904), 'numpy.array', 
'np.array', (['self.video.image'], {}), '(self.video.image)\n', (12886, 12904), True, 'import numpy as np\n'), ((13270, 13295), 'PIL.Image.fromarray', 'Image.fromarray', (['img_data'], {}), '(img_data)\n', (13285, 13295), False, 'from PIL import Image\n'), ((7728, 7752), 'numpy.log', 'np.log', (['totalmat[ii, jj]'], {}), '(totalmat[ii, jj])\n', (7734, 7752), True, 'import numpy as np\n'), ((5557, 5580), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5578, 5580), False, 'import datetime\n'), ((12646, 12669), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12667, 12669), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 23 16:16:12 2019
@author: Nate
"""
from scipy import random
import numpy as np
import matplotlib.pyplot as plt
a = 0
b = 1
N = 10000
xrand = random.uniform(a,b,N)
to_plot = []
to_plot_scatter = []
def my_func(x):
return(4/(1+x**2))
plotting3 = []
integral = 0.0
for i in range(N):
x = my_func(xrand[i])
integral += my_func(xrand[i])
answer = integral*(b-a)/(i+1)
plotting3.append((answer-np.pi)/np.pi)
to_plot.append(answer)
to_plot_scatter.append(x)
fig1, ax3 = plt.subplots()
ax3.plot(to_plot)
ax3.plot(range(N),np.full(N,np.pi),'r')
ax3.set_ylabel('Integral Evaluation')
ax3.set_xlabel('Counts')
ax3.set_ylim(1.9,4.1)
ax3.legend(('Approximate Integral','Exact'), loc='upper right')
ax3.set_title("Monte Carlo - $\int_{0}^{1} \\frac{4}{1+x^2} dx$", va='bottom')
fig1, ax4 = plt.subplots()
plotting2 = []
plotting2x = []
for i in range(N):
plotting2.append(my_func(i/N))
plotting2x.append(i/N)
#plotting2.append(np.sin(i/N))
ax4.plot(plotting2x,plotting2)
ax4.set_ylabel('$f(x_i)$')
ax4.set_xlabel('$x_i$')
ax4.set_title("Exact Solution of $\\frac{4}{1+x^2}$", va='bottom')
fig1, ax5 = plt.subplots()
ax5.plot(np.zeros(N))
ax5.plot(plotting3)
ax5.set_ylabel('Error')
ax5.set_xlabel('Counts')
ax5.set_title("Accuracy", va='bottom')
fig1, ax6 = plt.subplots()
ax6.scatter(range(0,N),to_plot_scatter, s=.5)
ax6.set_ylabel('$f(x_i)$')
ax6.set_xlabel('Counts')
ax6.set_title("$f(x_i)$ at random points from 0 to 1", va='bottom')
plt.show()
| [
"numpy.zeros",
"scipy.random.uniform",
"numpy.full",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((194, 217), 'scipy.random.uniform', 'random.uniform', (['a', 'b', 'N'], {}), '(a, b, N)\n', (208, 217), False, 'from scipy import random\n'), ((567, 581), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (579, 581), True, 'import matplotlib.pyplot as plt\n'), ((886, 900), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (898, 900), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1231), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1229, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1390), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1388, 1390), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1568, 1570), True, 'import matplotlib.pyplot as plt\n'), ((621, 638), 'numpy.full', 'np.full', (['N', 'np.pi'], {}), '(N, np.pi)\n', (628, 638), True, 'import numpy as np\n'), ((1241, 1252), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1249, 1252), True, 'import numpy as np\n')] |
import liquepy as lq
import numpy as np
import eqsig
import pysra
import sfsimodels as sm
class EqlinStockwellAnalysis(object):
def __init__(self, soil_profile, in_sig, rus=None, wave_field='outcrop', store='surface', gibbs=0, t_inc=1.0, t_win=3.0, strain_at_incs=True, strain_ratio=0.9):
"""
Equivalent linear Stockwell Analysis
This method performs the eight step procedure outlined in Millen et al. (2021)
to obtain the surface acceleration time series from an input motion at the base of a 1D soil profile.
Note: a soil layer is a layer as defined in the soil_profile object, a slice is a layer from the pysra_profile.
Parameters
----------
soil_profile: sm.SoilProfile object
in_sig: eqsig.AccSignal object
Input motion at base
rus: array_like or None
A 2D array of pore pressure ratios of shape `(soil_profile.n_layers, in_sig.npts)`,
if none the 'total stress' conditions are assumed
wave_field: str
If input motion should be used as an `outcrop` or '`within` motion.
store: str
if 'surface' (default), it only stores the surface acceleration time series,
if 'all' then stores Stockwell transforms.
gibbs: int or None
If integer then zero-pad input motion to next power of 2, to reduce Gibb's effect
t_inc: float (default=1s)
Time increment of interval for determining transfer functions
t_win: float (default=3s)
Time window for determining maximum strain value
strain_at_incs: bool (default=True)
If true then compute effective strain at time intervals, else use same value for full time series
strain_ratio: float (default=0.9)
Ratio between effective strain and peak (maximum) strain
"""
assert isinstance(soil_profile, sm.SoilProfile)
org_npts = in_sig.npts
# Determine number of zeros required for zero padding
if gibbs is not None: # If it is an integer then add to the exponent of 2 to remove the Gibbs effect
nindex = int(np.ceil(np.log2(org_npts))) + gibbs
new_len = 2 ** nindex
diff_len = new_len - org_npts
front = 0 # int(diff_len / 2)
back = diff_len - front
else: # record length must be a factor of 4
back = int(4 * np.ceil(org_npts / 4) - org_npts)
front = 0
# pad the input signal with zeros to make length a factor of 4
in_sig = eqsig.AccSignal(np.pad(in_sig.values, (front, back), mode='constant'), in_sig.dt)
self.t_inds = np.arange(0, in_sig.npts - 1, int(t_inc / in_sig.dt), dtype=int) # indices of time intervals
self.t_inds = np.insert(self.t_inds, len(self.t_inds), in_sig.npts) # make sure last value is in list
ics = np.array((self.t_inds[1:] + self.t_inds[:-1]) / 2, dtype=int) # halfway between indices of time intervals
points = int(in_sig.npts / 2)
freqs = np.arange(0, points) / (points * in_sig.dt * 2) # All the frequencies in the Stockwell transform
# freqs_d2 = freqs[:int(points / 2)] # Frequencies needed to compute the transfer function
# Steps 1 & 2) Conduct and equivalent linear analysis and obtain strain time series
pysra_profile, strains = compute_pysra_strain_time_series(soil_profile, in_sig, target_height=0.5,
wave_field=wave_field)
# 3a) Calculate the effective strain in each time interval for each slice of the soil profile
iside = int(t_win / in_sig.dt) # width of window in increments
eff_strains = []
for i, depth in enumerate(pysra_profile.depth):
eff_strains.append([])
for tt in range(len(ics)):
if strain_at_incs:
si = max([ics[tt] - iside, 0])
ei = min([ics[tt] + iside, len(strains[i])])
max_strain = max(abs(strains[i][si: ei]))
else: # Note this is different to the pysra eff. strain -which estimates the peak from the strain tf
max_strain = max(abs(strains[i]))
eff_strains[i].append(strain_ratio * max_strain)
# 4a) Obtain the reduction in secant stiffness and increase in damping from pore pressure at each time interval
# Parameters defined in Millen et al. (2020)
dxi_ld_liq = 0.3 # (Delta-xi-low-density-at-liquefaction)
dxi_hd_liq = 0.1 # (Delta-xi-high-density-at-liquefaction)
gr_ld_liq = 0.03 # (secant-shear-modulus-ratio-low-density-at-liquefaction)
gr_hd_liq = 0.15 # (secant-shear-modulus-ratio-high-density-at-liquefaction)
x_ld = 0.45 # Low density threshold
x_hd = 0.8 # high density threshold
ru_gr_low = 0.3 # Low pore pressure ratio threshold for secant shear modulus change
ru_gr_high = 0.8 # High pore pressure ratio threshold for secant shear modulus change
ru_dx_low = 0.5 # Low pore pressure ratio threshold for damping increment change
ru_dx_high = 1.0 # High pore pressure ratio threshold for damping increment change
min_g_liq_vs_g0 = 0.001 # Limiting ratio between the shear modulus at liquefaction divided by initial shear mod
max_xi_liq = 0.3 # Maximum value for damping
# The arrays for the secant stiffness ratio and damping increase at each time interval from pore pressure
gred_is = np.ones((soil_profile.n_layers, len(ics)))
dxi_is = np.zeros((soil_profile.n_layers, len(ics)))
if rus is not None: # if pore pressure time series is defined then calculate the pore pressure corrections
assert len(rus[0]) == org_npts, (len(rus[0]), org_npts)
gr_liqs = np.ones(soil_profile.n_layers) # The secant stiffness ratio at liquefaction for each soil layer
dxi_liqs = np.zeros(soil_profile.n_layers) # The damping increase at liquefaction for each soil layer
for i in range(soil_profile.n_layers):
dr = soil_profile.layer(i + 1).relative_density
if dr is None:
if max(rus[i]):
raise ValueError('Relative density must be set for layer: ', i + 1)
else:
continue
# Calculate the secant stiffness ratio at liquefaction based on relative density
gr_liq = np.where(dr < x_ld, gr_ld_liq, gr_ld_liq + (gr_hd_liq - gr_ld_liq) / (x_hd - x_ld) * (dr - x_ld))
np.clip(gr_liq, None, gr_hd_liq, out=gr_liq)
gr_liqs[i] = gr_liq
# Calculate the damping increase at liquefaction based on relative density
dx_max = np.where(dr < x_ld, dxi_ld_liq, dxi_ld_liq + (dxi_hd_liq - dxi_ld_liq) / (x_hd - x_ld) * (dr - x_ld))
np.clip(dx_max, dxi_hd_liq, None, out=dx_max)
dxi_liqs[i] = dx_max
# zero pad pore pressure time series to be consistent with acceleration time series
rus = np.pad(rus, [(0, 0), (front, back)], mode='constant')
# Calculate the secant stiffness ratio at each time step based on pore pressure ratio (ru)
greds = np.where(rus < ru_gr_low, 1, 1 - (1 - gr_liqs[:, np.newaxis]) / (ru_gr_high - ru_gr_low) * (rus - ru_gr_low))
np.clip(greds, gr_liqs[:, np.newaxis], None, out=greds)
# Calculate the damping increase at each time step based on pore pressure ratio (ru)
dxs = np.where(rus < ru_dx_low, 0, dxi_liqs[:, np.newaxis] / (ru_dx_high - ru_dx_low) * (rus - ru_dx_low))
np.clip(dxs, None, dxi_liqs[:, np.newaxis], out=dxs)
# Calculate the secant stiffness ratio and damping increase at each time interval
for tt in range(len(ics)):
gred_is[:, tt] = np.mean(greds[:, self.t_inds[tt]: self.t_inds[tt + 1]], axis=1)
dxi_is[:, tt] = np.mean(dxs[:, self.t_inds[tt]: self.t_inds[tt + 1]], axis=1)
# 5) Develop input-to-surface transfer functions
self.tfs = [] # A list to store the transfer functions at each increment
for tt in range(len(self.t_inds[1:])):
layers = []
for i, depth in enumerate(pysra_profile.depth):
org_layer = pysra_profile.location('outcrop', depth=depth).layer
org_layer.strain = eff_strains[i][tt] # Apply effective strain (Step 3b)
shear_vel0 = org_layer.initial_shear_vel
shear_vel = org_layer.shear_vel
damping = org_layer.damping
slice_thickness = org_layer.thickness
# get pore pressure effects
ind = soil_profile.get_layer_index_by_depth(depth) - 1
dx = dxi_is[ind][tt]
gred = gred_is[ind][tt]
# 4b) determine the new shear modulus and damping accounting for strain and pore pressure
xi_liq = min([damping + dx, max_xi_liq])
vs_liq = max([np.sqrt(min_g_liq_vs_g0) * shear_vel0, shear_vel * np.sqrt(gred)])
pysra_sl = pysra.site.SoilType("soil", org_layer.unit_wt, None, xi_liq)
lay = pysra.site.Layer(pysra_sl, slice_thickness, vs_liq)
layers.append(lay)
# rebuild the pysra_profile with the new properties
strain_comp_profile = pysra.site.Profile(layers, wt_depth=soil_profile.gwl)
# determine the new transfer function for this interval
freq1, tf_values = lq.sra.calc_pysra_tf(strain_comp_profile, freqs, wave_field=wave_field)
# refactor transfer function to be applied to Stockwell transform
tf_values = np.flipud(np.conj(tf_values))
# tf_values = np.concatenate((tf_values, np.flipud(np.conj(tf_values))))
tf_values = tf_values.reshape(len(tf_values), 1)
self.tfs.append(tf_values)
# 6) Obtain the Stockwell transform of the input motion
in_sig.swtf = eqsig.stockwell.transform(in_sig.values)
# 7) Obtain the surface Stockwell transform by multiplying input Stockwell transform by transfer functions
ps = []
for ss in range(len(self.tfs)):
p1 = self.tfs[ss] * in_sig.swtf[:, self.t_inds[ss]:self.t_inds[ss + 1]]
ps.append(p1)
surf_st = np.concatenate(ps, axis=1)
# 8) Perform the inverse Stockwell transform to obtain the surface acceleration time series
iacc = eqsig.stockwell.itransform(surf_st)
# save the surface acceleration series as a parameter
self.surf_sig = eqsig.AccSignal(iacc, in_sig.dt)
if store == 'all': # Store the Stockwell transforms of the input motion if needed
# self.tfs = tfs
self.freqs = freqs
self.in_sig = in_sig
self.surf_sig.stockwell = surf_st
self.in_sig.smooth_freqs = np.linspace(0.2, 1 / (4 * in_sig.dt), 30)
self.surf_sig.smooth_freqs = np.linspace(0.2, 1 / (4 * in_sig.dt), 30)
def compute_pysra_strain_time_series(soil_profile, in_sig, d_inc=None, target_height=1.0, wave_field='outcrop', in_loc=-1, atype='eqlin'):
"""
Perform an equivalent linear analysis and obtain the strain time series at many depths
Parameters
----------
soil_profile: sm.SoilProfile object
in_sig: eqsig.AccSignal object
Input motion at base
d_inc: float
Target depth increment for each layer in soil_profile
target_height: float
Target depth increment for whole soil profile
wave_field: str
If input motion should be used as an `outcrop` or '`within` motion.
in_loc: int
If -1 then input motion at base, if 0 then input motion at surface
Returns
-------
"""
import pysra
m = pysra.motion.TimeSeriesMotion(filename=in_sig.label, description=None, time_step=in_sig.dt,
accels=in_sig.values / 9.8)
if d_inc is None:
d_inc = 1.0 * np.ones(soil_profile.n_layers)
profile = lq.sra.sm_profile_to_pysra(soil_profile, target_height=target_height, d_inc=d_inc)
strain_ratio = None
kw = {}
if strain_ratio is not None:
kw['strain_ratio'] = strain_ratio
if atype == 'eqlin':
calc = pysra.propagation.EquivalentLinearCalculator(**kw)
elif atype == 'fd':
calc = pysra.propagation.FrequencyDependentEqlCalculator(use_smooth_spectrum=False, **kw)
elif atype == 'fdk': # k=Kausel
calc = pysra.propagation.FrequencyDependentEqlCalculator(use_smooth_spectrum=True, **kw)
elif atype == 'linear':
calc = pysra.propagation.LinearElasticCalculator()
else:
raise ValueError(f'atype must: "eqlin", "fd", "fdk", "linear". Not {atype}')
if in_loc == -1:
in_depth = soil_profile.height
else:
in_depth = 0.0
calc(m, profile, profile.location(wave_field, depth=in_depth))
outs = []
for i, depth in enumerate(profile.depth):
outs.append(pysra.output.StrainTSOutput(pysra.output.OutputLocation('within', depth=depth),
in_percent=False))
outputs = pysra.output.OutputCollection(outs)
outputs(calc)
strains = []
for i, depth in enumerate(profile.depth):
strains.append(outputs[i].values)
return profile, strains
| [
"numpy.clip",
"numpy.sqrt",
"pysra.motion.TimeSeriesMotion",
"numpy.array",
"pysra.propagation.LinearElasticCalculator",
"numpy.arange",
"numpy.mean",
"liquepy.sra.sm_profile_to_pysra",
"numpy.where",
"numpy.linspace",
"pysra.site.SoilType",
"numpy.concatenate",
"pysra.output.OutputLocation"... | [((12000, 12123), 'pysra.motion.TimeSeriesMotion', 'pysra.motion.TimeSeriesMotion', ([], {'filename': 'in_sig.label', 'description': 'None', 'time_step': 'in_sig.dt', 'accels': '(in_sig.values / 9.8)'}), '(filename=in_sig.label, description=None,\n time_step=in_sig.dt, accels=in_sig.values / 9.8)\n', (12029, 12123), False, 'import pysra\n'), ((12247, 12334), 'liquepy.sra.sm_profile_to_pysra', 'lq.sra.sm_profile_to_pysra', (['soil_profile'], {'target_height': 'target_height', 'd_inc': 'd_inc'}), '(soil_profile, target_height=target_height, d_inc\n =d_inc)\n', (12273, 12334), True, 'import liquepy as lq\n'), ((13373, 13408), 'pysra.output.OutputCollection', 'pysra.output.OutputCollection', (['outs'], {}), '(outs)\n', (13402, 13408), False, 'import pysra\n'), ((2909, 2970), 'numpy.array', 'np.array', (['((self.t_inds[1:] + self.t_inds[:-1]) / 2)'], {'dtype': 'int'}), '((self.t_inds[1:] + self.t_inds[:-1]) / 2, dtype=int)\n', (2917, 2970), True, 'import numpy as np\n'), ((10175, 10215), 'eqsig.stockwell.transform', 'eqsig.stockwell.transform', (['in_sig.values'], {}), '(in_sig.values)\n', (10200, 10215), False, 'import eqsig\n'), ((10515, 10541), 'numpy.concatenate', 'np.concatenate', (['ps'], {'axis': '(1)'}), '(ps, axis=1)\n', (10529, 10541), True, 'import numpy as np\n'), ((10658, 10693), 'eqsig.stockwell.itransform', 'eqsig.stockwell.itransform', (['surf_st'], {}), '(surf_st)\n', (10684, 10693), False, 'import eqsig\n'), ((10781, 10813), 'eqsig.AccSignal', 'eqsig.AccSignal', (['iacc', 'in_sig.dt'], {}), '(iacc, in_sig.dt)\n', (10796, 10813), False, 'import eqsig\n'), ((12481, 12531), 'pysra.propagation.EquivalentLinearCalculator', 'pysra.propagation.EquivalentLinearCalculator', ([], {}), '(**kw)\n', (12525, 12531), False, 'import pysra\n'), ((2601, 2654), 'numpy.pad', 'np.pad', (['in_sig.values', '(front, back)'], {'mode': '"""constant"""'}), "(in_sig.values, (front, back), mode='constant')\n", (2607, 2654), True, 'import numpy as 
np\n'), ((3071, 3091), 'numpy.arange', 'np.arange', (['(0)', 'points'], {}), '(0, points)\n', (3080, 3091), True, 'import numpy as np\n'), ((5897, 5927), 'numpy.ones', 'np.ones', (['soil_profile.n_layers'], {}), '(soil_profile.n_layers)\n', (5904, 5927), True, 'import numpy as np\n'), ((6017, 6048), 'numpy.zeros', 'np.zeros', (['soil_profile.n_layers'], {}), '(soil_profile.n_layers)\n', (6025, 6048), True, 'import numpy as np\n'), ((7191, 7244), 'numpy.pad', 'np.pad', (['rus', '[(0, 0), (front, back)]'], {'mode': '"""constant"""'}), "(rus, [(0, 0), (front, back)], mode='constant')\n", (7197, 7244), True, 'import numpy as np\n'), ((7368, 7481), 'numpy.where', 'np.where', (['(rus < ru_gr_low)', '(1)', '(1 - (1 - gr_liqs[:, np.newaxis]) / (ru_gr_high - ru_gr_low) * (rus -\n ru_gr_low))'], {}), '(rus < ru_gr_low, 1, 1 - (1 - gr_liqs[:, np.newaxis]) / (ru_gr_high -\n ru_gr_low) * (rus - ru_gr_low))\n', (7376, 7481), True, 'import numpy as np\n'), ((7490, 7545), 'numpy.clip', 'np.clip', (['greds', 'gr_liqs[:, np.newaxis]', 'None'], {'out': 'greds'}), '(greds, gr_liqs[:, np.newaxis], None, out=greds)\n', (7497, 7545), True, 'import numpy as np\n'), ((7661, 7765), 'numpy.where', 'np.where', (['(rus < ru_dx_low)', '(0)', '(dxi_liqs[:, np.newaxis] / (ru_dx_high - ru_dx_low) * (rus - ru_dx_low))'], {}), '(rus < ru_dx_low, 0, dxi_liqs[:, np.newaxis] / (ru_dx_high -\n ru_dx_low) * (rus - ru_dx_low))\n', (7669, 7765), True, 'import numpy as np\n'), ((7774, 7826), 'numpy.clip', 'np.clip', (['dxs', 'None', 'dxi_liqs[:, np.newaxis]'], {'out': 'dxs'}), '(dxs, None, dxi_liqs[:, np.newaxis], out=dxs)\n', (7781, 7826), True, 'import numpy as np\n'), ((9546, 9599), 'pysra.site.Profile', 'pysra.site.Profile', (['layers'], {'wt_depth': 'soil_profile.gwl'}), '(layers, wt_depth=soil_profile.gwl)\n', (9564, 9599), False, 'import pysra\n'), ((9699, 9770), 'liquepy.sra.calc_pysra_tf', 'lq.sra.calc_pysra_tf', (['strain_comp_profile', 'freqs'], {'wave_field': 'wave_field'}), 
'(strain_comp_profile, freqs, wave_field=wave_field)\n', (9719, 9770), True, 'import liquepy as lq\n'), ((11084, 11125), 'numpy.linspace', 'np.linspace', (['(0.2)', '(1 / (4 * in_sig.dt))', '(30)'], {}), '(0.2, 1 / (4 * in_sig.dt), 30)\n', (11095, 11125), True, 'import numpy as np\n'), ((11167, 11208), 'numpy.linspace', 'np.linspace', (['(0.2)', '(1 / (4 * in_sig.dt))', '(30)'], {}), '(0.2, 1 / (4 * in_sig.dt), 30)\n', (11178, 11208), True, 'import numpy as np\n'), ((12202, 12232), 'numpy.ones', 'np.ones', (['soil_profile.n_layers'], {}), '(soil_profile.n_layers)\n', (12209, 12232), True, 'import numpy as np\n'), ((12571, 12657), 'pysra.propagation.FrequencyDependentEqlCalculator', 'pysra.propagation.FrequencyDependentEqlCalculator', ([], {'use_smooth_spectrum': '(False)'}), '(use_smooth_spectrum=False,\n **kw)\n', (12620, 12657), False, 'import pysra\n'), ((6564, 6665), 'numpy.where', 'np.where', (['(dr < x_ld)', 'gr_ld_liq', '(gr_ld_liq + (gr_hd_liq - gr_ld_liq) / (x_hd - x_ld) * (dr - x_ld))'], {}), '(dr < x_ld, gr_ld_liq, gr_ld_liq + (gr_hd_liq - gr_ld_liq) / (x_hd -\n x_ld) * (dr - x_ld))\n', (6572, 6665), True, 'import numpy as np\n'), ((6678, 6722), 'numpy.clip', 'np.clip', (['gr_liq', 'None', 'gr_hd_liq'], {'out': 'gr_liq'}), '(gr_liq, None, gr_hd_liq, out=gr_liq)\n', (6685, 6722), True, 'import numpy as np\n'), ((6875, 6981), 'numpy.where', 'np.where', (['(dr < x_ld)', 'dxi_ld_liq', '(dxi_ld_liq + (dxi_hd_liq - dxi_ld_liq) / (x_hd - x_ld) * (dr - x_ld))'], {}), '(dr < x_ld, dxi_ld_liq, dxi_ld_liq + (dxi_hd_liq - dxi_ld_liq) / (\n x_hd - x_ld) * (dr - x_ld))\n', (6883, 6981), True, 'import numpy as np\n'), ((6993, 7038), 'numpy.clip', 'np.clip', (['dx_max', 'dxi_hd_liq', 'None'], {'out': 'dx_max'}), '(dx_max, dxi_hd_liq, None, out=dx_max)\n', (7000, 7038), True, 'import numpy as np\n'), ((7994, 8056), 'numpy.mean', 'np.mean', (['greds[:, self.t_inds[tt]:self.t_inds[tt + 1]]'], {'axis': '(1)'}), '(greds[:, self.t_inds[tt]:self.t_inds[tt + 1]], axis=1)\n', 
(8001, 8056), True, 'import numpy as np\n'), ((8090, 8150), 'numpy.mean', 'np.mean', (['dxs[:, self.t_inds[tt]:self.t_inds[tt + 1]]'], {'axis': '(1)'}), '(dxs[:, self.t_inds[tt]:self.t_inds[tt + 1]], axis=1)\n', (8097, 8150), True, 'import numpy as np\n'), ((9277, 9337), 'pysra.site.SoilType', 'pysra.site.SoilType', (['"""soil"""', 'org_layer.unit_wt', 'None', 'xi_liq'], {}), "('soil', org_layer.unit_wt, None, xi_liq)\n", (9296, 9337), False, 'import pysra\n'), ((9360, 9411), 'pysra.site.Layer', 'pysra.site.Layer', (['pysra_sl', 'slice_thickness', 'vs_liq'], {}), '(pysra_sl, slice_thickness, vs_liq)\n', (9376, 9411), False, 'import pysra\n'), ((9883, 9901), 'numpy.conj', 'np.conj', (['tf_values'], {}), '(tf_values)\n', (9890, 9901), True, 'import numpy as np\n'), ((12706, 12791), 'pysra.propagation.FrequencyDependentEqlCalculator', 'pysra.propagation.FrequencyDependentEqlCalculator', ([], {'use_smooth_spectrum': '(True)'}), '(use_smooth_spectrum=True,\n **kw)\n', (12755, 12791), False, 'import pysra\n'), ((13239, 13289), 'pysra.output.OutputLocation', 'pysra.output.OutputLocation', (['"""within"""'], {'depth': 'depth'}), "('within', depth=depth)\n", (13266, 13289), False, 'import pysra\n'), ((12831, 12874), 'pysra.propagation.LinearElasticCalculator', 'pysra.propagation.LinearElasticCalculator', ([], {}), '()\n', (12872, 12874), False, 'import pysra\n'), ((2178, 2195), 'numpy.log2', 'np.log2', (['org_npts'], {}), '(org_npts)\n', (2185, 2195), True, 'import numpy as np\n'), ((2441, 2462), 'numpy.ceil', 'np.ceil', (['(org_npts / 4)'], {}), '(org_npts / 4)\n', (2448, 2462), True, 'import numpy as np\n'), ((9183, 9207), 'numpy.sqrt', 'np.sqrt', (['min_g_liq_vs_g0'], {}), '(min_g_liq_vs_g0)\n', (9190, 9207), True, 'import numpy as np\n'), ((9234, 9247), 'numpy.sqrt', 'np.sqrt', (['gred'], {}), '(gred)\n', (9241, 9247), True, 'import numpy as np\n')] |
import itertools
import math

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.cluster.hierarchy
from sklearn import cluster

import pysan.core as pysan_core
def generate_sequences(count, length, alphabet):
    """
    Generates a number of sequences of a given length, with elements uniformly distributed using a given alphabet.
    This is useful for speed testing and other developer use-cases.
    Example
    --------
    >>> ps.generate_sequences(5, 10, [1,2,3]) #doctest: +SKIP
    [[2, 3, 2, 2, 3, 1, 2, 2, 1, 2],
    [3, 1, 3, 3, 1, 3, 1, 3, 3, 1],
    [1, 1, 2, 3, 3, 1, 3, 1, 3, 3],
    [1, 3, 1, 2, 3, 2, 3, 1, 3, 2],
    [1, 3, 2, 2, 2, 2, 3, 3, 1, 3]]
    """
    # delegate each individual sequence to the core generator
    return [pysan_core.generate_sequence(length, alphabet) for _ in range(count)]
# ===== ELEMENTS =====
def get_global_alphabet(sequences):
    """
    Computes the alphabet across all sequences in a collection.
    Example
    ---------
    >>> s1 = [1,1,1,2,2,2]
    >>> s2 = [1,1,2,2,3,3]
    >>> sequences = [s1,s2]
    >>> ps.get_global_alphabet(sequences)
    [1, 2, 3]
    """
    # accumulate every per-sequence alphabet into one set (no intermediate
    # list-of-lists flatten needed), then sort for a deterministic result
    elements = set()
    for sequence in sequences:
        elements.update(pysan_core.get_alphabet(sequence))
    return sorted(elements)
def get_all_element_counts(sequences):
    """
    Counts the number of occurances of each element across a collection of sequences,
    returning a dictionary mapping each element to its total count.
    Example
    ---------
    >>> ps.get_all_element_counts([[1,1,2],[2,3]])
    {1: 2, 2: 2, 3: 1}
    """
    counts = {}
    for sequence in sequences:
        for element in sequence:
            counts[element] = counts.get(element, 0) + 1
    return counts
def get_all_element_frequencies(sequences):
    """
    Computes the frequencies of each element across a collection of sequences,
    returning a dictionary mapping each element to its proportion of all
    elements observed (values sum to 1 for non-empty input).
    Example
    ---------
    >>> ps.get_all_element_frequencies([[1,1,2],[2,3]])
    {1: 0.4, 2: 0.4, 3: 0.2}
    """
    counts = {}
    total = 0
    for sequence in sequences:
        for element in sequence:
            counts[element] = counts.get(element, 0) + 1
            total += 1
    if total == 0:
        # no elements at all - avoid a ZeroDivisionError
        return {}
    return {element: count / total for element, count in counts.items()}
def get_first_position_reports(sequences):
    """
    Reports the positions of each first occurance of each element across a collection
    of sequences, returning one dictionary per sequence mapping each element to the
    index of its first appearance in that sequence.
    Example
    ---------
    >>> ps.get_first_position_reports([[1,1,2],[2,3,3]])
    [{1: 0, 2: 2}, {2: 0, 3: 1}]
    """
    reports = []
    for sequence in sequences:
        first_positions = {}
        for position, element in enumerate(sequence):
            # only the first sighting of each element is recorded
            if element not in first_positions:
                first_positions[element] = position
        reports.append(first_positions)
    return reports
# ===== NGRAM METHODS =====
def get_common_ngrams(sequences, ngram_length):
    """
    Extracts n-grams which appear one or more times in a collection of sequences, returning the number of occurances in a dictionary.
    Example
    ---------
    >>> s1 = [1,1,1,1,1,2,2,2,2,3,3,3,4,4,4]
    >>> s2 = [1,1,1,2,2,2,2,2,3,3,3,3,4,4,4]
    >>> s3 = [1,1,2,2,2,2,2,3,3,3,2,3,3,4,4]
    >>> sequences = [s1,s2,s3]
    >>> ps.get_common_ngrams(sequences, 3) #doctest: +NORMALIZE_WHITESPACE
    {'[1, 1, 2]': 3,
    '[1, 2, 2]': 3,
    '[2, 2, 2]': 8,
    '[2, 2, 3]': 3,
    '[2, 3, 3]': 4,
    '[3, 3, 3]': 4,
    '[3, 3, 4]': 3,
    '[3, 4, 4]': 3}
    """
    # use None (not the string 'none') as the "no sequences seen yet" sentinel
    found_ngrams = None
    for sequence in sequences:
        ngrams = pysan_core.get_ngram_counts(sequence, ngram_length)
        if found_ngrams is None:
            found_ngrams = ngrams
        else:
            # keep only ngrams present in every sequence so far,
            # summing their occurrence counts as we go
            found_ngrams = {key: count + ngrams[key]
                            for key, count in found_ngrams.items()
                            if key in ngrams}
    return found_ngrams
def get_all_unique_ngrams(sequences, n):
    """
    Creates a list of all unique ngrams in a collection of sequences,
    in order of first appearance.
    Example
    ---------
    >>> ps.get_all_unique_ngrams([[1,2,1,2],[2,1,2]], 2)
    [[1, 2], [2, 1]]
    """
    seen = set()
    unique_ngrams = []
    for sequence in sequences:
        for i in range(len(sequence) - n + 1):
            ngram = sequence[i:i + n]
            # lists are unhashable, so deduplicate on the string form
            # (consistent with the str keys used by get_common_ngrams)
            key = str(ngram)
            if key not in seen:
                seen.add(key)
                unique_ngrams.append(ngram)
    return unique_ngrams
def get_every_ngram(sequences, n):
    """
    Creates a list of all ngrams across all sequences in a collection,
    including duplicates, in order of appearance.
    Example
    ---------
    >>> ps.get_every_ngram([[1,2,3],[2,3]], 2)
    [[1, 2], [2, 3], [2, 3]]
    """
    return [sequence[i:i + n]
            for sequence in sequences
            for i in range(len(sequence) - n + 1)]
def get_all_ngram_counts(sequences, n):
    """
    Computes the prevalence of ngrams in a collection of sequences, returning a
    dictionary mapping each ngram's string representation to its total number of
    occurrences, most common first.
    Example
    ---------
    >>> ps.get_all_ngram_counts([[1,1,1],[1,1,2]], 2)
    {'[1, 1]': 3, '[1, 2]': 1}
    """
    counts = {}
    for sequence in sequences:
        for i in range(len(sequence) - n + 1):
            # str keys keep the result consistent with get_common_ngrams
            key = str(sequence[i:i + n])
            counts[key] = counts.get(key, 0) + 1
    # sort by count, descending, matching the other frequency functions here
    return dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))
# ===== TRANSITIONS =====
def get_transition_frequencies(sequences):
    """
    Computes the number of transitions for all sequences in a collection.
    Example
    --------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,3,3]
    >>> s2 = [1,1,2,2,3,2,4,4]
    >>> s3 = [1,1,1,2,2,3,3,3]
    >>> s4 = [1,1,1,1,2,3,2,3]
    >>> sequences = [s1,s2,s3,s4]
    >>> ps.get_transition_frequencies(sequences) #doctest: +NORMALIZE_WHITESPACE
    {'[2, 3]': 5,
    '[1, 2]': 4,
    '[3, 2]': 2,
    '[2, 4]': 1}
    """
    # gather every transition from every sequence as its string form,
    # since lists cannot be dictionary keys
    transition_labels = []
    for sequence in sequences:
        transition_labels += [str(t) for t in pysan_core.get_transitions(sequence)]
    counts = {}
    for label in set(transition_labels):
        counts[label] = transition_labels.count(label)
    # most frequent transitions first
    return dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))
def get_all_transitions_matrix(sequences):
    """
    UC Computes a transition matrix across all transitions in every sequence in a collection.

    Not yet implemented; currently returns None. The intended matrix layout
    (e.g. rows/columns indexed by the global alphabet) is not specified here.
    """
    pass
def get_all_ntransitions(sequences):
    """
    Returns a list containing the number of transitions in each sequence in a
    collection, where a transition is a position whose element differs from the
    next one.
    Example
    ---------
    >>> ps.get_all_ntransitions([[1,1,2],[1,2,3],[2,2,2]])
    [1, 2, 0]
    """
    # zip each sequence against itself shifted by one to compare neighbours
    return [sum(1 for a, b in zip(sequence, sequence[1:]) if a != b)
            for sequence in sequences]
# ===== SPELLS =====
def get_all_spells(sequences):
    """
    Computes spells across a collection of sequences, returning a list of tuples
    where each tuple holds the element, the length of the spell, and the number
    of occurances in the collection.
    A spell is an unbroken run of a single repeated element.
    Example
    ---------
    >>> ps.get_all_spells([[1,1,2],[1,1,2]])
    [(1, 2, 2), (2, 1, 2)]
    """
    # NOTE: the previous version referenced ps.get_spells, a name not in scope
    # here (the core module is imported as pysan_core); spells are computed
    # directly with itertools.groupby instead.
    spell_counts = {}
    for sequence in sequences:
        for element, run in itertools.groupby(sequence):
            key = (element, len(list(run)))
            spell_counts[key] = spell_counts.get(key, 0) + 1
    return [(element, length, count)
            for (element, length), count in sorted(spell_counts.items())]
def get_longest_spells(sequences):
    """
    Extracts the longest spell for each sequence in a collection, returning the
    element, count, and starting position for each of the spells as a tuple
    (one per sequence; None for an empty sequence). Ties are broken in favour
    of the earliest spell.
    Example
    ---------
    >>> ps.get_longest_spells([[1,1,2,2,2,3]])
    [(2, 3, 2)]
    """
    results = []
    for sequence in sequences:
        longest = None
        position = 0
        for element, run in itertools.groupby(sequence):
            length = len(list(run))
            # strict '>' keeps the earliest spell on ties
            if longest is None or length > longest[1]:
                longest = (element, length, position)
            position += length
        results.append(longest)
    return results
# ===== COLLECTION ATTRIBUTES =====
def are_recurrent(sequences):
    """
    Returns true if any of the sequences in a given collection are recurrant, false otherwise.
    Example
    ---------
    >>> s1 = [1,2,3,4]
    >>> s2 = [3,2,4,5]
    >>> s3 = [2,3,4,1]
    >>> sequences = [s1,s2,s3]
    >>> ps.are_recurrent(sequences)
    False
    """
    # any() short-circuits on the first recurrent sequence, like the
    # original explicit loop did
    return any(pysan_core.is_recurrent(sequence) for sequence in sequences)
def get_summary_statistic(sequence, function):
    """
    Computes a summary statistic (e.g. entropy, complexity, or turbulence) for
    each sequence in a collection, returning the results as a list.

    Note: despite its singular name (kept for backward compatibility), the
    first parameter is a collection of sequences; ``function`` is applied to
    each one in turn.
    Example
    ---------
    >>> ps.get_summary_statistic([[1,2,3],[4,5]], len)
    [3, 2]
    """
    return [function(s) for s in sequence]
def get_routine_scores(sequences, duration):
    """
    UC Returns a list containing the routine scores for each sequence in a collection using :meth:`get_routine() <pysan.core.get_routine>`.

    Not yet implemented; currently returns None. ``duration`` is presumably
    the routine period passed through to the core routine measure - confirm
    against pysan.core.get_routine when implementing.
    """
    pass
def get_synchrony(sequences):
    """
    Computes the normalised synchrony between a two or more sequences.
    Synchrony here refers to positions with identical elements, e.g. two identical sequences have a synchrony of 1, two completely different sequences have a synchrony of 0.
    The value is normalised by dividing by the number of positions compared.
    This computation is defined in Cornwell's 2015 book on social sequence analysis, page 230.
    Example
    --------
    >>> s1 = [1,1,2,2,3]
    >>> s2 = [1,2,2,3,3]
    >>> sequences = [s1,s2]
    >>> ps.get_synchrony(sequences)
    0.6
    """
    # only positions present in every sequence can be compared
    compare_length = min(len(s) for s in sequences)
    matching_positions = 0
    for position in range(compare_length):
        column = [s[position] for s in sequences]
        # a position is synchronous when every sequence agrees with the first
        if all(element == column[0] for element in column):
            matching_positions += 1
    return matching_positions / compare_length
def get_sequence_frequencies(sequences):
    """
    Computes the frequencies of different sequences in a collection, returning a dictionary of their string representations and counts.
    Example
    --------
    >>> s1 = [1,1,2,2,3]
    >>> s2 = [1,2,2,3,3]
    >>> s3 = [1,1,2,2,2]
    >>> sequences = [s1,s2,s2,s3,s3,s3]
    >>> ps.get_sequence_frequencies(sequences) #doctest: +NORMALIZE_WHITESPACE
    {'[1, 1, 2, 2, 2]': 3,
    '[1, 2, 2, 3, 3]': 2,
    '[1, 1, 2, 2, 3]': 1}
    """
    # string representations make whole sequences usable as dict keys
    counts = {}
    for sequence in sequences:
        key = str(sequence)
        counts[key] = counts.get(key, 0) + 1
    # most common sequences first
    return dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))
# ===== DERIVATIVE SEQUENCES =====
def get_motif(sequences):
    """
    Computes the motif for a given collection of sequences.
    A motif is a representative sequence for all sequences in a collection, with blank values (0) being those which are variable within the collection, and fixed values which are not.
    Motifs are related to the measure of synchrony in that synchrony is equal to the number of non-blank elements in the motif.
    Example
    --------
    >>> s1 = [1,1,2,2,3]
    >>> s2 = [1,2,2,3,3]
    >>> s3 = [1,1,2,2,2]
    >>> sequences = [s1,s2,s3]
    >>> ps.get_motif(sequences)
    [1, 0, 2, 0, 0]
    """
    # only positions covered by every sequence can appear in the motif
    motif_length = min(len(s) for s in sequences)
    motif = []
    for position in range(motif_length):
        column = [s[position] for s in sequences]
        # fixed element if all sequences agree, otherwise 0 (blank)
        motif.append(column[0] if column.count(column[0]) == len(column) else 0)
    return motif
def get_modal_state(sequences):
    """
    Computes the modal states for each position in a collection of sequences, returning a sequence of tuples containing the modal element and its number of occurances at that position.
    Example
    --------
    >>> s1 = [1,1,1,2,2,3,3]
    >>> s2 = [1,2,2,2,2,3,3]
    >>> s3 = [1,1,1,1,2,2,3]
    >>> sequences = [s1,s2,s3]
    >>> ps.get_modal_state(sequences)
    [(1, 3), (1, 2), (1, 2), (2, 2), (2, 3), (3, 2), (3, 3)]
    """
    longest_sequence = max([len(s) for s in sequences])
    modal_elements = []
    for position in range(longest_sequence):
        elements_at_this_position = []
        for sequence in sequences:
            try:
                elements_at_this_position.append(sequence[position])
            except IndexError:
                # shorter sequences have no element at this position;
                # previously a bare except, which could mask other errors
                continue
        # this line leaves multi-modal position behaviour undefined
        modal_element = max(set(elements_at_this_position), key=elements_at_this_position.count)
        modal_elements.append((modal_element, elements_at_this_position.count(modal_element)))
    return modal_elements
# ===== EDIT DISTANCES =====
def get_optimal_distance(s1, s2, match=0, mismatch=-1, gap=-1):
    """
    Computes the optimal matching distance between two sequences using the `Needleman-Wunsch algorithm <https://www.sciencedirect.com/science/article/abs/pii/0022283670900574?via%3Dihub>`_ based on Devon Ryan's implementation found `here <https://www.biostars.org/p/231391/>`_.

    The distance is the negated optimal alignment score, so with the default
    (non-positive) penalties a larger value means less similar sequences.
    Example
    --------
    >>> s1 = [1,1,1,1,2,2,2,2]
    >>> s2 = [1,2,2,3,3,4,5,5]
    >>> ps.get_optimal_distance(s1,s2)
    7.0
    """
    # NOTE: the previous version also filled a pointer/traceback matrix that
    # was never used for the returned score - it has been removed. Only the
    # alignment score matrix is required.
    penalty = {'MATCH': match, 'MISMATCH': mismatch, 'GAP': gap}
    n = len(s1) + 1  # columns (s1 plus the leading gap column)
    m = len(s2) + 1  # rows (s2 plus the leading gap row)
    al_mat = np.zeros((m, n), dtype=float)
    # first row/column: aligning against nothing costs one gap per element
    for i in range(m):
        al_mat[i][0] = penalty['GAP'] * i
    for j in range(n):
        al_mat[0][j] = penalty['GAP'] * j
    # standard Needleman-Wunsch recurrence
    for i in range(1, m):
        for j in range(1, n):
            diagonal = al_mat[i - 1][j - 1] + (penalty['MATCH'] if s1[j - 1] == s2[i - 1] else penalty['MISMATCH'])
            horizontal = al_mat[i][j - 1] + penalty['GAP']
            vertical = al_mat[i - 1][j] + penalty['GAP']
            al_mat[i][j] = max(diagonal, horizontal, vertical)
    # optimal alignment score is the bottom-right cell
    score = al_mat[m - 1][n - 1]
    if score == 0:  # fixes -0 bug for completeness
        return 0
    return -score
def get_levenshtein_distance(s1, s2):
    """
    Computes the `Levenshtein II distance <https://journals.sagepub.com/doi/abs/10.1177/0049124110362526>`_ between two sequences, which is the optimal distance using only insertions and deletions.
    This is identical to the :meth:`get_optimal_distance` method with a mismatch cost of ~infinity (-9999999) and a gap cost of -1.
    See the :meth:`get_optimal_distance` method with its default parameters for the Levenshtein I distance.
    Example
    --------
    >>> s1 = [1,1,1,1,2,2,2,2]
    >>> s2 = [1,2,2,3,3,4,5,5]
    >>> ps.get_levenshtein_distance(s1,s2)
    10.0
    """
    # substitutions are priced out with a huge penalty, leaving indels only
    substitution_penalty = -9999999
    return get_optimal_distance(s1, s2, match=0, mismatch=substitution_penalty, gap=-1)
def get_hamming_distance(s1, s2):
    """
    Computes the Hamming distance between two sequences, which is the optimal distance using only substitutions (no indels).
    This is identical to the :meth:`get_optimal_distance` method with a mismatch cost of -1 and a gap cost of ~infinity (-999999).
    Note that this can only be used on sequences of the same length given the infinite cost of gaps.
    Example
    --------
    >>> s1 = [1,1,1,1,2,2,2,2]
    >>> s2 = [1,2,2,3,3,4,5,5]
    >>> ps.get_hamming_distance(s1,s2)
    7.0
    """
    # gaps are priced out with a huge penalty, leaving substitutions only
    if len(s1) == len(s2):
        return get_optimal_distance(s1, s2, match=0, mismatch=-1, gap=-999999)
    raise Exception('sequences provided are not equal length - cannot compute Hamming distance')
def get_combinatorial_distance(s1, s2):
    """
    Computes the combinatorial distance between two sequences.
    This is defined as 1 minus the number of common subsequences divided by the square root of the product of the number of subsequences in each sequence.
    See page 149 in Social Sequence Analysis by <NAME> for details.
    Example
    --------
    >>> s1 = [1,2,3]
    >>> s2 = [2,3,4]
    >>> ps.get_combinatorial_distance(s1,s2)
    0.5714285714285714
    """
    subs_1 = pysan_core.get_subsequences(s1)
    subs_2 = pysan_core.get_subsequences(s2)
    # subsequences are lists (unhashable), so intersect their string forms
    shared = set(str(sub) for sub in subs_1).intersection(str(sub) for sub in subs_2)
    denominator = math.sqrt(len(subs_1) * len(subs_2))
    return 1 - len(shared) / denominator
# ===== WHOLE SEQUENCE COMPARISON =====
def get_dissimilarity_matrix(sequences, function):
    """
    Computes a dissimilarity matrix using a given function.
    This function can be a measure of dissimilarity, distance, or any other measurement between two sequences.
    The column names and index on the matrix are the indexes of each sequences in the collection.
    Example
    ----------
    >>> s1 = [1,1,1,2,2,3,3,3]
    >>> s2 = [1,1,3,2,2,3,1,3]
    >>> s3 = [1,1,2,2,3,2,3,2]
    >>> sequences = [s1,s2,s3]
    >>> ps.get_dissimilarity_matrix(sequences, ps.get_optimal_distance) #doctest: +NORMALIZE_WHITESPACE
    0    1    2
    0  0.0  2.0  3.0
    1  2.0  0.0  3.0
    2  3.0  3.0  0.0
    """
    size = len(sequences)
    matrix = np.zeros((size, size))
    # evaluate the measure for every ordered pair of sequences
    for i in range(size):
        for j in range(size):
            matrix[i][j] = function(sequences[i], sequences[j])
    # label rows and columns with the sequence indexes
    return pd.DataFrame(matrix, columns=range(size), index=range(size))
def get_heirarchical_clustering(sequences, function):
    """
    Fits an `sklearn agglomerative clustering model <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html>`_ using the 'average' linkage criteria.
    The source code for this method is only two lines, so please copy to your own script to modify specific clustering parameters!

    ``function`` is the pairwise measure (e.g. :meth:`get_optimal_distance`)
    used to build the precomputed dissimilarity matrix that the model is fit on.
    NOTE(review): the ``affinity`` keyword was deprecated in newer scikit-learn
    releases in favour of ``metric`` - confirm against the installed version.
    Example
    ---------
    AgglomerativeClustering(affinity='precomputed', distance_threshold=0,
    linkage='average', n_clusters=None)
    """
    # pairwise dissimilarities are computed up front, hence affinity='precomputed'
    matrix = get_dissimilarity_matrix(sequences, function)
    model = cluster.AgglomerativeClustering(affinity='precomputed', linkage='average', distance_threshold=0, n_clusters=None).fit(matrix)
    return model
def get_ch_index(model):
    """
    UC Computes the Calinski-Harabasz index

    Not yet implemented; currently returns None. ``model`` is presumably a
    fitted clustering model (see :meth:`get_heirarchical_clustering`);
    sklearn.metrics.calinski_harabasz_score may be useful when implementing.
    """
    pass
# ============= MULTISEQUENCE PLOTTING ===============
def plot_common_ngrams(sequences, ngram_length):
    """
    Plot the number of occurances (per sequence) of ngrams common to a collection of sequences.
    .. plot::
    >>> s1 = [1,2,3,4,3,3,2,2,3,2,3,2,3,1,3]
    >>> s2 = [2,3,3,2,1,2,2,2,3,4,4,1,2,1,3]
    >>> s3 = [1,3,3,2,2,2,2,3,3,3,2,3,3,4,4]
    >>> sequences = [s1,s2,s3]
    >>> ps.plot_common_ngrams(sequences, 3) #doctest: +SKIP
    """
    found_ngrams = get_common_ngrams(sequences, ngram_length)
    # keys are string representations of ngrams (see get_common_ngrams);
    # eval recovers the list form of the most frequent one.
    # NOTE: the previous version also built a full list of eval'd ngrams
    # that was never used - removed.
    most_common_ngram = eval(max(found_ngrams, key=lambda key: found_ngrams[key]))
    for sequence in sequences:
        pysan_core.plot_sequence(sequence, most_common_ngram)
    return plt
def plot_sequences(sequences, gap=True):
    """
    Creates a scatter style sequence plot for a collection of sequences.

    With ``gap=True`` each sequence gets its own row of scatter markers; with
    ``gap=False`` sequences are stacked as touching unit-height bars.
    Returns the matplotlib.pyplot module so callers can further modify or show
    the figure.
    Example
    ----------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,2,4,4,3,2,1,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,1,2,3,2,2,2,3,3,2,4,4]
    >>> s3 = [1,1,1,2,2,3,2,4,4,3,2,1,2,3,3,3,4,4,4,3,3]
    >>> s4 = [1,1,1,1,2,3,2,3,3,3,3,1,2,2,3,3,3,4,4,4,4]
    >>> sequences = [s1,s2,s3,s4]
    >>> ps.plot_sequences(sequences) #doctest: +SKIP
    >>> ps.plot_sequences(sequences, gap=False) #doctest: +SKIP
    """
    max_sequence_length = max([len(s) for s in sequences])
    # scale the figure with the data so markers stay roughly square
    plt.figure(figsize=[max_sequence_length*0.3,0.3 * len(sequences)])
    for y,sequence in enumerate(sequences):
        np_sequence = np.array(sequence)
        # NOTE(review): alphabet_len is computed but never used below
        alphabet_len = len(pysan_core.get_alphabet(sequence))
        # reset the colour cycle so the same element gets the same colour in every row
        plt.gca().set_prop_cycle(None)
        unique_values = pysan_core.get_alphabet(sequence)
        for i, value in enumerate(unique_values):
            if gap:
                # NaN hides positions not equal to this value; row y is drawn at height y+1
                points = np.where(np_sequence == value, y + 1, np.nan)
                plt.scatter(x=range(len(np_sequence)), y=points, marker='s', label=value, s=100)
            else:
                # unit-height bars stacked via bottom=y, one colour per element
                # NOTE(review): label=i here (enumerate index) vs label=value above - confirm intended
                points = np.where(np_sequence == value, 1, np.nan)
                plt.bar(range(len(points)), points, bottom=[y for x in range(len(points))], width=1, align='edge', label=i)
    if gap:
        plt.ylim(0.4, len(sequences) + 0.6)
        plt.xlim(-0.6, max_sequence_length - 0.4)
    else:
        plt.ylim(0,len(sequences))
        plt.xlim(0,max_sequence_length)
    # de-duplicate legend entries (each element was labelled once per row)
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = dict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1.0, 1.1), loc='upper left')
    plt.tick_params(
        axis='y',
        which='both',
        left=False,
        labelleft=False)
    return plt
def plot_state_distribution(sequences):
    """
    Creates a state distribution plot based on a collection of sequences.

    Each position shows the fraction of sequences in each state as stacked bars.
    Returns the matplotlib.pyplot module so callers can modify or show the figure.
    Example
    --------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,3,4,4,3,2,2,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,2,2,3,2,2,2,3,3,3,4,4]
    >>> s3 = [1,1,1,2,2,3,3,3,4,3,2,2,2,3,3,3,4,4,4,3,3]
    >>> s4 = [1,1,1,1,2,3,2,3,3,3,3,2,2,2,3,3,3,4,4,4,4]
    >>> sequences = [s1,s2,s3,s4]
    >>> ps.plot_state_distribution(sequences) #doctest: +SKIP
    """
    longest_sequence = max([len(s) for s in sequences])
    alphabets = [list(pysan_core.get_alphabet(s)) for s in sequences]
    global_alphabet = list(set(list(itertools.chain.from_iterable(alphabets))))
    sorted_global_alphabet = sorted(global_alphabet)
    plt.figure()
    # running top of the stacked bars, one entry per position
    previous_bar_tops = [0 for x in range(longest_sequence)]
    for element in sorted_global_alphabet:
        element_position_counts = []
        for position in range(longest_sequence):
            elements_this_position = 0
            for sequence in sequences:
                try:
                    if sequence[position] == element:
                        elements_this_position += 1 / len(sequences)
                except IndexError:
                    # shorter sequences contribute nothing at this position;
                    # previously a bare except, which could mask other errors
                    continue
            element_position_counts.append(elements_this_position)
        plt.bar(range(longest_sequence), element_position_counts, bottom=previous_bar_tops, label=element, width=1, align='edge')
        previous_bar_tops = [a + b for a, b in zip(previous_bar_tops, element_position_counts)]
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.xlim(0, longest_sequence)
    plt.ylim(0,1)
    plt.ylabel('Frequency (n=' + str(len(sequences)) + ')')
    plt.xlabel('Position')
    return plt
def plot_sequence_frequencies(sequences):
    """
    Plots sequences using :meth:`plot_sequences`, ordering sequences with the most common at the bottom, and the rarest at the top. This is most useful when comparing short sequences.

    Returns the matplotlib.pyplot module, with the y axis relabelled as a
    percentage of the collection.
    Example
    ---------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,3,2,2,3,2,2,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,2,2,3,2,2,2,3,3,3,4,4]
    >>> s3 = [1,1,1,2,2,3,3,3,4,3,2,2,2,3,3,3,4,4,4,3,3]
    >>> sequences = [s1,s2,s2,s3,s3,s3]
    >>> ps.plot_sequence_frequencies(sequences) #doctest: +SKIP
    """
    # frequencies maps str(sequence) -> count, most common first
    frequencies = get_sequence_frequencies(sequences)
    # expand each counted sequence back into that many raw rows,
    # eval'ing the string keys back into lists
    raw_sequences_ordered = []
    for sequence, count in frequencies.items():
        for x in range(count):
            raw_sequences_ordered.append(eval(sequence))
    # note: this local `plt` rebinds the module name returned by plot_sequences
    plt = plot_sequences(raw_sequences_ordered, gap=False)
    plt.tick_params(
        axis='y',
        which='both',
        left=True,
        labelleft=True)
    # relabel the y axis from row counts to percentages of the collection
    plt.yticks([0, len(sequences) * 0.25, len(sequences) * 0.5, len(sequences) * 0.75, len(sequences)], [0,25,50,75,100])
    plt.ylabel('Frequency (%)')
    plt.xlabel('Position')
    return plt
def plot_transition_frequencies(sequences):
    """
    Creates a transition frequency plot for each transition in a collection of sequences.
    Example
    --------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,3,4,4,3,2,2,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,2,2,3,2,2,2,3,3,3,4,4]
    >>> s3 = [1,1,1,2,2,3,3,3,4,3,2,2,2,3,3,3,4,4,4,3,3]
    >>> s4 = [1,1,1,1,2,3,2,3,3,3,3,2,2,2,3,3,3,4,4,4,4]
    >>> sequences = [s1,s2,s3,s4]
    >>> ps.plot_transition_frequencies(sequences) #doctest: +SKIP
    """
    frequencies = get_transition_frequencies(sequences)
    # turn '[a, b]' keys into 'a>b' style x-axis labels
    labels = [transition.replace(', ', '>') for transition in frequencies]
    counts = list(frequencies.values())
    plt.bar(labels, counts)
    plt.xlim(-0.6, len(labels) - 0.4)
    plt.ylabel('Number of Transitions')
    plt.xlabel('State Transitions')
    return plt
def plot_mean_occurance(sequences):
    """
    Plots the mean number of occurances of each element across a collection of sequences.
    Example
    --------
    .. plot::
    >>> s1 = [1,1,1,1,1,2,2,2,2,3,3,3,4,4,4]
    >>> s2 = [1,1,1,2,2,2,2,2,3,3,3,3,4,4,4]
    >>> s3 = [1,1,2,2,2,2,2,3,3,3,2,3,3,4,4]
    >>> sequences = [s1,s2,s3]
    >>> ps.plot_mean_occurance(sequences) #doctest: +SKIP
    """
    # NOTE: the previous version also computed the longest sequence length,
    # which was never used - removed.
    alphabets = [list(pysan_core.get_alphabet(s)) for s in sequences]
    sorted_global_alphabet = sorted(set(itertools.chain.from_iterable(alphabets)))
    # one bar per element: total occurrences averaged over the collection
    for element in sorted_global_alphabet:
        occurances = sum(sequence.count(element) for sequence in sequences)
        plt.bar(element, occurances / len(sequences))
    plt.xticks(range(1, len(sorted_global_alphabet) + 1), sorted_global_alphabet)
    plt.xlabel('Element')
    plt.ylabel('Mean Occurance per Sequence')
    return plt
def plot_modal_state(sequences):
    """
    Plots the modal state for each position in a collection of sequences.

    Each position shows a bar for the modal element only, with height equal to
    the fraction of sequences in that state. Returns the matplotlib.pyplot
    module so callers can modify or show the figure.
    Example
    --------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,3]
    >>> s2 = [1,2,2,2,2,3,3]
    >>> s3 = [1,1,1,1,2,2,3]
    >>> sequences = [s1,s2,s3]
    >>> ps.plot_modal_state(sequences) #doctest: +SKIP
    """
    # (element, count) per position - see get_modal_state
    modal_elements = get_modal_state(sequences)
    longest_sequence = max([len(s) for s in sequences])
    plt.figure()
    global_alphabet = get_global_alphabet(sequences)
    # one bar series per element so the legend gets one entry per state;
    # positions where this element is not modal get height 0
    for element in global_alphabet:
        modal_element_counts = []
        for position in range(longest_sequence):
            if modal_elements[position][0] == element:
                modal_element_counts.append(modal_elements[position][1] / len(sequences))
            else:
                modal_element_counts.append(0)
        plt.bar(range(longest_sequence), modal_element_counts, label=element, width=1, align='edge')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.xlim(0, longest_sequence)
    plt.ylim(0, 1)
    plt.ylabel('State Frequency, n=' + str(len(sequences)))
    plt.xlabel('Position')
    return plt
def plot_entropy(sequences):
    """
    Plots the entropy at each position across a collection of sequences.
    Example
    ----------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,2,4,4,3,2,1,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,1,2,3,2,2,2,3,3,2,4,4]
    >>> s3 = [2,2,1,1,2,3,2,4,4,3,2,1,2,3,3,3,4,4,4,3,4]
    >>> s4 = [1,1,1,1,2,3,2,3,3,3,3,1,2,2,3,3,3,4,4,4,3]
    >>> sequences = [s1,s2,s3,s4]
    >>> ps.plot_entropy(sequences) #doctest: +SKIP
    """
    longest_sequence = max(len(s) for s in sequences)
    # entropy of the cross-section of elements at each position
    entropies = [pysan_core.get_entropy([sequence[position] for sequence in sequences])
                 for position in range(longest_sequence)]
    plt.ylim(0, 1)
    plt.plot(range(len(entropies)), entropies)
    plt.xlabel('Position, p')
    plt.ylabel('Normalised Entropy, e')
    return plt
def plot_dendrogram(model, **kwargs):
    """
    Plots a heirarchical clustering model - example taken from the `sklearn library <https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_dendrogram.html#sphx-glr-auto-examples-cluster-plot-agglomerative-dendrogram-py>`_

    ``model`` is a fitted agglomerative clustering model exposing
    ``children_``, ``labels_`` and ``distances_``; ``kwargs`` are forwarded to
    :func:`scipy.cluster.hierarchy.dendrogram`.
    NOTE: requires ``import scipy.cluster.hierarchy`` at module level - a bare
    ``import scipy`` does not guarantee the submodule is loaded.
    Example
    ----------
    .. plot::
    >>> s1 = [1,1,1,2,2,3,2,4,4,3,2,1,2,3,3,3,2,2,1,1,1]
    >>> s2 = [1,1,2,2,3,2,4,4,3,2,1,2,3,2,2,2,3,3,2,4,4]
    >>> s3 = [2,2,1,1,2,3,2,4,4,3,2,1,2,3,3,3,4,4,4,3,4]
    >>> s4 = [1,1,1,1,2,3,2,3,3,3,3,1,2,2,3,3,3,4,4,4,3]
    >>> sequences = [s1,s2,s3,s4]
    >>> model = ps.get_heirarchical_clustering(sequences, ps.get_optimal_distance)
    >>> ps.plot_dendrogram(model) #doctest: +SKIP
    """
    # count the number of original samples under each merge node,
    # as required by scipy's linkage matrix format
    n_samples = len(model.labels_)
    counts = np.zeros(model.children_.shape[0])
    for i, merge in enumerate(model.children_):
        current_count = 0
        for child_idx in merge:
            if child_idx < n_samples:
                current_count += 1  # leaf node
            else:
                current_count += counts[child_idx - n_samples]
        counts[i] = current_count
    # linkage rows: [child a, child b, merge distance, samples in cluster]
    linkage_matrix = np.column_stack([model.children_, model.distances_, counts]).astype(float)
    plot = scipy.cluster.hierarchy.dendrogram(linkage_matrix, **kwargs)
    return plot
"pysan.core.get_entropy",
"matplotlib.pyplot.ylabel",
"numpy.column_stack",
"pysan.core.get_subsequences",
"numpy.array",
"pysan.core.plot_sequence",
"sklearn.cluster.AgglomerativeClustering",
"pysan.core.generate_sequence",
"numpy.where",
"matplotlib.pyplot.xlabel",
"pysan.core.get_transitions"... | [((10410, 10439), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': 'float'}), '((m, n), dtype=float)\n', (10418, 10439), True, 'import numpy as np\n'), ((10495, 10522), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': 'str'}), '((m, n), dtype=str)\n', (10503, 10522), True, 'import numpy as np\n'), ((14171, 14202), 'pysan.core.get_subsequences', 'pysan_core.get_subsequences', (['s1'], {}), '(s1)\n', (14198, 14202), True, 'import pysan.core as pysan_core\n'), ((14214, 14245), 'pysan.core.get_subsequences', 'pysan_core.get_subsequences', (['s2'], {}), '(s2)\n', (14241, 14245), True, 'import pysan.core as pysan_core\n'), ((18754, 18822), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '(False)', 'labelleft': '(False)'}), "(axis='y', which='both', left=False, labelleft=False)\n", (18769, 18822), True, 'import matplotlib.pyplot as plt\n'), ((19569, 19581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19579, 19581), True, 'import matplotlib.pyplot as plt\n'), ((20272, 20326), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (20282, 20326), True, 'import matplotlib.pyplot as plt\n'), ((20328, 20357), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'longest_sequence'], {}), '(0, longest_sequence)\n', (20336, 20357), True, 'import matplotlib.pyplot as plt\n'), ((20359, 20373), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (20367, 20373), True, 'import matplotlib.pyplot as plt\n'), ((20432, 20454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (20442, 20454), True, 'import matplotlib.pyplot as plt\n'), ((21265, 21331), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '(True)', 'labelleft': '(True)'}), "(axis='y', which='both', left=True, 
labelleft=True)\n", (21280, 21331), True, 'import matplotlib.pyplot as plt\n'), ((21463, 21490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (%)"""'], {}), "('Frequency (%)')\n", (21473, 21490), True, 'import matplotlib.pyplot as plt\n'), ((21492, 21514), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (21502, 21514), True, 'import matplotlib.pyplot as plt\n'), ((22241, 22269), 'matplotlib.pyplot.bar', 'plt.bar', (['transitions', 'counts'], {}), '(transitions, counts)\n', (22248, 22269), True, 'import matplotlib.pyplot as plt\n'), ((22311, 22346), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Transitions"""'], {}), "('Number of Transitions')\n", (22321, 22346), True, 'import matplotlib.pyplot as plt\n'), ((22348, 22379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""State Transitions"""'], {}), "('State Transitions')\n", (22358, 22379), True, 'import matplotlib.pyplot as plt\n'), ((23295, 23316), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Element"""'], {}), "('Element')\n", (23305, 23316), True, 'import matplotlib.pyplot as plt\n'), ((23318, 23359), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Occurance per Sequence"""'], {}), "('Mean Occurance per Sequence')\n", (23328, 23359), True, 'import matplotlib.pyplot as plt\n'), ((23789, 23801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23799, 23801), True, 'import matplotlib.pyplot as plt\n'), ((24232, 24286), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (24242, 24286), True, 'import matplotlib.pyplot as plt\n'), ((24288, 24317), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'longest_sequence'], {}), '(0, longest_sequence)\n', (24296, 24317), True, 'import matplotlib.pyplot as plt\n'), ((24319, 24333), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (24327, 24333), True, 'import 
matplotlib.pyplot as plt\n'), ((24392, 24414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (24402, 24414), True, 'import matplotlib.pyplot as plt\n'), ((25162, 25176), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (25170, 25176), True, 'import matplotlib.pyplot as plt\n'), ((25221, 25246), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position, p"""'], {}), "('Position, p')\n", (25231, 25246), True, 'import matplotlib.pyplot as plt\n'), ((25248, 25283), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalised Entropy, e"""'], {}), "('Normalised Entropy, e')\n", (25258, 25283), True, 'import matplotlib.pyplot as plt\n'), ((26115, 26149), 'numpy.zeros', 'np.zeros', (['model.children_.shape[0]'], {}), '(model.children_.shape[0])\n', (26123, 26149), True, 'import numpy as np\n'), ((26529, 26589), 'scipy.cluster.hierarchy.dendrogram', 'scipy.cluster.hierarchy.dendrogram', (['linkage_matrix'], {}), '(linkage_matrix, **kwargs)\n', (26563, 26589), False, 'import scipy\n'), ((1080, 1106), 'pysan.core.get_alphabet', 'pysan_core.get_alphabet', (['s'], {}), '(s)\n', (1103, 1106), True, 'import pysan.core as pysan_core\n'), ((2358, 2409), 'pysan.core.get_ngram_counts', 'pysan_core.get_ngram_counts', (['sequence', 'ngram_length'], {}), '(sequence, ngram_length)\n', (2385, 2409), True, 'import pysan.core as pysan_core\n'), ((3666, 3702), 'pysan.core.get_transitions', 'pysan_core.get_transitions', (['sequence'], {}), '(sequence)\n', (3692, 3702), True, 'import pysan.core as pysan_core\n'), ((5300, 5333), 'pysan.core.is_recurrent', 'pysan_core.is_recurrent', (['sequence'], {}), '(sequence)\n', (5323, 5333), True, 'import pysan.core as pysan_core\n'), ((17088, 17141), 'pysan.core.plot_sequence', 'pysan_core.plot_sequence', (['sequence', 'most_common_ngram'], {}), '(sequence, most_common_ngram)\n', (17112, 17141), True, 'import pysan.core as pysan_core\n'), ((17859, 17877), 'numpy.array', 'np.array', 
(['sequence'], {}), '(sequence)\n', (17867, 17877), True, 'import numpy as np\n'), ((17986, 18019), 'pysan.core.get_alphabet', 'pysan_core.get_alphabet', (['sequence'], {}), '(sequence)\n', (18009, 18019), True, 'import pysan.core as pysan_core\n'), ((18449, 18490), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.6)', '(max_sequence_length - 0.4)'], {}), '(-0.6, max_sequence_length - 0.4)\n', (18457, 18490), True, 'import matplotlib.pyplot as plt\n'), ((18529, 18561), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'max_sequence_length'], {}), '(0, max_sequence_length)\n', (18537, 18561), True, 'import matplotlib.pyplot as plt\n'), ((25080, 25130), 'pysan.core.get_entropy', 'pysan_core.get_entropy', (['this_position_crosssection'], {}), '(this_position_crosssection)\n', (25102, 25130), True, 'import pysan.core as pysan_core\n'), ((717, 763), 'pysan.core.generate_sequence', 'pysan_core.generate_sequence', (['length', 'alphabet'], {}), '(length, alphabet)\n', (745, 763), True, 'import pysan.core as pysan_core\n'), ((16196, 16313), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'affinity': '"""precomputed"""', 'linkage': '"""average"""', 'distance_threshold': '(0)', 'n_clusters': 'None'}), "(affinity='precomputed', linkage='average',\n distance_threshold=0, n_clusters=None)\n", (16227, 16313), False, 'from sklearn import cluster\n'), ((17899, 17932), 'pysan.core.get_alphabet', 'pysan_core.get_alphabet', (['sequence'], {}), '(sequence)\n', (17922, 17932), True, 'import pysan.core as pysan_core\n'), ((18583, 18592), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18590, 18592), True, 'import matplotlib.pyplot as plt\n'), ((19390, 19416), 'pysan.core.get_alphabet', 'pysan_core.get_alphabet', (['s'], {}), '(s)\n', (19413, 19416), True, 'import pysan.core as pysan_core\n'), ((22852, 22878), 'pysan.core.get_alphabet', 'pysan_core.get_alphabet', (['s'], {}), '(s)\n', (22875, 22878), True, 'import pysan.core as pysan_core\n'), ((26445, 
26505), 'numpy.column_stack', 'np.column_stack', (['[model.children_, model.distances_, counts]'], {}), '([model.children_, model.distances_, counts])\n', (26460, 26505), True, 'import numpy as np\n'), ((17937, 17946), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (17944, 17946), True, 'import matplotlib.pyplot as plt\n'), ((18092, 18137), 'numpy.where', 'np.where', (['(np_sequence == value)', '(y + 1)', 'np.nan'], {}), '(np_sequence == value, y + 1, np.nan)\n', (18100, 18137), True, 'import numpy as np\n'), ((18245, 18286), 'numpy.where', 'np.where', (['(np_sequence == value)', '(1)', 'np.nan'], {}), '(np_sequence == value, 1, np.nan)\n', (18253, 18286), True, 'import numpy as np\n'), ((19472, 19512), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['alphabets'], {}), '(alphabets)\n', (19501, 19512), False, 'import itertools, math\n'), ((22934, 22974), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['alphabets'], {}), '(alphabets)\n', (22963, 22974), False, 'import itertools, math\n')] |
from image_to_ascii import image_to_ascii
import cv2,os,numpy as np
import concurrent.futures
from threading import Thread
from time import perf_counter,sleep as nap
import argparse
# may add sound later .\
class ascii_video :
    """Convert a video file into an ASCII-art rendered video.

    Pipeline (driven by ``runner``):
      1. read frames from the input video (``iter_each_frame``)
      2. map each pbs x pbs pixel block of a frame to an ASCII character
         (``image_to_ascii_convertor``)
      3. draw the characters onto a black canvas and write the frame to the
         output video (``frame_thread_superviser``)

    Use as a context manager so the capture/writer handles are released.
    """
    # Glyphs ordered from sparse to dense; __init__ uses them reversed so that
    # dark pixels map to dense glyphs ('@', '$', ...) and bright pixels to ' '.
    ascii_range_dictCHARS = [
        ' ','.',
        ',',"'",
        '"',':',
        ";",'-',
        '*','~',
        '+','=',
        '?','/',
        '|','#',
        '%','₹',
        '$','@']

    def __init__(self, video, output_video, fps, pbs):
        """Prepare the converter and build the intensity->glyph lookup table.

        Args:
            video: path of the input video file (must exist).
            output_video: path of the ASCII video to write.
            fps: desired output frame rate (capped at the input's own rate).
            pbs: pixel-block size; each pbs x pbs pixel block becomes one
                 ASCII character in the output.

        Raises:
            Exception: if ``video`` does not exist.
        """
        self.pbs = pbs
        self.video_name = video
        self.video_output_name = output_video
        self.fps = fps
        if not os.path.exists(self.video_name):
            raise Exception("File not found!!!")
        # Bug fix: the original code called self.ascii_range_dictCHARS.reverse(),
        # which reverses the *class* attribute in place and therefore flips the
        # intensity->glyph mapping every time a new instance is constructed.
        # Work on a reversed copy instead so repeated construction is stable.
        chars = list(reversed(self.ascii_range_dictCHARS))
        # Map every pixel intensity 0..255 to a glyph by splitting the range
        # into len(chars) evenly spaced buckets.
        self.pixle_to_ascii_dict = {}
        last = 0
        for index, key in enumerate(np.linspace(0, 255, num=len(chars), endpoint=True)):
            key = round(key)
            if index == 0:
                last = index
                continue
            for px in range(last, key + 1):
                self.pixle_to_ascii_dict[px] = chars[index]
            last = key
        self.pixle_count_in_block = self.pbs ** 2
        self.frame_list = []  # frames queued between the reader and the writer thread

    def __enter__(self):
        """Open the input video, probe its geometry/fps and create the writer."""
        print("starting the functions ...")
        # reading video stuff
        self.vidcap = cv2.VideoCapture(self.video_name)
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        print("Total frame count is --> ", self.total_frames)
        default_fps = round(self.vidcap.get(cv2.CAP_PROP_FPS))
        print("default fps of video is --> ", default_fps)
        # Keep only every `steps`-th frame so the output roughly matches the
        # requested fps; never upsample beyond the source frame rate.
        if self.fps < default_fps:
            self.steps = round(default_fps / self.fps)
        else:
            self.steps = 1
        self.fps = int(default_fps / self.steps)
        print("new fps of video is --> ", self.fps)
        self.reader_completed = False
        # Grab the first frame just to learn the video geometry.
        success, frame = self.vidcap.read()
        self.width, self.height = tuple(list(frame.shape)[0:2][::-1])
        # Reusable black canvas the ASCII glyphs are drawn onto.
        self.blank_black = np.zeros((self.height, self.width, 3), np.uint8)
        # Empty character-grid template (one cell per pbs x pbs pixel block).
        # Bug fix: `np.object` was removed in NumPy 1.24; the builtin `object`
        # is the documented replacement.
        self.ascii_in_pixles = np.full([self.height // self.pbs, self.width // self.pbs], "", dtype=object)
        # writting video stuff
        self.writer = cv2.VideoWriter(self.video_output_name, cv2.VideoWriter_fourcc(*"mp4v"),
                                      self.fps, tuple(list(frame.shape)[0:2][::-1]))
        return self

    def __exit__(self, a, b, c):
        """Release the capture and flush/close the output video file."""
        self.vidcap.release()
        print(f"\nSaving video as - { self.video_output_name }")
        self.writer.release()

    def iter_each_frame(self):
        """Reader thread body: append every `steps`-th frame to frame_list."""
        success = True
        t1 = Thread(target=lambda: None)  # placeholder so the first join() below is valid
        t1.start()
        while success:
            count = int(self.vidcap.get(1))  # property 1 == CAP_PROP_POS_FRAMES
            success, frame = self.vidcap.read()
            if count % self.steps == 0 and success:
                if success and self.total_frames > count:
                    print(f"Working on frame -> '{str(count).zfill(5)}'")
                t1.join()  # wait for the previous append so frames stay ordered
                t1 = Thread(target=lambda: self.frame_list.append(frame))
                t1.start()
        # make it save frames in thread in frame list
        self.reader_completed = True
        print("Just funishing up last -", len(self.frame_list), "process 😄😄")

    def image_to_ascii_convertor(self, image):
        """Convert one BGR frame into a 2-D grid of ASCII characters.

        The frame is converted to grayscale, split into pbs x pbs blocks, and
        each block's mean intensity is looked up in pixle_to_ascii_dict.
        """
        # Grayscale + transpose so image[w_slice, h_slice] addresses blocks.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).transpose()
        ascii_in_pixles = np.copy(self.ascii_in_pixles)
        for index_h, h in enumerate(range(0, self.height, self.pbs)):
            for index_w, w in enumerate(range(0, self.width, self.pbs)):
                try:
                    sum_ = sum(image[w:w + self.pbs, h:h + self.pbs].flatten())
                    average = round(float(sum_) / self.pixle_count_in_block)
                    ascii_in_pixles[index_h][index_w] = self.pixle_to_ascii_dict[average]
                except:
                    # Deliberate best-effort: edge blocks smaller than pbs x pbs
                    # would skew the average, so they are simply left blank.
                    pass
        return ascii_in_pixles

    def frame_to_ascii_to_ascii_image(self, current_frame):
        """Render one frame's ASCII grid onto a fresh black canvas and return it."""
        ascii_data = self.image_to_ascii_convertor(current_frame)
        image = np.copy(self.blank_black)
        for index_r, row in enumerate(ascii_data):
            for index_c, ascii_val in enumerate(row):
                if ascii_val.strip() != "":
                    image = cv2.putText(image, ascii_val, (index_c * self.pbs, (index_r + 1) * self.pbs),
                                        cv2.FONT_HERSHEY_PLAIN, 0.9, (255, 255, 255), 1)
        return image

    def add_ascii_frame(self, frame):
        """Convert a single frame to ASCII art and append it to the output video."""
        ascii_frame = self.frame_to_ascii_to_ascii_image(frame)
        self.writer.write(ascii_frame)  # save the frame

    def frame_thread_superviser(self):
        """Writer thread body: drain frame_list, convert frames and write them."""
        print("working on image computing")
        while not self.reader_completed:
            with concurrent.futures.ThreadPoolExecutor() as executor:
                new_frames = executor.map(self.frame_to_ascii_to_ascii_image, self.frame_list)
                for new_frame in new_frames:
                    Thread(target=lambda: self.frame_list.pop(0)).start()
                    self.writer.write(new_frame)  # save the frame
        # The reader may have appended more frames after the last pass above;
        # drain whatever is left before finishing.
        print("Just funishing up last -", len(self.frame_list), "process 😄😄")
        with concurrent.futures.ThreadPoolExecutor() as executor:
            new_frames = executor.map(self.frame_to_ascii_to_ascii_image, self.frame_list)
            for new_frame in new_frames:
                Thread(target=lambda: self.frame_list.pop(0)).start()
                self.writer.write(new_frame)  # save the frame
        print('Done. 😎')

    @classmethod
    def runner(cls, video, output_video, fps, pbs):
        """Run the full conversion: reader + writer threads until both finish.

        Args mirror __init__: input path, output path, target fps and
        pixel-block size.
        """
        # Local renamed from `ascii_video` to avoid shadowing the class name.
        with cls(video, output_video, fps, pbs) as av:
            reader = Thread(target=av.iter_each_frame)
            reader.start()
            # start the frame saving thread
            saver = Thread(target=av.frame_thread_superviser)
            saver.start()
            # waiting for complete all the reading frames
            reader.join()
            print('waiting for the results...')
            saver.join()
# example - args - inputVideo, outoutVideo,fps,pbs
# ascii_video.runner('ab.mp4',"Ascii_video2.mp4",30,10)
# ascii_video.runner('ab.mp4',"Ascii_video2.mp4",30,10)
if __name__ == "__main__":
    # Command-line entry point: parse the options, then run the converter
    # and report the total wall-clock time.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', help="name of the file you wanna use with extention !")
    parser.add_argument('-o', '--outfile', default="Ascii_video.mp4", help="name of the output file !")
    parser.add_argument('--fps', default=20, type=int, help="fps of the output videos ! (default = 20)")
    parser.add_argument('--pbs', default=15, type=int, help="pixle block size | smaller the number much fine result and but slow processing (default = 15 )")
    args = parser.parse_args()
    print(args)
    if not args.file:
        raise Exception('file name is important for the program use -h for help')
    start = perf_counter()
    ascii_video.runner(args.file, args.outfile, args.fps, args.pbs)
    finish = perf_counter()
    print(f"Total time Taken {finish - start}s")
else :
raise Exception('file name is important for the program use -h for help')
| [
"numpy.copy",
"os.path.exists",
"argparse.ArgumentParser",
"time.perf_counter",
"cv2.putText",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"numpy.full",
"threading.Thread"
] | [((7255, 7280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7278, 7280), False, 'import argparse\n'), ((1539, 1572), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.video_name'], {}), '(self.video_name)\n', (1555, 1572), False, 'import cv2, os, numpy as np\n'), ((2380, 2428), 'numpy.zeros', 'np.zeros', (['(self.height, self.width, 3)', 'np.uint8'], {}), '((self.height, self.width, 3), np.uint8)\n', (2388, 2428), True, 'import cv2, os, numpy as np\n'), ((2498, 2577), 'numpy.full', 'np.full', (['[self.height // self.pbs, self.width // self.pbs]', '""""""'], {'dtype': 'np.object'}), "([self.height // self.pbs, self.width // self.pbs], '', dtype=np.object)\n", (2505, 2577), True, 'import cv2, os, numpy as np\n'), ((3034, 3062), 'threading.Thread', 'Thread', ([], {'target': '(lambda : None)'}), '(target=lambda : None)\n', (3040, 3062), False, 'from threading import Thread\n'), ((3947, 3976), 'numpy.copy', 'np.copy', (['self.ascii_in_pixles'], {}), '(self.ascii_in_pixles)\n', (3954, 3976), True, 'import cv2, os, numpy as np\n'), ((4920, 4945), 'numpy.copy', 'np.copy', (['self.blank_black'], {}), '(self.blank_black)\n', (4927, 4945), True, 'import cv2, os, numpy as np\n'), ((7836, 7850), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (7848, 7850), False, 'from time import perf_counter, sleep as nap\n'), ((7937, 7951), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (7949, 7951), False, 'from time import perf_counter, sleep as nap\n'), ((760, 791), 'os.path.exists', 'os.path.exists', (['self.video_name'], {}), '(self.video_name)\n', (774, 791), False, 'import cv2, os, numpy as np\n'), ((2668, 2699), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (2690, 2699), False, 'import cv2, os, numpy as np\n'), ((6666, 6708), 'threading.Thread', 'Thread', ([], {'target': 'ascii_video.iter_each_frame'}), '(target=ascii_video.iter_each_frame)\n', (6672, 6708), False, 'from threading import Thread\n'), 
((6803, 6853), 'threading.Thread', 'Thread', ([], {'target': 'ascii_video.frame_thread_superviser'}), '(target=ascii_video.frame_thread_superviser)\n', (6809, 6853), False, 'from threading import Thread\n'), ((3868, 3907), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3880, 3907), False, 'import cv2, os, numpy as np\n'), ((5216, 5347), 'cv2.putText', 'cv2.putText', (['image', 'ascii_val', '(index_c * self.pbs, (index_r + 1) * self.pbs)', 'cv2.FONT_HERSHEY_PLAIN', '(0.9)', '(255, 255, 255)', '(1)'], {}), '(image, ascii_val, (index_c * self.pbs, (index_r + 1) * self.pbs\n ), cv2.FONT_HERSHEY_PLAIN, 0.9, (255, 255, 255), 1)\n', (5227, 5347), False, 'import cv2, os, numpy as np\n')] |
import json
import time
from pathlib import Path
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from .ingestion import ingest_session, EpisodeDataset
from .models import FeatureExtractor1d, Model
class EEGDrive:
    """Command-style entry points for the EEG pipeline: ingestion and training."""

    @staticmethod
    def ingest(data_path: str, output_dir: str) -> None:
        """Ingest one recording session and write its statistics as JSON.

        Args:
            data_path: path of the raw session file (``~`` is expanded).
            output_dir: directory receiving the ingested data; created if missing.
        """
        source = Path(data_path).expanduser()
        target = Path(output_dir).expanduser()
        target.mkdir(parents=True, exist_ok=True)
        stats = ingest_session(source, target)
        stats_file = target / f'{source.stem}_statistics.json'
        with stats_file.open('w') as f:
            json.dump(stats, f, indent=4)

    @staticmethod
    def train(
        dataset_dir: str,
        output_dir: str,
        filters: int,
        label_type: str = 'action',
        seed: int = 42,
    ) -> None:
        """Train and evaluate a model on a previously ingested dataset.

        A fresh run directory named after the current unix time is created
        under ``output_dir``; the feature-extractor weights are saved there
        before training starts.
        """
        data_root = Path(dataset_dir).expanduser()
        run_dir = Path(output_dir) / str(int(time.time()))
        run_dir.mkdir(parents=True)
        # Seed both frameworks *before* any model is constructed so the run
        # is reproducible.
        torch.manual_seed(seed)
        np.random.seed(seed)
        dataset = EpisodeDataset(data_root, label_type)
        if label_type == 'preparation':
            dilation_exponent = 5
        else:
            dilation_exponent = 7
        extractor = FeatureExtractor1d(
            channels=19, filters=filters, max_dilation_exponent=dilation_exponent
        )
        model = Model(extractor)
        torch.save(extractor.state_dict(), run_dir / 'feature_extractor.pt')
        features, labels = model.represent(dataset)
        x_train, x_test, y_train, y_test = train_test_split(
            features, labels, test_size=0.14, random_state=seed,
        )
        excluded, cv_acc = model.channel_selection(x_train, y_train)
        print('Excluded channels:', excluded.tolist())
        print(f'Cross-validation mean accuracy: {cv_acc:0.3f}')
        model.fit(x_train, y_train, excluded)
        test_acc = model.eval(x_test, y_test, excluded)
        print(f'Test accuracy: {test_acc:0.3f}')
| [
"torch.manual_seed",
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"numpy.random.seed",
"time.time",
"json.dump"
] | [((1027, 1050), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1044, 1050), False, 'import torch\n'), ((1059, 1079), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1073, 1079), True, 'import numpy as np\n'), ((1592, 1661), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.14)', 'random_state': 'seed'}), '(features, labels, test_size=0.14, random_state=seed)\n', (1608, 1661), False, 'from sklearn.model_selection import train_test_split\n'), ((633, 667), 'json.dump', 'json.dump', (['statistics', 'f'], {'indent': '(4)'}), '(statistics, f, indent=4)\n', (642, 667), False, 'import json\n'), ((941, 957), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (945, 957), False, 'from pathlib import Path\n'), ((349, 364), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (353, 364), False, 'from pathlib import Path\n'), ((399, 415), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (403, 415), False, 'from pathlib import Path\n'), ((892, 909), 'pathlib.Path', 'Path', (['dataset_dir'], {}), '(dataset_dir)\n', (896, 909), False, 'from pathlib import Path\n'), ((968, 979), 'time.time', 'time.time', ([], {}), '()\n', (977, 979), False, 'import time\n')] |
#!/usr/bin/env python
"""
outline to create combination EUV/CHD maps using ML Algorithm
1. Select images
2. Apply pre-processing corrections
a. Limb-Brightening
b. Inter-Instrument Transformation
3. Coronal Hole Detection using ML Algorithm
4. Convert to Map
5. Combine Maps and Save to DB
"""
import os
import numpy as np
import datetime
import time
import chmap.utilities.plotting.psi_plotting as Plotting
import chmap.database.db_classes as db_class
import chmap.database.db_funs as db_funcs
import chmap.data.corrections.apply_lbc_iit as apply_lbc_iit
import chmap.coronal_holes.ml_detect.tools.ml_functions as ml_funcs
import chmap.maps.image2map as image2map
import chmap.maps.midm as midm
import chmap.maps.synchronic.synch_utils as synch_utils
# -------- parameters --------- #
# TIME RANGE FOR QUERYING: maps are produced for images between these datetimes
query_time_min = datetime.datetime(2011, 8, 16, 0, 0, 0)
query_time_max = datetime.datetime(2011, 8, 18, 0, 0, 0)
# define map interval cadence and width
map_freq = 2 # cadence of synchronic map centers (hours)
interval_delta = 30 # half-width of the image search window around each center (minutes)
del_interval_dt = datetime.timedelta(minutes=interval_delta)
del_interval = np.timedelta64(interval_delta, 'm')
# INITIALIZE DATABASE CONNECTION
# DATABASE PATHS (placeholders -- point these at the local installation)
map_data_dir = 'path/to/map/directory'
raw_data_dir = 'path/to/raw_data/directory'
hdf_data_dir = 'path/to/processed_data/directory'
database_dir = 'path/to/database/directory'
sqlite_filename = 'path/to/database/filename'
# designate which database to connect to
use_db = "mysql-Q" # 'sqlite' Use local sqlite file-based db
# 'mysql-Q' Use the remote MySQL database on Q
# 'mysql-Q_test' Use the development database on Q
user = "tervin" # only needed for remote databases.
password = "" # See example109 for setting-up an encrypted password. In this case leave password="", and
# init_db_conn_old() will automatically find and use your saved password. Otherwise, enter your MySQL password here.
# INSTRUMENTS to query images for
inst_list = ["AIA", "EUVI-A", "EUVI-B"]
# CORRECTION PARAMETERS (limb-brightening / inter-instrument transforms)
n_intensity_bins = 200
R0 = 1.01
N_CLUSTERS = 14  # number of K-means clusters used for CH/AR detection below
weight = 1.4  # spatial-coordinate weighting in the K-means feature vector
# MINIMUM MERGE MAPPING PARAMETERS
del_mu = None # optional between this method and mu_merge_cutoff method
mu_cutoff = 0.0 # lower mu cutoff value
mu_merge_cutoff = 0.4 # mu cutoff in overlap areas
EUV_CHD_sep = False # Do separate minimum intensity merges for image and CHD
# MAP PARAMETERS (full-sun map grid in longitude x sin(latitude))
x_range = [0, 2 * np.pi]
y_range = [-1, 1]
map_nycoord = 720
map_nxcoord = 1800
# generate map x,y grids. y grid centered on equator, x referenced from lon=0
map_y = np.linspace(y_range[0], y_range[1], map_nycoord, dtype='<f4')
map_x = np.linspace(x_range[0], x_range[1], map_nxcoord, dtype='<f4')
# generate K-Means x, y grids: pixel indices normalised to [0, 1]
idx = np.indices((map_nxcoord, map_nycoord))
idx_row = idx[0]
idx_row = idx_row / np.max(idx_row)
idx_col = idx[1]
idx_col = idx_col / np.max(idx_col)
# flatten arrays (used as spatial features in the K-means feature matrix)
idx_col_flt = idx_col.flatten()
idx_row_flt = idx_row.flatten()
### --------- NOTHING TO UPDATE BELOW -------- ###
# Establish connection to database
if use_db == 'sqlite':
    # setup database connection to local sqlite file
    sqlite_path = os.path.join(database_dir, sqlite_filename)
    db_session = db_funcs.init_db_conn_old(db_name=use_db, chd_base=db_class.Base, sqlite_path=sqlite_path)
elif use_db in ('mysql-Q', 'mysql-Q_test'):
    # setup database connection to MySQL database on Q
    db_session = db_funcs.init_db_conn_old(db_name=use_db, chd_base=db_class.Base, user=user, password=password)
#### STEP ONE: SELECT IMAGES ####
start_time = time.time()
# 1.) query all EUV images inside the (padded) time window
query_pd = db_funcs.query_euv_images(db_session=db_session, time_min=query_time_min - del_interval_dt,
                          time_max=query_time_max + del_interval_dt)

#### STEP TWO: APPLY PRE-PROCESSING CORRECTIONS ####
# 1.) get the list of synchronic map center dates at the requested cadence
moving_avg_centers = synch_utils.get_dates(time_min=query_time_min, time_max=query_time_max, map_freq=map_freq)

# 3.) loop through center dates
for date_ind, center in enumerate(moving_avg_centers):
    # choose which images to use in the same way we choose images for synchronic download
    synch_images, cluster_method = synch_utils.select_synchronic_images(
        center, del_interval, query_pd, inst_list)
    if synch_images is None:
        # no images fall in the appropriate range, skip
        continue
    # apply limb-brightening and inter-instrument corrections to those images
    date_pd, los_list, iit_list, use_indices, methods_list, ref_alpha, ref_x = \
        apply_lbc_iit.apply_ipp_2(db_session, center, synch_images, inst_list, hdf_data_dir,
                                  n_intensity_bins, R0)

    #### STEP THREE: CORONAL HOLE DETECTION ####
    if los_list[0] is not None:
        #### STEP FOUR: CONVERT TO MAP ####
        map_list, methods_list, data_info, map_info = \
            image2map.create_singles_maps_2(synch_images, iit_list, chd_image_list=None,
                                            methods_list=methods_list, map_x=map_x, map_y=map_y, R0=R0)

        #### STEP FIVE: CREATE COMBINED MAPS AND SAVE TO DB ####
        # Consistency fix: honor the EUV_CHD_sep configuration flag defined in
        # the parameter section instead of a hard-coded False (the default
        # value above is False, so behavior is unchanged out of the box).
        synchronic_map = midm.create_combined_maps_2(
            map_list, mu_merge_cutoff=mu_merge_cutoff, del_mu=del_mu,
            mu_cutoff=mu_cutoff, EUV_CHD_sep=EUV_CHD_sep)

        #### STEP SIX: APPLY CH AND AR DETECTION ####
        # renamed from `map`/`map2` to avoid shadowing the builtin `map`
        map_data = np.where(synchronic_map.data == -9999, 0, synchronic_map.data)
        log_map = np.log(map_data)
        log_map = np.where(log_map == -np.inf, 0, log_map)  # log(0) -> 0 instead of -inf
        # K-means feature matrix: weighted (col, row) position + scaled log intensity
        arr = np.zeros((map_nxcoord * map_nycoord, 3))
        arr[:, 0] = idx_col_flt * weight
        arr[:, 1] = idx_row_flt * weight
        arr[:, 2] = log_map.flatten() * 2
        psi_chd_map, psi_ar_map, chd_labeled, ar_labeled = ml_funcs.kmeans_detection(synchronic_map.data, log_map, arr, N_CLUSTERS,
                                                                                  map_nycoord, map_nxcoord, map_x, map_y)

        title = 'Minimum Intensity Merge: Unsupervised Detection Map\nDate: ' + str(center)
        Plotting.PlotMap(psi_chd_map, title=title, nfig=date_ind)
        Plotting.PlotMap(psi_chd_map, map_type='Contour', title=title, nfig=date_ind)
        Plotting.PlotMap(psi_ar_map, map_type='Contour1', title=title, nfig=date_ind)

end_time = time.time()
print("Total elapsed time: ", end_time-start_time)
| [
"chmap.maps.image2map.create_singles_maps_2",
"chmap.maps.synchronic.synch_utils.select_synchronic_images",
"numpy.log",
"datetime.timedelta",
"datetime.datetime",
"numpy.where",
"chmap.data.corrections.apply_lbc_iit.apply_ipp_2",
"numpy.max",
"numpy.linspace",
"chmap.utilities.plotting.psi_plotti... | [((841, 880), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(8)', '(16)', '(0)', '(0)', '(0)'], {}), '(2011, 8, 16, 0, 0, 0)\n', (858, 880), False, 'import datetime\n'), ((898, 937), 'datetime.datetime', 'datetime.datetime', (['(2011)', '(8)', '(18)', '(0)', '(0)', '(0)'], {}), '(2011, 8, 18, 0, 0, 0)\n', (915, 937), False, 'import datetime\n'), ((1071, 1113), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'interval_delta'}), '(minutes=interval_delta)\n', (1089, 1113), False, 'import datetime\n'), ((1129, 1164), 'numpy.timedelta64', 'np.timedelta64', (['interval_delta', '"""m"""'], {}), "(interval_delta, 'm')\n", (1143, 1164), True, 'import numpy as np\n'), ((2587, 2648), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', 'map_nycoord'], {'dtype': '"""<f4"""'}), "(y_range[0], y_range[1], map_nycoord, dtype='<f4')\n", (2598, 2648), True, 'import numpy as np\n'), ((2657, 2718), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'map_nxcoord'], {'dtype': '"""<f4"""'}), "(x_range[0], x_range[1], map_nxcoord, dtype='<f4')\n", (2668, 2718), True, 'import numpy as np\n'), ((2756, 2794), 'numpy.indices', 'np.indices', (['(map_nxcoord, map_nycoord)'], {}), '((map_nxcoord, map_nycoord))\n', (2766, 2794), True, 'import numpy as np\n'), ((3576, 3587), 'time.time', 'time.time', ([], {}), '()\n', (3585, 3587), False, 'import time\n'), ((3623, 3761), 'chmap.database.db_funs.query_euv_images', 'db_funcs.query_euv_images', ([], {'db_session': 'db_session', 'time_min': '(query_time_min - del_interval_dt)', 'time_max': '(query_time_max + del_interval_dt)'}), '(db_session=db_session, time_min=query_time_min -\n del_interval_dt, time_max=query_time_max + del_interval_dt)\n', (3648, 3761), True, 'import chmap.database.db_funs as db_funcs\n'), ((3886, 3980), 'chmap.maps.synchronic.synch_utils.get_dates', 'synch_utils.get_dates', ([], {'time_min': 'query_time_min', 'time_max': 'query_time_max', 
'map_freq': 'map_freq'}), '(time_min=query_time_min, time_max=query_time_max,\n map_freq=map_freq)\n', (3907, 3980), True, 'import chmap.maps.synchronic.synch_utils as synch_utils\n'), ((6252, 6263), 'time.time', 'time.time', ([], {}), '()\n', (6261, 6263), False, 'import time\n'), ((2832, 2847), 'numpy.max', 'np.max', (['idx_row'], {}), '(idx_row)\n', (2838, 2847), True, 'import numpy as np\n'), ((2885, 2900), 'numpy.max', 'np.max', (['idx_col'], {}), '(idx_col)\n', (2891, 2900), True, 'import numpy as np\n'), ((3163, 3206), 'os.path.join', 'os.path.join', (['database_dir', 'sqlite_filename'], {}), '(database_dir, sqlite_filename)\n', (3175, 3206), False, 'import os\n'), ((3225, 3319), 'chmap.database.db_funs.init_db_conn_old', 'db_funcs.init_db_conn_old', ([], {'db_name': 'use_db', 'chd_base': 'db_class.Base', 'sqlite_path': 'sqlite_path'}), '(db_name=use_db, chd_base=db_class.Base,\n sqlite_path=sqlite_path)\n', (3250, 3319), True, 'import chmap.database.db_funs as db_funcs\n'), ((4190, 4269), 'chmap.maps.synchronic.synch_utils.select_synchronic_images', 'synch_utils.select_synchronic_images', (['center', 'del_interval', 'query_pd', 'inst_list'], {}), '(center, del_interval, query_pd, inst_list)\n', (4226, 4269), True, 'import chmap.maps.synchronic.synch_utils as synch_utils\n'), ((4510, 4620), 'chmap.data.corrections.apply_lbc_iit.apply_ipp_2', 'apply_lbc_iit.apply_ipp_2', (['db_session', 'center', 'synch_images', 'inst_list', 'hdf_data_dir', 'n_intensity_bins', 'R0'], {}), '(db_session, center, synch_images, inst_list,\n hdf_data_dir, n_intensity_bins, R0)\n', (4535, 4620), True, 'import chmap.data.corrections.apply_lbc_iit as apply_lbc_iit\n'), ((3432, 3531), 'chmap.database.db_funs.init_db_conn_old', 'db_funcs.init_db_conn_old', ([], {'db_name': 'use_db', 'chd_base': 'db_class.Base', 'user': 'user', 'password': 'password'}), '(db_name=use_db, chd_base=db_class.Base, user=user,\n password=password)\n', (3457, 3531), True, 'import chmap.database.db_funs as 
db_funcs\n'), ((4845, 4985), 'chmap.maps.image2map.create_singles_maps_2', 'image2map.create_singles_maps_2', (['synch_images', 'iit_list'], {'chd_image_list': 'None', 'methods_list': 'methods_list', 'map_x': 'map_x', 'map_y': 'map_y', 'R0': 'R0'}), '(synch_images, iit_list, chd_image_list=None,\n methods_list=methods_list, map_x=map_x, map_y=map_y, R0=R0)\n', (4876, 4985), True, 'import chmap.maps.image2map as image2map\n'), ((5117, 5246), 'chmap.maps.midm.create_combined_maps_2', 'midm.create_combined_maps_2', (['map_list'], {'mu_merge_cutoff': 'mu_merge_cutoff', 'del_mu': 'del_mu', 'mu_cutoff': 'mu_cutoff', 'EUV_CHD_sep': '(False)'}), '(map_list, mu_merge_cutoff=mu_merge_cutoff,\n del_mu=del_mu, mu_cutoff=mu_cutoff, EUV_CHD_sep=False)\n', (5144, 5246), True, 'import chmap.maps.midm as midm\n'), ((5337, 5399), 'numpy.where', 'np.where', (['(synchronic_map.data == -9999)', '(0)', 'synchronic_map.data'], {}), '(synchronic_map.data == -9999, 0, synchronic_map.data)\n', (5345, 5399), True, 'import numpy as np\n'), ((5415, 5426), 'numpy.log', 'np.log', (['map'], {}), '(map)\n', (5421, 5426), True, 'import numpy as np\n'), ((5442, 5476), 'numpy.where', 'np.where', (['(map2 == -np.inf)', '(0)', 'map2'], {}), '(map2 == -np.inf, 0, map2)\n', (5450, 5476), True, 'import numpy as np\n'), ((5492, 5532), 'numpy.zeros', 'np.zeros', (['(map_nxcoord * map_nycoord, 3)'], {}), '((map_nxcoord * map_nycoord, 3))\n', (5500, 5532), True, 'import numpy as np\n'), ((5714, 5827), 'chmap.coronal_holes.ml_detect.tools.ml_functions.kmeans_detection', 'ml_funcs.kmeans_detection', (['synchronic_map.data', 'map2', 'arr', 'N_CLUSTERS', 'map_nycoord', 'map_nxcoord', 'map_x', 'map_y'], {}), '(synchronic_map.data, map2, arr, N_CLUSTERS,\n map_nycoord, map_nxcoord, map_x, map_y)\n', (5739, 5827), True, 'import chmap.coronal_holes.ml_detect.tools.ml_functions as ml_funcs\n'), ((6010, 6067), 'chmap.utilities.plotting.psi_plotting.PlotMap', 'Plotting.PlotMap', (['psi_chd_map'], {'title': 'title', 
'nfig': 'date_ind'}), '(psi_chd_map, title=title, nfig=date_ind)\n', (6026, 6067), True, 'import chmap.utilities.plotting.psi_plotting as Plotting\n'), ((6076, 6153), 'chmap.utilities.plotting.psi_plotting.PlotMap', 'Plotting.PlotMap', (['psi_chd_map'], {'map_type': '"""Contour"""', 'title': 'title', 'nfig': 'date_ind'}), "(psi_chd_map, map_type='Contour', title=title, nfig=date_ind)\n", (6092, 6153), True, 'import chmap.utilities.plotting.psi_plotting as Plotting\n'), ((6162, 6239), 'chmap.utilities.plotting.psi_plotting.PlotMap', 'Plotting.PlotMap', (['psi_ar_map'], {'map_type': '"""Contour1"""', 'title': 'title', 'nfig': 'date_ind'}), "(psi_ar_map, map_type='Contour1', title=title, nfig=date_ind)\n", (6178, 6239), True, 'import chmap.utilities.plotting.psi_plotting as Plotting\n')] |
# import some data to play with
from sklearn.metrics import accuracy_score
from predict import *
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV

# Load the labelled dataset: column 0 is the class label, the rest are features.
# Bug fix: `DataFrame.as_matrix()` was removed in pandas 1.0 -- `.to_numpy()`
# is the documented replacement and returns the same ndarray.
X_D = pd.read_csv('dataset1.csv').to_numpy()
Y = X_D[:, 0]
X = np.delete(X_D, 0, 1)
# split test and train (stratified so class proportions match in both splits)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, stratify=Y, test_size=0.3, random_state=42)
Z = predictSVM(X_test)
print(accuracy_score(Y_test, Z))
| [
"sklearn.model_selection.train_test_split",
"numpy.delete",
"sklearn.metrics.accuracy_score",
"pandas.read_csv"
] | [((265, 285), 'numpy.delete', 'np.delete', (['X_D', '(0)', '(1)'], {}), '(X_D, 0, 1)\n', (274, 285), True, 'import numpy as np\n'), ((343, 409), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'stratify': 'Y', 'test_size': '(0.3)', 'random_state': '(42)'}), '(X, Y, stratify=Y, test_size=0.3, random_state=42)\n', (359, 409), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((440, 465), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'Z'], {}), '(Y_test, Z)\n', (454, 465), False, 'from sklearn.metrics import accuracy_score\n'), ((208, 235), 'pandas.read_csv', 'pd.read_csv', (['"""dataset1.csv"""'], {}), "('dataset1.csv')\n", (219, 235), True, 'import pandas as pd\n')] |
import logging
import sys
from os.path import isfile
import numpy as np
from phi import math
from phi.field import Scene
class SceneLog:
    """Collects log messages and per-frame scalar curves for a simulation.

    Messages are written to stdout and, when a scene is attached, to an
    ``info.log`` file inside the scene directory. Scalar curves are kept in
    memory and mirrored to ``log_<name>.txt`` files in the scene directory.
    """

    def __init__(self, scene: 'Scene'):
        """Create a log bound to `scene`; pass ``None`` for console-only logging."""
        self.scene = scene
        self._scalars = {}  # name -> list of (frame, value) samples
        self._scalar_streams = {}  # name -> open text file mirroring the curve
        # Quiet the root logger so only this logger's own handlers emit output.
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.WARNING)
        self.logger = logging.Logger("vis", logging.DEBUG)
        console_handler = self.console_handler = logging.StreamHandler(sys.stdout)
        # BUG FIX: the format string previously ended in "%(asctime)sn\n";
        # the stray "n" after the asctime placeholder was a typo.
        log_formatter = logging.Formatter("%(message)s (%(levelname)s), %(asctime)s\n")
        console_handler.setFormatter(log_formatter)
        console_handler.setLevel(logging.INFO)
        self.logger.addHandler(console_handler)
        if self.scene is not None:
            # Pick the first unused name: info.log, info_2.log, info_3.log, ...
            log_file = self.scene.subpath("info.log")
            index = 2
            while isfile(log_file):
                log_file = self.scene.subpath("info_%d.log" % index)
                index += 1
            self.log_file = log_file
            file_handler = self.file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(log_formatter)
            self.logger.addHandler(file_handler)
        else:
            self.log_file = None

    def log(self, message):
        """Write `message` to the console (and the scene log file, if any)."""
        self.logger.info(message)

    def log_scalars(self, frame: int, **values: 'float or math.Tensor'):
        """
        Adds `values` to the curves by name.
        This can be used to log the evolution of scalar quantities or summaries.
        The values are stored in a text file within the scene directory.
        The curves may also be directly viewed in the user interface.

        Args:
            frame: step
            values: Values and names to append to the curves, must be numbers or `phi.math.Tensor`.
                If a curve does not yet exists, a new one is created.
        """
        for name, value in values.items():
            assert isinstance(name, str)
            # Tensors are reduced to a single float via their mean.
            value = float(math.mean(value).mean)
            if name not in self._scalars:
                self._scalars[name] = []
                if self.scene is not None:
                    path = self.scene.subpath(f"log_{name}.txt")
                    self._scalar_streams[name] = open(path, "w")
            self._scalars[name].append((frame, value))
            if self.scene is not None:
                self._scalar_streams[name].write(f"{frame} {value}\n")
                self._scalar_streams[name].flush()

    def get_scalar_curve(self, name) -> tuple:
        """Return the curve `name` as two aligned arrays: (frames, values)."""
        frames = np.array([item[0] for item in self._scalars[name]])
        values = np.array([item[1] for item in self._scalars[name]])
        return frames, values

    @property
    def scalar_curve_names(self) -> tuple:
        """Names of all curves logged so far."""
        return tuple(self._scalars.keys())
| [
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"os.path.isfile",
"numpy.array",
"logging.FileHandler",
"logging.Logger",
"phi.math.mean"
] | [((316, 335), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (333, 335), False, 'import logging\n'), ((404, 440), 'logging.Logger', 'logging.Logger', (['"""vis"""', 'logging.DEBUG'], {}), "('vis', logging.DEBUG)\n", (418, 440), False, 'import logging\n'), ((490, 523), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (511, 523), False, 'import logging\n'), ((548, 612), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s (%(levelname)s), %(asctime)sn\n"""'], {}), "('%(message)s (%(levelname)s), %(asctime)sn\\n')\n", (565, 612), False, 'import logging\n'), ((2764, 2815), 'numpy.array', 'np.array', (['[item[0] for item in self._scalars[name]]'], {}), '([item[0] for item in self._scalars[name]])\n', (2772, 2815), True, 'import numpy as np\n'), ((2833, 2884), 'numpy.array', 'np.array', (['[item[1] for item in self._scalars[name]]'], {}), '([item[1] for item in self._scalars[name]])\n', (2841, 2884), True, 'import numpy as np\n'), ((1277, 1306), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1296, 1306), False, 'import logging\n'), ((2204, 2220), 'phi.math.mean', 'math.mean', (['value'], {}), '(value)\n', (2213, 2220), False, 'from phi import math\n'), ((1084, 1100), 'os.path.isfile', 'isfile', (['log_file'], {}), '(log_file)\n', (1090, 1100), False, 'from os.path import isfile\n')] |
#=============================================================================#
# #
# <NAME> #
# CS 3150 #
# Project 2: Identify Bouldering Routes #
# November 13th, 2020 #
# #
# #
#=============================================================================#
#
# >>>>>>>>>>>>>>> Goals <<<<<<<<<<<<<<<<
#
#
#
#
#
# imports
import cv2
import math
import numpy
from matplotlib import cm
from matplotlib import pyplot
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
# helper functions
def max_of_three(i, j, k):
    """Return the largest of the three arguments.

    Uses the builtin ``max`` instead of chained conditionals; result is
    identical for any mutually comparable values.
    """
    return max(i, j, k)
def show(image, color):
    """Display a single image in a pyplot window.

    A grayscale colormap is used when ``color == "gray"``; any other value
    falls back to pyplot's default color mapping.
    """
    kwargs = {"cmap": "gray"} if color == "gray" else {}
    pyplot.imshow(image, **kwargs)
    pyplot.show()
def plot(image, c1, c2, c3, x, y, z):
    """Scatter the image's pixels in the 3-D channel space (c1, c2, c3).

    Each point is colored by its own pixel value (normalized over the image);
    `x`, `y`, `z` are the axis labels.
    """
    height, width = numpy.shape(image)[0], numpy.shape(image)[1]
    flat_pixels = image.reshape((height * width, 3))
    scaler = colors.Normalize(vmin=-1., vmax=1.)
    scaler.autoscale(flat_pixels)
    point_colors = scaler(flat_pixels).tolist()
    figure = pyplot.figure()
    axis = figure.add_subplot(1, 1, 1, projection="3d")
    axis.scatter(c1.flatten(), c2.flatten(), c3.flatten(),
                 c=point_colors, marker=".")
    axis.set_xlabel(x)
    axis.set_ylabel(y)
    axis.set_zlabel(z)
    pyplot.show()
def sub_matrix(matrix, n):
    """Return channel `n` of an H x W x C array as a float64 H x W copy.

    Replaces the original element-by-element copy loop with a vectorized
    slice; the result (a float64 copy of ``matrix[:, :, n]``) is identical.
    """
    return matrix[:, :, n].astype(numpy.float64)
def dilate_and_erode(image):
    """Dilate twice then erode twice each RGB channel with a 3x3 kernel.

    The processed channels are written back into `image` in place, the
    result is displayed, and the (mutated) image is returned.
    """
    kernel = numpy.ones((3, 3))
    rows, cols = image.shape[:2]
    channels = [sub_matrix(image, c) for c in range(3)]
    channels = [cv2.dilate(ch, kernel, iterations=2) for ch in channels]
    channels = [cv2.erode(ch, kernel, iterations=2) for ch in channels]
    for i in range(rows):
        for j in range(cols):
            for c in range(3):
                image[i, j, c] = channels[c][i, j]
    show(image, "with dilation")
    return image
# read input image (cv2.imread returns it in BGR channel order)
wall = cv2.imread('./test2.jpg')
## show(wall)
length, width, depth = wall.shape  # image height, width and channel count
# Convert color from BGR to RGB
wall = cv2.cvtColor(wall, cv2.COLOR_BGR2RGB)
show(wall, "RGB")
# make black and white version
wall_gray = cv2.cvtColor(wall, cv2.COLOR_RGB2GRAY)
## show(wall_gray, "gray")
# make scatter plots of colors (channel splits used by the plot() calls below)
wall_hsv = cv2.cvtColor(wall, cv2.COLOR_RGB2HSV)
red, green, blue = cv2.split(wall)
hue, sat, val = cv2.split(wall_hsv)
## plot(wall, red, green, blue, "Red", "Green", "Blue")
## plot(wall_hsv, hue, sat, val, "Hue", "Saturation", "Value")
# use saliency to identify holds
##s = cv2.saliency.StaticSaliencySpectralResidual_create()
##worked, saliency_wall = s.computeSaliency(wall_gray)
##show(saliency_wall, "gray")
# use color mask to isolate holds
# (R, G, B) low/high bounds for each hold color -- `wall` is RGB at this point
green_high = (160, 210, 120)
green_low = (80, 130, 30)
yellow_high = (255, 255, 75)
yellow_low = (155, 120, 0)
orange_high = (255, 90, 75)
orange_low = (115, 50, 30)
pink_high = (255, 95, 160)
pink_low = (160, 40, 70)
blue_high = (100, 155, 255)
blue_low = (25, 45, 120)
purple_high = (140, 70, 120)
purple_low = (75, 40, 60)
white_high = (200, 200, 200)
white_low = (150, 150, 150)
## yellow_swatche = numpy.full((10, 10, 3), yellow_high, dtype=numpy.uint8)
## gray_swatche = numpy.full((10, 10, 3), yellow_low, dtype=numpy.uint8)
## show(yellow_swatche, "RGB")
## show(gray_swatche, "RGB")
# inRange yields a binary mask of in-range pixels; bitwise_and keeps only
# those pixels of the original wall image.
green_mask = cv2.inRange(wall, green_low, green_high)
green_holds = cv2.bitwise_and(wall, wall, mask=green_mask)
yellow_mask = cv2.inRange(wall, yellow_low, yellow_high)
yellow_holds = cv2.bitwise_and(wall, wall, mask=yellow_mask)
orange_mask = cv2.inRange(wall, orange_low, orange_high)
orange_holds = cv2.bitwise_and(wall, wall, mask=orange_mask)
pink_mask = cv2.inRange(wall, pink_low, pink_high)
pink_holds = cv2.bitwise_and(wall, wall, mask=pink_mask)
blue_mask = cv2.inRange(wall, blue_low, blue_high)
blue_holds = cv2.bitwise_and(wall, wall, mask=blue_mask)
purple_mask = cv2.inRange(wall, purple_low, purple_high)
purple_holds = cv2.bitwise_and(wall, wall, mask=purple_mask)
white_mask = cv2.inRange(wall, white_low, white_high)
white_holds = cv2.bitwise_and(wall, wall, mask=white_mask)
# show(pink_mask, "gray")
# show(pink_holds, "RGB")
box = numpy.ones((5, 5))  # 5x5 structuring element for the opening below
# pink_holds = dilate_and_erode(pink_holds)
pink_gray = cv2.cvtColor(pink_holds, cv2.COLOR_RGB2GRAY)
# Morphological opening removes small specks left over by the color mask.
pink_gray = cv2.morphologyEx(pink_gray, cv2.MORPH_OPEN, box)
show(pink_gray, "gray")
blurred_holds = cv2.blur(pink_gray, (3, 3))
# show(blurred_holds, "gray")
# Binarize: any pixel brighter than 50 becomes 255.
toss, thresh = cv2.threshold(blurred_holds, 50, 255, cv2.THRESH_BINARY)
# show(thresh, "gray")
pink_contours, hierarchy = cv2.findContours(thresh,
                                            cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
print(len(pink_contours))
for con in pink_contours:
    c1, c2, c3, c4 = cv2.boundingRect(con)
    (x, y), radius = cv2.minEnclosingCircle(con)
    print(radius)
    # For each large contour, paint a 4-pixel-wide white ring just outside
    # its enclosing circle directly into the wall pixels.
    if (radius > 50):
        for i in range(length):
            for j in range(width):
                if math.sqrt((i - y)**2 + (j - x)**2) < radius + 4 and \
                   math.sqrt((i - y)**2 + (j - x)**2) > radius:
                    wall[i][j] = 255
show(wall, "RGB")
'''pink_hull = []
for i in contours:
    pink_hull.append(cv2.convexHull(i, False))
final_holds = numpy.zeros((tresh.shape[0], thresh.shape[1], 3), numpy.uint8)
for i in range(len(contours)):
    cv2.drawContours(drawing, contours, i, pink_high, 1, 8, hierarchy)
    cv2.drawContours(drawing, pink_hull, i, pink_low, 1, 8)
show(pink_holds, "RGB")
# circle routes of a particular color
'''
# connect route as a graph
| [
"math.sqrt",
"matplotlib.pyplot.imshow",
"cv2.threshold",
"cv2.erode",
"cv2.blur",
"numpy.ones",
"cv2.minEnclosingCircle",
"cv2.morphologyEx",
"cv2.split",
"cv2.cvtColor",
"matplotlib.colors.Normalize",
"numpy.shape",
"cv2.imread",
"matplotlib.pyplot.show",
"cv2.inRange",
"cv2.bitwise_... | [((2869, 2894), 'cv2.imread', 'cv2.imread', (['"""./test2.jpg"""'], {}), "('./test2.jpg')\n", (2879, 2894), False, 'import cv2\n'), ((2983, 3020), 'cv2.cvtColor', 'cv2.cvtColor', (['wall', 'cv2.COLOR_BGR2RGB'], {}), '(wall, cv2.COLOR_BGR2RGB)\n', (2995, 3020), False, 'import cv2\n'), ((3083, 3121), 'cv2.cvtColor', 'cv2.cvtColor', (['wall', 'cv2.COLOR_RGB2GRAY'], {}), '(wall, cv2.COLOR_RGB2GRAY)\n', (3095, 3121), False, 'import cv2\n'), ((3192, 3229), 'cv2.cvtColor', 'cv2.cvtColor', (['wall', 'cv2.COLOR_RGB2HSV'], {}), '(wall, cv2.COLOR_RGB2HSV)\n', (3204, 3229), False, 'import cv2\n'), ((3249, 3264), 'cv2.split', 'cv2.split', (['wall'], {}), '(wall)\n', (3258, 3264), False, 'import cv2\n'), ((3281, 3300), 'cv2.split', 'cv2.split', (['wall_hsv'], {}), '(wall_hsv)\n', (3290, 3300), False, 'import cv2\n'), ((4241, 4281), 'cv2.inRange', 'cv2.inRange', (['wall', 'green_low', 'green_high'], {}), '(wall, green_low, green_high)\n', (4252, 4281), False, 'import cv2\n'), ((4296, 4340), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'green_mask'}), '(wall, wall, mask=green_mask)\n', (4311, 4340), False, 'import cv2\n'), ((4355, 4397), 'cv2.inRange', 'cv2.inRange', (['wall', 'yellow_low', 'yellow_high'], {}), '(wall, yellow_low, yellow_high)\n', (4366, 4397), False, 'import cv2\n'), ((4413, 4458), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'yellow_mask'}), '(wall, wall, mask=yellow_mask)\n', (4428, 4458), False, 'import cv2\n'), ((4473, 4515), 'cv2.inRange', 'cv2.inRange', (['wall', 'orange_low', 'orange_high'], {}), '(wall, orange_low, orange_high)\n', (4484, 4515), False, 'import cv2\n'), ((4531, 4576), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'orange_mask'}), '(wall, wall, mask=orange_mask)\n', (4546, 4576), False, 'import cv2\n'), ((4589, 4627), 'cv2.inRange', 'cv2.inRange', (['wall', 'pink_low', 'pink_high'], {}), '(wall, pink_low, pink_high)\n', (4600, 4627), False, 'import cv2\n'), 
((4641, 4684), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'pink_mask'}), '(wall, wall, mask=pink_mask)\n', (4656, 4684), False, 'import cv2\n'), ((4697, 4735), 'cv2.inRange', 'cv2.inRange', (['wall', 'blue_low', 'blue_high'], {}), '(wall, blue_low, blue_high)\n', (4708, 4735), False, 'import cv2\n'), ((4749, 4792), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'blue_mask'}), '(wall, wall, mask=blue_mask)\n', (4764, 4792), False, 'import cv2\n'), ((4807, 4849), 'cv2.inRange', 'cv2.inRange', (['wall', 'purple_low', 'purple_high'], {}), '(wall, purple_low, purple_high)\n', (4818, 4849), False, 'import cv2\n'), ((4865, 4910), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'purple_mask'}), '(wall, wall, mask=purple_mask)\n', (4880, 4910), False, 'import cv2\n'), ((4924, 4964), 'cv2.inRange', 'cv2.inRange', (['wall', 'white_low', 'white_high'], {}), '(wall, white_low, white_high)\n', (4935, 4964), False, 'import cv2\n'), ((4979, 5023), 'cv2.bitwise_and', 'cv2.bitwise_and', (['wall', 'wall'], {'mask': 'white_mask'}), '(wall, wall, mask=white_mask)\n', (4994, 5023), False, 'import cv2\n'), ((5083, 5101), 'numpy.ones', 'numpy.ones', (['(5, 5)'], {}), '((5, 5))\n', (5093, 5101), False, 'import numpy\n'), ((5158, 5202), 'cv2.cvtColor', 'cv2.cvtColor', (['pink_holds', 'cv2.COLOR_RGB2GRAY'], {}), '(pink_holds, cv2.COLOR_RGB2GRAY)\n', (5170, 5202), False, 'import cv2\n'), ((5215, 5263), 'cv2.morphologyEx', 'cv2.morphologyEx', (['pink_gray', 'cv2.MORPH_OPEN', 'box'], {}), '(pink_gray, cv2.MORPH_OPEN, box)\n', (5231, 5263), False, 'import cv2\n'), ((5304, 5331), 'cv2.blur', 'cv2.blur', (['pink_gray', '(3, 3)'], {}), '(pink_gray, (3, 3))\n', (5312, 5331), False, 'import cv2\n'), ((5377, 5433), 'cv2.threshold', 'cv2.threshold', (['blurred_holds', '(50)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blurred_holds, 50, 255, cv2.THRESH_BINARY)\n', (5390, 5433), False, 'import cv2\n'), ((5484, 5548), 'cv2.findContours', 
'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (5500, 5548), False, 'import cv2\n'), ((1361, 1374), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1372, 1374), False, 'from matplotlib import pyplot\n'), ((1422, 1437), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1435, 1437), False, 'from matplotlib import pyplot\n'), ((1530, 1567), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (1546, 1567), False, 'from matplotlib import colors\n'), ((1855, 1868), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1866, 1868), False, 'from matplotlib import pyplot\n'), ((1936, 1955), 'numpy.zeros', 'numpy.zeros', (['(h, w)'], {}), '((h, w))\n', (1947, 1955), False, 'import numpy\n'), ((2279, 2297), 'numpy.ones', 'numpy.ones', (['(3, 3)'], {}), '((3, 3))\n', (2289, 2297), False, 'import numpy\n'), ((2336, 2375), 'cv2.dilate', 'cv2.dilate', (['blue', 'kernel2'], {'iterations': '(2)'}), '(blue, kernel2, iterations=2)\n', (2346, 2375), False, 'import cv2\n'), ((2388, 2428), 'cv2.dilate', 'cv2.dilate', (['green', 'kernel2'], {'iterations': '(2)'}), '(green, kernel2, iterations=2)\n', (2398, 2428), False, 'import cv2\n'), ((2439, 2477), 'cv2.dilate', 'cv2.dilate', (['red', 'kernel2'], {'iterations': '(2)'}), '(red, kernel2, iterations=2)\n', (2449, 2477), False, 'import cv2\n'), ((2490, 2528), 'cv2.erode', 'cv2.erode', (['blue', 'kernel2'], {'iterations': '(2)'}), '(blue, kernel2, iterations=2)\n', (2499, 2528), False, 'import cv2\n'), ((2541, 2580), 'cv2.erode', 'cv2.erode', (['green', 'kernel2'], {'iterations': '(2)'}), '(green, kernel2, iterations=2)\n', (2550, 2580), False, 'import cv2\n'), ((2591, 2628), 'cv2.erode', 'cv2.erode', (['red', 'kernel2'], {'iterations': '(2)'}), '(red, kernel2, iterations=2)\n', (2600, 2628), False, 'import cv2\n'), ((5712, 5733), 'cv2.boundingRect', 
'cv2.boundingRect', (['con'], {}), '(con)\n', (5728, 5733), False, 'import cv2\n'), ((5756, 5783), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['con'], {}), '(con)\n', (5778, 5783), False, 'import cv2\n'), ((1284, 1317), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1297, 1317), False, 'from matplotlib import pyplot\n'), ((1336, 1356), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image'], {}), '(image)\n', (1349, 1356), False, 'from matplotlib import pyplot\n'), ((1466, 1484), 'numpy.shape', 'numpy.shape', (['image'], {}), '(image)\n', (1477, 1484), False, 'import numpy\n'), ((1490, 1508), 'numpy.shape', 'numpy.shape', (['image'], {}), '(image)\n', (1501, 1508), False, 'import numpy\n'), ((5911, 5949), 'math.sqrt', 'math.sqrt', (['((i - y) ** 2 + (j - x) ** 2)'], {}), '((i - y) ** 2 + (j - x) ** 2)\n', (5920, 5949), False, 'import math\n'), ((5984, 6022), 'math.sqrt', 'math.sqrt', (['((i - y) ** 2 + (j - x) ** 2)'], {}), '((i - y) ** 2 + (j - x) ** 2)\n', (5993, 6022), False, 'import math\n')] |
import numpy as np
def R_P(take_off_angle, strike, dip, rake, az):
    """P-wave radiation pattern for the given take-off angle, fault
    geometry (strike/dip/rake) and station azimuth, all in degrees."""
    inc = np.deg2rad(take_off_angle)
    sr = Fault_geom_SR(dip, rake)
    qr = Fault_geom_QR(strike, dip, rake, az)
    pr = Fault_geom_PR(strike, dip, rake, az)
    radial_term = sr * (3 * np.cos(inc) ** 2 - 1)
    return radial_term - qr * np.sin(2 * inc) - pr * np.sin(inc) ** 2
def R_SV(take_off_angle, strike, dip, rake, az):
    """SV-wave radiation pattern for the given take-off angle, fault
    geometry (strike/dip/rake) and station azimuth, all in degrees."""
    inc = np.deg2rad(take_off_angle)
    sr = Fault_geom_SR(dip, rake)
    qr = Fault_geom_QR(strike, dip, rake, az)
    pr = Fault_geom_PR(strike, dip, rake, az)
    return (3 / 2) * sr * np.sin(2 * inc) + qr * np.cos(2 * inc) + (1 / 2) * pr * np.sin(2 * inc)
def R_SH(take_off_angle, strike, dip, rake, az):
    """SH-wave radiation pattern for the given take-off angle, fault
    geometry (strike/dip/rake) and station azimuth, all in degrees."""
    inc = np.deg2rad(take_off_angle)
    ql = Fault_geom_QL(strike, dip, rake, az)
    pl = Fault_geom_PL(strike, dip, rake, az)
    return -ql * np.cos(inc) - pl * np.sin(inc)
def Fault_geom_SR(dip, rake):
    """Fault geometry factor SR for P-SV waves (dip and rake in degrees)."""
    dip_rad = np.deg2rad(dip)
    rake_rad = np.deg2rad(rake)
    return np.sin(rake_rad) * np.sin(dip_rad) * np.cos(dip_rad)
def Fault_geom_QR(strike, dip, rake, az):
    """Fault geometry factor QR for P-SV waves (all angles in degrees)."""
    delta = np.deg2rad(dip)
    lambd = np.deg2rad(rake)
    phi_az = np.deg2rad(strike - az)
    rake_term = np.sin(lambd) * np.cos(2 * delta) * np.sin(phi_az)
    slip_term = np.cos(lambd) * np.cos(delta) * np.cos(phi_az)
    return rake_term + slip_term
def Fault_geom_PR(strike, dip, rake, az):
    """Fault geometry factor PR for P-SV waves (all angles in degrees)."""
    delta = np.deg2rad(dip)
    lambd = np.deg2rad(rake)
    phi_az = np.deg2rad(strike - az)
    first = np.cos(lambd) * np.sin(delta) * np.sin(2 * phi_az)
    second = np.sin(lambd) * np.sin(delta) * np.cos(delta) * np.cos(2 * phi_az)
    return first - second
def Fault_geom_PL(strike, dip, rake, az):
    """Fault geometry factor PL for SH waves (all angles in degrees)."""
    delta = np.deg2rad(dip)
    lambd = np.deg2rad(rake)
    phi_az = np.deg2rad(strike - az)
    first = np.sin(lambd) * np.sin(delta) * np.cos(delta) * np.sin(2 * phi_az)
    second = np.cos(lambd) * np.sin(delta) * np.cos(2 * phi_az)
    return first + second
def Fault_geom_QL(strike, dip, rake, az):
    """Fault geometry factor QL for SH waves (all angles in degrees)."""
    delta = np.deg2rad(dip)
    lambd = np.deg2rad(rake)
    phi_az = np.deg2rad(strike - az)
    first = -np.cos(lambd) * np.cos(delta) * np.sin(phi_az)
    second = np.sin(lambd) * np.cos(2 * delta) * np.cos(phi_az)
    return first + second
| [
"numpy.sin",
"numpy.deg2rad",
"numpy.cos"
] | [((114, 140), 'numpy.deg2rad', 'np.deg2rad', (['take_off_angle'], {}), '(take_off_angle)\n', (124, 140), True, 'import numpy as np\n'), ((467, 493), 'numpy.deg2rad', 'np.deg2rad', (['take_off_angle'], {}), '(take_off_angle)\n', (477, 493), True, 'import numpy as np\n'), ((830, 856), 'numpy.deg2rad', 'np.deg2rad', (['take_off_angle'], {}), '(take_off_angle)\n', (840, 856), True, 'import numpy as np\n'), ((1107, 1122), 'numpy.deg2rad', 'np.deg2rad', (['dip'], {}), '(dip)\n', (1117, 1122), True, 'import numpy as np\n'), ((1135, 1151), 'numpy.deg2rad', 'np.deg2rad', (['rake'], {}), '(rake)\n', (1145, 1151), True, 'import numpy as np\n'), ((1329, 1344), 'numpy.deg2rad', 'np.deg2rad', (['dip'], {}), '(dip)\n', (1339, 1344), True, 'import numpy as np\n'), ((1357, 1373), 'numpy.deg2rad', 'np.deg2rad', (['rake'], {}), '(rake)\n', (1367, 1373), True, 'import numpy as np\n'), ((1388, 1411), 'numpy.deg2rad', 'np.deg2rad', (['(strike - az)'], {}), '(strike - az)\n', (1398, 1411), True, 'import numpy as np\n'), ((1657, 1672), 'numpy.deg2rad', 'np.deg2rad', (['dip'], {}), '(dip)\n', (1667, 1672), True, 'import numpy as np\n'), ((1685, 1701), 'numpy.deg2rad', 'np.deg2rad', (['rake'], {}), '(rake)\n', (1695, 1701), True, 'import numpy as np\n'), ((1716, 1739), 'numpy.deg2rad', 'np.deg2rad', (['(strike - az)'], {}), '(strike - az)\n', (1726, 1739), True, 'import numpy as np\n'), ((2001, 2016), 'numpy.deg2rad', 'np.deg2rad', (['dip'], {}), '(dip)\n', (2011, 2016), True, 'import numpy as np\n'), ((2029, 2045), 'numpy.deg2rad', 'np.deg2rad', (['rake'], {}), '(rake)\n', (2039, 2045), True, 'import numpy as np\n'), ((2060, 2083), 'numpy.deg2rad', 'np.deg2rad', (['(strike - az)'], {}), '(strike - az)\n', (2070, 2083), True, 'import numpy as np\n'), ((2345, 2360), 'numpy.deg2rad', 'np.deg2rad', (['dip'], {}), '(dip)\n', (2355, 2360), True, 'import numpy as np\n'), ((2373, 2389), 'numpy.deg2rad', 'np.deg2rad', (['rake'], {}), '(rake)\n', (2383, 2389), True, 'import numpy as np\n'), 
((2404, 2427), 'numpy.deg2rad', 'np.deg2rad', (['(strike - az)'], {}), '(strike - az)\n', (2414, 2427), True, 'import numpy as np\n'), ((1194, 1207), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1200, 1207), True, 'import numpy as np\n'), ((702, 717), 'numpy.sin', 'np.sin', (['(2 * inc)'], {}), '(2 * inc)\n', (708, 717), True, 'import numpy as np\n'), ((966, 977), 'numpy.cos', 'np.cos', (['inc'], {}), '(inc)\n', (972, 977), True, 'import numpy as np\n'), ((985, 996), 'numpy.sin', 'np.sin', (['inc'], {}), '(inc)\n', (991, 996), True, 'import numpy as np\n'), ((1162, 1175), 'numpy.sin', 'np.sin', (['lambd'], {}), '(lambd)\n', (1168, 1175), True, 'import numpy as np\n'), ((1178, 1191), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (1184, 1191), True, 'import numpy as np\n'), ((1458, 1472), 'numpy.sin', 'np.sin', (['phi_az'], {}), '(phi_az)\n', (1464, 1472), True, 'import numpy as np\n'), ((1521, 1535), 'numpy.cos', 'np.cos', (['phi_az'], {}), '(phi_az)\n', (1527, 1535), True, 'import numpy as np\n'), ((1782, 1800), 'numpy.sin', 'np.sin', (['(2 * phi_az)'], {}), '(2 * phi_az)\n', (1788, 1800), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.cos', 'np.cos', (['(2 * phi_az)'], {}), '(2 * phi_az)\n', (1871, 1883), True, 'import numpy as np\n'), ((2142, 2160), 'numpy.sin', 'np.sin', (['(2 * phi_az)'], {}), '(2 * phi_az)\n', (2148, 2160), True, 'import numpy as np\n'), ((2209, 2227), 'numpy.cos', 'np.cos', (['(2 * phi_az)'], {}), '(2 * phi_az)\n', (2215, 2227), True, 'import numpy as np\n'), ((2471, 2485), 'numpy.sin', 'np.sin', (['phi_az'], {}), '(phi_az)\n', (2477, 2485), True, 'import numpy as np\n'), ((2538, 2552), 'numpy.cos', 'np.cos', (['phi_az'], {}), '(phi_az)\n', (2544, 2552), True, 'import numpy as np\n'), ((316, 331), 'numpy.sin', 'np.sin', (['(2 * inc)'], {}), '(2 * inc)\n', (322, 331), True, 'import numpy as np\n'), ((339, 350), 'numpy.sin', 'np.sin', (['inc'], {}), '(inc)\n', (345, 350), True, 'import numpy as np\n'), ((646, 661), 
'numpy.sin', 'np.sin', (['(2 * inc)'], {}), '(2 * inc)\n', (652, 661), True, 'import numpy as np\n'), ((669, 684), 'numpy.cos', 'np.cos', (['(2 * inc)'], {}), '(2 * inc)\n', (675, 684), True, 'import numpy as np\n'), ((1422, 1435), 'numpy.sin', 'np.sin', (['lambd'], {}), '(lambd)\n', (1428, 1435), True, 'import numpy as np\n'), ((1438, 1455), 'numpy.cos', 'np.cos', (['(2 * delta)'], {}), '(2 * delta)\n', (1444, 1455), True, 'import numpy as np\n'), ((1475, 1488), 'numpy.cos', 'np.cos', (['lambd'], {}), '(lambd)\n', (1481, 1488), True, 'import numpy as np\n'), ((1491, 1504), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1497, 1504), True, 'import numpy as np\n'), ((1750, 1763), 'numpy.cos', 'np.cos', (['lambd'], {}), '(lambd)\n', (1756, 1763), True, 'import numpy as np\n'), ((1766, 1779), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (1772, 1779), True, 'import numpy as np\n'), ((1849, 1862), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (1855, 1862), True, 'import numpy as np\n'), ((2126, 2139), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (2132, 2139), True, 'import numpy as np\n'), ((2163, 2176), 'numpy.cos', 'np.cos', (['lambd'], {}), '(lambd)\n', (2169, 2176), True, 'import numpy as np\n'), ((2193, 2206), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (2199, 2206), True, 'import numpy as np\n'), ((2455, 2468), 'numpy.cos', 'np.cos', (['delta'], {}), '(delta)\n', (2461, 2468), True, 'import numpy as np\n'), ((2488, 2501), 'numpy.sin', 'np.sin', (['lambd'], {}), '(lambd)\n', (2494, 2501), True, 'import numpy as np\n'), ((2504, 2521), 'numpy.cos', 'np.cos', (['(2 * delta)'], {}), '(2 * delta)\n', (2510, 2521), True, 'import numpy as np\n'), ((1803, 1816), 'numpy.sin', 'np.sin', (['lambd'], {}), '(lambd)\n', (1809, 1816), True, 'import numpy as np\n'), ((1819, 1832), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (1825, 1832), True, 'import numpy as np\n'), ((2094, 2107), 'numpy.sin', 'np.sin', (['lambd'], {}), '(lambd)\n', 
(2100, 2107), True, 'import numpy as np\n'), ((2110, 2123), 'numpy.sin', 'np.sin', (['delta'], {}), '(delta)\n', (2116, 2123), True, 'import numpy as np\n'), ((2439, 2452), 'numpy.cos', 'np.cos', (['lambd'], {}), '(lambd)\n', (2445, 2452), True, 'import numpy as np\n'), ((287, 298), 'numpy.cos', 'np.cos', (['inc'], {}), '(inc)\n', (293, 298), True, 'import numpy as np\n')] |
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647  # int32 max; normalization cap for account balances
MAX_NUM_SHARES = 2147483647  # int32 max; divisor for normalizing share/volume figures
MAX_SHARE_PRICE = 5000  # divisor for normalizing OHLC prices toward [0, 1]
MAX_OPEN_POSITIONS = 5  # NOTE(review): not referenced in the visible code -- confirm before removing
MAX_STEPS = 20000  # NOTE(review): not referenced in the visible code -- confirm before removing
COMMISSION_FEE = 0.008  # proportional transaction fee applied to buys and sells
INITIAL_ACCOUNT_BALANCE = 10000  # starting cash; also the normalization base for net worth
class StockTradingEnv(gym.Env):
# metadata = {'render.modes': ['human']}
def __init__(self, df_list, isTraining=True):
super(StockTradingEnv, self).__init__()
self.training = isTraining
self.window_size = 6
self.df_list = []
df_list[0].dropna(inplace = True)
self.intersect_dates = df_list[0]['Date']
for df in df_list[1:]:
df.dropna(inplace = True)
self.intersect_dates = np.intersect1d(self.intersect_dates, df['Date'])
# Remove all NAN in the df
self.start_date = np.min(self.intersect_dates)
self.end_date = np.max(self.intersect_dates)
for df in df_list:
self.df_list.append(df[df['Date'].isin(self.intersect_dates)].reset_index(drop=True))
# For Multiple Markets: Adding the CASH to the action
self.market_number = len(df_list)+1
lower_bond = [[0.0]*self.market_number]*3
lower_bond = np.array(lower_bond)
lower_bond = np.reshape(lower_bond, (1,-1))
upper_bond = [[1.0]*self.market_number]*3
upper_bond = np.array(upper_bond)
upper_bond = np.reshape(upper_bond, (1,-1))
self.action_space = spaces.Box(
low=lower_bond[0], high=upper_bond[0], dtype=np.float16)
# Lower bond: [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
# Upper bond: [[3.0, 3.0, 3.0, 3.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]
# Prices contains the OHCL values for the last six prices
self.observation_space = spaces.Box(
low=0, high=1, shape=(self.market_number, 6, 6), dtype=np.float16)
def _next_observation(self):
'''
The _next_observation method compiles the stock data for the last five time steps,
appends the agent’s account information, and scales all the values to between 0 and 1.
'''
# self.current_step is defined in reset method,
# We assume the current_step is TODAY (BEFORE FINAL), which means we only know infomation till YESTERDAY ENDS.
obs_list = []
for i, df in enumerate(self.df_list):
frame = np.array([
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Open'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'High'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Low'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Price'].values / MAX_SHARE_PRICE,
df.loc[self.current_step-self.window_size: self.current_step - 1,
'Vol'].values / MAX_NUM_SHARES,
], dtype=np.float64)
# Append additional data and scale each value to between 0-1
obs = np.append(frame, [[
self.cash / INITIAL_ACCOUNT_BALANCE,
self.total_net_worth / INITIAL_ACCOUNT_BALANCE,
self.net_worth[i] / INITIAL_ACCOUNT_BALANCE,
self.cost_basis[i] / INITIAL_ACCOUNT_BALANCE,
self.total_sales_value[i] / MAX_NUM_SHARES,
0.0
]], axis=0)
obs_list.append(obs)
cash_obs = np.array([
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size) / MAX_SHARE_PRICE,
np.array([1.0]*self.window_size)
], dtype=np.float64)
cash_obs = np.stack(cash_obs)
cash_obs = np.append(cash_obs, [[
self.cash / INITIAL_ACCOUNT_BALANCE,
self.total_net_worth / INITIAL_ACCOUNT_BALANCE,
self.cash / INITIAL_ACCOUNT_BALANCE,
1 / INITIAL_ACCOUNT_BALANCE,
self.cash / MAX_NUM_SHARES,
0.0
]], axis=0)
obs_list.append(cash_obs)
obs_array = np.array(obs_list)
obs_array[pd.isna(obs_array)] = 0
obs_array[np.isinf(obs_array)] = 1
self.backup_obs = np.array(obs_list, dtype=np.float64)
return self.backup_obs
    def _take_action(self, action):
        """Apply one portfolio action at `current_step`.

        `action` is reshaped to three rows over the n markets plus CASH:
        row 0 selects hold/sell/buy per asset, row 1 gives sell fractions,
        row 2 gives buy fractions. Sells are executed first (proceeds pooled
        as cash, minus commission), then buys, then inventory, cost basis,
        cash and net-worth bookkeeping are refreshed.
        """
        # Set the current price to a random price within the time step
        # dim(self.actual_price) = [n,6], dim(action) = [1, n+1]
        self.actual_price = np.array([random.uniform(df.loc[self.current_step, "Low"],
                                                      df.loc[self.current_step, "High"]) for df in self.df_list], dtype=np.float64)
        self.actual_price = np.append(self.actual_price, 1)
        # Add CASH price = 1, now dim=n+1
        # NOTE(review): self.prev_buyNhold_price is set outside the visible
        # code -- confirm it is initialized before the first call.
        self.actual_price[pd.isna(self.actual_price)] = self.prev_buyNhold_price[pd.isna(self.actual_price)]
        # tradable_asset is 0 where the price is missing, 1 elsewhere.
        tradable_asset = (pd.isna(self.actual_price).astype(int))*(-1)+1
        action = np.reshape(action, (3,-1))
        # Map row 0 from [0, 1] to {0: hold, 1: sell, 2: buy}; untradable -> 0.
        action_type = np.floor(action[0]*2.99).astype(int)*tradable_asset
        self.current_action = action_type
        sell_percent = action[1] * (action_type == 1)
        buy_percent = action[2] * (action_type == 2)
        # Buy fractions are rescaled so they never sum to more than 1.
        if np.nansum(buy_percent) > 1:
            buy_percent = buy_percent/np.nansum(buy_percent)
        '''
        Updated on 20 Feb.
        1. decision in [0,1], where [0,1] = Hold, [1,2] = Sell, [2,3] = Buy
            # Buy or sell is EITHER/OR, cannot happen concurrently
        2. sell_percent is the percentage of each inventory.
        3. Calculate the total cash from sell (including "selling cash", which means put the cash into the pool)
        4. Calculate the amount of cash used to purchase asset (including "buying cash")
        5. Calculate the number for buying
        6. Update the inventory
        '''
        # dim:n+1, those with no price will be nan
        inventory_value = self.actual_price * self.inventory_number
        sell_number = self.inventory_number * sell_percent
        sell_value = sell_number * self.actual_price
        inv_number_after_sell = self.inventory_number - sell_number
        cash_from_sale = np.sum(sell_value) * (1-COMMISSION_FEE)
        cash_from_sale += inventory_value[-1] * sell_percent[-1] * COMMISSION_FEE # NO commission fee for "selling" the CASH
        # Any buy fraction not allocated stays in the cash slot.
        extra_cash_percent = 1-np.nansum(buy_percent)
        buy_value = cash_from_sale * buy_percent * (1-COMMISSION_FEE)
        buy_value[-1] += cash_from_sale * buy_percent[-1] * COMMISSION_FEE
        buy_value[-1] += cash_from_sale * extra_cash_percent
        buy_number = buy_value / self.actual_price
        self.total_sales_value += sell_value
        prev_cost = self.cost_basis * self.inventory_number
        self.inventory_number = inv_number_after_sell + buy_number
        # Roll back to the last valid inventory if NaNs slipped in.
        if np.isnan(self.inventory_number).any():
            self.inventory_number = self.back_up_inv
        else:
            self.back_up_inv = self.inventory_number
        # Per-asset average cost; np.divide avoids division by a zero holding.
        a = (prev_cost - sell_value + buy_value)
        b = self.inventory_number
        self.cost_basis = np.divide(a, b, out=np.zeros_like(a), where=b!=0)
        self.cost_basis[pd.isna(self.cost_basis)] = 0
        self.cost_basis[np.isinf(self.cost_basis)] = 0
        self.cash = self.inventory_number[-1]
        self.prev_net_worth = self.net_worth
        self.net_worth = self.inventory_number * self.actual_price
        self.prev_total_net_worth = self.total_net_worth
        self.total_net_worth = np.sum(self.net_worth)
        if self.total_net_worth > self.max_net_worth:
            self.max_net_worth = self.total_net_worth
        if np.isnan(self.inventory_number).any():
            raise Exception("Inv NAN WARNING!")
        '''
        action_type = action[0]
        buyAmount = action[1]
        sellAmount = action[2]
        if action_type < 1:
            # [0,1): Buy amount % of balance in shares
            cash_spend = self.balance * buyAmount
            if cash_spend < 0.01*self.net_worth: # Not executing this transaction
                buyAmount = 0
                cash_spend = 0
                self.current_action = 0
            else:
                shares_bought = cash_spend / \
                    (self.actual_price*(1+COMMISSION_FEE))
                self.current_action = shares_bought
                prev_cost = self.cost_basis * self.shares_held
                self.balance -= cash_spend
                self.cost_basis = (
                    prev_cost + cash_spend) / (self.shares_held + shares_bought)
                self.shares_held += shares_bought
        elif action_type < 2:
            # [1,2): Sell amount % of shares held
            shares_sold = self.shares_held * sellAmount
            cash_get = shares_sold*self.actual_price*(1-COMMISSION_FEE)
            if cash_get < 0.001*self.net_worth: # Not executing this transaction
                sellAmount = 0
                shares_sold = 0
                cash_get = 0
                self.current_action = 0
            else:
                self.current_action = shares_sold*-1
            self.balance += shares_sold * self.actual_price
            self.shares_held -= shares_sold
            self.total_shares_sold += shares_sold
            self.total_sales_value += shares_sold * self.actual_price
        else: # [2,3): Hold
            self.current_action = 0
        self.prev_net_worth = self.net_worth
        self.net_worth = self.balance + self.shares_held * self.actual_price
        if self.net_worth > self.max_net_worth:
            self.max_net_worth = self.net_worth
        if self.shares_held == 0:
            self.cost_basis = 0
        '''
    def step(self, action):
        '''
        Next, our environment needs to be able to take a step.
        At each step we will take the specified action (chosen by our model),
        calculate the reward, and return the next observation.

        action: raw action vector from the agent; NaN entries are replaced
            with zeros before being executed.
        Returns the usual gym 4-tuple (obs, reward, done, info).
        '''
        # Guard against NaNs coming out of the policy.
        if np.isnan(action).any():
            action = np.nan_to_num(action)
        # 1. Determine TODAY's Date (For training)
        if self.current_step >= len(self.intersect_dates)-1:
            # We have run off the end of the price data.
            # if self.training:
            if self.training:
                self._take_action(action)
                self.current_step = self.window_size # Going back to time 0
            else: # if is testing
                if not self.finished:
                    self.finished = True
                    print("$$$$$$$$$$$ CASH OUT at time " +
                          str(self.intersect_dates[self.current_step-1]) + "$$$$$$$$$$$")
                    # SELL EVERYTHING on the last day
                    new_action = [1.0]*(self.market_number-1) + [2.0]
                    # First Row [1,1,1,...,2]
                    new_action += [1.0]*(self.market_number-1) + [0.0]
                    # Second Row [1,1,1,...,0]
                    new_action += [0.0]*(self.market_number-1) + [1.0]
                    # Third Row [0,0,0,...,1]
                    new_action = np.array(new_action)
                    self._take_action(new_action)
                    self.current_step += 1
                else:
                    self.finished_twice = True
        else:
            # 1. Execute TODAY's Action
            self._take_action(action)
            '''
            Updates self.balance, self.cost_basis, self.shares_held,
            self.total_shares_sold, self.total_sales_value,
            self.net_worth, self.max_net_worth,
            '''
            self.current_step += 1
            # ****IMPORTANT: From now on, the current_step becomes TOMORROW****
            # Keep the current_step undiscovered
        '''
        We want to incentivize profit that is sustained over long periods of time.
        At each step, we will set the reward to the account balance multiplied by
        some fraction of the number of time steps so far.
        The purpose of this is to delay rewarding the agent too fast in the early stages
        and allow it to explore sufficiently before optimizing a single strategy too deeply.
        It will also reward agents that maintain a higher balance for longer,
        rather than those who rapidly gain money using unsustainable strategies.
        '''
        # Closing prices for the step just executed (current_step already
        # points at tomorrow), with a constant 1 appended — presumably the
        # cash component; TODO confirm against _take_action/reset.
        close_prices = [df.loc[self.current_step-1, "Price"] for df in self.df_list]
        close_prices.append(1)
        close_prices = np.array(close_prices, dtype=np.float64)
        self.prev_buyNhold_balance = self.buyNhold_balance
        self.buyNhold_balance = np.sum(
            self.init_buyNhold_amount * close_prices)
        self.prev_buyNhold_price = close_prices
        profit = self.total_net_worth - INITIAL_ACCOUNT_BALANCE
        actual_profit = self.total_net_worth - self.buyNhold_balance
        # delay_modifier = (self.current_step / MAX_STEPS)
        # reward = self.balance * delay_modifier # Original Version
        # reward = actual_profit * delay_modifier # Use Actual Net Profit
        total_net_worth_delta = self.total_net_worth - self.prev_total_net_worth
        buyNhold_delta = self.buyNhold_balance - self.prev_buyNhold_balance
        # Reward: one-step growth of net worth relative to one-step growth of
        # the buy-and-hold baseline (the +1 offsets keep the denominator
        # non-zero when the baseline is flat).
        reward = (total_net_worth_delta+1) / \
            (buyNhold_delta+1) # TODO: NEED TO Reengineer!!!
        # OpenAI will reset if done==True
        done = (self.total_net_worth <= 0) or self.finished_twice
        if not self.finished:
            obs = self._next_observation()
        else:
            # After cash-out the step pointer sits past the data, so observe
            # the previous step instead, then restore the pointer.
            self.current_step -= 1
            obs = self._next_observation()
            self.current_step += 1
        if not self.finished_twice:
            info = {"profit": profit, "total_shares_sold": self.total_sales_value,
                    "actual_profit": actual_profit}
        else:
            info = {"profit": 0, "total_shares_sold": 0, "actual_profit": 0}
        return (obs, reward, done, info)
def reset(self):
# Reset the state of the environment to an initial state
self.cash = INITIAL_ACCOUNT_BALANCE / self.market_number
self.total_net_worth = INITIAL_ACCOUNT_BALANCE
self.prev_total_net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.current_step = 0
self.prev_buyNhold_balance = 0
self.finished = False
self.finished_twice = False
self.net_worth = np.array(
[INITIAL_ACCOUNT_BALANCE / self.market_number] * self.market_number,
dtype=np.float64)
self.total_sales_value = np.array([0.0] * self.market_number)
self.prev_net_worth = self.net_worth
self.current_action = np.array(
[0.0]*self.market_number, dtype=np.float64)
# Set the current step to a random point within the data frame
# We set the current step to a random point within the data frame, because it essentially gives our agent’s more unique experiences from the same data set.
if self.training:
days_range = len(self.intersect_dates)
rand_days = random.randint(self.window_size, days_range - 1)
self.current_step = rand_days
else:
self.current_step = self.window_size
init_price = [df.loc[0, "Price"] for df in self.df_list]
init_price.append(1.0)
init_price = np.array(init_price, dtype=np.float64)
self.prev_buyNhold_price = init_price
self.init_buyNhold_amount = (INITIAL_ACCOUNT_BALANCE/self.market_number) / init_price
self.buyNhold_balance = INITIAL_ACCOUNT_BALANCE
self.inventory_number = self.init_buyNhold_amount
self.back_up_inv = self.inventory_number
self.cost_basis = init_price
return self._next_observation()
def render(self, mode='human', close=False, afterStep=True):
'''
afterStep: if is rendered after the step function, the current_step should -=1.
'''
if afterStep:
todayDate = self.intersect_dates[self.current_step-1]
else:
todayDate = self.intersect_dates[self.current_step]
if mode == 'human':
# Render the environment to the screen
profit = self.total_net_worth - INITIAL_ACCOUNT_BALANCE
print(f'Date: {todayDate}')
print(f'Balance: {self.cash}')
print(
f'Shares held: {self.inventory_number}')
print(
f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
print(
f'Net worth: {self.net_worth} (Total net worth: {self.total_net_worth})')
print(f'Profit: {profit}')
elif mode == 'detail': # Want to add all transaction details
net_worth_delta = self.total_net_worth - self.prev_total_net_worth
buyNhold_delta = self.buyNhold_balance - self.prev_buyNhold_balance
return {
"date": todayDate,
"actual_price": self.actual_price,
"action": self.current_action,
"inventory": self.net_worth,
"shares_held": self.inventory_number,
"net_worth": self.total_net_worth,
"net_worth_delta": net_worth_delta,
"buyNhold_balance": self.buyNhold_balance,
"buyNhold_delta": buyNhold_delta,
"actual_profit": self.total_net_worth - self.buyNhold_balance,
"progress": (net_worth_delta+1)/(buyNhold_delta+1)
}
| [
"numpy.intersect1d",
"random.uniform",
"numpy.reshape",
"numpy.floor",
"gym.spaces.Box",
"numpy.max",
"numpy.array",
"numpy.stack",
"numpy.append",
"numpy.sum",
"numpy.isnan",
"numpy.min",
"pandas.isna",
"numpy.nansum",
"numpy.isinf",
"numpy.zeros_like",
"random.randint",
"numpy.na... | [((867, 895), 'numpy.min', 'np.min', (['self.intersect_dates'], {}), '(self.intersect_dates)\n', (873, 895), True, 'import numpy as np\n'), ((920, 948), 'numpy.max', 'np.max', (['self.intersect_dates'], {}), '(self.intersect_dates)\n', (926, 948), True, 'import numpy as np\n'), ((1253, 1273), 'numpy.array', 'np.array', (['lower_bond'], {}), '(lower_bond)\n', (1261, 1273), True, 'import numpy as np\n'), ((1295, 1326), 'numpy.reshape', 'np.reshape', (['lower_bond', '(1, -1)'], {}), '(lower_bond, (1, -1))\n', (1305, 1326), True, 'import numpy as np\n'), ((1398, 1418), 'numpy.array', 'np.array', (['upper_bond'], {}), '(upper_bond)\n', (1406, 1418), True, 'import numpy as np\n'), ((1440, 1471), 'numpy.reshape', 'np.reshape', (['upper_bond', '(1, -1)'], {}), '(upper_bond, (1, -1))\n', (1450, 1471), True, 'import numpy as np\n'), ((1500, 1567), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'lower_bond[0]', 'high': 'upper_bond[0]', 'dtype': 'np.float16'}), '(low=lower_bond[0], high=upper_bond[0], dtype=np.float16)\n', (1510, 1567), False, 'from gym import spaces\n'), ((1859, 1936), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(self.market_number, 6, 6)', 'dtype': 'np.float16'}), '(low=0, high=1, shape=(self.market_number, 6, 6), dtype=np.float16)\n', (1869, 1936), False, 'from gym import spaces\n'), ((4073, 4091), 'numpy.stack', 'np.stack', (['cash_obs'], {}), '(cash_obs)\n', (4081, 4091), True, 'import numpy as np\n'), ((4112, 4340), 'numpy.append', 'np.append', (['cash_obs', '[[self.cash / INITIAL_ACCOUNT_BALANCE, self.total_net_worth /\n INITIAL_ACCOUNT_BALANCE, self.cash / INITIAL_ACCOUNT_BALANCE, 1 /\n INITIAL_ACCOUNT_BALANCE, self.cash / MAX_NUM_SHARES, 0.0]]'], {'axis': '(0)'}), '(cash_obs, [[self.cash / INITIAL_ACCOUNT_BALANCE, self.\n total_net_worth / INITIAL_ACCOUNT_BALANCE, self.cash /\n INITIAL_ACCOUNT_BALANCE, 1 / INITIAL_ACCOUNT_BALANCE, self.cash /\n MAX_NUM_SHARES, 0.0]], axis=0)\n', (4121, 4340), True, 'import 
numpy as np\n'), ((4466, 4484), 'numpy.array', 'np.array', (['obs_list'], {}), '(obs_list)\n', (4474, 4484), True, 'import numpy as np\n'), ((4597, 4633), 'numpy.array', 'np.array', (['obs_list'], {'dtype': 'np.float64'}), '(obs_list, dtype=np.float64)\n', (4605, 4633), True, 'import numpy as np\n'), ((5086, 5117), 'numpy.append', 'np.append', (['self.actual_price', '(1)'], {}), '(self.actual_price, 1)\n', (5095, 5117), True, 'import numpy as np\n'), ((5360, 5387), 'numpy.reshape', 'np.reshape', (['action', '(3, -1)'], {}), '(action, (3, -1))\n', (5370, 5387), True, 'import numpy as np\n'), ((8010, 8032), 'numpy.sum', 'np.sum', (['self.net_worth'], {}), '(self.net_worth)\n', (8016, 8032), True, 'import numpy as np\n'), ((12967, 13007), 'numpy.array', 'np.array', (['close_prices'], {'dtype': 'np.float64'}), '(close_prices, dtype=np.float64)\n', (12975, 13007), True, 'import numpy as np\n'), ((13100, 13148), 'numpy.sum', 'np.sum', (['(self.init_buyNhold_amount * close_prices)'], {}), '(self.init_buyNhold_amount * close_prices)\n', (13106, 13148), True, 'import numpy as np\n'), ((14912, 15012), 'numpy.array', 'np.array', (['([INITIAL_ACCOUNT_BALANCE / self.market_number] * self.market_number)'], {'dtype': 'np.float64'}), '([INITIAL_ACCOUNT_BALANCE / self.market_number] * self.\n market_number, dtype=np.float64)\n', (14920, 15012), True, 'import numpy as np\n'), ((15086, 15122), 'numpy.array', 'np.array', (['([0.0] * self.market_number)'], {}), '([0.0] * self.market_number)\n', (15094, 15122), True, 'import numpy as np\n'), ((15199, 15253), 'numpy.array', 'np.array', (['([0.0] * self.market_number)'], {'dtype': 'np.float64'}), '([0.0] * self.market_number, dtype=np.float64)\n', (15207, 15253), True, 'import numpy as np\n'), ((15874, 15912), 'numpy.array', 'np.array', (['init_price'], {'dtype': 'np.float64'}), '(init_price, dtype=np.float64)\n', (15882, 15912), True, 'import numpy as np\n'), ((748, 796), 'numpy.intersect1d', 'np.intersect1d', (['self.intersect_dates', 
"df['Date']"], {}), "(self.intersect_dates, df['Date'])\n", (762, 796), True, 'import numpy as np\n'), ((2461, 3025), 'numpy.array', 'np.array', (["[df.loc[self.current_step - self.window_size:self.current_step - 1, 'Open']\n .values / MAX_SHARE_PRICE, df.loc[self.current_step - self.window_size:\n self.current_step - 1, 'High'].values / MAX_SHARE_PRICE, df.loc[self.\n current_step - self.window_size:self.current_step - 1, 'Low'].values /\n MAX_SHARE_PRICE, df.loc[self.current_step - self.window_size:self.\n current_step - 1, 'Price'].values / MAX_SHARE_PRICE, df.loc[self.\n current_step - self.window_size:self.current_step - 1, 'Vol'].values /\n MAX_NUM_SHARES]"], {'dtype': 'np.float64'}), "([df.loc[self.current_step - self.window_size:self.current_step - 1,\n 'Open'].values / MAX_SHARE_PRICE, df.loc[self.current_step - self.\n window_size:self.current_step - 1, 'High'].values / MAX_SHARE_PRICE, df\n .loc[self.current_step - self.window_size:self.current_step - 1, 'Low']\n .values / MAX_SHARE_PRICE, df.loc[self.current_step - self.window_size:\n self.current_step - 1, 'Price'].values / MAX_SHARE_PRICE, df.loc[self.\n current_step - self.window_size:self.current_step - 1, 'Vol'].values /\n MAX_NUM_SHARES], dtype=np.float64)\n", (2469, 3025), True, 'import numpy as np\n'), ((3294, 3561), 'numpy.append', 'np.append', (['frame', '[[self.cash / INITIAL_ACCOUNT_BALANCE, self.total_net_worth /\n INITIAL_ACCOUNT_BALANCE, self.net_worth[i] / INITIAL_ACCOUNT_BALANCE, \n self.cost_basis[i] / INITIAL_ACCOUNT_BALANCE, self.total_sales_value[i] /\n MAX_NUM_SHARES, 0.0]]'], {'axis': '(0)'}), '(frame, [[self.cash / INITIAL_ACCOUNT_BALANCE, self.\n total_net_worth / INITIAL_ACCOUNT_BALANCE, self.net_worth[i] /\n INITIAL_ACCOUNT_BALANCE, self.cost_basis[i] / INITIAL_ACCOUNT_BALANCE, \n self.total_sales_value[i] / MAX_NUM_SHARES, 0.0]], axis=0)\n', (3303, 3561), True, 'import numpy as np\n'), ((4503, 4521), 'pandas.isna', 'pd.isna', (['obs_array'], {}), '(obs_array)\n', (4510, 
4521), True, 'import pandas as pd\n'), ((4545, 4564), 'numpy.isinf', 'np.isinf', (['obs_array'], {}), '(obs_array)\n', (4553, 4564), True, 'import numpy as np\n'), ((5186, 5212), 'pandas.isna', 'pd.isna', (['self.actual_price'], {}), '(self.actual_price)\n', (5193, 5212), True, 'import pandas as pd\n'), ((5241, 5267), 'pandas.isna', 'pd.isna', (['self.actual_price'], {}), '(self.actual_price)\n', (5248, 5267), True, 'import pandas as pd\n'), ((5640, 5662), 'numpy.nansum', 'np.nansum', (['buy_percent'], {}), '(buy_percent)\n', (5649, 5662), True, 'import numpy as np\n'), ((6620, 6638), 'numpy.sum', 'np.sum', (['sell_value'], {}), '(sell_value)\n', (6626, 6638), True, 'import numpy as np\n'), ((6817, 6839), 'numpy.nansum', 'np.nansum', (['buy_percent'], {}), '(buy_percent)\n', (6826, 6839), True, 'import numpy as np\n'), ((7652, 7676), 'pandas.isna', 'pd.isna', (['self.cost_basis'], {}), '(self.cost_basis)\n', (7659, 7676), True, 'import pandas as pd\n'), ((7706, 7731), 'numpy.isinf', 'np.isinf', (['self.cost_basis'], {}), '(self.cost_basis)\n', (7714, 7731), True, 'import numpy as np\n'), ((10529, 10550), 'numpy.nan_to_num', 'np.nan_to_num', (['action'], {}), '(action)\n', (10542, 10550), True, 'import numpy as np\n'), ((15602, 15650), 'random.randint', 'random.randint', (['self.window_size', '(days_range - 1)'], {}), '(self.window_size, days_range - 1)\n', (15616, 15650), False, 'import random\n'), ((3991, 4025), 'numpy.array', 'np.array', (['([1.0] * self.window_size)'], {}), '([1.0] * self.window_size)\n', (3999, 4025), True, 'import numpy as np\n'), ((4878, 4965), 'random.uniform', 'random.uniform', (["df.loc[self.current_step, 'Low']", "df.loc[self.current_step, 'High']"], {}), "(df.loc[self.current_step, 'Low'], df.loc[self.current_step,\n 'High'])\n", (4892, 4965), False, 'import random\n'), ((5706, 5728), 'numpy.nansum', 'np.nansum', (['buy_percent'], {}), '(buy_percent)\n', (5715, 5728), True, 'import numpy as np\n'), ((7300, 7331), 'numpy.isnan', 
'np.isnan', (['self.inventory_number'], {}), '(self.inventory_number)\n', (7308, 7331), True, 'import numpy as np\n'), ((7589, 7605), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (7602, 7605), True, 'import numpy as np\n'), ((8161, 8192), 'numpy.isnan', 'np.isnan', (['self.inventory_number'], {}), '(self.inventory_number)\n', (8169, 8192), True, 'import numpy as np\n'), ((10484, 10500), 'numpy.isnan', 'np.isnan', (['action'], {}), '(action)\n', (10492, 10500), True, 'import numpy as np\n'), ((3735, 3769), 'numpy.array', 'np.array', (['([1.0] * self.window_size)'], {}), '([1.0] * self.window_size)\n', (3743, 3769), True, 'import numpy as np\n'), ((3799, 3833), 'numpy.array', 'np.array', (['([1.0] * self.window_size)'], {}), '([1.0] * self.window_size)\n', (3807, 3833), True, 'import numpy as np\n'), ((3863, 3897), 'numpy.array', 'np.array', (['([1.0] * self.window_size)'], {}), '([1.0] * self.window_size)\n', (3871, 3897), True, 'import numpy as np\n'), ((3927, 3961), 'numpy.array', 'np.array', (['([1.0] * self.window_size)'], {}), '([1.0] * self.window_size)\n', (3935, 3961), True, 'import numpy as np\n'), ((5410, 5436), 'numpy.floor', 'np.floor', (['(action[0] * 2.99)'], {}), '(action[0] * 2.99)\n', (5418, 5436), True, 'import numpy as np\n'), ((11566, 11586), 'numpy.array', 'np.array', (['new_action'], {}), '(new_action)\n', (11574, 11586), True, 'import numpy as np\n'), ((5295, 5321), 'pandas.isna', 'pd.isna', (['self.actual_price'], {}), '(self.actual_price)\n', (5302, 5321), True, 'import pandas as pd\n')] |
import gc
import numpy as np
import pandas as pd
def drop_duplicates_pharma(df):
    """
    Resolve records that are duplicated on (givenat, pharmaid, infusionid).

    df: long-format dataframe of a patient's pharma records with columns
        givenat, pharmaid, infusionid, recordstatus, givendose.

    Resolution rules per duplicated (pharmaid, infusionid) group:
      * all status 780 (injection/tablet): keep every row but make the
        infusion ids unique by appending a running index ("<id>_<i>").
      * all status 776 (stop record): keep the single non-zero-dose row if
        there is exactly one, otherwise keep only the last row.
      * two statuses, one of them 776: keep only the 776 rows.
    Any other combination is unexpected and raises.

    Returns the same dataframe, modified in place.
    """
    df_dup = df[df.duplicated(["givenat", "pharmaid", "infusionid"], keep=False)]
    for pharmaid in df_dup.pharmaid.unique():
        for infusionid in df_dup[df_dup.pharmaid == pharmaid].infusionid.unique():
            tmp = df_dup[(df_dup.pharmaid == pharmaid) & (df_dup.infusionid == infusionid)]
            if len(tmp.recordstatus.unique()) == 1 and tmp.recordstatus.unique()[0] == 780:
                # Disambiguate by giving each row its own sub-infusion id.
                for i in range(len(tmp)):
                    df.loc[tmp.index[i], "infusionid"] = "%s_%s" % (int(df.loc[tmp.index[i], "infusionid"]), i)
            elif len(tmp.recordstatus.unique()) == 1 and tmp.recordstatus.unique()[0] == 776:
                if (tmp.givendose != 0).sum() == 1:
                    df.drop(tmp.index[tmp.givendose == 0], inplace=True)
                else:
                    df.drop(tmp.index[:-1], inplace=True)
            elif len(tmp.recordstatus.unique()) == 2 and 776 in tmp.recordstatus.unique():
                df.drop(tmp.index[tmp.recordstatus != 776], inplace=True)
            else:
                raise Exception("Debug needed")
    return df
def process_status780(df, acting_period):
    """
    Expand injection/tablet records (status 780) into start/stop pairs so the
    channel looks like an infusion.

    Every given time becomes a start row (status 524, dose 0) plus a stop row
    `acting_period` minutes later (status 776, original dose); each pair gets
    its own sub-infusion id "<id>_<i>".  Note: sets `givenat` as the index of
    the input dataframe in place.
    """
    START_CODE = 524
    STOP_CODE = 776
    base_id = int(df.iloc[0].infusionid)
    df.set_index("givenat", inplace=True)
    expanded = []
    for seq, given_time in enumerate(df.index.tolist()):
        segment = df.loc[[given_time]].copy()
        sub_id = "%d_%d" % (base_id, seq)
        end_time = given_time + np.timedelta64(acting_period, "m")
        # Stop row carries the full dose at the end of the acting period.
        segment.loc[end_time, "givendose"] = segment.loc[given_time, "givendose"]
        segment.loc[end_time, "recordstatus"] = STOP_CODE
        segment.loc[end_time, "infusionid"] = sub_id
        # Start row marks the beginning with zero dose.
        segment.loc[given_time, "givendose"] = 0
        segment.loc[given_time, "recordstatus"] = START_CODE
        segment.loc[given_time, "infusionid"] = sub_id
        expanded.append(segment.reset_index())
    return pd.concat(expanded).sort_values("givenat")
def process_single_infusion(df, acting_period):
    '''
    Convert given dose from a single infusion channel to rate

    df: records of one infusion channel (columns givenat, givendose,
        recordstatus, infusionid); status 524 marks the start of an
        infusion segment and 776 its end (see process_status780).
    acting_period: assumed acting time in minutes, used both to expand
        injection/tablet records and to back-fill a missing start record.
    Returns a one-column dataframe indexed by givenat; the column (named
    after the infusion id) holds the dose rate per minute, summed over all
    sub-channels.
    '''
    infusionid = int(df.iloc[0].infusionid)
    # Injections/tablets (status 780 only) are first expanded into
    # start/stop pairs so they can be treated like infusions.
    if len(df.recordstatus.unique()) == 1 and df.recordstatus.unique()[0] == 780:
        df = process_status780(df, acting_period)
    df_rate = []
    for sub_infusionid in df.infusionid.unique():
        tmp = df[df.infusionid == sub_infusionid].copy()
        try:
            assert ((tmp.recordstatus == 524).sum() == 1)
        except AssertionError:
            # No single start record: synthesize one `acting_period` minutes
            # before the first record, with zero dose.
            tmp.set_index("givenat", inplace=True)
            beg_time = tmp.index[0] - np.timedelta64(acting_period, "m")
            tmp.loc[beg_time, "givendose"] = 0
            tmp.loc[beg_time, "recordstatus"] = 524
            tmp.loc[beg_time, "infusionid"] = sub_infusionid
            tmp.sort_index(inplace=True)
            tmp.reset_index(inplace=True)
        try:
            assert ((tmp.recordstatus == 776).sum() == 1)
        except AssertionError:
            # A missing or duplicated stop record is tolerated as-is.
            pass
        # Each dose applies over the interval ending at its timestamp, so the
        # rate attributed to row i is dose_{i+1} divided by the length of
        # [t_i, t_{i+1}) in minutes; the final row gets rate 0.
        tmp.loc[:, "rate"] = 0
        tmp.loc[tmp.index[:-1], "rate"] = tmp.givendose.values[1:] / (tmp.givenat.diff() / np.timedelta64(1, "m")).values[1:]
        tmp.rename(columns={"rate": str(sub_infusionid)}, inplace=True)
        df_rate.append(tmp[["givenat", str(sub_infusionid)]].set_index("givenat"))
    # Sum the per-sub-channel rates onto a single timeline.
    df_rate = pd.concat(df_rate, axis=1).sum(axis=1).to_frame(name=str(infusionid))
    return df_rate
def convert_cumul_value_to_rate(df, cumul_urine_id_lst, general_table):
    """
    Convert cumulative urine-output series to hourly rates, in place.

    df: long-format patient dataframe with columns patientid, variableid,
        datetime, value.
    cumul_urine_id_lst: variable ids whose values are cumulative urine counts.
    general_table: dataframe indexed by patientid with an ``admissiontime``
        column.

    For each cumulative variable the routine (1) merges/clips records from
    before the (estimated) admission time, (2) stitches the counter across
    resets so the series is cumulative over the whole stay, (3) drops records
    spaced closer than ``short_gap`` hours, and (4) differentiates the series
    into an hourly rate (first record set to 0).

    Raises ValueError if a stitched series is still decreasing somewhere.
    Returns the modified dataframe.
    """
    pid = df.iloc[0].patientid
    short_gap = 5 / 60  # minimum allowed spacing between records, in hours
    rec_adm_time = general_table.loc[pid].admissiontime
    # If the first HR measurement time is earlier than the recorded admission
    # time, estimate the "true" admission time as the earlier of the two.
    # NOTE(review): ``.notnull().index[0]`` takes the first HR row regardless
    # of whether its value is null — confirm this is intended.
    if df[df.variableid == 200]["value"].notnull().sum() > 0:
        hr_first_meas_time = df.loc[df[df.variableid == 200]["value"].notnull().index[0], "datetime"]
        esti_adm_time = min(rec_adm_time, hr_first_meas_time)
    else:
        esti_adm_time = rec_adm_time
    df_urine = df[df.variableid.isin(cumul_urine_id_lst)]
    if len(df_urine) == 0:
        return df
    else:
        for vid in df_urine.variableid.unique():
            df_tmp = df_urine[df_urine.variableid == vid]  # table of a single urine variable
            # Records clearly before admission (15.5 min tolerance).
            index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time - np.timedelta64(15 * 60 + 30, "s")]
            if len(index_pre_general_table) == 0:
                pass
            elif len(index_pre_general_table) == 1:
                # A single pre-admission record: move it to the admission time.
                index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time]
                df.loc[index_pre_general_table[0], 'datetime'] = esti_adm_time
            else:
                # Several pre-admission records: keep only the last, moved to
                # the admission time.
                index_pre_general_table = df_tmp.index[df_tmp.datetime < esti_adm_time]
                df.drop(index_pre_general_table[:-1], inplace=True)
                df.loc[index_pre_general_table[-1], 'datetime'] = esti_adm_time
            df_tmp = df[df.variableid == vid]
            if df_tmp.duplicated(["datetime"]).sum() == 0:
                pass
            else:
                df.drop(df_tmp.index[df_tmp.duplicated(["datetime"])], inplace=True)
            # Delete the urine series if only one record is left.
            if (df.variableid == vid).sum() < 2:
                df.drop(df.index[df.variableid == vid], inplace=True)
                continue
            # Stitch counter resets: whenever the value drops, add the last
            # value seen before the reset to everything after it, making the
            # series cumulative over the entire ICU stay.
            df_tmp = df[df.variableid == vid]
            t_reset = df_tmp[(df_tmp["value"].diff() < 0) | (
                    df_tmp.index == df_tmp.index[0])].datetime
            for i in np.arange(1, len(t_reset)):
                tmp = df_tmp[df_tmp.datetime >= t_reset.iloc[i]]
                if i < len(t_reset) - 1:
                    tmp = tmp[tmp.datetime < t_reset.iloc[i + 1]]
                df.loc[tmp.index, 'value'] += df.loc[df_tmp.index[df_tmp.datetime < t_reset.iloc[i]][-1], 'value']
            # Drop records closer than short_gap hours to their predecessor.
            df_tmp = df[df.variableid == vid]
            tdiff = (df_tmp.datetime.diff().iloc[1:] / np.timedelta64(3600, 's'))
            if (tdiff < short_gap).sum() > 0:
                df.drop(df_tmp.index[1:][tdiff.values < short_gap], inplace=True)
            if (df.variableid == vid).sum() < 2:
                df.drop(df.index[df.variableid == vid], inplace=True)
                continue
            # Sanity check: the stitched series must be non-decreasing.
            # (Replaces a leftover `import ipdb; ipdb.set_trace()` debugger.)
            df_tmp = df[df.variableid == vid]
            vdiff = df_tmp["value"].diff()
            if (vdiff < 0).sum() != 0:
                raise ValueError(
                    "cumulative urine series for variable %s is not non-decreasing" % vid)
            gc.collect()
        # Differentiate each cumulative series into an hourly rate.
        for vid in df_urine.variableid.unique():
            df_tmp = df[df.variableid == vid]
            if len(df_tmp) == 0:
                continue
            elif len(df_tmp) == 1:
                continue
            else:
                tdiff = (df_tmp.datetime.diff() / np.timedelta64(3600, 's'))
                df.loc[df_tmp.index[1:], 'value'] = (df_tmp["value"].diff().iloc[1:] / tdiff.iloc[1:]).values
                df.loc[df_tmp.index[0], 'value'] = 0
    return df
| [
"numpy.timedelta64",
"pandas.concat",
"gc.collect",
"ipdb.set_trace"
] | [((7930, 7942), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7940, 7942), False, 'import gc\n'), ((1965, 1999), 'numpy.timedelta64', 'np.timedelta64', (['acting_period', '"""m"""'], {}), "(acting_period, 'm')\n", (1979, 1999), True, 'import numpy as np\n'), ((2401, 2418), 'pandas.concat', 'pd.concat', (['df_new'], {}), '(df_new)\n', (2410, 2418), True, 'import pandas as pd\n'), ((7300, 7325), 'numpy.timedelta64', 'np.timedelta64', (['(3600)', '"""s"""'], {}), "(3600, 's')\n", (7314, 7325), True, 'import numpy as np\n'), ((3080, 3114), 'numpy.timedelta64', 'np.timedelta64', (['acting_period', '"""m"""'], {}), "(acting_period, 'm')\n", (3094, 3114), True, 'import numpy as np\n'), ((3979, 4005), 'pandas.concat', 'pd.concat', (['df_rate'], {'axis': '(1)'}), '(df_rate, axis=1)\n', (3988, 4005), True, 'import pandas as pd\n'), ((7905, 7921), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7919, 7921), False, 'import ipdb\n'), ((3599, 3621), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (3613, 3621), True, 'import numpy as np\n'), ((5043, 5076), 'numpy.timedelta64', 'np.timedelta64', (['(15 * 60 + 30)', '"""s"""'], {}), "(15 * 60 + 30, 's')\n", (5057, 5076), True, 'import numpy as np\n'), ((8225, 8250), 'numpy.timedelta64', 'np.timedelta64', (['(3600)', '"""s"""'], {}), "(3600, 's')\n", (8239, 8250), True, 'import numpy as np\n')] |
# Demo script: fit independent Gaussian processes to the x- and y-coordinates
# of a path and visualise samples from the GP prior and posterior.
from __future__ import division
import numpy as np
import covariance as cov
from gp import GaussianProcess
#import com.ntraft.covariance as cov
#from com.ntraft.gp import GaussianProcess
import matplotlib
# The 'MacOSX' backend appears to have some issues on Mavericks.
import sys
if sys.platform.startswith('darwin'):
    matplotlib.use('TkAgg')
import matplotlib.pyplot as pl
# This is the true unknown function we are trying to approximate
x1 = lambda x: x.flatten()
x2 = lambda x: x.flatten() # y = x
# x2 = lambda x: 2*np.ones_like(x) # constant
# x2 = lambda x: np.sin(0.9*x).flatten() # sin
# Sample some input points and noisy versions of the function evaluated at
# these points.
N = 20 # number of training points
n = 40 # number of test points
s = 0.00000 # noise variance
# T = np.random.uniform(-5, 0, size=(N,))
T = np.linspace(-10, -5, N)
# T = np.linspace(-90, 0, N)
T[-1] = 19.6 # set a goal point
# T[-1] = 175 # set a goal point
x = x1(T) + s*np.random.randn(N)
y = x2(T) + s*np.random.randn(N)
# points we're going to make predictions at.
Ttest = np.linspace(-5, 20, n)
#Ttest = np.linspace(0, 180, n)
axis = [-20, 35, -10, 25]
#axis = [-200, 400, -90, 200]
# Build our Gaussian process.
# xkernel = cov.sq_exp_kernel(2.5, 1)
# ykernel = cov.sq_exp_kernel(2.5, 1)
# kernel = cov.matern_kernel(2.28388, 2.52288)
# kernel = cov.linear_kernel(-2.87701)
# xkernel = cov.summed_kernel(cov.sq_exp_kernel(2.5, 1), cov.noise_kernel(0.01))
# ykernel = cov.summed_kernel(cov.sq_exp_kernel(2.5, 1), cov.noise_kernel(0.01))
# Cafeteria Hyperparams (pre-evaluated)
# xkernel = cov.summed_kernel(
# 	cov.matern_kernel(33.542, 47517),
# 	cov.linear_kernel(315.46),
# 	cov.noise_kernel(0.53043)
# )
# ykernel = cov.summed_kernel(
# 	cov.matern_kernel(9.8147, 155.36),
# 	cov.linear_kernel(17299),
# 	cov.noise_kernel(0.61790)
# )
# Cafeteria Hyperparams
# NOTE(review): the arguments are exponentiated, so the literals appear to be
# log-scale hyperparameters from an external fit — confirm before editing.
xkernel = cov.summed_kernel(
    #cov.sq_exp_kernel(-1),
    cov.matern_kernel(np.exp(1.9128), np.exp(2*5.3844)),
    cov.linear_kernel(np.exp(-.5*-2.8770)),
    cov.noise_kernel(np.exp(2*-0.3170))
)
ykernel = cov.summed_kernel(
    #cov.sq_exp_kernel(-1),
    cov.matern_kernel(np.exp(1.2839), np.exp(2*2.5229)),
    cov.linear_kernel(np.exp(-3.2*-4.8792)),
    cov.noise_kernel(np.exp(2*-0.2407))
)
xgp = GaussianProcess(T, x, Ttest, xkernel)
ygp = GaussianProcess(T, y, Ttest, ykernel)
# PLOTS:
# draw samples from the prior at our test points.
xs = xgp.sample_prior(10)
ys = ygp.sample_prior(10)
pl.figure(1)
pl.plot(xs, ys)
pl.title('Ten samples from the GP prior')
# draw samples from the posterior
ns = 100
xs = xgp.sample(ns)
ys = ygp.sample(ns)
# illustrate the possible paths.
'''pl.figure(2)
pl.subplots_adjust(0.05, 0.1, 0.95, 0.9)
pl.subplot(2,2,1)
pl.plot(x, y, 'yo', ms=8)
ne = 10
pl.plot(xs[:,0:ne], ys[:,0:ne], 'g-')
pl.title('{} samples from the GP posterior'.format(ne))
pl.axis(axis)
pl.subplot(2,2,2)
pl.plot(x, y, 'yo', ms=8)
pl.plot(xs, ys, 'g-')
pl.title('{} samples from the GP posterior'.format(ns))
pl.axis(axis)
pl.subplot(2,2,3)
pl.plot(x, y, 'yo', ms=8)
pl.plot(x1(Ttest), x2(Ttest), 'b-')
pl.plot(xgp.mu, ygp.mu, 'r--', lw=2)
pl.title('Predictive mean and ground truth')
pl.axis(axis)
pl.subplot(2,2,4)
pl.plot(x, y, 'yo', ms=8)
xmean = np.mean(xs, 1)
ymean = np.mean(ys, 1)
pl.plot(xmean, ymean, 'r--', lw=2)
pl.title('Mean of {} samples'.format(ns))
pl.axis(axis)'''
pl.show()
| [
"matplotlib.use",
"matplotlib.pyplot.plot",
"sys.platform.startswith",
"gp.GaussianProcess",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((284, 317), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (307, 317), False, 'import sys\n'), ((836, 859), 'numpy.linspace', 'np.linspace', (['(-10)', '(-5)', 'N'], {}), '(-10, -5, N)\n', (847, 859), True, 'import numpy as np\n'), ((1077, 1099), 'numpy.linspace', 'np.linspace', (['(-5)', '(20)', 'n'], {}), '(-5, 20, n)\n', (1088, 1099), True, 'import numpy as np\n'), ((2254, 2291), 'gp.GaussianProcess', 'GaussianProcess', (['T', 'x', 'Ttest', 'xkernel'], {}), '(T, x, Ttest, xkernel)\n', (2269, 2291), False, 'from gp import GaussianProcess\n'), ((2298, 2335), 'gp.GaussianProcess', 'GaussianProcess', (['T', 'y', 'Ttest', 'ykernel'], {}), '(T, y, Ttest, ykernel)\n', (2313, 2335), False, 'from gp import GaussianProcess\n'), ((2449, 2461), 'matplotlib.pyplot.figure', 'pl.figure', (['(1)'], {}), '(1)\n', (2458, 2461), True, 'import matplotlib.pyplot as pl\n'), ((2462, 2477), 'matplotlib.pyplot.plot', 'pl.plot', (['xs', 'ys'], {}), '(xs, ys)\n', (2469, 2477), True, 'import matplotlib.pyplot as pl\n'), ((2478, 2519), 'matplotlib.pyplot.title', 'pl.title', (['"""Ten samples from the GP prior"""'], {}), "('Ten samples from the GP prior')\n", (2486, 2519), True, 'import matplotlib.pyplot as pl\n'), ((3356, 3365), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (3363, 3365), True, 'import matplotlib.pyplot as pl\n'), ((320, 343), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (334, 343), False, 'import matplotlib\n'), ((968, 986), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (983, 986), True, 'import numpy as np\n'), ((1001, 1019), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (1016, 1019), True, 'import numpy as np\n'), ((1944, 1958), 'numpy.exp', 'np.exp', (['(1.9128)'], {}), '(1.9128)\n', (1950, 1958), True, 'import numpy as np\n'), ((1960, 1978), 'numpy.exp', 'np.exp', (['(2 * 5.3844)'], {}), '(2 * 5.3844)\n', (1966, 1978), True, 'import numpy as 
np\n'), ((1998, 2019), 'numpy.exp', 'np.exp', (['(-0.5 * -2.877)'], {}), '(-0.5 * -2.877)\n', (2004, 2019), True, 'import numpy as np\n'), ((2038, 2056), 'numpy.exp', 'np.exp', (['(2 * -0.317)'], {}), '(2 * -0.317)\n', (2044, 2056), True, 'import numpy as np\n'), ((2132, 2146), 'numpy.exp', 'np.exp', (['(1.2839)'], {}), '(1.2839)\n', (2138, 2146), True, 'import numpy as np\n'), ((2148, 2166), 'numpy.exp', 'np.exp', (['(2 * 2.5229)'], {}), '(2 * 2.5229)\n', (2154, 2166), True, 'import numpy as np\n'), ((2186, 2208), 'numpy.exp', 'np.exp', (['(-3.2 * -4.8792)'], {}), '(-3.2 * -4.8792)\n', (2192, 2208), True, 'import numpy as np\n'), ((2227, 2246), 'numpy.exp', 'np.exp', (['(2 * -0.2407)'], {}), '(2 * -0.2407)\n', (2233, 2246), True, 'import numpy as np\n')] |
#!/usr/bin/python
import rospy
import numpy as np
from operator import itemgetter
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Point
from std_msgs.msg import Float32
from std_msgs.msg import Float64
import tf
class ConeDetector:
def __init__(self):
#self.cone_sub = rospy.Subscriber("cone_location", Float32, self.phi_callback)
self.scan_window=rospy.Publisher("laser_window", LaserScan, queue_size=4)
self.cd_sub = rospy.Subscriber("scan", LaserScan, self.laser_callback)
self.cd_pub = rospy.Publisher("cone_position", PointStamped, queue_size=4)
self.phi = 0.2
self.phi_start=self.phi
self.phi_end = self.phi
self.window=3
self.stampedpoint=PointStamped()
self.counter=0
self.listener = tf.TransformListener(True, rospy.Duration(10.0))
def phi_callback(self, msg):
#print "recieved phi"
self.phi=-msg.data
def laser_callback(self,msg):
#ang="resolution:%s"%str(msg.angle_max-msg.angle_min)
# print "converting from %s to base_link" % msg.header.frame_id
# msg.header.stamp = self.listener.getLatestCommonTime("/base_link",msg.header.frame_id)
# msg = self.listener.transformScan("/base_link", msg)
time=rospy.Time.now()
if self.phi<np.pi:#check the angle
phi_index=int((msg.angle_max+self.phi)/(msg.angle_max-msg.angle_min)*len(msg.ranges))
phi_point=int((msg.angle_max+self.phi)/(msg.angle_max-msg.angle_min)*len(msg.ranges))
points=msg.ranges[phi_index-10*self.window:phi_index+10*self.window]
# # for i in range(phi_point-20*self.window,phi_point-20*self.window):
# # mean=np.mean(msg.ranges[i-2:i+3])
# # points.append((i,mean))
distance = np.mean(points)
# self.phi_start=self.phi-np.pi/(18+3*distance)
# self.phi_end=self.phi+np.pi/(18+3*distance)
self.phi_start=self.phi-.15
self.phi_end=self.phi+.15
start_point=int((msg.angle_max+self.phi_start)/(msg.angle_max-msg.angle_min)*len(msg.ranges))
end_point=int((msg.angle_max+self.phi_end)/(msg.angle_max-msg.angle_min)*len(msg.ranges))
#ang="start_point, end_point:%s"%str((start_point,end_point))
#print ang
#rospy.loginfo(ang)
points=[]
for i in range(start_point, end_point-5):
wind=msg.ranges[i:i+6]
mean=np.mean(wind)
points.append((i+2,mean))
point = min(points,key=lambda item:item[1])
position = start_point+point[0]
dist=point[1]
angle=msg.angle_increment*position+msg.angle_min
x=dist*np.sin(angle)
y=dist*np.cos(angle)
point = Point()
# point.x=x
# point.y=y
point.x = 6.0
point.y = 0.0
point.z=0.0
self.counter+=1
self.stampedpoint.header.seq=self.counter
self.stampedpoint.header.frame_id="base_link"
self.stampedpoint.header.stamp=time
#rospy.loginfo("point: %s" % str(point))
self.stampedpoint.point=point
self.cd_pub.publish(self.stampedpoint)
scan=LaserScan()
scan.header=msg.header
scan.angle_min=self.phi_start
scan.angle_max=self.phi_end
scan.angle_increment=msg.angle_increment
scan.time_increment=msg.time_increment
time=rospy.Time.now()
scan.scan_time=time
scan.range_min=msg.range_min
scan.range_max=msg.range_max
scan.ranges=msg.ranges[start_point:end_point]
self.scan_window.publish(scan)
else:
point=Point()
point.x=0.0
point.y=0.0
point.z=0.0
self.counter+=1
self.stampedpoint.header.seq=self.counter
self.stampedpoint.header.frame_id="base_link"
self.stampedpoint.header.stamp=time
self.stampedpoint.point=point
self.cd_pub.publish(self.stampedpoint)
if __name__=="__main__":
rospy.init_node("ConeDetector")
node=ConeDetector()
rospy.spin()
| [
"numpy.mean",
"sensor_msgs.msg.LaserScan",
"rospy.Subscriber",
"rospy.init_node",
"numpy.sin",
"geometry_msgs.msg.PointStamped",
"rospy.Time.now",
"geometry_msgs.msg.Point",
"rospy.spin",
"numpy.cos",
"rospy.Duration",
"rospy.Publisher"
] | [((4300, 4331), 'rospy.init_node', 'rospy.init_node', (['"""ConeDetector"""'], {}), "('ConeDetector')\n", (4315, 4331), False, 'import rospy\n'), ((4360, 4372), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (4370, 4372), False, 'import rospy\n'), ((435, 491), 'rospy.Publisher', 'rospy.Publisher', (['"""laser_window"""', 'LaserScan'], {'queue_size': '(4)'}), "('laser_window', LaserScan, queue_size=4)\n", (450, 491), False, 'import rospy\n'), ((514, 570), 'rospy.Subscriber', 'rospy.Subscriber', (['"""scan"""', 'LaserScan', 'self.laser_callback'], {}), "('scan', LaserScan, self.laser_callback)\n", (530, 570), False, 'import rospy\n'), ((593, 653), 'rospy.Publisher', 'rospy.Publisher', (['"""cone_position"""', 'PointStamped'], {'queue_size': '(4)'}), "('cone_position', PointStamped, queue_size=4)\n", (608, 653), False, 'import rospy\n'), ((789, 803), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {}), '()\n', (801, 803), False, 'from geometry_msgs.msg import PointStamped\n'), ((1334, 1350), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1348, 1350), False, 'import rospy\n'), ((878, 898), 'rospy.Duration', 'rospy.Duration', (['(10.0)'], {}), '(10.0)\n', (892, 898), False, 'import rospy\n'), ((1870, 1885), 'numpy.mean', 'np.mean', (['points'], {}), '(points)\n', (1877, 1885), True, 'import numpy as np\n'), ((2884, 2891), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (2889, 2891), False, 'from geometry_msgs.msg import Point\n'), ((3381, 3392), 'sensor_msgs.msg.LaserScan', 'LaserScan', ([], {}), '()\n', (3390, 3392), False, 'from sensor_msgs.msg import LaserScan\n'), ((3631, 3647), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3645, 3647), False, 'import rospy\n'), ((3896, 3903), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (3901, 3903), False, 'from geometry_msgs.msg import Point\n'), ((2555, 2568), 'numpy.mean', 'np.mean', (['wind'], {}), '(wind)\n', (2562, 2568), True, 'import numpy as np\n'), ((2817, 2830), 'numpy.sin', 
'np.sin', (['angle'], {}), '(angle)\n', (2823, 2830), True, 'import numpy as np\n'), ((2850, 2863), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2856, 2863), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
import glob
import tifffile as T
from libtiff import TIFF
import numpy as np
def range_normalize(v):
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
v_min, v_max = v.min(axis=(1, 2), keepdims=True), v.max(axis=(1, 2), keepdims=True)
v = (v - v_min) / (v_max - v_min + 1e-5)
return v
def smart_padding(img, data_shape, lables_shape, stride):
if img.shape[0] < data_shape[0]:
img = np.pad(img, ((0, data_shape[0] - img.shape[0]), (0, 0), (0, 0)), mode='reflect')
if img.shape[1] < data_shape[1]:
img = np.pad(img, ((0, 0), (0, data_shape[1] - img.shape[1]), (0, 0)), mode='reflect')
if img.shape[2] < data_shape[2]:
img = np.pad(img, ((0, 0), (0, 0), (0, data_shape[2] - img.shape[1])), mode='reflect')
dz = int(np.floor((img.shape[0] - data_shape[0]) / stride[0] + 1))
dy = int(np.floor((img.shape[1] - data_shape[1]) / stride[1] + 1))
dx = int(np.floor((img.shape[2] - data_shape[2]) / stride[2] + 1))
effective_data_shape = (
data_shape[0] * dz - (data_shape[0] - stride[0]) * (dz - 1),
data_shape[1] * dy - (data_shape[1] - stride[1]) * (dy - 1),
data_shape[2] * dx - (data_shape[2] - stride[2]) * (dx - 1)
)
if effective_data_shape[0] < img.shape[0]:
img = np.pad(img,
(
(0, (data_shape[0] * (dz + 1) - (data_shape[0] - stride[0]) * dz) - img.shape[0]),
(0, 0),
(0, 0)),
mode='reflect')
if effective_data_shape[1] < img.shape[1]:
img = np.pad(img,
(
(0, 0),
(0, (data_shape[1] * (dy + 1) - (data_shape[1] - stride[1]) * dy) - img.shape[1]),
(0, 0)),
mode='reflect')
if effective_data_shape[2] < img.shape[2]:
img = np.pad(img,
(
(0, 0),
(0, 0),
(0, (data_shape[2] * (dx + 1) - (data_shape[2] - stride[2]) * dx) - img.shape[2])),
mode='reflect')
effective_data_shape = img.shape
effective_lable_shape = (
effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
if effective_lable_shape[0] < img.shape[0]:
img = np.pad(img, (((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2),
(0, 0),
(0, 0)),
mode='reflect')
if effective_lable_shape[1] < img.shape[1]:
img = np.pad(img, ((0, 0),
((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2),
(0, 0)),
mode='reflect')
if effective_lable_shape[2] < img.shape[2]:
img = np.pad(img, ((0, 0),
(0, 0),
((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + (
data_shape[2] - lables_shape[2]) % 2)),
mode='reflect')
return img
class Single_Image_Eval(Dataset):
def __init__(self,
image_path='HaftJavaherian_DeepVess2018_GroundTruthImage.tif',
label_path='HaftJavaherian_DeepVess2018_GroundTruthLabel.tif',
data_shape=(7, 33, 33),
lables_shape=(1, 4, 4),
stride=(1, 1, 1),
range_norm=False):
self.range_norm = range_norm
try:
img = T.imread(image_path)
except:
img = []
tif = TIFF.open(image_path)
for _image in tif.iter_images():
img.append(_image)
img = np.stack(img, 0)
try:
lbl = T.imread(label_path)
except:
lbl = []
tif = TIFF.open(label_path)
for _lable in tif.iter_images():
lbl.append(_lable)
lbl = np.stack(lbl, 0)
img = smart_padding(img, data_shape, lables_shape, stride)
lbl = smart_padding(lbl, data_shape, lables_shape, stride)
self.org_shape = img.shape
self.img = img.astype(np.float32)
self.lbl = lbl.astype(np.float32)
self.shape = self.img.shape
self.data_shape = data_shape
self.lables_shape = lables_shape
self.stride = stride
self.dz = int(np.floor((self.shape[0] - data_shape[0]) / stride[0] + 1))
self.dy = int(np.floor((self.shape[1] - data_shape[1]) / stride[1] + 1))
self.dx = int(np.floor((self.shape[2] - data_shape[2]) / stride[2] + 1))
self.effective_data_shape = (
data_shape[0] * self.dz - (data_shape[0] - stride[0]) * (self.dz - 1),
data_shape[1] * self.dy - (data_shape[1] - stride[1]) * (self.dy - 1),
data_shape[2] * self.dx - (data_shape[2] - stride[2]) * (self.dx - 1)
)
self.effective_lable_shape = (
self.effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
self.effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
self.effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
self.effective_lable_idx = (
((data_shape[0] - lables_shape[0]) // 2,
self.effective_data_shape[0] - (
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2)),
((data_shape[1] - lables_shape[1]) // 2,
self.effective_data_shape[1] - (
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2)),
((data_shape[2] - lables_shape[2]) // 2,
self.effective_data_shape[2] - (
(data_shape[2] - lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2]) % 2))
)
self.lbl_z = ((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + lables_shape[0])
self.lbl_y = ((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + lables_shape[1])
self.lbl_x = ((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + lables_shape[2])
self.max_iter = self.dz * self.dy * self.dx
def __len__(self):
return self.max_iter
def __getitem__(self, index):
z, y, x = np.unravel_index(index, (self.dz, self.dy, self.dx))
z = z * self.stride[0]
y = y * self.stride[1]
x = x * self.stride[2]
v = self.img[z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = self.lbl[z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = lbl[self.lbl_z[0]: self.lbl_z[1],
self.lbl_y[0]: self.lbl_y[1],
self.lbl_x[0]: self.lbl_x[1]]
# Normalize
if self.range_norm:
v = range_normalize(v)
else:
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
# To Tensor
data = torch.Tensor(v).unsqueeze(0)
lables = torch.Tensor(lbl // self.lbl.max()).long()
return data, lables
class Directory_Image_Train(Dataset):
def __init__(self,
images_path,
labels_path,
max_iter=1000,
data_shape=(7, 33, 33),
lables_shape=(1, 4, 4),
stride=(1, 1, 1),
range_norm=False):
self.range_norm = range_norm
images = sorted(glob.glob(images_path + '/*tif'))
labels = sorted(glob.glob(labels_path + '/*tif'))
self.org_shape = []
self.shape = []
self.img = []
self.lbl = []
self.data_shape = data_shape
self.lables_shape = lables_shape
self.stride = stride
self.dz = []
self.dy = []
self.dx = []
self.effective_data_shape = []
self.effective_lable_shape = []
self.effective_lable_idx = []
self.lbl_z = []
self.lbl_y = []
self.lbl_x = []
for img_path, lbl_path in zip(images, labels):
try:
img = T.imread(img_path)
except:
img = []
tif = TIFF.open(img_path)
for _image in tif.iter_images():
img.append(_image)
img = np.stack(img, 0)
try:
lbl = T.imread(lbl_path)
except:
lbl = []
tif = TIFF.open(lbl_path)
for _lable in tif.iter_images():
lbl.append(_lable)
lbl = np.stack(lbl, 0)
img = smart_padding(img, data_shape, lables_shape, stride)
lbl = smart_padding(lbl, data_shape, lables_shape, stride)
self.org_shape.append(img.shape)
self.img.append(img.astype(np.float32))
self.lbl.append(lbl.astype(np.float32))
shape = img.shape
self.shape.append(shape)
dz = int(np.floor((shape[0] - data_shape[0]) / stride[0] + 1))
dy = int(np.floor((shape[1] - data_shape[1]) / stride[1] + 1))
dx = int(np.floor((shape[2] - data_shape[2]) / stride[2] + 1))
effective_data_shape = (
data_shape[0] * dz - (data_shape[0] - stride[0]) * (dz - 1),
data_shape[1] * dy - (data_shape[1] - stride[1]) * (dy - 1),
data_shape[2] * dx - (data_shape[2] - stride[2]) * (dx - 1)
)
effective_lable_shape = (
effective_data_shape[0] - (data_shape[0] - lables_shape[0]),
effective_data_shape[1] - (data_shape[1] - lables_shape[1]),
effective_data_shape[2] - (data_shape[2] - lables_shape[2])
)
effective_lable_idx = (
((data_shape[0] - lables_shape[0]) // 2,
effective_data_shape[0] - (
(data_shape[0] - lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2)),
((data_shape[1] - lables_shape[1]) // 2,
effective_data_shape[1] - (
(data_shape[1] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2)),
((data_shape[2] - lables_shape[2]) // 2,
effective_data_shape[2] - (
(data_shape[2] - lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2]) % 2))
)
lbl_z = ((data_shape[0] - lables_shape[0]) // 2,
(data_shape[0] - lables_shape[0]) // 2 + lables_shape[0])
lbl_y = ((data_shape[1] - lables_shape[1]) // 2,
(data_shape[1] - lables_shape[1]) // 2 + lables_shape[1])
lbl_x = ((data_shape[2] - lables_shape[2]) // 2,
(data_shape[2] - lables_shape[2]) // 2 + lables_shape[2])
self.dz.append(dz)
self.dy.append(dy)
self.dx.append(dx)
self.effective_data_shape.append(effective_data_shape)
self.effective_lable_shape.append(effective_lable_shape)
self.effective_lable_idx.append(effective_lable_idx)
self.lbl_z.append(lbl_z)
self.lbl_y.append(lbl_y)
self.lbl_x.append(lbl_x)
self.max_iter = max_iter
def __len__(self):
return self.max_iter
def __getitem__(self, index):
i = np.random.randint(0, len(self.img))
z = np.random.randint(0, self.dz[i])
y = np.random.randint(0, self.dy[i])
x = np.random.randint(0, self.dx[i])
z = z * self.stride[0]
y = y * self.stride[1]
x = x * self.stride[2]
v = self.img[i][z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = self.lbl[i][z: z + self.data_shape[0],
y: y + self.data_shape[1],
x: x + self.data_shape[2]]
lbl = lbl[self.lbl_z[i][0]: self.lbl_z[i][1],
self.lbl_y[i][0]: self.lbl_y[i][1],
self.lbl_x[i][0]: self.lbl_x[i][1]]
# Normalize
if self.range_norm:
v = range_normalize(v)
else:
v = (v - v.mean(axis=(1, 2), keepdims=True)) / (v.std(axis=(1, 2), keepdims=True) + 1e-12)
# To Tensor
data = torch.Tensor(v).unsqueeze(0)
lables = torch.Tensor(lbl // 255).long()
return data, lables
| [
"tifffile.imread",
"numpy.floor",
"torch.Tensor",
"numpy.stack",
"numpy.random.randint",
"numpy.unravel_index",
"libtiff.TIFF.open",
"numpy.pad",
"glob.glob"
] | [((506, 591), 'numpy.pad', 'np.pad', (['img', '((0, data_shape[0] - img.shape[0]), (0, 0), (0, 0))'], {'mode': '"""reflect"""'}), "(img, ((0, data_shape[0] - img.shape[0]), (0, 0), (0, 0)), mode='reflect'\n )\n", (512, 591), True, 'import numpy as np\n'), ((638, 723), 'numpy.pad', 'np.pad', (['img', '((0, 0), (0, data_shape[1] - img.shape[1]), (0, 0))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), (0, data_shape[1] - img.shape[1]), (0, 0)), mode='reflect'\n )\n", (644, 723), True, 'import numpy as np\n'), ((770, 855), 'numpy.pad', 'np.pad', (['img', '((0, 0), (0, 0), (0, data_shape[2] - img.shape[1]))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), (0, 0), (0, data_shape[2] - img.shape[1])), mode='reflect'\n )\n", (776, 855), True, 'import numpy as np\n'), ((865, 921), 'numpy.floor', 'np.floor', (['((img.shape[0] - data_shape[0]) / stride[0] + 1)'], {}), '((img.shape[0] - data_shape[0]) / stride[0] + 1)\n', (873, 921), True, 'import numpy as np\n'), ((936, 992), 'numpy.floor', 'np.floor', (['((img.shape[1] - data_shape[1]) / stride[1] + 1)'], {}), '((img.shape[1] - data_shape[1]) / stride[1] + 1)\n', (944, 992), True, 'import numpy as np\n'), ((1007, 1063), 'numpy.floor', 'np.floor', (['((img.shape[2] - data_shape[2]) / stride[2] + 1)'], {}), '((img.shape[2] - data_shape[2]) / stride[2] + 1)\n', (1015, 1063), True, 'import numpy as np\n'), ((1368, 1498), 'numpy.pad', 'np.pad', (['img', '((0, data_shape[0] * (dz + 1) - (data_shape[0] - stride[0]) * dz - img.\n shape[0]), (0, 0), (0, 0))'], {'mode': '"""reflect"""'}), "(img, ((0, data_shape[0] * (dz + 1) - (data_shape[0] - stride[0]) *\n dz - img.shape[0]), (0, 0), (0, 0)), mode='reflect')\n", (1374, 1498), True, 'import numpy as np\n'), ((1676, 1807), 'numpy.pad', 'np.pad', (['img', '((0, 0), (0, data_shape[1] * (dy + 1) - (data_shape[1] - stride[1]) * dy -\n img.shape[1]), (0, 0))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), (0, data_shape[1] * (dy + 1) - (data_shape[1] - stride\n [1]) * dy - img.shape[1]), 
(0, 0)), mode='reflect')\n", (1682, 1807), True, 'import numpy as np\n'), ((1984, 2114), 'numpy.pad', 'np.pad', (['img', '((0, 0), (0, 0), (0, data_shape[2] * (dx + 1) - (data_shape[2] - stride[2]) *\n dx - img.shape[2]))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), (0, 0), (0, data_shape[2] * (dx + 1) - (data_shape[2] -\n stride[2]) * dx - img.shape[2])), mode='reflect')\n", (1990, 2114), True, 'import numpy as np\n'), ((2573, 2748), 'numpy.pad', 'np.pad', (['img', '(((data_shape[0] - lables_shape[0]) // 2, (data_shape[0] - lables_shape[0]) //\n 2 + (data_shape[0] - lables_shape[0]) % 2), (0, 0), (0, 0))'], {'mode': '"""reflect"""'}), "(img, (((data_shape[0] - lables_shape[0]) // 2, (data_shape[0] -\n lables_shape[0]) // 2 + (data_shape[0] - lables_shape[0]) % 2), (0, 0),\n (0, 0)), mode='reflect')\n", (2579, 2748), True, 'import numpy as np\n'), ((2906, 3082), 'numpy.pad', 'np.pad', (['img', '((0, 0), ((data_shape[1] - lables_shape[1]) // 2, (data_shape[1] -\n lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2), (0, 0))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), ((data_shape[1] - lables_shape[1]) // 2, (data_shape[1\n ] - lables_shape[1]) // 2 + (data_shape[1] - lables_shape[1]) % 2), (0,\n 0)), mode='reflect')\n", (2912, 3082), True, 'import numpy as np\n'), ((3239, 3416), 'numpy.pad', 'np.pad', (['img', '((0, 0), (0, 0), ((data_shape[2] - lables_shape[2]) // 2, (data_shape[2] -\n lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2]) % 2))'], {'mode': '"""reflect"""'}), "(img, ((0, 0), (0, 0), ((data_shape[2] - lables_shape[2]) // 2, (\n data_shape[2] - lables_shape[2]) // 2 + (data_shape[2] - lables_shape[2\n ]) % 2)), mode='reflect')\n", (3245, 3416), True, 'import numpy as np\n'), ((6920, 6972), 'numpy.unravel_index', 'np.unravel_index', (['index', '(self.dz, self.dy, self.dx)'], {}), '(index, (self.dz, self.dy, self.dx))\n', (6936, 6972), True, 'import numpy as np\n'), ((12171, 12203), 'numpy.random.randint', 'np.random.randint', 
(['(0)', 'self.dz[i]'], {}), '(0, self.dz[i])\n', (12188, 12203), True, 'import numpy as np\n'), ((12216, 12248), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.dy[i]'], {}), '(0, self.dy[i])\n', (12233, 12248), True, 'import numpy as np\n'), ((12261, 12293), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.dx[i]'], {}), '(0, self.dx[i])\n', (12278, 12293), True, 'import numpy as np\n'), ((4003, 4023), 'tifffile.imread', 'T.imread', (['image_path'], {}), '(image_path)\n', (4011, 4023), True, 'import tifffile as T\n'), ((4247, 4267), 'tifffile.imread', 'T.imread', (['label_path'], {}), '(label_path)\n', (4255, 4267), True, 'import tifffile as T\n'), ((4881, 4938), 'numpy.floor', 'np.floor', (['((self.shape[0] - data_shape[0]) / stride[0] + 1)'], {}), '((self.shape[0] - data_shape[0]) / stride[0] + 1)\n', (4889, 4938), True, 'import numpy as np\n'), ((4962, 5019), 'numpy.floor', 'np.floor', (['((self.shape[1] - data_shape[1]) / stride[1] + 1)'], {}), '((self.shape[1] - data_shape[1]) / stride[1] + 1)\n', (4970, 5019), True, 'import numpy as np\n'), ((5043, 5100), 'numpy.floor', 'np.floor', (['((self.shape[2] - data_shape[2]) / stride[2] + 1)'], {}), '((self.shape[2] - data_shape[2]) / stride[2] + 1)\n', (5051, 5100), True, 'import numpy as np\n'), ((8185, 8217), 'glob.glob', 'glob.glob', (["(images_path + '/*tif')"], {}), "(images_path + '/*tif')\n", (8194, 8217), False, 'import glob\n'), ((8243, 8275), 'glob.glob', 'glob.glob', (["(labels_path + '/*tif')"], {}), "(labels_path + '/*tif')\n", (8252, 8275), False, 'import glob\n'), ((4079, 4100), 'libtiff.TIFF.open', 'TIFF.open', (['image_path'], {}), '(image_path)\n', (4088, 4100), False, 'from libtiff import TIFF\n'), ((4199, 4215), 'numpy.stack', 'np.stack', (['img', '(0)'], {}), '(img, 0)\n', (4207, 4215), True, 'import numpy as np\n'), ((4323, 4344), 'libtiff.TIFF.open', 'TIFF.open', (['label_path'], {}), '(label_path)\n', (4332, 4344), False, 'from libtiff import TIFF\n'), ((4443, 4459), 
'numpy.stack', 'np.stack', (['lbl', '(0)'], {}), '(lbl, 0)\n', (4451, 4459), True, 'import numpy as np\n'), ((7698, 7713), 'torch.Tensor', 'torch.Tensor', (['v'], {}), '(v)\n', (7710, 7713), False, 'import torch\n'), ((8828, 8846), 'tifffile.imread', 'T.imread', (['img_path'], {}), '(img_path)\n', (8836, 8846), True, 'import tifffile as T\n'), ((9100, 9118), 'tifffile.imread', 'T.imread', (['lbl_path'], {}), '(lbl_path)\n', (9108, 9118), True, 'import tifffile as T\n'), ((9717, 9769), 'numpy.floor', 'np.floor', (['((shape[0] - data_shape[0]) / stride[0] + 1)'], {}), '((shape[0] - data_shape[0]) / stride[0] + 1)\n', (9725, 9769), True, 'import numpy as np\n'), ((9792, 9844), 'numpy.floor', 'np.floor', (['((shape[1] - data_shape[1]) / stride[1] + 1)'], {}), '((shape[1] - data_shape[1]) / stride[1] + 1)\n', (9800, 9844), True, 'import numpy as np\n'), ((9867, 9919), 'numpy.floor', 'np.floor', (['((shape[2] - data_shape[2]) / stride[2] + 1)'], {}), '((shape[2] - data_shape[2]) / stride[2] + 1)\n', (9875, 9919), True, 'import numpy as np\n'), ((13043, 13058), 'torch.Tensor', 'torch.Tensor', (['v'], {}), '(v)\n', (13055, 13058), False, 'import torch\n'), ((13089, 13113), 'torch.Tensor', 'torch.Tensor', (['(lbl // 255)'], {}), '(lbl // 255)\n', (13101, 13113), False, 'import torch\n'), ((8914, 8933), 'libtiff.TIFF.open', 'TIFF.open', (['img_path'], {}), '(img_path)\n', (8923, 8933), False, 'from libtiff import TIFF\n'), ((9044, 9060), 'numpy.stack', 'np.stack', (['img', '(0)'], {}), '(img, 0)\n', (9052, 9060), True, 'import numpy as np\n'), ((9186, 9205), 'libtiff.TIFF.open', 'TIFF.open', (['lbl_path'], {}), '(lbl_path)\n', (9195, 9205), False, 'from libtiff import TIFF\n'), ((9316, 9332), 'numpy.stack', 'np.stack', (['lbl', '(0)'], {}), '(lbl, 0)\n', (9324, 9332), True, 'import numpy as np\n')] |
import glob
from os.path import join, split
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random',parent_package,top_path)
source_files = [join('mtrand', i) for i in ['mtrand.c',
'mtrand.pyx',
'numpy.pxi',
'randomkit.c',
'randomkit.h',
'Python.pxi',
'initarray.c',
'initarray.h',
'distributions.c',
'distributions.h',
]]
config.add_sconscript('SConstruct', source_files = source_files)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| [
"numpy.distutils.core.setup",
"os.path.join",
"numpy.distutils.misc_util.Configuration"
] | [((180, 229), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""random"""', 'parent_package', 'top_path'], {}), "('random', parent_package, top_path)\n", (193, 229), False, 'from numpy.distutils.misc_util import Configuration, get_mathlibs\n'), ((1349, 1383), 'numpy.distutils.core.setup', 'setup', ([], {'configuration': 'configuration'}), '(configuration=configuration)\n', (1354, 1383), False, 'from numpy.distutils.core import setup\n'), ((249, 266), 'os.path.join', 'join', (['"""mtrand"""', 'i'], {}), "('mtrand', i)\n", (253, 266), False, 'from os.path import join, split\n'), ((1012, 1041), 'os.path.join', 'join', (['"""mtrand"""', '"""randomkit.h"""'], {}), "('mtrand', 'randomkit.h')\n", (1016, 1041), False, 'from os.path import join, split\n')] |
# -*- coding: utf-8 -*-
"""
This module contains the classes for testing the model module of mpcpy.
"""
import unittest
from mpcpy import models
from mpcpy import exodata
from mpcpy import utility
from mpcpy import systems
from mpcpy import units
from mpcpy import variables
from testing import TestCaseMPCPy
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
#%%
class SimpleRC(TestCaseMPCPy):
'''Test simple model simulate and estimate.
'''
def setUp(self):
self.start_time = '1/1/2017';
self.final_time = '1/2/2017';
# Set measurements
self.measurements = {};
self.measurements['T_db'] = {'Sample' : variables.Static('T_db_sample', 1800, units.s)};
def tearDown(self):
del self.start_time
del self.final_time
del self.measurements
def test_simulate(self):
'''Test simulation of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_simulate_with_save_parameter_input_data(self):
'''Test simulation of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data,
save_parameter_input_data=True);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = model.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_estimate_one_par(self):
'''Test the estimation of one parameter of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data()]
index = ['heatCapacitor.C']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_one_par.csv', timeseries=False)
def test_estimate_two_par(self):
'''Test the estimation of two parameters of a model.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate system
system = systems.EmulationFromFMU(self.measurements, \
moinfo = (mopath, modelpath, {}));
system.collect_measurements(self.start_time, self.final_time);
# Define parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
parameter_data['thermalResistor.R'] = {};
parameter_data['thermalResistor.R']['Value'] = variables.Static('R_Value', 0.02, units.K_W);
parameter_data['thermalResistor.R']['Minimum'] = variables.Static('R_Min', 0.001, units.K_W);
parameter_data['thermalResistor.R']['Maximum'] = variables.Static('R_Max', 0.1, units.K_W);
parameter_data['thermalResistor.R']['Free'] = variables.Static('R_Free', True, units.boolean);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Estimate models
model.estimate(self.start_time, self.final_time, ['T_db'])
# Check references
data = [model.parameter_data['heatCapacitor.C']['Value'].display_data(),
model.parameter_data['thermalResistor.R']['Value'].display_data(),]
index = ['heatCapacitor.C', 'thermalResistor.R']
df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
self.check_df(df_test, 'estimate_two_par.csv', timeseries=False)
def test_simulate_continue(self):
'''Test simulation of a model in steps.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
controls.collect_data(self.start_time, self.final_time);
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
control_data = controls.data);
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
# Simulate model in 4-hour chunks
sim_steps = pd.date_range(self.start_time, self.final_time, freq=str('8H'))
for i in range(len(sim_steps)-1):
if i == 0:
model.simulate(sim_steps[i], sim_steps[i+1]);
else:
model.simulate('continue', sim_steps[i+1]);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_step{0}.csv'.format(i));
def test_simulate_noinputs(self):
'''Test simulation of a model with no external inputs.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Simulate model
model.simulate(self.start_time, self.final_time);
# Check references
df_test = model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_noinputs.csv');
def test_estimate_error_nofreeparameters(self):
'''Test error raised if no free parameters passed.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Instantiate model
model_no_params = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}));
# Check error raised with no parameters
with self.assertRaises(ValueError):
model_no_params.estimate(self.start_time, self.final_time, []);
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', False, units.boolean);
# Instantiate model
model_no_free = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Check error raised with no free parameters
with self.assertRaises(ValueError):
model_no_params.estimate(self.start_time, self.final_time, []);
def test_estimate_error_nomeasurements(self):
'''Test error raised if measurement_variable_list not in measurements dictionary.'''
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_noinputs';
# Set parameters
parameter_data = {};
parameter_data['heatCapacitor.C'] = {};
parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
# Instantiate model
model_no_meas = models.Modelica(models.JModelica, \
models.RMSE, \
self.measurements, \
moinfo = (mopath, modelpath, {}), \
parameter_data = parameter_data);
# Check error raised with no free parameters
with self.assertRaises(ValueError):
model_no_meas.estimate(self.start_time, self.final_time, ['wrong_meas']);
def test_instantiate_error_incompatible_estimation(self):
'''Test error raised if estimation method is incompatible with model.'''
# Set model path
fmupath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v1.fmu');
with self.assertRaises(ValueError):
model = models.Modelica(models.JModelica, models.RMSE, {}, fmupath=fmupath);
#%%
class EstimateFromJModelicaRealCSV(TestCaseMPCPy):
'''Test parameter estimation of a model using JModelica from real csv data.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path_est = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_est.csv');
self.building_source_file_path_val = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val.csv');
self.building_source_file_path_val_missing = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val_missing.csv');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurement_variable_map = {'wesTdb_mea' : ('wesTdb', units.K),
'halTdb_mea' : ('halTdb', units.K),
'easTdb_mea' : ('easTdb', units.K),
'wesPhvac_mea' : ('wesPhvac', units.W),
'halPhvac_mea' : ('halPhvac', units.W),
'easPhvac_mea' : ('easPhvac', units.W),
'Ptot_mea' : ('Ptot', units.W)}
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate test building
self.building_est = systems.RealFromCSV(self.building_source_file_path_est,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Collect measurement data
self.building_est.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building_est.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
def tearDown(self):
del self.model
del self.building_est
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Finish test
self._finish_estimate_validate('')
def test_estimate_and_validate_missing_measurements(self):
'''Test the estimation of a model's coefficients based on measured data.
Some of the validation measurement data is missing.
'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
# Instantiate validate building
building_val = systems.RealFromCSV(self.building_source_file_path_val_missing,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE_missing.csv', timeseries=False);
def test_estimate_and_validate_global_start_init(self):
'''Test the estimation of a model's coefficients based on measured data using global start and user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_winit')
def test_estimate_and_validate_global_start_woinit(self):
'''Test the estimation of a model's coefficients based on measured data using global start and no user-defined initial value.'''
plt.close('all');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=False);
# Finish test
self._finish_estimate_validate('_global_start_woinit')
def test_estimate_and_validate_global_start_maxexceeded(self):
'''Test the estimation of a model's coefficients based on measured data using global start and maximum cpu time and iterations.'''
plt.close('all');
# Set maximum cpu time for JModelica
opt_options = self.model._estimate_method.opt_problem.get_optimization_options();
opt_options['IPOPT_options']['max_cpu_time'] = 60;
opt_options['IPOPT_options']['max_iter'] = 100;
self.model._estimate_method.opt_problem.set_optimization_options(opt_options);
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
# Finish test
self._finish_estimate_validate('_global_start_maxexceeded')
def _finish_estimate_validate(self,tag):
'''Internal method for finishing the estimate and valudate tests.'''
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE{0}.csv'.format(tag), timeseries=False);
# All estimates if global estimate
try:
glo_est_data_test = self.model.get_global_estimate_data()
self.check_json(glo_est_data_test, 'estimate_gloest{0}.txt'.format(tag));
except:
pass
# Instantiate validate building
self.building_val = systems.RealFromCSV(self.building_source_file_path_val,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
self.building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = self.building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE{0}.csv'.format(tag), timeseries=False);
class EstimateFromJModelicaEmulationFMU(TestCaseMPCPy):
'''Test emulation-based parameter estimation of a model using JModelica.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v2.fmu');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate building
building_parameters_data = {};
building_parameters_data['lat'] = {};
building_parameters_data['lat']['Value'] = self.weather.lat;
self.building = systems.EmulationFromFMU(self.measurements, \
fmupath = self.building_source_file_path, \
zone_names = self.zone_names, \
parameter_data = building_parameters_data);
def tearDown(self):
del self.building
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
plt.close('all');
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Set exodata to building emulation
self.building.weather_data = self.weather.data;
self.building.internal_data = self.internal.data;
self.building.control_data = self.control.data;
self.building.tz_name = self.weather.tz_name;
# Collect measurement data
self.building.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name,
save_parameter_input_data=True);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Check parameter and input data were saved
df_test = pd.read_csv('mpcpy_simulation_inputs_model.csv', index_col='Time');
df_test.index = pd.to_datetime(df_test.index).tz_localize('UTC')
self.check_df(df_test, 'mpcpy_simulation_inputs_model.csv');
df_test = pd.read_csv('mpcpy_simulation_parameters_model.csv', index_col='parameter');
self.check_df(df_test, 'mpcpy_simulation_parameters_model.csv', timeseries=False);
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
# Validate on validation data
self.building.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = self.building.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE.csv', timeseries=False);
def test_estimate_error_continue(self):
'''Test that an error is thrown for estimation start_time of continue.
'''
plt.close('all');
# Exogenous collection time
start_time_exodata = '1/1/2015';
final_time_exodata = '1/30/2015';
# Estimation time
start_time_estimation = 'continue';
final_time_estimation = '1/4/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(start_time_exodata, final_time_exodata);
self.internal.collect_data(start_time_exodata, final_time_exodata);
self.control.collect_data(start_time_exodata, final_time_exodata);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name);
# Error when estimate model
with self.assertRaises(ValueError):
self.model.estimate(start_time_estimation, final_time_estimation, self.measurement_variable_list);
#%%
class EstimateFromUKF(TestCaseMPCPy):
'''Test the parameter estimation of a model using UKF.
'''
def setUp(self):
self.start_time = '1/1/2017';
self.final_time = '1/10/2017';
# Set measurements
self.measurements = {};
self.measurements['T_db'] = {'Sample' : variables.Static('T_db_sample', 1800, units.s)};
# Set model paths
mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
modelpath = 'Simple.RC_nostart';
self.moinfo = (mopath, modelpath, {})
# Gather parameters
parameter_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Parameters.csv');
self.parameters = exodata.ParameterFromCSV(parameter_csv_filepath);
self.parameters.collect_data();
# Gather control inputs
control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
variable_map = {'q_flow_csv' : ('q_flow', units.W)};
self.controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
self.controls.collect_data(self.start_time, self.final_time);
# Instantiate system
self.system = systems.EmulationFromFMU(self.measurements, \
moinfo = self.moinfo, \
control_data = self.controls.data);
# Get measurements
self.system.collect_measurements(self.start_time, self.final_time);
def tearDown(self):
del self.system
del self.controls
del self.parameters
del self.measurements
def test_estimate_and_validate(self):
'''Test the estimation of a model's coefficients based on measured data.'''
# Instantiate model
model = models.Modelica(models.UKF, \
models.RMSE, \
self.system.measurements, \
moinfo = self.moinfo, \
parameter_data = self.parameters.data, \
control_data = self.controls.data, \
version = '1.0');
# Estimate
model.estimate(self.start_time, self.final_time, ['T_db']);
# Validate
model.validate(self.start_time, self.final_time, 'validate', plot = 0);
# Check references
RMSE = {};
for key in model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE.csv', timeseries=False);
def test_error_fmu_version(self):
'''Test error raised if wrong fmu version.'''
# Check error raised with wrong fmu version (2.0 instead of 1.0)
with self.assertRaises(ValueError):
# Instantiate model
model = models.Modelica(models.UKF, \
models.RMSE, \
self.system.measurements, \
moinfo = self.moinfo, \
parameter_data = self.parameters.data, \
control_data = self.controls.data, \
version = '2.0');
#%% Occupancy tests
class OccupancyFromQueueing(TestCaseMPCPy):
'''Test the occupancy model using a queueing approach.
'''
def setUp(self):
# Testing time
self.start_time = '3/8/2013';
self.final_time = '3/15/2013 23:59';
# Setup building measurement collection from csv
self.csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'OccData.csv');
# Measurements
self.measurements = {};
self.measurements['occupancy'] = {'Sample' : variables.Static('occupancy_sample', 300, units.s)};
self.measurement_variable_map = {'Total People Count for the whole building (+)' : ('occupancy', units.unit1)};
# Instantiate building measurement source
self.building = systems.RealFromCSV(self.csv_filepath, \
self.measurements,
self.measurement_variable_map,
time_header = 'Date');
# Where to save ref occupancy model
self.occupancy_model_file = self.get_ref_path() + os.sep +'occupancy_model_estimated.txt';
def tearDown(self):
del self.building
del self.measurements
def test_estimate(self):
'''Test the estimation method.'''
plt.close('all');
# Training Time
start_time = '2/1/2013';
final_time = '7/24/2013 23:59';
# Collect measurements
self.building.collect_measurements(start_time, final_time);
# Instantiate occupancy model
occupancy = models.Occupancy(models.QueueModel, self.building.measurements);
# Estimate occupancy model parameters
np.random.seed(1);
occupancy.estimate(start_time, final_time);
try:
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
except IOError:
try:
os.makedirs(self.get_ref_path());
except OSError:
pass;
with open(self.occupancy_model_file, 'w') as f:
pickle.dump(occupancy, f);
def test_simulate(self):
'''Test occupancy prediction.'''
plt.close('all');
# Load occupancy model
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
# Simulate occupancy model
np.random.seed(1);
occupancy.simulate(self.start_time, self.final_time);
# Check references
df_test = occupancy.display_measurements('Simulated');
self.check_df(df_test, 'simulate_display.csv');
df_test = occupancy.get_base_measurements('Simulated');
self.check_df(df_test, 'simulate_base.csv');
def test_validate(self):
'''Test occupancy prediction comparison with measured data.'''
plt.close('all');
# Load occupancy model
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
# Collect validation measurements
self.building.collect_measurements(self.start_time, self.final_time);
# Set valiation measurements in occupancy model
occupancy.measurements = self.building.measurements;
# Validate occupancy model with simulation options
simulate_options = occupancy.get_simulate_options();
simulate_options['iter_num'] = 5;
occupancy.set_simulate_options(simulate_options);
np.random.seed(1);
occupancy.validate(self.start_time, self.final_time, \
os.path.join(self.get_unittest_path(), 'outputs', \
'occupancy_model_validate'));
# Check references
RMSE = {};
for key in occupancy.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = occupancy.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE.csv', timeseries=False);
def test_get_load(self):
'''Test generation of occupancy load data using occupancy prediction.'''
plt.close('all');
# Load occupancy model
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
# Simulate occupancy model
simulate_options = occupancy.get_simulate_options();
simulate_options['iter_num'] = 5;
np.random.seed(1);
occupancy.simulate(self.start_time, self.final_time);
load = occupancy.get_load(100);
# Check references
df_test = load.to_frame(name='load');
df_test.index.name = 'Time';
self.check_df(df_test, 'get_load.csv');
def test_get_constraint(self):
'''Test generation of occupancy constraint data using occupancy prediction.'''
plt.close('all');
# Load occupancy model
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
# Simulate occupancy model
simulate_options = occupancy.get_simulate_options();
simulate_options['iter_num'] = 5;
np.random.seed(1);
occupancy.simulate(self.start_time, self.final_time);
constraint = occupancy.get_constraint(20, 25);
# Check references
df_test = constraint.to_frame(name='constraint');
df_test.index.name = 'Time';
self.check_df(df_test, 'get_constraint.csv');
def test_error_points_per_day(self):
'''Test occupancy prediction.'''
plt.close('all');
# Time
self.start_time = '3/1/2013';
self.final_time = '3/7/2013 23:59';
# Load occupancy model
with open(self.occupancy_model_file, 'r') as f:
occupancy = pickle.load(f);
# Change occupant measurements to not be whole number in points per day
occupancy.measurements['occupancy']['Sample'] = variables.Static('occupancy_sample', 299, units.s);
# Estimate occupancy model parameters and expect error
with self.assertRaises(ValueError):
np.random.seed(1);
occupancy.estimate(self.start_time, self.final_time);
if __name__ == '__main__':
unittest.main()
| [
"mpcpy.exodata.ControlFromCSV",
"pickle.dump",
"pandas.DataFrame",
"pandas.read_csv",
"mpcpy.models.Occupancy",
"mpcpy.systems.EmulationFromFMU",
"mpcpy.exodata.ParameterFromCSV",
"os.environ.get",
"pickle.load",
"mpcpy.models.Modelica",
"mpcpy.exodata.WeatherFromEPW",
"mpcpy.exodata.InternalF... | [((46885, 46900), 'unittest.main', 'unittest.main', ([], {}), '()\n', (46898, 46900), False, 'import unittest\n'), ((1334, 1392), 'mpcpy.exodata.ControlFromCSV', 'exodata.ControlFromCSV', (['control_csv_filepath', 'variable_map'], {}), '(control_csv_filepath, variable_map)\n', (1356, 1392), False, 'from mpcpy import exodata\n'), ((1503, 1633), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'control_data': 'controls.data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), control_data=controls.data)\n', (1518, 1633), False, 'from mpcpy import models\n'), ((2625, 2683), 'mpcpy.exodata.ControlFromCSV', 'exodata.ControlFromCSV', (['control_csv_filepath', 'variable_map'], {}), '(control_csv_filepath, variable_map)\n', (2647, 2683), False, 'from mpcpy import exodata\n'), ((2794, 2960), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'control_data': 'controls.data', 'save_parameter_input_data': '(True)'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), control_data=controls.data,\n save_parameter_input_data=True)\n', (2809, 2960), False, 'from mpcpy import models\n'), ((3795, 3870), 'mpcpy.systems.EmulationFromFMU', 'systems.EmulationFromFMU', (['self.measurements'], {'moinfo': '(mopath, modelpath, {})'}), '(self.measurements, moinfo=(mopath, modelpath, {}))\n', (3819, 3870), False, 'from mpcpy import systems\n'), ((4152, 4197), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Value"""', '(55000)', 'units.J_K'], {}), "('C_Value', 55000, units.J_K)\n", (4168, 4197), False, 'from mpcpy import variables\n'), ((4254, 4297), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Min"""', '(10000)', 'units.J_K'], {}), "('C_Min', 10000, units.J_K)\n", (4270, 4297), False, 'from mpcpy 
import variables\n'), ((4354, 4399), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Max"""', '(1000000)', 'units.J_K'], {}), "('C_Max', 1000000, units.J_K)\n", (4370, 4399), False, 'from mpcpy import variables\n'), ((4453, 4500), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Free"""', '(True)', 'units.boolean'], {}), "('C_Free', True, units.boolean)\n", (4469, 4500), False, 'from mpcpy import variables\n'), ((4546, 4679), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'parameter_data': 'parameter_data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), parameter_data=parameter_data)\n', (4561, 4679), False, 'from mpcpy import models\n'), ((5091, 5146), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'index', 'columns': "['Value']"}), "(data=data, index=index, columns=['Value'])\n", (5103, 5146), True, 'import pandas as pd\n'), ((5528, 5603), 'mpcpy.systems.EmulationFromFMU', 'systems.EmulationFromFMU', (['self.measurements'], {'moinfo': '(mopath, modelpath, {})'}), '(self.measurements, moinfo=(mopath, modelpath, {}))\n', (5552, 5603), False, 'from mpcpy import systems\n'), ((5885, 5930), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Value"""', '(55000)', 'units.J_K'], {}), "('C_Value', 55000, units.J_K)\n", (5901, 5930), False, 'from mpcpy import variables\n'), ((5987, 6030), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Min"""', '(10000)', 'units.J_K'], {}), "('C_Min', 10000, units.J_K)\n", (6003, 6030), False, 'from mpcpy import variables\n'), ((6087, 6132), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Max"""', '(1000000)', 'units.J_K'], {}), "('C_Max', 1000000, units.J_K)\n", (6103, 6132), False, 'from mpcpy import variables\n'), ((6186, 6233), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Free"""', '(True)', 'units.boolean'], {}), "('C_Free', True, 
units.boolean)\n", (6202, 6233), False, 'from mpcpy import variables\n'), ((6340, 6384), 'mpcpy.variables.Static', 'variables.Static', (['"""R_Value"""', '(0.02)', 'units.K_W'], {}), "('R_Value', 0.02, units.K_W)\n", (6356, 6384), False, 'from mpcpy import variables\n'), ((6443, 6486), 'mpcpy.variables.Static', 'variables.Static', (['"""R_Min"""', '(0.001)', 'units.K_W'], {}), "('R_Min', 0.001, units.K_W)\n", (6459, 6486), False, 'from mpcpy import variables\n'), ((6545, 6586), 'mpcpy.variables.Static', 'variables.Static', (['"""R_Max"""', '(0.1)', 'units.K_W'], {}), "('R_Max', 0.1, units.K_W)\n", (6561, 6586), False, 'from mpcpy import variables\n'), ((6642, 6689), 'mpcpy.variables.Static', 'variables.Static', (['"""R_Free"""', '(True)', 'units.boolean'], {}), "('R_Free', True, units.boolean)\n", (6658, 6689), False, 'from mpcpy import variables\n'), ((6735, 6868), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'parameter_data': 'parameter_data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), parameter_data=parameter_data)\n', (6750, 6868), False, 'from mpcpy import models\n'), ((7385, 7440), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'index': 'index', 'columns': "['Value']"}), "(data=data, index=index, columns=['Value'])\n", (7397, 7440), True, 'import pandas as pd\n'), ((7990, 8048), 'mpcpy.exodata.ControlFromCSV', 'exodata.ControlFromCSV', (['control_csv_filepath', 'variable_map'], {}), '(control_csv_filepath, variable_map)\n', (8012, 8048), False, 'from mpcpy import exodata\n'), ((8159, 8289), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'control_data': 'controls.data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), control_data=controls.data)\n', (8174, 8289), False, 'from 
mpcpy import models\n'), ((9476, 9578), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}))\n', (9491, 9578), False, 'from mpcpy import models\n'), ((10249, 10351), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}))\n', (10264, 10351), False, 'from mpcpy import models\n'), ((10820, 10865), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Value"""', '(55000)', 'units.J_K'], {}), "('C_Value', 55000, units.J_K)\n", (10836, 10865), False, 'from mpcpy import variables\n'), ((10922, 10965), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Min"""', '(10000)', 'units.J_K'], {}), "('C_Min', 10000, units.J_K)\n", (10938, 10965), False, 'from mpcpy import variables\n'), ((11022, 11066), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Max"""', '(100000)', 'units.J_K'], {}), "('C_Max', 100000, units.J_K)\n", (11038, 11066), False, 'from mpcpy import variables\n'), ((11120, 11168), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Free"""', '(False)', 'units.boolean'], {}), "('C_Free', False, units.boolean)\n", (11136, 11168), False, 'from mpcpy import variables\n'), ((11222, 11355), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'parameter_data': 'parameter_data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), parameter_data=parameter_data)\n', (11237, 11355), False, 'from mpcpy import models\n'), ((12184, 12229), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Value"""', '(55000)', 'units.J_K'], {}), "('C_Value', 55000, units.J_K)\n", (12200, 12229), False, 'from 
mpcpy import variables\n'), ((12286, 12329), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Min"""', '(10000)', 'units.J_K'], {}), "('C_Min', 10000, units.J_K)\n", (12302, 12329), False, 'from mpcpy import variables\n'), ((12386, 12430), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Max"""', '(100000)', 'units.J_K'], {}), "('C_Max', 100000, units.J_K)\n", (12402, 12430), False, 'from mpcpy import variables\n'), ((12484, 12531), 'mpcpy.variables.Static', 'variables.Static', (['"""C_Free"""', '(True)', 'units.boolean'], {}), "('C_Free', True, units.boolean)\n", (12500, 12531), False, 'from mpcpy import variables\n'), ((12585, 12718), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', 'self.measurements'], {'moinfo': '(mopath, modelpath, {})', 'parameter_data': 'parameter_data'}), '(models.JModelica, models.RMSE, self.measurements, moinfo=(\n mopath, modelpath, {}), parameter_data=parameter_data)\n', (12600, 12718), False, 'from mpcpy import models\n'), ((17118, 17148), 'os.environ.get', 'os.environ.get', (['"""MODELICAPATH"""'], {}), "('MODELICAPATH')\n", (17132, 17148), False, 'import os\n'), ((17307, 17348), 'mpcpy.exodata.WeatherFromEPW', 'exodata.WeatherFromEPW', (['self.weather_path'], {}), '(self.weather_path)\n', (17329, 17348), False, 'from mpcpy import exodata\n'), ((17374, 17479), 'mpcpy.exodata.InternalFromCSV', 'exodata.InternalFromCSV', (['self.internal_path', 'self.internal_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.internal_path, self.internal_variable_map,\n tz_name=self.weather.tz_name)\n', (17397, 17479), False, 'from mpcpy import exodata\n'), ((17502, 17604), 'mpcpy.exodata.ControlFromCSV', 'exodata.ControlFromCSV', (['self.control_path', 'self.control_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.control_path, self.control_variable_map,\n tz_name=self.weather.tz_name)\n', (17524, 17604), False, 'from mpcpy import exodata\n'), ((17976, 18115), 'mpcpy.systems.RealFromCSV', 
'systems.RealFromCSV', (['self.building_source_file_path_est', 'self.measurements', 'self.measurement_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.building_source_file_path_est, self.measurements,\n self.measurement_variable_map, tz_name=self.weather.tz_name)\n', (17995, 18115), False, 'from mpcpy import systems\n'), ((19204, 19562), 'mpcpy.models.Modelica', 'models.Modelica', (['self.estimate_method', 'self.validation_method', 'self.building_est.measurements'], {'moinfo': '(self.mopath, self.modelpath, self.libraries)', 'zone_names': 'self.zone_names', 'weather_data': 'self.weather.data', 'internal_data': 'self.internal.data', 'control_data': 'self.control.data', 'parameter_data': 'self.parameters.data', 'tz_name': 'self.weather.tz_name'}), '(self.estimate_method, self.validation_method, self.\n building_est.measurements, moinfo=(self.mopath, self.modelpath, self.\n libraries), zone_names=self.zone_names, weather_data=self.weather.data,\n internal_data=self.internal.data, control_data=self.control.data,\n parameter_data=self.parameters.data, tz_name=self.weather.tz_name)\n', (19219, 19562), False, 'from mpcpy import models\n'), ((20394, 20410), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (20403, 20410), True, 'from matplotlib import pyplot as plt\n'), ((21028, 21044), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (21037, 21044), True, 'from matplotlib import pyplot as plt\n'), ((21821, 21844), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (21833, 21844), True, 'import pandas as pd\n'), ((21982, 22130), 'mpcpy.systems.RealFromCSV', 'systems.RealFromCSV', (['self.building_source_file_path_val_missing', 'self.measurements', 'self.measurement_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.building_source_file_path_val_missing, self.\n measurements, self.measurement_variable_map, tz_name=self.weather.tz_name)\n', (22001, 22130), False, 'from mpcpy 
import systems\n'), ((22862, 22885), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (22874, 22885), True, 'import pandas as pd\n'), ((23171, 23187), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (23180, 23187), True, 'from matplotlib import pyplot as plt\n'), ((23703, 23719), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (23712, 23719), True, 'from matplotlib import pyplot as plt\n'), ((24244, 24260), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (24253, 24260), True, 'from matplotlib import pyplot as plt\n'), ((25486, 25509), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (25498, 25509), True, 'import pandas as pd\n'), ((25912, 26051), 'mpcpy.systems.RealFromCSV', 'systems.RealFromCSV', (['self.building_source_file_path_val', 'self.measurements', 'self.measurement_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.building_source_file_path_val, self.measurements,\n self.measurement_variable_map, tz_name=self.weather.tz_name)\n', (25931, 26051), False, 'from mpcpy import systems\n'), ((26796, 26819), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (26808, 26819), True, 'import pandas as pd\n'), ((29658, 29688), 'os.environ.get', 'os.environ.get', (['"""MODELICAPATH"""'], {}), "('MODELICAPATH')\n", (29672, 29688), False, 'import os\n'), ((29847, 29888), 'mpcpy.exodata.WeatherFromEPW', 'exodata.WeatherFromEPW', (['self.weather_path'], {}), '(self.weather_path)\n', (29869, 29888), False, 'from mpcpy import exodata\n'), ((29914, 30019), 'mpcpy.exodata.InternalFromCSV', 'exodata.InternalFromCSV', (['self.internal_path', 'self.internal_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.internal_path, self.internal_variable_map,\n tz_name=self.weather.tz_name)\n', (29937, 30019), False, 'from mpcpy import exodata\n'), ((30042, 30144), 'mpcpy.exodata.ControlFromCSV', 
'exodata.ControlFromCSV', (['self.control_path', 'self.control_variable_map'], {'tz_name': 'self.weather.tz_name'}), '(self.control_path, self.control_variable_map,\n tz_name=self.weather.tz_name)\n', (30064, 30144), False, 'from mpcpy import exodata\n'), ((30661, 30823), 'mpcpy.systems.EmulationFromFMU', 'systems.EmulationFromFMU', (['self.measurements'], {'fmupath': 'self.building_source_file_path', 'zone_names': 'self.zone_names', 'parameter_data': 'building_parameters_data'}), '(self.measurements, fmupath=self.\n building_source_file_path, zone_names=self.zone_names, parameter_data=\n building_parameters_data)\n', (30685, 30823), False, 'from mpcpy import systems\n'), ((31343, 31359), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (31352, 31359), True, 'from matplotlib import pyplot as plt\n'), ((32582, 32971), 'mpcpy.models.Modelica', 'models.Modelica', (['self.estimate_method', 'self.validation_method', 'self.building.measurements'], {'moinfo': '(self.mopath, self.modelpath, self.libraries)', 'zone_names': 'self.zone_names', 'weather_data': 'self.weather.data', 'internal_data': 'self.internal.data', 'control_data': 'self.control.data', 'parameter_data': 'self.parameters.data', 'tz_name': 'self.weather.tz_name', 'save_parameter_input_data': '(True)'}), '(self.estimate_method, self.validation_method, self.building\n .measurements, moinfo=(self.mopath, self.modelpath, self.libraries),\n zone_names=self.zone_names, weather_data=self.weather.data,\n internal_data=self.internal.data, control_data=self.control.data,\n parameter_data=self.parameters.data, tz_name=self.weather.tz_name,\n save_parameter_input_data=True)\n', (32597, 32971), False, 'from mpcpy import models\n'), ((33710, 33776), 'pandas.read_csv', 'pd.read_csv', (['"""mpcpy_simulation_inputs_model.csv"""'], {'index_col': '"""Time"""'}), "('mpcpy_simulation_inputs_model.csv', index_col='Time')\n", (33721, 33776), True, 'import pandas as pd\n'), ((33938, 34013), 
'pandas.read_csv', 'pd.read_csv', (['"""mpcpy_simulation_parameters_model.csv"""'], {'index_col': '"""parameter"""'}), "('mpcpy_simulation_parameters_model.csv', index_col='parameter')\n", (33949, 34013), True, 'import pandas as pd\n'), ((34722, 34745), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (34734, 34745), True, 'import pandas as pd\n'), ((35419, 35442), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (35431, 35442), True, 'import pandas as pd\n'), ((35663, 35679), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (35672, 35679), True, 'from matplotlib import pyplot as plt\n'), ((36325, 36678), 'mpcpy.models.Modelica', 'models.Modelica', (['self.estimate_method', 'self.validation_method', 'self.building.measurements'], {'moinfo': '(self.mopath, self.modelpath, self.libraries)', 'zone_names': 'self.zone_names', 'weather_data': 'self.weather.data', 'internal_data': 'self.internal.data', 'control_data': 'self.control.data', 'parameter_data': 'self.parameters.data', 'tz_name': 'self.weather.tz_name'}), '(self.estimate_method, self.validation_method, self.building\n .measurements, moinfo=(self.mopath, self.modelpath, self.libraries),\n zone_names=self.zone_names, weather_data=self.weather.data,\n internal_data=self.internal.data, control_data=self.control.data,\n parameter_data=self.parameters.data, tz_name=self.weather.tz_name)\n', (36340, 36678), False, 'from mpcpy import models\n'), ((37966, 38014), 'mpcpy.exodata.ParameterFromCSV', 'exodata.ParameterFromCSV', (['parameter_csv_filepath'], {}), '(parameter_csv_filepath)\n', (37990, 38014), False, 'from mpcpy import exodata\n'), ((38288, 38346), 'mpcpy.exodata.ControlFromCSV', 'exodata.ControlFromCSV', (['control_csv_filepath', 'variable_map'], {}), '(control_csv_filepath, variable_map)\n', (38310, 38346), False, 'from mpcpy import exodata\n'), ((38469, 38569), 'mpcpy.systems.EmulationFromFMU', 'systems.EmulationFromFMU', 
(['self.measurements'], {'moinfo': 'self.moinfo', 'control_data': 'self.controls.data'}), '(self.measurements, moinfo=self.moinfo,\n control_data=self.controls.data)\n', (38493, 38569), False, 'from mpcpy import systems\n'), ((39084, 39265), 'mpcpy.models.Modelica', 'models.Modelica', (['models.UKF', 'models.RMSE', 'self.system.measurements'], {'moinfo': 'self.moinfo', 'parameter_data': 'self.parameters.data', 'control_data': 'self.controls.data', 'version': '"""1.0"""'}), "(models.UKF, models.RMSE, self.system.measurements, moinfo=\n self.moinfo, parameter_data=self.parameters.data, control_data=self.\n controls.data, version='1.0')\n", (39099, 39265), False, 'from mpcpy import models\n'), ((39880, 39903), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (39892, 39903), True, 'import pandas as pd\n'), ((41460, 41573), 'mpcpy.systems.RealFromCSV', 'systems.RealFromCSV', (['self.csv_filepath', 'self.measurements', 'self.measurement_variable_map'], {'time_header': '"""Date"""'}), "(self.csv_filepath, self.measurements, self.\n measurement_variable_map, time_header='Date')\n", (41479, 41573), False, 'from mpcpy import systems\n'), ((42010, 42026), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (42019, 42026), True, 'from matplotlib import pyplot as plt\n'), ((42282, 42345), 'mpcpy.models.Occupancy', 'models.Occupancy', (['models.QueueModel', 'self.building.measurements'], {}), '(models.QueueModel, self.building.measurements)\n', (42298, 42345), False, 'from mpcpy import models\n'), ((42401, 42418), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (42415, 42418), True, 'import numpy as np\n'), ((42912, 42928), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (42921, 42928), True, 'from matplotlib import pyplot as plt\n'), ((43100, 43117), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (43114, 43117), True, 'import numpy as np\n'), ((43553, 43569), 
'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (43562, 43569), True, 'from matplotlib import pyplot as plt\n'), ((44163, 44180), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (44177, 44180), True, 'import numpy as np\n'), ((44607, 44630), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'RMSE'}), '(data=RMSE)\n', (44619, 44630), True, 'import pandas as pd\n'), ((44824, 44840), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (44833, 44840), True, 'from matplotlib import pyplot as plt\n'), ((45115, 45132), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (45129, 45132), True, 'import numpy as np\n'), ((45525, 45541), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (45534, 45541), True, 'from matplotlib import pyplot as plt\n'), ((45816, 45833), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (45830, 45833), True, 'import numpy as np\n'), ((46219, 46235), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (46228, 46235), True, 'from matplotlib import pyplot as plt\n'), ((46597, 46647), 'mpcpy.variables.Static', 'variables.Static', (['"""occupancy_sample"""', '(299)', 'units.s'], {}), "('occupancy_sample', 299, units.s)\n", (46613, 46647), False, 'from mpcpy import variables\n'), ((708, 754), 'mpcpy.variables.Static', 'variables.Static', (['"""T_db_sample"""', '(1800)', 'units.s'], {}), "('T_db_sample', 1800, units.s)\n", (724, 754), False, 'from mpcpy import variables\n'), ((13464, 13531), 'mpcpy.models.Modelica', 'models.Modelica', (['models.JModelica', 'models.RMSE', '{}'], {'fmupath': 'fmupath'}), '(models.JModelica, models.RMSE, {}, fmupath=fmupath)\n', (13479, 13531), False, 'from mpcpy import models\n'), ((15716, 15764), 'mpcpy.variables.Static', 'variables.Static', (['"""wesTdb_sample"""', '(1800)', 'units.s'], {}), "('wesTdb_sample', 1800, units.s)\n", (15732, 15764), False, 'from mpcpy import variables\n'), 
((15817, 15865), 'mpcpy.variables.Static', 'variables.Static', (['"""halTdb_sample"""', '(1800)', 'units.s'], {}), "('halTdb_sample', 1800, units.s)\n", (15833, 15865), False, 'from mpcpy import variables\n'), ((15918, 15966), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (15934, 15966), False, 'from mpcpy import variables\n'), ((16021, 16069), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (16037, 16069), False, 'from mpcpy import variables\n'), ((16124, 16172), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (16140, 16172), False, 'from mpcpy import variables\n'), ((16227, 16275), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (16243, 16275), False, 'from mpcpy import variables\n'), ((16326, 16374), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (16342, 16374), False, 'from mpcpy import variables\n'), ((28803, 28851), 'mpcpy.variables.Static', 'variables.Static', (['"""wesTdb_sample"""', '(1800)', 'units.s'], {}), "('wesTdb_sample', 1800, units.s)\n", (28819, 28851), False, 'from mpcpy import variables\n'), ((28904, 28952), 'mpcpy.variables.Static', 'variables.Static', (['"""halTdb_sample"""', '(1800)', 'units.s'], {}), "('halTdb_sample', 1800, units.s)\n", (28920, 28952), False, 'from mpcpy import variables\n'), ((29005, 29053), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (29021, 29053), False, 'from mpcpy import variables\n'), ((29108, 29156), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], 
{}), "('easTdb_sample', 1800, units.s)\n", (29124, 29156), False, 'from mpcpy import variables\n'), ((29211, 29259), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (29227, 29259), False, 'from mpcpy import variables\n'), ((29314, 29362), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (29330, 29362), False, 'from mpcpy import variables\n'), ((29413, 29461), 'mpcpy.variables.Static', 'variables.Static', (['"""easTdb_sample"""', '(1800)', 'units.s'], {}), "('easTdb_sample', 1800, units.s)\n", (29429, 29461), False, 'from mpcpy import variables\n'), ((37536, 37582), 'mpcpy.variables.Static', 'variables.Static', (['"""T_db_sample"""', '(1800)', 'units.s'], {}), "('T_db_sample', 1800, units.s)\n", (37552, 37582), False, 'from mpcpy import variables\n'), ((40240, 40421), 'mpcpy.models.Modelica', 'models.Modelica', (['models.UKF', 'models.RMSE', 'self.system.measurements'], {'moinfo': 'self.moinfo', 'parameter_data': 'self.parameters.data', 'control_data': 'self.controls.data', 'version': '"""2.0"""'}), "(models.UKF, models.RMSE, self.system.measurements, moinfo=\n self.moinfo, parameter_data=self.parameters.data, control_data=self.\n controls.data, version='2.0')\n", (40255, 40421), False, 'from mpcpy import models\n'), ((41213, 41263), 'mpcpy.variables.Static', 'variables.Static', (['"""occupancy_sample"""', '(300)', 'units.s'], {}), "('occupancy_sample', 300, units.s)\n", (41229, 41263), False, 'from mpcpy import variables\n'), ((43041, 43055), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (43052, 43055), False, 'import pickle\n'), ((43682, 43696), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (43693, 43696), False, 'import pickle\n'), ((44953, 44967), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (44964, 44967), False, 'import pickle\n'), ((45654, 45668), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (45665, 45668), False, 'import pickle\n'), ((46445, 46459), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (46456, 46459), False, 'import pickle\n'), ((46768, 46785), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (46782, 46785), True, 'import numpy as np\n'), ((33802, 33831), 'pandas.to_datetime', 'pd.to_datetime', (['df_test.index'], {}), '(df_test.index)\n', (33816, 33831), True, 'import pandas as pd\n'), ((42573, 42587), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (42584, 42587), False, 'import pickle\n'), ((42806, 42831), 'pickle.dump', 'pickle.dump', (['occupancy', 'f'], {}), '(occupancy, f)\n', (42817, 42831), False, 'import pickle\n')] |
from layer import Layer
import numpy as np
class FCLayer(Layer):
def __init__(self,input_size,output_size):
self.weights = np.random.rand(input_size,output_size)-0.5
self.bias = np.random.rand(1,output_size)-0.5
def forward_propagation(self,input_data):
self.input = input_data
self.output= np.dot(self.input,self.weights)+self.bias
return self.output
def backward_propagation(self,output_error, learning_rate):
input_error = np.dot(output_error,self.weights.T)
weights_error = np.dot(self.input.T,output_error)
#updateparameters
self.weights -= learning_rate * weights_error
self.bias -= learning_rate * output_error
return input_error | [
"numpy.dot",
"numpy.random.rand"
] | [((490, 526), 'numpy.dot', 'np.dot', (['output_error', 'self.weights.T'], {}), '(output_error, self.weights.T)\n', (496, 526), True, 'import numpy as np\n'), ((550, 584), 'numpy.dot', 'np.dot', (['self.input.T', 'output_error'], {}), '(self.input.T, output_error)\n', (556, 584), True, 'import numpy as np\n'), ((136, 175), 'numpy.random.rand', 'np.random.rand', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (150, 175), True, 'import numpy as np\n'), ((199, 229), 'numpy.random.rand', 'np.random.rand', (['(1)', 'output_size'], {}), '(1, output_size)\n', (213, 229), True, 'import numpy as np\n'), ((333, 365), 'numpy.dot', 'np.dot', (['self.input', 'self.weights'], {}), '(self.input, self.weights)\n', (339, 365), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from .image_processor import ImageProcessor
from .thermal_image import ThermalImage
from .util import save_image, VideoCreator
class OpenCvImageProcessor(ImageProcessor):
def __init__(self):
self.video_creator = VideoCreator()
self.recording = False
def on_image_received(self, thermal_image: ThermalImage):
cv_image = cv2.cvtColor(np.array(thermal_image.image), cv2.COLOR_RGB2BGR)
mask = thermal_image.thermal_mask
mask = np.where(mask, 255, 0)
mask = mask.reshape(thermal_image.height, thermal_image.width, 1).astype(np.uint8)
edged = cv2.Canny(mask, 30, 200)
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(mask, contours, -1, 255, 40)
inpainted_image = cv2.inpaint(cv_image, mask, 1, cv2.INPAINT_NS)
cv2.imshow("Image", cv_image)
cv2.imshow("Mask", mask)
cv2.imshow("inpainted Image", inpainted_image)
cv2.waitKey(1)
if thermal_image.should_take_photo():
save_image(inpainted_image)
if thermal_image.should_capture():
self.video_creator.add_frame(inpainted_image, not self.recording)
if not self.recording: self.recording = True
if not thermal_image.should_capture() and self.recording:
self.video_creator.save()
self.recording = False
def on_connection_closed(self):
print("Connection closed")
self.video_creator.save()
self.recording = False
| [
"cv2.drawContours",
"cv2.inpaint",
"numpy.where",
"cv2.imshow",
"numpy.array",
"cv2.findContours",
"cv2.Canny",
"cv2.waitKey"
] | [((508, 530), 'numpy.where', 'np.where', (['mask', '(255)', '(0)'], {}), '(mask, 255, 0)\n', (516, 530), True, 'import numpy as np\n'), ((639, 663), 'cv2.Canny', 'cv2.Canny', (['mask', '(30)', '(200)'], {}), '(mask, 30, 200)\n', (648, 663), False, 'import cv2\n'), ((694, 759), 'cv2.findContours', 'cv2.findContours', (['edged', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (710, 759), False, 'import cv2\n'), ((768, 813), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', '(-1)', '(255)', '(40)'], {}), '(mask, contours, -1, 255, 40)\n', (784, 813), False, 'import cv2\n'), ((841, 887), 'cv2.inpaint', 'cv2.inpaint', (['cv_image', 'mask', '(1)', 'cv2.INPAINT_NS'], {}), '(cv_image, mask, 1, cv2.INPAINT_NS)\n', (852, 887), False, 'import cv2\n'), ((896, 925), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'cv_image'], {}), "('Image', cv_image)\n", (906, 925), False, 'import cv2\n'), ((934, 958), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (944, 958), False, 'import cv2\n'), ((967, 1013), 'cv2.imshow', 'cv2.imshow', (['"""inpainted Image"""', 'inpainted_image'], {}), "('inpainted Image', inpainted_image)\n", (977, 1013), False, 'import cv2\n'), ((1022, 1036), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1033, 1036), False, 'import cv2\n'), ((399, 428), 'numpy.array', 'np.array', (['thermal_image.image'], {}), '(thermal_image.image)\n', (407, 428), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import torch
import os, sys
import math
from subspace_inference import models, losses, posteriors, utils
# from swag.posteriors import SWAG, EllipticalSliceSampling, BenchmarkPyro, BenchmarkVIModel
from regression import run
from bayesian_benchmarks.data import get_regression_data
from bayesian_benchmarks.models.nnet.neural_linear import NLRegressionRunner
parser = argparse.ArgumentParser()
parser.add_argument("--model", default='RegNet', nargs='?', type=str)
parser.add_argument("--dataset", default='energy', nargs='?', type=str)
parser.add_argument("--split", default=0, nargs='?', type=int)
parser.add_argument("--seed", default=0, nargs='?', type=int)
parser.add_argument('--database_path', default='', help='output database')
parser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')
parser.add_argument('--epochs', type=int, default=50, metavar='N', help='number of epochs to train (default: 200)')
parser.add_argument('--save_freq', type=int, default=25, metavar='N', help='save frequency (default: 25)')
parser.add_argument('--eval_freq', type=int, default=5, metavar='N', help='evaluation frequency (default: 5)')
parser.add_argument('--lr_init', type=float, default=0.01, metavar='LR', help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')
# --- CLI arguments (the `parser` object is created earlier in this file) ----
parser.add_argument('--batch_size', type=int, default=400, metavar='N',
                    help='input batch size (default: 400)')  # fixed: help said 128, default is 400
parser.add_argument('--model_variance', action='store_true',
                    help='whether NN should also model variance')
parser.add_argument('--noise_var', action='store_true',
                    help='whether NN should have a noise variance term')
parser.add_argument('--no_schedule', action='store_true', help='store schedule')
parser.add_argument('--uci-small', action='store_true')
args = parser.parse_args()

# --- Reproducibility & device selection -------------------------------------
# Seed once; the original seeded twice with only file I/O in between, which
# leaves the RNG state identical, so the duplicate calls were dropped.
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# NOTE(review): the original set benchmark=False and then True a few lines
# later; the net effect was True, which partially defeats determinism above.
# The final (effective) value is kept here — confirm this is intended.
torch.backends.cudnn.benchmark = True
args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# --- Output directory: record the exact command used for this run -----------
print('Preparing directory %s' % args.dir)
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')

# --- Dataset -----------------------------------------------------------------
print('Preparing dataset %s' % args.dataset)
dataset = get_regression_data(args.dataset, split=args.split)
print(dataset.N, dataset.D, dataset.name)

# --- Model configuration ------------------------------------------------------
print('Using model %s' % args.model)
model_cfg = getattr(models, args.model)
print('Preparing model')
print(*model_cfg.args)

# Hidden-layer widths scale with dataset size; --uci-small uses the much
# smaller architectures (similarly to the DVI paper).
if not args.uci_small:
    if dataset.N > 6000:
        model_cfg.kwargs['dimensions'] = [1000, 1000, 500, 50]
    else:
        model_cfg.kwargs['dimensions'] = [1000, 500, 50]
else:
    # protein dataset case
    if dataset.N > 40000:
        model_cfg.kwargs['dimensions'] = [100]
    else:
        model_cfg.kwargs['dimensions'] = [50]

# NOTE(review): --batch_size defaults to 400, so this branch is only reachable
# if that default is removed; kept for safety.
if args.batch_size is None:
    args.batch_size = dataset.N // 10
print('Using batch size', args.batch_size)

# With --epochs 0, choose a number of epochs yielding ~6000 gradient steps.
if args.epochs == 0:  # fixed: was `is 0` (identity test against an int literal)
    args.epochs = int(np.ceil(6000 * args.batch_size / dataset.N))
print('Number of epochs is: ', args.epochs)
print(model_cfg.kwargs)

# Heteroscedastic regression: the network predicts both mean and variance;
# otherwise use a single output with a fixed noise variance.
if args.model_variance:
    print('Model has heteroscedastic regression')
    output_dim = 2
    noise_var = None
else:
    output_dim = 1
    noise_var = 0.5

# TODO: incorporate loss choice into command line args
criterion = losses.GaussianLikelihood

# Regression runner fitting within the confines of regression.py.
regression_model = NLRegressionRunner(
    base=model_cfg.base,
    epochs=args.epochs,
    criterion=criterion,
    batch_size=args.batch_size,
    momentum=args.momentum, wd=args.wd, lr_init=args.lr_init,
    use_cuda=torch.cuda.is_available(),
    const_lr=args.no_schedule, double_bias_lr=True,
    model_variance=args.model_variance,
    input_dim=dataset.D, output_dim=output_dim, apply_var=args.noise_var,
    **model_cfg.kwargs
)

# --- Run through the bayesian_benchmarks harness -----------------------------
mname = args.model + 'NL_LP'
bb_args = argparse.Namespace(model=mname, dataset=args.dataset, split=args.split,
                           seed=args.seed, database_path=args.database_path)
bb_result = run(bb_args, data=dataset, model=regression_model,
                 is_test=args.database_path == '')
# fixed: the original printed a generator object instead of the key/value pairs
for k in sorted(bb_result):
    print((k, bb_result[k]))

utils.save_checkpoint(
    args.dir,
    args.epochs,
    model_state_dict=regression_model.model.state_dict(),
    optimizer=regression_model.optimizer.state_dict(),
    result=bb_result
)
| [
"torch.manual_seed",
"bayesian_benchmarks.data.get_regression_data",
"numpy.ceil",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"regression.run",
"torch.cuda.is_available",
"argparse.Namespace",
"torch.cuda.manual_seed",
"torch.device"
] | [((405, 430), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (428, 430), False, 'import argparse\n'), ((2033, 2061), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2050, 2061), False, 'import torch\n'), ((2062, 2095), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2084, 2095), False, 'import torch\n'), ((2255, 2280), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2278, 2280), False, 'import torch\n'), ((2409, 2445), 'os.makedirs', 'os.makedirs', (['args.dir'], {'exist_ok': '(True)'}), '(args.dir, exist_ok=True)\n', (2420, 2445), False, 'import os, sys\n'), ((2594, 2622), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2611, 2622), False, 'import torch\n'), ((2623, 2656), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2645, 2656), False, 'import torch\n'), ((2713, 2764), 'bayesian_benchmarks.data.get_regression_data', 'get_regression_data', (['args.dataset'], {'split': 'args.split'}), '(args.dataset, split=args.split)\n', (2732, 2764), False, 'from bayesian_benchmarks.data import get_regression_data\n'), ((4375, 4500), 'argparse.Namespace', 'argparse.Namespace', ([], {'model': 'mname', 'dataset': 'args.dataset', 'split': 'args.split', 'seed': 'args.seed', 'database_path': 'args.database_path'}), '(model=mname, dataset=args.dataset, split=args.split,\n seed=args.seed, database_path=args.database_path)\n', (4393, 4500), False, 'import argparse\n'), ((4510, 4599), 'regression.run', 'run', (['bb_args'], {'data': 'dataset', 'model': 'regression_model', 'is_test': "(args.database_path == '')"}), "(bb_args, data=dataset, model=regression_model, is_test=args.\n database_path == '')\n", (4513, 4599), False, 'from regression import run\n'), ((2300, 2320), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2312, 2320), False, 'import torch\n'), ((2345, 
2364), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2357, 2364), False, 'import torch\n'), ((2456, 2492), 'os.path.join', 'os.path.join', (['args.dir', '"""command.sh"""'], {}), "(args.dir, 'command.sh')\n", (2468, 2492), False, 'import os, sys\n'), ((3460, 3503), 'numpy.ceil', 'np.ceil', (['(6000 * args.batch_size / dataset.N)'], {}), '(6000 * args.batch_size / dataset.N)\n', (3467, 3503), True, 'import numpy as np\n'), ((4120, 4145), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4143, 4145), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.