arxiv_id
stringlengths 0
16
| text
stringlengths 10
1.65M
|
|---|---|
from Perceptron.functions.function import Function
import numpy as np
class SoftMax(Function):
    """
    Class representing the softmax function.
    """

    def __init__(self):
        """Constructor of the softmax."""
        super().__init__()
        # softmax is differentiable, so flag it for the framework
        self.is_diff = True

    def compute(self, a):
        """
        Compute the softmax of the given value.

        :param a: input array (1-d, or 2-d with classes along axis 0)
        :return: array of the same shape with entries summing to 1 along axis 0
        """
        # subtract the max for numerical stability; out-of-place so the
        # caller's array is not modified (fix: `a -= np.max(a)` mutated the input)
        shifted = a - np.max(a)
        exp = np.exp(shifted)
        sm = (exp.T / np.sum(exp, axis=0)).T
        return sm

    def compute_derivative(self, a):
        """
        Compute the derivative (Jacobian) of the softmax at the given value.

        :param a: 1-d input array
        :return: (n, n) Jacobian matrix diag(s) - s s^T
        """
        # Reshape the 1-d softmax to 2-d so that np.dot will do the matrix multiplication
        # fix: self.compute() was called without the input argument
        s = self.compute(a).reshape(-1, 1)
        return np.diagflat(s) - np.dot(s, s.T)
|
|
# coding: utf-8
""" Astropy coordinate class for the Ophiuchus coordinate system """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import numpy as np
from astropy.coordinates import frame_transform_graph
from astropy.utils.data import get_pkg_data_filename
import astropy.coordinates as coord
import astropy.units as u
__all__ = ["Ophiuchus", "R"]
class Ophiuchus(coord.BaseCoordinateFrame):
    """
    A Heliocentric spherical coordinate system defined by the orbit
    of the Ophiuchus stream.
    For more information about how to use this class, see the Astropy documentation
    on `Coordinate Frames <http://docs.astropy.org/en/latest/coordinates/frames.html>`_.
    Parameters
    ----------
    representation : `BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    phi1 : `Angle`, optional, must be keyword
        The longitude-like angle corresponding to the orbit.
    phi2 : `Angle`, optional, must be keyword
        The latitude-like angle corresponding to the orbit.
    distance : `Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
    """
    # default to a full spherical (lon/lat/distance) representation
    default_representation = coord.SphericalRepresentation
    # map astropy's generic component names onto the stream-specific
    # names phi1/phi2 (with and without a distance component)
    frame_specific_representation_info = {
        'spherical': [coord.RepresentationMapping('lon', 'phi1'),
                      coord.RepresentationMapping('lat', 'phi2'),
                      coord.RepresentationMapping('distance', 'distance')],
        'unitspherical': [coord.RepresentationMapping('lon', 'phi1'),
                          coord.RepresentationMapping('lat', 'phi2')]
    }
# read the rotation matrix (previously generated)
# R is the static matrix used below to transform Galactic -> Ophiuchus;
# it ships with the package as 'rotationmatrix.txt'
R = np.loadtxt(get_pkg_data_filename('rotationmatrix.txt'))
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Ophiuchus)
def galactic_to_oph():
    """ Compute the transformation from Galactic spherical to
        heliocentric Oph coordinates: simply the pre-computed
        rotation matrix R loaded above.
    """
    return R
# Oph to Galactic coordinates
@frame_transform_graph.transform(coord.StaticMatrixTransform, Ophiuchus, coord.Galactic)
def oph_to_galactic():
    """ Compute the transformation from heliocentric Oph coordinates to
        spherical Galactic: the transpose (inverse) of the Galactic -> Oph
        rotation matrix.
    """
    return R.T
|
|
import ipdb
import os
import math
import benepar
import spacy
import hashlib
import ntpath
import collections
import numpy as np
import pandas as pd
import jieba
from tqdm import tqdm
from nltk import ngrams as compute_ngrams
import _pickle as pickle
class TextAnalyzer:
    """
    Corpus statistics helper: distinct-n, n-gram frequency/IDF, word
    concreteness and stopword ratios. Optionally parses texts with a
    spacy+benepar pipeline and caches the parses as pickles on disk.
    """

    def __init__(self, do_lower=True, language=None, load_spacy_model=False):
        """
        :param do_lower: lowercase every text before analysis
        :param language: 'cn' or 'en' (controls tokenization and resources)
        :param load_spacy_model: load the spacy + benepar parsing pipeline
        Distinct-n follows:
        https://github.com/neural-dialogue-metrics/Distinct-N/blob/master/distinct_n/metrics.py
        """
        self.do_lower = do_lower
        self.language = language
        self.load_spacy_model = load_spacy_model
        # on-disk cache directory for per-text spacy parse results
        self.pickle_dir = '../spacy_temp'
        if not os.path.isdir(self.pickle_dir):
            os.makedirs(self.pickle_dir)
            print(f"Make new dirs {self.pickle_dir} for pickling temp spacy results")
        if load_spacy_model:
            if language == 'cn':
                self.spacy_parser = spacy.load("zh_core_web_trf")  # zh_core_web_trf
                benepar_model = 'benepar_zh2'
            elif language == 'en':
                benepar.download('benepar_en3')
                self.spacy_parser = spacy.load("en_core_web_trf")  # en_core_web_trf
                benepar_model = 'benepar_en3'
            else:
                raise NotImplementedError
            self.benepar_model = benepar_model
            benepar.download(benepar_model)
            self.spacy_parser.add_pipe("benepar", config={"model": benepar_model})
            print(f'Spacy add benepar pipe done! Model: {benepar_model}')
        else:
            self.spacy_parser = None
            # fix: keep the attribute defined even when no model is loaded so
            # attribute access fails predictably instead of with AttributeError
            self.benepar_model = None

    def read_text_from_file(self, path):
        """Read all non-empty, stripped lines of ``path``."""
        texts = []
        with open(path, 'r') as f:
            for raw_line in f:
                line = raw_line.strip()
                if line:
                    texts.append(line)
        return texts

    def compute_ngram_distinct_from_file(self, file_path):
        """
        Compute distinct-n over the lines of ``file_path``.

        :return: the distinct-n DataFrame (fix: the result was previously
            computed but discarded — no return statement)
        """
        texts = []
        with open(file_path, 'r') as f:
            for line in f:
                texts.append(line.strip())
        return self.compute_ngram_distinct(texts)

    def check_basic(self, texts):
        """Print and return (#unique whitespace tokens, average sentence length)."""
        unique_tokens = len(set([x for text in texts for x in text.split()]))
        avg_sen_len = np.average([len(text.split()) for text in texts])
        print(f"[Dataset basic] Number of unique tokens: {unique_tokens}, average sentence length: {avg_sen_len:.3f}")
        return unique_tokens, avg_sen_len

    def compute_ngram_distinct(self, texts, n_grams=(1, 2, 3)):
        """
        Per-sentence distinct-n (unique n-grams / sentence length), averaged
        over all texts, for every n in ``n_grams``.

        :return: DataFrame with columns 'ngram' and 'distinct'
        """
        result = {'ngram': [], 'distinct': []}
        for n_gram in n_grams:
            distinct_value = []
            for text in texts:
                if self.do_lower:
                    text = text.lower()
                sen_ngrams = compute_ngrams(text.split(), n_gram)
                if self.language == 'cn':
                    sen_ngrams = [''.join(x) for x in sen_ngrams]
                elif self.language == 'en':
                    sen_ngrams = [' '.join(x) for x in sen_ngrams]
                else:
                    raise NotImplementedError
                try:
                    # normalized by sentence length, not by #ngrams
                    # distinct_value.append(len(set(sen_ngrams)) / len(sen_ngrams))
                    distinct_value.append(len(set(sen_ngrams)) / len(text.split()))
                except ZeroDivisionError:
                    print('ZeroDivisionError!')
                    continue
            avg_distinct_value = np.average(distinct_value)
            result['ngram'].append(n_gram)
            result['distinct'].append(avg_distinct_value)
        result = pd.DataFrame(result)
        return result

    def compute_ngram(self, texts, n_gram):
        """
        Count n-gram frequencies and document frequencies over ``texts``.

        :return: (frequency dict, idf dict) with idf = log(N / doc_freq)
        """
        n_gram_freq_dict = collections.defaultdict(int)
        n_gram_idf_dict = collections.defaultdict(int)
        for text in texts:
            if self.do_lower:
                text = text.lower()
            sen_ngrams = compute_ngrams(text.split(), n_gram)
            # NOTE(review): n-grams are joined without a separator regardless
            # of language (compute_ngram_distinct branches on language) — confirm
            sen_ngrams = [''.join(x) for x in sen_ngrams]
            for x in set(sen_ngrams):
                n_gram_idf_dict[x] += 1
            for x in sen_ngrams:
                n_gram_freq_dict[x] += 1
        n_gram_idf_dict = {k: math.log(len(texts) / v) for k, v in n_gram_idf_dict.items()}
        return n_gram_freq_dict, n_gram_idf_dict

    def analyse_concreteness(self, texts, language):
        """
        Average word concreteness per POS tag (VERB/NOUN/ADV/ADJ) per text,
        based on the Brysbaert et al. ratings (en) or bigram ratings (cn).

        :return: dict pos_tag -> list of per-text average concreteness scores
        """
        if language == 'en':
            concreteness = pd.read_csv('../static/Concreteness_ratings_Brysbaert_et_al_BRM.csv')
        elif language == 'cn':
            concreteness = pd.read_csv('../static/Concreteness_ratings_cn_bigrams.csv')
        else:
            raise NotImplementedError
        words_concreteness = dict(zip(concreteness['Word'].values, concreteness['Conc.M'].values))
        target_pos = ('VERB', 'NOUN', 'ADV', 'ADJ')
        concreteness_dict = collections.defaultdict(list)
        for text in tqdm(texts, total=len(texts)):
            if self.do_lower:
                text = text.lower()
            text_pickle_path, to_parse_text = self._pickle_path_for_spacy(text, 'pos', 'text_analyzer2')
            token_pickle_path = self._get_token_pickle_path(text_pickle_path)
            if os.path.isfile(text_pickle_path) and os.path.isfile(token_pickle_path):
                # both tag and token caches exist: reuse them
                pos_tags, tokens = pickle.load(open(text_pickle_path, 'rb')), \
                                   pickle.load(open(token_pickle_path, 'rb'))
            else:
                spacy_parse_result = self._spacy_parse_text(to_parse_text,
                                                            'pos',
                                                            text_pickle_path,
                                                            dump_res=True,
                                                            return_token=True)
                if spacy_parse_result is not None:
                    pos_tags, tokens = spacy_parse_result
                else:
                    continue
            concreteness_tmp_dict = collections.defaultdict(list)
            for pos_tag, token in zip(pos_tags, tokens):
                if pos_tag in target_pos:
                    if language == 'en':
                        if token in words_concreteness:
                            concreteness_tmp_dict[pos_tag].append(words_concreteness[token])
                    elif language == 'cn':
                        token_list = list(token)
                        # fix: renamed from `concreteness`, which shadowed the
                        # ratings DataFrame loaded above
                        conc_score = 0.0
                        if len(token_list) < 2:
                            pass
                        elif len(token_list) == 2:
                            conc_score += words_concreteness.get(''.join(token_list), 0.0)
                        else:
                            # sum the bigram scores of longer tokens
                            bigram_token = list(compute_ngrams(token_list, 2))
                            for bigram in bigram_token:
                                conc_score += words_concreteness.get(''.join(bigram), 0.0)
                        if conc_score > 0.0:
                            concreteness_tmp_dict[pos_tag].append(conc_score)
                    else:
                        raise NotImplementedError
            concreteness_tmp_dict = {k: np.average(v) for k, v in concreteness_tmp_dict.items()}
            # missing POS tags contribute 0.0 for this text
            for pos in target_pos:
                if pos not in concreteness_tmp_dict:
                    concreteness_tmp_dict[pos] = 0.0
            for k, v in concreteness_tmp_dict.items():
                concreteness_dict[k].append(v)
        return concreteness_dict

    def analyse_stopwords(self, texts):
        """
        Stopword statistics over ``texts``.

        :return: (per-sentence stopword ratio list,
                  dict stopword -> share of all stopword occurrences)
        """
        if self.language == 'en':
            stopword_path = '../static/en_nltk_baidu_stopwords'
        elif self.language == 'cn':
            stopword_path = '../static/cn_baidu_stopwords'
        else:
            raise NotImplementedError
        stopwords = []
        with open(stopword_path, 'r') as f:
            for line in f:
                line = line.strip()
                if line:
                    stopwords.append(line)
        stopwords = set(stopwords)
        stopword_sen_ratio = []
        stopwords_ratio = collections.defaultdict(int)
        for text in texts:
            if self.do_lower:
                text = text.lower()
            if self.language == 'en':
                text_tokenized = text.split(' ')
            elif self.language == 'cn':
                text_tokenized = jieba.lcut(text)
            else:
                raise NotImplementedError
            stopword_count = 0
            for x in text_tokenized:
                if x in stopwords:
                    stopwords_ratio[x] += 1
                    stopword_count += 1
            stopword_ratio = stopword_count / len(text_tokenized)
            stopword_sen_ratio.append(stopword_ratio)
        total_stopword_count = np.sum(list(stopwords_ratio.values()))
        stopwords_ratio = {k: v / total_stopword_count for k, v in stopwords_ratio.items()}
        return stopword_sen_ratio, stopwords_ratio

    def _get_token_pickle_path(self, text_pickle_path):
        """Derive the token-cache path ('<hash>_token.pkl') from a tag-cache path."""
        pickle_base_dir = os.path.dirname(text_pickle_path)
        pickle_file_name = ntpath.basename(text_pickle_path)
        token_pickle_name = pickle_file_name.split('.')[0] + f'_token.' + pickle_file_name.split('.')[1]
        token_pickle_path = os.path.join(pickle_base_dir, token_pickle_name)
        return token_pickle_path

    def _spacy_parse_text(self, to_parse_text, parse_choice, text_pickle_path,
                          return_token=False,
                          dump_res=True,
                          dump_token=False):
        """
        Parse one text with spacy and extract 'pos', 'dep' or 'ner' tags.

        :return: tags (and tokens if ``return_token``) or None when parsing
            failed or produced nothing; optionally pickles the results
        """
        try:
            parse_res = self.spacy_parser(str(to_parse_text))
        except Exception as e:
            if 'exceeds the maximum supported length' in str(e):
                # text too long for the parser: skip it silently
                return None
            else:
                # NOTE(review): interactive debugger breakpoint left in
                # library code — remove before production use
                ipdb.set_trace()
                return None
        parse_text = []
        tokens = []
        for token in parse_res:
            if return_token:
                tokens.append(token.text)
            if parse_choice == 'pos':
                parse_text.append(token.pos_)
            elif parse_choice == 'dep':
                parse_text.append(token.dep_)
            elif parse_choice == 'ner':
                # NOTE(review): this appends ALL entity labels once per token,
                # duplicating them len(tokens) times — confirm intended
                for ent in parse_res.ents:
                    parse_text.append(ent.label_)
        if dump_res:
            if parse_text:
                # (fix: removed a redundant nested `if dump_res:` check)
                pickle.dump(parse_text, open(text_pickle_path, 'wb'))
                if dump_token:
                    assert return_token
                    assert tokens
                    pickle.dump(tokens, open(self._get_token_pickle_path(text_pickle_path), 'wb'))
            else:
                return None
        if return_token:
            return parse_text, tokens
        else:
            return parse_text

    def _pickle_path_for_spacy(self, text, parse_choice, save_prefix):
        """
        Build a unique cache path for (text, parse_choice, model) plus the
        (possibly de-spaced) text that should actually be parsed.
        """
        if self.language == 'en':
            to_parse_text = text
        elif self.language == 'cn':
            # Chinese spacy models expect unsegmented text
            to_parse_text = text.replace(' ', '')
        else:
            raise NotImplementedError
        model_name = self.spacy_parser.meta['name'] + '_' + self.spacy_parser.meta[
            'lang'] + '_' + self.benepar_model
        # hash everything that influences the parse so cache entries never collide
        text_md5 = hashlib.md5(f'{save_prefix}_{model_name}_{to_parse_text}_{parse_choice}'.encode()).hexdigest()
        text_pickle_path = os.path.join(self.pickle_dir, f'{text_md5}.pkl')
        return text_pickle_path, to_parse_text

    def load_parsed_texts_by_spacy(self, texts, parse_choice, dump_res=False):
        """
        Parse all texts (reusing the on-disk cache when possible) and return a
        DataFrame with the relative frequency of every tag, sorted descending.

        :param parse_choice: 'pos', 'ner' or 'dep'
        :param dump_res: pickle newly computed parses
        """
        assert self.spacy_parser is not None
        assert parse_choice in {'pos', 'ner', 'dep'}
        all_results = []
        for text in tqdm(texts, total=len(texts)):
            text_pickle_path, to_parse_text = self._pickle_path_for_spacy(text, parse_choice, 'text_analyzer2')
            # filter out very short texts
            if len(to_parse_text) < 10:
                continue
            if os.path.isfile(text_pickle_path):
                try:
                    parse_text = pickle.load(open(text_pickle_path, 'rb'))
                except Exception as e:
                    # corrupted cache entry: fall back to re-parsing
                    print(f'Pickle exception: {e}')
                    parse_text = self._spacy_parse_text(to_parse_text,
                                                        parse_choice,
                                                        text_pickle_path,
                                                        dump_res=dump_res)
            else:
                parse_text = self._spacy_parse_text(to_parse_text,
                                                    parse_choice,
                                                    text_pickle_path,
                                                    dump_res=dump_res)
            if parse_text is None:
                continue
            else:
                all_results.extend(parse_text)
        all_results = collections.Counter(all_results)
        total_count = np.sum(list(all_results.values()))
        all_results = {k: v / total_count for k, v in all_results.items()}
        df_result = pd.DataFrame(all_results.items())
        df_result = df_result.sort_values(df_result.columns[1], ascending=False)
        return df_result
|
|
import numpy as np
from numpy import random
import time
# Generate a random 500x500 matrix and a 500x1 vector, save them, then time a
# naive pure-Python matrix-vector multiplication and save the result.
# (fix: the matrix was named `input`, shadowing the builtin; the degenerate
# `for m in range(1)` / `for j in range(1)` loops did nothing and are removed)
matrix = random.randint(100, 500, size=(500, 500))
vector = random.randint(100, 500, size=(500, 1))
output = np.zeros((500, 1))
np.savetxt("input.txt", matrix)
np.savetxt("vector.txt", vector)
start_time = time.time()
# naive double loop kept on purpose: the point is to time the Python-level
# multiply, not np.dot
for i in range(500):
    for k in range(500):
        output[i][0] += matrix[i][k] * vector[k][0]
finish_time = time.time()
np.savetxt("output.txt", output)
print("计算结束,结果存入输出文件中!")
result = 1000 * (finish_time - start_time)
print("本次处理所用时间:%f ms" % (result))
|
|
import random
import gym
import numpy as np
import torch
from yarll.common.evaluation import evaluate_policy
def zipsame(*seqs):
    """
    Zip several sequences together, asserting they all have the same length.

    :param seqs: the sequences to zip
    :return: an iterator of tuples, as produced by ``zip``
    """
    expected = len(seqs[0])
    for seq in seqs[1:]:
        assert len(seq) == expected
    return zip(*seqs)
def set_global_seeds(seed):
    """
    set the seed for python random, tensorflow, numpy and gym spaces
    :param seed: (int) the seed
    """
    # seed every RNG the project uses, in the same order as before
    for seeder in (torch.manual_seed, np.random.seed, random.seed):
        seeder(seed)
def mpi_rank_or_zero():
    """
    Return the MPI rank if mpi4py is installed. Otherwise, return 0.

    :return: (int)
    """
    try:
        # fix: `import mpi4py` alone does not load the MPI submodule, so the
        # old `mpi4py.MPI` attribute access raised an (uncaught) AttributeError
        from mpi4py import MPI
    except ImportError:
        return 0
    return MPI.COMM_WORLD.Get_rank()
def flatten_lists(listoflists):
    """
    Flatten a python list of list
    :param listoflists: (list(list))
    :return: (list)
    """
    flat = []
    for inner in listoflists:
        flat.extend(inner)
    return flat
def verify_env_policy(policy, env: gym.Env):
    """
    Sanity-check that ``env`` accepts its own sampled actions and that
    ``policy`` can be evaluated on ``env``.

    :param policy: the policy to verify
    :param env: (gym.Env) the environment to verify against
    :raises Exception: if env.step rejects a sampled action, or if the
        policy fails to produce a valid action for the environment
    """
    # verify Env's observation space matches the action space
    env.reset()
    if isinstance(env.action_space, list):
        action = [torch.tensor(ac_space.sample()) for ac_space in env.action_space]
    else:
        action = torch.tensor(env.action_space.sample())
    try:
        env.step(action)
    # fix: bare `except:` also caught KeyboardInterrupt/SystemExit and hid
    # the original error; catch Exception and chain the cause instead
    except Exception as e:
        raise Exception("The environment provided does not accept a proper action! Make sure env.step() expects the "
                        "same type / dimension as env.action_space.sample().") from e
    # verify Policy and Env compatibility
    try:
        evaluate_policy(policy, env, n_eval_episodes=1)
    except Exception as e:
        raise Exception("The provided policy does not provide a valid action according to the provided "
                        "environment!") from e
def total_episode_reward_logger(rew_acc, rewards, masks, writer, steps):
    """
    calculates the cumulated episode reward, and logs it per finished episode
    :param rew_acc: (np.array float) the total running reward, one entry per env
    :param rewards: (np.array float) the rewards, shape (n_envs, n_steps)
    :param masks: (np.array bool) the end-of-episode markers, same shape
    :param writer: a summary writer exposing an ``add_scalar`` method
    :param steps: (int) the current timestep
    :return: (np.array float) the updated total running reward
    """
    for env_idx in range(rewards.shape[0]):
        # step indices (within this rollout) where an episode ended
        dones_idx = np.sort(np.argwhere(masks[env_idx]))
        if len(dones_idx) == 0:
            # no episode finished in this rollout: just keep accumulating
            rew_acc[env_idx] += sum(rewards[env_idx])
        else:
            # close out the episode that was already partially accumulated ...
            rew_acc[env_idx] += sum(rewards[env_idx, :dones_idx[0, 0]])
            writer.add_scalar("Episode rewards", rew_acc[env_idx], steps + dones_idx[0, 0])
            # ... log every episode fully contained in this rollout ...
            for k in range(1, len(dones_idx[:, 0])):
                rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[k-1, 0]:dones_idx[k, 0]])
                writer.add_scalar("Episode rewards", rew_acc[env_idx], steps + dones_idx[k, 0])
            # ... and start accumulating the trailing, unfinished episode
            rew_acc[env_idx] = sum(rewards[env_idx, dones_idx[-1, 0]:])
    return rew_acc
|
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.io
import os
from sklearn.preprocessing import MinMaxScaler
import logging
logging.basicConfig(level=logging.INFO)
class Dataset:
    """
    Loader for (generalized) zero-shot learning datasets stored as matlab
    files: res101.mat (features, labels) and att_splits.mat (class
    attributes and train/val/test index splits).
    """
    # class-level defaults; all of these are set per instance by read()
    attribute = None
    train_feature = None
    train_label = None
    dataset_folder = None
    seen_class = None
    unseen_class = None
    test_unseen_feature = None
    test_unseen_label = None
    test_seen_feature = None
    test_seen_label = None

    def __init__(self, folder: str):
        # root folder containing one sub-folder per dataset
        self.dataset_folder = folder

    def read(self, dataset_name: str, preprocessing: bool = False, validation: bool = False):
        """
        Read the features, labels, attributes and splits of ``dataset_name``.

        :param dataset_name: sub-folder with res101.mat and att_splits.mat
        :param preprocessing: min-max scale features (scaler fit on train only)
        :param validation: use the train/val splits instead of trainval/test
        """
        matcontent = scipy.io.loadmat(os.path.join(self.dataset_folder, dataset_name, "res101.mat"))
        feature = matcontent['features'].T
        # all_file = matcontent['image_files']
        label = matcontent['labels'].astype(int).squeeze() - 1
        matcontent = scipy.io.loadmat(os.path.join(self.dataset_folder, dataset_name, "att_splits.mat"))
        # numpy array index starts from 0, matlab starts from 1
        trainval_loc = matcontent['trainval_loc'].squeeze() - 1
        train_loc = matcontent['train_loc'].squeeze() - 1
        val_unseen_loc = matcontent['val_loc'].squeeze() - 1
        test_seen_loc = matcontent['test_seen_loc'].squeeze() - 1
        test_unseen_loc = matcontent['test_unseen_loc'].squeeze() - 1
        if matcontent.get("val_seen_loc") is not None:
            # dataset also provides seen/unseen validation splits (GZSL validation)
            val_seen_loc = matcontent['val_seen_loc'].squeeze() - 1
            val_unseen_loc = matcontent['val_unseen_loc'].squeeze() - 1
        else:
            val_seen_loc = None
            logging.info("This dataset does not support GZSL validation")
        if not validation:
            # standard mode: train on trainval, test on test splits
            trloc = trainval_loc
            tsloc = test_seen_loc
            tuloc = test_unseen_loc
            if preprocessing:
                # fit the scaler on training features only, transform the rest
                scaler = MinMaxScaler()
                _train_feature = scaler.fit_transform(feature[trloc])
                _test_seen_feature = scaler.transform(feature[tsloc])
                _test_unseen_feature = scaler.transform(feature[tuloc])
            else:
                _train_feature = feature[trloc]
                _test_seen_feature = feature[tsloc]
                _test_unseen_feature = feature[tuloc]
            self.test_seen_feature = _test_seen_feature
            self.test_seen_label = label[tsloc]
        else:
            # validation mode: train on train, evaluate on the val splits
            trloc = train_loc
            tsloc = val_seen_loc
            tuloc = val_unseen_loc
            if preprocessing:
                scaler = MinMaxScaler()
                _train_feature = scaler.fit_transform(feature[trloc])
                if val_seen_loc is not None:
                    _test_seen_feature = scaler.transform(feature[tsloc])
                _test_unseen_feature = scaler.transform(feature[tuloc])
            else:
                _train_feature = feature[trloc]
                if val_seen_loc is not None:
                    _test_seen_feature = feature[tsloc]
                _test_unseen_feature = feature[tuloc]
            # seen-test data only exists when the dataset supports GZSL validation
            if val_seen_loc is not None:
                self.test_seen_feature = _test_seen_feature
                self.test_seen_label = label[tsloc]
        self.attribute = matcontent['att'].T
        self.train_feature = _train_feature
        self.train_label = label[trloc]
        self.test_unseen_feature = _test_unseen_feature
        self.test_unseen_label = label[tuloc]
        self.unseen_class = np.unique(self.test_unseen_label)
        self.seen_class = np.unique(self.train_label)
        logging.info("preprocessing: {} - validation: {}".format(preprocessing, validation))
        logging.info("features: {} - attributes: {}".format(feature.shape, self.attribute.shape))
        logging.info("seen classes: {} - unseen classes: {}".format(len(self.seen_class), len(self.unseen_class)))
        test_seen_count = 0
        if self.test_seen_label is not None:
            test_seen_count = len(self.test_seen_label)
        logging.info("training: {} - test seen: {} - test unseen: {}".format(len(self.train_label), test_seen_count, len(self.test_unseen_label)))

    def attributes(self):
        """All class attribute vectors as float32 (one row per class)."""
        return self.attribute.astype(np.float32)

    def unseen_classes(self):
        """Class ids that appear only in the unseen test/val split."""
        return self.unseen_class.astype(np.int32)

    def seen_classes(self):
        """Class ids that appear in the training split."""
        return self.seen_class.astype(np.int32)

    def attribute_size(self):
        """Dimensionality of the class attribute vectors."""
        return self.attribute.shape[1]

    def feature_size(self):
        """Dimensionality of the visual features."""
        return self.train_feature.shape[1]

    def train_features(self):
        """Training features as float32."""
        return self.train_feature.astype(np.float32)

    def train_attributes(self):
        """Per-sample attribute vectors of the training set (label-indexed)."""
        return self.attribute[self.train_label].astype(np.float32)

    def train_labels(self):
        """Training labels as int32."""
        return self.train_label.astype(np.int32)

    def attribute_seen(self):
        """Attribute vectors of the seen classes only."""
        return self.attribute[self.seen_class].astype(np.float32)

    def test_unseen_features(self):
        """Unseen-class test features as float32."""
        return self.test_unseen_feature.astype(np.float32)

    def test_unseen_labels(self):
        """Unseen-class test labels as int32."""
        return self.test_unseen_label.astype(np.int32)

    def test_seen_features(self):
        """Seen-class test features (GZSL only)."""
        assert self.test_seen_feature is not None, "Validation set does not support GZSL"
        return self.test_seen_feature.astype(np.float32)

    def test_seen_labels(self):
        """Seen-class test labels (GZSL only)."""
        assert self.test_seen_label is not None, "Validation set does not support GZSL"
        return self.test_seen_label.astype(np.int32)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import os
import numpy as np
from numpy import pi,cos,sin
import pandas as pd
import logging
from plotnine import *
from scipy.stats.mstats import winsorize
from plotnine.stats.stat_summary import bootstrap_statistics
#%% put PUPIL LABS data into PANDAS DF
def gaze_to_pandas(gaze):
    """
    Convert Pupil Labs gaze data (a list of dicts) into a pandas dataframe
    with gx, gy, confidence, smpl_time, pupil diameter and (calculated)
    pupil area (pa).
    """
    import pandas as pd
    list_diam= []
    list_pa= []
    for idx,p in enumerate(gaze):
        if p:
            if 'surface' in gaze[0]['topic']:
                # we have a surface-mapped dictionary; we have to get the real base_data
                # the nesting is: surface-mapped => base_data world-mapped => base_data pupil
                p_basedata = p['base_data']['base_data']
            else:
                p_basedata = p['base_data']
            # take the mean over all pupil-diameters
            diam = 0
            pa = 0
            for idx_bd,bd in enumerate(p_basedata):
                # NOTE(review): pa keeps only the LAST base_data's ellipse
                # area while diam is averaged over all — confirm intended
                pa = convert_diam_to_pa(bd['ellipse']['axes'][0], bd['ellipse']['axes'][1])
                diam = diam + bd['diameter']
            diam = diam/(idx_bd+1)
            list_diam.append(diam)
            list_pa.append(pa)
    df = pd.DataFrame({'gx':[p['norm_pos'][0] for p in gaze if p],
                       'gy':[p['norm_pos'][1] for p in gaze if p],
                       'confidence': [p['confidence'] for p in gaze if p],
                       'smpl_time':[p['timestamp'] for p in gaze if p],
                       'diameter':list_diam,
                       'pa': list_pa
                       })
    return df
def convert_diam_to_pa(axes1, axes2):
    """Ellipse area from the two pupil axes: pi/4 * axis1 * axis2."""
    area = math.pi * float(axes1) * float(axes2) * 0.25
    return area
#%% adding information to dfs
def add_msg_to_event(etevents,etmsgs,timefield = 'start_time', direction='backward'):
    """
    Merge the message df onto the event df by time: every event row gets the
    nearest message (by msg_time), searched in ``direction`` relative to the
    event's ``timefield``.

    :param etevents: event dataframe; must contain the ``timefield`` column
    :param etmsgs: message dataframe; must contain 'msg_time'
    :param timefield: event column to match on (fix: this parameter was
        previously declared but ignored — 'start_time' was hard-coded)
    :param direction: passed through to ``pd.merge_asof``
    :return: the merged dataframe
    """
    # merge_asof requires both frames to be sorted on their keys
    etevents = etevents.sort_values(timefield)
    etmsgs = etmsgs.sort_values('msg_time')
    # make a merge on the msg time and the event time
    merged_etevents = pd.merge_asof(etevents, etmsgs, left_on=timefield,
                                    right_on='msg_time', direction=direction)
    return merged_etevents
def add_events_to_samples(etsamples, etevents):
    """
    Write each event's type into the samples that fall inside it, and give
    blink samples a running ``blink_id``.

    Calls append_eventtype_to_sample once per event type in ``etevents``.
    """
    logger = logging.getLogger(__name__)
    logger.info(etevents.type.unique())
    for evt in etevents.type.unique():
        etsamples = append_eventtype_to_sample(etsamples,etevents,eventtype=evt)
        # add blink id
        if evt == 'blink':
            # counts up the blink_id: consecutive 'blink' samples share one id;
            # a new id starts wherever the blink indicator jumps 0 -> 1
            # (non-blink samples get id 0 via the leading indicator factor)
            etsamples.loc[:,'blink_id'] = (1*(etsamples['type']=='blink')) * ((1*(etsamples['type']=='blink')).diff()==1).cumsum()
    return(etsamples)
def append_eventtype_to_sample(etsamples,etevents,eventtype,timemargin=None):
    """
    Set the 'type' column of every sample that falls within an event of
    ``eventtype`` (expanded by ``timemargin`` seconds at both ends).

    :param etsamples: samples dataframe, sorted by smpl_time
    :param etevents: events dataframe with 'type', 'start_time', 'end_time'
    :param eventtype: e.g. 'blink', 'fixation', 'saccade'
    :param timemargin: [start_offset, end_offset] in seconds; defaults to
        [-0.1, 0.1] for blinks and [0, 0] otherwise
    :return: the modified etsamples
    """
    # get a logger
    logger = logging.getLogger(__name__)
    logger.debug('Appending eventtype: %s to samples',eventtype)
    if timemargin is None:
        if eventtype== 'blink':
            logger.info('Taking Default value for timemargin (blink = -0.1s/0.1s)')
            timemargin = [-.1,.1]
        else:
            logger.info('Taking Default value for timemargin (fix/saccade = 0s)')
            timemargin = [0,0]
    # get index of the rows that have that eventtype
    ix_event = etevents['type']==eventtype
    # get list of start and end times, padded by the margin
    eventstart = etevents.loc[ix_event,'start_time']+float(timemargin[0])
    eventend = etevents.loc[ix_event,'end_time']+float(timemargin[1])
    flat_ranges = eventtime_to_sampletime(etsamples,eventstart,eventend)
    # all etsamples with index in the ranges get the eventtype in column 'type'
    if len(flat_ranges) > 0:
        etsamples.loc[etsamples.index[flat_ranges], 'type'] = eventtype
    return etsamples
def eventtime_to_sampletime(etsamples,eventstart,eventend):
    """
    Convert event start/end times into flat sample-row positions.

    :param etsamples: samples dataframe with a sorted 'smpl_time' column
    :param eventstart: Series of event start times (clipped in place)
    :param eventend: Series of event end times (clipped in place)
    :return: np.array of row positions in etsamples covered by any event
    :raises ValueError: if eventstart and eventend differ in length
    """
    # due to timemargin strange effects can occur and we need to clip
    mintime = etsamples.smpl_time.iloc[0]
    maxtime = etsamples.smpl_time.iloc[-1]
    eventstart.loc[eventstart < mintime] = mintime
    eventstart.loc[eventstart > maxtime] = maxtime
    eventend.loc[eventend < mintime] = mintime
    eventend.loc[eventend > maxtime] = maxtime
    if len(eventstart)!=len(eventend):
        # fix: `raise error` referenced an undefined name (NameError);
        # raise a proper exception instead
        raise ValueError('eventstart and eventend must have the same length')
    startix = np.searchsorted(etsamples.smpl_time,eventstart)
    endix = np.searchsorted(etsamples.smpl_time,eventend)
    # make a list of ranges to have all indices in between the startix and endix
    ranges = [list(range(s,e)) for s,e in zip(startix,endix)]
    flat_ranges = [item for sublist in ranges for item in sublist]
    # keep only positions that actually exist in etsamples
    flat_ranges = np.intersect1d(flat_ranges,range(etsamples.shape[0]))
    return(flat_ranges)
#%% last fixation (e.g. for large GRID)
def only_last_fix(merged_etevents, next_stim = None):
    """
    Keep only the last fixation per stimulus.

    :param merged_etevents: event dataframe with a 'type' column
    :param next_stim: columns identifying one stimulus; defaults to
        ['condition', 'block', 'element'] (fix: was a mutable default argument)
    :return: one row per stimulus group — its last fixation event
    """
    if next_stim is None:
        next_stim = ['condition', 'block', 'element']
    # TODO: smooth pursuit events are currently NOT relabeled as fixations
    # (the relabeling was commented out upstream because it raised a weird error)
    # use only fixation events, group by stimulus and take the last one of it
    large_grid_df = merged_etevents[merged_etevents.type == 'fixation'].groupby(next_stim).last()
    large_grid_df.reset_index(level= next_stim, inplace=True)
    return large_grid_df
#%% function to make groupby easier
def group_to_level_and_take_mean(raw_condition_df, lowestlevel):
    """
    Group the dataframe down to ``lowestlevel`` and take the mean of the
    remaining columns (e.g. accuracy and precision measures).

    :param lowestlevel: one of 'subject', 'block', 'element_positions',
        'condition'
    :raises ValueError: for an unknown level
    """
    # which columns define each grouping level
    level_columns = {
        'subject': ['et', 'subject'],
        'block': ['et', 'subject', 'block'],
        'element_positions': ['et', 'subject', 'block', 'posx', 'posy'],
        'condition': ['et', 'subject', 'condition'],
    }
    if lowestlevel not in level_columns:
        raise ValueError('This level is unknown / not implemented')
    columns = level_columns[lowestlevel]
    grouped_df = raw_condition_df.groupby(columns).mean().reset_index(level=columns)
    return grouped_df
#%% set dtypes of dataframe and make the labels ready to get plotted
def set_dtypes(df):
    """
    Set the dtype of the categories, so that plotting is easier and more pretty.
    E.g. set column 'et' from object to categorical
    """
    # make all object variables categorical
    df[df.select_dtypes(['object']).columns] = df.select_dtypes(['object']).apply(lambda x: x.astype('category'))
    # list of categorical variables that have to be treated separately as they were not object dtypes
    categorial_var = ["block", "trial", "pic_id"]
    # set columns to correct dtype
    for column in categorial_var:
        if column in df:
            # fill none values to not have problems with integers
            df[column] = df[column].fillna(-1)
            # convert ids to integer and round them to make them look nicely
            df[column] = pd.to_numeric(df[column], downcast='integer')
            df[column] = df[column].round(0).astype(int)
            # convert -1 back to None: the column ends up as strings with
            # NaN for the previously missing ids
            df[column] = df[column].astype(str)
            df[column] = df[column].replace('-1', np.nan)
            # old version
            #df[column] = df[column].astype('category')
    # logging.debug('dtypes of the df after: %s', df.dtypes)
    return df
def set_to_full_names(df):
    """
    rename values to their full name
    e.g. et 'el' --> 'EyeLink'
    (column renaming is deliberately left out; relabel axes when plotting)
    """
    full_names = {'el': 'EyeLink', 'pl': 'Pupil Labs'}
    df.loc[:, 'et'] = df['et'].map(full_names)
    return df
#%% everything related to VISUAL DEGREES
def size_px2deg(px, mm_per_px=0.276, distance=600):
    """
    Convert a size in pixels (e.g. the freeviewing stimulus size) into
    visual angle in degrees.

    :param px: size in pixels
    :param mm_per_px: pixel pitch of the monitor in mm
    :param distance: viewing distance in mm
    """
    half_size_mm = px / 2 * mm_per_px
    deg = 2 * np.arctan2(half_size_mm, distance) * 180 / np.pi
    return deg
def px2deg(px, orientation, mm_per_px=0.276,distance=600):
    """
    Convert a screen coordinate in pixels to visual degrees relative to the
    center of the BENQ monitor (1920x1080).

    :param px: pixel coordinate (scalar or array)
    :param orientation: 'horizontal' (x axis) or 'vertical' (y axis)
    :param mm_per_px: pixel pitch of the monitor in mm
    :param distance: viewing distance in mm
    :raises ValueError: for an unknown orientation
    """
    # subtract center of our BENQ so that 0 deg is the screen center
    if orientation == 'horizontal':
        center_x = 1920 / 2
        px = px - center_x
    elif orientation == 'vertical':
        center_y = 1080 / 2
        px = px - center_y
    else:
        # fix: `raise('unknown option')` raised a TypeError (a string is
        # not an exception); raise a proper exception type instead
        raise ValueError('unknown option')
    deg = np.arctan2(px*mm_per_px,distance)*180/np.pi
    return deg
def sph2cart(theta_sph, phi_sph, rho_sph=1):
    """
    Convert spherical coordinates (theta, phi, rho) to a cartesian xyz
    array (theta measured from the +z axis, physics convention).
    """
    st, ct = sin(theta_sph), cos(theta_sph)
    sp, cp = sin(phi_sph), cos(phi_sph)
    xyz_sph = np.asarray([rho_sph * st * cp,
                          rho_sph * st * sp,
                          rho_sph * ct])
    return xyz_sph
#%% LOAD & SAVE & FIND file
def load_file(et,subject,datapath='/net/store/nbp/projects/etcomp/',outputprefix='',cleaned=True):
    """
    Load the preprocessed samples / msgs / events csv files of one subject.

    :param et: eye tracker prefix (e.g. 'el' or 'pl')
    :param subject: subject folder name
    :param datapath: root data folder
    :param outputprefix: optional prefix of the csv file names
    :param cleaned: load '<et>_cleaned_samples.csv' instead of '<et>_samples.csv'
    :return: (etsamples, etmsgs, etevents) dataframes
    :raises FileNotFoundError: if any of the csv files is missing
    """
    # filepath for preprocessed folder
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    et = outputprefix+et
    if cleaned:
        filename_samples = str(et) + '_cleaned_samples.csv'
    else:
        filename_samples = str(et) + '_samples.csv'
    filename_msgs = str(et) + '_msgs.csv'
    filename_events = str(et) + '_events.csv'
    # keep the try block minimal: only the reads can raise
    try:
        etsamples = pd.read_csv(os.path.join(preprocessed_path,filename_samples))
        etmsgs = pd.read_csv(os.path.join(preprocessed_path,filename_msgs))
        etevents = pd.read_csv(os.path.join(preprocessed_path,filename_events))
    except FileNotFoundError as e:
        print(e)
        # fix: bare `raise` preserves the original traceback
        # (the old `raise e` restarted the chain from here)
        raise
    return etsamples,etmsgs,etevents
def save_file(data,et,subject,datapath,outputprefix=''):
    """
    Save (samples, cleaned samples, msgs, events) dataframes as csv files
    into the subject's 'preprocessed' folder.

    :param data: sequence of 4 dataframes, in the order above
    :param et: eye tracker prefix (e.g. 'el' or 'pl')
    :param subject: subject folder name
    :param datapath: root data folder
    :param outputprefix: optional prefix for the csv file names
    """
    # filepath for preprocessed folder
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    # create new folder if there is none
    if not os.path.exists(preprocessed_path):
        os.makedirs(preprocessed_path)
    et = outputprefix+et
    # dump data in csv
    filename_samples = str(et) + '_samples.csv'
    filename_cleaned_samples = str(et) + '_cleaned_samples.csv'
    filename_msgs = str(et) + '_msgs.csv'
    filename_events = str(et) + '_events.csv'
    # make separate csv file for every df
    data[0].to_csv(os.path.join(preprocessed_path, filename_samples), index=False)
    data[1].to_csv(os.path.join(preprocessed_path, filename_cleaned_samples), index=False)
    data[2].to_csv(os.path.join(preprocessed_path, filename_msgs), index=False)
    data[3].to_csv(os.path.join(preprocessed_path, filename_events), index=False)
def findFile(path,ftype):
    """Return all filenames in ``path`` ending with ``ftype`` (e.g. the EyeLink .edf)."""
    matches = []
    for name in os.listdir(path):
        if name.endswith(ftype):
            matches.append(name)
    return matches
def get_subjectnames(datapath='/net/store/nbp/projects/etcomp/'):
    """Return the subject names, i.e. the entries of ``datapath``.

    NOTE(review): returns every directory entry — assumes ``datapath``
    contains only subject folders; confirm.
    """
    return os.listdir(datapath)
#%% Tic Toc Matlab equivalent to time things
import time
def TicTocGenerator():
    """Generator yielding the wall-clock time elapsed since its previous resume."""
    mark = time.time()  # taken on the first next() call
    while True:
        prev, mark = mark, time.time()
        yield mark - prev
TicToc = TicTocGenerator()  # create an instance of the TicTocGen generator
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
    """
    Advance the module-level TicToc generator and (optionally) print the
    time elapsed since the previous tic()/toc() call.

    :param tempBool: if True, print the elapsed time
    """
    # Prints the time difference yielded by generator instance TicToc
    tempTimeInterval = next(TicToc)
    if tempBool:
        print( "Elapsed time: %f seconds.\n" %tempTimeInterval )
def tic():
    """Record a time in TicToc; marks the beginning of a time interval."""
    # advance the generator without printing
    toc(False)
def plot_around_event(etsamples,etmsgs,etevents,single_eventormsg,plusminus=(-1,1),bothET=True,plotevents=True):
    """
    Plot samples, messages and events in a time window around a single event
    or message row and return the plotnine ggplot object.

    :param single_eventormsg: a pd.Series — either an event (has start_time)
        or a msg (has msg_time)
    :param plusminus: (before, after) window in seconds around the row's time
    :param bothET: if False, restrict to the eyetracker of the given row
    :param plotevents: if True, draw events as horizontal segments at y=0
    """
    import re
    assert(type(single_eventormsg)==pd.Series)
    # decide whether we got an event (has start_time) or a msg (has msg_time)
    # NOTE(review): bare except — any failure here falls through to 'msg'
    try:
        t0 = single_eventormsg.start_time
        eventtype = 'event'
    except:
        t0 = single_eventormsg.msg_time
        eventtype = 'msg'
    tstart = t0 + plusminus[0]
    tend = t0 + plusminus[1]
    # build a pandas query string, optionally restricted to subject / eyetracker
    query = '1==1'
    if ("subject" in etsamples.columns) & ("subject" in single_eventormsg.index):
        query = query+"& subject == @single_eventormsg.subject"
    if not bothET:
        query = query+"& eyetracker==@single_eventormsg.eyetracker"
    samples_query = "smpl_time>=@tstart & smpl_time <=@tend & "+query
    msg_query = "msg_time >=@tstart & msg_time <=@tend & "+query
    event_query = "end_time >=@tstart & start_time <=@tend & "+query
    etmsgs = etmsgs.query(msg_query)
    # build one text label per msg row (collapse the exp_event column to strings)
    longstring = etmsgs.to_string(columns=['exp_event'],na_rep='',float_format='%.1f',index=False,header=False,col_space=0)
    longstring = re.sub(' +',' ',longstring)
    splitstring = longstring.split(sep="\n")
    if len(splitstring) == etmsgs.shape[0]-1:
        # last element was a Nan blank and got removed
        splitstring.append(' ')
    etmsgs.loc[:,'label'] = splitstring
    p = (ggplot()
         + geom_point(aes(x='smpl_time',y='gx',color='type',shape='eyetracker'),data=etsamples.query(samples_query)) # samples
         + geom_text(aes(x='msg_time',y=2,label="label"),color='black',position=position_jitter(width=0),data=etmsgs)# label msg/trigger
         + geom_vline(aes(xintercept='msg_time'),color='black',data=etmsgs) # triggers/msgs
         )
    # NOTE(review): this query result is unused — dead code?
    if etevents.query(event_query).shape[0]>0:
        pass
    if plotevents:
        p = p + geom_segment(aes(x="start_time",y=0,xend="end_time",yend=0,color='type'),alpha=0.5,size=2,data=etevents.query(event_query))
    if eventtype == 'event':
        # mark the inspected event itself in black
        p = (p + annotate("line",x=[single_eventormsg.start_time,single_eventormsg.end_time],y=0,color='black')
             + annotate("point",x=[single_eventormsg.start_time,single_eventormsg.end_time],y=0,color='black'))
    if eventtype=='msg':
        if single_eventormsg.condition == 'GRID':
            # for GRID messages also show the target position and accuracy
            p = (p + annotate("text",x=single_eventormsg.end_time,y=single_eventormsg.posx+5,label=single_eventormsg.accuracy)
                 + geom_hline(yintercept=single_eventormsg.posx))
    return(p)
# define 20% winsorized means
def winmean(x, perc=0.2, axis=0):
    """Mean after winsorizing ``perc`` of the data at each end of ``axis``."""
    clipped = winsorize(x, perc, axis=axis)
    return np.mean(clipped, axis=axis)
def winmean_cl_boot(series, n_samples=10000, confidence_interval=0.95,
                    random_state=None):
    """Bootstrap confidence interval of the 20% winsorized mean of *series*.

    Thin wrapper that forwards to ``bootstrap_statistics`` with ``winmean``
    as the statistic; all keyword arguments are passed straight through.
    """
    options = dict(n_samples=n_samples,
                   confidence_interval=confidence_interval,
                   random_state=random_state)
    return bootstrap_statistics(series, winmean, **options)
def mad(arr):
    """Return the Median Absolute Deviation (MAD) of *arr*.

    A robust estimate of spread: the median of the absolute deviations from
    the median. Masked entries (if *arr* is a masked array) are dropped.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    values = np.ma.array(arr).compressed()  # drop masked entries, if any
    deviations = np.abs(values - np.median(values))
    return np.median(deviations)
|
|
import ctypes
import time
from math import cos, sin, pi

import numpy
from OpenGL.GL import *

import perf_overlay_lib
import udim_vt_lib
import viewport
PREPASS_VERTEX_SHADER_SOURCE = """
#version 460 core
layout(location = 0) uniform mat4 modelViewProjection;
layout(location = 0) in vec3 P;
layout(location = 1) in vec2 uv;
layout(location = 0) out vec2 outUv;
void main() {
outUv = uv * 2;
gl_Position = modelViewProjection * vec4(P, 1.0);
}
"""
PREPASS_FRAG_SHADER_SOURCE = """
#version 460 core
layout(location = 0) in vec2 uv;
// We could half pack the derivs into ZW of the UVs as halfs directly
layout(location = 0) out vec2 outUv;
layout(location = 1) out vec4 outUvDerivs;
void main() {
outUv = uv;
// Could be abs'd and still work, GL doesnt have a unorm half format tho
outUvDerivs = vec4(dFdx(uv), dFdy(uv));
}
"""
class Renderer(object):
    """UDIM virtual-texturing demo renderer.

    Per frame:
      1. (when dirty) rasterize the mesh into a UV + UV-derivative G-buffer,
      2. run a compute shader that resolves those UVs through the UDIM/VT
         indirection tables into the physical virtual texture, writing the
         final colour and appending tile-feedback records,
      3. blit the colour target to the back buffer.
    """
    def __init__(self):
        self.window = viewport.Window()
        self.camera = viewport.Camera()
        # Wire window callbacks to the private handlers below.
        self.window.on_init = self._init
        self.window.on_draw = self._draw
        self.window.on_resize = self._resize
        self.window.on_drag = self._drag
        self.window.on_keypress = self._keypress
        # GL resources; populated in _init once a context exists.
        self.main_geom = None
        self.udim_offset = None
        self.udim_info_start = None
        self.udim_info = None
        # When True, the UV prepass is re-rendered on the next draw.
        self.dirty_base_update = True
        self.timer_overlay = perf_overlay_lib.TimerSamples256Overlay()
    def dirty_base(self):
        """Mark the UV prepass as stale (camera or scene changed)."""
        self.dirty_base_update = True
    def run(self):
        """Enter the window main loop (blocks until the window closes)."""
        self.window.run()
    def _init(self, wnd):
        """Create all GL resources: geometry, shaders, framebuffers, UDIM
        indirection tables, the physical virtual texture and feedback buffers.
        """
        glClearColor(0.5, 0.5, 0.5, 0.0)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_STENCIL_TEST)
        glDisable(GL_CULL_FACE)
        # Load the demo mesh with position + UV attributes.
        self.main_geom = viewport.load_obj(
            # "data/cubeWithNormals.obj",
            "data/armadillo.obj",
            (
                viewport.ObjGeomAttr.P,
                viewport.ObjGeomAttr.UV,
            )
        )
        # Model matrix: identity rotation/scale, translated up by 1.5 units.
        self._main_geom_model = numpy.matrix([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
            [0, 1.5, 0, 1],
        ], dtype=numpy.float32)
        self._prepass_program = viewport.generate_shader_program(
            GL_VERTEX_SHADER=PREPASS_VERTEX_SHADER_SOURCE,
            GL_FRAGMENT_SHADER=PREPASS_FRAG_SHADER_SOURCE,
        )
        self._apply_vt_program = viewport.generate_shader_program(
            GL_COMPUTE_SHADER=udim_vt_lib.APPLY_VT_TEXTURES_CS
        )
        # Prepass targets: depth+stencil, UVs, and UV derivatives.
        self._framebuffer_depth = viewport.FramebufferTarget(
            GL_DEPTH32F_STENCIL8,
            True,
            custom_texture_settings={
                GL_TEXTURE_WRAP_S: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_WRAP_T: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_MIN_FILTER: GL_LINEAR,
                GL_TEXTURE_MAG_FILTER: GL_LINEAR,
            }
        )
        # Colour target the VT compute pass writes into (blitted to screen).
        self._framebuffer_col = viewport.FramebufferTarget(
            GL_RGBA8,
            True,
            custom_texture_settings={
                GL_TEXTURE_WRAP_S: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_WRAP_T: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_MIN_FILTER: GL_LINEAR,
                GL_TEXTURE_MAG_FILTER: GL_LINEAR,
            }
        )
        self._fb_uv = viewport.FramebufferTarget(
            GL_RG32F,
            True,
            custom_texture_settings={
                GL_TEXTURE_WRAP_S: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_WRAP_T: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_MIN_FILTER: GL_LINEAR,
                GL_TEXTURE_MAG_FILTER: GL_LINEAR,
            }
        )
        self._fb_uv_derivs = viewport.FramebufferTarget(
            GL_RGBA32F,
            True,
            custom_texture_settings={
                GL_TEXTURE_WRAP_S: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_WRAP_T: GL_CLAMP_TO_EDGE,
                GL_TEXTURE_MIN_FILTER: GL_LINEAR,
                GL_TEXTURE_MAG_FILTER: GL_LINEAR,
            }
        )
        self._prepass_framebuffer = viewport.Framebuffer(
            (
                self._framebuffer_depth,
                self._fb_uv,
                self._fb_uv_derivs,
            ),
            wnd.width,
            wnd.height
        )
        # Colour framebuffer shares the prepass depth via a proxy target.
        self._scene_col_fb = viewport.Framebuffer(
            (
                viewport.ProxyFramebufferTarget(self._framebuffer_depth),
                self._framebuffer_col,
            ),
            wnd.width,
            wnd.height,
        )
        # Build the UDIM indirection data from source images.
        # NOTE(review): hard-coded absolute Windows paths — this will only run
        # on the author's machine as-is.
        udim_ind_data = udim_vt_lib.UdimIndirectionBuilder([
            udim_vt_lib.UdimEntry(
                (0, 0),
                udim_vt_lib.Image(r"C:\Users\thoth\Desktop\im0.png")
            ),
            udim_vt_lib.UdimEntry(
                (1, 0),
                udim_vt_lib.Image(r"C:\Users\thoth\Desktop\im1.png")
            ),
            udim_vt_lib.UdimEntry(
                (0, 1),
                udim_vt_lib.Image(r"C:\Users\thoth\Desktop\im2.png")
            ),
            udim_vt_lib.UdimEntry(
                (1, 1),
                udim_vt_lib.Image(r"C:\Users\thoth\Desktop\mickey.png")
            ),
            udim_vt_lib.UdimEntry(
                (10, 100),
                udim_vt_lib.Image(r"C:\Users\thoth\Desktop\im2.png")
            ),
        ])
        # (offset_u, offset_v, table_width, table_height) pushed as a uniform.
        self.udim_offset = (
            udim_ind_data.udim_offset[0],
            udim_ind_data.udim_offset[1],
            udim_ind_data.udim_info_start.shape[1],
            udim_ind_data.udim_info_start.shape[0]
        )
        # SSBO holding the packed per-UDIM info records.
        self._buffers_ptr = (ctypes.c_int * 1)()
        glCreateBuffers(1, self._buffers_ptr)
        self.udim_info = self._buffers_ptr[0]
        udim_info_raw = udim_ind_data.udim_info.tobytes()
        glNamedBufferStorage(self.udim_info, len(udim_info_raw), udim_info_raw, 0)
        # R32UI table mapping a UDIM tile to the start of its info records.
        self._udim_info_start_ptr = ctypes.c_int()
        glCreateTextures(GL_TEXTURE_2D, 1, self._udim_info_start_ptr)
        self._udim_info_start = self._udim_info_start_ptr.value
        glTextureParameteri(self._udim_info_start, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._udim_info_start, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._udim_info_start, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST)
        glTextureParameteri(self._udim_info_start, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTextureStorage2D(
            self._udim_info_start,
            1,
            GL_R32UI,
            udim_ind_data.udim_info_start.shape[1],
            udim_ind_data.udim_info_start.shape[0]
        )
        glTextureSubImage2D(
            self._udim_info_start,
            0, 0, 0,
            udim_ind_data.udim_info_start.shape[1],
            udim_ind_data.udim_info_start.shape[0],
            GL_RED_INTEGER,
            GL_UNSIGNED_INT,
            udim_ind_data.udim_info_start.tobytes()
        )
        # R16UI array texture: per-mip page indirection into the physical VT.
        self._vt_indirection_ptr = ctypes.c_int()
        glCreateTextures(GL_TEXTURE_2D_ARRAY, 1, self._vt_indirection_ptr)
        self._vt_indirection = self._vt_indirection_ptr.value
        glTextureParameteri(self._vt_indirection, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._vt_indirection, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._vt_indirection, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST)
        glTextureParameteri(self._vt_indirection, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTextureStorage3D(
            self._vt_indirection,
            1,
            GL_R16UI,
            udim_ind_data.mip_indirection_size[0],
            udim_ind_data.mip_indirection_size[1],
            udim_ind_data.mip_indirection_size[2]
        )
        # Clear indirection entries to all-ones ("page not resident").
        clear_vt_ptr = ctypes.c_long()
        clear_vt_ptr.value = ~0
        glClearTexImage(self._vt_indirection, 0, GL_RED_INTEGER, GL_UNSIGNED_SHORT, clear_vt_ptr)
        # Physical virtual-texture storage: 2 layers of 8192x8192 RGBA8.
        self._virtual_texture_dim = (8192, 8192, 2)
        self._inv_virtual_texture_size = (1/8192, 1/8192)
        self._virtual_texture_ptr = ctypes.c_int()
        glCreateTextures(GL_TEXTURE_2D_ARRAY, 1, self._virtual_texture_ptr)
        self._virtual_texture = self._virtual_texture_ptr.value
        glTextureParameteri(self._virtual_texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._virtual_texture, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTextureParameteri(self._virtual_texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
        glTextureParameteri(self._virtual_texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTextureStorage3D(
            self._virtual_texture,
            1,
            GL_RGBA8,
            self._virtual_texture_dim[0],
            self._virtual_texture_dim[1],
            self._virtual_texture_dim[2]
        )
        # Tile-feedback buffers: an atomic counter + a storage buffer of
        # u64 records appended by the compute shader.
        self._max_feedback_values = 1024 * 1024
        self._feedback_buffers_ptr = (ctypes.c_int * 2)()
        glCreateBuffers(2, self._feedback_buffers_ptr)
        self._feedback_counter = self._feedback_buffers_ptr[0]
        self._feedback_storage = self._feedback_buffers_ptr[1]
        glNamedBufferStorage(self._feedback_counter, 4, None, 0)
        glNamedBufferStorage(
            self._feedback_storage,
            self._max_feedback_values * 8, # u64 per result
            None,
            0
        )
        self.camera.look_at(
            numpy.array([0, 3, 0]),
            numpy.array([0.83922848, 3.71858291, 0.52119542]),
        )
        glViewport(0, 0, wnd.width, wnd.height)
    def _draw(self, wnd):
        """Per-frame draw: optional prepass, VT-resolve compute, blit."""
        # Turn this on to make things slow
        # (disabled debug readback of the feedback buffer; stalls the GPU)
        if False:
            if not hasattr(self, "COUNTER_TMP"):
                self.COUNTER_TMP = 0
            if not hasattr(self, "UNIQUE_TMP"):
                self.UNIQUE_TMP = 0
            counter_ptr = ctypes.c_int()
            glGetNamedBufferSubData(
                self._feedback_counter,
                0,
                4,
                counter_ptr
            )
            # The < 1024 * 1024 thing shouldnt be needed
            if counter_ptr.value > 0:
                count = min(1024*1024, counter_ptr.value)
                tiles_data_ptr = (ctypes.c_uint64 * count)()
                glGetNamedBufferSubData(
                    self._feedback_storage,
                    0,
                    8 * count,
                    tiles_data_ptr
                )
                uniq_tiles = set(tiles_data_ptr)
                if self.UNIQUE_TMP != len(uniq_tiles):
                    print("uniq: ", len(uniq_tiles))
                    self.UNIQUE_TMP = len(uniq_tiles)
            if counter_ptr.value > self.COUNTER_TMP:
                self.COUNTER_TMP = counter_ptr.value
                print(self.COUNTER_TMP)
        # Draw stencil-depth HiZ
        # Re-render the UV G-buffer only when the camera/scene changed.
        if self.dirty_base_update:
            with self._prepass_framebuffer.bind():
                glStencilFunc(GL_ALWAYS, 1, 0xFF)
                glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
                glStencilMask(0xFF)
                glDepthFunc(GL_LEQUAL)
                glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)
                glUseProgram(self._prepass_program)
                glUniformMatrix4fv(0, 1, GL_FALSE, (self._main_geom_model * self.camera.view_projection).flatten())
                self.main_geom.draw()
            self.dirty_base_update = False
        # Clear feedback stuff
        clear_ptr = ctypes.c_int()
        clear_ptr.value = 0
        glClearNamedBufferData(
            self._feedback_counter,
            GL_R32UI,
            GL_RED_INTEGER,
            GL_UNSIGNED_INT,
            clear_ptr
        )
        # We don't need to clear the feedback storage, as we use the count
        # to determine how much to read
        # glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_BUFFER_UPDATE_BARRIER_BIT)
        glMemoryBarrier(GL_ALL_BARRIER_BITS)
        # VT resolve: bind G-buffer inputs, indirection tables, the physical
        # texture and the feedback buffers, then dispatch one thread per pixel.
        glUseProgram(self._apply_vt_program)
        glBindImageTexture(
            0,
            self._framebuffer_col.texture,
            0,
            0,
            0,
            GL_WRITE_ONLY,
            GL_RGBA8
        )
        glBindImageTexture(
            1,
            self._fb_uv.texture,
            0,
            0,
            0,
            GL_READ_ONLY,
            GL_RG32F
        )
        glBindImageTexture(
            2,
            self._fb_uv_derivs.texture,
            0,
            0,
            0,
            GL_READ_ONLY,
            GL_RGBA32F
        )
        glBindTextureUnit(3, self._framebuffer_depth.texture)
        glBindImageTexture(
            4,
            self._udim_info_start,
            0,
            0,
            0,
            GL_READ_ONLY,
            GL_R32UI
        )
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 5, self.udim_info)
        glBindTextureUnit(6, self._vt_indirection)
        glBindTextureUnit(7, self._virtual_texture)
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 8, self._feedback_counter)
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 9, self._feedback_storage)
        glUniform4i(
            0,
            self.udim_offset[0],
            self.udim_offset[1],
            self.udim_offset[2],
            self.udim_offset[3]
        )
        glUniform2i(
            1,
            wnd.width,
            wnd.height,
        )
        glUniform2f(
            2,
            self._inv_virtual_texture_size[0],
            self._inv_virtual_texture_size[1],
        )
        # 8x8 workgroups, rounded up to cover the whole window.
        glDispatchCompute((wnd.width + 7)//8, (wnd.height + 7)//8, 1)
        # glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT)
        glMemoryBarrier(GL_ALL_BARRIER_BITS)
        self._scene_col_fb.blit_to_back(
            wnd.width,
            wnd.height,
            GL_COLOR_BUFFER_BIT,
            GL_NEAREST
        )
        self.timer_overlay.update(wnd.width, wnd.height)
        wnd.redraw()
    def _resize(self, wnd, width, height):
        """Resize framebuffers, viewport and camera aspect; mark prepass dirty."""
        self._prepass_framebuffer.resize(width, height)
        self._scene_col_fb.resize(width, height)
        # self._draw_framebuffer.resize(width, height)
        self.dirty_base()
        glViewport(0, 0, width, height)
        self.camera.set_aspect(width/height)
    def _keypress(self, wnd, key, x, y):
        """WASDQE camera movement (shift = faster), 1/2 polygon mode toggles."""
        # Move the camera
        shift = key.isupper()
        key = key.lower()
        move_amount = 0.1 + 0.9 * shift
        if key == b'w':
            self.camera.move_local(numpy.array([0, 0, move_amount]))
        elif key == b's':
            self.camera.move_local(numpy.array([0, 0, -move_amount]))
        elif key == b'a':
            self.camera.move_local(numpy.array([move_amount, 0, 0]))
        elif key == b'd':
            self.camera.move_local(numpy.array([-move_amount, 0, 0]))
        elif key == b'q':
            self.camera.move_local(numpy.array([0, move_amount, 0]))
        elif key == b'e':
            self.camera.move_local(numpy.array([0, -move_amount, 0]))
        # NOTE(review): self._text_size is never initialised anywhere in this
        # class, so pressing '.' or ',' raises AttributeError on first use —
        # looks like leftover from a text-overlay feature; confirm.
        elif key == b'.':
            self._text_size += 0.5
        elif key == b',':
            self._text_size -= 0.5
        # Wireframe / Solid etc
        elif key == b'1':
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        elif key == b'2':
            glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        # No redraw
        else:
            return
        self.dirty_base()
        wnd.redraw()
    def _drag(self, wnd, x, y, button):
        """Mouse-drag camera rotation: yaw from x; pitch (or roll with the
        right button) from y, applied in the camera's local basis."""
        deriv_u = x / wnd.width
        deriv_v = y / wnd.height
        sin_u = sin(deriv_u * pi)
        cos_u = cos(deriv_u * pi)
        sin_v = sin(deriv_v * pi)
        cos_v = cos(deriv_v * pi)
        ortho = self.camera.orthonormal_basis
        # Y
        M = numpy.matrix([
            [cos_u, 0, sin_u],
            [0, 1, 0],
            [-sin_u, 0, cos_u],
        ])
        # XY stuff
        if button == wnd.RIGHT:
            N = numpy.matrix([
                [cos_v, -sin_v, 0],
                [sin_v, cos_v, 0],
                [0, 0, 1],
            ])
        else:
            N = numpy.matrix([
                [1, 0, 0],
                [0, cos_v, -sin_v],
                [0, sin_v, cos_v],
            ])
        # Conjugate by the camera basis so the rotation is camera-local.
        N = ortho * N * ortho.I
        M *= N
        self.camera.append_3x3_transform(M)
        self.dirty_base()
        wnd.redraw()
if __name__ == "__main__":
    # Script entry point: build the renderer and hand control to its loop.
    renderer = Renderer()
    renderer.run()
|
|
try:
from pycorenlp import StanfordCoreNLP
except:
pass
from subprocess import call
import numpy as np
from get_args import *
import os
# Placeholder token substituted for out-of-vocabulary words.
UNK = "$UNK$"
# Placeholder token substituted for purely numeric words.
NUM = "$NUM$"
# The "outside any entity" tag of the IOB tagging scheme.
NONE = "O"
def get_iso_lang_abbreviation():
    """Read "iso_lang_abbr.txt" ("language:iso" per line) from the CWD.

    Returns:
        (iso_to_lang, lang_to_iso): two dicts mapping iso code -> language
        name and language name -> iso code respectively.
    """
    iso_to_lang = {}
    lang_to_iso = {}
    with open("iso_lang_abbr.txt") as mapping_file:
        for raw_line in mapping_file.read().splitlines():
            parts = raw_line.split(":")
            lang_to_iso[parts[0]] = parts[1]
            iso_to_lang[parts[1]] = parts[0]
    return iso_to_lang, lang_to_iso
class DependencyParser(object):
    """Thin client around a locally running Stanford CoreNLP server.

    NOTE(review): requires a CoreNLP server listening on localhost:9001, and
    `StanfordCoreNLP` is undefined if the optional pycorenlp import at the
    top of the file failed — construction would then raise NameError.
    """
    def __init__(self, sent):
        # The sentence is stored but annotation happens per call in
        # find_dep_words_pos_offsets, which takes its own `sent` argument.
        self.sent = sent
        self.stanford = StanfordCoreNLP('http://localhost:9001')
        self.properties = {'annotators': 'tokenize,ssplit,pos,depparse,parse', 'outputFormat': 'json'}
        #call(["java -mx4g -cp '*' edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001 -timeout 15000"])
    def find_dep_words_pos_offsets(self, sent):
        """Annotate `sent` with CoreNLP and unpack the first sentence.

        Returns:
            penn_treebank: parse tree as a single-line string
            triples: list of "dep/dep=<i>/gov=<j>" dependency descriptors
                (indices shifted to be 0-based)
            words: tokens in order
            pos_tags: POS tag per token
            offset_start_dic: char offset of token start -> 0-based index
            offset_end_dic: char offset of token end - 1 -> 1-based index
            words_dict: 0-based token index -> token
        """
        output = self.stanford.annotate(sent, properties=self.properties)
        penn_treebank = output['sentences'][0]['parse'].replace("\n", "")
        triples = []
        for part in output['sentences'][0]['enhancedPlusPlusDependencies']:
            triples.append(part['dep']+"/dep="+str(part['dependent']-1)+"/gov="+str(part['governor']-1))
        words = []
        words_dict = {}
        pos_tags = []
        offset_start_dic = {}
        offset_end_dic = {}
        for i, word in enumerate(output['sentences'][0]['tokens']):
            words.append(word["word"])
            pos_tags.append(word["pos"])
            offset_start_dic.update({word["characterOffsetBegin"]: word["index"]-1})
            offset_end_dic.update({word["characterOffsetEnd"]-1: word["index"]})
            words_dict.update({word["index"]-1: word["word"]})
        return penn_treebank, triples, words, pos_tags, offset_start_dic, offset_end_dic, words_dict
class Embeddings(object):
    """Word-embedding loader that caches a vocabulary-trimmed matrix on disk.

    Reads a raw embedding text file (fasttext, glove, or a multilingual file
    whose first token looks like ``lang_word``), keeps only the vectors for
    words in ``vocab``, and stores them as ``<emb_filename>_trimmed.npz`` so
    later runs can skip the expensive full-file scan.
    """

    def __init__(self, emb_filename, emb_type, vocab, dim_word):
        """
        Args:
            emb_filename: path to the raw embedding text file
            emb_type: "fasttext", "glove", or anything else for multilingual
            vocab: dictionary vocab[word] = index
            dim_word: dimensionality of each embedding vector
        """
        self.emb_filename = emb_filename
        self.dim = dim_word
        self.trimmed_filename = ".".join(emb_filename.split(".")[:-1]) + "_trimmed.npz"
        if emb_type == "fasttext" or emb_type == "glove":
            self.embed_vocab = self.get_emb_vocab()
        else:
            self.embed_vocab = self.get_multi_emb_vocab()
        # Build the trimmed matrix only once; reuse the cached .npz afterwards.
        if not os.path.isfile(self.trimmed_filename):
            if emb_type == "fasttext":
                self.export_trimmed_fasttext_vectors(vocab)
                self.embed_vocab = self.get_emb_vocab()
            elif emb_type == "glove":
                self.export_trimmed_glove_vectors(vocab)
                self.embed_vocab = self.get_emb_vocab()
            else:
                self.export_trimmed_multi_vectors(vocab)
                self.embed_vocab = self.get_multi_emb_vocab()

    def _export_trimmed(self, vocab, skip_header=False, extract_word=None):
        """Shared worker: scan the raw file once and save the trimmed matrix.

        Args:
            vocab: dictionary vocab[word] = index
            skip_header: skip the first line (fasttext files carry a header)
            extract_word: optional callable mapping the raw first token to the
                vocabulary word (used for "lang_word" multilingual tokens)
        """
        embeddings = np.zeros([len(vocab), self.dim])
        with open(self.emb_filename) as f:
            if skip_header:
                next(f, None)
            for line in f:
                parts = line.strip().split(' ')
                word = parts[0] if extract_word is None else extract_word(parts[0])
                if word in vocab:
                    # Only parse the floats for words we actually keep.
                    embeddings[vocab[word]] = np.asarray(
                        [float(x) for x in parts[1:]])
        np.savez_compressed(self.trimmed_filename, embeddings=embeddings)

    def export_trimmed_fasttext_vectors(self, vocab):
        """Saves fasttext monolingual vectors (header line skipped) in a
        numpy array.

        Args:
            vocab: dictionary vocab[word] = index
        """
        self._export_trimmed(vocab, skip_header=True)

    def export_trimmed_glove_vectors(self, vocab):
        """Saves glove vectors in numpy array.

        Args:
            vocab: dictionary vocab[word] = index
        """
        self._export_trimmed(vocab)

    def export_trimmed_multi_vectors(self, vocab):
        """Saves multilingual vectors ("lang_word" tokens) in a numpy array.

        Args:
            vocab: dictionary vocab[word] = index
        """
        self._export_trimmed(vocab, extract_word=lambda token: token.split("_")[1])

    def get_trimmed_vectors(self):
        """
        Returns:
            matrix of embeddings (np array)
        Raises:
            Exception: if the trimmed .npz file is missing or unreadable
        """
        try:
            with np.load(self.trimmed_filename) as data:
                return data["embeddings"]
        except IOError:
            raise Exception("Could not find or load file!!", self.trimmed_filename)

    def _load_embed_vocab(self, skip_header=False, extract_word=None):
        """Shared worker: collect the set of words in the raw embedding file."""
        print("Building vocab...")
        vocab = set()
        with open(self.emb_filename) as f:
            if skip_header:
                next(f, None)
            for line in f:
                token = line.strip().split(' ')[0]
                vocab.add(token if extract_word is None else extract_word(token))
        print("- done. {} tokens".format(len(vocab)))
        return vocab

    def get_emb_vocab(self):
        """Load vocab from a monolingual embedding file (first line skipped).

        Returns:
            vocab: set() of strings
        """
        return self._load_embed_vocab(skip_header=True)

    def get_multi_emb_vocab(self):
        """Load vocab from a multilingual ("lang_word") embedding file.

        Returns:
            vocab: set() of strings
        """
        return self._load_embed_vocab(
            extract_word=lambda token: token.split("_")[1])
class CoNLLDataset(object):
    """Class that iterates over CoNLL Dataset

    __iter__ method yields a tuple (words, tags)
        words: list of raw words
        tags: list of raw tags
    If processing_word and processing_tag are not None,
    optional preprocessing is appplied

    Example:
        ```python
        data = CoNLLDataset(filename)
        for sentence, tags in data:
            pass
        ```
    """
    def __init__(self, filename, lang, mode, processing_word=None, processing_tag=None,
                 max_iter=None):
        """
        Args:
            filename: path to the file
            lang: language label for this dataset
            mode: dataset split label (e.g. train/dev/test)
            processing_word: (optional) function that takes a word as input
            processing_tag: (optional) function that takes a tag as input
            max_iter: (optional) max number of sentences to yield
        """
        self.filename = filename
        self.processing_word = processing_word
        self.processing_tag = processing_tag
        self.max_iter = max_iter
        self.length = None  # lazily computed by __len__
        self.lang = lang
        self.mode = mode

    def __iter__(self):
        niter = 0
        with open(self.filename) as f:
            words, tags = [], []
            for line in f:
                line = line.strip()
                # A blank line (or -DOCSTART- marker) terminates a sentence.
                if len(line) == 0 or line.startswith("-DOCSTART-"):
                    if len(words) != 0:
                        niter += 1
                        if self.max_iter is not None and niter > self.max_iter:
                            return
                        yield words, tags
                        words, tags = [], []
                else:
                    # Token lines are "word tag" separated by a single space.
                    ls = line.split(' ')
                    word, tag = ls[0], ls[1]
                    if self.processing_word is not None:
                        word = self.processing_word(word)
                    if self.processing_tag is not None:
                        tag = self.processing_tag(tag)
                    words += [word]
                    tags += [tag]
            # Fix: a file that does not end with a blank line used to silently
            # drop its final sentence; yield it here (still honoring max_iter).
            if len(words) != 0:
                niter += 1
                if self.max_iter is None or niter <= self.max_iter:
                    yield words, tags

    def __len__(self):
        """Iterates once over the corpus to set and store length"""
        if self.length is None:
            self.length = 0
            for _ in self:
                self.length += 1
        return self.length
def get_vocabs(datasets):
    """Collect word and tag vocabularies from several dataset objects.

    Args:
        datasets: a list of dataset objects yielding (words, tags) pairs
    Returns:
        (vocab_words, vocab_tags): sets of all words and tags seen
    """
    print("Building vocab...")
    vocab_words, vocab_tags = set(), set()
    for dataset in datasets:
        for sentence_words, sentence_tags in dataset:
            vocab_words |= set(sentence_words)
            vocab_tags |= set(sentence_tags)
    print("- done. {} tokens".format(len(vocab_words)))
    return vocab_words, vocab_tags
def get_char_vocab(datasets):
    """Collect the character vocabulary from several dataset objects.

    Args:
        datasets: iterables yielding (sentence, tags) tuples
    Returns:
        a set of all the characters appearing in any word
    """
    return {char
            for dataset in datasets
            for sentence, _ in dataset
            for token in sentence
            for char in token}
def get_processing_word(vocab_words=None, vocab_chars=None,
                        lowercase=False, chars=False, allow_unk=True):
    """Build a closure that maps a raw word to its id (and char ids).

    Args:
        vocab_words: dict[word] = idx (optional)
        vocab_chars: dict[char] = idx (optional, used with chars=True)
        lowercase: lowercase the word before lookup
        chars: also return the list of character ids
        allow_unk: fall back to the UNK id for unseen words
    Returns:
        f("cat") = ([12, 4, 32], 12345)
                 = (list of char ids, word id)
        or just the word id when chars are disabled.
    """
    def f(word):
        use_chars = vocab_chars is not None and chars == True
        # 0. get chars of words; out-of-vocabulary characters are dropped
        if use_chars:
            char_ids = [vocab_chars[ch] for ch in word if ch in vocab_chars]
        # 1. preprocess word
        if lowercase:
            word = word.lower()
        if word.isdigit():
            word = NUM
        # 2. get id of word
        if vocab_words is not None:
            if word in vocab_words:
                word = vocab_words[word]
            elif allow_unk:
                word = vocab_words[UNK]
            else:
                raise Exception("Unknow key is not allowed. Check that " \
                "your vocab (tags?) is correct =>"+ str(len(vocab_words)))
        # 3. return tuple char ids, word id
        return (char_ids, word) if use_chars else word
    return f
def write_vocab(vocab, filename):
    """Write a vocab to a file, one word per line.

    The last line carries no trailing newline, matching the format expected
    by load_vocab.

    Args:
        vocab: sized iterable that yields words
        filename: path to vocab file
    """
    print("Writing vocab...")
    with open(filename, "w") as out:
        out.write("\n".join("{}".format(word) for word in vocab))
    print("- done. {} tokens".format(len(vocab)))
def load_vocab(filename):
    """Load a vocab written one word per line.

    Args:
        filename: (string) the format of the file must be one word per line.
    Returns:
        d: dict[word] = index (line number, 0-based)
    Raises:
        Exception: when the file cannot be opened.
    """
    try:
        with open(filename) as f:
            d = {line.strip(): idx for idx, line in enumerate(f)}
    except IOError:
        raise Exception("Could not find or load file!!", filename)
    return d
def pad_sequences(sequences, pad_tok, nlevels=1):
    """
    Args:
        sequences: an iterable (possibly a generator) of list or tuple
        pad_tok: the char to pad with
        nlevels: "depth" of padding, for the case where we have characters ids
    Returns:
        a list of list where each sublist has same length
    """
    # Fix: materialize once. The original code iterated `sequences` twice
    # (once for max-length, once for padding), which silently produced empty
    # output when a generator was passed — despite the docstring advertising
    # generator support.
    sequences = list(sequences)
    if nlevels == 1:
        max_length = max(map(len, sequences))
        sequence_padded, sequence_length = _pad_sequences(sequences,
                                            pad_tok, max_length)
    elif nlevels == 2:
        max_length_word = max(max(map(len, seq)) for seq in sequences)
        sequence_padded, sequence_length = [], []
        for seq in sequences:
            # all words are same length now
            sp, sl = _pad_sequences(seq, pad_tok, max_length_word)
            sequence_padded += [sp]
            sequence_length += [sl]
        max_length_sentence = max(map(len, sequences))
        # Pad sentences with fully-padded "blank" words so every sentence has
        # the same number of words.
        sequence_padded, _ = _pad_sequences(sequence_padded,
                [pad_tok]*max_length_word, max_length_sentence)
        sequence_length, _ = _pad_sequences(sequence_length, 0,
                max_length_sentence)
    return sequence_padded, sequence_length


def _pad_sequences(sequences, pad_tok, max_length):
    """
    Args:
        sequences: a generator of list or tuple
        pad_tok: the char to pad with
        max_length: length every output sequence is truncated/padded to
    Returns:
        (sequence_padded, sequence_length): padded sequences and the original
        (capped) length of each one
    """
    sequence_padded, sequence_length = [], []
    for seq in sequences:
        seq = list(seq)
        seq_ = seq[:max_length] + [pad_tok]*max(max_length - len(seq), 0)
        sequence_padded += [seq_]
        sequence_length += [min(len(seq), max_length)]
    return sequence_padded, sequence_length
def minibatches(data, minibatch_size):
    """Yield batches drawn from a per-language mapping of examples.

    Args:
        data: dict mapping language -> iterable of (sentence, tags) tuples
        minibatch_size: (int) maximum examples per batch
    Yields:
        (x_batch, y_batch) lists of at most minibatch_size items; the final
        batch may be shorter. Batches may span language boundaries.
    """
    x_batch, y_batch = [], []
    for lang in data:
        for x, y in data[lang]:
            if len(x_batch) == minibatch_size:
                yield x_batch, y_batch
                x_batch, y_batch = [], []
            # (char_ids, word_id) examples are transposed into parallel streams.
            if type(x[0]) is tuple:
                x = zip(*x)
            x_batch.append(x)
            y_batch.append(y)
    if x_batch:
        yield x_batch, y_batch
def minibatches_test(data, minibatch_size):
    """Yield batches from a flat iterable of examples (no language keys).

    Args:
        data: iterable of (sentence, tags) tuples
        minibatch_size: (int) maximum examples per batch
    Yields:
        (x_batch, y_batch) lists of at most minibatch_size items; the final
        batch may be shorter.
    """
    x_batch, y_batch = [], []
    for x, y in data:
        if len(x_batch) == minibatch_size:
            yield x_batch, y_batch
            x_batch, y_batch = [], []
        # (char_ids, word_id) examples are transposed into parallel streams.
        if type(x[0]) is tuple:
            x = zip(*x)
        x_batch.append(x)
        y_batch.append(y)
    if x_batch:
        yield x_batch, y_batch
def get_chunk_type(tok, idx_to_tag):
    """Split an IOB tag id into its chunk class and entity type.

    Args:
        tok: id of token, ex 4
        idx_to_tag: dictionary {4: "B-PER", ...}
    Returns:
        tuple: ("B", "PER"); a tag without '-' (e.g. "O") yields itself twice.
    """
    pieces = idx_to_tag[tok].split('-')
    return pieces[0], pieces[-1]
def get_chunks(seq, tags):
    """Given a sequence of tags, group entities and their position

    Args:
        seq: [4, 4, 0, 0, ...] sequence of labels
        tags: dict["O"] = 4
    Returns:
        list of (chunk_type, chunk_start, chunk_end)
    Example:
        seq = [4, 5, 0, 3]
        tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
        result = [("PER", 0, 2), ("LOC", 3, 4)]
    """
    # Id of the "outside" tag; anything else belongs to some entity chunk.
    default = tags[NONE]
    idx_to_tag = {idx: tag for tag, idx in tags.items()}
    chunks = []
    # Running state: type and start index of the chunk currently open.
    chunk_type, chunk_start = None, None
    for i, tok in enumerate(seq):
        # End of a chunk 1
        if tok == default and chunk_type is not None:
            # Add a chunk.
            chunk = (chunk_type, chunk_start, i)
            chunks.append(chunk)
            chunk_type, chunk_start = None, None
        # End of a chunk + start of a chunk!
        elif tok != default:
            tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
            if chunk_type is None:
                # First token of a new chunk.
                chunk_type, chunk_start = tok_chunk_type, i
            elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
                # A different entity type, or an explicit B- boundary, closes
                # the current chunk and opens a new one at position i.
                chunk = (chunk_type, chunk_start, i)
                chunks.append(chunk)
                chunk_type, chunk_start = tok_chunk_type, i
            else:
                # Continuation (I- tag of the same type): nothing to do.
                pass
    # end condition
    # A chunk still open at the end of the sequence is closed at len(seq).
    if chunk_type is not None:
        chunk = (chunk_type, chunk_start, len(seq))
        chunks.append(chunk)
    return chunks
if __name__ == '__main__':
    args = get_args()
    # Map the CLI task flag onto the dataset sub-directory name.
    if args.task == "trigger":
        task = "TriggerIdentification"
    elif args.task == "argument":
        task = "ArgumentIdentification"
    else:
        task = "Joint"
    ## Dataset Directory
    pre_dir = args.root_dir + args.pre_dir + "tagging-new/" + task + "/"
    iso_lang_dict, lang_iso_dict = get_iso_lang_abbreviation()
    processing_word = get_processing_word(lowercase=True)
    # File-name suffix marks whether negative examples are included.
    if args.use_neg_eg:
        ext = "_with_neg_eg"
    else:
        ext = "_wout_neg_eg"
    # NOTE(review): CoNLLDataset's signature is
    # (filename, lang, mode, processing_word=None, ...). Here `processing_word`
    # is passed positionally into the `mode` slot, so no word preprocessing is
    # applied — confirm whether this should be
    # CoNLLDataset(..., "English", "train", processing_word=processing_word).
    train = CoNLLDataset(pre_dir+"English/train" + ext + ".txt", "English", processing_word)
|
|
import pandas as pd
import numpy as np
import datetime
from pyloopkit.dose import DoseType
# from tidepool_data_science_simulator.models.simple_metabolism_model import get_iob_from_sbr, simple_metabolism_model
from tidepool_data_science_simulator.legacy.risk_metrics_ORIG import get_bgri, lbgi_risk_score, hbgi_risk_score
# %% create pandas dataframes from the input data
def dict_inputs_to_dataframes(input_data):
    """Split a pyloopkit input dictionary into one dataframe per data family.

    List-valued keys are routed by substring match into the matching frame;
    scalar-ish values are collected in df_misc; the nested
    "settings_dictionary" is flattened into df_settings (with the "model" and
    "default_absorption_times" lists stringified — see the TODO below).

    Args:
        input_data: dict of parallel lists / scalars plus a nested
            "settings_dictionary" (as produced by input_table_to_dict).
    Returns:
        Tuple of 10 dataframes: (basal_rate, carb, carb_ratio, dose, glucose,
        last_temporary_basal, misc, sensitivity_ratio, settings, target_range).
    """
    # define the dataframes to store the data in
    df_basal_rate = pd.DataFrame()
    df_carb = pd.DataFrame()
    df_carb_ratio = pd.DataFrame()
    df_dose = pd.DataFrame()
    df_glucose = pd.DataFrame()
    df_last_temporary_basal = pd.DataFrame()
    df_misc = pd.DataFrame()
    df_sensitivity_ratio = pd.DataFrame()
    df_settings = pd.DataFrame()
    df_target_range = pd.DataFrame()
    for k in input_data.keys():
        if type(input_data[k]) != dict:
            if "basal_rate" in k:
                df_basal_rate[k] = input_data.get(k)
            # NOTE: "carb_ratio" must be tested before the plain "carb"
            # substring below, otherwise ratio keys would land in df_carb.
            elif "carb_ratio" in k:
                df_carb_ratio[k] = input_data.get(k)
            elif "carb" in k:
                df_carb[k] = input_data.get(k)
            elif "dose" in k:
                df_dose[k] = input_data.get(k)
            elif "glucose" in k:
                df_glucose[k] = input_data.get(k)
            elif "last_temporary_basal" in k:
                # TODO: change how this is dealt with in pyloopkit
                df_last_temporary_basal[k] = input_data.get(k)
            elif "sensitivity_ratio" in k:
                df_sensitivity_ratio[k] = input_data.get(k)
            elif "target_range" in k:
                df_target_range[k] = input_data.get(k)
            else:
                # Unmatched keys: single values (or 1-element lists) become
                # one row of df_misc.
                if np.size(input_data.get(k)) == 1:
                    if type(input_data[k]) == list:
                        df_misc.loc[k, 0] = input_data.get(k)[0]
                    else:
                        df_misc.loc[k, 0] = input_data.get(k)
        else:
            if "settings_dictionary" in k:
                settings_dictionary = input_data.get("settings_dictionary")
                for sk in settings_dictionary.keys():
                    if np.size(settings_dictionary.get(sk)) == 1:
                        if type(settings_dictionary[sk]) == list:
                            df_settings.loc[sk, "settings"] = settings_dictionary.get(
                                sk
                            )[0]
                        else:
                            df_settings.loc[sk, "settings"] = settings_dictionary.get(
                                sk
                            )
                    else:
                        if sk in ["model", "default_absorption_times"]:
                            # TODO: change this in the loop algorithm
                            # to take 2 to 3 inputs instead of 1
                            df_settings.loc[sk, "settings"] = str(
                                settings_dictionary.get(sk)
                            )
    return (
        df_basal_rate,
        df_carb,
        df_carb_ratio,
        df_dose,
        df_glucose,
        df_last_temporary_basal,
        df_misc,
        df_sensitivity_ratio,
        df_settings,
        df_target_range,
    )
def dataframe_inputs_to_dict(dfs, df_misc, df_settings):
    """Reassemble the pyloopkit input dictionary from its dataframe pieces.

    Inverse of dict_inputs_to_dataframes: columns become lists, "units"
    columns collapse to their single repeated value, and the stringified
    settings lists are parsed back into Python lists.

    Args:
        dfs: iterable of dataframes whose columns are time-series inputs
        df_misc: single-column (label 0) dataframe of scalar inputs
        df_settings: dataframe with a "settings" column of per-key values
    Returns:
        dict in the shape pyloopkit expects.
    """
    import ast  # stdlib replacement for np.safe_eval, removed from numpy

    # write the dataframes back to one dictionary
    input_dictionary = df_misc.to_dict()[0]
    for df in dfs:
        for col in df.columns:
            if "units" not in col:
                input_dictionary[col] = df[col].tolist()
            else:
                # Unit columns repeat one value per row; keep a single scalar.
                input_dictionary[col] = df[col].unique()[0]
    input_dictionary["settings_dictionary"] = df_settings.to_dict()["settings"]
    # set the format back for the edge cases: "model" and
    # "default_absorption_times" were flattened to strings when the
    # dataframes were built.
    input_dictionary["settings_dictionary"]["model"] = ast.literal_eval(
        input_dictionary["settings_dictionary"]["model"]
    )
    input_dictionary["settings_dictionary"]["default_absorption_times"] = ast.literal_eval(
        input_dictionary["settings_dictionary"]["default_absorption_times"]
    )
    input_dictionary["offset_applied_to_dates"] = int(
        input_dictionary["offset_applied_to_dates"]
    )
    return input_dictionary
def input_dict_to_one_dataframe(input_data):
    """Flatten a pyloopkit input dictionary into one combined dataframe.

    Settings and misc rows come first; the time-series frames are appended
    transposed, with glucose last since that trace is typically long.
    """
    # get dataframes from input
    (
        df_basal_rate,
        df_carb,
        df_carb_ratio,
        df_dose,
        df_glucose,
        df_last_temporary_basal,
        df_misc,
        df_sensitivity_ratio,
        df_settings,
        df_target_range,
    ) = dict_inputs_to_dataframes(input_data)
    # combine the dataframes into one big dataframe,
    # put glucose at end since that trace is typically long
    combined = pd.concat([pd.DataFrame(), df_settings])
    combined = pd.concat([combined, df_misc])
    pieces = [
        df_basal_rate,
        df_carb,
        df_carb_ratio,
        df_dose,
        df_last_temporary_basal,
        df_sensitivity_ratio,
        df_target_range,
        df_glucose,
    ]
    for piece in pieces:
        combined = pd.concat([combined, piece.T])
    # move settings back to the front of the dataframe
    combined = combined[np.append("settings", combined.columns[0:-1])]
    return combined
def str2bool(string_):
    """Interpret "yes"/"true"/"t"/"1" (any case) as True; anything else is False."""
    truthy = ("yes", "true", "t", "1")
    return string_.lower() in truthy
def input_table_to_dict(input_df):
    """Parse a scenario table back into the pyloopkit input dictionary.

    Args:
        input_df: dataframe indexed by setting name, with a "settings" column
            for scalar settings and further columns of stringified values.
    Returns:
        dict with a parsed "settings_dictionary" plus one entry per remaining
        column; values are parsed into bools, numbers, times, datetimes or
        DoseType as their string form dictates.
    """
    import ast  # stdlib replacement for np.safe_eval, removed from numpy

    dict_ = dict()
    # first parse and format the settings
    all_settings = input_df["settings"].dropna()
    dict_["settings_dictionary"] = all_settings.to_dict()
    for k in dict_["settings_dictionary"].keys():
        if k in ["dynamic_carb_absorption_enabled", "retrospective_correction_enabled"]:
            dict_["settings_dictionary"][k] = str2bool(dict_["settings_dictionary"][k])
        else:
            dict_["settings_dictionary"][k] = ast.literal_eval(
                dict_["settings_dictionary"][k]
            )
    if "suspend_threshold" not in dict_["settings_dictionary"].keys():
        dict_["settings_dictionary"]["suspend_threshold"] = None
    # then parse and format the rest
    input_df_T = input_df.drop(columns=["settings"]).dropna(axis=0, how="all").T
    input_df_columns = input_df_T.columns
    for col in input_df_columns:
        if "units" in col:
            # Unit columns repeat one value; keep a single scalar.
            dict_[col] = input_df_T[col].dropna().unique()[0]
        elif "offset" in col:
            dict_[col] = int(ast.literal_eval(input_df_T[col].dropna()[0]))
        elif "time_to_calculate" in col:
            dict_[col] = datetime.datetime.fromisoformat(
                pd.to_datetime(input_df_T[col].dropna()[0]).isoformat()
            )
        else:
            temp_df = input_df_T[col].dropna()
            temp_array = []
            for v in temp_df.values:
                if ":" in v:
                    # Length disambiguates the clock formats:
                    # 7 chars ("H:MM:SS"), 8 chars ("HH:MM:SS"),
                    # longer means a full datetime.
                    if len(v) == 7:
                        obj = datetime.time.fromisoformat(
                            pd.to_datetime(v).strftime("%H:%M:%S")
                        )
                    elif len(v) == 8:
                        obj = datetime.time.fromisoformat(v)
                    elif len(v) > 8:
                        obj = datetime.datetime.fromisoformat(
                            pd.to_datetime(v).isoformat()
                        )
                    else:
                        obj = ast.literal_eval(v)
                elif "DoseType" in v:
                    # e.g. "DoseType.bolus" -> the enum member after the dot.
                    obj = DoseType.from_str(v[9:])
                else:
                    obj = ast.literal_eval(v)
                temp_array = np.append(temp_array, obj)
            dict_[col] = list(temp_array)
    return dict_
def create_contiguous_ts(date_min, date_max, freq="1s"):
    """Build a contiguous timestamp series between two dates.

    Args:
        date_min, date_max: inclusive range bounds (anything pd.date_range
            accepts).
        freq: sampling frequency string (default one second).
    Returns:
        DataFrame with a "datetime" column (full timestamps) and a "time"
        column (time-of-day), used by get_setting to join schedules keyed by
        time of day.
    """
    date_range = pd.date_range(date_min, date_max, freq=freq)
    contig_ts = pd.DataFrame(date_range, columns=["datetime"])
    # Fix: Series.dt has no `start_time` accessor for datetimes (that is a
    # Period accessor, so the original line raised AttributeError); `.dt.time`
    # yields the datetime.time values that get_setting merges on.
    contig_ts["time"] = contig_ts["datetime"].dt.time
    return contig_ts
def get_setting(current_time, df, setting_value_name, setting_time_name):
    """Look up the value of a daily schedule setting at `current_time`.

    Expands the schedule into a one-second contiguous series for the day of
    `current_time`, left-joins the schedule entries on time-of-day, forward-
    fills between entries, and returns the value at the exact timestamp.

    Args:
        current_time: datetime to sample the schedule at
        df: schedule dataframe containing the value and time columns
        setting_value_name: column holding the setting values
        setting_time_name: column holding the schedule's time-of-day keys
    Returns:
        The scheduled value in effect at `current_time`.
    """
    continguous_ts = create_contiguous_ts(
        current_time.date(), current_time.date() + datetime.timedelta(days=1), freq="1s"
    )
    df_ts = pd.merge(
        continguous_ts, df, left_on="time", right_on=setting_time_name, how="left"
    )
    # Fix: explicit reassignment instead of the deprecated
    # fillna(method="ffill", inplace=True) on a column selection, which is
    # chained assignment and stops propagating in newer pandas.
    df_ts[setting_value_name] = df_ts[setting_value_name].ffill()
    setting_value_at_current_time = df_ts.loc[
        df_ts["datetime"] == current_time, setting_value_name
    ].values[0]
    # (removed a stray no-op expression statement that was here)
    return setting_value_at_current_time
def transform_input_scenario_to_simulation_df(
    scenario_filepath, simulation_duration_hours
):
    """Load a scenario file, run the initial scenario through the simple
    diabetes metabolism model, and compute pump risk metrics.

    Parameters
    ----------
    scenario_filepath : str
        Path to a tab-separated scenario file with a ``setting_name`` column.
    simulation_duration_hours : float
        Requested simulation duration; the effective duration is at least
        the 8 hours the metabolism model requires.

    Returns
    -------
    (scenario_results, inputs_from_file) : (pd.DataFrame, dict)
        Risk metrics (LBGI/HBGI/BGRI plus risk scores) for the pump arm
        and the parsed scenario input dictionary.
    """
    # LOAD & VIEW SCENARIO INPUTS FROM GOOGLE DRIVE
    # worksheet = gc.open(scenario_file_names[scenario_number]).sheet1
    # rows = worksheet.get_all_values()
    # col_headings = rows[0]
    data = pd.read_csv(scenario_filepath, sep="\t")
    custom_table_df = data.set_index("setting_name")
    # create output dataframes
    metab_dur_mins = 8 * 60 # 8 hours
    # +CS - Simulation lasts as long as simulation is specified or metabolism model requires
    sim_dur_mins = np.max([simulation_duration_hours * 60, metab_dur_mins])
    # +CS - Why is this length sim_dur_mins * 2 and sim_df is sim_dur_mins?
    delta_bgs_df = pd.DataFrame(index=np.arange(0, sim_dur_mins * 2, 5))
    iob_df = delta_bgs_df.copy()
    sim_df = pd.DataFrame(index=np.arange(0, sim_dur_mins, 5))
    scenario_results = pd.DataFrame()
    # show inputs
    # NOTE(review): bare expression below is a leftover notebook display
    # cell; it has no effect when run as a script.
    custom_table_df
    # %% RUN INITIAL SCENARIO THROUGH DIABETES METABOLISM MODEL
    # get inputs from custom scenario
    # NOTE: this line next line is needed bc we are pulling from gsheet instead of .csv
    custom_table_df[custom_table_df == ""] = np.nan
    inputs_from_file = input_table_to_dict(custom_table_df)
    # convert inputs to dataframes
    (
        basal_rates,
        carb_events,
        carb_ratios,
        dose_events,
        cgm_df,
        df_last_temporary_basal,
        df_misc,
        isfs,
        df_settings,
        df_target_range,
    ) = dict_inputs_to_dataframes(inputs_from_file)
    print("running scenario through simple diabetes metabolism model...")
    # t0 is the moment the scenario is evaluated at
    t0 = inputs_from_file.get("time_to_calculate_at")
    # actual vs. Loop-reported blood glucose at t0
    bg_t0_actual = cgm_df.loc[
        cgm_df["glucose_dates"] == t0, "actual_blood_glucose"
    ].values[0]
    bg_t0_loop = cgm_df.loc[cgm_df["glucose_dates"] == t0, "glucose_values"].values[0]
    # get actual and loop carb amounts
    carb_amount_actual = carb_events.loc[
        carb_events["carb_dates"] == t0, "actual_carbs"
    ].values[0]
    carb_amount_loop = carb_events.loc[
        carb_events["carb_dates"] == t0, "carb_values"
    ].values[0]
    # get actual and loop insulin amounts
    insulin_amount_actual = dose_events.loc[
        dose_events["dose_start_times"] == t0, "actual_doses"
    ].values[0]
    insulin_amount_loop = dose_events.loc[
        dose_events["dose_start_times"] == t0, "dose_values"
    ].values[0]
    # get actual and loop cir
    # NOTE(review): this selects the lowest-indexed schedule entry whose
    # start time is <= t0 — confirm this matches the intended lookup of
    # the entry in effect at t0.
    cir_index = carb_ratios[
        t0.time() >= carb_ratios["carb_ratio_start_times"]
    ].index.values.min()
    cir_actual = carb_ratios.loc[cir_index, "actual_carb_ratios"]
    cir_loop = carb_ratios.loc[cir_index, "carb_ratio_values"]
    # get actual and loop isf (same lookup pattern as cir above)
    isf_index = isfs[
        t0.time() >= isfs["sensitivity_ratio_start_times"]
    ].index.values.min()
    isf_actual = isfs.loc[isf_index, "actual_sensitivity_ratios"]
    isf_loop = isfs.loc[isf_index, "sensitivity_ratio_values"]
    # run the "actual" arm through the metabolism model
    (
        delta_bg_array_metab,
        ts_array_metab,
        carbs_consumed_array_metab,
        insulin_delivered_array_metab,
        iob_array_metab,
    ) = simple_metabolism_model(
        carb_amount=carb_amount_actual,
        insulin_amount=insulin_amount_actual,
        CIR=cir_actual,
        ISF=isf_actual,
    )
    delta_bgs_df["initial_scenario"] = np.nan
    # +CS - these aren't bg_times. they are a boolean array mask for bgs indices up to metabolism duration
    bg_metab_mask = (delta_bgs_df.index >= 0) & (delta_bgs_df.index < metab_dur_mins)
    delta_bgs_df.loc[bg_metab_mask, "initial_scenario"] = delta_bg_array_metab
    # get scheduled basal rate
    sbr_index = basal_rates[
        t0.time() >= basal_rates["basal_rate_start_times"]
    ].index.values.min()
    sbr_loop_scalar = basal_rates.loc[sbr_index, "basal_rate_values"]
    sbr_actual_scalar = basal_rates.loc[sbr_index, "actual_basal_rates"]
    # calculate the amount of insulin onboard from scheduled basal rate
    iob_from_sbr_array_metab = get_iob_from_sbr(sbr_loop_scalar)
    # capture the insulin that will be onboard for the next 8 hours
    iob_df["initial_scenario"] = np.nan
    iob_df.loc[bg_metab_mask, "initial_scenario"] = (
        iob_array_metab + iob_from_sbr_array_metab
    )
    # BG trajectory = starting BG plus cumulative deltas
    bg_timeseries = bg_t0_actual + np.cumsum(delta_bg_array_metab)
    # NOTE(review): bg_metab_mask is sized to delta_bgs_df's (2x longer)
    # index; using it to index sim_df here assumes the lengths line up —
    # TODO confirm against the intended sim_df length.
    sim_df.loc[bg_metab_mask, "pump_bgs"] = bg_timeseries
    # score the pump BG trace with blood-glucose risk indices
    pump_LBGI, pump_HBGI, pump_BGRI = get_bgri(bg_timeseries)
    scenario_results.loc["LBGI", "pumpValue"] = pump_LBGI
    scenario_results.loc["LBGI", "pumpRiskScore"] = lbgi_risk_score(pump_LBGI)
    scenario_results.loc["HBGI", "pumpValue"] = pump_HBGI
    scenario_results.loc["HBGI", "pumpRiskScore"] = hbgi_risk_score(pump_HBGI)
    scenario_results.loc["BGRI", "pumpValue"] = pump_BGRI
    return scenario_results, inputs_from_file
|
|
import os
from numpy.lib.npyio import save
from tqdm import trange
from argparse import ArgumentParser
import logging
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from imitation_cl.train.utils import check_cuda, set_seed, get_sequence
from imitation_cl.model.hypernetwork import TargetNetwork, str_to_ints, str_to_act
from imitation_cl.model.node import NODE
from imitation_cl.data.lasa import LASA
from imitation_cl.data.helloworld import HelloWorld
from imitation_cl.data.utils import get_minibatch
from imitation_cl.plot.trajectories import plot_ode_simple
from imitation_cl.metrics.traj_metrics import mean_swept_error, mean_frechet_error_fast as mean_frechet_error, dtw_distance_fast as dtw_distance
from imitation_cl.logging.utils import custom_logging_setup, write_dict, read_dict, Dictobject
# TODO: Remove later — suppresses a spurious warning caused by a known
# PyTorch bug triggered when setting attributes on a ParameterList
# (harmless here).
import warnings
warnings.filterwarnings("ignore", message="Setting attributes on ParameterList is not supported.")
def parse_args(return_parser=False):
    """Build the command-line parser and (optionally) parse the arguments.

    Parameters
    ----------
    return_parser : bool
        If True, return the un-parsed ArgumentParser (used by the slurm
        creator script); otherwise parse sys.argv and return the namespace.
    """
    parser = ArgumentParser()
    parser.add_argument('--data_dir', type=str, required=True, help='Location of dataset')
    parser.add_argument('--num_iter', type=int, required=True, help='Number of training iterations')
    parser.add_argument('--tsub', type=int, default=20, help='Length of trajectory subsequences for training')
    parser.add_argument('--replicate_num', type=int, default=0, help='Number of times the final point of the trajectories should be replicated for training')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
    parser.add_argument('--tnet_dim', type=int, default=2, help='Dimension of target network input and output')
    parser.add_argument('--tnet_arch', type=str, default='200,200,200', help='Hidden layer units of the target network')
    parser.add_argument('--tnet_act', type=str, default='elu', help='Target network activation function')
    # Fixed help text: the second option is 0, not a duplicated 1.
    parser.add_argument('--explicit_time', type=int, default=0, help='1: Use time as an explicit network input, 0: Do not use time')
    parser.add_argument('--dummy_run', type=int, default=0, help='1: Dummy run, no evaluation, 0: Actual training run')
    parser.add_argument('--data_class', type=str, required=True, help='Dataset class for training')
    parser.add_argument('--seed', type=int, required=True, help='Seed for reproducibility')
    parser.add_argument('--seq_file', type=str, required=True, help='Name of file containing sequence of demonstration files')
    parser.add_argument('--log_dir', type=str, default='logs/', help='Main directory for saving logs')
    parser.add_argument('--description', type=str, required=True, help='String identifier for experiment')
    # Args for plot formatting
    parser.add_argument('--plot_fs', type=int, default=10, help='Fontsize to be used in the plots')
    parser.add_argument('--figw', type=float, default=16.0, help='Plot width')
    parser.add_argument('--figh', type=float, default=3.3, help='Plot height')
    parser.add_argument('--task_names_path', type=str, required=True, help='Path of the JSON file with task names used in plot')
    if return_parser:
        # This is used by the slurm creator script
        # When running this script directly, this has no effect
        return parser
    else:
        args = parser.parse_args()
        return args
def train_task(args, task_id, tnet, node, device):
    """Train the target network / NODE on the demonstration for one task.

    Loads the task's demonstration data, then runs ``args.num_iter`` Adam
    steps minimizing the MSE between forward-simulated and demonstrated
    trajectory subsequences. Returns the (updated) tnet and node.
    """
    demo_files = get_sequence(args.seq_file)
    if args.data_class == 'LASA':
        data = LASA(data_dir=args.data_dir, filename=demo_files[task_id], replicate_num=args.replicate_num)
    elif args.data_class == 'HelloWorld':
        data = HelloWorld(data_dir=args.data_dir, filename=demo_files[task_id])
    else:
        raise NotImplementedError(f'Unknown dataset class {args.data_class}')

    # Attach the target network and switch everything to training mode.
    node.set_target_network(tnet)
    tnet.train()
    node.train()
    node = node.to(device)

    # Optimizer over the NODE's weights and biases.
    theta_optimizer = optim.Adam(node.target_network.weights, lr=args.lr)

    for _ in trange(args.num_iter):
        theta_optimizer.zero_grad()
        node.set_target_network(tnet)

        # Sample a minibatch of trajectory subsequences of length tsub.
        t, y_all = get_minibatch(data.t[0], data.pos, tsub=args.tsub)
        t = t.to(device)
        y_all = y_all.to(device)

        # Forward-simulate from the subsequence starting points.
        y_start = y_all[:, 0].float()
        y_hat = node(t.float(), y_start)

        # MSE between predicted and demonstrated subsequences.
        loss = ((y_hat - y_all) ** 2).mean()
        loss.backward()
        theta_optimizer.step()

    return tnet, node
def eval_task(args, task_id, tnet, node, device):
    """Evaluate the NODE on one task's full demonstrations.

    Returns a dict of mean trajectory metrics (swept-area, Frechet, DTW),
    a dict of the per-trajectory metric errors (as plain lists so they are
    JSON-serializable), and the data needed to plot demonstrated vs.
    predicted trajectories.
    """
    tnet.eval()
    tnet = tnet.to(device)
    node = node.to(device)

    demo_files = get_sequence(args.seq_file)
    if args.data_class == 'LASA':
        data = LASA(data_dir=args.data_dir, filename=demo_files[task_id])
    elif args.data_class == 'HelloWorld':
        data = HelloWorld(data_dir=args.data_dir, filename=demo_files[task_id])
    else:
        raise NotImplementedError(f'Unknown dataset class {args.data_class}')

    # Attach the target network and switch to evaluation mode.
    node.set_target_network(tnet)
    node = node.float()
    node.eval()

    # Time steps, starting positions (n demos x d dims), full demonstrations.
    t = torch.from_numpy(data.t[0]).float().to(device)
    y_start = torch.from_numpy(data.pos[:, 0]).float().to(device)
    y_all = torch.from_numpy(data.pos).float().to(device)

    # Forward-simulate the learned dynamics from the demo start points.
    y_hat = node(t, y_start)

    # Compare trajectories in original (unnormalized) coordinates.
    y_all_np = data.unnormalize(y_all.cpu().detach().numpy())
    y_hat_np = data.unnormalize(y_hat.cpu().detach().numpy())

    swept_err, swept_errs = mean_swept_error(y_all_np, y_hat_np)
    frechet_err, frechet_errs = mean_frechet_error(y_all_np, y_hat_np)
    dtw_err, dtw_errs = dtw_distance(y_all_np, y_hat_np)

    eval_traj_metrics = {'swept': swept_err,
                         'frechet': frechet_err,
                         'dtw': dtw_err}
    # Lists rather than np arrays so the results can be dumped to JSON.
    eval_traj_metric_errors = {'swept': swept_errs.tolist(),
                               'frechet': frechet_errs.tolist(),
                               'dtw': dtw_errs.tolist()}

    # Everything needed later to plot demonstrations vs. predictions.
    plot_data = [t, y_all, node.ode_rhs, y_hat.detach()]
    return eval_traj_metrics, eval_traj_metric_errors, plot_data
def train_all(args):
    """Sequentially train the target network / NODE on every task.

    Sets up logging, builds the networks, trains on each task listed in
    ``args.seq_file`` in order, and checkpoints both networks after every
    task. Returns the log directory.

    Fix: removed the unused (and misspelled) local ``eval_resuts = None``.
    """
    # Create logging folder and set up console logging
    save_dir, identifier = custom_logging_setup(args)

    # Check if cuda is available
    cuda_available, device = check_cuda()
    logging.info(f'cuda_available: {cuda_available}')

    # Target network: acts as the RHS of the NODE's differential equation.
    tnet = TargetNetwork(n_in=args.tnet_dim+args.explicit_time,
                         n_out=args.tnet_dim,
                         hidden_layers=str_to_ints(args.tnet_arch),
                         activation_fn=str_to_act(args.tnet_act),
                         use_bias=True,
                         no_weights=False,
                         init_weights=None,
                         dropout_rate=-1,
                         use_batch_norm=False,
                         bn_track_stats=False,
                         distill_bn_stats=False,
                         out_fn=None,
                         device=device).to(device)

    node = NODE(tnet, explicit_time=args.explicit_time).to(device)

    # The demonstration sequence file defines the task order.
    seq = get_sequence(args.seq_file)
    num_tasks = len(seq)

    for task_id in range(num_tasks):
        logging.info(f'#### Training started for task_id: {task_id} (task {task_id+1} out of {num_tasks}) ###')
        tnet, node = train_task(args, task_id, tnet, node, device)
        # Checkpoint the latest networks at the end of every task.
        logging.info('Saving models')
        torch.save(tnet, os.path.join(save_dir, 'models', f'tnet_{task_id}.pth'))
        torch.save(node, os.path.join(save_dir, 'models', f'node_{task_id}.pth'))
        logging.info('Done')

    return save_dir
def eval_all(args, save_dir):
    """
    Evaluates all saved models after training for
    all tasks is complete.

    Loads each per-task checkpoint from ``save_dir``, evaluates it on the
    task it was trained on, plots all trajectories into one figure, and
    writes the collected metrics to ``eval_results.json``.
    """
    # Check if cuda is available
    cuda_available, device = check_cuda()
    logging.info(f'cuda_available: {cuda_available}')
    # Dict for storing evaluation results
    # This will be written to a json file in the log folder
    eval_results = dict()
    # For storing command line arguments for this run
    eval_results['args'] = read_dict(os.path.join(save_dir, 'commandline_args.json'))
    # For storing the evaluation results
    eval_results['data'] = {'metrics': dict(), 'metric_errors': dict()}
    # Create a target network with parameters (same architecture as during
    # training; trained weights are loaded from disk inside the loop below)
    tnet = TargetNetwork(n_in=args.tnet_dim+args.explicit_time,
                         n_out=args.tnet_dim,
                         hidden_layers=str_to_ints(args.tnet_arch),
                         activation_fn=str_to_act(args.tnet_act),
                         use_bias=True,
                         no_weights=False,
                         init_weights=None,
                         dropout_rate=-1,
                         use_batch_norm=False,
                         bn_track_stats=False,
                         distill_bn_stats=False,
                         out_fn=None,
                         device=device).to(device)
    # The NODE uses the target network as the RHS of its
    # differential equation
    node = NODE(tnet, explicit_time=args.explicit_time).to(device)
    # Extract the list of demonstrations from the text file
    # containing the sequence of demonstrations
    seq = get_sequence(args.seq_file)
    num_tasks = len(seq)
    # After the last task has been trained, we create a plot
    # showing the performance on all the tasks
    figw, figh = args.figw, args.figh
    # NOTE(review): this call acts on the *current* implicit figure, which
    # is not the `fig` created on the next line — it likely has no effect
    # on the saved plot; confirm intent.
    plt.subplots_adjust(left=1/figw, right=1-1/figw, bottom=1/figh, top=1-1/figh)
    fig, axes = plt.subplots(figsize=(figw, figh),
                             sharey=True,
                             sharex=True,
                             ncols=num_tasks if num_tasks<=10 else (num_tasks//2),
                             nrows=1 if num_tasks<=10 else 2,
                             subplot_kw={'aspect': 1})
    for task_id in range(num_tasks):
        logging.info(f'#### Evaluation started for task_id: {task_id} (task {task_id+1} out of {num_tasks}) ###')
        eval_results['data']['metrics'][f'train_task_{task_id}'] = dict()
        eval_results['data']['metric_errors'][f'train_task_{task_id}'] = dict()
        # Load the networks for the current task_id
        tnet = torch.load(os.path.join(save_dir, 'models', f'tnet_{task_id}.pth'))
        node = torch.load(os.path.join(save_dir, 'models', f'node_{task_id}.pth'))
        r, c = 0, 0
        # Each network is only evaluated on the task it is trained on
        eval_task_id = task_id
        # Evaluate on all the past and current task_ids
        logging.info(f'Loaded network trained on task {task_id}, evaluating on task {eval_task_id}')
        # Figure is plotted only for the last task
        eval_traj_metrics, eval_traj_metric_errors, plot_data = eval_task(args, eval_task_id, tnet, node, device)
        # Plot the trajectories for the current trained model
        # on the correct axis
        # Read the task names to use in the plot
        # NOTE(review): loop-invariant file read; could be hoisted above the loop
        task_names_map = read_dict(args.task_names_path)
        # Subplot row/column for this task (the row value is unused in the
        # single-row layout, since `axes` is 1-D in that case)
        r = 1 if num_tasks<=10 else eval_task_id//(num_tasks//2)
        c = eval_task_id if num_tasks<=10 else eval_task_id%(num_tasks//2)
        t, y_all, ode_rhs, y_hat = plot_data
        ax = axes[c] if num_tasks<=10 else axes[r][c]
        handles, labels = plot_ode_simple(t, y_all, ode_rhs, y_hat, ax=ax, explicit_time=args.explicit_time)
        name = list(task_names_map.values())[eval_task_id]
        ax.set_title(name, fontsize=args.plot_fs)
        # Remove axis labels and ticks
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.xaxis.get_label().set_visible(False)
        ax.yaxis.get_label().set_visible(False)
        logging.info(f'Evaluated trajectory metrics: {eval_traj_metrics}')
        # Store the evaluated metrics
        eval_results['data']['metrics'][f'train_task_{task_id}'][f'eval_task_{eval_task_id}'] = eval_traj_metrics
        eval_results['data']['metric_errors'][f'train_task_{task_id}'][f'eval_task_{eval_task_id}'] = eval_traj_metric_errors
    # Shared legend from the handles/labels of the last-plotted axis
    fig.legend(handles, labels, loc='lower center', fontsize=args.plot_fs, ncol=len(handles))
    fig.subplots_adjust(hspace=-0.2, wspace=0.1)
    # Save the evaluation plot
    plt.savefig(os.path.join(save_dir, f'plot_trajectories_{args.description}.pdf'), bbox_inches='tight')
    # Write the evaluation results to a file in the log dir
    write_dict(os.path.join(save_dir, 'eval_results.json'), eval_results)
    logging.info('All evaluation done')
if __name__ == '__main__':
    # Parse commandline arguments
    args = parse_args()
    # Set the seed for reproducibility
    set_seed(args.seed)
    # Training
    save_dir = train_all(args)
    # Evaluation: reload args from the JSON written during training so
    # evaluation uses exactly the arguments the models were trained with
    args = Dictobject(read_dict(os.path.join(save_dir, 'commandline_args.json')))
    if args.dummy_run == 0:
        eval_all(args, save_dir)
    logging.info('Completed')
|
|
import cv2
import math
from operator import itemgetter
import numpy as np
try:
import onnxruntime
except ImportError:
onnxruntime = None
class ORTWrapper:
    """Thin wrapper around an ONNX Runtime ``InferenceSession``.

    Configures a parallel-execution session with full graph optimization
    and exposes a single :meth:`infer` call that returns outputs by name.
    """

    def __init__(self, onnx_f) -> None:
        # Fail fast with a clear message when the optional dependency is
        # missing (the module-level import is wrapped in try/except and
        # leaves ``onnxruntime`` as None on ImportError).
        if onnxruntime is None:
            raise ImportError(
                'onnxruntime is not installed; it is required to use ORTWrapper.')
        self.onnx_f = onnx_f
        so = onnxruntime.SessionOptions()
        so.intra_op_num_threads = 6
        so.execution_mode = onnxruntime.ExecutionMode.ORT_PARALLEL
        so.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        self.ort_session = onnxruntime.InferenceSession(
            onnx_f, sess_options=so)
        # todo: extract possible input hw for vision models

    def infer(self, imgs):
        """Run the session on a single input tensor.

        Parameters
        ----------
        imgs : np.ndarray
            Input for the model's (single) input slot.

        Returns
        -------
        dict
            Mapping of output name -> output array.
        """
        inputs = [imgs]
        assert len(inputs) == len(self.ort_session.get_inputs()
                                  ), 'inputs must same with model.'
        # Pair each input array with its declared input name.
        ort_inputs = {
            self.ort_session.get_inputs()[i].name: inpt
            for i, inpt in enumerate(inputs)
        }
        ort_outs = self.ort_session.run(None, ort_inputs)
        # Map outputs back to their declared names.
        outs_dict = {oo.name: ort_outs[i]
                     for i, oo in enumerate(self.ort_session.get_outputs())}
        return outs_dict
# Pairs of keypoint indices that form the skeleton limbs used when grouping
# keypoints into person instances (18-keypoint OpenPose-style layout).
BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
                      [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
# For each limb above, the pair of PAF (part affinity field) channel indices
# (x- and y-component) in the network output.
BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
                      [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27])
def normalize(img, img_mean, img_scale):
    """Convert to float32, subtract ``img_mean`` and multiply by ``img_scale``."""
    as_float = np.array(img, dtype=np.float32)
    return (as_float - img_mean) * img_scale
def pad_width(img, stride, pad_value, min_dims):
    """Pad ``img`` with ``pad_value`` so it is at least ``min_dims`` (h, w).

    Padding is split (near-)symmetrically between the two sides of each
    dimension. Returns the padded image and the pad amounts as
    [top, left, bottom, right]. (``stride`` is accepted for interface
    compatibility but not used here.)
    """
    h, w, _ = img.shape
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    padded_img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                    cv2.BORDER_CONSTANT, value=pad_value)
    return padded_img, [top, left, bottom, right]
def connections_nms(a_idx, b_idx, affinity_scores):
    """Greedy NMS over candidate limb connections.

    Walking the candidates in descending affinity order, keep only those
    whose endpoints have not already been claimed by a higher-scoring
    connection, so each start/end keypoint is used at most once.
    """
    order = affinity_scores.argsort()[::-1]
    a_sorted = a_idx[order]
    b_sorted = b_idx[order]
    scores_sorted = affinity_scores[order]

    keep = []
    used_a, used_b = set(), set()
    for pos, (ka, kb) in enumerate(zip(a_sorted, b_sorted)):
        if ka in used_a or kb in used_b:
            continue
        keep.append(pos)
        used_a.add(ka)
        used_b.add(kb)

    keep = np.asarray(keep, dtype=np.int32)
    return a_sorted[keep], b_sorted[keep], scores_sorted[keep]
def group_keypoints(all_keypoints_by_type, pafs, pose_entry_size=20, min_paf_score=0.05):
    """Group detected keypoints into per-person pose entries using PAFs.

    Parameters
    ----------
    all_keypoints_by_type : list
        One list of (x, y, score, global_id) tuples per keypoint type.
    pafs : np.ndarray
        Part-affinity-field map (H, W, C) with paired x/y channels per limb.
    pose_entry_size : int
        Row length of one pose entry: keypoint-id slots plus, at the end,
        the accumulated score ([-2]) and the keypoint count ([-1]).
    min_paf_score : float
        Minimum per-sample PAF alignment for a sample point to count as valid.

    Returns
    -------
    (pose_entries, all_keypoints)
        pose_entries: array of pose rows as described above;
        all_keypoints: flat array of all keypoints, indexed by global id.
    """
    pose_entries = []
    # Flatten all keypoints into one array addressable by global keypoint id.
    all_keypoints = np.array(
        [item for sublist in all_keypoints_by_type for item in sublist])
    # Number of points sampled along each candidate limb vector.
    points_per_limb = 10
    grid = np.arange(points_per_limb, dtype=np.float32).reshape(1, -1, 1)
    all_keypoints_by_type = [np.array(keypoints, np.float32)
                             for keypoints in all_keypoints_by_type]
    for part_id in range(len(BODY_PARTS_PAF_IDS)):
        # x/y PAF channels and endpoint keypoint sets for this limb.
        part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
        kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
        kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
        n = len(kpts_a)
        m = len(kpts_b)
        if n == 0 or m == 0:
            continue
        # Get vectors between all pairs of keypoints, i.e. candidate limb vectors.
        a = kpts_a[:, :2]
        a = np.broadcast_to(a[None], (m, n, 2))
        b = kpts_b[:, :2]
        vec_raw = (b[:, None, :] - a).reshape(-1, 1, 2)
        # Sample points along every candidate limb vector.
        steps = (1 / (points_per_limb - 1) * vec_raw)
        points = steps * grid + a.reshape(-1, 1, 2)
        points = points.round().astype(dtype=np.int32)
        x = points[..., 0].ravel()
        y = points[..., 1].ravel()
        # Compute affinity score between candidate limb vectors and part affinity field.
        field = part_pafs[y, x].reshape(-1, points_per_limb, 2)
        vec_norm = np.linalg.norm(vec_raw, ord=2, axis=-1, keepdims=True)
        vec = vec_raw / (vec_norm + 1e-6)
        affinity_scores = (field * vec).sum(-1).reshape(-1, points_per_limb)
        valid_affinity_scores = affinity_scores > min_paf_score
        valid_num = valid_affinity_scores.sum(1)
        # Mean affinity over the valid sample points only.
        affinity_scores = (affinity_scores *
                           valid_affinity_scores).sum(1) / (valid_num + 1e-6)
        success_ratio = valid_num / points_per_limb
        # Get a list of limbs according to the obtained affinity score.
        valid_limbs = np.where(np.logical_and(
            affinity_scores > 0, success_ratio > 0.8))[0]
        if len(valid_limbs) == 0:
            continue
        # Flat candidate index decomposes as (b keypoint, a keypoint).
        b_idx, a_idx = np.divmod(valid_limbs, n)
        affinity_scores = affinity_scores[valid_limbs]
        # Suppress incompatible connections.
        a_idx, b_idx, affinity_scores = connections_nms(
            a_idx, b_idx, affinity_scores)
        # Connections as (global id of a, global id of b, score).
        connections = list(zip(kpts_a[a_idx, 3].astype(np.int32),
                               kpts_b[b_idx, 3].astype(np.int32),
                               affinity_scores))
        if len(connections) == 0:
            continue
        if part_id == 0:
            # First limb: seed one pose entry per connection.
            pose_entries = [np.ones(pose_entry_size)
                            * -1 for _ in range(len(connections))]
            for i in range(len(connections)):
                pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
                pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
                pose_entries[i][-1] = 2
                pose_entries[i][-2] = np.sum(
                    all_keypoints[connections[i][0:2], 2]) + connections[i][2]
        elif part_id == 17 or part_id == 18:
            # Last two limbs (ear connections): only fill in missing slots
            # of existing poses, never create new ones.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                    elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
                        pose_entries[j][kpt_a_id] = connections[i][0]
            continue
        else:
            # Middle limbs: extend a pose that already contains keypoint a,
            # or start a new pose entry if none does.
            kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
            kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
            for i in range(len(connections)):
                num = 0
                for j in range(len(pose_entries)):
                    if pose_entries[j][kpt_a_id] == connections[i][0]:
                        pose_entries[j][kpt_b_id] = connections[i][1]
                        num += 1
                        pose_entries[j][-1] += 1
                        pose_entries[j][-2] += all_keypoints[connections[i]
                                                             [1], 2] + connections[i][2]
                if num == 0:
                    pose_entry = np.ones(pose_entry_size) * -1
                    pose_entry[kpt_a_id] = connections[i][0]
                    pose_entry[kpt_b_id] = connections[i][1]
                    pose_entry[-1] = 2
                    pose_entry[-2] = np.sum(all_keypoints[connections[i]
                                                          [0:2], 2]) + connections[i][2]
                    pose_entries.append(pose_entry)
    # Drop poses with too few keypoints or too low an average score.
    filtered_entries = []
    for i in range(len(pose_entries)):
        if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
            continue
        filtered_entries.append(pose_entries[i])
    pose_entries = np.asarray(filtered_entries)
    return pose_entries, all_keypoints
def extract_keypoints(heatmap, all_keypoints, total_keypoint_num):
    """Find local maxima of one heatmap channel and record them as keypoints.

    Mutates ``heatmap`` in place (zeroes responses below 0.1) and appends a
    list of (x, y, score, global_id) tuples to ``all_keypoints``. Returns the
    number of keypoints found; ``total_keypoint_num`` offsets the global ids.
    """
    # Suppress weak responses in place.
    heatmap[heatmap < 0.1] = 0
    padded = np.pad(heatmap, [(2, 2), (2, 2)], mode='constant')
    rows, cols = padded.shape
    # Compare each pixel against its 4-neighbourhood via shifted views.
    center = padded[1:rows - 1, 1:cols - 1]
    left = padded[1:rows - 1, 2:cols]
    right = padded[1:rows - 1, 0:cols - 2]
    up = padded[2:rows, 1:cols - 1]
    down = padded[0:rows - 2, 1:cols - 1]
    peaks = ((center > left) &
             (center > right) &
             (center > up) &
             (center > down))
    # Crop back to the original heatmap coordinates.
    peaks = peaks[1:center.shape[0] - 1, 1:center.shape[1] - 1]
    ys, xs = np.nonzero(peaks)
    candidates = sorted(zip(xs, ys), key=itemgetter(0))  # (x, y), sorted by x
    suppressed = np.zeros(len(candidates), np.uint8)
    found = []
    keypoint_num = 0
    for i, (xi, yi) in enumerate(candidates):
        if suppressed[i]:
            continue
        # Suppress any later candidate closer than 6 pixels (simple NMS).
        for j in range(i + 1, len(candidates)):
            xj, yj = candidates[j]
            if math.sqrt((xi - xj) ** 2 + (yi - yj) ** 2) < 6:
                suppressed[j] = 1
        found.append((xi, yi, heatmap[yi, xi], total_keypoint_num + keypoint_num))
        keypoint_num += 1
    all_keypoints.append(found)
    return keypoint_num
|
|
import logging
import time
import sys
import networkx as nx
from multiprocessing import Pool
from fractions import Fraction
import numpy as np
import scipy.spatial as spatial
from opensfm import dataset
from opensfm import geo
from opensfm import matching
logger = logging.getLogger(__name__)
class Command:
    """OpenSfM CLI command: match features (and optionally tags) between
    image pairs.

    NOTE(review): this module uses Python 2 ``print`` statements; it will
    not run under Python 3 as-is.
    """
    name = 'match_features'
    help = 'Match features between image pairs'

    def add_arguments(self, parser):
        # Single positional argument: path to the dataset to process.
        parser.add_argument('dataset', help='dataset to process')

    def run(self, args):
        """Run tag matching (if configured) and then feature matching,
        pruning the candidate pair set by tag and/or metadata heuristics."""
        # setup
        data = dataset.DataSet(args.dataset)
        images = data.images()
        exifs = {im: data.load_exif(im) for im in images}
        processes = data.config.get('processes', 1)
        start = time.time()
        #===== tag matching =====#
        # if a tag detection algorithm was used
        if data.config.get('use_apriltags',False) or data.config.get('use_arucotags',False) or data.config.get('use_chromatags',False):
            # all possible pairs
            pairs = match_candidates_all(images)
            all_pairs = {im: [] for im in images}
            for im1, im2 in pairs:
                all_pairs[im1].append(im2)
            logger.info('Matching tags in {} image pairs'.format(len(pairs)))
            # limit used detections
            ignore_tag_list = create_ignore_tag_list(data)
            print 'Ignore Tag List: '
            print ignore_tag_list
            # context
            ctx = Context()
            ctx.data = data
            ctx.ignore_tag_list = ignore_tag_list
            # NOTE(review): this reassignment clobbers the ``args``
            # parameter (the argparse namespace); it is not used again
            # afterwards, but the shadowing is fragile.
            args = match_arguments(all_pairs, ctx)
            # run match (serial when a single process is configured)
            if processes == 1:
                for arg in args:
                    match_tags(arg)
            else:
                p = Pool(processes)
                p.map(match_tags, args)
        #=== end tag matching ===#
        #===== feature matching =====#
        # setup pairs for matching
        pairs = match_candidates_all(images)
        logger.info('{} Initial matching image pairs'.format(len(pairs)))
        tag_pairs = set()
        meta_pairs = set()
        tag_prune_mode = data.config.get('prune_with_tags','none')
        # tag pairs
        if tag_prune_mode == 'strict' or tag_prune_mode == 'medium' or tag_prune_mode == 'loose':
            tag_pairs = match_candidates_from_tags(images, data)
            logger.info('{} Tag matching image pairs'.format(len(tag_pairs)))
        # no tag pairs, but still make tag graph for resectioning
        else:
            # build tag matches dictionary
            tag_matches = {}
            for im1 in images:
                try:
                    im1_tag_matches = data.load_tag_matches(im1)
                except IOError:
                    # image has no saved tag matches; skip it
                    continue
                for im2 in im1_tag_matches:
                    tag_matches[im1, im2] = im1_tag_matches[im2]
            tags_graph = matching.create_tags_graph(tag_matches, data.config)
            data.save_tags_graph(tags_graph)
        # prune with metadata
        if data.config.get('prune_with_metadata',True):
            meta_pairs = match_candidates_from_metadata(images, exifs, data)
            logger.info('{} Meta matching image pairs'.format(len(meta_pairs)))
        # intersect the full pair set with any non-empty pruned sets
        if tag_pairs:
            pairs = pairs.intersection(tag_pairs)
        if meta_pairs:
            pairs = pairs.intersection(meta_pairs)
        logger.info('{} Final matching image pairs'.format(len(pairs)))
        # build pairs into dictionary
        final_pairs = {im: [] for im in images}
        for im1, im2 in pairs:
            final_pairs[im1].append(im2)
        # context
        ctx = Context()
        ctx.data = data
        ctx.cameras = ctx.data.load_camera_models()
        ctx.exifs = exifs
        ctx.p_pre, ctx.f_pre = load_preemptive_features(data)
        args = match_arguments(final_pairs, ctx)
        # match
        if processes == 1:
            for arg in args:
                match(arg)
        else:
            p = Pool(processes)
            p.map(match, args)
        #=== end feature matching ===#
        # record the elapsed wall-clock time in the profiling log
        end = time.time()
        with open(ctx.data.profile_log(), 'a') as fout:
            fout.write('match_features: {0}\n'.format(end - start))
class Context:
    # Plain attribute bag used to pass shared state (dataset, camera models,
    # exifs, preemptive features, ignore lists) into the matching worker
    # functions, including across multiprocessing.Pool workers.
    pass
def create_ignore_tag_list(data):
    """Build the list of tag ids to ignore during tag matching.

    Keeps approximately ``ratio_tags_to_keep`` of the unique tag ids:
    the ratio is reduced to a fraction and, out of every ``denominator``
    consecutive ids, the first ``numerator`` are kept and the rest are
    added to the ignore list. Returns the list of ignored tag ids.

    Fixes: ids are iterated in sorted order so the kept subset is
    deterministic (plain set iteration order varies between runs), and the
    per-image debug averages use float division (Python 2 ``/`` on ints
    truncates).
    """
    # variables
    ignore_tag_list = []
    keep_ratio = data.config.get('ratio_tags_to_keep',1.0)
    # round ratio down to 1% steps (e.g. 0.009 becomes 0.0)
    keep_ratio = int(keep_ratio*100) / 100.0
    # load tag json and images
    tag_detections = data.load_tag_detection()
    # get unique ids set
    tag_ids = set()
    for image in tag_detections:
        for det in tag_detections[image]:
            tag_ids.add(det.id)
    # express the keep ratio as a reduced fraction
    keep_fraction = Fraction(keep_ratio).limit_denominator()
    # add to ignore list, iterating ids in sorted order for determinism
    n = 1
    for tag_id in sorted(tag_ids):
        if n > keep_fraction.numerator:
            ignore_tag_list.append(tag_id)
        if n == keep_fraction.denominator:
            n = 1
            continue
        n += 1
    # count number of detections per image after removing from ignore list
    # (set membership avoids O(n) list scans per detection)
    ignore_set = set(ignore_tag_list)
    detcounts = {}
    detsum = 0
    for image in tag_detections:
        detct = 0
        for det in tag_detections[image]:
            if det.id not in ignore_set:
                detct += 1
        detcounts[image] = detct
        detsum += detct
    # log summary statistics
    logger.debug('Unique Tag IDs Found: '+str(len(tag_ids)))
    logger.debug('Keep Ratio: '+str(keep_ratio))
    logger.debug('Keep Fraction: '+str(keep_fraction.numerator)+' / '+str(keep_fraction.denominator))
    logger.debug('Number of Tags to Keep: '+str(len(tag_ids)-len(ignore_tag_list)))
    logger.debug('Number of Unique Tags per Image: '+str( float(len(tag_ids)-len(ignore_tag_list)) / len(data.images() )))
    logger.debug('Number of total Tag Detections: '+str(detsum))
    logger.debug('Number of Tag Detections per Images: '+str( float(detsum) / len(data.images() ) ))
    # return
    return ignore_tag_list
def load_preemptive_features(data):
    """Load (truncated) preemptive feature points and descriptors per image.

    Returns two dicts keyed by image name: ``p`` (feature points) and ``f``
    (descriptors), each truncated to at most ``preemptive_max`` rows. When
    ``preemptive_threshold`` is not positive, returns two empty dicts.
    """
    p, f = {}, {}
    if data.config['preemptive_threshold'] > 0:
        logger.debug('Loading preemptive data')
        for image in data.images():
            try:
                p[image], f[image] = \
                    data.load_preemtive_features(image)
            except IOError:
                # Fall back to the full feature set. Use fresh names here:
                # the original code unpacked into ``p, f`` and clobbered the
                # accumulator dicts with arrays, crashing on the next line.
                points, features, c = data.load_features(image)
                p[image], f[image] = points, features
            preemptive_max = min(data.config.get('preemptive_max', p[image].shape[0]), p[image].shape[0])
            p[image] = p[image][:preemptive_max, :]
            f[image] = f[image][:preemptive_max, :]
    return p, f
def has_gps_info(exif):
    """True-ish when ``exif`` carries GPS latitude and longitude fields."""
    required = ('latitude', 'longitude')
    return exif and 'gps' in exif and all(k in exif['gps'] for k in required)
def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_distance):
    """Find candidate matching pairs by GPS distance."""
    if max_neighbors <= 0 and max_distance <= 0:
        return set()
    # Zero means "no limit": substitute effectively-infinite caps.
    neighbor_cap = max_neighbors or 99999999
    distance_cap = max_distance or 99999999.
    k = min(len(images), neighbor_cap + 1)

    # Topocentric position of every image, from its GPS EXIF.
    points = np.zeros((len(images), 3))
    for idx, image in enumerate(images):
        gps = exifs[image]['gps']
        points[idx] = geo.topocentric_from_lla(
            gps['latitude'], gps['longitude'], gps['altitude'],
            reference['latitude'], reference['longitude'], reference['altitude'])

    tree = spatial.cKDTree(points)
    pairs = set()
    for idx, image in enumerate(images):
        _, neighbors = tree.query(points[idx], k=k, distance_upper_bound=distance_cap)
        for nbr in neighbors:
            # Skip self-pairs and the tree's missing-neighbor fill index.
            if nbr != idx and nbr < len(images):
                pairs.add(tuple(sorted((images[idx], images[nbr]))))
    return pairs
def match_candidates_by_time(images, exifs, max_neighbors):
    """Find candidate matching pairs by capture-time proximity."""
    if max_neighbors <= 0:
        return set()
    k = min(len(images), max_neighbors + 1)

    # 1-D KD-tree over the capture timestamps.
    capture_times = np.zeros((len(images), 1))
    for idx, image in enumerate(images):
        capture_times[idx] = exifs[image]['capture_time']
    tree = spatial.cKDTree(capture_times)

    pairs = set()
    for idx, image in enumerate(images):
        _, neighbors = tree.query(capture_times[idx], k=k)
        pairs.update(
            tuple(sorted((images[idx], images[nbr])))
            for nbr in neighbors
            if nbr != idx and nbr < len(images))
    return pairs
def match_candidates_by_order(images, exifs, max_neighbors):
    """Find candidate matching pairs by sequence order.

    Pairs each image with its neighbors in the sequence. ``exifs`` is
    unused but kept for signature parity with the other
    ``match_candidates_*`` helpers.

    Fix: use floor division — under Python 3 the old ``/`` produced a
    float and broke ``range()``; ``//`` behaves identically on Python 2
    ints.
    """
    if max_neighbors <= 0:
        return set()
    n = (max_neighbors + 1) // 2
    pairs = set()
    for i, image in enumerate(images):
        a = max(0, i - n)
        b = min(len(images), i + n)
        for j in range(a, b):
            if i != j:
                pairs.add( tuple( sorted( (images[i], images[j]) )))
    return pairs
def match_candidates_from_metadata(images, exifs, data):
    """Compute candidate matching pairs from image metadata.

    Combines the GPS-distance, capture-time and sequence-order selectors and
    returns a dict mapping each image to the list of its candidate partners.
    Note: sorts ``images`` in place.
    """
    max_distance = data.config['matching_gps_distance']
    gps_neighbors = data.config['matching_gps_neighbors']
    time_neighbors = data.config['matching_time_neighbors']
    order_neighbors = data.config['matching_order_neighbors']
    if not data.reference_lla_exists():
        data.invent_reference_lla()
    reference = data.load_reference_lla()
    # GPS-based matching requires GPS info on every image.
    if not all(map(has_gps_info, exifs.values())):
        if gps_neighbors != 0:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("Not all images have GPS info. "
                           "Disabling matching_gps_neighbors.")
        gps_neighbors = 0
        max_distance = 0
    images.sort()
    d = match_candidates_by_distance(images, exifs, reference, gps_neighbors, max_distance)
    t = match_candidates_by_time(images, exifs, time_neighbors)
    o = match_candidates_by_order(images, exifs, order_neighbors)
    pairs = d | t | o
    # Each sorted pair (im1, im2) is attached to its lexicographically first member.
    res = {im: [] for im in images}
    for im1, im2 in pairs:
        res[im1].append(im2)
    return res
def match_candidates_from_tags(images, data):
    """Compute candidate matching pairs from tag (fiducial marker) connections.

    Builds a bipartite image/tag graph; two images become candidates when
    they share a tag.  Depending on the ``prune_with_tags`` mode ('none',
    'medium' or 'loose'), untagged images and disconnected components are
    additionally cross-paired.
    """
    # Build the dictionary of pairwise tag matches from the per-image files.
    tag_matches = {}
    for im1 in images:
        try:
            im1_tag_matches = data.load_tag_matches(im1)
        except IOError:
            continue
        for im2 in im1_tag_matches:
            tag_matches[im1, im2] = im1_tag_matches[im2]
    tags_graph = matching.create_tags_graph(tag_matches, data.config)
    data.save_tags_graph(tags_graph)
    # Create pairs: two images are candidates when they share a tag node.
    pairs = set()
    images_with_no_tags = []
    for im1 in images:
        # Bug fix: was a bare ``except:``; the graph lookup raises KeyError
        # for images with no detected tags.
        try:
            tags = tags_graph[im1]
        except KeyError:
            images_with_no_tags.append(im1)
            continue
        for tag in tags:
            for im2 in tags_graph[tag]:
                if im1 == im2:
                    continue
                pairs.add(tuple(sorted((im1, im2))))
    tag_prune_mode = data.config.get('prune_with_tags', 'none')
    # medium/loose: images without any tag get paired with every other image
    # (Python-3-compatible print calls; same output as the old statements).
    if tag_prune_mode == 'medium' or tag_prune_mode == 'loose':
        print('tag matching strictness at least medium.')
        for image in images_with_no_tags:
            for candidate in images:
                if image == candidate:
                    continue
                pairs.add(tuple(sorted((image, candidate))))
    # loose additionally merges separate connected components of the tag graph
    if tag_prune_mode == 'loose':
        print('tag matching strictness is loose.')
        cc = sorted(nx.connected_components(tags_graph), key=len, reverse=True)
        if len(cc) > 1:
            print('Merging {} tag graph connected components in loose mode:'.format(len(cc)))
            for cc_ct1 in range(len(cc)):
                for cc_ct2 in range(cc_ct1 + 1, len(cc)):
                    print(' Merging cc' + str(cc_ct1) + ' with cc' + str(cc_ct2))
                    for im1 in cc[cc_ct1]:
                        # skip the tag nodes, keep only image nodes
                        if im1 not in images:
                            continue
                        for im2 in cc[cc_ct2]:
                            if im2 not in images:
                                continue
                            pairs.add(tuple(sorted((im1, im2))))
    return pairs
def match_candidates_all(images):
    """Return every pairwise combination of images as candidate matches.

    Each pair is a lexicographically sorted 2-tuple of image names.
    """
    return {tuple(sorted((images[i], images[j])))
            for i in range(len(images))
            for j in range(i + 1, len(images))}
def match_arguments(pairs, ctx):
    """Yield one (image, candidates, index, total, ctx) work item per image."""
    total = len(pairs)
    for index, (image, candidates) in enumerate(pairs.items()):
        yield image, candidates, index, total, ctx
def match_tags(args):
    """Compute all tag (fiducial marker) matches for a single image.

    ``args`` is a work item from :func:`match_arguments`:
    (image name, candidate image names, job index, job count, context).
    A match is a corner-to-corner correspondence between identical tag ids;
    results are persisted through ``ctx.data.save_tag_matches``.
    """
    im1, candidates, i, n, ctx = args
    logger.info('Tag Matching {} - {} / {}'.format(im1, i + 1, n))
    ignore_tag_list = ctx.ignore_tag_list
    # Only run when at least one tag detector is enabled.
    if ctx.data.config.get('use_apriltags', False) or \
            ctx.data.config.get('use_arucotags', False) or \
            ctx.data.config.get('use_chromatags', False):
        im1_tag_matches = {}
        # Bug fix: the bare ``except:`` clauses also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        try:
            p1, f1, i1, c1 = ctx.data.load_tag_features(im1)
        except Exception:
            return
        for im2 in candidates:
            try:
                p2, f2, i2, c2 = ctx.data.load_tag_features(im2)
            except Exception:
                continue
            tag_matches = []
            # Tags are stored as 4 consecutive corner rows, with the tag id in
            # ``f`` repeated for each corner, hence the stride of 4.
            for id1 in range(0, p1.shape[0], 4):
                if f1[id1] in ignore_tag_list:
                    continue
                for id2 in range(0, p2.shape[0], 4):
                    if f1[id1] == f2[id2]:
                        # Same tag id in both images: match all 4 corners.
                        # (Renamed from ``i`` which shadowed the job index.)
                        for corner in range(4):
                            tag_matches.append([id1 + corner, id2 + corner, f1[id1]])
            if tag_matches:
                im1_tag_matches[im2] = tag_matches
        if im1_tag_matches:
            ctx.data.save_tag_matches(im1, im1_tag_matches)
def match(args):
    """Compute all matches for a single image.

    ``args`` is a work item from :func:`match_arguments`.  The image is
    matched against each candidate in up to three stages: optional
    preemptive matching on the preloaded feature subsets, symmetric
    Lowe-ratio matching, then robust geometric filtering.  Results are
    saved via ``ctx.data.save_matches``.
    """
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']
    im1_matches = {}
    for im2 in candidates:
        # preemptive matching: cheaply discard pairs whose preloaded feature
        # subsets do not match well enough.
        # NOTE(review): temporarily mutates the shared config dict to swap
        # lowes_ratio -- not safe if workers share this dict; confirm.
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1], ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug("Discarding based of preemptive matches {0} < {1}".format(len(matches_pre), preemptive_threshold))
                continue
        # symmetric matching over the full feature sets
        t = time.time()
        p1, f1, c1 = ctx.data.load_features(im1)
        i1 = ctx.data.load_feature_index(im1, f1)
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)
        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            # record the empty result so the pair is marked as processed
            im1_matches[im2] = []
            continue
        # robust matching: filter the symmetric matches with a geometric model
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches, config)
        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(len(rmatches), len(matches), time.time() - t))
    ctx.data.save_matches(im1, im1_matches)
|
|
"""Custom utilities for interacting with the Materials Project.
Mostly for getting and manipulating structures. With all of the function definitions and docstrings,
these are more verbose """
import fnmatch
import os
from pymatgen import MPRester
from fireworks import LaunchPad
import numpy as np
import scipy
# TODO: wrap MPRester calls in a try-except block to catch errors and retry automatically
# Bug fix: ``import scipy`` alone does not load the ``scipy.constants``
# subpackage, so the attribute accesses below raised AttributeError.
import scipy.constants

# Conversion factors between eV/atom and J/mol (elementary charge in joules
# times Avogadro's number).
eV_per_atom_to_J_per_mol = scipy.constants.eV * scipy.constants.Avogadro
J_per_mol_to_eV_per_atom = 1 / (scipy.constants.eV * scipy.constants.Avogadro)
def mp_structures_from_ids(mp_ids, API_KEY=None):
    """Fetch Structure objects for a list of Materials Project ids.

    Args:
        mp_ids ([str]): list of Materials Project ids in the form of 'mp-###'
        API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None.

    Returns:
        List of Structure objects, one per id, in input order
    """
    with MPRester(API_KEY) as mpr:
        return [mpr.get_structure_by_material_id(mp_id) for mp_id in mp_ids]
def mp_structures_from_system(system, API_KEY=None):
    """Fetch all Materials Project structures for a chemical system.

    Args:
        system (str): system name (e.g. Fe-Cr)
        API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None.

    Returns:
        List of Structure objects
    """
    with MPRester(API_KEY) as mpr:
        return mpr.get_structures(system)
def mp_structures_and_energies_from_system(system, API_KEY=None):
    """Fetch the raw Materials Project entry dicts for a chemical system.

    Args:
        system (str): system name (e.g. Fe-Cr), but could also be mp-ids or formula
        API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None.

    Returns:
        List of dicts such as {"material_id": id, "pretty_formula": formula, "energy_per_atom": ...}
    """
    with MPRester(API_KEY) as mpr:
        return mpr.get_data(system)
def mp_sorted_structures_from_system(system, filter_energy=0.2, API_KEY=None):
    """Supply a chemical system (e.g. Fe-Cr) and get back Structures sorted by energy above hull.

    Energies too far above the hull can be removed with the filter_energy.

    Args:
        system (str): system name (e.g. Fe-Cr), but could also be mp-ids or formula
        filter_energy (float): Maximum energy above hull allowed in eV
        API_KEY (str): your Materials Project API_KEY. Will try to use environment key if None.

    Returns:
        List of Structure objects sorted by energy above hull
    """
    entries = mp_structures_and_energies_from_system(system, API_KEY=API_KEY)
    mp_ids = [entry["material_id"] for entry in entries]
    energies_above_hull = [entry["e_above_hull"] for entry in entries]
    # Sort ids by hull energy and drop anything above the filter.
    sorted_mp_ids = [mp_id for energy, mp_id in sorted(zip(energies_above_hull, mp_ids)) if energy <= filter_energy]
    # Bug fix: the API_KEY was not forwarded here, so this call silently fell
    # back to the environment key even when one was passed in explicitly.
    sorted_structs = mp_structures_from_ids(sorted_mp_ids, API_KEY=API_KEY)
    return sorted_structs
def get_launchpad(launchpad_file=None):
    """
    Returns a LaunchPad object. If the launchpad_file is None, then try to auto load from environment.

    Args:
        launchpad_file (File-like): A file-like or file path to the LaunchPad file.

    Returns:
        LaunchPad
    """
    if launchpad_file:
        # Bug fix: duck-type file objects instead of ``isinstance(x, file)``;
        # the ``file`` builtin is Python 2 only and raises NameError on Python 3.
        if hasattr(launchpad_file, 'read'):
            # a file object was found
            ext = launchpad_file.name.split('.')[-1]
            if ext == 'yaml':
                launchpad = LaunchPad.from_format(launchpad_file.read(), f_format='yaml')
            else:
                # assume json
                launchpad = LaunchPad.from_format(launchpad_file.read())
        else:
            # assume launchpad_file is a path
            launchpad = LaunchPad.from_file(launchpad_file)
    else:
        launchpad = LaunchPad.auto_load()
    return launchpad
def update_fws_spec(wf, spec_dict, fw_name_constraint=None):
    """
    Update the fireworks matching the name constraint with the passed spec_dict. Can be used for
    generically updating the spec as long as update can be expressed as a dictionary.

    Args:
        wf (Workflow): The original workflow object
        spec_dict (dict): the keys and values to update in the spec, e.g. {'_queueadapter': {'walltime': '24:00:00'}}
        fw_name_constraint (str): a constraint on the FW name

    Returns:
        Workflow: the same workflow, with matching fireworks' specs updated in place
    """
    for firework in wf.fws:
        # skip fireworks whose name does not contain the constraint substring
        if fw_name_constraint is not None and fw_name_constraint not in firework.name:
            continue
        firework.spec.update(spec_dict)
    return wf
def recursive_glob(start, pattern):
    """
    Recursively glob for the given pattern from the start directory.

    Taken from ESPEI.

    Args:
        start (str): Path of the directory to walk while for file globbing
        pattern (str): Filename pattern to match in the glob

    Returns:
        [str]: Sorted list of matched filenames
    """
    matched = [os.path.join(root, filename)
               for root, _, filenames in os.walk(start)
               for filename in fnmatch.filter(filenames, pattern)]
    return sorted(matched)
def sort_x_by_y(x, y):
    """Return the elements of x reordered by the sort order of y."""
    ordered_pairs = sorted(zip(y, x), key=lambda pair: pair[0])
    return [element for _, element in ordered_pairs]
def supercell_scaling_by_target_atoms(structure, min_atoms=60, max_atoms=120,
                                      target_shape='sc', lower_search_limit=-2, upper_search_limit=2,
                                      verbose=False):
    """
    Find the supercell scaling matrix that gives the most cubic supercell for a
    structure, where the supercell has between the minimum and maximum number of atoms.

    Parameters
    ----------
    structure : pymatgen.Structure
        Unitcell of a structure
    min_atoms : int
        Minimum number of atoms allowed in the supercell. Default is 60.
    max_atoms : int
        Maximum number of atoms allowed in the supercell. Default is 120.
    target_shape : str
        Target shape of supercell. Could choose 'sc' for simple cubic or 'fcc' for face centered
        cubic. Default is 'sc'.
    lower_search_limit : int
        How far to search below the 'ideal' cubic scaling. Default is -2.
    upper_search_limit : int
        How far to search above the 'ideal' cubic scaling. Default is 2.
    verbose : bool
        Whether to print extra details on the cell shapes and scores. Useful for debugging.

    Returns
    -------
    numpy.ndarray
        2d array of a scaling matrix, e.g. [[3,0,0],[0,3,0],[0,0,3]]

    Notes
    -----
    The motivation for this is for use in phonon calculations and defect calculations.
    It is important that defect atoms are far enough apart that they do not interact.
    Scaling unit cells that are not cubic by even dimensions might result in interacting
    defects. An example would be a tetragonal cell with 2x8x8 Ang lattice vectors being
    made into a 2x2x2 supercell. Atoms along the first dimension would not be very far
    apart.

    We are using a pure Python implementation from ASE, which is not very fast for a given
    supercell size. This allows for a variable supercell size, so it's going to be slow
    for a large range of atoms.

    The search limits are passed directly to ``find_optimal_cell_shape_pure_python``.
    They define the search space for each individual supercell based on the "ideal" scaling.
    For example, a cell with 4 atoms and a target size of 110 atoms might have an ideal scaling
    of 3x3x3. The search space for a lower and upper limit of -2/+2 would be 1-5. Since the
    calculations are based on the cartesian product of 3x3 matrices, large search ranges are
    very expensive.
    """
    # Local import keeps ASE an optional dependency of the module.
    from ase.build import get_deviation_from_optimal_cell_shape, find_optimal_cell_shape_pure_python

    # range of supercell sizes in number of unitcells
    supercell_sizes = range(min_atoms//len(structure), max_atoms//len(structure) + 1)
    optimal_supercell_shapes = []  # numpy arrays of optimal shapes
    optimal_supercell_scores = []  # will correspond to supercell size
    # find the best shape (and its deviation score) for each candidate size
    for sc_size in supercell_sizes:
        optimal_shape = find_optimal_cell_shape_pure_python(structure.lattice.matrix, sc_size, target_shape, upper_limit=upper_search_limit, lower_limit=lower_search_limit)
        optimal_supercell_shapes.append(optimal_shape)
        optimal_supercell_scores.append(get_deviation_from_optimal_cell_shape(optimal_shape, target_shape))
    if verbose:
        for i in range(len(supercell_sizes)):
            print('{} {:0.4f} {}'.format(supercell_sizes[i], optimal_supercell_scores[i], optimal_supercell_shapes[i].tolist()))
    # find the most optimal cell shape along the range of sizes (lowest deviation)
    optimal_sc_shape = optimal_supercell_shapes[np.argmin(optimal_supercell_scores)]
    return optimal_sc_shape
|
|
import torch
import numpy as np
# z y
# '\` /|\
# \ |
# \ |
# \ |
# \|
# x <--------------------------------
# |\
# | \
# | \
# | \
# | \
# | \
def light_direction(random=0.5, device='cpu'):
    """Sample a light direction as a (1, 3) tensor.

    With probability ``random`` the direction is drawn uniformly with
    theta, gamma in [pi/4, 3*pi/4] (theta measured from +y, gamma from +x
    in the x-z plane); otherwise a fixed [0, 0, -3] direction is returned.
    """
    lo, hi = np.pi / 4, np.pi * 3 / 4
    if np.random.rand() < random:
        theta, gamma = torch.rand(2) * (hi - lo) + lo
        x_comp = torch.sin(theta) * torch.cos(gamma)
        y_comp = torch.cos(theta)
        z_comp = -torch.sin(theta) * torch.sin(gamma)
        direction = torch.tensor([[x_comp, y_comp, z_comp]]).to(device)
    else:
        direction = torch.tensor([[0.0, 0.0, -3.0]]).to(device)
    return direction
def light_point(random=0.5, device='cpu'):
    """Sample a point-light position as a (1, 3) tensor.

    With probability ``random`` the position is a random direction scaled by
    a uniform distance in [2, 5); otherwise a far-away fixed point is used to
    approximate a parallel (directional) light.
    """
    if np.random.rand() >= random:
        # far distance simulates a parallel direction light
        return torch.tensor([[0.0, 0.0, -10.0]]).to(device)
    direction = light_direction(random=True, device=device)
    distance = np.random.uniform(2, 5)
    return distance * direction
|
|
import numpy as np
import argparse, os, sys, h5py
from hfd.variables import label_df
# Rewrite the geometry-label and bag-of-features datasets of every matching
# h5 file under the given folder from the columns of ``label_df``.
parser = argparse.ArgumentParser(description='Add latent annotations to h5s.')
parser.add_argument('folder', type=str, help='Folder to search for h5 files.')
parser.add_argument('fontsize', type=int, help='Fontsize.')
args = parser.parse_args()
folder = args.folder
fontsize = args.fontsize

# Scalar-valued label columns written one dataset each.
labels = ['initial_geometry', 'medial_geometry', 'final_geometry', 'all_geometry']
# Bag-of-features columns hold arrays and must be stacked into a 2-D dataset.
bof = ['atom_bof', 'atom_mod_rotations_bof']
# (Removed the dead ``files = []`` initializer -- os.walk rebinds it immediately.)
for d, _, files in os.walk(folder):
    for fname in files:
        # NOTE(review): substring match -- '1.h5' also matches e.g. 'x21.h5';
        # confirm the filenames really end with '<fontsize>.h5'.
        if '{}.h5'.format(fontsize) in fname:
            with h5py.File(os.path.join(d, fname), 'a') as f:
                for l in labels:
                    # delete any stale dataset before re-creating it
                    try:
                        del f[l]
                    except KeyError:
                        pass
                    f.create_dataset(l, data=label_df[l].values)
                for l in bof:
                    try:
                        del f[l]
                    except KeyError:
                        pass
                    f.create_dataset(l, data=np.stack([*label_df[l].values]))
|
|
import os
import shutil
import subprocess
from typing import List
import cv2
import numpy as np
from PIL import Image
from skatingAI.utils.utils import BodyParts, segmentation_class_colors, body_part_classes
class DataAdmin(object):
    """Download, extract and convert the CV dataset at cv.iri.upc-csic.es.

    Each 5-video chunk of the four modalities (rgb, clothes segmentation,
    body segmentation, skeleton) is fetched with wget, extracted, and
    converted into compressed numpy archives under ``<cwd>/processed/numpy``.
    """
    def __init__(self, chunk_amount: int = 1):
        # chunk_amount selects how many 5-video chunks to process (1..8)
        self.chunk_amount = chunk_amount
        self.path_processed_dir = f"{os.getcwd()}/processed"
        self.dataset_url = "https://cv.iri.upc-csic.es/Dataset/"
        # the four per-frame modalities published by the dataset
        self.labels = ["rgb", "segmentation_clothes", "segmentation_body", "skeleton"]
        self.file_count = 0
        # chunk_amount * 5 is the highest chunk index; 40 caps it at 8 chunks
        if chunk_amount < 1 or chunk_amount * 5 > 40:
            raise AssertionError(f"ChunkNumber must be between 1 and 8 but was {chunk_amount}")
        self._create_processed_folders()

    def _create_processed_folders(self):
        """Create the output folders for the converted numpy archives."""
        base = f"{self.path_processed_dir}/numpy/"
        folders = ["rgb", "rgbb", "masks", "skeletons"]
        for folder in folders:
            if not os.path.exists(base + folder):
                print(f"create new folder: {base + folder}")
                os.makedirs(base + folder)

    def _delete_dirs(self):
        """Delete downloaded archives and extracted folders (keeps ``numpy``)."""
        print("\n...start to delete folders")
        subfolders = [f.path for f in os.scandir(self.path_processed_dir) if f.is_dir()]
        files = [f.path for f in os.scandir(self.path_processed_dir) if f.is_file()]
        for file in files:
            os.remove(file)
            print(f"successfully deleted [{file}]")
        for folder in subfolders:
            # never delete the numpy output folder
            if "numpy" not in folder:
                shutil.rmtree(folder)
                print(f"successfully deleted [{folder}]")

    def process_data(self):
        """ just process data, if download and extraction was already successful """
        max_chunks = self.chunk_amount * 5
        # NOTE(review): the loop index is unused, so the same extracted tree is
        # scanned once per chunk -- presumably intentional after a full
        # extraction; confirm.
        for i in range(1, max_chunks, 5):
            self._scandir4img()
            #self._delete_dirs()
        print("all data was successfully downloaded")

    def download_and_process_data(self):
        """Download every modality of every chunk, convert, then clean up."""
        max_chunks = self.chunk_amount * 5
        for i in range(1, max_chunks, 5):
            for label in self.labels:
                for chunk in ['woman', 'man']:
                    # download data
                    self._thread_download_data(label, chunk, i)
            # process data
            success = self._scandir4img()
            if success:
                self._delete_dirs()
        print("all data was successfully downloaded")

    def _thread_download_data(self, label: str, chunk: str, i: int):
        """Download (wget) and extract one 5-video chunk of one modality."""
        # download chunk with wget
        file_name = f"{label}_{chunk}{i:02d}_{i + 4:02d}.tar.gz"
        download_chunk_url = f"{self.dataset_url + label}/{file_name}"
        save_url = f"{self.path_processed_dir}/{file_name}"
        if not os.path.exists(save_url):
            print(f"start to download {download_chunk_url} to {save_url}")
            print("this may take a while...")
            proc = subprocess.Popen(["wget", '-q', download_chunk_url, '-O', save_url])
            n = proc.wait()
            print(f"downloaded: {i}")
        # extract chunk (-k keeps files that were already extracted)
        os.system(f"tar -xzkf {save_url} -C {self.path_processed_dir}")

    def _scandir4img(self, dir: str = f"{os.getcwd()}/processed/rgb"):
        """Walk the extracted rgb tree and convert each camera folder that has
        all companion modalities; return False when the tree looks incomplete."""
        chunks_women_men = [f.path for f in os.scandir(dir) if f.is_dir()]
        if len(chunks_women_men) < 10:
            print(f"There are only {len(chunks_women_men)} folders. Something must be wrong.")
            return False
        # paths_videos = []
        for dir in list(chunks_women_men):
            paths_videos = [f.path for f in os.scandir(dir) if f.is_dir()]
            for dir_video in list(paths_videos):
                camera_dirs = [f.path for f in os.scandir(dir_video) if f.is_dir()]
                for cam in camera_dirs:
                    cam_sub_dir = [f.path for f in os.scandir(cam) if f.is_dir()][0]
                    # mirror the rgb path into the other three modalities
                    segmentation_body_dir = cam.replace(f"rgb", 'segmentation_body')
                    segmentation_clothes_dir = cam.replace(f"rgb", 'segmentation_clothes')
                    skeleton_dir = cam.replace(f"rgb", 'skeleton')
                    if os.path.exists(segmentation_body_dir) and \
                            os.path.exists(segmentation_clothes_dir) and \
                            os.path.exists(skeleton_dir):
                        self._read_imgs2numpy(cam_sub_dir, segmentation_body_dir, segmentation_clothes_dir,
                                              skeleton_dir)
                    else:
                        print(f"{cam_sub_dir.split('/')[-3:]} does not exist for all components.")
        return True

    def _read_imgs2numpy(self, rgb_dir, segmentation_body_dir, segmentation_clothes_dir, skeleton_dir):
        """Load one camera's frames and save them as compressed ``.npz`` files."""
        # check whether directories exist for masks, segmentation_body, segmentation_clothes, skeleton
        all_imgs = {'masks': [], 'rgb': [], 'rgbb': [], 'skeletons': []}
        file_names_rgb = [f.path for f in os.scandir(rgb_dir) if f.is_file()]
        video_name = '__'.join(segmentation_body_dir.split('/')[-3:-1])
        camera_name = segmentation_body_dir.split('/')[-1]
        for file_name_rgb in sorted(file_names_rgb):
            file_name = file_name_rgb.split('.')[-2].split('/')[-1]
            file_name_segmentation_body = f"{segmentation_body_dir}/{file_name}.png"
            file_name_segmentation_clothes = f"{segmentation_clothes_dir}/{file_name}.png"
            file_name_skeleton = f"{skeleton_dir}/{file_name}.txt"
            if os.path.exists(file_name_rgb) and \
                    os.path.exists(file_name_segmentation_body) and \
                    os.path.exists(file_name_segmentation_clothes) and \
                    os.path.exists(file_name_skeleton):
                rgb_img = cv2.imread(file_name_rgb)
                segmentation_clothes_img = cv2.imread(file_name_segmentation_clothes)
                all_imgs['rgb'].append(cv2.imread(file_name_rgb))
                all_imgs['rgbb'].append(self._create_rgbb(rgb_img, segmentation_clothes_img))
                all_imgs['masks'].append(self._preprocess_img2classes(
                    np.asarray(Image.open(file_name_segmentation_body).convert('RGB'))))
                # keep x/y of a fixed subset of 19 joints, flattened to 1-D
                skeleton_arr = np.loadtxt(file_name_skeleton)[:, :2][
                    [0, 1, 5, 9, 10, 11, 12, 33, 34, 35, 36, 57, 58, 59, 61, 62, 63, 64, 66]]
                skeleton_arr = np.reshape(skeleton_arr, -1)
                all_imgs['skeletons'].append(skeleton_arr)
            else:
                print(f"{file_name_rgb.split('/')[-3:]} does not exist for all components.")
        for img_sequence in all_imgs:
            np.savez_compressed(f"{self.path_processed_dir}/numpy/{img_sequence}/{video_name}_{camera_name}",
                                all_imgs[img_sequence])
        self.file_count += 1
        print(f"[{self.file_count}] saved `{video_name}:{camera_name}` mask and rgb as compressed npz.")
        return all_imgs

    def _preprocess_img2classes(self, img):
        """Map a body-part color segmentation image to a uint8 class-id mask."""
        body_mask = np.zeros(img.shape[:2])
        sb = {**segmentation_class_colors}
        # background stays 0, so drop it from the color table
        sb.pop(BodyParts.bg.name)
        for i, key in enumerate(sb):
            body_mask[(img == sb[key]).all(axis=2)] = body_part_classes[key]
        return body_mask.astype(np.uint8)

    def _create_rgbb(self, rgb_img, segmentation_clothes_img):
        """Black out the background (grey in the clothes segmentation) of a frame."""
        img = rgb_img.copy()
        img[(segmentation_clothes_img == [153, 153, 153]).all(axis=2)] = [0, 0, 0]
        return img
DataAdmin(chunk_amount=8).download_and_process_data()
|
|
from __future__ import annotations
import enum
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union, cast
if TYPE_CHECKING:
from arkouda.categorical import Categorical
import numpy as np # type: ignore
from typeguard import typechecked
from arkouda.client import generic_msg
from arkouda.dtypes import int64 as akint64
from arkouda.dtypes import uint64 as akuint64
from arkouda.infoclass import list_registry
from arkouda.logger import getArkoudaLogger
from arkouda.pdarrayclass import (
RegistrationError,
create_pdarray,
pdarray,
unregister_pdarray_by_name,
)
from arkouda.pdarraycreation import arange
from arkouda.strings import Strings
__all__ = ["unique", "GroupBy", "broadcast", "GROUPBY_REDUCTION_TYPES"]
groupable_element_type = Union[pdarray, Strings, "Categorical"]
groupable = Union[groupable_element_type, Sequence[groupable_element_type]]
def unique(
    pda: groupable, return_groups: bool = False, assume_sorted: bool = False  # type: ignore
) -> Union[
    groupable, Tuple[groupable, pdarray, pdarray, int]  # type: ignore
]:  # type: ignore
    """
    Find the unique elements of an array.

    Returns the unique elements of an array, sorted if the values are integers.
    There is an optional output in addition to the unique elements: the number
    of times each unique value comes up in the input array.

    Parameters
    ----------
    pda : (list of) pdarray, Strings, or Categorical
        Input array.
    return_groups : bool, optional
        If True, also return grouping information for the array.
    assume_sorted : bool, optional
        If True, assume pda is sorted and skip sorting step

    Returns
    -------
    unique : (list of) pdarray, Strings, or Categorical
        The unique values. If input dtype is int64, return values will be sorted.
    permutation : pdarray, optional
        Permutation that groups equivalent values together (only when return_groups=True)
    segments : pdarray, optional
        The offset of each group in the permuted array (only when return_groups=True)

    Raises
    ------
    TypeError
        Raised if pda is not a pdarray or Strings object
    RuntimeError
        Raised if the pdarray or Strings dtype is unsupported

    Notes
    -----
    For integer arrays, this function checks to see whether `pda` is sorted
    and, if so, whether it is already unique. This step can save considerable
    computation. Otherwise, this function will sort `pda`.

    Examples
    --------
    >>> A = ak.array([3, 2, 1, 1, 2, 3])
    >>> ak.unique(A)
    array([1, 2, 3])
    """
    from arkouda.categorical import Categorical as Categorical_

    # Fast path: objects with their own unique() (e.g. Categorical) when no
    # grouping info is requested.
    if not return_groups and hasattr(pda, "unique"):
        return cast(Categorical_, pda).unique()
    # Get all grouping keys
    if hasattr(pda, "_get_grouping_keys"):
        # Single groupable array
        nkeys = 1
        grouping_keys = cast(list, cast(groupable_element_type, pda)._get_grouping_keys())
    else:
        # Sequence of groupable arrays
        nkeys = len(pda)
        grouping_keys = []
        first = True
        for k in pda:
            # all key arrays must share the same length (number of rows)
            if first:
                size = k.size
                first = False
            elif k.size != size:
                raise ValueError("Key arrays must all be same size")
            if not hasattr(k, "_get_grouping_keys"):
                raise TypeError(f"{type(k)} does not support grouping")
            grouping_keys.extend(cast(list, k._get_grouping_keys()))
    keynames = [k.name for k in grouping_keys]
    keytypes = [k.objtype for k in grouping_keys]
    effectiveKeys = len(grouping_keys)
    # ask the server to compute uniques (and optionally the grouping permutation)
    repMsg = generic_msg(
        cmd="unique",
        args="{} {} {:n} {} {}".format(
            return_groups, assume_sorted, effectiveKeys, " ".join(keynames), " ".join(keytypes)
        ),
    )
    if return_groups:
        # server reply holds three '+'-separated pdarray descriptors
        parts = cast(str, repMsg).split("+")
        permutation = create_pdarray(cast(str, parts[0]))
        segments = create_pdarray(cast(str, parts[1]))
        unique_key_indices = create_pdarray(cast(str, parts[2]))
    else:
        unique_key_indices = create_pdarray(cast(str, repMsg))
    # materialize the unique keys by indexing the original array(s)
    if nkeys == 1:
        unique_keys = pda[unique_key_indices]
    else:
        unique_keys = tuple([a[unique_key_indices] for a in pda])
    if return_groups:
        return (unique_keys, permutation, segments, nkeys)
    else:
        return unique_keys
class GroupByReductionType(enum.Enum):
    # Reduction operators accepted by GroupBy.aggregate; the string value is
    # what is sent to the server.
    SUM = "sum"
    PROD = "prod"
    MEAN = "mean"
    MIN = "min"
    MAX = "max"
    ARGMIN = "argmin"
    ARGMAX = "argmax"
    # NOTE(review): member name looks misspelled (NUNUNIQUE vs NUNIQUE), but
    # renaming would break callers referencing it; the value itself is correct.
    NUNUNIQUE = "nunique"
    ANY = "any"
    ALL = "all"
    OR = "or"
    AND = "and"
    XOR = "xor"

    def __str__(self) -> str:
        """
        Overridden method returns value, which is useful in outputting
        a GroupByReductionType as a request parameter
        """
        return self.value

    def __repr__(self) -> str:
        """
        Overridden method returns value, which is useful in outputting
        a GroupByReductionType as a request parameter
        """
        return self.value
# Frozen set of the reduction names (enum values) supported by GroupBy.
GROUPBY_REDUCTION_TYPES = frozenset(
    member.value for member in GroupByReductionType
)
class GroupBy:
    """
    Group an array or list of arrays by value, usually in preparation
    for aggregating the within-group values of another array.

    Parameters
    ----------
    keys : (list of) pdarray, Strings, or Categorical
        The array to group by value, or if list, the column arrays to group by row
    assume_sorted : bool
        If True, assume keys is already sorted (Default: False)

    Attributes
    ----------
    nkeys : int
        The number of key arrays (columns)
    size : int
        The length of the input array(s), i.e. number of rows
    permutation : pdarray
        The permutation that sorts the keys array(s) by value (row)
    unique_keys : (list of) pdarray, Strings, or Categorical
        The unique values of the keys array(s), in grouped order
    ngroups : int
        The length of the unique_keys array(s), i.e. number of groups
    segments : pdarray
        The start index of each group in the grouped array(s)
    logger : ArkoudaLogger
        Used for all logging operations

    Raises
    ------
    TypeError
        Raised if keys is a pdarray with a dtype other than int64

    Notes
    -----
    Integral pdarrays, Strings, and Categoricals are natively supported, but
    float64 and bool arrays are not.

    For a user-defined class to be groupable, it must inherit from pdarray
    and define or overload the grouping API:
      1) a ._get_grouping_keys() method that returns a list of pdarrays
         that can be (co)argsorted.
      2) (Optional) a .group() method that returns the permutation that
         groups the array
    If the input is a single array with a .group() method defined, method 2
    will be used; otherwise, method 1 will be used.
    """

    # The set of reduction names accepted by aggregate() and friends.
    Reductions = GROUPBY_REDUCTION_TYPES
    def __init__(
        self, keys: Optional[groupable], assume_sorted: bool = False, hash_strings: bool = True, **kwargs
    ) -> None:
        """Initialize a GroupBy, either by grouping ``keys`` or by adopting
        precomputed state passed via the ``orig_keys``/``permutation``/
        ``unique_keys``/``segments`` kwargs."""
        # Type Checks required because @typechecked was removed for causing other issues
        # This prevents non-bool values that can be evaluated to true (ie non-empty arrays)
        # from causing unexpected results. Experienced when forgetting to wrap multiple key arrays in [].
        # See Issue #1267
        if not isinstance(assume_sorted, bool):
            raise TypeError("assume_sorted must be of type bool.")
        if not isinstance(hash_strings, bool):
            raise TypeError("hash_strings must be of type bool.")
        self.logger = getArkoudaLogger(name=self.__class__.__name__)
        self.assume_sorted = assume_sorted
        self.hash_strings = hash_strings
        self.permutation: pdarray
        # registration name, if this GroupBy gets registered later
        self.name: Optional[str] = None
        # Rebuild from precomputed state only when all four kwargs are present.
        if (
            "orig_keys" in kwargs
            and "permutation" in kwargs
            and "unique_keys" in kwargs
            and "segments" in kwargs
        ):
            self.keys = cast(groupable, kwargs.get("orig_keys", None))
            self.unique_keys = kwargs.get("unique_keys", None)
            self.permutation = kwargs.get("permutation", None)
            self.segments = kwargs.get("segments", None)
            self.nkeys = len(self.keys)
        elif keys is None:
            raise ValueError("No keys passed to GroupBy.")
        else:
            # Compute the grouping via unique(return_groups=True).
            self.keys = cast(groupable, keys)
            self.unique_keys, self.permutation, self.segments, self.nkeys = unique(  # type: ignore
                self.keys, return_groups=True, assume_sorted=self.assume_sorted
            )
        self.size = self.permutation.size  # number of rows
        self.ngroups = self.segments.size  # number of groups
def count(self) -> Tuple[groupable, pdarray]:
"""
Count the number of elements in each group, i.e. the number of times
each key appears.
Parameters
----------
none
Returns
-------
unique_keys : (list of) pdarray or Strings
The unique keys, in grouped order
counts : pdarray, int64
The number of times each unique key appears
Examples
--------
>>> a = ak.randint(1,5,10)
>>> a
array([3, 2, 3, 1, 2, 4, 3, 4, 3, 4])
>>> g = ak.GroupBy(a)
>>> keys,counts = g.count()
>>> keys
array([1, 2, 3, 4])
>>> counts
array([1, 2, 4, 3])
"""
cmd = "countReduction"
args = "{} {}".format(cast(pdarray, self.segments).name, self.size)
repMsg = generic_msg(cmd=cmd, args=args)
self.logger.debug(repMsg)
return self.unique_keys, create_pdarray(repMsg)
    def aggregate(
        self, values: groupable, operator: str, skipna: bool = True
    ) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group another
        array of values and apply a reduction to each group's values.

        Parameters
        ----------
        values : pdarray
            The values to group and reduce
        operator: str
            The name of the reduction operator to use
        skipna: bool
            Whether to skip NaN values during the reduction (default True)

        Returns
        -------
        unique_keys : groupable
            The unique keys, in grouped order
        aggregates : groupable
            One aggregate value per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if the requested operator is not supported for the
            values dtype

        Examples
        --------
        >>> keys = ak.arange(0, 10)
        >>> vals = ak.linspace(-1, 1, 10)
        >>> g = ak.GroupBy(keys)
        >>> g.aggregate(vals, 'sum')
        (array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), array([-1, -0.77777777777777768,
        -0.55555555555555536, -0.33333333333333348, -0.11111111111111116,
        0.11111111111111116, 0.33333333333333348, 0.55555555555555536, 0.77777777777777768,
        1]))
        >>> g.aggregate(vals, 'min')
        (array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), array([-1, -0.77777777777777779,
        -0.55555555555555558, -0.33333333333333337, -0.11111111111111116, 0.11111111111111116,
        0.33333333333333326, 0.55555555555555536, 0.77777777777777768, 1]))
        """
        operator = operator.lower()
        if operator not in self.Reductions:
            raise ValueError(f"Unsupported reduction: {operator}\nMust be one of {self.Reductions}")
        # TO DO: remove once logic is ported over to Chapel
        if operator == "nunique":
            return self.nunique(values)
        # All other aggregations operate on pdarray
        if cast(pdarray, values).size != self.size:
            raise ValueError("Attempt to group array using key array of different length")
        # Apply the grouping permutation unless the keys were already sorted.
        if self.assume_sorted:
            permuted_values = cast(pdarray, values)
        else:
            permuted_values = cast(pdarray, values)[cast(pdarray, self.permutation)]
        cmd = "segmentedReduction"
        args = "{} {} {} {}".format(permuted_values.name, self.segments.name, operator, skipna)
        repMsg = generic_msg(cmd=cmd, args=args)
        self.logger.debug(repMsg)
        # argmin/argmax come back as positions in the permuted array; map them
        # back to positions in the original array via the permutation.
        if operator.startswith("arg"):
            return (self.unique_keys, cast(pdarray, self.permutation[create_pdarray(repMsg)]))
        else:
            return self.unique_keys, create_pdarray(repMsg)
    def sum(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and sum each group's values.

        Parameters
        ----------
        values : pdarray
            The values to group and sum
        skipna : bool
            Forwarded to ``aggregate`` (default True); by its name it controls
            whether NaN values are skipped in the server-side reduction

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_sums : pdarray
            One sum per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array

        Notes
        -----
        The grouped sum of a boolean ``pdarray`` returns integers.

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.sum(b)
        (array([2, 3, 4]), array([8, 14, 6]))
        """
        # Delegate to the generic reduction machinery with the "sum" operator.
        return self.aggregate(values, "sum", skipna)
    def prod(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and compute the product of each group's
        values.

        Parameters
        ----------
        values : pdarray
            The values to group and multiply
        skipna : bool
            Forwarded to ``aggregate`` (default True); by its name it controls
            whether NaN values are skipped in the server-side reduction

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_products : pdarray, float64
            One product per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object
        ValueError
            Raised if the key array size does not match the values size
            or if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if prod is not supported for the values dtype

        Notes
        -----
        The return dtype is always float64.

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.prod(b)
        (array([2, 3, 4]), array([12, 108.00000000000003, 8.9999999999999982]))
        """
        # Delegate to the generic reduction machinery with the "prod" operator.
        return self.aggregate(values, "prod", skipna)
    def mean(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and compute the mean of each group's
        values.

        Parameters
        ----------
        values : pdarray
            The values to group and average
        skipna : bool
            Forwarded to ``aggregate`` (default True); by its name it controls
            whether NaN values are skipped in the server-side reduction

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_means : pdarray, float64
            One mean value per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object
        ValueError
            Raised if the key array size does not match the values size
            or if the operator is not in the GroupBy.Reductions array

        Notes
        -----
        The return dtype is always float64.

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.mean(b)
        (array([2, 3, 4]), array([2.6666666666666665, 2.7999999999999998, 3]))
        """
        # Delegate to the generic reduction machinery with the "mean" operator.
        return self.aggregate(values, "mean", skipna)
    def min(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and return the minimum of each group's
        values.

        Parameters
        ----------
        values : pdarray
            The values to group and find minima
        skipna : bool
            Forwarded to ``aggregate`` (default True); by its name it controls
            whether NaN values are skipped in the server-side reduction

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_minima : pdarray
            One minimum per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object or if min is
            not supported for the values dtype
        ValueError
            Raised if the key array size does not match the values size
            or if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if min is not supported for the values dtype

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.min(b)
        (array([2, 3, 4]), array([1, 1, 3]))
        """
        # Reject bool up front; the server-side reduction only accepts numeric dtypes.
        if values.dtype == bool:
            raise TypeError("min is only supported for pdarrays of dtype float64, uint64, and int64")
        return self.aggregate(values, "min", skipna)
    def max(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and return the maximum of each
        group's values.

        Parameters
        ----------
        values : pdarray
            The values to group and find maxima
        skipna : bool
            Forwarded to ``aggregate`` (default True); by its name it controls
            whether NaN values are skipped in the server-side reduction

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_maxima : pdarray
            One maximum per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object or if max is
            not supported for the values dtype
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if max is not supported for the values dtype

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.max(b)
        (array([2, 3, 4]), array([4, 4, 3]))
        """
        # Reject bool up front; the server-side reduction only accepts numeric dtypes.
        if values.dtype == bool:
            raise TypeError("max is only supported for pdarrays of dtype float64, uint64, and int64")
        return self.aggregate(values, "max", skipna)
    def argmin(self, values: pdarray) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and return the location of the first
        minimum of each group's values.

        Parameters
        ----------
        values : pdarray
            The values to group and find argmin

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_argminima : pdarray, int64
            One index per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object or if argmin
            is not supported for the values dtype
        ValueError
            Raised if the key array size does not match the values
            size or if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if argmin is not supported for the values dtype

        Notes
        -----
        The returned indices refer to the original values array as
        passed in, not the permutation applied by the GroupBy instance.

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.argmin(b)
        (array([2, 3, 4]), array([5, 4, 2]))
        """
        # Reject bool up front; the server-side reduction only accepts numeric dtypes.
        if values.dtype == bool:
            raise TypeError("argmin is only supported for pdarrays of dtype float64, uint64, and int64")
        return self.aggregate(values, "argmin")
    def argmax(self, values: pdarray) -> Tuple[groupable, pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and return the location of the first
        maximum of each group's values.

        Parameters
        ----------
        values : pdarray
            The values to group and find argmax

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_argmaxima : pdarray, int64
            One index per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray object or if argmax
            is not supported for the values dtype
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array

        Notes
        -----
        The returned indices refer to the original values array as passed in,
        not the permutation applied by the GroupBy instance.

        Examples
        --------
        >>> a = ak.randint(1,5,10)
        >>> a
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> g = ak.GroupBy(a)
        >>> g.keys
        array([3, 3, 4, 3, 3, 2, 3, 2, 4, 2])
        >>> b = ak.randint(1,5,10)
        >>> b
        array([3, 3, 3, 4, 1, 1, 3, 3, 3, 4])
        >>> g.argmax(b)
        (array([2, 3, 4]), array([9, 3, 2]))
        """
        # Reject bool up front; the server-side reduction only accepts numeric dtypes.
        if values.dtype == bool:
            raise TypeError("argmax is only supported for pdarrays of dtype float64, uint64, and int64")
        return self.aggregate(values, "argmax")
def nunique(self, values: groupable) -> Tuple[groupable, pdarray]:
"""
Using the permutation stored in the GroupBy instance, group another
array of values and return the number of unique values in each group.
Parameters
----------
values : pdarray, int64
The values to group and find unique values
Returns
-------
unique_keys : groupable
The unique keys, in grouped order
group_nunique : groupable
Number of unique values per unique key in the GroupBy instance
Raises
------
TypeError
Raised if the dtype(s) of values array(s) does/do not support
the nunique method
ValueError
Raised if the key array size does not match the values size or
if the operator is not in the GroupBy.Reductions array
RuntimeError
Raised if nunique is not supported for the values dtype
Examples
--------
>>> data = ak.array([3, 4, 3, 1, 1, 4, 3, 4, 1, 4])
>>> data
array([3, 4, 3, 1, 1, 4, 3, 4, 1, 4])
>>> labels = ak.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4])
>>> labels
ak.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4])
>>> g = ak.GroupBy(labels)
>>> g.keys
ak.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4])
>>> g.nunique(data)
array([1,2,3,4]), array([2, 2, 3, 1])
# Group (1,1,1) has values [3,4,3] -> there are 2 unique values 3&4
# Group (2,2,2) has values [1,1,4] -> 2 unique values 1&4
# Group (3,3,3) has values [3,4,1] -> 3 unique values
# Group (4) has values [4] -> 1 unique value
"""
# TO DO: defer to self.aggregate once logic is ported over to Chapel
# return self.aggregate(values, "nunique")
ukidx = self.broadcast(arange(self.ngroups), permute=True)
# Test if values is single array, i.e. either pdarray, Strings,
# or Categorical (the last two have a .group() method).
# Can't directly test Categorical due to circular import.
if isinstance(values, pdarray):
if cast(pdarray, values).dtype != akint64 and cast(pdarray, values).dtype != akuint64:
raise TypeError("nunique unsupported for this dtype")
togroup = [ukidx, values]
elif hasattr(values, "group"):
togroup = [ukidx, values]
else:
for v in values:
if (
isinstance(values, pdarray)
and cast(pdarray, values).dtype != akint64
and cast(pdarray, values).dtype != akuint64
):
raise TypeError("nunique unsupported for this dtype")
togroup = [ukidx] + list(values)
# Find unique pairs of (key, val)
g = GroupBy(togroup)
# Group unique pairs again by original key
g2 = GroupBy(g.unique_keys[0], assume_sorted=True)
# Count number of unique values per key
_, nuniq = g2.count()
# Re-join unique counts with original keys (sorting guarantees same order)
return self.unique_keys, nuniq
    def any(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group another
        array of values and perform an "or" reduction on each group.

        Parameters
        ----------
        values : pdarray, bool
            The values to group and reduce with "or"

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_any : pdarray, bool
            One bool per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray or if the pdarray
            dtype is not bool
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array
        """
        # Logical "or" reduction is only defined for boolean values.
        if values.dtype != bool:
            raise TypeError("any is only supported for pdarrays of dtype bool")
        return self.aggregate(values, "any")  # type: ignore
    def all(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:
        """
        Using the permutation stored in the GroupBy instance, group
        another array of values and perform an "and" reduction on
        each group.

        Parameters
        ----------
        values : pdarray, bool
            The values to group and reduce with "and"

        Returns
        -------
        unique_keys : (list of) pdarray or Strings
            The unique keys, in grouped order
        group_any : pdarray, bool
            One bool per unique key in the GroupBy instance

        Raises
        ------
        TypeError
            Raised if the values array is not a pdarray or if the pdarray
            dtype is not bool
        ValueError
            Raised if the key array size does not match the values size or
            if the operator is not in the GroupBy.Reductions array
        RuntimeError
            Raised if all is not supported for the values dtype
        """
        # Logical "and" reduction is only defined for boolean values.
        if values.dtype != bool:
            raise TypeError("all is only supported for pdarrays of dtype bool")
        return self.aggregate(values, "all")  # type: ignore
def OR(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:
"""
Bitwise OR of values in each segment.
Using the permutation stored in the GroupBy instance, group
another array of values and perform a bitwise OR reduction on
each group.
Parameters
----------
values : pdarray, int64
The values to group and reduce with OR
Returns
-------
unique_keys : (list of) pdarray or Strings
The unique keys, in grouped order
result : pdarray, int64
Bitwise OR of values in segments corresponding to keys
Raises
------
TypeError
Raised if the values array is not a pdarray or if the pdarray
dtype is not int64
ValueError
Raised if the key array size does not match the values size or
if the operator is not in the GroupBy.Reductions array
RuntimeError
Raised if all is not supported for the values dtype
"""
if values.dtype != akint64 and values.dtype != akuint64:
raise TypeError("OR is only supported for pdarrays of dtype int64 or uint64")
return self.aggregate(values, "or") # type: ignore
def AND(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:
"""
Bitwise AND of values in each segment.
Using the permutation stored in the GroupBy instance, group
another array of values and perform a bitwise AND reduction on
each group.
Parameters
----------
values : pdarray, int64
The values to group and reduce with AND
Returns
-------
unique_keys : (list of) pdarray or Strings
The unique keys, in grouped order
result : pdarray, int64
Bitwise AND of values in segments corresponding to keys
Raises
------
TypeError
Raised if the values array is not a pdarray or if the pdarray
dtype is not int64
ValueError
Raised if the key array size does not match the values size or
if the operator is not in the GroupBy.Reductions array
RuntimeError
Raised if all is not supported for the values dtype
"""
if values.dtype != akint64 and values.dtype != akuint64:
raise TypeError("AND is only supported for pdarrays of dtype int64 or uint64")
return self.aggregate(values, "and") # type: ignore
def XOR(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:
"""
Bitwise XOR of values in each segment.
Using the permutation stored in the GroupBy instance, group
another array of values and perform a bitwise XOR reduction on
each group.
Parameters
----------
values : pdarray, int64
The values to group and reduce with XOR
Returns
-------
unique_keys : (list of) pdarray or Strings
The unique keys, in grouped order
result : pdarray, int64
Bitwise XOR of values in segments corresponding to keys
Raises
------
TypeError
Raised if the values array is not a pdarray or if the pdarray
dtype is not int64
ValueError
Raised if the key array size does not match the values size or
if the operator is not in the GroupBy.Reductions array
RuntimeError
Raised if all is not supported for the values dtype
"""
if values.dtype != akint64 and values.dtype != akuint64:
raise TypeError("XOR is only supported for pdarrays of dtype int64 or uint64")
return self.aggregate(values, "xor") # type: ignore
@typechecked
def broadcast(self, values: pdarray, permute: bool = True) -> pdarray:
"""
Fill each group's segment with a constant value.
Parameters
----------
values : pdarray
The values to put in each group's segment
permute : bool
If True (default), permute broadcast values back to the ordering
of the original array on which GroupBy was called. If False, the
broadcast values are grouped by value.
Returns
-------
pdarray
The broadcast values
Raises
------
TypeError
Raised if value is not a pdarray object
ValueError
Raised if the values array does not have one
value per segment
Notes
-----
This function is a sparse analog of ``np.broadcast``. If a
GroupBy object represents a sparse matrix (tensor), then
this function takes a (dense) column vector and replicates
each value to the non-zero elements in the corresponding row.
Examples
--------
>>> a = ak.array([0, 1, 0, 1, 0])
>>> values = ak.array([3, 5])
>>> g = ak.GroupBy(a)
# By default, result is in original order
>>> g.broadcast(values)
array([3, 5, 3, 5, 3])
# With permute=False, result is in grouped order
>>> g.broadcast(values, permute=False)
array([3, 3, 3, 5, 5]
>>> a = ak.randint(1,5,10)
>>> a
array([3, 1, 4, 4, 4, 1, 3, 3, 2, 2])
>>> g = ak.GroupBy(a)
>>> keys,counts = g.count()
>>> g.broadcast(counts > 2)
array([True False True True True False True True False False])
>>> g.broadcast(counts == 3)
array([True False True True True False True True False False])
>>> g.broadcast(counts < 4)
array([True True True True True True True True True True])
"""
if values.size != self.segments.size:
raise ValueError("Must have one value per segment")
cmd = "broadcast"
args = "{} {} {} {} {}".format(
self.permutation.name, self.segments.name, values.name, permute, self.size
)
repMsg = generic_msg(cmd=cmd, args=args)
return create_pdarray(repMsg)
@staticmethod
def build_from_components(user_defined_name: str = None, **kwargs) -> GroupBy:
"""
function to build a new GroupBy object from component keys and permutation.
Parameters
----------
user_defined_name : str (Optional) Passing a name will init the new GroupBy
and assign it the given name
kwargs : dict Dictionary of components required for rebuilding the GroupBy.
Expected keys are "orig_keys", "permutation", "unique_keys", and "segments"
Returns
-------
GroupBy
The GroupBy object created by using the given components
"""
if (
"orig_keys" in kwargs
and "permutation" in kwargs
and "unique_keys" in kwargs
and "segments" in kwargs
):
g = GroupBy(None, **kwargs)
g.name = user_defined_name
return g
else:
missingKeys = []
if "orig_keys" not in kwargs:
missingKeys.append("orig_keys")
if "permutation" not in kwargs:
missingKeys.append("permutation")
if "unique_keys" not in kwargs:
missingKeys.append("unique_keys")
if "segments" not in kwargs:
missingKeys.append("segments")
raise ValueError(f"Can't build GroupBy. kwargs is missing required keys: {missingKeys}.")
def _get_groupby_required_pieces(self) -> Dict:
"""
Internal function that returns a dictionary with all required components of self
Returns
-------
Dict
Dictionary of all required components of self
Components (keys, permutation)
"""
requiredPieces = frozenset(["keys", "permutation", "unique_keys", "segments"])
return {piece_name: getattr(self, piece_name) for piece_name in requiredPieces}
    @typechecked
    def register(self, user_defined_name: str) -> GroupBy:
        """
        Register this GroupBy object and underlying components with the Arkouda server

        Parameters
        ----------
        user_defined_name : str
            user defined name the GroupBy is to be registered under,
            this will be the root name for underlying components

        Returns
        -------
        GroupBy
            The same GroupBy which is now registered with the arkouda server and has an updated name.
            This is an in-place modification, the original is returned to support a
            fluid programming style.
            Please note you cannot register two different GroupBys with the same name.

        Raises
        ------
        TypeError
            Raised if user_defined_name is not a str
        RegistrationError
            If the server was unable to register the GroupBy with the user_defined_name

        See also
        --------
        unregister, attach, unregister_groupby_by_name, is_registered

        Notes
        -----
        Objects registered with the server are immune to deletion until
        they are unregistered.
        """
        from arkouda import Categorical
        # By registering unique properties first, we can ensure no overlap in naming between
        # two registered GroupBy's since this will throw a RegistrationError before any of
        # the dynamically created names are registered
        self.permutation.register(f"{user_defined_name}.permutation")
        self.segments.register(f"{user_defined_name}.segments")
        # Single-array keys: embed the object type in the registered name so
        # attach() can later decide how to recreate each component.
        if isinstance(self.keys, (Strings, pdarray, Categorical)):
            self.keys.register(f"{user_defined_name}_{self.keys.objtype}.keys")
            self.unique_keys.register(f"{user_defined_name}_{self.keys.objtype}.unique_keys")
        elif isinstance(self.keys, Sequence):
            for x in range(len(self.keys)):
                # Possible for multiple types in a sequence, so we have to check each key's
                # type individually
                # NOTE(review): elements of unsupported type are silently skipped here
                # (no else/raise inside the loop) — confirm this is intended.
                if isinstance(self.keys[x], (Strings, pdarray, Categorical)):
                    self.keys[x].register(f"{x}_{user_defined_name}_{self.keys[x].objtype}.keys")
                    self.unique_keys[x].register(
                        f"{x}_{user_defined_name}_{self.keys[x].objtype}.unique_keys"
                    )
        else:
            raise RegistrationError(f"Unsupported key type found: {type(self.keys)}")
        # Record the root name only after every component registered successfully.
        self.name = user_defined_name
        return self
def unregister(self):
"""
Unregister this GroupBy object in the arkouda server which was previously
registered using register() and/or attached to using attach()
Raises
------
RegistrationError
If the object is already unregistered or if there is a server error
when attempting to unregister
See also
--------
register, attach, unregister_groupby_by_name, is_registered
Notes
-----
Objects registered with the server are immune to deletion until
they are unregistered.
"""
if not self.name:
raise RegistrationError(
"This item does not have a name and does not appear to be registered."
)
# Unregister all components in keys in the case of a Sequence
if isinstance(self.keys, Sequence):
for x in range(len(self.keys)):
self.keys[x].unregister()
self.unique_keys[x].unregister()
else:
self.keys.unregister()
self.unique_keys.unregister()
self.permutation.unregister()
self.segments.unregister()
self.name = None # Clear our internal GroupBy object name
    def is_registered(self) -> bool:
        """
        Return True if the object is contained in the registry

        Returns
        -------
        bool
            Indicates if the object is contained in the registry

        Raises
        ------
        RegistrationError
            Raised if there's a server-side error or a mismatch of registered components

        See Also
        --------
        register, attach, unregister, unregister_groupby_by_name

        Notes
        -----
        Objects registered with the server are immune to deletion until
        they are unregistered.
        """
        import warnings
        from arkouda import Categorical
        if self.name is None:
            return False  # unnamed GroupBy cannot be registered
        if isinstance(self.keys, Sequence):  # Sequence - Check for all components
            from re import compile
            registry = list_registry()
            # Match "<i>_<name>_<objtype>.keys" / ".unique_keys" entries; the
            # lookahead clause counts the Categorical unique_keys by its
            # ".categories" sub-component only.
            # Only check for a single component of Categorical to ensure correct count.
            regEx = compile(
                f"^\\d+_{self.name}_.+\\.keys$|^\\d+_{self.name}_.+\\.unique_keys$|"
                f"^\\d+_{self.name}_.+\\.unique_keys(?=\\.categories$)"
            )
            cat_regEx = compile(f"^\\d+_{self.name}_{Categorical.objtype}\\.keys(?=\\.codes$)")
            simple_registered = list(filter(regEx.match, registry))
            cat_registered = list(filter(cat_regEx.match, registry))
            # permutation and segments are registered under the plain root name.
            if f"{self.name}.permutation" in registry:
                simple_registered.append(f"{self.name}.permutation")
            if f"{self.name}.segments" in registry:
                simple_registered.append(f"{self.name}.segments")
            # In the case of Categorical, unique keys is registered with only categories and codes and
            # overwrites keys.categories
            # Expected count: one .keys and one .unique_keys per key, plus
            # permutation and segments.
            total = (len(self.keys) * 2) + 2
            registered = len(simple_registered) + len(cat_registered)
            if 0 < registered < total:
                # Partial registration indicates a corrupted/incomplete register().
                warnings.warn(
                    f"WARNING: GroupBy {self.name} expected {total} components to be registered,"
                    f" but only located {registered}."
                )
                return False
            else:
                return registered == total
        else:
            parts_registered: List[bool] = []
            for k, v in GroupBy._get_groupby_required_pieces(self).items():
                # Categorical unique_keys is not independently registered, so skip it.
                if k != "unique_keys" or not isinstance(self.unique_keys, Categorical):
                    reg = v.is_registered()
                    parts_registered.append(reg)
            if any(parts_registered) and not all(parts_registered):  # test for error
                warnings.warn(
                    f"WARNING: GroupBy {self.name} expected {len(parts_registered)} "
                    f"components to be registered, but only located {sum(parts_registered)}."
                )
                return False
            else:
                return any(parts_registered)
    @staticmethod
    def attach(user_defined_name: str) -> GroupBy:
        """
        Function to return a GroupBy object attached to the registered name in the
        arkouda server which was registered using register()

        Parameters
        ----------
        user_defined_name : str
            user defined name which GroupBy object was registered under

        Returns
        -------
        GroupBy
            The GroupBy object created by re-attaching to the corresponding server components

        Raises
        ------
        RegistrationError
            if user_defined_name is not registered

        See Also
        --------
        register, is_registered, unregister, unregister_groupby_by_name
        """
        from re import compile, match
        from arkouda.categorical import Categorical
        keys: List[groupable] = []
        unique_keys = []
        matches = []
        # Match "<name>_<objtype>.keys" / ".unique_keys", with an optional
        # positional "<i>_" prefix used by multi-key GroupBys; the final clause
        # finds the Categorical unique_keys via its ".categories" sub-component.
        regEx = compile(
            f"^{user_defined_name}_.+\\.keys$|^\\d+_{user_defined_name}_.+\\.keys$|"
            f"^{user_defined_name}_.+\\.unique_keys$|^\\d+_{user_defined_name}_.+\\.unique_keys$|"
            f"^(?:\\d+_)?{user_defined_name}_{Categorical.objtype}\\.unique_keys(?=\\.categories$)"
        )
        # Using the regex, cycle through the registered items and find all the pieces of
        # the GroupBy's keys
        for name in list_registry():
            x = match(regEx, name)
            if x is not None:
                matches.append(x.group())
        # Sorting brings positional "<i>_" prefixes back in key order.
        matches.sort()
        if len(matches) == 0:
            raise RegistrationError(f"No registered elements with name '{user_defined_name}'")
        for name in matches:
            # Parse the name for the dtype and use the proper create method to create the element
            if f"_{Strings.objtype}." in name or f"_{pdarray.objtype}." in name:
                keys_resp = cast(str, generic_msg(cmd="attach", args=name))
                # The attach response embeds the object type as the third token.
                dtype = keys_resp.split()[2]
                if ".unique_keys" in name:
                    if dtype == Strings.objtype:
                        unique_keys.append(Strings.from_return_msg(keys_resp))
                    else:  # pdarray
                        unique_keys.append(create_pdarray(keys_resp))
                else:
                    if dtype == Strings.objtype:
                        keys.append(Strings.from_return_msg(keys_resp))
                    else:  # pdarray
                        keys.append(create_pdarray(keys_resp))
            elif f"_{Categorical.objtype}.unique_keys" in name:
                # Due to unique_keys overwriting keys.categories, we have to use unique_keys.categories
                # to create the keys Categorical
                unique_key = Categorical.attach(name)
                key_name = name.replace(".unique_keys", ".keys")
                catParts = {
                    "categories": unique_key.categories,
                    "codes": pdarray.attach(f"{key_name}.codes"),
                    "_akNAcode": pdarray.attach(f"{key_name}._akNAcode"),
                }
                # Grab optional components if they exist
                # NOTE(review): `matches` only holds .keys/.unique_keys names captured by
                # regEx, so these membership tests look like they can never succeed —
                # verify whether `registry` was intended here instead.
                if f"{user_defined_name}.permutation" in matches:
                    catParts["permutation"] = pdarray.attach(f"{key_name}.permutation")
                if f"{user_defined_name}.segments" in matches:
                    catParts["segments"] = pdarray.attach(f"{key_name}.segments")
                unique_keys.append(unique_key)
                keys.append(Categorical(None, **catParts))
            else:
                raise RegistrationError(
                    f"Unknown type associated with registered item: {user_defined_name}."
                    f" Supported types are: {groupable}"
                )
        if len(keys) == 0:
            raise RegistrationError(
                f"Unable to attach to '{user_defined_name}' or '{user_defined_name}'"
                f" is not registered"
            )
        # permutation and segments are registered under the plain root name.
        perm_resp = generic_msg(cmd="attach", args=f"{user_defined_name}.permutation")
        segments_resp = generic_msg(cmd="attach", args=f"{user_defined_name}.segments")
        parts = {
            "orig_keys": keys if len(keys) > 1 else keys[0],
            "unique_keys": unique_keys if len(unique_keys) > 1 else unique_keys[0],
            "permutation": create_pdarray(perm_resp),
            "segments": create_pdarray(segments_resp),
        }
        g = GroupBy.build_from_components(
            user_defined_name, **parts
        )  # Call build_from_components method
        return g
@staticmethod
@typechecked
def unregister_groupby_by_name(user_defined_name: str) -> None:
"""
Function to unregister GroupBy object by name which was registered
with the arkouda server via register()
Parameters
----------
user_defined_name : str
Name under which the GroupBy object was registered
Raises
-------
TypeError
if user_defined_name is not a string
RegistrationError
if there is an issue attempting to unregister any underlying components
See Also
--------
register, unregister, attach, is_registered
"""
# We have 2 components, unregister both of them
from re import compile, match
from arkouda.categorical import Categorical
registry = list_registry()
# keys, unique_keys, or categorical components
regEx = compile(
f"^{user_defined_name}_.+\\.keys$|^\\d+_{user_defined_name}_.+\\.keys$|"
f"^{user_defined_name}_.+\\.unique_keys$|^\\d+_{user_defined_name}_.+\\.unique_keys$|"
f"^(?:\\d+_)?{user_defined_name}_{Categorical.objtype}\\.unique_keys(?=\\.categories$)|"
f"^(\\d+_)?{user_defined_name}_{Categorical.objtype}\\.keys\\.(_)?([A-Z,a-z])+$"
)
for name in registry:
# Search through registered items and find matches to the given name
x = match(regEx, name)
if x is not None:
print(x.group())
# Only categorical requires a separate unregister case
if f"_{Categorical.objtype}.unique_keys" in x.group():
Categorical.unregister_categorical_by_name(x.group())
else:
unregister_pdarray_by_name(x.group())
if f"{user_defined_name}.permutation" in registry:
unregister_pdarray_by_name(f"{user_defined_name}.permutation")
if f"{user_defined_name}.segments" in registry:
unregister_pdarray_by_name(f"{user_defined_name}.segments")
def broadcast(
    segments: pdarray,
    values: pdarray,
    size: Union[int, np.int64, np.uint64] = -1,
    permutation: Union[pdarray, None] = None,
):
    """
    Broadcast a dense column vector to the rows of a sparse matrix or grouped array.

    Parameters
    ----------
    segments : pdarray, int64
        Offsets of the start of each row in the sparse matrix or grouped array.
        Must be sorted in ascending order.
    values : pdarray
        The values to broadcast, one per row (or group)
    size : int
        The total number of nonzeros in the matrix. If permutation is given, this
        argument is ignored and the size is inferred from the permutation array.
    permutation : pdarray, int64
        The permutation to go from the original ordering of nonzeros to the ordering
        grouped by row. To broadcast values back to the original ordering, this
        permutation will be inverted. If no permutation is supplied, it is assumed
        that the original nonzeros were already grouped by row. In this case, the
        size argument must be given.

    Returns
    -------
    pdarray
        The broadcast values, one per nonzero

    Raises
    ------
    ValueError
        - If segments and values are different sizes
        - If segments are empty
        - If number of nonzeros (either user-specified or inferred from permutation)
          is less than one

    Examples
    --------
    # Define a sparse matrix with 3 rows and 7 nonzeros
    >>> row_starts = ak.array([0, 2, 5])
    >>> nnz = 7
    # Broadcast the row number to each nonzero element
    >>> row_number = ak.arange(3)
    >>> ak.broadcast(row_starts, row_number, nnz)
    array([0 0 1 1 1 2 2])
    # If the original nonzeros were in reverse order...
    >>> permutation = ak.arange(6, -1, -1)
    >>> ak.broadcast(row_starts, row_number, permutation=permutation)
    array([2 2 1 1 1 0 0])
    """
    # Guard clauses: exactly one value per segment, and at least one segment.
    if segments.size != values.size:
        raise ValueError("segments and values arrays must be same size")
    if segments.size == 0:
        raise ValueError("cannot broadcast empty array")
    if permutation is not None:
        # A supplied permutation always determines the output size.
        pname = permutation.name
        permute = True
        size = permutation.size
    else:
        if size == -1:
            raise ValueError("must either supply permutation or size")
        pname = "none"
        permute = False
    if size < 1:
        raise ValueError("result size must be greater than zero")
    repMsg = generic_msg(
        cmd="broadcast", args=f"{pname} {segments.name} {values.name} {permute} {size}"
    )
    return create_pdarray(repMsg)
|
|
import re
import numpy as np
from enum import Enum
from syntactical_analysis.sa_utils import State, Input, Token
__all__ = [
'Lexer',
]
class Lexer:
    """Table-driven lexer for math expressions given as lists of characters / LaTeX commands.

    A hand-written state-transition table maps (current state, input class)
    pairs to successor states; accepting states emit Token objects for
    identifiers/keywords, integers, reals, and single-symbol tokens.
    """

    def __init__(self):
        # Separator symbols (some regex-escaped LaTeX forms such as \{ and \cdot).
        self.separators = ['(', ')', '[', ']', r'\{', r'\}', '.', ',', ':', ';', ' ', r'\cdot']
        # Operator symbols, both plain characters and LaTeX commands.
        self.operators = ['+', '-', '=', '/', '>', '<', '%', r'\%',
                          r'\&', r'\times', r'\div', r'\ast', r'\cup', r'\cap'
                          ]
        # Raw transition table: state_table_data[state][input_class] -> next state.
        # Row 0 is the initial state; states 2, 4 and 7 accept an
        # identifier/keyword, integer and real respectively (their all-zero
        # rows return to the initial state); 11 is the dead state.
        # NOTE(review): the table has 6 columns while checkInput can return 7
        # Input classes — assumes Input.getvalue folds them onto 6 column
        # indices; confirm against syntactical_analysis.sa_utils.Input.
        self.state_table_data = [
            [1, 3, 11, 8, 9, 10],
            [1, 2, 11, 2, 2, 2],
            [0, 0, 0, 0, 0, 0],
            [4, 3, 5, 4, 4, 4],
            [0, 0, 0, 0, 0, 0],
            [11, 6, 11, 11, 11, 11],
            [7, 6, 7, 7, 7, 7],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0]
        ]
        # Wrap every raw table entry into a State object.
        self.state_table = [[None for _ in range(len(self.state_table_data[0]))]
                            for _ in range(len(self.state_table_data))]
        for i in range(len(self.state_table_data)):
            for j in range(len(self.state_table_data[0])):
                self.state_table[i][j] = State(self.state_table_data[i][j], None)
        # Reserved words that lex as keyword tokens instead of identifiers.
        self.keywords = ['sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'and', 'or']
        # Greek-letter LaTeX commands; classified as "special" characters.
        self.greeks = [r'\sigma', r'\Sigma', r'\gamma', r'\delta', r'\Delta',
                       r'\eta', r'\theta', r'\epsilon', r'\lambda', r'\mu',
                       r'\Pi', r'\rho', r'\phi', r'\omega', r'\ohm']
        self.special_chars = [r'\infty', r'\exists', r'\forall', r'\#', r'\$'] + self.greeks

    @staticmethod
    def get_characters(chars):
        """Return `chars` without the literal escape tokens \\n, \\t, \\r."""
        result = []
        for c in chars:
            if c not in [r'\n', r'\t', r'\r']:
                result.append(c)
        return result

    @staticmethod
    def change_ip_order(exp):
        """Reorder an expression around '=' for the downstream parser.

        If '=' appears at an index greater than 1, the left and right sides
        are swapped (rhs = lhs); if '=' is at index 0 or 1 the expression is
        returned unchanged; with no '=', an implicit 'e =' prefix is added.
        """
        if "=" in exp:
            ix_eq = exp.index('=')
            if ix_eq > 1:
                result = exp[ix_eq + 1:] + exp[ix_eq:ix_eq + 1] + exp[:ix_eq]
                return result
            else:
                return exp
        else:
            result = ['e', '='] + exp
            return result

    def checkInput(self, chars):
        """Classify one character/LaTeX command into an Input class.

        :raises ValueError: if the symbol belongs to no known class.
        """
        if re.match("[a-zA-Z]", chars):
            return Input.LETTER
        elif re.match("[0-9]", chars):
            return Input.DIGIT
        elif chars in ['$']:
            return Input.DOLLAR
        elif chars in ['.', r'\cdot']:
            return Input.DOT
        elif chars in self.separators:
            return Input.SEP
        elif chars in self.operators:
            return Input.OP
        elif chars in self.special_chars:
            return Input.SPECIAL
        else:
            raise ValueError("INVALID INPUT: "+str(chars))

    def generate_tokens(self, expression):
        """Run the state machine over `expression` and return the Token list.

        Token value codes used here: 0 identifier, 1 keyword, 2 integer,
        3 real, and 4/5/6 for the single-symbol tokens emitted by states
        8/9/10.
        """
        # changes inputs to format i= expression
        expression = self.change_ip_order(expression)
        # added this because ints, letters and real require a separator at the end to move to final state
        expression = expression + [' ']
        prev_state = State.INITIAL
        lexeme = ''
        tokens = []
        i = 0
        while i < len(expression):
            backup = False
            final = False
            alpha = expression[i]
            # NOTE(review): getvalue is accessed without parentheses —
            # assumes State.getvalue / Input.getvalue are properties; confirm.
            x = prev_state.getvalue
            y = self.checkInput(alpha).getvalue
            current_state = self.state_table[x][y]
            if current_state.getvalue == 2:
                # Identifier/keyword accepted; re-read the current symbol (backup).
                if lexeme in self.keywords:
                    tokens.append(Token(value=1, lexeme=lexeme))
                else:
                    tokens.append(Token(value=0, lexeme=lexeme))
                final = True
                backup = True
            elif current_state.getvalue == 4:
                # Integer accepted.
                tokens.append(Token(value=2, lexeme=lexeme))
                final = True
                backup = True
            elif current_state.getvalue == 7:
                # Real number accepted.
                tokens.append(Token(value=3, lexeme=lexeme))
                final = True
                backup = True
            elif current_state.getvalue == 8:
                tokens.append(Token(value=4, lexeme=alpha))
                final = True
                backup = False
            elif current_state.getvalue == 9:
                tokens.append(Token(value=5, lexeme=alpha))
                final = True
                backup = False
            elif current_state.getvalue == 10:
                tokens.append(Token(value=6, lexeme=alpha))
                final = True
                backup = False
            elif current_state.getvalue == 11:
                raise ValueError("Reached Dead State")
            if final:
                # Start collecting the next lexeme from the initial state.
                lexeme = ''
                prev_state = State.INITIAL
            else:
                lexeme += alpha
                prev_state = current_state
            if not backup:
                i += 1
        # ignore the last separator added at the end of expression at the start of this function
        return tokens[:-1]
def main():
    """Demo: tokenize a small expression and print the resulting tokens."""
    sample = ['z', '=', 'x', '+', 'y']
    token_stream = Lexer().generate_tokens(sample)
    print("Generated tokens are ")
    for token in token_stream:
        print(token.name, token.lexeme)
# Run the tokenizer demo when executed as a script.
if __name__ == '__main__':
    main()
|
|
import os
import mini_topsim.parameters as par
import numpy as np
from scipy.interpolate import interp1d
def init_sputtering():
    """
    Initializes the get_sputter_yield module variable.

    Binds a callable to the module-level ``get_sputter_yield`` name: a
    table-based interpolator when a sputter-yield file is configured in the
    parameters, otherwise an implementation of the Yamamura formula.
    """
    global get_sputter_yield
    if par.SPUTTER_YIELD_FILE != '':
        get_sputter_yield = Sputter_yield_table(par.SPUTTER_YIELD_FILE)
    else:
        get_sputter_yield = Sputter_yield_Yamamura(
            par.SPUTTER_YIELD_0, par.SPUTTER_YIELD_F, par.SPUTTER_YIELD_B)
class Sputter_yield_Yamamura():
    """
    Callable implementing the Yamamura angular sputter-yield formula:
    Y(theta) = y0 * cos(theta)^(-f) * exp(b * (1 - 1/cos(theta))).
    """

    def __init__(self, y0, f, b):
        # Yamamura parameters: normal-incidence yield y0 and shape factors f, b.
        self.y0 = y0
        self.f = f
        self.b = b

    def __call__(self, costheta, sintheta=None):
        """
        Calculates the sputter yield and its derivative w.r.t. theta.

        :param costheta: cosine of the angle between the surface normal
            and the sputter beam direction (numpy array).
        :param sintheta: sine of that angle; derived from costheta if None.
        :returns: tuple (Y, dY/dtheta)
        """
        exp_term = np.exp(self.b * (1 - 1/costheta))
        y = self.y0 * exp_term * costheta**(-self.f)
        if sintheta is None:
            sintheta = np.sin(np.arccos(costheta))
        y_deriv = (self.y0 * sintheta * exp_term
                   * costheta**(-self.f - 2)
                   * (self.f * costheta - self.b))
        # Grazing incidence (costheta == 0) produces NaN; physically Y -> 0 there.
        y[np.isnan(y)] = 0
        y_deriv[np.isnan(y_deriv)] = 0
        return y, y_deriv
class Sputter_yield_table():
    """
    Callable that interpolates sputter yields from a tabulated data file.
    """

    def __init__(self, filename):
        # Table files live in the 'tables' directory next to this module.
        table_path = os.path.join(os.path.dirname(__file__), 'tables/', filename)
        print(table_path)
        table = np.genfromtxt(table_path, skip_header=1)
        # Column 0: tilt angle theta, column 1: sputter yield.
        self.yfunc = interp1d(table[:, 0], table[:, 1])

    def __call__(self, costheta, sintheta=None):
        """
        Interpolates the sputter yield for the given angle.

        :param costheta: cosine of the angle between the surface normal
            and the sputter beam direction.
        :param sintheta: sine of that angle (default value None).
        :returns: interpolated sputter yield Y
        """
        # Prefer sintheta when available: sine is injective on the
        # relevant interval [-pi/2, pi/2].
        if sintheta is not None:
            theta = np.arcsin(sintheta)
        else:
            theta = np.arccos(costheta)
        return self.yfunc(theta)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import os
import pandas as pd
import numpy as np
import argparse
def main():
    """Compute per-SNP posterior probabilities from log10 Bayes factors.

    Reads a TSV, drops rows with a missing Bayes-factor column, normalises
    the Bayes factors into posterior probabilities, adds a cumulative sum
    (descending by posterior probability) and writes the result as TSV.
    """
    # Fix: `reduce` is not a builtin in Python 3; the original raised NameError.
    from functools import reduce

    # Parse args
    args = parse_arguments()

    # Load
    df = pd.read_csv(args.i, sep="\t")

    # Drop rows where the Bayes-factor column is missing
    df = df.loc[~pd.isnull(df[args.col]), :]

    # log10 of the sum of (un-logged) Bayes factors, accumulated pairwise
    post_sum = reduce(sum_log10, list(df[args.col]))

    # Posterior probability of each SNP = BF / sum(BF)
    df["post_prob"] = df[args.col].apply(lambda logbf: (10**logbf) / (10**post_sum))

    # Cumulative posterior probability, largest posteriors first
    df = df.sort_values("post_prob", ascending=False)
    df["post_prob_cumsum"] = np.cumsum(df["post_prob"])

    df.to_csv(args.o, sep="\t", index=False)

    return 0
def sum_log10(a, b):
    """Return log10(10**a + 10**b), i.e. addition performed in log10 space."""
    total = 10.0**a + 10.0**b
    return np.log10(total)
def parse_arguments():
    """Parse command line arguments for the credible-set calculation."""
    parser = argparse.ArgumentParser(description="Calculate credible set.")
    # Required input/output paths.
    parser.add_argument('--i', metavar="<input>",
                        help=('Input file'),
                        required=True,
                        type=str)
    parser.add_argument('--o', metavar="<output>",
                        help=('Output file'),
                        required=True,
                        type=str)
    # Optional: which column holds the log10 Bayes factors.
    parser.add_argument('--col', metavar="<col name>",
                        help=('Column containing SNP log10 bayes factors (Default: bayesian_add_log10_bf)'),
                        required=False,
                        default="bayesian_add_log10_bf",
                        type=str)
    return parser.parse_args()
# Script entry point. Note: main() returns 0 but the value is not
# propagated to the process exit code (would need sys.exit(main())).
if __name__ == '__main__':
    main()
|
|
import copy
import numpy as np
from pgdrive.envs import PGDriveEnvV2
from pgdrive.scene_creator.vehicle.base_vehicle import BaseVehicle
from pgdrive.scene_creator.vehicle_module.distance_detector import DetectorMask
from pgdrive.utils import panda_position
def _line_intersect(theta, center, point1, point2, maximum: float = 10000):
"""
theta: the direction of the laser
center: the center of the source of laser
point1: one point of the line intersection
point2: another poing of the line intersection
maximum: the return value if no intersection
"""
x0, y0 = center
x1, y1 = point1
x2, y2 = point2
dx1 = np.cos(theta)
dy1 = np.sin(theta)
dx2 = x2 - x1
dy2 = y2 - y1
DET = -dx1 * dy2 + dy1 * dx2
if abs(DET) < 1e-9:
return maximum
r = 1.0 / DET * (-dy2 * (x1 - x0) + dx2 * (y1 - y0))
s = 1.0 / DET * (-dy1 * (x1 - x0) + dx1 * (y1 - y0))
if 0 - 1e-5 < s < 1 + 1e-5 and r >= 0:
return r
return maximum
def _search_angle(point1, point2, num_lasers, start, heading, perceive_distance: float = 50):
    """Fractional hit distance of each of `num_lasers` evenly-spaced lasers
    cast from `start` (offset by `heading`) against segment (point1, point2).

    Returns an array of length num_lasers; a value of 1.0 means that laser
    missed the segment.
    """
    headings = heading + np.arange(0, num_lasers) * 2 * np.pi / num_lasers
    fractions = []
    for angle in headings:
        dist = _line_intersect(theta=angle, center=start, point1=point1, point2=point2, maximum=1e8)
        assert 0 <= dist < 1e9
        # Normalize hits by the sentinel distance; misses map to exactly 1.0.
        fractions.append(dist / 1e8 if dist <= 1e8 else 1.0)
    return np.asarray(fractions)
def _test_mask(mask, stick_1_heading_deg, stick_2_heading_deg, max_span, stick1_x, stick2_x):
    """Check a DetectorMask against a brute-force ray-casting reference.

    Places two "sticks" on the x axis, updates `mask` with their poses, and
    verifies each stick's laser mask covers at least every laser that the
    analytic reference (_search_angle) says can hit the other stick.

    :param mask: DetectorMask under test
    :param stick_1_heading_deg: heading of stick 1 in degrees
    :param stick_2_heading_deg: heading of stick 2 in degrees
    :param max_span: the span (length) of each stick
    :param stick1_x: x coordinate of stick 1
    :param stick2_x: x coordinate of stick 2
    """
    stick_1_heading = np.deg2rad(stick_1_heading_deg) % (2 * np.pi)
    stick_2_heading = np.deg2rad(stick_2_heading_deg) % (2 * np.pi)
    stick1_pos = (stick1_x, 0)
    stick2_pos = (stick2_x, 0)
    mask.update_mask(
        position_dict={
            "stick1": stick1_pos,
            "stick2": stick2_pos
        },
        heading_dict={
            "stick1": stick_1_heading,
            "stick2": stick_2_heading
        },
        is_target_vehicle_dict={
            "stick1": True,
            "stick2": True
        }
    )
    mask_1 = mask.get_mask("stick1")
    mask_2 = mask.get_mask("stick2")
    # Reference: which of stick1's 360 lasers can hit stick2 (modelled as a
    # vertical segment of length max_span through stick2's position)?
    left_of_stick2 = (stick2_pos[0], -max_span / 2)
    right_of_stick2 = (stick2_pos[0], max_span / 2)
    real_mask_1 = _search_angle(
        point1=left_of_stick2,
        point2=right_of_stick2,
        num_lasers=360,
        start=stick1_pos,
        heading=stick_1_heading,
        perceive_distance=100 * max_span
    ) < 1.0
    # The mask must be conservative: it may enable extra lasers, but must
    # include every laser that can really hit.
    assert all(mask_1[real_mask_1])  # mask 1 should at least include all True of real mask.
    if abs(stick1_x - stick2_x) > max_span:
        # Non-overlapping sticks: allow only a small discretization error.
        assert sum(abs(mask_1.astype(int) - real_mask_1.astype(int))) <= 3
    # Same check in the other direction (stick2's lasers against stick1).
    left_of_stick1 = (stick1_pos[0], -max_span / 2)
    right_of_stick1 = (stick1_pos[0], max_span / 2)
    real_mask_2 = _search_angle(
        point1=left_of_stick1,
        point2=right_of_stick1,
        num_lasers=360,
        start=stick2_pos,
        heading=stick_2_heading,
        perceive_distance=100 * max_span
    ) < 1.0
    assert all(mask_2[real_mask_2])  # mask 2 should at least include all True of real mask.
    if abs(stick1_x - stick2_x) > max_span:
        assert sum(abs(mask_2.astype(int) - real_mask_2.astype(int))) <= 3
def test_detector_mask():
    """Stress-test DetectorMask over spans, positions and random headings."""
    positions = [0, -1, 1, -100, 100]
    base_angles = [0, 0.01, 30, 89, 90, 91, 130, 180, 181, 270, 360, 400]
    headings_deg = base_angles + [-a for a in base_angles]

    # Hand-picked regression cases first:
    # an infinitely long (1e7) stick 2 right in front of (0.01m) stick 1.
    _test_mask(DetectorMask(num_lasers=360, max_span=1e7), -270, 30, 1e7, 0, -1)
    short_mask = DetectorMask(num_lasers=360, max_span=1)
    _test_mask(short_mask, -180, -300, 1, 0, -1)
    _test_mask(short_mask, -361, 270, 1, 0, -100)

    # Randomized sweep.
    for span in [1e7, 1, 0.1]:
        detector = DetectorMask(num_lasers=360, max_span=span)
        for x1 in positions:
            for x2 in positions:
                sample1 = np.random.choice(headings_deg, 5)
                sample2 = np.random.choice(headings_deg, 5)
                for h1 in sample1:
                    for h2 in sample2:
                        _test_mask(detector, h1, h2, span, x1, x2)
                print("Finish. ", span, x1, x2)
def test_detector_mask_in_lidar():
    """Compare the DetectorMask against unmasked lidar raycasts in a live env.

    At each step the full (mask=None) lidar cloud is computed; the detector
    mask must cover every laser that actually hit something, and perceiving
    again with the mask must reproduce the exact same cloud points.
    """
    env = PGDriveEnvV2({"traffic_density": 1.0, "map": "SSSSS", "random_traffic": False})
    try:
        env.reset()
        # Stick span: a conservative bound on a vehicle's planar extent.
        span = 2 * max(env.vehicle.WIDTH, env.vehicle.LENGTH)
        detector_mask = DetectorMask(
            env.config.vehicle_config.lidar.num_lasers, span, max_distance=env.config.vehicle_config.lidar.distance
        )
        ep_count = 0
        for _ in range(3000):
            o, r, d, i = env.step([0, 1])
            mask_ratio = env.scene_manager.detector_mask.get_mask_ratio()
            print("Mask ratio: ", mask_ratio)
            print("We have: {} vehicles!".format(env.scene_manager.traffic_manager.get_vehicle_num()))
            # Reference: a full raycast with no mask applied.
            v = env.vehicle
            v.lidar.perceive(
                v.position,
                v.heading_theta,
                v.pg_world.physics_world.dynamic_world,
                extra_filter_node={v.chassis_np.node()},
                detector_mask=None
            )
            old_cloud_points = np.array(copy.deepcopy(env.vehicle.lidar.get_cloud_points()))
            # Rebuild the mask from the current poses of all vehicles.
            position_dict = {}
            heading_dict = {}
            is_target_vehicle_dict = {}
            for v in env.scene_manager.traffic_manager.vehicles:
                position_dict[v.name] = v.position
                heading_dict[v.name] = v.heading_theta
                is_target_vehicle_dict[v.name] = True if isinstance(v, BaseVehicle) else False
            detector_mask.update_mask(
                position_dict=position_dict, heading_dict=heading_dict, is_target_vehicle_dict=is_target_vehicle_dict
            )
            # Lasers that actually hit something have a fraction < 1.0.
            real_mask = old_cloud_points != 1.0
            mask = detector_mask.get_mask(env.vehicle.name)
            stack = np.stack([old_cloud_points, real_mask, mask])
            if not all(mask[real_mask]):
                print('stop')
            assert all(mask[real_mask])  # mask 1 should at least include all True of real mask.
            print(
                "Num of true in our mask: {}, in old mask: {}. Overlap: {}. We have {} more.".format(
                    sum(mask.astype(int)), sum(real_mask.astype(int)), sum(mask[real_mask].astype(int)),
                    sum(mask.astype(int)) - sum(real_mask.astype(int))
                )
            )
            # assert sum(abs(mask.astype(int) - real_mask.astype(int))) <= 3
            # Perceiving again with the mask must not change the cloud points.
            v = env.vehicle
            v.lidar.perceive(
                v.position,
                v.heading_theta,
                v.pg_world.physics_world.dynamic_world,
                extra_filter_node={v.chassis_np.node()},
                detector_mask=mask
            )
            new_cloud_points = np.array(copy.deepcopy(env.vehicle.lidar.get_cloud_points()))
            np.testing.assert_almost_equal(old_cloud_points, new_cloud_points)
            if d:
                env.reset()
                ep_count += 1
                if ep_count == 3:
                    break
    finally:
        env.close()
def test_cutils_lidar():
    """Check that three lidar perceive implementations agree.

    Compares the production lidar.perceive against a pure-Python
    reimplementation (_old_perceive) and the fake-cutils fallback, both
    without and with a detector mask.
    """
    def _old_perceive(
        self,
        vehicle_position,
        heading_theta,
        pg_physics_world,
        extra_filter_node: set = None,
        detector_mask: np.ndarray = None
    ):
        """
        Call me to update the perception info
        """
        # Fix: the original used `is not "WRONG"` — identity comparison with
        # a str literal is interning-dependent and raises a SyntaxWarning.
        assert detector_mask != "WRONG"
        # coordinates problem here! take care
        extra_filter_node = extra_filter_node or set()
        pg_start_position = panda_position(vehicle_position, self.height)
        # init
        self.cloud_points.fill(1.0)
        self.detected_objects = []
        # lidar calculation use pg coordinates
        mask = self.mask
        # laser_heading = self._lidar_range + heading_theta
        # point_x = self.perceive_distance * np.cos(laser_heading) + vehicle_position[0]
        # point_y = self.perceive_distance * np.sin(laser_heading) + vehicle_position[1]
        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        for laser_index in range(self.num_lasers):
            # # coordinates problem here! take care
            if (detector_mask is not None) and (not detector_mask[laser_index]):
                # update vis
                if self.cloud_points_vis is not None:
                    laser_end = self._get_laser_end(laser_index, heading_theta, vehicle_position)
                    self._add_cloud_point_vis(laser_index, laser_end)
                continue
            laser_end = self._get_laser_end(laser_index, heading_theta, vehicle_position)
            result = pg_physics_world.rayTestClosest(pg_start_position, laser_end, mask)
            node = result.getNode()
            if node in extra_filter_node:
                # Fall back to all tests.
                results = pg_physics_world.rayTestAll(pg_start_position, laser_end, mask)
                hits = results.getHits()
                hits = sorted(hits, key=lambda ret: ret.getHitFraction())
                for result in hits:
                    if result.getNode() in extra_filter_node:
                        continue
                    self.detected_objects.append(result)
                    self.cloud_points[laser_index] = result.getHitFraction()
                    break
            else:
                hits = result.hasHit()
                self.cloud_points[laser_index] = result.getHitFraction()
                if node:
                    self.detected_objects.append(result)
            # update vis
            if self.cloud_points_vis is not None:
                self._add_cloud_point_vis(laser_index, result.getHitPos() if hits else laser_end)
        return self.cloud_points

    from pgdrive.utils.cutils import _get_fake_cutils
    _fake_cutils = _get_fake_cutils()

    def fake_cutils_perceive(
        self,
        vehicle_position,
        heading_theta,
        pg_physics_world,
        extra_filter_node: set = None,
        detector_mask: np.ndarray = None
    ):
        # Delegate to the pure-Python fake cutils implementation.
        cloud_points, _, _ = _fake_cutils.cutils_perceive(
            cloud_points=self.cloud_points,
            detector_mask=detector_mask.astype(dtype=np.uint8) if detector_mask is not None else None,
            mask=self.mask,
            lidar_range=self._lidar_range,
            perceive_distance=self.perceive_distance,
            heading_theta=heading_theta,
            vehicle_position_x=vehicle_position[0],
            vehicle_position_y=vehicle_position[1],
            num_lasers=self.num_lasers,
            height=self.height,
            pg_physics_world=pg_physics_world,
            extra_filter_node=extra_filter_node if extra_filter_node else set(),
            require_colors=self.cloud_points_vis is not None,
            ANGLE_FACTOR=self.ANGLE_FACTOR,
            MARK_COLOR0=self.MARK_COLOR[0],
            MARK_COLOR1=self.MARK_COLOR[1],
            MARK_COLOR2=self.MARK_COLOR[2]
        )
        return cloud_points

    env = PGDriveEnvV2({"map": "C", "traffic_density": 1.0, "environment_num": 10})
    try:
        for _ in range(3):
            env.reset()
            ep_count = 0
            for _ in range(3000):
                o, r, d, i = env.step([0, 1])
                v = env.vehicle
                new_cloud_points = v.lidar.perceive(
                    v.position,
                    v.heading_theta,
                    v.pg_world.physics_world.dynamic_world,
                    extra_filter_node={v.chassis_np.node()},
                    detector_mask=None
                )
                new_cloud_points = np.array(copy.deepcopy(new_cloud_points))
                old_cloud_points = _old_perceive(
                    v.lidar, v.position, v.heading_theta, v.pg_world.physics_world.dynamic_world, {v.chassis_np.node()},
                    None
                )
                np.testing.assert_almost_equal(new_cloud_points, old_cloud_points)
                fake_cutils_cloud_points = fake_cutils_perceive(
                    v.lidar,
                    v.position,
                    v.heading_theta,
                    v.pg_world.physics_world.dynamic_world,
                    extra_filter_node={v.chassis_np.node()},
                    detector_mask=None
                )
                np.testing.assert_almost_equal(new_cloud_points, fake_cutils_cloud_points)
                # assert sum(abs(mask.astype(int) - real_mask.astype(int))) <= 3
                # Perceive again with the scene's detector mask applied; the
                # result must match the unmasked pure-Python reference.
                v = env.vehicle
                v.lidar.perceive(
                    v.position,
                    v.heading_theta,
                    v.pg_world.physics_world.dynamic_world,
                    extra_filter_node={v.chassis_np.node()},
                    detector_mask=env.scene_manager.detector_mask.get_mask(v.name)
                )
                new_cloud_points = np.array(copy.deepcopy(env.vehicle.lidar.get_cloud_points()))
                np.testing.assert_almost_equal(old_cloud_points, new_cloud_points)
                if d:
                    env.reset()
                    ep_count += 1
                    if ep_count == 3:
                        break
    finally:
        env.close()
if __name__ == '__main__':
    # Only the cutils-lidar consistency test runs by default; the other
    # two are kept commented out (slow, run on demand).
    # test_detector_mask()
    # test_detector_mask_in_lidar()
    test_cutils_lidar()
|
|
#!/usr/bin/env python
import os, random, subprocess, sys, threading
from decimal import *
import numpy as np
# Candidate BLAS backends in the order used throughout this script.
libs = ["atlas", "cublas", "mkl", "plasma"]
# libs = ["cublas", "plasma", "ublas"]
# NOTE(review): `prefix` (10**9) appears unused in this file's visible code.
prefix = 1000**3
# Process manipulation
#------------------------------------------------------------------------------#
class UblasRun:
    """Context manager around the ./build/run benchmark subprocess.

    Feeds "lib m k n" queries on stdin and reads one timing line per query
    from stdout. A sentinel "none 0 0 0" line shuts the process down.
    """
    def __init__(self):
        self.p = subprocess.Popen("./build/run", bufsize=102400,
                                  stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # Initial configuration line expected by the subprocess.
        self.p.stdin.write("10 1000 1000 1000\n")
        self.p.stdin.flush()
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Tell the subprocess to terminate.
        self.p.stdin.write("none 0 0 0\n")
        self.p.stdin.flush()
    def get_time(self, lib, m, k, n):
        """Return the raw (string) run time reported for `lib` on an m-k-n GEMM."""
        query = " ".join([str(lib), str(m), str(k), str(n)])
        self.p.stdin.write(query + "\n")
        self.p.stdin.flush()
        return self.p.stdout.readline().strip()
class UblasQuery:
    """Context manager around the ./build/query model subprocess.

    Sends "m k n" triples on stdin and reads the selected library name from
    stdout; "0 0 0" is the agreed shutdown sentinel.
    """
    def __init__(self):
        self.p = subprocess.Popen("./build/query", bufsize=102400,
                                  stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.p.stdin.write("0 0 0\n")
        self.p.stdin.flush()
    def get_lib(self, m, k, n):
        """Return the library name the model selects for an m-k-n GEMM."""
        self.p.stdin.write(" ".join([str(m), str(k), str(n)]) + "\n")
        self.p.stdin.flush()
        return self.p.stdout.readline().strip()
# Training
#------------------------------------------------------------------------------#
def make_train_file(testpoints, vals, path="training.data"):
    """Write a training file: a "<N> 3 4" header, then for each testpoint a
    line of its 3 dimensions followed by a 4-entry target row where the
    fastest library is marked "1" and the rest "-1"."""
    with open(path, "w") as fp:
        fp.write("%d 3 4\n\n" % len(testpoints))
        for dims, timings in zip(testpoints, vals):
            targets = ["-1"] * 4
            # One-hot the library with the minimum timing.
            targets[timings.index(min(timings))] = "1"
            fp.write(" ".join(map(str, dims)) + "\n")
            fp.write(" ".join(targets) + "\n")
# Analysis
#------------------------------------------------------------------------------#
def test_random(bound, samples):
    """Time all libraries on random GEMM sizes and score the model's choices.

    Generates `samples` random (m, k, n) testpoints in [2, bound], writes
    them to run-in/query-in, shells out to ./build/run (timings) and
    ./build/query (model selections), and returns the number of testpoints
    where the selected library matches the empirically fastest one.

    NOTE(review): Python 2 code (print statements, map/zip returning lists).
    Everything below the first `return` is unreachable leftover analysis
    code — e.g. it indexes `libs` with "ublas", which is not in `libs` and
    would raise ValueError if it ever ran.
    """
    print " Random Sampling"
    print " range:  ",bound
    print " samples:",samples
    if len(sys.argv) > 1:
        logpoints = int(sys.argv[1])
    else:
        logpoints = 10
    testpoints = [[random.randint(2,bound),random.randint(2,bound),
                   random.randint(2,bound)] for i in range(samples)]
    # testpoints = [[x,x,x] for x in range(8,512,2)]
    # dimpoints = range(100,1000,200)
    # dimpoints = [80]+[int(dp) for dp in np.logspace(2, 2.71, num=12)]
    # dimpoints = [int(dp) for dp in np.logspace(1.78, 2.71, num=logpoints)]
    # dimpoints = [int(dp) for dp in np.logspace(1.78, 3.0, num=logpoints)]
    # print dimpoints
    # testpoints = [[m,k,n] for m in dimpoints for k in dimpoints for n in dimpoints]
    numm = len(testpoints)
    print "num testpoints",numm
    # sys.exit(0)
    # One "lib m k n" query line per (library, testpoint) pair.
    with open("run-in", "w") as fp:
        fp.write("8 "+str(bound)+" "+str(bound)+" "+str(bound)+"\n");
        for tp in testpoints:
            for l in libs:
                fp.write(l+" "+" ".join(map(str, tp))+"\n")
    with open("query-in", "w") as fp:
        for tp in testpoints:
            fp.write(" ".join(map(str, tp))+"\n")
    print "=== running commands ==="
    os.system("./build/run < run-in > run-out")
    os.system("./build/query < query-in > query-out")
    print "========================"
    with open("query-out") as fp:
        selected = fp.read().split()
    # run-out holds len(libs) timings per testpoint; regroup into rows.
    with open("run-out") as fp:
        vals = map(lambda x: Decimal(x), fp.read().split())
    vals = zip(*[iter(vals)]*len(libs))
    vals = [[v for v in val] for val in vals]
    # make_train_file(testpoints, vals, str(len(testpoints))+"-training.data")
    # sys.exit(0)
    optimals = [libs[p.index(min(p))] for p in vals]
    return sum([1 for i,j in zip(selected,optimals) if i==j])
    # --- unreachable from here down; kept for reference ---
    # ublas = [v[libs.index(i)] for i,v in zip(selected,vals)]
    # atlas = [i for i in optimals if i == "atlas"]
    # cublas = [i for i in optimals if i == "cublas"]
    # mkl = [i for i in optimals if i == "mkl"]
    # plasma = [i for i in optimals if i == "plasma"]
    # print optimals
    # print ublas
    # print vals
    ctatlas = sum([v[libs.index("atlas")] for v in vals])
    ctcublas = sum([v[libs.index("cublas")] for v in vals])
    ctmkl = sum([v[libs.index("mkl")] for v in vals])
    ctplasma = sum([v[libs.index("plasma")] for v in vals])
    ctublas = sum([v[libs.index("ublas")] for v in vals])
    # ctublas = sum(ublas)
    # print "c. time atlas  =",ctatlas
    # print "c. time cublas =",ctcublas
    # print "c. time mkl    =",ctmkl
    # print "c. time plasma =",ctplasma
    # print
    # print "c. time ublas  =",ctublas
    # print
    # print
    # print " atlas saving =",(ctatlas-ctublas)/ctatlas
    # print "cublas saving =",(ctcublas-ctublas)/ctcublas
    # print "   mkl saving =",(ctmkl-ctublas)/ctmkl
    # print "plasma saving =",(ctplasma-ctublas)/ctplasma
    # return (ctatlas-ctublas)/ctatlas, (ctcublas-ctublas)/ctcublas,\
    #        (ctmkl-ctublas)/ctmkl, (ctplasma-ctublas)/ctplasma
    # print "ublas  =",float(len(ublas))/samples
    # print "atlas  =",float(len(atlas))/samples
    # print "cublas =",float(len(cublas))/samples
    # print "mkl    =",float(len(mkl))/samples
    # print "plasma =",float(len(plasma))/samples
    # bads = [[tp,p,l] for tp,p,l in zip(testpoints,vals,selected) if l != libs[p.index(min(p))]]
    # bads = [[tp, (libs[p.index(min(p))], min(p)), (l,p[libs.index(l)])] for tp,p,l in bads]
    # for b in bads:
    #     print "  ",b
    # print len(bads),"/",len(selected),"wrong."
def test_speedup(trials=100):
    """Average per-library time savings over repeated random samplings and
    write them to the CSV file 'speedup'.

    NOTE(review): as currently written, test_random() returns a single int
    (the count of correct selections), so the 4-tuple unpack below will
    raise — this path only worked with an older test_random that returned
    the four savings ratios. Python 2 code (print statements).
    """
    atlas, cublas, mkl, plasma = [], [], [], []
    for i in range(trials):
        print "="*50,"trial",(i+1),"/",trials
        a, c, m, p = test_random(512, 10)
        atlas.append(a)
        cublas.append(c)
        mkl.append(m)
        plasma.append(p)
    atlas = map(float, atlas)
    cublas = map(float, cublas)
    mkl = map(float, mkl)
    plasma = map(float, plasma)
    print " atlas saving =",np.mean(atlas), "~",np.std(atlas)
    print "cublas saving =",np.mean(cublas),"~",np.std(cublas)
    print "   mkl saving =",np.mean(mkl),   "~",np.std(mkl)
    print "plasma saving =",np.mean(plasma),"~",np.std(plasma)
    with open("speedup", "w") as fp:
        fp.write("ATLAS,CuBLAS,MKL,PLASMA\n")
        for a,c,m,p in zip(atlas,cublas,mkl,plasma):
            fp.write(",".join(map(str, [a,c,m,p]))+"\n")
def test_training_sampling(trials=100):
    """Record model-selection accuracy (fraction correct of 100 random
    samples) for each trial, one value per line, into the file
    'training-samples-accuracy'.

    NOTE(review): Python 2 code; the variable name 'accuarcy' is a typo
    kept as-is.
    """
    ss = 100
    accuarcy = []
    for i in range(trials):
        print "="*50,i+1,"/",trials
        accuarcy.append(float(test_random(512,ss))/ss)
    with open("training-samples-accuracy", "w") as fp:
        fp.write("\n".join(map(str, accuarcy)))
# Sample UBLAS selection
#------------------------------------------------------------------------------#
if __name__ == "__main__":
    # Prints the number of correct selections out of 100 random samples.
    # NOTE(review): the divisor is 1, not the sample count — possibly meant /100.
    # with UblasRun() as run:
    #     print "time for atlas",run.get_time("atlas", 800, 800, 800)
    # with UblasQuery() as query:
    #     print "lib is",query.get_lib(400,400,400)
    # atlas, cublas, mkl, plasma = test_random(512, 10)
    # print " atlas saving =",atlas
    # print "cublas saving =",cublas
    # print "   mkl saving =",mkl
    # print "plasma saving =",plasma
    print float(test_random(512, 100))/1
    # test_speedup()
    # test_training_sampling()
|
|
import torchvision, torchvision.transforms
import sys, os
sys.path.insert(0,"../torchxrayvision/")
import torchxrayvision as xrv
import matplotlib.pyplot as plt
import torch
from torch.nn import functional as F
import glob
import numpy as np
import skimage, skimage.filters
import captum, captum.attr
import torch, torch.nn
import pickle
import pandas as pd
def get_data(dataset_str, masks=False, unique_patients=False,
             transform=None, data_aug=None, merge=True, views=None,
             pathologies=None, dataset_dir="/home/groups/akshaysc/joecohen/"):
    """Assemble the torchxrayvision dataset(s) named in `dataset_str`.

    :param dataset_str: string containing any of the dataset keys
        ("covid", "pc", "rsna", "nih", "siim", "chex", "google",
        "mimic_ch", "openi", "vin", "objectcxr"); each key found adds the
        corresponding dataset.
    :param masks: load semantic/pathology masks where the dataset supports it.
    :param unique_patients: restrict to one image per patient where supported.
    :param transform: transform applied to each image.
    :param data_aug: augmentation transform.
    :param merge: if True, relabel all datasets to the union of their
        pathology labels and return a single Merge_Dataset; otherwise
        return the list of datasets.
    :param views: radiograph views to keep (default ["PA", "AP"]).
    :param pathologies: if given, relabel every dataset to this label set.
    :param dataset_dir: root directory holding the image folders and CSVs
        (previously hard-coded; parameterized for portability).
    """
    # Fix: avoid a shared mutable default argument for `views`.
    if views is None:
        views = ["PA", "AP"]
    datasets = []
    if "covid" in dataset_str:
        dataset = xrv.datasets.COVID19_Dataset(
            imgpath=dataset_dir + "/covid-chestxray-dataset/images",
            csvpath=dataset_dir + "/covid-chestxray-dataset/metadata.csv",
            transform=transform, data_aug=data_aug, semantic_masks=masks,
            views=views)
        datasets.append(dataset)
    if "pc" in dataset_str:
        dataset = xrv.datasets.PC_Dataset(
            imgpath=dataset_dir + "/images-512-PC",
            transform=transform, data_aug=data_aug,
            unique_patients=unique_patients,
            views=views)
        datasets.append(dataset)
    if "rsna" in dataset_str:
        dataset = xrv.datasets.RSNA_Pneumonia_Dataset(
            imgpath=dataset_dir + "/kaggle-pneumonia-jpg/stage_2_train_images_jpg",
            transform=transform, data_aug=data_aug,
            unique_patients=unique_patients, pathology_masks=masks,
            views=views)
        datasets.append(dataset)
    if "nih" in dataset_str:
        dataset = xrv.datasets.NIH_Dataset(
            imgpath=dataset_dir + "/images-512-NIH",
            transform=transform, data_aug=data_aug,
            unique_patients=unique_patients, pathology_masks=masks,
            views=views)
        datasets.append(dataset)
    if "siim" in dataset_str:
        dataset = xrv.datasets.SIIM_Pneumothorax_Dataset(
            imgpath=dataset_dir + "SIIM_TRAIN_TEST/dicom-images-train/",
            csvpath=dataset_dir + "SIIM_TRAIN_TEST/train-rle.csv",
            transform=transform, data_aug=data_aug,
            unique_patients=unique_patients, pathology_masks=masks)
        datasets.append(dataset)
    if "chex" in dataset_str:
        dataset = xrv.datasets.CheX_Dataset(
            imgpath=dataset_dir + "/CheXpert-v1.0-small",
            csvpath=dataset_dir + "/CheXpert-v1.0-small/train.csv",
            transform=transform, data_aug=data_aug,
            unique_patients=False,
            views=views)
        datasets.append(dataset)
    if "google" in dataset_str:
        dataset = xrv.datasets.NIH_Google_Dataset(
            imgpath=dataset_dir + "/images-512-NIH",
            transform=transform, data_aug=data_aug,
            views=views)
        datasets.append(dataset)
    if "mimic_ch" in dataset_str:
        # NOTE: MIMIC images live on a different mount than dataset_dir.
        dataset = xrv.datasets.MIMIC_Dataset(
            imgpath="/scratch/users/joecohen/data/MIMICCXR-2.0/files/",
            csvpath=dataset_dir + "/MIMICCXR-2.0/mimic-cxr-2.0.0-chexpert.csv.gz",
            metacsvpath=dataset_dir + "/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz",
            transform=transform, data_aug=data_aug,
            unique_patients=unique_patients,
            views=views)
        datasets.append(dataset)
    if "openi" in dataset_str:
        dataset = xrv.datasets.Openi_Dataset(
            imgpath=dataset_dir + "/OpenI/images/",
            transform=transform, data_aug=data_aug,
            views=views)
        datasets.append(dataset)
    if "vin" in dataset_str:
        dataset = xrv.datasets.VinBrain_Dataset(
            imgpath=dataset_dir + "vinbigdata-chest-xray-abnormalities-detection/train",
            csvpath=dataset_dir + "vinbigdata-chest-xray-abnormalities-detection/train.csv",
            pathology_masks=masks,
            transform=transform, data_aug=data_aug,
            views=views)
        datasets.append(dataset)
    if "objectcxr" in dataset_str:
        dataset = xrv.datasets.ObjectCXR_Dataset(imgzippath=dataset_dir + "/object-CXR/train.zip",
                                                 csvpath=dataset_dir + "/object-CXR/train.csv",
                                                 pathology_masks=masks,
                                                 transform=transform,
                                                 data_aug=data_aug)
        datasets.append(dataset)
    if pathologies is not None:  # idiom fix (was: `if not pathologies is None`)
        for d in datasets:
            xrv.datasets.relabel_dataset(pathologies, d)
    if merge:
        # Relabel every dataset to the union of all label sets before merging.
        newlabels = set()
        for d in datasets:
            newlabels = newlabels.union(d.pathologies)
        # if "Support Devices" in newlabels:
        #     newlabels.remove("Support Devices")
        print(list(newlabels))
        for d in datasets:
            xrv.datasets.relabel_dataset(list(newlabels), d)
        dmerge = xrv.datasets.Merge_Dataset(datasets)
        return dmerge
    else:
        return datasets
|
|
# By Nick Erickson
# Contains Save/Load Functions
import json
import os
import pickle
import numpy as np
from utils import globs as G
def save_memory_subset(agent, pointer_start, pointer_end, frame_saved, skip=8):
    """Save every `skip`-th transition in [pointer_start, pointer_end) of the
    agent's circular replay memory to memory_frame_<frame_saved>.npz."""
    memory = agent.brain.brain_memory
    # Unwrap the circular buffer when the requested range wraps around.
    if pointer_end < pointer_start:
        pointer_end += memory.max_size
    idxs = [i % memory.max_size for i in range(pointer_start, pointer_end, skip)]
    target = agent.data_location + 'memory_frame_' + str(frame_saved) + '.npz'
    np.savez_compressed(
        target,
        s=memory.s[idxs],
        a=memory.a[idxs],
        r=memory.r[idxs],
        s_=memory.s_[idxs],
        t=memory.t[idxs]
    )
    print('Memory Saved...')
def load_weights(agent, filename_input=None): # TODO: Update this function
    """Set up result/data folders and load model weights per agent.args.mode.

    Modes: 'run' (evaluate only — epsilon 0, never train), 'train_old'
    (resume training from saved weights), 'gather' (collect data, loading
    weights only when explicitly overridden); any other mode starts a
    fresh network without loading.
    """
    if agent.args.directory == 'default':
        agent.args.directory = G.CUR_FOLDER
    results_location = G.RESULT_FOLDER_FULL + '/' + agent.args.directory
    data_location = G.DATA_FOLDER_FULL + '/' + agent.args.directory
    os.makedirs(results_location,exist_ok=True) # Generates results folder
    os.makedirs(data_location,exist_ok=True) # Generates data folder
    agent.results_location = results_location + '/'
    agent.data_location = data_location + '/'
    # Resolve the weight file name: explicit override wins, then a saved
    # run count suffix, then an explicit filename_input argument.
    filename = 'model'
    if agent.args.weight_override:
        filename = agent.args.weight_override
    elif agent.args.run_count_load > 0:
        agent.run_count = agent.args.run_count_load
        agent.metrics.total_size = agent.run_count
        filename = filename + '_' + str(agent.args.run_count_load)
    if filename_input:
        filename = filename_input
    if agent.args.mode == 'run':
        # NOTE(review): broad except deliberately ignores agents whose
        # hyperparameters have no `extra.observe` field.
        try:
            agent.h.extra.observe = 999999999 # Never train
        except:
            pass
        agent.mode = 'observe'
        agent.epsilon = 0
        print("Now we load weight from " + agent.results_location + filename + '.h5')
        agent.brain.model.load_weights(agent.results_location + filename + '.h5')
        print("Weights loaded successfully")
    elif agent.args.mode == 'train_old': # Continue training old network
        agent.epsilon = agent.h.epsilon_init
        print("Now we load weight from " + agent.results_location + filename + '.h5')
        agent.brain.model.load_weights(agent.results_location + filename + '.h5')
        print("Weights loaded successfully, training")
    elif agent.args.mode == 'gather': # Gather data, then exit
        print('Gathering Data')
        if agent.args.weight_override:
            agent.epsilon = agent.h.epsilon_init
            print ("Now we load weight from " + agent.results_location + filename + '.h5')
            agent.brain.model.load_weights(agent.results_location + filename + '.h5')
    else: # Train new
        print('Training new network!')
        agent.epsilon = agent.h.epsilon_init
def save_weights(agent, addon=None):
    """Save the agent's model weights (.h5) and architecture (.json),
    optionally suffixing the filenames with `addon`."""
    name = 'model'
    if addon:
        name = 'model_' + str(addon)
    print("Saving Model...")
    agent.brain.model.save_weights(agent.results_location + name + '.h5', overwrite=True)
    with open(agent.results_location + name + '.json', "w") as outfile:
        json.dump(agent.brain.model.to_json(), outfile)
# Saves memory, hyperparams, and screen info
def save_all(agent):
    """Persist the replay memory plus the hyperparameter and screen configs."""
    save_memory_v2(agent)
    save_class(agent.h, agent.data_location + 'hyper')
    save_class(agent.args.screen, agent.data_location + 'screen')
# For Memory_v2 agents
def save_memory_v2(agent):
    """Save the agent's entire replay memory to data_location/memory.npz."""
    memory = agent.brain.brain_memory
    np.savez_compressed(
        agent.data_location + 'memory.npz',
        s=memory.s,
        a=memory.a,
        r=memory.r,
        s_=memory.s_,
        t=memory.t
    )
    print('Memory Saved...')
"""
# Saves memory, hyperparameters, and screen parameters
def saveMemory(agent):
d = np.array(agent.memory.D)
dLen = d.shape[0]
statesShape = list(d[0][0].shape)
states_Shape = list(d[0][3].shape)
states = np.zeros([dLen] + statesShape, dtype='float16')
actions = np.zeros(dLen, dtype='int_')
rewards = np.zeros(dLen, dtype='float64')
states_ = np.zeros([dLen] + states_Shape, dtype='float16')
terminals = np.zeros(dLen, dtype='bool_')
for i in range(dLen):
states[i] = d[i][0]
actions[i] = d[i][1]
rewards[i] = d[i][2]
states_[i] = d[i][3]
terminals[i] = d[i][4]
#print(np.mean(states[i]))
np.savez_compressed(
agent.data_location+'memory.npz',
states=states,
actions=actions,
rewards=rewards,
states_=states_,
terminals=terminals
)
#a = np.load(agent.results_location+'memory.npz')
#return #a
"""
# Saves class info
def save_class(object_, location):
    """Serialize ``object_`` to the file at ``location`` using pickle."""
    with open(location, "wb") as sink:
        pickle.dump(object_, sink)
def load_class(location):
    """Unpickle and return the object stored at ``location``."""
    with open(location, "rb") as source:
        return pickle.load(source)
def load_memory_v2(agent, memory_location, extra=''):
    """Populate the agent's replay memory from a saved .npz archive.

    Args:
        agent: Agent whose ``brain.brain_memory`` arrays are overwritten.
        memory_location: Directory prefix (with trailing separator) of the archive.
        extra: Optional suffix inserted into the archive file name.
    """
    archive = np.load(memory_location + 'memory' + extra + '.npz')
    mem = agent.brain.brain_memory
    mem.s = archive['s']
    mem.a = archive['a']
    mem.r = archive['r']
    mem.s_ = archive['s_']
    mem.t = archive['t']
    # The imported data completely fills the memory, so mark it as full.
    mem.size = mem.s.shape[0]
    mem.max_size = mem.size
    mem.total_saved = mem.size
    mem.is_full = True
    print('Importing', mem.size, 'states')
def load_memory_direct(memory_location, extra=''):
    """Read a saved memory archive and return its raw arrays.

    Returns:
        Tuple ``(s, a, r, s_, t)`` of the stored numpy arrays.
    """
    archive = np.load(memory_location + 'memory' + extra + '.npz')
    return (archive['s'], archive['a'], archive['r'],
            archive['s_'], archive['t'])
"""
def loadMemory(agent, memory_location):
memory = np.load(memory_location+'memory.npz')
states = memory['states']
actions = memory['actions']
rewards = memory['rewards']
states_ = memory['states_']
terminals = memory['terminals']
memoryLen = states.shape[0]
print('Importing', memoryLen, 'states:')
for i in range(memoryLen):
if i % 10000 == 0:
print(i,'/',memoryLen)
agent.memory.add([states[i], actions[i], rewards[i], states_[i], terminals[i]])
print('Import complete!')
"""
|
|
from astropy import table, constants as const, units as u
import numpy as np
import os
import mpmath
# Abbreviations:
# eqd = equivalent duration
# ks = 1000 s (obvious perhaps :), but not a common unit)
#region defaults and constants
# some constants
h, c, k_B = const.h, const.c, const.k_B
default_flarespec_path = os.path.join(os.path.dirname(__file__), 'relative_energy_budget.ecsv')
default_flarespec = table.Table.read(default_flarespec_path, format='ascii.ecsv')
default_flarespec = default_flarespec.filled(0)
fuv = [912., 1700.] * u.AA
nuv = [1700., 3200.] * u.AA
version = '1.0'
# the default function for estimating flare peak flux
@u.quantity_input(eqd=u.s)
def boxcar_height_function_default(eqd):
    """Default peak (boxcar) height for a flare of equivalent duration ``eqd``."""
    seconds = eqd.to('s').value
    return 0.3 * seconds ** 0.6
# other flare defaults
# Central registry of flare-model parameters; _kw_or_default falls back to
# these whenever a caller omits a parameter from **flare_params.
flare_defaults = dict(eqd_min = 100.*u.s,
                      eqd_max = 1e6*u.s,
                      ks_rate = 8/u.d, # rate of ks flares for Si IV (Fig 6 of Loyd+ 2018)
                      cumulative_index = 0.75, # power law index of FUV flares for all stars (Table 5 of Loyd+ 2018)
                      boxcar_height_function = boxcar_height_function_default,
                      decay_boxcar_ratio = 1./2.,
                      BB_SiIV_Eratio=160, # Hawley et al. 2003
                      T_BB = 9000*u.K, # Hawley et al. 2003
                      clip_BB = True,
                      SiIV_quiescent=0.1*u.Unit('erg s-1 cm-2'), # for GJ 832 with bolometric flux equal to Earth
                      SiIV_normed_flare_spec=default_flarespec)
#endregion
#region boilerplate code
def _kw_or_default(kws, keys):
"""Boilerplate for pulling from the default dictionary if a desired key isn't present."""
values = []
for key in keys:
if key not in kws or kws[key] is None:
kws[key] = flare_defaults[key]
values.append(kws[key])
return values
def _check_unit(func, var, unit):
    """Boilerplate for checking units of a variable.

    Raises a uniform ValueError naming `var` and `func` if `var` is not an
    astropy Quantity convertible to `unit`.
    """
    try:
        var.to(unit)
    # AttributeError: not a Quantity at all; UnitConversionError: wrong units
    except (AttributeError, u.UnitConversionError):
        raise ValueError('Variable {} supplied to the {} must be an '
                         'astropy.Units.Quantity object with units '
                         'convertable to {}'.format(var, func, unit))
def _integrate_spec_table(spec_table):
"""Integrate a spectrum defined in a table with 'w0', 'w1', and 'Edensity' columns."""
return np.sum((spec_table['w1'] - spec_table['w0']) * spec_table['Edensity'])
#endregion
#region documentation tools
# there is a lot of duplicated documetation here, so to make sure it is consistent I am going to define it in only one
# place and then insert it into the docstrings, at the cost of readability when actually looking at the source. Sorry
# about that. However, pulling up help on each function should work well, and, like I said, it's more consistent.
_fd = flare_defaults
_flare_params_doc = "flare_params : dictionary\n" \
" Parameters of the flare model. If a parameter is not sepcified, \n" \
" the default is taken from the flare_simulator.flare_defaults \n" \
" dictionary. Parameters relevant to this function are:"
_param_doc_dic = dict(eqd_min = "eqd_min : astropy quantity, units of time\n"
" Minimum flare equivalent duration to be considered.\n"
" Default is {}."
"".format(_fd['eqd_min']),
eqd_max = "eqd_max : astropy quantity, units of time\n"
" Maxium flare equivalent duration to be considered. \n"
" Default is {}."
"".format(_fd['eqd_max']),
ks_rate = "ks_rate : astropy quantity, units of time-1\n"
" Rate of Si IV flares with an equivalent duration of 1000 s. \n"
" Default is {}."
"".format(_fd['ks_rate']),
cumulative_index= "cumulative_index : float\n"
" Cumulative index of a power-law relating the frequency of flares\n"
" greater than a given energy to that energy. Default is {}."
"".format(_fd['cumulative_index']),
boxcar_height_function = "boxcar_height_function : function\n"
" Function relating the peak flare flux (height of the boxcar \n"
" portion of the boxcar-decay model) to the equivalent duration \n"
" of the flare. The function must accept an equivalent duration \n"
" as an astropy quantity with units of time as its only input. \n"
" Default is the function height = 0.3 * equivalent_duration**0.6",
decay_boxcar_ratio = "decay_boxcar_ratio : float\n"
" Ratio between the the amount of flare energy contained in \n"
" the boxcar portion of the boxcar-decay model and the decay \n"
" portion. This actually determines the time-constant of the \n"
" decay. I'm not sure if I actually like that... Default is {}."
"".format(_fd['decay_boxcar_ratio']),
BB_SiIV_Eratio = "BB_SiIV_Eratio : float\n"
" Ratio of the blackbody energy to the Si IV energy of the flare.\n"
" Default is {}.".format(_fd['BB_SiIV_Eratio']),
T_BB = "T_BB : astropy quantity, units of temperature\n"
" Temperature of the flare blackbody continuum. \n"
" Default is {}.".format(_fd['T_BB']),
SiIV_quiescent = "SiIV_quiescent : astropy quantity, units of energy time-1 length-2\n"
" Quiescent flux of the star in the Si IV 1393,1402 AA lines. \n"
" Default is representative of an inactive M dwarf at the distance \n"
" where the bolometric irradiation equals that of Earth,\n"
" {}.".format(_fd['SiIV_quiescent']),
SiIV_normed_flare_spec = "SiIV_normed_flare_spec : astropy table\n"
" Spectral energy budget of the flare (excluding the blackbody) \n"
" normalized to the combined flux of the Si IV 1393,1402 AA lines. \n"
" The energy budget should be an astropy table with columns of\n"
" 'w0' : start of each spectral bin, units of length\n"
" 'w1' : end of each spectral bin, units of length\n"
" 'Edensity' : energy emitted by that flare in the spectral\n"
" bin divided by the width of the bin, units of \n"
" energy length-1\n"
" Default is loaded from the 'relative_energy_budget.ecsv' file.",
clip_BB = "clip_BB : True|False\n"
" If True (default), do not include blackbody flux in the FUV range \n"
" and shortward. This is done because BB flux is presumed to be \n"
" included in the flare SED at EUV and FUV wavelengths assembled by \n"
" Loyd+ 2018 that is the default here. However, should be changed to\n"
" False if, e.g., a hotter or more energetic blackbody is adopted.")
_tbins_doc = 'tbins : astropy quantity array, units of time\n' \
' Edges of the lightcurve time bins.'
_wbins_doc = 'wbins : astropy quantity array, units of length\n' \
' Edges of the spectral bins.'
_t0_doc = 't0 : astropy quantity, units of time\n' \
' Start time of flare.'
_eqd_doc = 'eqd : astropy quantity, units of time\n' \
' Equivalent duration of flare in the Si IV 1393,1402 line \n' \
' (flare energy divided by star\'s quiescent luminosity\n' \
' in the same band).'
def add_indent(txt):
    """Indent every line of ``txt`` (including the first) by four spaces."""
    return '\n'.join("    " + line for line in txt.split('\n'))
def _get_param_string(*keys):
    """Build the flare_params doc section from the doubly-indented snippets for ``keys``."""
    snippets = [add_indent(add_indent(_param_doc_dic[key])) for key in keys]
    return '\n'.join([_flare_params_doc] + snippets)
def _format_doc(func, **kws):
func.__doc__ = func.__doc__.format(**kws)
#endregion
#region fast planck function computations
_Li = mpmath.fp.polylog
def _P3(x):
    """Dang, I should have cited where I got this. Now it is lost."""
    # NOTE(review): this is the polylogarithm series form of the partial Planck
    # integral, P3(x) = Li4(e^-x) + x*Li3(e^-x) + x^2/2*Li2(e^-x) + x^3/6*Li1(e^-x),
    # consumed by _blackbody_partial_integral below — original citation lost
    # (see docstring); confirm against a blackbody partial-integral identity.
    e = np.exp(-x)
    return _Li(4, e) + x*_Li(3, e) + x**2/2*_Li(2, e) + x**3/6*_Li(1, e)
# vectorize so the scalar mpmath-based implementation accepts arrays
_P3 = np.vectorize(_P3)
@u.quantity_input(w=u.AA, T=u.K)
def _blackbody_partial_integral(w, T):
    """
    Integral of blackbody surface flux at wavelengths from 0 to w.

    Parameters
    ----------
    w : astropy quantity, units of length
        wavelength to which to integrate
    T : astropy quantity, units of temperature
        temperature of blackbody

    Returns
    -------
    I : astropy quantity
        Integrated surface flux in erg s-1 cm-2.
    """
    # dimensionless Planck variable hc/(w kB T)
    x_planck = (h*c/w/k_B/T).to('').value
    prefactor = 12 * np.pi * (k_B*T)**4 / c**2 / h**3
    return (prefactor * _P3(x_planck)).to('erg s-1 cm-2')
@u.quantity_input(wbins=u.AA, T=u.K)
def blackbody_binned(wbins, T, bolometric=None):
    """
    Quick computation of blackbody surface flux integrated within wbins.

    Helpful when wavelength bins are wide enough that evaluating the Planck
    function at bin midpoints would give inaccurate results.

    Parameters
    ----------
    {wbins}
    T : astropy quantity, units of temperature
        temperature of blackbody
    bolometric : astropy quantity, units of energy time-1 length-2
        value of the bolometric blackbody flux by which to normalize the
        output. A value of None gives the flux at the surface of the
        emitter.

    Returns
    -------
    flux_density : astropy quantity, units of energy time-1 length-3
        The flux spectral density of the blackbody in each wbin, generally in units of erg s-1 cm-2 AA-1.
    """
    # flux in each bin is the difference of the cumulative integral at its edges
    bin_flux = np.diff(_blackbody_partial_integral(wbins, T))
    # divide by bin widths to get a flux density
    density = bin_flux / np.diff(wbins)
    if bolometric is None:
        # surface flux of the emitter
        return density.to('erg s-1 cm-2 AA-1')
    # rescale so the integrated (bolometric) flux equals the requested value
    fbolo = const.sigma_sb*T**4
    return (density/fbolo).to(1/wbins.unit) * bolometric
_format_doc(blackbody_binned, wbins=_wbins_doc)
# BUG FIX: the decorator previously read `wbins=u.AA`, but this function's
# parameter is named `w`, so the unit check on `w` was silently skipped.
@u.quantity_input(w=u.AA, T=u.K)
def blackbody_points(w, T, bolometric=None):
    """
    Compute the flux spectral density of the emission from a blackbody.

    Returns the value at each w, rather than the value averaged over wbins. For the latter, use blackbody_binned.

    Parameters
    ----------
    w : astropy quantity array, units of length
        Wavelengths at which to compute flux density.
    T : astropy quantity, units of temperature
        temperature of blackbody
    bolometric : astropy quantity, units of energy time-1 length-2
        value of the bolometric blackbody flux by which to normalize the
        output. A value of None gives the flux at the surface of the
        emitter.

    Returns
    -------
    flux_density : astropy quantity, units of energy time-1 length-3
        The flux spectral density of the blackbody at each w, generally in units of erg s-1 cm-2 AA-1.
    """
    # compute flux density from Planck function (with that extra pi factor to get rid of per unit solid angle portion)
    f = np.pi * 2 * const.h * const.c ** 2 / w ** 5 / (np.exp(const.h * const.c / const.k_B / T / w) - 1)
    if bolometric is None:
        # surface flux density of the emitter
        return f.to('erg s-1 cm-2 AA-1')
    # rescale so the integrated (bolometric) flux equals `bolometric`
    fbolo = const.sigma_sb*T**4
    fnorm = (f/fbolo).to(1/w.unit)
    return fnorm*bolometric
#endregion
#region utilities
def rebin(bins_new, bins_old, y):
    """
    Rebin some binned values.

    Parameters
    ----------
    bins_new : array
        New bin edges.
    bins_old : array
        Old bin edges.
    y : array
        Binned values (average of some function like a spectrum across
        each bin).

    Returns
    -------
    y_new : array
        Rebinned values.
    """
    # politely let user know that quantity input is not desired for this
    if any(isinstance(arr, u.Quantity) for arr in [bins_new, bins_old, y]):
        raise ValueError('No astropy Quantity input for this function, please.')
    if np.any(bins_old[1:] <= bins_old[:-1]) or np.any(bins_new[1:] <= bins_new[:-1]):
        raise ValueError('Old and new bin edges must be monotonically increasing.')
    # cumulative integral of the old binned data, anchored at zero
    cumulative = np.insert(np.cumsum(y * np.diff(bins_old)), 0, 0)
    # interpolate the cumulative integral onto the new edges, then difference
    # back down to per-bin averages
    edge_integrals = np.interp(bins_new, bins_old, cumulative)
    return np.diff(edge_integrals) / np.diff(bins_new)
def power_rv(min, max, cumulative_index, n):
    """
    Random values drawn from a power-law distribution.

    Parameters
    ----------
    min : float
        Minimum value of the distribution.
    max : float
        Maximum value of the distribution.
    cumulative_index : float
        Index of the cumulative distribution.
    n : integer
        Number of values to draw.

    Returns
    -------
    values : array
        Array of random values.
    """
    # politely let user know that, in this instance, astropy Quantities are not wanted
    if any(isinstance(val, u.Quantity) for val in [min, max, cumulative_index]):
        raise ValueError('No astropy Quantity input for this function, please.')
    # invert the truncated power-law CDF and feed it uniform draws
    a = cumulative_index
    norm = min**-a - max**-a
    # cdf = 1 - ((x**-a - max**-a)/norm)
    uniform_draws = np.random.uniform(size=n)
    return ((1 - uniform_draws)*norm + max**-a)**(-1/a)
def shot_times(rate, time_span):
    """
    Generate random times of events that when binned into even intervals would yield counts that are Poisson distributed.

    Parameters
    ----------
    rate : float
        Average rate of events.
    time_span : float
        Length of time over which to generate events.

    Returns
    -------
    times : array
        Times at which random events occurr.
    """
    # politely let user know that, in this instance, astropy Quantities are not wanted
    if any(isinstance(val, u.Quantity) for val in [rate, time_span]):
        raise ValueError('No astropy Quantity input for this function, please.')
    # draw exponential wait times (Poisson statistics), overshooting the
    # expected count by 10 sigma so the series almost always spans time_span
    mean_wait = 1. / rate
    expected_n = time_span / mean_wait
    n_draw = int(expected_n + 10*np.sqrt(expected_n))
    # cumulatively sum the wait times to get actual event times
    arrival = np.cumsum(np.random.exponential(mean_wait, size=n_draw))
    if arrival[-1] < time_span:
        # extremely unlikely: the draws did not span the interval; retry
        return shot_times(rate, time_span)
    return arrival[arrival < time_span]
def boxcar_decay(tbins, t0, area_box, height_box, area_decay):
    """
    Compute the lightcurve from one or more boxcar-decay functions.

    Parameters
    ----------
    tbins : array
        edges of the time bins used for the lightcurve
    t0 : float or array
        start times of the boxcar-decays
    area_box : float or array
        areas of the boxcar portion of the boxcar-decays
    height_box : float or array
        heights of the boxcar-decays
    area_decay : float or array
        areas of the decay portions of the boxcar-decays

    Returns
    -------
    y : array
        lightcurve values

    Notes
    -----
    This function is a bottleneck when creating a lightcurve from a long
    series of flares. If this code is to be adapted for quick simulation
    of years-long series of flares, this is where the speedup needs to
    happen.
    """
    # politely let user know that, in this instance, astropy Quantities are not wanted
    if any(isinstance(x, u.Quantity) for x in [tbins, t0, area_box, height_box, area_decay]):
        raise ValueError('No astropy Quantity input for this function, please.')
    # this is going to have to be ugly for it to be fast, I think
    # standardize t0, area_box, height_box, and area_decay for array input
    t0, area_box, height_box, area_decay = [np.reshape(a, [-1]) for a in [t0, area_box, height_box, area_decay]]
    # compute end of box, start of decay
    t1 = t0 + area_box/height_box
    # correct for portions hanging over ends of tbins
    t0 = np.copy(t0)
    t0[t0 < tbins[0]] = tbins[0]
    t1[t1 > tbins[-1]] = tbins[-1]
    # initialize y array: one row per flare, one column per time bin
    y = np.zeros((len(t0), len(tbins)-1))
    i_rows = np.arange(y.shape[0])
    # add starting portion of box to first bin that is only partially covered by it
    i0 = np.searchsorted(tbins, t0, side='right')
    frac = (tbins[i0] - t0)/(tbins[i0] - tbins[i0-1])
    y[i_rows, i0-1] += frac*height_box
    # add box to bins fully covered by it
    inbox = (tbins[None, :-1] > t0[:, None]) & (tbins[None, 1:] < t1[:, None])
    y += height_box[:,None]*inbox
    # add ending fraction of box to last bin that is partially covered by it
    i1 = np.searchsorted(tbins, t1, side='left')
    frac = (t1 - tbins[i1-1])/(tbins[i1] - tbins[i1-1])
    y[i_rows, i1-1] += frac*height_box
    # deal with any cases where the box was entirely within a bin
    # (overwrite, since the partial-start/partial-end additions above would
    # otherwise have double-counted that bin)
    j = i0 == i1
    y[i_rows[j], i0[j]-1] = area_box[j]/(tbins[i0][j] - tbins[i0-1][j])
    # add decay
    # compute cumulative decay integral at all time points
    # (exponential tail with amplitude equal to the box height; its area fixes
    # the decay time constant tau = area_decay / height)
    amp_decay = height_box
    tau_decay = area_decay / amp_decay
    # exp can overflow for bins far before t1; those entries are masked off by
    # `keep` below, so suppress the warnings
    with np.errstate(over='ignore', invalid='ignore'):
        Idecay = -amp_decay[:,None]*tau_decay[:,None]*np.exp(-(tbins[None,:] - t1[:,None])/tau_decay[:,None])
    ydecay = np.diff(Idecay, 1)/np.diff(tbins)
    keep = tbins[:-1] > t1[:, None]
    y[keep] += ydecay[keep]
    # add fractional piece of exponential
    # (the bin containing t1 gets the decay integral from t1 to the bin's end)
    i1 = np.searchsorted(tbins, t1, side='right')
    inrange = i1 < len(tbins)
    i_rows, i1 = i_rows[inrange], i1[inrange]
    Idecay1 = -amp_decay*tau_decay
    ydecay1 = (Idecay[i_rows, i1] - Idecay1[i_rows])/(tbins[i1] - tbins[i1-1])
    y[i_rows, i1-1] += ydecay1
    # sum the contributions of all flares into a single lightcurve
    return np.sum(y, 0)
#endregion
#region front end functions
# BUG FIX: the decorator previously annotated `eqd=u.AA`, but this function has
# no `eqd` parameter — the intended unit check on `filter_wave` was silently
# skipped. Bind the check to `filter_wave` instead.
@u.quantity_input(filter_wave=u.AA, filter_response=u.Unit(''))
def filter_to_SiIV_energy(filter_wave, filter_response, energy, **flare_params):
    """
    Convenience function for converting the energy in a photometric filter to the Si IV energy of a flare.

    Parameters
    ----------
    filter_wave : astropy quantity array, units of length
        Wavelengths of filter response curve.
    filter_response : array, unitless
        Filter response at filter_wave.
    energy : float or astropy quantity, units of energy
        Energy of the flare in the specified filter.
    {flare_params}

    Returns
    -------
    energy_SiIV : float or astropy quantity
        Energy of the flare in the Si IV 1393,1402 AA line.
    """
    # build bin edges at the midpoints between filter samples, reusing the
    # first and last sample wavelengths as the outermost edges
    w_mids = (filter_wave[1:] + filter_wave[:-1])/2.
    w_bins = np.insert(w_mids.value, [0,len(w_mids)],
                       filter_wave[[0,-1]].value)*filter_wave.unit
    # fraction of a unit-SiIV flare's energy that lands in the filter
    flux = flare_spectrum(w_bins, 1.0, **flare_params)
    filter_fraction = np.sum(filter_response*flux*np.diff(w_bins))
    # then just invert to get the energy in Si IV given the filter energy
    return energy/filter_fraction
_format_doc(filter_to_SiIV_energy, flare_params=_get_param_string('BB_SiIV_Eratio', 'T_BB', 'SiIV_normed_flare_spec'))
@u.quantity_input(tbins=u.s, t0=u.s, eqd=u.s)
def flare_lightcurve(tbins, t0, eqd, **flare_params):
    """
    Return a lightcurve for a single flare normalized to quiescent flux.

    Parameters
    ----------
    {tbins}
    {t0}
    {eqd}
    {flare_params}

    Returns
    -------
    y : array
        Quiescent-normalized lightcurve of the flare.
    """
    boxcar_height_function, decay_boxcar_ratio = _kw_or_default(
        flare_params, ['boxcar_height_function', 'decay_boxcar_ratio'])
    # translate the equivalent duration into boxcar-decay parameters; the
    # decay_boxcar_ratio splits the flare energy between the two portions
    height = boxcar_height_function(eqd)
    area_box = eqd/(1 + decay_boxcar_ratio)
    area_decay = area_box * decay_boxcar_ratio
    # strip units uniformly before handing off to the fast numeric routine
    tunit = tbins.unit
    tbins, t0, eqd, area_box, area_decay = [
        q.to(tunit).value for q in [tbins, t0, eqd, area_box, area_decay]]
    return boxcar_decay(tbins, t0, area_box, height, area_decay)
_format_doc(flare_lightcurve, flare_params=_get_param_string('boxcar_height_function', 'decay_boxcar_ratio'),
            tbins=_tbins_doc, eqd=_eqd_doc, t0=_t0_doc)
def flare_rate(**flare_params):
    """
    Rate of flares spanning the given energy range.

    Parameters
    ----------
    {flare_params}

    Returns
    -------
    rate : astropy quantity
        Expected flare rate in d-1.
    """
    # pull relevant flare parameters and check units
    eqd_min, eqd_max, ks_rate, cumulative_index = _kw_or_default(
        flare_params, ['eqd_min', 'eqd_max', 'ks_rate', 'cumulative_index'])
    _check_unit(flare_rate, ks_rate, 's-1')
    for bound in (eqd_min, eqd_max):
        _check_unit(flare_rate, bound, 's')
    # guard against divergent input
    if eqd_min <= 0:
        raise ValueError('Flare rate diverges at eqd_min == 0. Only eqd_min > 0 makes sense.')
    # integrate the cumulative power law between the two eqd limits
    a = cumulative_index
    rate = ks_rate * ((eqd_min/u.ks).to('')**-a - (eqd_max/u.ks).to('')**-a)
    return rate.to('d-1')
_format_doc(flare_rate, flare_params=_get_param_string('eqd_min', 'eqd_max', 'ks_rate', 'cumulative_index'))
@u.quantity_input(time_span=u.s)
def flare_series(time_span, **flare_params):
    """
    Start times and equivalent durations for a randomly generated series of flares.

    Parameters
    ----------
    time_span : astropy quantity, units of time
    {flare_params}

    Returns
    -------
    t_flare : astropy quantity array, units of time
        Start times of the random flares.
    eqd : astropy quantity array, units of time
        Equivalent durations of the random flares.
    """
    eqd_min, eqd_max, cumulative_index = _kw_or_default(
        flare_params, ['eqd_min', 'eqd_max', 'cumulative_index'])
    for bound in (eqd_min, eqd_max):
        _check_unit(flare_series, bound, 's')
    # expected rate for flares in the allowed eqd range
    rate = flare_rate(**flare_params)
    # draw random event times at that rate (unitless internally)
    tunit = time_span.unit
    t_flare = shot_times(rate.to(tunit**-1).value, time_span.value) * tunit
    # draw a random equivalent duration for each flare
    lo, hi = [q.to(tunit).value for q in (eqd_min, eqd_max)]
    eqd = power_rv(lo, hi, cumulative_index, len(t_flare)) * tunit
    return t_flare, eqd
_format_doc(flare_series, flare_params=_get_param_string('eqd_min', 'eqd_max', 'cumulative_index'))
@u.quantity_input(tbins=u.s)
def flare_series_lightcurve(tbins, return_flares=False, **flare_params):
    """
    Generate a series of random flares and return their lightcurve.

    Parameters
    ----------
    {tbins}
    return_flares : bool
        If True, return the start times and equivalent durations of
        the flares.
    {flare_params}

    Returns
    -------
    y : array
        Quiescent-normalized lightcurve values in each tbin.
    tflares : astropy quantity array, units of time, optional
        Start time of random flares.
    eqd : astropy quantity array, units of time, optional
        Equivalent durations of the random flares.
    """
    # draw a random series of flares spanning the full lightcurve range
    tflares, eqds = flare_series(tbins[-1] - tbins[0], **flare_params)
    # superpose the boxcar-decay lightcurves of all the flares
    y = flare_lightcurve(tbins, tflares, eqds, **flare_params)
    return (y, (tflares, eqds)) if return_flares else y
_format_doc(flare_series_lightcurve, tbins=_tbins_doc,
            flare_params=_get_param_string('eqd_min', 'eqd_max', 'cumulative_index', 'boxcar_height_function',
                                           'decay_boxcar_ratio'))
@u.quantity_input(wbins=u.AA)
def flare_spectrum(wbins, SiIV, **flare_params):
    """
    Return the flare spectrum scaled to match the energy or equivalent duration specified by SiIV and binned according
    to wbins.

    Parameters
    ----------
    {wbins}
    SiIV : float or astropy quantity
        Equivalent duration or energy of the flare in the Si IV 1393,1402 AA
        line. This could also be peak flux or some other quantity, but note
        that you should probably specify your own 'Edensity' column of the
        SiIV_normed_flare_spec table to match if so.
    {flare_params}

    Returns
    -------
    spectrum : astropy quantity array, units variable according to units of SiIV
        Energy spectral density or other spectral density of the flare spectrum
        in each wbin.
    """
    BBratio, T, flarespec, clip_BB = _kw_or_default(flare_params, ['BB_SiIV_Eratio', 'T_BB', 'SiIV_normed_flare_spec',
                                                                   'clip_BB'])
    # rebin energy density from specified flare SED (from MUSCLES data by default)
    fs_bins = np.append(flarespec['w0'], flarespec['w1'][-1]) * flarespec['w0'].unit
    fs_density = flarespec['Edensity'].quantity
    fs_bins = fs_bins.to(wbins.unit)
    # scale the Si IV-normalized SED by the flare's Si IV energy/eqd
    FUV_and_lines = rebin(wbins.value, fs_bins.value, fs_density.value) * fs_density.unit * SiIV
    # get the blackbody (should not be included in SED) emission in each bin. Add to regions shortward of FUV as
    # desired by user
    BBbolo = BBratio * SiIV
    if clip_BB:
        # only add blackbody flux redward of the FUV range; the default SED is
        # presumed to already include it at shorter wavelengths (see clip_BB doc)
        red = (wbins[1:] > fuv[1])
        BBbins = np.insert(wbins[1:][red], 0, fuv[1])
        BB = blackbody_binned(BBbins, T, bolometric=BBbolo)
        # add SED and blackbody
        # (in-place add is safe: FUV_and_lines was freshly created above)
        result = FUV_and_lines
        result[red] += BB
    else:
        BB = blackbody_binned(wbins, T, bolometric=BBbolo)
        result = FUV_and_lines + BB
    return result
_format_doc(flare_spectrum, wbins=_wbins_doc,
            flare_params=_get_param_string('BB_SiIV_Eratio', 'T_BB', 'SiIV_normed_flare_spec'))
@u.quantity_input(wbins=u.AA, tbins=u.s, t0=u.s, eqd=u.s)
def flare_spectra(wbins, tbins, t0, eqd, **flare_params):
    """
    Return a series of flare spectra averaged over each tbin for a flare starting at t0 with equivalent duration eqd
    in Si IV.

    Parameters
    ----------
    {wbins}
    {tbins}
    {t0}
    {eqd}
    {flare_params}

    Returns
    -------
    spectra : astropy quantity array, variable units
        Array of spectra in each tbin, where the array has dimensions
        (len(tbins)-1, len(wbins)-1). Units will match the product
        of the eqd and SiIV_quiescent units, divided by time and length.
    """
    # quiescent Si IV flux sets the absolute scale of the spectra
    SiIVq, = _kw_or_default(flare_params, ['SiIV_quiescent'])
    # time evolution (quiescent-normalized) and spectral shape are separable,
    # so compute each once and combine with an outer product
    lc = flare_lightcurve(tbins, t0, eqd, **flare_params)
    spec = flare_spectrum(wbins, SiIVq, **flare_params)
    return np.outer(lc, spec.value)*spec.unit
_format_doc(flare_spectra, wbins=_wbins_doc, tbins=_tbins_doc, t0=_t0_doc, eqd=_eqd_doc,
            flare_params=_get_param_string('SiIV_quiescent', 'boxcar_height_function', 'decay_boxcar_ratio',
                                           'BB_SiIV_Eratio', 'T_BB', 'SiIV_normed_flare_spec'))
@u.quantity_input(wbins=u.AA, tbins=u.s)
def flare_series_spectra(wbins, tbins, **flare_params):
    """
    Generate time-evolving spectra from a random series of flares.

    Parameters
    ----------
    {wbins}
    {tbins}
    {flare_params}

    Returns
    -------
    spectra : astropy quantity array
        Array of spectra in each tbin, where the array has dimensions
        (len(tbins)-1, len(wbins)-1). Units will match the product of
        the eqd and SiIV_quiescent units, divided by time and length.
    """
    # quiescent Si IV flux sets the absolute scale of the spectra
    SiIVq, = _kw_or_default(flare_params, ['SiIV_quiescent'])
    # quiescent-normalized lightcurve of a random series of flares
    lc = flare_series_lightcurve(tbins, **flare_params)
    # spectral shape shared by all the flares
    spec = flare_spectrum(wbins, SiIVq, **flare_params)
    # outer product: each time bin gets the spectrum scaled by the lightcurve
    return np.outer(lc, spec.value)*spec.unit
_format_doc(flare_series_spectra, wbins=_wbins_doc, tbins=_tbins_doc,
            flare_params=_get_param_string('SiIV_quiescent', 'eqd_min', 'eqd_max', 'cumulative_index',
                                           'boxcar_height_function', 'decay_boxcar_ratio', 'BB_SiIV_Eratio', 'T_BB',
                                           'SiIV_normed_flare_spec'))
#endregion
|
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style="whitegrid", font_scale=1.5, context="talk")
"""
For details on the params below, see the matplotlib docs:
https://matplotlib.org/users/customizing.html
"""
plt.rcParams["axes.edgecolor"] = "0.6"
plt.rcParams["figure.dpi"] = 200
plt.rcParams["font.family"] = "serif"
plt.rcParams["grid.color"] = "0.85"
plt.rcParams["savefig.dpi"] = 300
plt.rcParams["legend.columnspacing"] *= 0.8
plt.rcParams["legend.edgecolor"] = "0.6"
plt.rcParams["legend.markerscale"] = 1.0
plt.rcParams["legend.framealpha"] = "1"
plt.rcParams["legend.handlelength"] *= 1.5
plt.rcParams["legend.numpoints"] = 2
plt.rcParams["text.usetex"] = True
plt.rcParams["xtick.major.pad"] = -3
plt.rcParams["ytick.major.pad"] = -2
def plot_data(
    data,
    x=None,
    plot_type="lineplot",
    filepath=None,
    save_fig=True,
    figsize=[12.0, 6.0],
):
    """
    Plot per-topic time series as a line plot or stacked area plot.

    Args:
        data (2d array): 2d array with dimensions: num_topics x num_time_slices
        x (1d array, optional): x-axis values; defaults to arange(num_time_slices).
        plot_type (str): 'lineplot' or 'stackplot'; anything else plots nothing.
        filepath (str or None): output path stem (without extension); required
            when save_fig is True.
        save_fig (bool): if True, write .svg and .png files to filepath.
        figsize (list): figure size in inches. NOTE(review): mutable default —
            harmless here since it is never mutated, but worth confirming.
    Examples:
        A simple example.
        >>> import numpy as np
        >>> data = np.arange(40).reshape([4,10])
        >>> plot_data(data, save_fig=False)
        >>> plot_data(data, plot_type='lineplot', save_fig=False)
        >>> plot_data(data, plot_type='stackplot', save_fig=False)
        An example using a Pipeline.
        >>> import numpy as np
        >>> from functools import partial
        >>> from twitter_analysis_tools.utils import Pipeline
        >>> data = [i*np.arange(10).T for i in range(1, 20)]
        >>> data_pipeline = Pipeline(data)
        >>> data_pipeline = data_pipeline.add_map(partial(np.expand_dims, axis=1))
        >>> topic_distributions = np.concatenate(list(data_pipeline), axis=1)
        >>> plot_data(topic_distributions, plot_type='stackplot', save_fig=False)
        >>> plot_data(topic_distributions, plot_type='lineplot', save_fig=False)
    """
    # Get dimensions.
    num_topics, num_time_slices = data.shape
    # one distinct hue per topic
    sns.set_palette(sns.husl_palette(num_topics))
    # Create labels.
    # TODO: pass labels in as argument.
    labels = ["Topic {}".format(i) for i in range(1, num_topics + 1)]
    if x is None:
        x = np.arange(num_time_slices)
    # Plot
    fig = plt.figure(figsize=figsize)
    # Plot data.
    if plot_type == "lineplot":
        for topic in range(num_topics):
            plt.plot(x, data[topic, :], label=labels[topic])
    if plot_type == "stackplot":
        plt.stackplot(x, data, labels=labels)
    # Put the legend out of the figure
    plt.legend(
        bbox_to_anchor=(1.05, 0.5),
        loc="center left",
        borderaxespad=0.0,
        prop={"size": 10},
    )
    plt.xticks(rotation=45)
    if save_fig:
        if filepath is None:
            raise Exception("Filepath must be specified if save_fig=True.")
        fig.savefig(filepath + ".svg", bbox_inches="tight", transparent=True)
        fig.savefig(filepath + ".png", bbox_inches="tight", transparent=True)
    # close the figure so repeated calls do not accumulate open figures
    # NOTE(review): indentation reconstructed — confirm close() is intended
    # unconditionally rather than only on the save_fig path.
    plt.close()
def sliding_average(data, window=10):
    """Average data over sliding window.
    Args:
        data (ndarray): data to average with dimensions: msrmts x num_samples.
        window (int): size of the sliding window to average over.
    Example:
        >>> import numpy as np
        >>> data = np.arange(24).reshape((4,6))
        >>> sliding_average(data, window=5)
        array([[ 2.,  3.],
               [ 8.,  9.],
               [14., 15.],
               [20., 21.]])
        An exception is raised if there is insufficient data to average over.
        >>> import numpy as np
        >>> data = np.arange(24).reshape((4,6))
        >>> sliding_average(data, window=10)
        Traceback (most recent call last):
        ...
        Exception: Not enough data to average over with window of size 10.
    """
    if data.shape[1] < window:
        raise Exception(
            "Not enough data to average over with window of size {}.".format(window)
        )
    n_out = data.shape[1] - window + 1
    # Output column i is the mean of input columns [i, i + window).
    cols = [data[:, start:start + window].mean(axis=1) for start in range(n_out)]
    return np.stack(cols, axis=1)
|
|
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forward fill the values."""
import haiku as hk
from jax import numpy as jnp
class Ffill(hk.Module):
    """Forward-fill: replace NaN elements with the last valid value observed."""

    def __init__(self, periods=1, name=None):
        """
        Args:
            periods: Fill-depth setting retained from the original API.
                NOTE(review): the fill logic below does not yet honor
                periods > 1 — confirm the intended semantics.
            name: Optional haiku module name.
        """
        super().__init__(name=name)
        # BUG FIX: `periods` was accepted but silently discarded; store it so
        # the configured value is preserved and inspectable.
        self.periods = periods

    def __call__(self, x):
        """Return ``x`` with NaNs replaced by the most recent non-NaN value.

        The last valid values are carried across calls in haiku state
        (initialized to NaN before any valid value has been seen).
        """
        valid_value = hk.get_state(
            "valid_value",
            shape=x.shape,
            dtype=x.dtype,
            init=lambda shape, dtype: jnp.full(shape, jnp.nan, dtype),
        )
        # keep the previous valid value wherever x is NaN, else take x
        valid_value = jnp.where(jnp.isnan(x), valid_value, x)
        hk.set_state("valid_value", valid_value)
        return valid_value
|
|
'''Scaler operation'''
import numpy as np
from .base import Operation
class Scaler(Operation):
    '''Min-max scaling of dMRI data (forward) and its exact inverse (backward).'''

    @staticmethod
    def apply(data, xmin: float, xmax: float):
        '''Map `data` from the range [xmin, xmax] onto [0, 1].'''
        span = xmax - xmin
        return (data - xmin) / span

    @staticmethod
    def reverse(data, xmin: float, xmax: float):
        '''Map `data` from [0, 1] back onto the range [xmin, xmax].'''
        return data * (xmax - xmin) + xmin

    @classmethod
    def forward(cls, datasets, context, **kwargs):
        '''Rescale dMRI data to [0, 1] using the precomputed per-shell norm.

        Args:
            datasets (Dict[str,Any]): holds 'dmri_in' (i, j, k, q_in) plus
                'bvec_in'/'bvec_out' (3, q) which are only touched when
                `apply_bvec` is set.
            context (Dict[str,Any]): receives 'xmin'/'xmax' for backward().

        Keyword Args:
            shell (int): shell being processed.
            norms (Dict[int,float]): per-shell maximum intensity.
            apply_bvec (bool): also scale b-vectors by shell/1000. Default False.
        '''
        print('Normalizing dMRI data...')
        shell = kwargs['shell']
        # Lower bound is fixed at zero; upper bound comes from the shell norm.
        xmin, xmax = 0.0, kwargs['norms'][shell]
        datasets['dmri_in'] = cls.apply(datasets['dmri_in'], xmin, xmax)
        if kwargs.get('apply_bvec', False):
            # b-vectors are scaled by the b-value, expressed in units of 1000.
            datasets['bvec_in'] = datasets['bvec_in'] * shell / 1000.0
            datasets['bvec_out'] = datasets['bvec_out'] * shell / 1000.0
        # Record the scale so backward() can undo it.
        context['xmin'] = xmin
        context['xmax'] = xmax

    @classmethod
    def backward(cls, datasets, context, **kwargs):
        '''Rescale `datasets['dmri_out']` back to the original intensity range.

        Pops 'xmin'/'xmax' from `context` (they are consumed, not kept).
        '''
        print('Rescaling dMRI data to original intensity range...')
        xmin, xmax = context.pop('xmin'), context.pop('xmax')
        datasets['dmri_out'] = cls.reverse(datasets['dmri_out'], xmin, xmax)
class ScalerNorm(Scaler):
    '''Scaler variant that derives the scale from an intensity percentile.'''

    @classmethod
    def forward(cls, datasets, context, **kwargs):
        '''Rescales dMRI data to range [0,1] using a percentile of the masked
        intensities as the upper bound.

        Args:
            datasets (Dict[str,Any]): holds 'mask' (i, j, k), 'dmri_in'
                (i, j, k, q_in) and 'bvec_in'/'bvec_out' (3, q).
            context (Dict[str,Any]): receives 'xmin'/'xmax' for backward().

        Keyword Args:
            shell (int): shell being processed.
            pcent (int): maximum intensity percentile, as an int, e.g. 99% = 99.
            apply_bvec (bool): also scale b-vectors by shell/1000. Default False.
        '''
        print('Normalizing dMRI data...')
        pcent = kwargs['pcent']
        # Get xmin xmax
        xmin = 0.0
        # BUG FIX: `np.bool` was deprecated and removed in NumPy >= 1.24;
        # the builtin `bool` is the documented replacement.
        xmax = np.percentile(datasets['dmri_in'][datasets['mask'].astype(bool)], pcent)
        # Apply rescaling
        datasets['dmri_in'] = cls.apply(datasets['dmri_in'], xmin, xmax)
        if kwargs.get('apply_bvec', False):
            datasets['bvec_in'] = datasets['bvec_in'] * kwargs['shell'] / 1000.0
            datasets['bvec_out'] = datasets['bvec_out'] * kwargs['shell'] / 1000.0
        # Save scale units
        context['xmin'] = xmin
        context['xmax'] = xmax
|
|
import logging as log
import pandas as pd
import numpy as np
import sklearn as sk
from pprint import pprint
def ewma(df, col, span):
    """Return the exponentially weighted moving average of df[col].

    Args:
        df (pd.DataFrame): input frame.
        col (str): column to average.
        span (int): EWMA span (alpha = 2 / (span + 1)).

    Returns:
        pd.Series: the EWMA, aligned with df's index.
    """
    log.info('Adding {0} ewma to df on {1}'.format(span, col))
    # BUG FIX: `pd.stats.moments.ewma` was removed from pandas; the
    # Series.ewm accessor is the documented replacement with identical
    # `span` semantics (adjust=True by default in both).
    return df[col].ewm(span=span).mean()
def rsi(df, n):
    """
    RSI = 100 - 100/(1 + RS*)
    *Where RS = Average of x days' up closes / Average of x days' down closes.

    Returns a pd.Series aligned with df's index; the first n entries (no full
    window, plus the element dropped by np.diff) are forced to RSI(0) = 0.
    """
    delta = np.diff(df['close'])
    dUp, dDown = delta.copy(), delta.copy()
    dUp[dUp < 0] = 0
    dDown[dDown > 0] = 0
    # BUG FIX: `pd.rolling_mean` was removed from pandas; Series.rolling(n)
    # .mean() is the documented replacement (same window/NaN behaviour).
    RolUp = pd.Series(dUp).rolling(n).mean().to_numpy()
    RolDown = np.absolute(pd.Series(dDown).rolling(n).mean().to_numpy())
    RS = RolUp / RolDown
    # First n-1 entries have no full window (NaN); zero them out.
    RS[0:n-1] = 0
    # Re-align with the original series: np.diff dropped one leading element.
    RS = np.insert(RS, 0, 0)
    rsiCalc = lambda x: 100 - 100 / (1 + x)
    return pd.Series([rsiCalc(rs) for rs in RS], index=df.index)
class FeatureFactory():
    """Builds technical-analysis features (EMA ratios, RSI, ATR, Ichimoku
    flags) from OHLCV bar series.

    NOTE(review): this class is Python-2 era code: it uses `xrange` and the
    pre-0.18 pandas API (`pd.rolling_mean`, `DataFrame.as_matrix`), none of
    which exist on modern interpreters/pandas — confirm the target runtime.
    """

    def averageTrueRange(self, highs, lows, closes, n):
        '''
        ATR: n-period rolling mean of the true range
        max(high - low, |high - prev_close|, |low - prev_close|).
        '''
        # Previous closes, padded so the first bar compares against itself.
        yesterdays = list(closes)
        yesterdays.insert(0, closes[0])
        atr = [max(high - low, abs(high - yesterday), abs(low - yesterday)) for high, low, close, yesterday in zip(highs, lows, closes, yesterdays)]
        atrs = pd.DataFrame(atr)
        # NOTE(review): pd.rolling_mean and .as_matrix() were removed from
        # pandas (use .rolling(n).mean() / .to_numpy() on modern versions).
        atrs = pd.rolling_mean(atrs, n)
        # Backfill the warm-up NaNs with the first fully formed ATR value.
        atrs = atrs.fillna(atr[n])
        return atrs.as_matrix()

    def averageDirectionalIndex(self):
        """
        Calculation for Average Directional Index (not implemented yet).
        TR := SUM(MAX(MAX(HIGH-LOW,ABS(HIGH-REF(CLOSE,1))),ABS(LOW-REF(CLOSE,1))),N);
        HD := HIGH-REF(HIGH,1);
        LD := REF(LOW,1)-LOW;
        DMP:= SUM(IF(HD>0 & HD>LD,HD,0),N);
        DMM:= SUM(IF(LD>0 & LD>HD,LD,0),N);
        PDI:= DMP*100/TR;
        MDI:= DMM*100/TR;
        ADX:= MA(ABS(MDI-PDI)/(MDI+PDI)*100,N)
        """
        pass

    def getTopsAndBots(self, highs, lows, closes):
        """Per bar: highest high and lowest low over Fibonacci lookbacks
        (8, 13, 21, 34, 55), each normalised by that bar's close.

        Returns [tops, bots], two lists of 5-element lists (one per bar);
        lookbacks without enough history keep a 0 placeholder.
        """
        tops, bots = [], []
        for i in xrange(len(highs)):  # NOTE(review): xrange is Python 2 only
            hs, ls = [0] * 5, [0] * 5
            close = closes[i]
            for k, n in enumerate([8, 13, 21, 34, 55]):
                start = max(0, i - n) + 1
                end = i + 1
                if end - start < 5:
                    # Not enough history for this lookback; keep placeholder.
                    continue
                hs[k] = max(highs[start:end]) / close
                ls[k] = min(lows[start:end]) / close
            # Sanity checks: each bar must yield exactly 5 tops and 5 bots.
            if len(hs) != 5 or len(ls) != 5:
                raise Exception('get hs and ls has bad lengths')
            tops.append(hs)
            bots.append(ls)
        if len(tops) != len(closes) or len(bots) != len(closes):
            raise Exception('getTopsAndBots has bad lengths')
        return [tops, bots]

    def extractChiMoku(self, highs, lows, closes):
        """Ichimoku lines: tenkan-sen, kijun-sen, senkou spans A/B, chikou."""
        tenkanSen = []
        kijunSen = []
        senkouSpanB = []
        for i in xrange(len(highs)):  # NOTE(review): Python 2 only
            # avg of highest high and lowest low over past 9 ticks
            tenkanSenHigh = max(highs[max(0, i-9):i+1])
            tenkanSenLow = min(lows[max(0, i-9):i+1])
            tenkanSen.append((tenkanSenHigh + tenkanSenLow) / 2)
            # avg of highest high and lowest low over past 26 ticks
            kijunSenHigh = max(highs[max(0, i-26):i+1])
            kijunSenLow = min(lows[max(0, i-26):i+1])
            kijunSen.append((kijunSenHigh + kijunSenLow) / 2)
            # (Highest high + Lowest low) / 2 over the last 52 trading days plotted 26 days ahead.
            senkouSpanBHigh = max(highs[max(0, i-52):i+1])
            senkouSpanBLow = min(lows[max(0, i-52):i+1])
            senkouSpanB.append((senkouSpanBHigh + senkouSpanBLow) / 2)
        # (Tenkan Sen + Kijun Sen) / 2 plotted 26 days ahead.
        # NOTE(review): the pad of 256 looks like a typo for the 26-tick
        # displacement used everywhere else — confirm intended shift.
        senkouSpanA = [(tenkanSen[0] + kijunSen[0]) / 2] * 256
        senkouSpanA.extend([(t + k) / 2 for t, k in zip(tenkanSen, kijunSen)])
        senkouSpanA = senkouSpanA[:len(highs)]
        # The closing price plotted 26 trading days behind.
        chikouSpan = [closes[0]] * 26
        chikouSpan.extend(closes)
        chikouSpan = chikouSpan[:len(highs)]
        return tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan

    def getNames(self):
        """Feature names, in the same order as the rows built by getFeatures()."""
        names = [
            'volume / ema20v', 'volume / ema8v', 'volume / ema5v',
            'ema5v / ema20v', 'ema5v / ema8v',
            'ema8v / ema20v',
            'topShadow / topShadowsMean',
            'body / bodiesMean',
            'bar / barsMean',
            'botShadow / botShadowsMean',
            'top[8]', 'top[13]', 'top[21]', 'top[34]', 'top[55]',
            'bot[8]', 'bot[13]', 'bot[21]', 'bot[34]', 'bot[55]',
            'ema21 / ema34', 'ema34 / ema55', 'ema55 / ema89', 'ema89 / ema144',
            # RSI
            'rsi13', 'rsi21', 'rsi34', 'rsi55',
            'atr21', 'atr34', 'atr55', 'atr89', 'atr144',
            # chimoku
            'tenkanKijunBullishWeak', 'tenkanKijunBullishNeutral', 'tenkanKijunBullishStrong',
            'tenkanKijunBearishWeak', 'tenkanKijunBearishNeutral', 'tenkanKijunBearishStrong',
            'kijunPriceBullishWeak', 'kijunPriceBullishNeutral', 'kijunPriceBullishStrong',
            'kijunPriceBearishWeak', 'kijunPriceBearishNeutral', 'kijunPriceBearishStrong',
            'kumoBullish', 'kumoBearish',
            'senkouSpanBullishWeak', 'senkouSpanBullishNeutral', 'senkouSpanBullishStrong',
            'senkouSpanBearishWeak', 'senkouSpanBearishNeutral', 'senkouSpanBearishStrong',
        ]
        return names

    def getFeatures(self, opens, highs, lows, closes, volumes):
        """Build one feature row per bar, ordered to match getNames().

        NOTE(review): this calls self.ema and self.rsi, which are NOT defined
        on this class (the module-level rsi has a different signature) — they
        must be supplied by a subclass or patched in, otherwise this raises
        AttributeError. Confirm against the call site.
        """
        # Candle-shape statistics, each later normalised by its series mean.
        topShadows = [high - max(open, close) for open, high, close in zip(opens, highs, closes)]
        topShadowsMean = np.mean(topShadows)
        bodies = [abs(open - close) for open, close in zip(opens, closes)]
        bodiesMean = np.mean(bodies)
        bars = [abs(high - low) for high, low in zip(highs, lows)]
        barsMean = np.mean(bars)
        botShadows = [min(open, close) - low for open, low, close in zip(opens, lows, closes)]
        botShadowsMean = np.mean(botShadows)
        tops, bots = self.getTopsAndBots(highs, lows, closes)
        # Volume and price EMAs over a ladder of spans.
        ema5vs = self.ema(volumes, 5)
        ema8vs = self.ema(volumes, 8)
        ema20vs = self.ema(volumes, 20)
        ema21s = self.ema(closes, 21)
        ema34s = self.ema(closes, 34)
        ema55s = self.ema(closes, 55)
        ema89s = self.ema(closes, 89)
        ema144s = self.ema(closes, 144)
        rsi13s = self.rsi(closes, 13)
        rsi21s = self.rsi(closes, 21)
        rsi34s = self.rsi(closes, 34)
        rsi55s = self.rsi(closes, 55)
        atr21s = self.averageTrueRange(highs, lows, closes, 21)
        atr34s = self.averageTrueRange(highs, lows, closes, 34)
        atr55s = self.averageTrueRange(highs, lows, closes, 55)
        atr89s = self.averageTrueRange(highs, lows, closes, 89)
        atr144s = self.averageTrueRange(highs, lows, closes, 144)
        tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan = self.extractChiMoku(highs, lows, closes)
        # One row per bar; the Ichimoku flags are one-hot signal indicators.
        data = [
            [
                volume / ema20v, volume / ema8v, volume / ema5v,
                ema5v / ema20v, ema5v / ema8v,
                ema8v / ema20v,
                topShadow / topShadowsMean,
                body / bodiesMean,
                bar / barsMean,
                botShadow / botShadowsMean,
                top[0], top[1], top[2], top[3], top[4],
                bot[0], bot[1], bot[2], bot[3], bot[4],
                ema21 / ema34, ema34 / ema55, ema55 / ema89, ema89 / ema144,
                rsi13, rsi21, rsi34, rsi55,
                atr21, atr34, atr55, atr89, atr144,
                # TENKAN & KIJUN
                # weak bullish
                1 if tenkanSen > kijunSen and kijunSen < senkouSpanA else 0,
                # neutral bullish
                1 if tenkanSen > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
                # strong bullish
                1 if tenkanSen > kijunSen and kijunSen > senkouSpanA else 0,
                # weak bearish
                1 if tenkanSen < kijunSen and kijunSen > senkouSpanA else 0,
                # neutral bearish
                1 if tenkanSen < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
                # strong bearish
                1 if tenkanSen < kijunSen and kijunSen < senkouSpanA else 0,
                # KIJUN & PRICE
                # weak bullish
                1 if close > kijunSen and kijunSen < senkouSpanA else 0,
                # neutral bullish
                1 if close > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
                # strong bullish
                1 if close > kijunSen and kijunSen > senkouSpanA else 0,
                # weak bearish
                1 if close < kijunSen and kijunSen > senkouSpanA else 0,
                # neutral bearish
                1 if close < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
                # strong bearish
                1 if close < kijunSen and kijunSen < senkouSpanA else 0,
                # KUMO BREAKOUT
                # bullish
                1 if close > senkouSpanA else 0,
                # bearish
                1 if close < senkouSpanA else 0,
                # SENKOU SPAN
                # weak bullish
                1 if senkouSpanA > senkouSpanB and close < senkouSpanA else 0,
                # neutral bullish
                1 if senkouSpanA > senkouSpanB and senkouSpanA > close > senkouSpanB else 0,
                # strong bullish
                1 if senkouSpanA > senkouSpanB and close > senkouSpanA else 0,
                # weak bearish
                1 if senkouSpanA < senkouSpanB and close > senkouSpanA else 0,
                # neutral bearish
                1 if senkouSpanA < senkouSpanB and senkouSpanA < close < senkouSpanB else 0,
                # strong bearish
                1 if senkouSpanA < senkouSpanB and close < senkouSpanA else 0,
            ]
            for close,
                volume, ema5v, ema8v, ema20v,
                topShadow, body, bar, botShadow,
                top, bot,
                ema21, ema34, ema55, ema89, ema144,
                rsi13, rsi21, rsi34, rsi55,
                atr21, atr34, atr55, atr89, atr144,
                tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
            in zip(closes,
                   volumes, ema5vs, ema8vs, ema20vs,
                   topShadows, bodies, bars, botShadows,
                   tops, bots,
                   ema21s, ema34s, ema55s, ema89s, ema144s,
                   rsi13s, rsi21s, rsi34s, rsi55s,
                   atr21s, atr34s, atr55s, atr89s, atr144s,
                   tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
                   )
        ]
        return data
|
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _inner_ops as inner
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
    """Thin cell wrapping the AICPU StridedSlice operator with fixed bounds."""

    def __init__(self, begin, end, strides):
        super(Net, self).__init__()
        self.strided_slice = inner.StridedSliceAICPU()
        # Slice bounds are frozen at construction time.
        self.begin, self.end, self.strides = begin, end, strides

    def construct(self, input):
        # Delegate to the operator with the stored begin/end/strides tuples.
        return self.strided_slice(input, self.begin, self.end, self.strides)
# 3 x 2 x 3 test tensor for the strided-slice check below.
input_x = np.array([[[0, 1, 2], [3, 4, 5]],
                    [[6, 7, 8], [9, 10, 11]],
                    [[12, 13, 14], [15, 16, 17]]
                    ]).astype(np.float32)
# Slice spec: middle outer block, both rows, every second column.
begin = (1, 0, 0)
end = (2, 2, 3)
strides = (1, 1, 2)
def test_net():
    """Run the AICPU strided slice and verify the expected sub-tensor."""
    model = Net(begin, end, strides)
    result = model(Tensor(input_x))
    print(result.asnumpy())
    # Expected: input_x[1:2, 0:2, 0:3:2]
    assert np.all([[[6, 8], [9, 11]]] == result.asnumpy())
|
|
"""
Reading gif (lawnmover.gif)
"""
# Import Library
import cv2 as cv
import numpy as np
import os
# Absolute path to read
abs_path = os.path.dirname(os.path.dirname(__file__))
gif_path = os.path.join(abs_path, 'input/gif/lawnmover.gif')
# Read a gif with video capture
gif = cv.VideoCapture(gif_path)
frame_counter = 0
while True:
    check, frame = gif.read()
    # BUG FIX: a failed read() returns (False, None); showing the None frame
    # crashed imshow. Rewind and retry instead.
    if not check:
        frame_counter = 0
        gif.set(cv.CAP_PROP_POS_FRAMES, 0)
        continue
    frame_counter += 1
    cv.imshow('Capture', frame)
    # Loop the gif: rewind once the last frame has been shown.
    if frame_counter == gif.get(cv.CAP_PROP_FRAME_COUNT):
        frame_counter = 0
        gif.set(cv.CAP_PROP_POS_FRAMES, 0)
    # capture a key in every 25 millisecond, also helps to slow down the frames
    key = cv.waitKey(25)
    # quit with pressing "q"
    if key == ord('q'):
        break
# Looping video reference
# https://stackoverflow.com/questions/10057234/opencv-how-to-restart-a-video-when-it-finishes/19009639
gif.release()
cv.destroyAllWindows()
|
|
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def matrix_networks_plot(M, network_colors, dpi = 300, colorbar = False, group = None, ses = None, suffix = None, out_dir = None):
    """Create and optionally save a matrix heatmap with network colour labels.

    Args:
        M: connectivity matrix to plot.
        network_colors: per-row/column colour labels for the network strips.
        dpi: resolution used when saving.
        colorbar: whether to show the main colorbar.
        group, ses: identifiers used in the title and output filename.
        suffix: optional extra filename suffix.
        out_dir: directory prefix to save into; if None the figure is not saved.
    """
    small = 15
    medium = 15
    bigger = 15
    plt.rc('font', size=small)          # controls default text sizes
    plt.rc('axes', titlesize=small)     # fontsize of the axes title
    plt.rc('axes', linewidth=2.2)
    plt.rc('axes', labelsize=medium)    # fontsize of the x and y labels
    plt.rc('xtick', labelsize=small)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=small)    # fontsize of the tick labels
    plt.rc('legend', fontsize=small)    # legend fontsize
    plt.rc('figure', titlesize=bigger)  # fontsize of the figure title
    plt.rc('lines', linewidth=2.2, color='gray')
    g = sns.clustermap(M,
                       cmap="RdBu_r",
                       row_cluster=False,
                       col_cluster=False,
                       row_colors=network_colors,
                       col_colors=network_colors,
                       linewidths=0,
                       yticklabels=False,
                       xticklabels=False,
                       vmax=0.8)
    # Adjust the position of the main colorbar for the heatmap
    g.cax.set_position([.97, .2, .03, .45])
    g.fig.suptitle(f'{group}: {ses}', size=20)
    g.cax.set_visible(colorbar)
    if out_dir is None:
        pass  # figure not saved
    else:
        if suffix is not None:
            # BUG FIX: the original interpolated the misspelled name `suffux`,
            # raising NameError whenever a suffix was supplied.
            g.savefig(f'{out_dir}{group}_{ses}_{suffix}.pdf', dpi=dpi)
        else:
            g.savefig(f'{out_dir}{group}_{ses}.pdf', dpi=dpi)
def swarm_box_plot(x, y, hue, data):
    """Overlay a swarm plot on an outline-only box plot, grouped by `hue`."""
    plt.style.use('seaborn-white')
    plt.rcParams['font.family'] = 'Helvetica'
    plt.figure(figsize=(8, 6))
    # Points first, then the hollow boxes drawn over the same axes.
    axes = sns.swarmplot(x=x, y=y, hue=hue, data=data, dodge=True, alpha=0.8, size=8)
    axes = sns.boxplot(
        x=x, y=y, hue=hue, data=data, dodge=True,
        showcaps=False, boxprops={'facecolor': 'None'},
        showfliers=False,
    )
    plt.xticks(np.arange(4), ('1', '2', '3', '4'))
    axes.set(xlabel='Scan')
    axes.tick_params(axis='both', color='black', length=5, width=2)
    return axes
def swarm_box_plot_integ(x, hue, data, net1, net2):
    """Swarm + hollow box plot of `net1` integration, restricted to rows
    whose 'Network' column equals `net2`."""
    plt.style.use('seaborn-white')
    plt.rcParams['font.family'] = 'Helvetica'
    small = 25
    medium = 25
    bigger = 25
    plt.rc('font', size=small)          # controls default text sizes
    plt.rc('axes', titlesize=small)     # fontsize of the axes title
    plt.rc('axes', linewidth=2.2)
    plt.rc('axes', labelsize=medium)    # fontsize of the x and y labels
    plt.rc('xtick', labelsize=small)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=small)    # fontsize of the tick labels
    plt.rc('legend', fontsize=small)    # legend fontsize
    plt.rc('figure', titlesize=bigger)  # fontsize of the figure title
    plt.rc('lines', linewidth=2.2, color='gray')
    plt.figure(figsize=(8, 6))
    subset = data[data['Network'] == net2]
    metric = f'{net1}_integration'
    axes = sns.swarmplot(x=x, y=metric, hue=hue, data=subset, dodge=True, alpha=0.8, size=8)
    axes = sns.boxplot(
        x=x, y=metric, hue=hue, data=subset, dodge=True,
        showcaps=False, boxprops={'facecolor': 'None'},
        showfliers=False,
    )
    plt.xticks(np.arange(4), ('Naive', 'Early', 'Middle', 'Late'))
    axes.set(xlabel='Scan')
    # Keep the legend outside the axes.
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    axes.tick_params(axis='both', color='black', length=5, width=2)
    return axes
def matrix_networks_slope_plot(M, network_colors, dpi=300, colorbar=False, group=None, suffix=None, out_dir=None, vmin = -0.08, vmax = 0.08):
    """Create and optionally save a slope-matrix heatmap with network labels.

    Args:
        M: slope matrix to plot.
        network_colors: per-row/column colour labels for the network strips.
        dpi: resolution used when saving.
        colorbar: whether to show the main colorbar.
        group: identifier used in the title and output filename.
        suffix: optional extra filename suffix.
        out_dir: directory prefix to save into; if None the figure is not saved.
        vmin, vmax: colour limits for the heatmap.
    """
    small = 15
    medium = 15
    bigger = 15
    plt.rc('font', size=small)          # controls default text sizes
    plt.rc('axes', titlesize=small)     # fontsize of the axes title
    plt.rc('axes', linewidth=2.2)
    plt.rc('axes', labelsize=medium)    # fontsize of the x and y labels
    plt.rc('xtick', labelsize=small)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=small)    # fontsize of the tick labels
    plt.rc('legend', fontsize=small)    # legend fontsize
    plt.rc('figure', titlesize=bigger)  # fontsize of the figure title
    plt.rc('lines', linewidth=2.2, color='gray')
    g = sns.clustermap(M,
                       cmap="RdBu_r",
                       row_cluster=False,
                       col_cluster=False,
                       row_colors=network_colors,
                       col_colors=network_colors,
                       linewidths=0,
                       yticklabels=False,
                       xticklabels=False, vmin=vmin, vmax=vmax)
    # Adjust the position of the main colorbar for the heatmap
    g.cax.set_position([.97, .2, .03, .45])
    g.fig.suptitle(f'{group}', size=20)
    g.cax.set_visible(colorbar)
    if out_dir is None:
        pass  # figure not saved
    else:
        # BUG FIX: the original referenced the undefined name `ses` and the
        # misspelled `suffux`, so saving always raised NameError. This
        # function has no session argument, so the filename uses group
        # (and suffix when given) only.
        if suffix is not None:
            g.savefig(f'{out_dir}{group}_{suffix}.pdf', dpi=dpi)
        else:
            g.savefig(f'{out_dir}{group}.pdf', dpi=dpi)
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from sklearn.metrics import f1_score, average_precision_score, confusion_matrix
from sklearn.cluster.bicluster import SpectralCoclustering
import numpy as np
import csv
from loss_func import eszsl_loss_func, sje_loss_func, devise_loss_func, ale_loss_func
def rearrange_predicates(predicates, predicate_groups, groups):
    """Return indices into `predicates` reordered so that the members of each
    group (in `groups` order) are contiguous."""
    order = []
    for grp in groups:
        order.extend(predicates.index(name) for name in predicate_groups[grp])
    return order
def compute_multilabel_metrics(y_true, y_pred):
    """Micro-averaged average precision over all groups.

    Both arguments are dicts keyed by group; values are stacked (in the same
    key order) before scoring.
    """
    gt, pred = [], []
    for key in y_true:
        gt.append(y_true[key])
        pred.append(y_pred[key])
    return average_precision_score(np.hstack(gt), np.hstack(pred), average='micro')
def compute_acc(y_pred, y_true, prior_matrix):
    """Count correct top-1 class predictions in a batch (CUDA tensors)."""
    prior = torch.cuda.FloatTensor(prior_matrix)
    # Project attribute predictions onto class priors, then softmax over classes.
    class_scores = F.softmax(torch.mm(y_pred, prior), dim=1)
    predicted = torch.max(class_scores, 1)[1]
    return torch.sum(predicted == y_true.data).float()
def class_averaged_top1_acc(y_true, y_pred, prior_matrix):
    """Mean per-class top-1 accuracy of predictions projected onto class priors.

    Divides by the total number of classes (prior_matrix.shape[1]), so classes
    absent from y_true contribute zero.
    """
    class_scores = np.matmul(y_pred, prior_matrix)
    predicted_classes = class_scores.argmax(axis=1)
    cm = confusion_matrix(y_true, predicted_classes)
    # Row-normalise to per-class recall; the diagonal is per-class accuracy.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    return cm.diagonal().sum() / prior_matrix.shape[1]
def get_paths_from_file(filepath):
    """Read a CSV listing and return column 1 of every row after the header."""
    with open(filepath, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # flushing first row which has the directory
        return [row[1] for row in reader]
def get_indices(predicates, objects):
    """Map each object to its index in `predicates`, or -1 when absent."""
    # Loop variable renamed to avoid shadowing the builtin `object`.
    return [predicates.index(obj) if obj in predicates else -1 for obj in objects]
def get_corr_weights(predicates, iou_matrix, task_p, adv_p):
    """Max and column-averaged correlation gap between a task group and an
    adversarial group, read from the IoU matrix."""
    rows = [predicates.index(p) if p in predicates else -1 for p in task_p]
    cols = [predicates.index(p) if p in predicates else -1 for p in adv_p]
    delta_corr = iou_matrix[rows, :][:, cols]
    # Average delta corr of the adversarial task w.r.t. all tasks in the group.
    return np.max(delta_corr), np.average(delta_corr, axis=0)
def find_class_balanced_wts(train_img_names, attr_labels_dict):
    """Per-attribute negative/positive label counts over the training images.

    Args:
        train_img_names: ordered image names.
        attr_labels_dict: name -> length-64 binary attribute vector.

    Returns:
        dict: attr_index -> {0: negative_count, 1: positive_count}.
    """
    train_att_mat = np.zeros([len(train_img_names), 64])
    for i, name in enumerate(train_img_names):
        train_att_mat[i] = attr_labels_dict[name]
    # PERF: the original recomputed np.sum(train_att_mat, axis=0) twice per
    # attribute (128 full-matrix reductions); compute it once.
    pos_counts = np.sum(train_att_mat, axis=0)
    total = len(train_img_names)
    return {i: {0: total - pos_counts[i], 1: pos_counts[i]} for i in range(64)}
def diff_corr(corr_train, corr_test):
    """Signed train-minus-test correlation gap, clipped to be non-negative.

    The gap is oriented by the sign of the training correlation so that a
    shrinking correlation (in magnitude) produces a positive value.
    """
    gap = np.sign(corr_train) * (corr_train - corr_test)
    return gap.clip(0, np.inf)
def create_spectral_groups(num_clusters, random_state, train_img_names, img2att, rept_img_dict, test_prior, predicates):
    """Cluster predicates by spectral co-clustering of the train/test
    correlation gap.

    Returns (group_dict, dis_corr): a mapping 'g_<cluster>' -> list of
    predicate names, and the (jittered) dissimilarity matrix that was fit.

    NOTE(review): `SpectralCoclustering` is imported from
    `sklearn.cluster.bicluster`, a module removed from modern scikit-learn
    (now `sklearn.cluster.SpectralCoclustering`) — confirm pinned version.
    """
    # Assemble the per-image attribute matrix, expanding repeated images
    # ('<name>_1', '<name>_2', ...) into consecutive rows.
    train_att_mat=np.zeros([len(train_img_names), 64])
    count=0
    for img_name in set(train_img_names):
        if rept_img_dict[img_name]!=0:
            for j in range(rept_img_dict[img_name]):
                train_att_mat[count+j]=img2att[img_name+'_'+str(j+1)]
            count+=rept_img_dict[img_name]
        else:
            train_att_mat[count]=img2att[img_name+'_1']
            count+=1
    corr_train = np.corrcoef(train_att_mat.transpose())
    # Constant attributes yield NaN correlations; treat them as uncorrelated.
    nans = np.isnan(corr_train)
    corr_train[nans] = 0
    corr_test = np.corrcoef(test_prior)
    nans = np.isnan(corr_test)
    corr_test[nans] = 0
    dis_corr = diff_corr(corr_train, corr_test)
    # Small random jitter avoids degenerate all-zero rows during clustering;
    # note this makes the result non-deterministic (np.random is unseeded here).
    dis_corr += 0.0001*np.random.rand(len(corr_train), len(corr_train))
    model = SpectralCoclustering(n_clusters=num_clusters, random_state=random_state)
    model.fit(dis_corr)
    # Bucket predicate names by their assigned row cluster.
    group_dict = {}
    for i,val in enumerate(model.row_labels_):
        if 'g_' + str(val) not in group_dict:
            group_dict['g_' + str(val)] = [predicates[i]]
        else:
            group_dict['g_' + str(val)].append(predicates[i])
    return group_dict, dis_corr
def init_weights(m):
    """Initialise a module in place: Xavier-uniform weight and zero bias for
    Linear layers; unit weight and zero bias for BatchNorm1d layers."""
    if isinstance(m, nn.BatchNorm1d):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        nn.init.zeros_(m.bias)
def train_epoch(model, training_generator, loss_weights, optimizer, device, loss_dict, zero_shot, prior_matrix, args):
    """Run one training epoch.

    Per batch: forward pass, one loss per group in `loss_dict`, weighted sum
    (plain sum when `loss_weights` is falsy), backprop, optimizer step.
    Group naming conventions: keys containing '_x_' are adversarial branches
    (excluded from metric bookkeeping); 'conc_l' is the zero-shot class head
    (raw logits, labels stacked with hstack); all other groups are sigmoid
    multi-label heads (stacked with vstack).

    Returns (model, mean batch loss, epoch_scores) where epoch_scores has
    'acc' (zero-shot) or 'mAP' (multi-label).
    """
    epoch_scores = {}
    if zero_shot:
        # Swap in the requested zero-shot compatibility loss for the class head.
        if args.zsl_loss_func=='eszsl': loss_dict['conc_l'] = eszsl_loss_func(prior_matrix)
        if args.zsl_loss_func=='sje': loss_dict['conc_l'] = sje_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='devise': loss_dict['conc_l'] = devise_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='ale': loss_dict['conc_l'] = ale_loss_func(prior_matrix, args.margin)
    y_true = {}
    y_pred = {}
    model.train()
    runningLoss = 0
    for i, (inputs, labels) in enumerate(training_generator):
        loss = {}
        loss_all_groups = 0.0
        inputs = inputs.float().to(device)
        # Initialize gradients to zero
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            # Feed-forward
            output_dict = model(inputs)
            for group in loss_dict:
                if '_x_' not in group:
                    # Accumulate targets/predictions for the epoch metrics.
                    ground_truth = labels[group].detach().cpu().numpy()
                    if group=='conc_l':
                        prediction = output_dict[group].detach().cpu().numpy()
                    else:
                        prediction = torch.sigmoid(output_dict[group]).detach().cpu().numpy()
                    if group not in y_true:
                        y_true[group] = ground_truth
                    else:
                        if group=='conc_l':
                            y_true[group] = np.hstack([y_true[group], ground_truth])
                        else:
                            y_true[group] = np.vstack([y_true[group], ground_truth])
                    if group not in y_pred:
                        y_pred[group] = prediction
                    else:
                        y_pred[group] = np.vstack([y_pred[group], prediction])
                labels[group] = labels[group].to(device)
                # Computing loss per group
                if '_x_' in group or group=='conc_l':
                    loss[group] = loss_dict[group](output_dict[group], labels[group])
                else:
                    loss[group] = loss_dict[group](torch.sigmoid(output_dict[group]), labels[group])
                if loss_weights:
                    loss_all_groups += loss[group]*loss_weights[group]
            if not loss_weights: #baseline experiment: unweighted sum of all groups
                loss_all_groups = sum(loss.values())
        # accumulate loss
        runningLoss += loss_all_groups.item()
        # Backpropagate loss and compute gradients
        loss_all_groups.backward()
        # Update the network parameters
        optimizer.step()
    if zero_shot:
        epoch_scores['acc'] = class_averaged_top1_acc(y_true=y_true['conc_l'], y_pred=y_pred['conc_l'], prior_matrix=prior_matrix)
    else:
        epoch_scores['mAP'] = compute_multilabel_metrics(y_true=y_true, y_pred=y_pred)
    return(model, runningLoss/len(training_generator), epoch_scores)
def calculate_ec_wt(group_class_scores):
    """Entropy-based conditioning weight per sample.

    Computes exp(mean_j p_ij * log(p_ij + 1e-7)) for every row i; confident
    (low-entropy) rows get weights closer to 1.
    """
    neg_entropy = np.array([row * np.log(row + 1e-7) for row in group_class_scores])
    return np.exp(neg_entropy.mean(axis=1))
def train_epoch_ec(model, training_generator, loss_weights, optimizer, device, loss_dict, adv_dict, zero_shot, prior_matrix, args):
    """Run one training epoch with entropy-conditioned adversarial weighting.

    Like train_epoch, but each adversarial branch ('_x_' groups) is weighted
    per sample by calculate_ec_wt applied to its parent group's predictions
    (branches are matched via adv_dict entries whose 'parent' equals
    'latent_' + group). Non-adversarial groups use the fixed loss_weights.

    Returns (model, mean batch loss, epoch_scores) with 'acc' (zero-shot)
    or 'mAP' (multi-label).
    """
    epoch_scores = {}
    if zero_shot:
        # Swap in the requested zero-shot compatibility loss for the class head.
        if args.zsl_loss_func=='eszsl': loss_dict['conc_l'] = eszsl_loss_func(prior_matrix)
        if args.zsl_loss_func=='sje': loss_dict['conc_l'] = sje_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='devise': loss_dict['conc_l'] = devise_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='ale': loss_dict['conc_l'] = ale_loss_func(prior_matrix, args.margin)
    y_true = {}
    y_pred = {}
    model.train()
    runningLoss = 0
    for i, (inputs, labels) in enumerate(training_generator):
        loss = {}
        cond_wt_dict = {}
        loss_all_groups = 0.0
        inputs = inputs.float().to(device)
        # Initialize gradients to zero
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            # Feed-forward
            output_dict = model(inputs)
            # Pass 1: collect predictions and derive per-sample conditioning
            # weights for each adversarial branch from its parent group.
            for group in output_dict:
                if '_x_' not in group:
                    if group=='conc_l':
                        prediction = output_dict[group].detach().cpu().numpy()
                    else:
                        prediction = torch.sigmoid(output_dict[group]).detach().cpu().numpy()
                    cond_wts_per_sample = calculate_ec_wt(prediction)
                    for adv_branch in adv_dict:
                        if adv_branch['parent'] == 'latent_'+group:
                            cond_wt_dict[adv_branch['node_name']] = torch.cuda.FloatTensor(loss_weights[adv_branch['node_name']]*cond_wts_per_sample)
                    if group not in y_pred:
                        y_pred[group] = prediction
                    else:
                        y_pred[group] = np.vstack([y_pred[group], prediction])
            # Pass 2: per-group losses; adversarial groups get the per-sample
            # conditioning weights, the rest the fixed loss_weights.
            for group in loss_dict:
                if '_x_' not in group:
                    ground_truth = labels[group].detach().cpu().numpy()
                    if group not in y_true:
                        y_true[group] = ground_truth
                    else:
                        if group=='conc_l':
                            y_true[group] = np.hstack([y_true[group], ground_truth])
                        else:
                            y_true[group] = np.vstack([y_true[group], ground_truth])
                labels[group] = labels[group].to(device)
                # Computing loss per group
                if '_x_' in group or group=='conc_l':
                    loss[group] = loss_dict[group](output_dict[group], labels[group])
                else:
                    loss[group] = loss_dict[group](torch.sigmoid(output_dict[group]), labels[group])
                if '_x_' in group:
                    adv_loss = loss[group]*cond_wt_dict[group]
                    loss_all_groups += adv_loss.mean()
                else:
                    loss_all_groups += loss[group]*loss_weights[group]
        # accumulate loss
        runningLoss += loss_all_groups.item()
        # Backpropagate loss and compute gradients
        loss_all_groups.backward()
        # Update the network parameters
        optimizer.step()
    if zero_shot:
        epoch_scores['acc'] = class_averaged_top1_acc(y_true=y_true['conc_l'], y_pred=y_pred['conc_l'], prior_matrix=prior_matrix)
    else:
        epoch_scores['mAP'] = compute_multilabel_metrics(y_true=y_true, y_pred=y_pred)
    return(model, runningLoss/len(training_generator), epoch_scores)
def val_epoch(model, validation_generator, loss_dict, zero_shot, prior_matrix, args, device):
    """Run one validation epoch (no gradients, no parameter updates).

    Mirrors train_epoch's bookkeeping: per-group losses are summed unweighted,
    and predictions/targets are accumulated for 'acc' (zero-shot) or 'mAP'
    (multi-label) epoch metrics.

    Returns (mean batch loss, epoch_scores).
    """
    epoch_scores = {}
    if zero_shot:
        # Swap in the requested zero-shot compatibility loss for the class head.
        if args.zsl_loss_func=='eszsl': loss_dict['conc_l'] = eszsl_loss_func(prior_matrix)
        if args.zsl_loss_func=='sje': loss_dict['conc_l'] = sje_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='devise': loss_dict['conc_l'] = devise_loss_func(prior_matrix, args.margin)
        if args.zsl_loss_func=='ale': loss_dict['conc_l'] = ale_loss_func(prior_matrix, args.margin)
    y_true = {}
    y_pred = {}
    model.eval()
    runningLoss = 0.0
    for i, (inputs, labels) in enumerate(validation_generator):
        loss = {}
        loss_all_groups = 0.0
        inputs = inputs.float().to(device)
        with torch.set_grad_enabled(False):
            # Feed-forward
            output_dict = model(inputs)
            for group in loss_dict:
                if '_x_' not in group:
                    # Accumulate targets/predictions for the epoch metrics.
                    ground_truth = labels[group].cpu().numpy()
                    if group=='conc_l':
                        prediction = output_dict[group].cpu().numpy()
                    else:
                        prediction = torch.sigmoid(output_dict[group]).cpu().numpy()
                    if group not in y_true:
                        y_true[group] = ground_truth
                    else:
                        if group=='conc_l':
                            y_true[group] = np.hstack([y_true[group], ground_truth])
                        else:
                            y_true[group] = np.vstack([y_true[group], ground_truth])
                    if group not in y_pred:
                        y_pred[group] = prediction
                    else:
                        y_pred[group] = np.vstack([y_pred[group], prediction])
                labels[group] = labels[group].to(device)
                # Adversarial branches are reduced with .mean() (per-sample losses).
                if '_x_' in group or group=='conc_l':
                    loss[group] = loss_dict[group](output_dict[group], labels[group]).mean()
                else:
                    loss[group] = loss_dict[group](torch.sigmoid(output_dict[group]), labels[group])
        # Unweighted sum over all groups for the validation loss.
        loss_all_groups = sum(loss.values()).item()
        runningLoss += loss_all_groups
    if zero_shot:
        epoch_scores['acc'] = class_averaged_top1_acc(y_true=y_true['conc_l'], y_pred=y_pred['conc_l'], prior_matrix=prior_matrix)
    else:
        epoch_scores['mAP'] = compute_multilabel_metrics(y_true=y_true, y_pred=y_pred)
    return(runningLoss/len(validation_generator), epoch_scores)
def test_model(model, test_generator, device, loss_dict, zero_shot, prior_matrix):
    """Evaluate the model on the test set; no losses are computed.

    Accumulates per-group predictions/targets (same '_x_'/'conc_l'
    conventions as train_epoch) and returns epoch_scores with 'acc'
    (zero-shot) or 'mAP' (multi-label).
    """
    epoch_scores = {}
    if zero_shot:
        # Only registers the class head key; the loss itself is never called here.
        loss_dict['conc_l'] = eszsl_loss_func(prior_matrix)
    y_true = {}
    y_pred = {}
    model.eval()
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_generator):
            inputs = inputs.float().to(device)
            # Feed-forward
            output_dict = model(inputs)
            for group in loss_dict:
                if '_x_' not in group:
                    ground_truth = labels[group].cpu().numpy()
                    if group=='conc_l':
                        prediction = output_dict[group].cpu().numpy()
                    else:
                        prediction = torch.sigmoid(output_dict[group]).cpu().numpy()
                    if group not in y_true:
                        y_true[group] = ground_truth
                    else:
                        if group=='conc_l':
                            y_true[group] = np.hstack([y_true[group], ground_truth])
                        else:
                            y_true[group] = np.vstack([y_true[group], ground_truth])
                    if group not in y_pred:
                        y_pred[group] = prediction
                    else:
                        y_pred[group] = np.vstack([y_pred[group], prediction])
    if zero_shot:
        epoch_scores['acc'] = class_averaged_top1_acc(y_true=y_true['conc_l'], y_pred=y_pred['conc_l'], prior_matrix=prior_matrix)
    else:
        epoch_scores['mAP'] = compute_multilabel_metrics(y_true=y_true, y_pred=y_pred)
    return epoch_scores
|
|
import re
import collections
import operator
import numpy as np
import scipy.sparse as sp
from .osutils import get_linewise
# some helper functions for easy access to the libsvmformat
#
# <label> <index1>:<value1> <index2>:<value2> ... <indexn>:<valuen> # comment
#
def create_libsvmline(label, features, comment=None):
    """
    Build one libsvm-format line from an integer label, an ordered dict of
    features and an optional trailing comment.

    NOTE(review): indices are renumbered densely from the dict *order*
    (enumerate), not taken from its keys — confirm callers always pass
    densely keyed dicts, otherwise sparse keys are silently remapped.
    """
    assert(isinstance(label, int))
    assert(isinstance(features, collections.OrderedDict))
    # Zero-valued features are omitted, per the sparse libsvm convention.
    pairs = [
        "%d:%g" % (pos + 1, val)
        for pos, val in enumerate(features.values())
        if val != 0
    ]
    features_str = " ".join(pairs)
    if comment is None:
        # libsvm line without comment
        return "%d %s\n" % (label, features_str)
    # libsvm line with trailing comment
    return "%d %s # %s\n" % (label, features_str, comment)
def parse_libsvm_format(line):
    """
    Parse one libsvm-format line into (label, features, comment).

    Returns the integer label, an OrderedDict of (feature_no, value)
    pairs sorted by feature number, and the comment string after '#'
    (or None when absent).

    Fixes over the previous version:
    - the value pattern now accepts scientific notation such as ``1e-05``,
      which ``create_libsvmline``'s ``%g`` formatting can emit (the old
      regex silently truncated ``1e-05`` to ``1.0``);
    - a label-only line no longer raises ValueError.
    """
    # split off the optional trailing "# comment"
    body, sep, tail = line.partition("#")
    comment = tail.strip() if sep else None
    # label is the first whitespace-delimited token; the rest are features
    label_str, _, feature_str = body.strip().partition(" ")
    # index:value pairs; value accepts ints, decimals and exponent notation
    pair_re = re.compile(r"(\d+):([+-]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?)")
    features = collections.OrderedDict(
        sorted(
            ((int(idx), float(val)) for idx, val in pair_re.findall(feature_str)),
            key=operator.itemgetter(0)
        )
    )
    return int(label_str), features, comment
def get_libsvm_format(libsvmfile):
    """
    Parse a libsvm-format file into (y, X, comments): the int label
    vector, a scipy CSR feature matrix and the per-line comments.
    """
    parsed = list(zip(*get_linewise(libsvmfile, parse_libsvm_format)))
    labels, features, comments = parsed
    # flatten the per-line feature dicts into COO triplets
    rows, cols, vals = [], [], []
    for row_no, feature_dict in enumerate(features):
        for col_no, value in feature_dict.items():
            rows.append(row_no)
            cols.append(col_no)
            vals.append(value)
    X = sp.csr_matrix((vals, (rows, cols)))
    y = np.array(labels, "i")
    return y, X, comments
def parse_libsvm_pred_format(line):
    """
    Parse one prediction line of the form "<label>,<score>" into a
    (label, score) tuple; score is NaN when the line holds only a label.
    """
    fields = line.split(",")
    score = float(fields[1]) if len(fields) == 2 else np.nan
    return (int(fields[0]), score)
def get_libsvm_pred(libsvmfile):
    """
    Read a libsvm prediction file and return (labels, scores) as numpy
    arrays of int32 and float32 respectively.
    """
    parsed = get_linewise(libsvmfile, parse_libsvm_pred_format)
    pred_labels, pred_scores = zip(*parsed)
    return np.array(pred_labels, "i"), np.array(pred_scores, "f")
def zip_features(features, feature_names):
    """
    Replace 1-based feature indices by their names: given the OrderedDict
    of (feature_no, value) pairs from a parsed libsvm line and a list of
    feature names, return an OrderedDict of (name, value) pairs.
    """
    named = collections.OrderedDict()
    for feature_no, value in features.items():
        named[feature_names[feature_no - 1]] = value
    return named
|
|
import configparser
import json
import numpy as np
import os
from path import Path
from cv2 import imread
from tqdm import tqdm
class test_framework_stillbox(object):
    """Indexable test harness over a stillbox scene directory.

    Each item bundles the target frame, its reference frames and the
    target's path; images are loaded as float32 arrays.
    """
    def __init__(self, root, test_files, seq_length=3, min_depth=1e-3, max_depth=80, step=1):
        self.root = root
        self.min_depth, self.max_depth = min_depth, max_depth
        self.gt_files, self.img_files, self.displacements = read_scene_data(root, test_files, seq_length, step)

    def __getitem__(self, i):
        tgt_path, ref_paths = self.img_files[i]

        def _load(p):
            return imread(p).astype(np.float32)

        return {
            'tgt': _load(tgt_path),
            'ref': [_load(p) for p in ref_paths],
            'path': tgt_path,
        }

    def __len__(self):
        return len(self.img_files)
def get_displacements(scene, index, ref_indices):
    """Return the camera displacement between frame *index* and each frame
    in *ref_indices*, assuming the constant speed stored in the scene."""
    # scalar speed magnitude, rounded like the dataset metadata
    speed = np.around(np.linalg.norm(scene['speed']), decimals=3)
    assert all(0 <= i < scene['length'] for i in ref_indices), str(ref_indices)
    step_time = scene['time_step']
    return [speed * step_time * abs(index - i) for i in ref_indices]
def read_scene_data(data_root, test_list, seq_length=3, step=1):
    """Build the (target, reference) image-path pairs for every file in
    *test_list*, using the scene length stored in seqinfo.ini.

    Returns (None, im_files, None) so the caller can unpack it like
    (gt_files, img_files, displacements); ground truth and displacements
    are not produced here.
    """
    config = configparser.ConfigParser()
    config.read(os.path.join(data_root, 'seqinfo.ini'))
    im_files = []
    # how many frames around the current (tgt) frame should be taken into account by the network
    demi_length = (seq_length - 1) // 2
    # reference-frame offsets, e.g. [-1, 1] for seq_length=3, step=1
    shift_range = [step*i for i in list(range(-demi_length, 0)) + list(range(1, demi_length + 1))]
    for index, sample in enumerate(tqdm(test_list)):
        if os.path.isfile(sample):
            # clamp reference indices to [0, seqLength - 1]
            # NOTE(review): indices are clamped against the scene length from
            # seqinfo.ini but used to index test_list -- confirm test_list
            # always spans the whole scene
            capped_indices_range = list(map(lambda x: min(max(0, index + x), int(config['Sequence']['seqLength']) - 1), shift_range))
            ref_imgs_path = [test_list[ref_index] for ref_index in capped_indices_range]
            im_files.append([sample, ref_imgs_path])
        else:
            print('{} missing'.format(sample))
    return None, im_files, None
def generate_mask(gt_depth, min_depth, max_depth):
    """Boolean mask of pixels whose ground-truth depth lies strictly in
    (min_depth, max_depth), restricted to the central region obtained by
    cropping a 5% border on every side (truncated to integer pixels)."""
    in_range = np.logical_and(gt_depth > min_depth, gt_depth < max_depth)
    height, width = gt_depth.shape
    top, bottom = int(0.05 * height), int(0.95 * height)
    left, right = int(0.05 * width), int(0.95 * width)
    border = np.zeros(in_range.shape)
    border[top:bottom, left:right] = 1
    return np.logical_and(in_range, border)
|
|
# --------------
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Code starts here
# NOTE(review): `path` is injected by the hosting (notebook) environment,
# it is not defined anywhere in this file
data = pd.read_csv(path)
print(data.shape)
print(data.describe())
# drop the identifier column -- it carries no predictive information
data.drop(columns = "Serial Number", axis = 1, inplace = True)
print(data.shape)
# code ends here
# --------------
#Importing header files
from scipy.stats import chi2_contingency
import scipy.stats as stats
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # critical value for 95% confidence
                        df = 11) # 11 degrees of freedom for the contingency table
print("Critical Value: ", critical_value)
# Code starts here
# observed contingency table: counts of each Morningstar return/risk rating
return_rating = data.morningstar_return_rating.value_counts()
risk_rating = data.morningstar_risk_rating.value_counts()
observed = pd.concat([return_rating.transpose(), risk_rating.transpose()], axis=1, keys= ['return','risk'])
# chi-square test of independence between the two rating distributions;
# compare the statistic against the precomputed critical value
chi2, p, dof, ex = chi2_contingency(observed)
if chi2 > critical_value:
    print("Reject the Null Hypothesis")
else:
    print("Fail to reject the Null Hypothesis")
# Code ends here
# --------------
# Code starts here
# absolute pairwise correlations, flattened and sorted so the strongest
# relationships surface first
correlation = data.corr().abs()
us_correlation = correlation.unstack()
us_correlation = us_correlation.sort_values(ascending = False)
#print(us_correlation.head())
# keep pairs with |r| > 0.75, excluding the trivial self-correlations (r == 1)
max_correlated = us_correlation[(us_correlation > 0.75) & (us_correlation != 1)]
print("Highly correlated Features: \n", max_correlated)
# drop one column from each highly-correlated pair found above
data.drop(columns = ["morningstar_rating", "portfolio_stocks", "category_12", "sharpe_ratio_3y"], axis = 1, inplace = True)
print("Shape of the data after dropping the highly correlated features: ", data.shape)
# code ends here
# --------------
# Code starts here
# stacked boxplots to eyeball outliers in two skewed columns
fig, (ax_1, ax_2) = plt.subplots(nrows=2)
data[["price_earning"]].boxplot(ax=ax_1)
ax_1.set_title("Price Earning")
data[["net_annual_expenses_ratio"]].boxplot(ax=ax_2)
ax_2.set_title("Net Annual Expenses Ratio")
fig.tight_layout()
plt.show()
# code ends here
# --------------
# import libraries
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score,mean_squared_error
from math import sqrt
# Code starts here
# predict the AAA-bond allocation from the remaining features
X = data.drop(columns = "bonds_aaa", axis = 1).copy()
y = data.bonds_aaa
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 3)
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# report RMSE on the held-out split
rmse = sqrt(mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: ", rmse)
# Code ends here
# --------------
# import libraries
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import Ridge,Lasso
# regularization parameters for grid search
ridge_lambdas = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6, 10, 30, 60]
lasso_lambdas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1]
# Code starts here
# grid-search the ridge regularization strength, then score on the test split
ridge_model = Ridge()
ridge_grid = GridSearchCV(estimator=ridge_model, param_grid=dict(alpha=ridge_lambdas))
ridge_grid.fit(X_train, y_train)
ridge_pred = ridge_grid.predict(X_test)
ridge_rmse = np.sqrt(mean_squared_error(ridge_pred, y_test))
print("Ridge RMSE: ", ridge_rmse)
# same search for the lasso model
lasso_model = Lasso()
lasso_grid = GridSearchCV(estimator=lasso_model, param_grid=dict(alpha=lasso_lambdas))
lasso_grid.fit(X_train, y_train)
lasso_pred = lasso_grid.predict(X_test)
lasso_rmse = np.sqrt(mean_squared_error(lasso_pred, y_test))
print("Lasso RMSE: ", lasso_rmse)
# Code ends here
|
|
import os
import torch
import torch.utils.data
import pandas as pd
import numpy as np
class LoadDataset(torch.utils.data.Dataset):
    """Dataset over a CSV whose columns are [label, hash, feature, ...].

    Column 0 is the class label, column 1 a sample hash, and every
    remaining column an integer feature value.
    """
    def __init__(self, dataset_path):
        """
        Args:
            dataset_path (string): path to dataset file
        """
        print("\nLoading datasets...")
        self.df = pd.read_csv(dataset_path)
        # distinct class labels, in order of first appearance
        self.labels_unique = self.df.iloc[:, 0].unique().tolist()
        # fix: the original line was missing its closing parenthesis (SyntaxError)
        print("\n{}\n".format(self.df.iloc[:, 0].value_counts()))

    def __getitem__(self, index):
        data = self.df.iloc[index, :]
        # ::BUG
        # snippet `torch.tensor(list(data[2:]))` uses really high amount of RAM,
        # doubling it usage for every iteration, i guess it has to do with
        # garbage collector is not invalidating old memory good enough
        val = torch.tensor(list(map(int, data[2:])))
        label = data[0]
        mal_hash = data[1]
        return (val, mal_hash, label)

    def __len__(self):
        return self.df.shape[0]
class AutoEncoder(torch.nn.Module):
    """Symmetric fully-connected autoencoder.

    ``layer_size`` lists every layer width, e.g. ``[in, ..., code, ..., in]``:
    an odd number of entries whose first and last widths match. ``forward``
    returns both the bottleneck activation and the reconstruction.
    """
    def __init__(self, layer_size):
        """Build the linear / batch-norm stacks for the given widths."""
        # must have at least [input, hidden, output] layers
        assert len(layer_size) >= 3
        assert layer_size[0] == layer_size[-1]  # input width equals output width
        assert len(layer_size) % 2 == 1  # odd layer count => well-defined middle
        super(AutoEncoder, self).__init__()
        self.layer_size = layer_size
        self.relu = torch.nn.ReLU()
        self.layers = torch.nn.ModuleList()
        self.batchnorm = torch.nn.ModuleList()
        # one Linear per consecutive width pair; batch-norm after every
        # layer except the final (output) one
        for idx, (w_in, w_out) in enumerate(zip(layer_size, layer_size[1:])):
            self.layers.append(torch.nn.Linear(w_in, w_out))
            if idx < len(layer_size) - 2:
                self.batchnorm.append(torch.nn.BatchNorm1d(w_out))

    def forward(self, x):
        # index of the bottleneck layer in the hidden stack
        code_index = len(self.layer_size) // 2 - 1
        encoded = None
        for idx, (linear, norm) in enumerate(zip(self.layers[:-1], self.batchnorm)):
            # batch-norm requires NxM inputs (N = batch, M = features)
            x = self.relu(norm(linear(x)))
            if idx == code_index:
                encoded = x  # capture the middle (encoded) representation
        decoded = self.layers[-1](x)
        return encoded, decoded
def main():
    """Train a denoising autoencoder on the malware dataset, periodically
    exporting the bottleneck encodings to CSV and checkpointing the model."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("\nDevice used : {}".format('cuda' if torch.cuda.is_available() else 'cpu'))
    print("Pytorch version: {}".format(torch.__version__))
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))
    project_name = "DAE"
    # hyper parameters
    num_epochs = 1000 # how many iterations for complete single dataset training
    learning_rate = 0.001
    batch_size = 50 # batch per-training
    enable_checkpoint = True
    denoise_ratio = 0.2  # probability of flipping each input bit
    checkpoint_name = 'checkpoint-{}.pt'.format(project_name) # model filename
    layer_size = [10000, 3000, 500, 100, 20, 100, 500, 3000, 10000] # layers size
    # load dataset
    malware_data = LoadDataset(dataset_path='dataset.csv.xz')
    # shuffle=True means for every epoch, the data is going to be re-shuffled
    # pin_memory=True, ref: https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
    train_loader = torch.utils.data.DataLoader(malware_data, batch_size=batch_size, pin_memory=True, shuffle=True)
    # setup appropriate objects
    dae = AutoEncoder(layer_size).to(device)
    criterion = torch.nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(dae.parameters(), lr=learning_rate)
    epoch = 0
    # load previous checkpoint if it exists (resumes epoch counter too)
    if enable_checkpoint:
        if os.path.exists(checkpoint_name):
            print("Previous checkpoint model found!\n")
            checkpoint = torch.load(checkpoint_name)
            dae.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            epoch = checkpoint['epoch']
            dae.eval()
    # NOTE(review): despite its name this *adds* binary noise -- each feature
    # is flipped with probability `ratio` (XOR via modulo-2 addition); the same
    # noise vector is broadcast across the whole batch
    def denoise(x, ratio):
        noise = np.random.binomial(1, ratio, size=x[0].shape[0])
        noise = torch.tensor(noise).float().to(device)
        return (x + noise) % 2
    # train our model
    while epoch < num_epochs:
        avg_loss = 0
        for i, (X, _, _) in enumerate(train_loader):
            dae.train() # switch back to train mode
            x = X.float().to(device)
            x_noise = denoise(x, denoise_ratio) # corrupt the input bits
            _, outputs = dae(x_noise)
            # reconstruction loss is computed against the *clean* input
            loss = criterion(outputs, x)
            avg_loss += loss.item()
            optimizer.zero_grad() # clear our previous calc
            loss.backward() # calc all parameters gradient
            optimizer.step() # apply weight tuning based on calculated gradient
            if (i+1) % 5 == 0:
                dae.eval() # turns off dropout and batch normalization
                epoch_fmt = str(epoch+1).rjust(len(str(num_epochs)))
                batch_fmt = str(i+1).rjust(len(str(len(train_loader))))
                fmt_str = "Epochs [" + epoch_fmt + "/{}], Batch [" + batch_fmt + "/{}], Loss = {:.8f}"
                print(fmt_str.format(num_epochs, len(train_loader), loss.item()))
        avg_loss /= len(train_loader)
        if (epoch+1) % 5 == 0:
            print("\nAverage loss for epochs [{}] = {:.8f}".format(epoch+1, avg_loss))
        # generate compressed malware output for testing
        if (epoch+1) % 10 == 0:
            with torch.no_grad():
                # turns off dropout and batch normalization
                dae.eval()
                # save encoded form for all entire dataset
                filename = "encoded-form-{}.csv".format(project_name)
                encoded_data = [] # x, hash and label combined
                # run every batch through the encoder half only
                for X, mal_hash, label in train_loader:
                    x = X.float().to(device)
                    encoded, _ = dae(x)
                    for each_encoded, each_hash, each_label in zip(encoded.tolist(), mal_hash, label):
                        encoded_data.append([each_label] + [each_hash] + each_encoded)
                # export current compressed file into csv for previewing
                print("\nExporting encoded malware form into csv..\n")
                encoded_df = pd.DataFrame(encoded_data)
                encoded_df.to_csv(filename, index=False, header=False)
        # save model for every 10 iterations -- make sure we don't lose everything
        if enable_checkpoint:
            if (epoch+1) % 10 == 0:
                print("\nSaving checkpoint model..\n")
                torch.save({
                    'epoch': epoch+1,
                    'model_state_dict': dae.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()
                }, checkpoint_name)
        epoch += 1
    torch.save(dae.state_dict(), "{}-Trained-Model.pt".format(project_name))
# run training only when executed as a script (not on import)
if __name__ == '__main__':
    main()
|
|
import logging
from collections import defaultdict
import time
import multiprocessing
import os
import argparse
import numpy as np
from flexp import flexp
from vsbd.dataset import DatasetReader
from vsbd.models import Vowpal, UniformPolicy
def parse_args():
    """Parse the command-line arguments of the Vowpal experiment."""
    parser = argparse.ArgumentParser(description="Vowpal experiment")
    # required paths
    parser.add_argument("--dataset_dir", type=str, required=True,
                        help="Path to the dataset directory")
    parser.add_argument("--results_dir", type=str, required=True,
                        help="Path to a directory where results should be saved")
    parser.add_argument("--vowpal_path", type=str, required=True,
                        help="Path to the vowpal binary")
    # evaluation / training depth
    parser.add_argument("--num_test_positions", type=int, required=True,
                        help="Number of positions to evaluate the models on (K in paper)")
    parser.add_argument("--num_train_positions", type=int, default=None,
                        help="Number of positions from the beginning to train the models on. "
                             "If None, train on all positions.")
    parser.add_argument("--cb_types", nargs="+", type=str,
                        help="Contextual bandit types for vowpal (dm ips dr)")
    # optional behavior switches
    parser.add_argument("--skip_train", action="store_true",
                        help="Do not train the models (combine with --load_model_dir)")
    parser.add_argument("--load_model_dir", type=str, default=None,
                        help="Load models (named model_{dm,ips,dr}.vw) from given directory")
    parser.add_argument("--save_vowpal_input", action="store_true",
                        help="Save vowpal input for debug purposes")
    parser.add_argument("--save_propensities", action="store_true",
                        help="Save propensities of trained model predictions on test set")
    return parser.parse_args()
# shared chunk for multiprocessing
_chunk = {}
# multiprocessing helper: worker updates one model on the current chunk.
# NOTE(review): `models` and `_chunk["chunk"]` are module-level globals
# assigned further down before the pool is started
def update_model(key):
    models[key].update(_chunk["chunk"])
# parse CLI options and derive the position lists used for train/test
args = parse_args()
num_test_positions = args.num_test_positions
num_train_positions = args.num_train_positions
test_positions = (
    list(range(num_test_positions))
    if num_test_positions else None
)
train_positions = (
    list(range(num_train_positions))
    if num_train_positions else None
)
# train on 2018-08-13..31, test on 2018-09-01..21
train_days = ["201808{:02d}".format(i) for i in range(13, 32)]
test_days = ["201809{:02d}".format(i) for i in range(1, 22)]
flexp.setup(
    args.results_dir,
    "linear_pos_vowpal_num_train_pos_{}_num_test_pos_{}_cb_{}".format(
        num_train_positions,
        num_test_positions,
        "_".join(args.cb_types)
    ),
    with_date=True
)
flexp.describe(
    "save_all_input_vowpal {}, train on {}, test on {}, "
    "num_train_positions {}, num_test_positions {}"
    .format(
        "_".join(args.cb_types), train_days, test_days,
        num_train_positions, num_test_positions
    )
)
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s"
)
np.random.seed(25)
logging.info("Reading dataset")
# lazily streamed chunks of logged interactions in vowpal input format
train = DatasetReader(
    [
        os.path.join(args.dataset_dir, str(train_day))
        for train_day in train_days
    ],
    chunksize=100000,
    make_vowpal_train_input=True, use_positions=train_positions
).read()
test = DatasetReader(
    [
        os.path.join(args.dataset_dir, str(test_day))
        for test_day in test_days
    ],
    chunksize=100000,
    make_vowpal_test_input=True, use_positions=test_positions
).read()
num_actions = 21
# baseline uniform policy plus one vowpal model per requested cb_type
models = {
    "uniform": UniformPolicy(num_actions=num_actions),
}
for cb_type in args.cb_types:
    model_filename = "model_{}.vw".format(cb_type)
    models["vowpal_{}".format(cb_type)] = Vowpal(
        num_actions=num_actions,
        vowpal_binary_path=args.vowpal_path,
        cb_type=cb_type,
        model_path=flexp.get_file_path(model_filename),
        load_model_path=(
            os.path.join(args.load_model_dir, model_filename)
            if args.load_model_dir
            else None
        )
    )
if not args.skip_train:
    logging.info("Training...")
    t = time.time()
    for chunk in train:
        chunk[["vowpal_train_input"]].to_csv(
            flexp.get_file_path("vowpal_input.txt"),
            index=False, header=None, sep="\t", mode="a"
        )
        logging.info(
            "timestamp {}, chunk took {:.3f} s to load"
            .format(chunk.timestamp.iloc[-1], time.time() - t)
        )
        # hand the chunk to the workers through the module-level _chunk dict
        _chunk["chunk"] = chunk
        t = time.time()
        # update all models in parallel, one process per model
        with multiprocessing.Pool(len(models)) as pool:
            pool.map(update_model, models.keys())
        logging.info("updates took {:.3f} s".format(time.time() - t))
        t = time.time()
    # shut down the vowpal subprocesses so the model files get written
    for cb_type in args.cb_types:
        models["vowpal_{}".format(cb_type)].stop()
def reshape(x):
    """Reshape a chunk column so that values for a SERP are in one row."""
    # one row per SERP, num_test_positions columns (module-level global)
    return np.reshape(x.values, (-1, num_test_positions))
def cumany(x, axis=0):
    """Cumulative any (modeled after np.cumprod): element i is True once
    any element up to and including i along *axis* was truthy."""
    running_hits = np.cumsum(x.astype(bool), axis=axis)
    return running_hits > 0
def initial():
    # default_factory for the metric accumulators: one slot per test position
    return np.zeros(num_test_positions)
# per-position metric accumulators (numerators plus the propensity-ratio
# normalizer), keyed by model name
ndcg_numerator = defaultdict(initial)
ctr_numerator = defaultdict(initial)
vctr_numerator = defaultdict(initial)
c_sum = defaultdict(initial)
predictions = defaultdict(list)
propensities = defaultdict(list)
num_records = 0
t = time.time()
for chunk in test:
    if args.save_vowpal_input:
        chunk["vowpal_test_input"].to_csv(
            flexp.get_file_path("vowpal_test_input.txt"),
            index=False, header=None, sep="\t", mode="a"
        )
    _chunk["chunk"] = chunk
    logging.info(
        "timestamp {}, chunk took {:.3f} s to load"
        .format(chunk.timestamp.iloc[-1], time.time() - t)
    )
    t = time.time()
    # per-model action probability matrices for the whole chunk
    preds = [models[k].get_action_probs_batch(chunk) for k in models.keys()]
    logging.info("getting predictions took {:.3f} s".format(time.time() - t))
    t = time.time()
    # probability each model assigns to the action that was actually logged
    for key, new_predictions in zip(models.keys(), preds):
        new_propensities = new_predictions[
            range(len(chunk)), chunk.action
        ]
        chunk["{}_propensity".format(key)] = new_propensities
        if args.save_propensities:
            path = flexp.get_file_path(key + ".propensities.txt")
            with open(path, "a") as fout:
                for p in new_propensities:
                    print(p, file=fout)
    logging.info("appending propensities took {:.3f}".format(time.time() - t))
    t = time.time()
    for key in models.keys():
        # we assert that the number of loaded test positions is the same for
        # each SERP and reshape the propensities so that each SERP is in one
        # row
        new_propensity = reshape(chunk["{}_propensity".format(key)]).cumprod(1)
        # clip propensity after every cumulative product step to comply with
        # the awk version
        _old_propensity = reshape(chunk["propensity"])
        old_propensity = np.empty(_old_propensity.shape)
        min_prop = 1e-5 * np.ones(len(chunk) // num_test_positions)
        old_propensity[:, 0] = np.maximum(_old_propensity[:, 0], min_prop)
        for i in range(1, num_test_positions):
            old_propensity[:, i] = np.maximum(
                old_propensity[:, i - 1] * _old_propensity[:, i],
                min_prop
            )
        # importance weights: new-policy propensity over logged propensity
        cs = new_propensity / old_propensity
        # see Section 4 of the paper for metric definitions
        ndcg_numerator[key] += (
            cs *
            (
                reshape(chunk["reward"] == 2) * np.log(2) /
                np.log(2 + np.arange(num_test_positions))
            ).cumsum(axis=1)
        ).sum(axis=0)
        ctr_numerator[key] += (
            cs * cumany(reshape(chunk["reward"] > 0), axis=1)
        ).sum(axis=0)
        vctr_numerator[key] += (
            cs *
            cumany(
                reshape(
                    (chunk["reward"] > 0) & (chunk["action"] != 0)
                ),
                axis=1
            )
        ).sum(axis=0)
        c_sum[key] += cs.sum(axis=0)
    num_records += len(chunk) // num_test_positions
    logging.info("evaluating metrics took {}".format(time.time() - t))
# save metric values: one CSV per evaluation depth K, normalizing each
# numerator by the propensity-ratio sum (self-normalized importance sampling)
for i in range(num_test_positions):
    path = flexp.get_file_path("results_K={}.csv".format(i + 1))
    with open(path, "w") as fout:
        print("Policy", "CTR", "NDCG", "VCTR", "C", sep=";", file=fout)
        for key in models.keys():
            ndcg = ndcg_numerator[key][i] / c_sum[key][i]
            ctr = ctr_numerator[key][i] / c_sum[key][i]
            vctr = vctr_numerator[key][i] / c_sum[key][i]
            c = c_sum[key][i] / num_records
            print(key, ctr, ndcg, vctr, c, sep=";", file=fout)
            logging.info(
                "{} ndcg: {}, ctr: {}, vctr: {}, c: {}"
                .format(key, ndcg, ctr, vctr, c)
            )
|
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Concatenate, Dropout, Input
from tensorflow.keras.layers import ZeroPadding2D,Conv2DTranspose,LeakyReLU
from tensorflow.keras.layers import Conv2DTranspose, Concatenate
from tensorflow.keras.models import Model
from tensorflow.nn import max_pool_with_argmax
from tensorflow_addons.layers import MaxUnpooling2D
def get_colorization_model(model_name):
    """Build and compile a colorization network.

    Args:
        model_name: one of "AE", "SegNet" or "Unet".
    Returns:
        A compiled Keras model (MSE loss, Adam optimizer).
    Raises:
        ValueError: if *model_name* is unknown (the original code crashed
            with an UnboundLocalError on `model` instead).
    """
    if model_name=="AE":
        model = AE(RESNET_STRUCTURE=True).get_model()
    elif model_name=="SegNet":
        model = SegNet(RESNET_STRUCTURE=True).get_model()
    elif model_name=="Unet":
        model = Unet(input_ch=1, output_ch=2).get_model()
    else:
        raise ValueError("unknown model_name: {!r}".format(model_name))
    # the compile arguments were identical in every branch -- hoisted here
    model.compile(loss="mse", optimizer="adam", metrics=["mse"])
    return model
class EarlyStopping:
    """Earlystopping for Colorization Network.

    Counts consecutive calls whose loss did not improve on the best
    (lowest) loss seen so far; sets ``early_stop`` once the count reaches
    ``patience``.
    """
    def __init__(self, patience=20):
        # number of non-improving calls tolerated before stopping
        self.patience = patience
        self.counter = 0
        # best (lowest) loss observed so far; ``np.Inf`` was removed in
        # NumPy 2.0, so use the builtin float infinity instead
        self.min_loss = float("inf")
        self.early_stop = False

    def __call__(self, loss):
        if loss > self.min_loss:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:  # when we achieve a best (or equal) score
            self.min_loss = loss
            self.counter = 0
class AE(object):
    """Convolutional autoencoder for colorization: downsamples the input,
    optionally runs residual blocks at the bottleneck, then upsamples back
    to the input resolution with OUTPUT_CH sigmoid channels."""
    def __init__(self, input_ch=1, output_ch=2, filters=64*1, INPUT_IMAGE_SIZE=256, RESNET_STRUCTURE=True):
        self.INPUT_CH = input_ch
        self.OUTPUT_CH = output_ch
        self.FILTERS = filters  # base filter count, doubled at each stage
        self.INPUT_IMAGE_SIZE = INPUT_IMAGE_SIZE
        self.RESNET_STRUCTURE = RESNET_STRUCTURE  # add residual blocks at the bottleneck
        self.AE = self.build_model()
    def build_model(self):
        """Assemble and return the (uncompiled) Keras Model."""
        def conv2d(layer_input, filters, f_size=3, stride=2, bn=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=stride, padding='same')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)
            return d
        def resnet_conv2d(layer_input, filters, f_size=3, stride=1, bn=True):
            """Layers used between downsampling and upsampling """
            d = Conv2D(filters, kernel_size=f_size, strides=stride, padding='same')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)
            d = Conv2D(filters, kernel_size=f_size, strides=stride, padding='same')(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            # residual connection (requires matching channel counts)
            d = d + layer_input
            return d
        def deconv2d(layer_input, filters, f_size=4, stride=2, bn=True):
            """Layers used during upsampling"""
            u = Conv2DTranspose(filters, kernel_size=f_size, strides=stride, padding='same')(layer_input)
            if bn:
                u = BatchNormalization(momentum=0.8)(u)
            u = LeakyReLU(alpha=0.2)(u)
            return u
        # Image input
        inputs = Input(shape=(self.INPUT_IMAGE_SIZE,self.INPUT_IMAGE_SIZE,self.INPUT_CH))
        x = Conv2D(32, kernel_size=1, strides=1, padding='same')(inputs)
        x = LeakyReLU(alpha=0.2)(x)
        """ Encoder """
        # Downsampling
        x = conv2d(x, self.FILTERS*1, f_size=4, stride=2, bn=False) # C64 1/4
        x = conv2d(x, self.FILTERS*2, f_size=4, stride=2, bn=True) # C128 1/8
        x = conv2d(x, self.FILTERS*4, f_size=4, stride=2, bn=True) # C256 1/16
        x = conv2d(x, self.FILTERS*8, f_size=4, stride=2, bn=True) # C512 1/32
        if self.RESNET_STRUCTURE:
            # Resnet block bottom layers
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
        """ Decoder """
        # Upsampling
        x = deconv2d(x, self.FILTERS*4, f_size=4, stride=2, bn=True)
        x = deconv2d(x, self.FILTERS*2, f_size=4, stride=2, bn=True)
        x = deconv2d(x, self.FILTERS*1, f_size=4, stride=2, bn=True)
        x = deconv2d(x, 32, f_size=4, stride=2, bn=False)
        output_img = Conv2D(self.OUTPUT_CH, kernel_size=4, strides=1, padding="same", activation='sigmoid')(x)
        return Model(inputs, output_img)
    def get_model(self):
        """Return the built Keras model."""
        return self.AE
class SegNet(object):
    """SegNet-style encoder/decoder for colorization: max-pooling indices
    recorded in the encoder drive the unpooling stages of the decoder,
    optionally with residual blocks at the bottleneck."""
    def __init__(self, input_ch=1, output_ch=2, filters=64*1, INPUT_IMAGE_SIZE=256, RESNET_STRUCTURE=True):
        self.INPUT_CH = input_ch
        self.OUTPUT_CH = output_ch
        self.FILTERS = filters  # base filter count
        self.INPUT_IMAGE_SIZE = INPUT_IMAGE_SIZE
        self.RESNET_STRUCTURE = RESNET_STRUCTURE
        self.SEGNET = self.build_model()
    def build_model(self):
        """Assemble and return the (uncompiled) Keras Model."""
        def conv_block(x, filters):
            # conv -> batch-norm -> relu
            x = Conv2D(filters, kernel_size=3, padding="same")(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
            # x = Conv2D(filters, kernel_size=3, padding="same")(x)
            # x = BatchNormalization()(x)
            # x = Activation("relu")(x)
            return x
        def resnet_conv2d(layer_input, filters, f_size=3, stride=1, bn=True):
            """Layers used between downsampling and upsampling """
            d = Conv2D(filters, kernel_size=f_size, strides=stride, padding='same')(layer_input)
            d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)
            d = Conv2D(filters, kernel_size=f_size, strides=stride, padding='same')(d)
            d = BatchNormalization(momentum=0.8)(d)
            d = d + layer_input
            return d
        # Image input
        inputs = Input(shape=(self.INPUT_IMAGE_SIZE,self.INPUT_IMAGE_SIZE,self.INPUT_CH))
        """ Encoder """
        # Stage 1
        x = conv_block(inputs, self.FILTERS*1)
        x = conv_block(x, self.FILTERS*1)
        x,idx_1 = max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")
        # Stage 2
        x = conv_block(x, self.FILTERS*2)
        x = conv_block(x, self.FILTERS*2)
        x,idx_2 = max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")
        # Stage 3
        x = conv_block(x, self.FILTERS*4)
        x = conv_block(x, self.FILTERS*4)
        x = conv_block(x, self.FILTERS*4)
        x,idx_3 = max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")
        # Stage 4
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x,idx_4 = max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")
        # Stage 5
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x,idx_5 = max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")
        if self.RESNET_STRUCTURE:
            # Resnet block bottom layers
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
            x = resnet_conv2d(x, self.FILTERS*8) # C512
        """ Decoder """
        # NOTE(review): MaxUnpooling2D is invoked as layer(x, idx) with the
        # argmax indices from max_pool_with_argmax -- confirm this matches
        # the installed tensorflow_addons signature
        # Stage 5u
        x = MaxUnpooling2D(pool_size=2, strides=2, padding="SAME")(x,idx_5)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        # Stage 4u
        x = MaxUnpooling2D(pool_size=2, strides=2, padding="SAME")(x,idx_4)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*8)
        x = conv_block(x, self.FILTERS*4)
        # Stage 3u
        x = MaxUnpooling2D(pool_size=2, strides=2, padding="SAME")(x,idx_3)
        x = conv_block(x, self.FILTERS*4)
        x = conv_block(x, self.FILTERS*4)
        x = conv_block(x, self.FILTERS*2)
        # Stage 2u
        x = MaxUnpooling2D(pool_size=2, strides=2, padding="SAME")(x,idx_2)
        x = conv_block(x, self.FILTERS*2)
        x = conv_block(x, self.FILTERS*1)
        # Stage 1u
        x = MaxUnpooling2D(pool_size=2, strides=2, padding="SAME")(x,idx_1)
        x = conv_block(x, self.FILTERS*1)
        x = conv_block(x, self.FILTERS*1)
        outputs = Conv2D(self.OUTPUT_CH, kernel_size=1, strides=1, padding="same", activation='sigmoid')(x)
        return Model(inputs, outputs)
    def get_model(self):
        """Return the built Keras model."""
        return self.SEGNET
class Unet(object):
    """
    Pix2pix-like U-Net: eight stride-2 encoding stages down to a 1x1
    bottleneck, mirrored by eight decoding stages with skip connections
    concatenated from the matching encoder stage.
    """
    def __init__(self, input_ch=1, output_ch=2, filters=64*1, INPUT_IMAGE_SIZE=256):
        self.INPUT_CH = input_ch
        self.OUTPUT_CH = output_ch
        self.FILTERS = filters  # base filter count N
        self.INPUT_IMAGE_SIZE = INPUT_IMAGE_SIZE
        self.CONV_FILTER_SIZE = 4
        self.CONV_STRIDE = 2
        self.CONV_PADDING = (1, 1)
        self.DECONV_FILTER_SIZE = 2
        self.DECONV_STRIDE = 2
        self.UNET = self.build_model()
    def build_model(self):
        """Assemble and return the (uncompiled) Keras Model."""
        # (256 x 256 x input_channel_count)
        inputs = Input(shape=(self.INPUT_IMAGE_SIZE, self.INPUT_IMAGE_SIZE, self.INPUT_CH))
        """
        Encoder
        """
        # (128 x 128 x N) -- first stage has no activation/batch-norm in front
        enc1 = ZeroPadding2D(self.CONV_PADDING)(inputs)
        enc1 = Conv2D(self.FILTERS, self.CONV_FILTER_SIZE, strides=self.CONV_STRIDE)(enc1)
        # (64 x 64 x 2N)
        enc2 = self._add_encoding_layer(self.FILTERS*2, enc1)
        # (32 x 32 x 4N)
        enc3 = self._add_encoding_layer(self.FILTERS*4, enc2)
        # (16 x 16 x 8N)
        enc4 = self._add_encoding_layer(self.FILTERS*8, enc3)
        # (8 x 8 x 8N)
        enc5 = self._add_encoding_layer(self.FILTERS*8, enc4)
        # (4 x 4 x 8N)
        enc6 = self._add_encoding_layer(self.FILTERS*8, enc5)
        # (2 x 2 x 8N)
        enc7 = self._add_encoding_layer(self.FILTERS*8, enc6)
        # (1 x 1 x 8N)
        enc8 = self._add_encoding_layer(self.FILTERS*8, enc7)
        """
        Decoder
        """
        # (2 x 2 x 8N) -- dropout on the three deepest decoder stages only
        dec1 = self._add_decoding_layer(self.FILTERS*8, True, enc8)
        dec1 = Concatenate(axis=-1)([dec1, enc7])
        # (4 x 4 x 8N)
        dec2 = self._add_decoding_layer(self.FILTERS*8, True, dec1)
        dec2 = Concatenate(axis=-1)([dec2, enc6])
        # (8 x 8 x 8N)
        dec3 = self._add_decoding_layer(self.FILTERS*8, True, dec2)
        dec3 = Concatenate(axis=-1)([dec3, enc5])
        # (16 x 16 x 8N)
        dec4 = self._add_decoding_layer(self.FILTERS*8, False, dec3)
        dec4 = Concatenate(axis=-1)([dec4, enc4])
        # (32 x 32 x 4N)
        dec5 = self._add_decoding_layer(self.FILTERS*4, False, dec4)
        dec5 = Concatenate(axis=-1)([dec5, enc3])
        # (64 x 64 x 2N)
        dec6 = self._add_decoding_layer(self.FILTERS*2, False, dec5)
        dec6 = Concatenate(axis=-1)([dec6, enc2])
        # (128 x 128 x N)
        dec7 = self._add_decoding_layer(self.FILTERS, False, dec6)
        dec7 = Concatenate(axis=-1)([dec7, enc1])
        # (256 x 256 x output_channel_count)
        dec8 = Activation(activation='relu')(dec7)
        dec8 = Conv2DTranspose(self.OUTPUT_CH, self.DECONV_FILTER_SIZE, strides=self.DECONV_STRIDE)(dec8)
        dec8 = Activation(activation='sigmoid')(dec8)
        return Model(inputs=inputs, outputs=dec8)
    def _add_encoding_layer(self, filters, sequence):
        """One encoder stage: LeakyReLU -> pad -> stride-2 conv -> batch-norm."""
        new_sequence = LeakyReLU(0.2)(sequence)
        new_sequence = ZeroPadding2D(self.CONV_PADDING)(new_sequence)
        new_sequence = Conv2D(filters, self.CONV_FILTER_SIZE, strides=self.CONV_STRIDE)(new_sequence)
        new_sequence = BatchNormalization()(new_sequence)
        return new_sequence
    def _add_decoding_layer(self, filters, add_drop_layer, sequence):
        """One decoder stage: ReLU -> stride-2 transposed conv -> batch-norm,
        with optional dropout when add_drop_layer is True."""
        new_sequence = Activation(activation='relu')(sequence)
        new_sequence = Conv2DTranspose(filters, self.DECONV_FILTER_SIZE, strides=self.DECONV_STRIDE,
                                       kernel_initializer='he_uniform')(new_sequence)
        new_sequence = BatchNormalization()(new_sequence)
        if add_drop_layer:
            new_sequence = Dropout(0.5)(new_sequence)
        return new_sequence
    def get_model(self):
        """Return the built Keras model."""
        return self.UNET
|
|
import shor
def test_shor():
    """Check shor.factorize on 2..999: the factors multiply back to the
    input and every factor passes shor.prime."""
    import numpy as np
    limit = 1000
    for value in range(2, limit):
        factors = shor.factorize(value)
        product = np.prod(factors)
        assert product == value, "product({}) != {}".format(factors, value)
        for factor in factors:
            assert shor.prime(factor), "{} (factor of {}) is not prime!".format(
                factor, value)
|
|
# Copyright (c) 2015,2016,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `skewt` module."""
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pytest
from metpy.plots import Hodograph, SkewT
# Fixtures to make sure we have the right backend and consistent round
from metpy.testing import set_agg_backend # noqa: F401, I202
from metpy.units import units
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(remove_text=True, style='default',
                               tolerance={'2.1': 1.118}.get(MPL_VERSION, 0.02))
def test_skewt_api():
    """Test the SkewT API."""
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        fig = plt.figure(figsize=(9, 9))
        skew = SkewT(fig, aspect='auto')
        # Plot the data using normal plotting functions, in this case using
        # log scaling in Y, as dictated by the typical meteorological plot
        p = np.linspace(1000, 100, 10)
        t = np.linspace(20, -20, 10)
        u = np.linspace(-10, 10, 10)
        skew.plot(p, t, 'r')
        skew.plot_barbs(p, u, u)
        skew.ax.set_xlim(-20, 30)
        skew.ax.set_ylim(1000, 100)
        # Add the relevant special lines
        skew.plot_dry_adiabats()
        skew.plot_moist_adiabats()
        skew.plot_mixing_lines()
        # Call again to hit removal statements
        skew.plot_dry_adiabats()
        skew.plot_moist_adiabats()
        skew.plot_mixing_lines()
    return fig
@pytest.mark.mpl_image_compare(remove_text=True, style='default',
                               tolerance={'2.1': 34.37}.get(MPL_VERSION, 0.02))
def test_skewt_api_units():
    """Exercise the SkewT API with unit-tagged input arrays."""
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        figure = plt.figure(figsize=(9, 9))
        skewt = SkewT(figure)
        # Convert to non-default units to exercise unit handling
        pressure = (np.linspace(950, 100, 10) * units.hPa).to(units.Pa)
        temperature = (np.linspace(18, -20, 10) * units.degC).to(units.kelvin)
        wind = np.linspace(-20, 20, 10) * units.knots
        skewt.plot(pressure, temperature, 'r')
        skewt.plot_barbs(pressure, wind, wind)
        skewt.plot_dry_adiabats()
        skewt.plot_moist_adiabats()
        skewt.plot_mixing_lines()
        # This works around the fact that newer pint versions default to degrees_Celsius
        skewt.ax.set_xlabel('degC')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0. if matplotlib.__version__ >= '3.2' else 30.,
                               remove_text=True, style='default')
def test_skewt_default_aspect_empty():
    """Check a default-aspect SkewT containing only the special lines."""
    # With this rotation and the default aspect, this matches exactly the NWS SkewT PDF
    figure = plt.figure(figsize=(12, 9))
    skewt = SkewT(figure, rotation=43)
    skewt.plot_dry_adiabats()
    skewt.plot_moist_adiabats()
    skewt.plot_mixing_lines()
    return figure
@pytest.mark.skipif(matplotlib.__version__ < '3.2',
                    reason='Matplotlib versions generate different image sizes.')
@pytest.mark.mpl_image_compare(tolerance=0., remove_text=False, style='default',
                               savefig_kwargs={'bbox_inches': 'tight'})
def test_skewt_tight_bbox():
    """Check that a SkewT survives `savefig(..., bbox_inches='tight')`."""
    figure = plt.figure(figsize=(12, 9))
    SkewT(figure)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.811, remove_text=True, style='default')
def test_skewt_subplot():
    """Check that a SkewT can be placed on a numbered sub-plot."""
    figure = plt.figure(figsize=(9, 9))
    SkewT(figure, subplot=(2, 2, 1), aspect='auto')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default')
def test_skewt_gridspec():
    """Check that a SkewT can be placed on a GridSpec cell."""
    figure = plt.figure(figsize=(9, 9))
    layout = GridSpec(1, 2)
    SkewT(figure, subplot=layout[0, 1], aspect='auto')
    return figure
def test_skewt_with_grid_enabled():
    """Check SkewT construction when gridlines are globally enabled (#271)."""
    with plt.rc_context(rc={'axes.grid': True}):
        # Also tests when we don't pass in Figure
        SkewT(aspect='auto')
@pytest.mark.mpl_image_compare(tolerance=0., remove_text=True, style='default')
def test_skewt_arbitrary_rect():
    """Check placement of a SkewT in an arbitrary figure rectangle."""
    figure = plt.figure(figsize=(9, 9))
    SkewT(figure, rect=(0.15, 0.35, 0.8, 0.3), aspect='auto')
    return figure
def test_skewt_subplot_rect_conflict():
    """Check that passing both `rect` and `subplot` is rejected."""
    with pytest.raises(ValueError):
        SkewT(rect=(0.15, 0.35, 0.8, 0.3), subplot=(1, 1, 1))
@pytest.mark.mpl_image_compare(tolerance=0., remove_text=True, style='default')
def test_skewt_units():
    """Check that unit-tagged values can be drawn directly on SkewT axes."""
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    # Mix kelvin, Pa, degC, and a plain float to exercise unit conversion
    skewt.ax.axvline(np.array([273]) * units.kelvin, color='purple')
    skewt.ax.axhline(np.array([50000]) * units.Pa, color='red')
    skewt.ax.axvline(np.array([-20]) * units.degC, color='darkred')
    skewt.ax.axvline(-10, color='orange')
    return figure
@pytest.fixture()
def test_profile():
    """Return (pressure, temperature, dewpoint, parcel profile) test data."""
    pressure = np.array([966., 937.2, 925., 904.6, 872.6, 853., 850., 836., 821., 811.6, 782.3,
                         754.2, 726.9, 700., 648.9, 624.6, 601.1, 595., 587., 576., 555.7,
                         534.2, 524., 500., 473.3, 400., 384.5, 358., 343., 308.3, 300., 276.,
                         273., 268.5, 250., 244.2, 233., 200.]) * units.mbar
    temperature = np.array([18.2, 16.8, 16.2, 15.1, 13.3, 12.2, 12.4, 14., 14.4,
                            13.7, 11.4, 9.1, 6.8, 4.4, -1.4, -4.4, -7.3, -8.1,
                            -7.9, -7.7, -8.7, -9.8, -10.3, -13.5, -17.1, -28.1, -30.7,
                            -35.3, -37.1, -43.5, -45.1, -49.9, -50.4, -51.1, -54.1, -55.,
                            -56.7, -57.5]) * units.degC
    dewpoint = np.array([16.9, 15.9, 15.5, 14.2, 12.1, 10.8, 8.6, 0., -3.6, -4.4,
                         -6.9, -9.5, -12., -14.6, -15.8, -16.4, -16.9, -17.1, -27.9, -42.7,
                         -44.1, -45.6, -46.3, -45.5, -47.1, -52.1, -50.4, -47.3, -57.1,
                         -57.9, -58.1, -60.9, -61.4, -62.1, -65.1, -65.6,
                         -66.7, -70.5]) * units.degC
    # Lifted parcel profile (normalizes the original's stray `np. array` spacing)
    profile = np.array([18.2, 16.18287437, 15.68644745, 14.8369451,
                        13.45220646, 12.57020365, 12.43280242, 11.78283506,
                        11.0698586, 10.61393901, 9.14490966, 7.66233636,
                        6.1454231, 4.56888673, 1.31644072, -0.36678427,
                        -2.09120703, -2.55566745, -3.17594616, -4.05032505,
                        -5.73356001, -7.62361933, -8.56236581, -10.88846868,
                        -13.69095789, -22.82604468, -25.08463516, -29.26014016,
                        -31.81335912, -38.29612829, -39.97374452, -45.11966793,
                        -45.79482793, -46.82129892, -51.21936594, -52.65924319,
                        -55.52598916, -64.68843697]) * units.degC
    return pressure, temperature, dewpoint, profile
@pytest.mark.mpl_image_compare(tolerance=.033, remove_text=True, style='default')
def test_skewt_shade_cape_cin(test_profile):
    """Check shading of both CAPE and CIN (with dewpoint limit) on a SkewT."""
    pressure, temperature, dewpoint, parcel = test_profile
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        figure = plt.figure(figsize=(9, 9))
        skewt = SkewT(figure, aspect='auto')
        skewt.plot(pressure, temperature, 'r')
        skewt.plot(pressure, parcel, 'k')
        skewt.shade_cape(pressure, temperature, parcel)
        skewt.shade_cin(pressure, temperature, parcel, dewpoint)
        skewt.ax.set_xlim(-50, 50)
        skewt.ax.set_ylim(1000, 100)
        # This works around the fact that newer pint versions default to degrees_Celsius
        skewt.ax.set_xlabel('degC')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.033, remove_text=True, style='default')
def test_skewt_shade_cape_cin_no_limit(test_profile):
    """Check shading CAPE and CIN when no dewpoint limit is supplied."""
    pressure, temperature, _, parcel = test_profile
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        figure = plt.figure(figsize=(9, 9))
        skewt = SkewT(figure, aspect='auto')
        skewt.plot(pressure, temperature, 'r')
        skewt.plot(pressure, parcel, 'k')
        skewt.shade_cape(pressure, temperature, parcel)
        skewt.shade_cin(pressure, temperature, parcel)
        skewt.ax.set_xlim(-50, 50)
        skewt.ax.set_ylim(1000, 100)
        # This works around the fact that newer pint versions default to degrees_Celsius
        skewt.ax.set_xlabel('degC')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.033, remove_text=True, style='default')
def test_skewt_shade_area(test_profile):
    """Check shading the area between two profiles on a SkewT."""
    pressure, temperature, _, parcel = test_profile
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        figure = plt.figure(figsize=(9, 9))
        skewt = SkewT(figure, aspect='auto')
        skewt.plot(pressure, temperature, 'r')
        skewt.plot(pressure, parcel, 'k')
        skewt.shade_area(pressure, temperature, parcel)
        skewt.ax.set_xlim(-50, 50)
        skewt.ax.set_ylim(1000, 100)
        # This works around the fact that newer pint versions default to degrees_Celsius
        skewt.ax.set_xlabel('degC')
    return figure
def test_skewt_shade_area_invalid(test_profile):
    """Check that an invalid `which` value for shade_area raises."""
    pressure, temperature, _, parcel = test_profile
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    skewt.plot(pressure, temperature, 'r')
    skewt.plot(pressure, parcel, 'k')
    with pytest.raises(ValueError):
        # Deliberately misspelled option must be rejected
        skewt.shade_area(pressure, temperature, parcel, which='positve')
@pytest.mark.mpl_image_compare(tolerance=0.033, remove_text=True, style='default')
def test_skewt_shade_area_kwargs(test_profile):
    """Check that shade_area forwards styling kwargs (facecolor)."""
    pressure, temperature, _, parcel = test_profile
    with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
        figure = plt.figure(figsize=(9, 9))
        skewt = SkewT(figure, aspect='auto')
        skewt.plot(pressure, temperature, 'r')
        skewt.plot(pressure, parcel, 'k')
        skewt.shade_area(pressure, temperature, parcel, facecolor='m')
        skewt.ax.set_xlim(-50, 50)
        skewt.ax.set_ylim(1000, 100)
        # This works around the fact that newer pint versions default to degrees_Celsius
        skewt.ax.set_xlabel('degC')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.039, remove_text=True, style='default')
def test_skewt_wide_aspect_ratio(test_profile):
    """Check a SkewT drawn on a short, wide figure."""
    pressure, temperature, _, parcel = test_profile
    figure = plt.figure(figsize=(12.5, 3))
    skewt = SkewT(figure, aspect='auto')
    skewt.plot(pressure, temperature, 'r')
    skewt.plot(pressure, parcel, 'k')
    skewt.ax.set_xlim(-30, 50)
    skewt.ax.set_ylim(1050, 700)
    # This works around the fact that newer pint versions default to degrees_Celsius
    skewt.ax.set_xlabel('degC')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_api():
    """Exercise the basic Hodograph API."""
    figure = plt.figure(figsize=(9, 9))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes, component_range=60)
    hodograph.add_grid(increment=5, color='k')
    hodograph.plot([1, 10], [1, 10], color='red')
    hodograph.plot_colormapped(np.array([1, 3, 5, 10]), np.array([2, 4, 6, 11]),
                               np.array([0.1, 0.3, 0.5, 0.9]), cmap='Greys')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_units():
    """Check that unit-tagged quantities can be drawn on a Hodograph."""
    figure = plt.figure(figsize=(9, 9))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes)
    u = np.arange(10) * units.kt
    v = np.arange(10) * units.kt
    hodograph.plot(u, v)
    hodograph.plot_colormapped(u, v, np.sqrt(u * u + v * v), cmap='Greys')
    axes.set_xlabel('')
    axes.set_ylabel('')
    return figure
def test_hodograph_alone():
    """Check that a Hodograph can be constructed without axes."""
    Hodograph()
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_plot_colormapped():
    """Check colormapped hodograph line handling of NaN values."""
    u = np.arange(5., 65., 5)
    v = np.arange(-5., -65., -5)
    # Inject NaNs at different indices of each component
    u[3] = np.nan
    v[6] = np.nan
    figure = plt.figure(figsize=(9, 9))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes, component_range=80)
    hodograph.add_grid(increment=20, color='k')
    hodograph.plot_colormapped(u, v, np.hypot(u, v), cmap='Greys')
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True, style='default')
def test_skewt_barb_color():
    """Check plotting color-mapped wind barbs on the Skew-T."""
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    pressure = np.linspace(1000, 100, 10)
    wind = np.linspace(-10, 10, 10)
    skewt.plot_barbs(pressure, wind, wind, c=wind)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.02, remove_text=True, style='default')
def test_skewt_barb_unit_conversion():
    """Check barb unit conversion at plot time (#737)."""
    u_wind = np.array([3.63767155210412]) * units('m/s')
    v_wind = np.array([3.63767155210412]) * units('m/s')
    p_wind = np.array([500]) * units.hPa
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    skewt.ax.set_ylabel('')  # remove_text doesn't do this as of pytest 0.9
    skewt.plot_barbs(p_wind, u_wind, v_wind, plot_units='knots')
    skewt.ax.set_ylim(1000, 500)
    skewt.ax.set_yticks([1000, 750, 500])
    skewt.ax.set_xlim(-20, 20)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0.02, remove_text=True, style='default')
def test_skewt_barb_no_default_unit_conversion():
    """Check that barb units are left untouched by default (#737)."""
    u_wind = np.array([3.63767155210412]) * units('m/s')
    v_wind = np.array([3.63767155210412]) * units('m/s')
    p_wind = np.array([500]) * units.hPa
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    skewt.ax.set_ylabel('')  # remove_text doesn't do this as of pytest 0.9
    skewt.plot_barbs(p_wind, u_wind, v_wind)
    skewt.ax.set_ylim(1000, 500)
    skewt.ax.set_yticks([1000, 750, 500])
    skewt.ax.set_xlim(-20, 20)
    return figure
@pytest.mark.parametrize('u,v', [(np.array([3]) * units('m/s'), np.array([3])),
                                 (np.array([3]), np.array([3]) * units('m/s'))])
def test_skewt_barb_unit_conversion_exception(u, v):
    """Check that unit conversion on unitless barb data raises."""
    p_wind = np.array([500]) * units.hPa
    figure = plt.figure(figsize=(9, 9))
    skewt = SkewT(figure, aspect='auto')
    with pytest.raises(ValueError):
        skewt.plot_barbs(p_wind, u, v, plot_units='knots')
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_plot_layers():
    """Check colored height layers with interpolation on a hodograph."""
    u = np.zeros(6) * units.knots
    v = np.array([0, 10, 20, 30, 40, 50]) * units.knots
    heights = np.array([0, 1000, 2000, 3000, 4000, 5000]) * units.m
    intervals = np.array([500, 1500, 2500, 3500, 4500]) * units.m
    colors = ['r', 'g', 'b', 'r']
    figure = plt.figure(figsize=(7, 7))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes)
    hodograph.add_grid(increment=10)
    hodograph.plot_colormapped(u, v, heights, colors=colors, intervals=intervals)
    axes.set_xlim(-50, 50)
    axes.set_ylim(-5, 50)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_plot_layers_different_units():
    """Check colored layers when heights and intervals use different units."""
    u = np.zeros(6) * units.knots
    v = np.array([0, 10, 20, 30, 40, 50]) * units.knots
    # Heights in km, interval boundaries in m
    heights = np.array([0, 1, 2, 3, 4, 5]) * units.km
    intervals = np.array([500, 1500, 2500, 3500, 4500]) * units.m
    colors = ['r', 'g', 'b', 'r']
    figure = plt.figure(figsize=(7, 7))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes)
    hodograph.add_grid(increment=10)
    hodograph.plot_colormapped(u, v, heights, colors=colors, intervals=intervals)
    axes.set_xlim(-50, 50)
    axes.set_ylim(-5, 50)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_plot_layers_bound_units():
    """Check colored layers when the interval bounds carry different units."""
    u = np.zeros(6) * units.knots
    v = np.array([0, 10, 20, 30, 40, 50]) * units.knots
    # Heights in m, interval boundaries in km
    heights = np.array([0, 1000, 2000, 3000, 4000, 5000]) * units.m
    intervals = np.array([0.5, 1.5, 2.5, 3.5, 4.5]) * units.km
    colors = ['r', 'g', 'b', 'r']
    figure = plt.figure(figsize=(7, 7))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes)
    hodograph.add_grid(increment=10)
    hodograph.plot_colormapped(u, v, heights, colors=colors, intervals=intervals)
    axes.set_xlim(-50, 50)
    axes.set_ylim(-5, 50)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_plot_arbitrary_layer():
    """Check layer coloring keyed on an arbitrary variable (speed), no interpolation."""
    u = np.arange(5, 65, 5) * units('knot')
    v = np.arange(-5, -65, -5) * units('knot')
    speed = np.sqrt(u ** 2 + v ** 2)
    colors = ['red', 'green', 'blue']
    levels = [0, 10, 20, 30] * units('knot')
    figure = plt.figure(figsize=(9, 9))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes, component_range=80)
    hodograph.add_grid(increment=20, color='k')
    hodograph.plot_colormapped(u, v, speed, intervals=levels, colors=colors)
    return figure
@pytest.mark.mpl_image_compare(tolerance=0, remove_text=True)
def test_hodograph_wind_vectors():
    """Check drawing wind vectors onto a hodograph."""
    u_wind = np.array([-10, -7, 0, 7, 10, 7, 0, -7])
    v_wind = np.array([0, 7, 10, 7, 0, -7, -10, -7])
    figure = plt.figure(figsize=(6, 6))
    axes = figure.add_subplot(1, 1, 1)
    hodograph = Hodograph(axes, component_range=20)
    hodograph.plot(u_wind, v_wind, linewidth=3)
    hodograph.wind_vectors(u_wind, v_wind)
    return figure
@pytest.mark.skipif(matplotlib.__version__ < '3.0.1',
                    reason="Earlier Matplotlib versions don't have a required fix.")
def test_united_hodograph_range():
    """Check constructing a hodograph whose component_range carries units."""
    figure = plt.figure(figsize=(6, 6))
    axes = figure.add_subplot(1, 1, 1)
    Hodograph(axes, component_range=60. * units.knots)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training code for baseline model."""
from absl import app
from absl import flags
from absl import logging
import os
import numpy as np
import tensorflow as tf
from tqdm import trange
import common.data as data
from common.networks import AllConvModel
import training.utils as utils
class TrainLoop:
    # Baseline training loop: trains `model` with SGD while maintaining an
    # exponential-moving-average copy (`ema_model`) whose weights are what
    # ultimately gets checkpointed and evaluated.
    def __init__(self, num_filters, num_classes, input_shape):
        """
        Create the models to be trained, and set up the base variables.
        """
        self.model, self.ema_model = self.make_ema_model(num_filters,
                                                         num_classes,
                                                         input_shape)
        # SGD hyperparameters consumed by make_optimizer().
        self.base_lr = 0.03
        self.sgd_momentum = 0.9
        # Interval (in epochs) between intermediate checkpoint saves.
        self.save_checkpoint_epochs = 10

    def make_model(self, num_filters, num_classes, input_shape):
        """
        Make a model with the specified number of filters, classes, and shape
        """
        model = AllConvModel(num_classes=num_classes,
                             num_filters=num_filters,
                             input_shape=input_shape)
        # Remove softmax for training (losses below expect raw logits)
        model.layers = model.layers[:-1]
        return model

    def batch_predict(self, model, x, batch_size):
        """
        Predict the neural network on a batch of examples
        """
        # Returns a plain Python list of predicted class ids.
        preds = []
        for i in range(0, len(x), batch_size):
            preds.extend(tf.argmax(model(x[i:i+batch_size], training=False),axis=1).numpy())
        return preds

    def loss(self, model, x, y, return_preds=False, wd=1e-4):
        """
        Compute the loss of the neural network on a given (x,y) tuple.
        """
        logits = model(x, training=True)
        # Sparse cross-entropy: y holds integer class ids, not one-hot.
        l_xe = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                           labels=y))
        # L2 weight decay on convolution/dense kernels only (skips biases/BN).
        l_wd = tf.add_n([tf.nn.l2_loss(v) for v in model.trainable_variables if 'kernel' in v.name])
        total_loss = l_xe + wd * l_wd
        if return_preds:
            return total_loss, logits
        else:
            return total_loss

    def augment(self, x, y):
        # Weak augmentation of the inputs; labels pass through unchanged.
        return data.augment_weak(x), y

    @tf.function
    def train_step(self, x, y):
        """
        Run one iteration of gradient descent on the (x,y) tuple.
        """
        with tf.GradientTape() as tape:
            # Compute the loss on this set of examples
            total_loss, logits = self.loss(self.model,
                                           *self.augment(x, y),
                                           return_preds=True)
        # Get the gradient of the loss
        g = tape.gradient(total_loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(g, self.model.trainable_variables))
        # Keep an exponential moving average of model weights to save
        # (decay factor 0.999: each step moves ema 0.1% toward the live model)
        for ema_var, value in zip(self.ema_model.variables, self.model.variables):
            ema_var.assign_sub((ema_var - value) * 0.001)
        return tf.argmax(logits, axis=1), total_loss

    def make_ema_model(self, num_filters, num_classes, input_shape):
        """
        Create a model, and an EMA model.
        Initialize the EMA model to the weights of the original model.
        """
        model = self.make_model(num_filters, num_classes=num_classes, input_shape=input_shape)
        ema_model = self.make_model(num_filters, num_classes=num_classes, input_shape=input_shape)
        # Start the EMA copy at exactly the live model's weights.
        for ema_var, value in zip(ema_model.variables, model.variables):
            ema_var.assign(value)
        return model, ema_model

    def post_epoch(self, epoch_frac, dataset):
        """
        Method to run after every epoch of training.
        By default just print the final test accuracy, but other defenses
        might require other processing afterwards.
        """
        _, (x_test, y_test), num_classes = dataset
        # Evaluate the EMA weights -- these are what gets saved at the end.
        test_acc = np.mean(self.batch_predict(self.ema_model, x_test, 64) == y_test)
        print(' test accuracy: ', "%.3f" % test_acc)

    def make_optimizer(self, steps_per_epoch, num_epochs):
        # SGD with momentum driven by a decaying learning-rate schedule.
        lr_schedule = utils.DecayLearningRateSchedule(steps_per_epoch=steps_per_epoch,
                                                      base_lr=self.base_lr,
                                                      num_epochs=num_epochs)
        return tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=self.sgd_momentum)

    def train(self, dataset, batch_size, num_epochs, model_dir):
        """
        Actually train the network on the provided dataset, for the
        given number of epochs, and save it to model_dir.
        """
        # A final checkpoint on disk means a previous run completed; skip.
        if os.path.exists(os.path.join(model_dir, 'final_checkpoint-1.index')):
            print('Model already trained.')
            return
        (x_train, y_train), (x_test, y_test), num_classes = dataset
        # Ceiling division so a partial trailing batch still counts.
        steps_per_epoch = (len(x_train) + batch_size - 1) // batch_size
        self.optimizer = self.make_optimizer(steps_per_epoch, num_epochs)
        checkpoint = utils.create_or_load_checkpoint(model_dir=model_dir,
                                                     model=self.model,
                                                     ema_model=self.ema_model,
                                                     opt=self.optimizer)
        print("Total number of training epochs:", num_epochs)
        # Compute initial_epoch in case model is restored from checkpoint
        initial_epoch = self.optimizer.iterations.numpy() // steps_per_epoch
        for epoch in range(initial_epoch, num_epochs):
            print('Training epoch ', epoch)
            # Fresh shuffle of the training set each epoch.
            order = np.random.permutation(len(x_train))
            # Run training, saving the model loss and accuracy each minibatch
            avg_loss = []
            avg_acc = []
            for i in trange(0, len(order), batch_size, leave=False, unit='img', unit_scale=batch_size):
                xb = x_train[order[i:i+batch_size]]
                yb = y_train[order[i:i+batch_size]]
                batch_preds, batch_loss = self.train_step(xb, yb)
                if np.isnan(batch_loss):
                    # Abort on divergence, dumping recent losses for debugging.
                    # NOTE(review): bare exit() relies on the site builtin;
                    # sys.exit(1) would be more robust -- left unchanged here.
                    print("Training diverged. Loss goes to nan.")
                    print("Last 30 loss values:", avg_loss[-30:])
                    exit(1)
                avg_loss.append(batch_loss)
                avg_acc.append(np.mean(batch_preds == yb))
            print("Avg train loss: %.3f" % np.mean(avg_loss),
                  ' avg train accuracy:', "%.3f" % np.mean(avg_acc),
                  end="")
            self.post_epoch(epoch/num_epochs, dataset)
            if epoch % self.save_checkpoint_epochs == 0:
                checkpoint_name = checkpoint.save(
                    os.path.join(model_dir, 'checkpoint'))
                logging.info('Saved checkpoint to %s', checkpoint_name)
            print()
        # Final checkpoint only includes EMA model
        final_checkpoint = tf.train.Checkpoint(model=self.ema_model)
        checkpoint_name = final_checkpoint.save(
            os.path.join(model_dir, 'final_checkpoint'))
        logging.info('Saved final checkpoint to %s', checkpoint_name)
FLAGS = flags.FLAGS
# NOTE(review): the flags used below (dataset, num_filters, batch_size,
# num_epochs, model_dir) appear to be defined in another module -- confirm.


def main(argv):
    """Entry point: load the dataset and train the baseline model."""
    del argv  # unused; required by the absl.app.run callback signature
    dataset = data.load_dataset(FLAGS.dataset)
    (x_train, y_train), (x_test, y_test), num_classes = dataset
    # Infer the network input shape from the first training example.
    input_shape = x_train[0].shape
    loop = TrainLoop(FLAGS.num_filters,
                     num_classes, input_shape)
    loop.train(dataset=dataset,
               batch_size=FLAGS.batch_size,
               num_epochs=FLAGS.num_epochs,
               model_dir=os.path.join(FLAGS.model_dir, "baseline/"))


if __name__ == '__main__':
    logging.set_verbosity(logging.INFO)
    app.run(main)
|
|
#!/usr/bin/python3
import cv2
import numpy as np
import sys
import os
import pickle
import datetime
import base64
import io
from matplotlib import pyplot as plt
from PIL import Image
import extract_feature
# x = np.random.randint(25,100,25)
# y = np.random.randint(175,255,25)
# z = np.hstack((x,y))
# z = z.reshape((50,1))
# z = np.float32(z)
# # plt.hist(z,256,[0,256]),plt.show()
# # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# # Set flags (Just to avoid line break in the code)
# flags = cv2.KMEANS_RANDOM_CENTERS
# # Apply KMeans
# compactness,labels,centers = cv2.kmeans(z,2,None,criteria,10,flags)
# A = z[labels==0]
# B = z[labels==1]
# # Now plot 'A' in red, 'B' in blue, 'centers' in yellow
# plt.hist(A,256,[0,256],color = 'r')
# plt.hist(B,256,[0,256],color = 'b')
# plt.hist(centers,32,[0,256],color = 'y')
# plt.show()
# img = cv2.imread('C:\\Users\\yagor\\extrator-caracteristicas\\banco_imagens\\Parthenon\\spencer-davis-1533814-unsplash.jpg', cv2.COLOR_BGR2RGB)
# # blur = cv2.bilateralFilter(img,9,500,500)
# cinza = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# canny = cv2.Canny(cinza, 150,150)
# plt.subplot(121),plt.imshow(img)
# plt.title('Imagem original'), plt.xticks([]), plt.yticks([])
# plt.subplot(122),plt.imshow(canny)
# plt.title('Imagem filtrada'), plt.xticks([]), plt.yticks([])
# plt.show()
# Collect all image paths from the four landmark directories.
imagens = extract_feature.ler_diretorio_imagens("banco_imagens/Colosseum")
imagens += extract_feature.ler_diretorio_imagens("banco_imagens/Eiffel")
imagens += extract_feature.ler_diretorio_imagens("banco_imagens/Louvre")
imagens += extract_feature.ler_diretorio_imagens("banco_imagens/Parthenon")

# Target bounding box for the square thumbnails.
size = 300, 300

for imagem in imagens:
    real_img = Image.open(imagem)
    sqr_img = extract_feature.make_square(real_img)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    sqr_img.thumbnail(size, Image.LANCZOS)
    destino = imagem.replace('banco_imagens', 'banco_imagens_sqr').replace('.jpg', '.png')
    # Create the mirrored output directory before saving (save() does not).
    os.makedirs(os.path.dirname(destino), exist_ok=True)
    sqr_img.save(destino)
|
|
import numpy
import os
import sys
from setuptools import setup, find_packages, Extension
# Setup C module include directories
include_dirs = [numpy.get_include()]

# Setup C module macros
define_macros = [('NUMPY', '1')]

# Handle MSVC `wcsset` redefinition
if sys.platform == 'win32':
    define_macros += [
        # Fix: the MSVC macro is `_CRT_SECURE_NO_WARNINGS` (plural); the
        # previous `_CRT_SECURE_NO_WARNING` spelling was silently ignored.
        ('_CRT_SECURE_NO_WARNINGS', None),
        ('__STDC__', 1)
    ]
# Package metadata and native extensions; version is derived from the SCM
# tags via setuptools_scm (use_scm_version=True).
setup(
    name="stsciutils",
    use_scm_version=True,
    url="https://github.com/spacetelescope/stsciutils",
    maintainer="J. Hunkeler",
    maintainer_email="jhunk@stsci.edu",
    setup_requires=['setuptools_scm'],
    install_requires=[
        'numpy',
        'astropy',
    ],
    packages=find_packages(),
    ext_modules=[
        # Image combination kernel
        Extension('stsciutils.image._combine',
                  ['stsciutils/image/src/_combinemodule.c'],
                  include_dirs=include_dirs,
                  define_macros=define_macros),
        # Statistics helpers for imagestats
        Extension('stsciutils.imagestats.buildHistogram',
                  ['stsciutils/imagestats/src/buildHistogram.c'],
                  include_dirs=include_dirs,
                  define_macros=define_macros),
        Extension('stsciutils.imagestats.computeMean',
                  ['stsciutils/imagestats/src/computeMean.c'],
                  include_dirs=include_dirs,
                  define_macros=define_macros),
        # stimage: geometric mapping / catalog matching C library + wrappers
        Extension('stsciutils.stimage._stimage',
                  ['stsciutils/stimage/src/immatch/geomap.c',
                   'stsciutils/stimage/src/immatch/xyxymatch.c',
                   'stsciutils/stimage/src/immatch/lib/tolerance.c',
                   'stsciutils/stimage/src/immatch/lib/triangles.c',
                   'stsciutils/stimage/src/immatch/lib/triangles_vote.c',
                   'stsciutils/stimage/src/lib/error.c',
                   'stsciutils/stimage/src/lib/lintransform.c',
                   'stsciutils/stimage/src/lib/polynomial.c',
                   'stsciutils/stimage/src/lib/util.c',
                   'stsciutils/stimage/src/lib/xybbox.c',
                   'stsciutils/stimage/src/lib/xycoincide.c',
                   'stsciutils/stimage/src/lib/xysort.c',
                   'stsciutils/stimage/src/surface/cholesky.c',
                   'stsciutils/stimage/src/surface/fit.c',
                   'stsciutils/stimage/src/surface/surface.c',
                   'stsciutils/stimage/src/surface/vector.c',
                   'stsciutils/stimage/src_wrap/stimage_module.c',
                   'stsciutils/stimage/src_wrap/wrap_util.c',
                   'stsciutils/stimage/src_wrap/immatch/py_xyxymatch.c',
                   'stsciutils/stimage/src_wrap/immatch/py_geomap.c'],
                  include_dirs=include_dirs + ['stsciutils/stimage/include', 'stsciutils/stimage/src_wrap'],
                  define_macros=define_macros,
                  )
    ],
)
|
|
import polygon_primitives.helper_methods as hm
import numpy as np
from line_extraction_primitives.line import Line
import plotting
"""Definition for the edge class. Extends the Line class from line_extraction_primitives."""
class Edge(Line):
def __init__(self, point1, point2, edge_id=-1, temp_edge=False, order_points=True):
    """Create an edge between two points.

    When order_points is True the endpoints may be swapped so that the
    end point has the larger y coordinate.
    """
    # NOTE(review): Line.__init__ is never invoked -- confirm the base
    # class requires no setup of its own.
    self.end_points = [point1, point2]
    # Adjacency bookkeeping, populated later by check_and_set_neighbor().
    self.start_neighbors = []
    self.end_neighbors = []
    # Polygon-construction state.
    self.polygon_edge_candidate = True
    self.left_poly = None
    self.right_poly = None
    self.potential_start = None
    self.potential_end = None
    self.edge_id = edge_id
    self.temp_edge = temp_edge
    # Must run last: may call reverse_direction(), which reads the
    # neighbor lists initialized above.
    self.set_points(point1, point2, order_points)
def get_start(self):
    """Return the edge's start point."""
    point = self.start_point
    return point
def get_end(self):
    """Return the edge's end point."""
    point = self.end_point
    return point
def set_edge_id(self, edge_id):
    """Assign a permanent id; an edge with an id is no longer temporary."""
    self.edge_id = edge_id
    self.temp_edge = False
def set_start(self, point):
    """Overwrite the edge's start point."""
    self.start_point = point
def set_end(self, point):
    """Overwrite the edge's end point."""
    self.end_point = point
def get_edge_vector(self):
    """Return the displacement vector from start point to end point."""
    vector = self.end_point - self.start_point
    return vector
"""Determines the density of the edge in the grid, by finding the points near the edge and projecting them onto the line."""
def get_support(self, grid):
edge = Edge(self.get_start(), self.get_end())
line.set_points(self.get_start(), self.get_end())
transformed_line = grid.transform_line_to_grid_space(line)
points = grid.get_points_from_line_segment(transformed_line, width=0.4)
density = hm.get_density([points], width=0.4)
return density
"""Reverse the direction of an edge. This switches the start and end neighbors as well."""
def reverse_direction(self):
start_point = self.get_end()
end_point = self.get_start()
new_start_neighbors = self.get_end_neighbors()
new_end_neighbors = self.get_start_neighbors()
self.set_start(start_point)
self.set_end(end_point)
self.start_neighbors = new_start_neighbors
self.end_neighbors = new_end_neighbors
"""Put the edge points in an absolute ordering. Sorted on the y direction, so that the end point has a higher y."""
def set_points_absolute_order(self):
point1 = self.get_start()
point2 = self.get_end()
if point1[1] > point2[1]:
self.reverse_direction()
def set_points(self, point1, point2, order_points=False):
    """Assign both endpoints, optionally enforcing ascending-y order."""
    self.set_start(point1)
    self.set_end(point2)
    needs_flip = order_points and point1[1] > point2[1]
    if needs_flip:
        self.reverse_direction()
"""Resets all the edge components."""
def reset_edge(self):
self.start_neighbors = []
self.end_neighbors = []
self.polygon_edge_candidate = True
self.left_poly = None
self.right_poly = None
self.temp_edge = False
def get_neighbors(self):
    """Return every neighboring edge, start-side neighbors first."""
    return [*self.start_neighbors, *self.end_neighbors]
def remove_neighbor(self, neighbor):
    """Remove `neighbor` from whichever neighbor list(s) contain it.

    Fix: the previous implementation unconditionally removed from both
    lists, which raised ValueError whenever the neighbor was registered
    on only one side -- the normal case, since check_and_set_neighbor
    appends each neighbor to exactly one list.
    """
    if neighbor in self.start_neighbors:
        self.start_neighbors.remove(neighbor)
    if neighbor in self.end_neighbors:
        self.end_neighbors.remove(neighbor)
"""Removes the current edge as a neighbor from the neighboring edges."""
def delete(self):
for each in self.start_neighbors:
each.remove_neighbor(self)
for each in self.end_neighbors:
each.remove_neighbor(self)
def get_start_neighbors(self):
    """Return the edges adjacent at the start point."""
    neighbors = self.start_neighbors
    return neighbors
def set_edge_candidate(self, b_candidate):
    """Mark whether this edge may still participate in a polygon."""
    self.polygon_edge_candidate = b_candidate
def get_end_neighbors(self):
    """Return the edges adjacent at the end point."""
    neighbors = self.end_neighbors
    return neighbors
def is_polygon_candidate(self):
    """Return True while this edge is still a polygon candidate."""
    flag = self.polygon_edge_candidate
    return flag
def is_neighbor(self, other_edge):
    """Return True if `other_edge` is linked at either endpoint."""
    return (other_edge in self.start_neighbors
            or other_edge in self.end_neighbors)
def check_if_neighbors_in_list(self, edges):
    """Return True if any neighbor of this edge appears in `edges`."""
    return any(neighbor in edges for neighbor in self.get_neighbors())
"""Shortens the edge by the specified amount, from either the start or the end or both."""
def shorten_edge_by_amount(self, amount, is_start, is_end):
start = self.get_start()
end = self.get_end()
vec = hm.normalize(end - start)
if is_start:
start = start + amount * vec
if is_end:
end = end - amount * vec
self.set_points(start, end)
"""Intersects a ray with the current edge, and returns the intersection point if one exists."""
def get_euclidean_intersection_with_ray(self, ray_start, ray_direction):
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
v1 = hm.normalize(start_point - end_point)
v2 = hm.normalize(ray_direction)
return hm.get_intersection_point(v1, v2, start_point, ray_start)
"""Gets the intersection point of the current edge with another edge, if one exists."""
def get_euclidean_intersection_point(self, other_edge):
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
other_start = np.array(other_edge.get_start())
other_end = np.array(other_edge.get_end())
v1 = hm.normalize(start_point - end_point)
v2 = hm.normalize(other_start - other_end)
return hm.get_intersection_point(v1, v2, start_point, other_start)
"""Compares the current edge with another edge, and determines whether the endpoints are close enough to consider them neighbors."""
def check_and_set_neighbor(self, edge, dist=0.5):
if self == edge or edge in self.start_neighbors or edge in self.end_neighbors:
return
if hm.compare_points(edge.get_start(), self.get_start(), dist):
self.start_neighbors.append(edge)
edge.start_neighbors.append(self)
elif hm.compare_points(edge.get_start(), self.get_end(), dist):
self.end_neighbors.append(edge)
edge.start_neighbors.append(self)
elif hm.compare_points(edge.get_end(), self.get_start(), dist):
self.start_neighbors.append(edge)
edge.end_neighbors.append(self)
elif hm.compare_points(edge.get_end(), self.get_end(), dist):
self.end_neighbors.append(edge)
edge.end_neighbors.append(self)
"""Walk along the edge, in the direction specified by following the incoming edge to the current edge."""
def walk_along_edge(self, incoming_edge):
if incoming_edge in self.start_neighbors:
return self.end_neighbors
else:
return self.start_neighbors
"""Gets the endpoint shared by two edges, if one exists."""
def get_shared_point(self, other_edge, dist=0.8):
start_point = self.get_start()
end_point = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
if hm.compare_points(start_point, other_start, dist) or hm.compare_points(start_point, other_end, dist):
return start_point
elif hm.compare_points(end_point, other_start, dist) or hm.compare_points(end_point, other_end, dist):
return end_point
else:
return None
"""Checks if the current edge is left of the input edge."""
def is_edge_left(self, edge):
start_point = None
matching_point = None
end_point = None
if hm.compare_points(edge.get_start(), self.get_start()):
start_point = self.get_end()
matching_point = self.get_start()
end_point = edge.get_end()
elif hm.compare_points(edge.get_end(), self.get_start()):
start_point = self.get_end()
matching_point = self.get_start()
end_point = edge.get_start()
elif hm.compare_points(edge.get_end(), self.get_end()):
start_point = self.get_start()
matching_point = self.get_end()
end_point = edge.get_start()
elif hm.compare_points(edge.get_start(), self.get_end()):
start_point = self.get_start()
matching_point = self.get_end()
end_point = edge.get_end()
if matching_point is None:
return False
return hm.is_point_left(start_point, matching_point, end_point)
"""Gets the angle between two neighboring edges. Checks for neighborhood by checking whether endpoints are within 'dist' of each other."""
def get_angle_between_edges(self, edge, dist=0.5):
#Find matching endpoints, note that the current edge (self) is always treated as the start
start_point = None
matching_point = None
end_point = None
if hm.compare_points(edge.get_start(), self.get_start(), dist):
start_point = self.get_end()
matching_point = self.get_start()
end_point = edge.get_end()
elif hm.compare_points(edge.get_end(), self.get_start(), dist):
start_point = self.get_end()
matching_point = self.get_start()
end_point = edge.get_start()
elif hm.compare_points(edge.get_end(), self.get_end(), dist):
start_point = self.get_start()
matching_point = self.get_end()
end_point = edge.get_start()
elif hm.compare_points(edge.get_start(), self.get_end(), dist):
start_point = self.get_start()
matching_point = self.get_end()
end_point = edge.get_end()
if matching_point is None:
return float("inf")
v1 = np.array(matching_point) - np.array(start_point)
v2 = np.array(end_point) - np.array(matching_point)
theta = hm.signed_angle_between(v1, v2)
if theta > 140:
theta = theta - 180.0
elif theta < -140:
theta = theta + 180.0
return theta
"""Gets the angle between two neighboring edges."""
def get_angle(self, edge):
#Find matching endpoints, note that the current edge (self) is always treated as the start
shared_point = self.get_shared_point(edge)
if shared_point is not None:
if hm.compare_points(shared_point, self.get_start()):
self_start = self.get_start()
self_end = self.get_end()
else:
self_start = self.get_end()
self_end = self.get_start()
if hm.compare_points(shared_point, edge.get_start()):
other_start = edge.get_start()
other_end = edge.get_end()
else:
other_start = edge.get_end()
other_end = edge.get_start()
v1 = np.array(other_end) - np.array(other_start)
v2 = np.array(self_end) - np.array(self_start)
theta = hm.angle_between(v1, v2)
return np.rad2deg(theta)
else:
return float("inf")
"""Finds the normal vector to the edge"""
def find_normal(self):
v1 = np.array(self.get_start()) - np.array(self.get_end())
normal_dir = [-1.0 * v1[1], v1[0]]
return hm.normalize(normal_dir)
"""Returns whether the point is below the edge, above the edge, or on the edge"""
def check_point_below_edge(self, point):
normal = self.find_normal()
signed_dist = np.dot(normal, (np.array(point) - np.array(self.get_start())))
if signed_dist > 0:
return False
else:
return True
"""Returns whether a vector with starting point comp_point intersects the edge"""
def check_edge_intersection(self, comp_point, comp_vector, epsilon=0.01):
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
edge_vec = end_point - start_point
if comp_vector[1] == 0.0 or (edge_vec[0] - edge_vec[1] * comp_vector[0] / comp_vector[1]) == 0.0:
return
denom_a = (edge_vec[0] - edge_vec[1] * comp_vector[0] / comp_vector[1])
a = ((start_point[1] - comp_point[1]) * comp_vector[0] / comp_vector[1] + (comp_point[0] - start_point[0])) / denom_a
b = (start_point[0] - comp_point[0] + a * edge_vec[0]) / comp_vector[0]
if a >= -epsilon and a <= 1.0 + epsilon and b > -epsilon:
return True
else:
return False
def get_length(self):
    """Euclidean length of the edge."""
    start, end = self.get_start(), self.get_end()
    return hm.distance(start, end)
"""Returns whether the current edge is collinear with another edge."""
def is_collinear(self, other_edge):
start = self.get_start()
end = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
return hm.collinear(start, end, other_start) and hm.collinear(start, end, other_end)
"""Returns the intersection point that would occur by extending one of the current edge or the input edge, if it exists."""
def get_extended_intersection(self, other_edge):
start = self.get_start()
end = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
edge = None
if hm.collinear(start, end, other_start) and hm.collinear(start, end, other_end):
dist1 = hm.distance(end, other_end)
dist2 = hm.distance(start, other_end)
dist3 = hm.distance(end, other_start)
dist4 = hm.distance(start, other_start)
min_dist = min(dist1, dist2, dist3, dist4)
if min_dist == dist1:
edge_start, edge_end = end, other_end
elif min_dist == dist2:
edge_start, edge_end = start, other_end
elif min_dist == dist3:
edge_start, edge_end = end, other_start
elif min_dist == dist4:
edge_start, edge_end = start, other_start
edge = Edge(edge_start, edge_end)
elif np.rad2deg(hm.angle_between(end - start, other_end - other_start)) > 10.0:
intersection_point = self.get_euclidean_intersection_point(other_edge)
if intersection_point is None:
return None
dist1 = hm.distance(end, intersection_point)
dist2 = hm.distance(start, intersection_point)
min_dist = min(dist1, dist2)
if min_dist == dist1:
edge_start, edge_end = end, intersection_point
elif min_dist == dist2:
edge_start, edge_end = start, intersection_point
edge = Edge(edge_start, edge_end)
return edge
"""If an endpoint is located at shared_point, moves it to move_point."""
def move_to_point(self, shared_point, move_point):
start = self.get_start()
end = self.get_end()
if hm.compare_points(start, shared_point):
self.set_start(move_point)
else:
self.set_end(move_point)
"""Checks whether two edges have endpoints that are close together, and if so, moves them to the exact same location."""
def move_edges_to_shared_point(self, other_edge, epsilon):
shared_point = self.get_shared_point(other_edge, epsilon)
has_moved = True
if shared_point is not None:
start = self.get_start()
end = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
angle = self.get_angle(other_edge)
if abs(angle) < 10 or abs(angle - 180.0) < 10:
if hm.compare_points(shared_point, start, epsilon) and self.get_length() < other_edge.get_length():
other_edge.move_to_point(shared_point, start)
elif hm.compare_points(shared_point, end, epsilon) and self.get_length() < other_edge.get_length():
other_edge.move_to_point(shared_point, end)
elif hm.compare_points(shared_point, other_start, epsilon) and other_edge.get_length() < self.get_length():
self.move_to_point(shared_point, other_start)
elif hm.compare_points(shared_point, other_end, epsilon) and other_edge.get_length() < self.get_length():
self.move_to_point(shared_point, other_end)
else:
has_moved = False
else:
has_moved = False
return has_moved
"""If one edge engulfs another edge, this removes the overlap between them by moving one endpoint of the larger edge to the start of the
smaller edge, and constructs a third edge connecting to the end of the smaller edge."""
def remove_edge_overlap(self, other_edge):
start = self.get_start()
end = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
shared_point = self.get_shared_point(other_edge)
start_dist = hm.distance(start, other_start)
end_dist = hm.distance(end, other_start)
if hm.compare_points(shared_point, other_start):
if hm.compare_points(shared_point, start):
other_edge.set_points(end, other_end)
else:
other_edge.set_points(start, other_end)
else:
if hm.compare_points(shared_point, start):
other_edge.set_points(other_start, end)
else:
other_edge.set_points(other_start, start)
"""Removes edge overlap by moving edge endpoints. Can also create a third edge if one edge engulfs the other."""
def split_edge_overlap(self, other_edge, percent_proximity=0.05):
start = self.get_start()
end = self.get_end()
other_start = other_edge.get_start()
other_end = other_edge.get_end()
new_edge = None
on_line_dist = percent_proximity * max(other_edge.get_length(), self.get_length())
shared_point = self.get_shared_point(other_edge)
start_point_on_other = other_edge.on_line(start, dist=on_line_dist)
end_point_on_other = other_edge.on_line(end, dist=on_line_dist)
other_start_on_self = self.on_line(other_start, dist=on_line_dist)
other_end_on_self = self.on_line(other_end, dist=on_line_dist)
#Case 1: One edge engulfs the other
if shared_point is not None and ((start_point_on_other and hm.compare_points(shared_point, end)) or
(end_point_on_other and hm.compare_points(shared_point, start))):
self.remove_edge_overlap(other_edge)
elif shared_point is not None and ((other_start_on_self and hm.compare_points(shared_point, other_end)) or
(other_end_on_self and hm.compare_points(shared_point, other_start))):
other_edge.remove_edge_overlap(self)
#Case 2: The two edges overlap
elif (start_point_on_other or end_point_on_other) and self.is_collinear(other_edge):
if start_point_on_other:
new_edge_end = start
else:
new_edge_end = end
if other_start_on_self:
new_edge = Edge(other_start, new_edge_end)
self.move_to_point(new_edge_end, other_start)
other_edge.move_to_point(other_start, new_edge_end)
else:
new_edge = Edge(other_end, new_edge_end)
self.move_to_point(new_edge_end, other_end)
other_edge.move_to_point(other_end, new_edge_end)
return new_edge
"""Checks whether an edge intersects any of the edges (within a small proximity), and if so, splits both edges at the intersection point."""
def split_edges(self, edges, dist=0.5, percent_proximity=0.05):
new_edges = []
for edge in edges:
if edge == self:
continue
moved_shared = self.move_edges_to_shared_point(edge, dist)
new_edge = self.split_edge_overlap(edge, percent_proximity=percent_proximity)
if new_edge is not None:
new_edges.append(new_edge)
return new_edges
"""Checks whether a point lies on the edge."""
def on_line(self, point, dist=0.1, epsilon=0.1, debug=False):
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
v1 = end_point - start_point
orig = point - start_point
#Chooses the shorter line segment between the test point and the two endpoints
checkpoint_start = start_point
if hm.distance(point, start_point) > hm.distance(point, end_point):
orig = point - end_point
checkpoint_start = end_point
v1 = start_point - end_point
index = 0
if abs(v1[0]) < abs(v1[1]):
index = 1
#TODO (Maybe): choose the starting point that minimizes checkpoint distance?
checkpoint = checkpoint_start + v1 * orig[index] / v1[index]
if hm.distance(checkpoint, point) < dist and orig[index] / v1[index] <= 1.0 + epsilon and orig[index] / v1[index] > 0:
# print(start_point, end_point, point)
return True
return False
def check_new_edges(self, edges, dist=1.0):
    """Return the first edge in `edges` equal to this one (within `dist`), flipping
    this edge's direction first if the match runs the opposite way; None if no match."""
    for candidate in edges:
        if not self.equals(candidate, dist=dist):
            continue
        # Align directions before handing the match back.
        if not self.ordered_equals(candidate, dist=dist):
            self.reverse_direction()
        return candidate
    return None
#Attract endpoints of edges together
def join_with_nearby_edge(self, edges, polygon, dist=1.0):
    # For each candidate edge whose endpoint coincides with a polygon corner, pull
    # this edge's matching endpoint (and the whole polygon's edges) onto that corner.
    # Candidates equal to self, already in the polygon, or shorter than `dist` are skipped.
    corners = polygon.get_corners()
    poly_edges = polygon.get_edges()
    new_start = None
    new_end = None
    start_point = self.get_start()
    end_point = self.get_end()
    for edge in edges:
        if self.equals(edge, dist=dist) or edge in poly_edges or edge.get_length() < dist:
            continue
        edge_start = edge.get_start()
        edge_end = edge.get_end()
        for corner in corners:
            # Only snap to endpoints that sit at a polygon corner.
            if hm.compare_points(edge_start, corner, dist):
                if hm.compare_points(self.get_start(), edge_start, dist):
                    new_start = edge_start
                elif hm.compare_points(self.get_end(), edge_start, dist):
                    new_end = edge_start
            if hm.compare_points(edge_end, corner, dist):
                if hm.compare_points(self.get_start(), edge_end, dist):
                    new_start = edge_end
                elif hm.compare_points(self.get_end(), edge_end, dist):
                    new_end = edge_end
    # Apply the snap(s): move every polygon edge endpoint at the old location to the new one.
    if new_end is not None:
        polygon_edges = [self] + poly_edges
        hm.move_edge_points(polygon_edges, end_point, new_end)
    if new_start is not None:
        polygon_edges = [self] + poly_edges
        hm.move_edge_points(polygon_edges, start_point, new_start)
"""Checks whether two edges have the same endpoints in the same order."""
def ordered_equals(self, other, dist=1.0):
is_same = hm.compare_points(self.get_start(), other.get_start(), dist) and hm.compare_points(self.get_end(), other.get_end(), dist)
return is_same
"""Checks whether two edges have the same endpoints regardless of ordering (i.e. one edge can be the reverse of the other)."""
def equals(self, other, dist=1.0):
is_same = hm.compare_points(self.get_start(), other.get_start(), dist) and hm.compare_points(self.get_end(), other.get_end(), dist)
is_reversed = hm.compare_points(self.get_start(), other.get_end(), dist) and hm.compare_points(self.get_end(), other.get_start(), dist)
return is_same or is_reversed
"Splits an edge into two at a point."
def split_edge_at_point(self, point):
new_edge = Edge(point, self.get_end())
split_edge = Edge(self.get_start(), point)
return new_edge, split_edge
"""Moves the endpoint of an edge to the intersection point, and splits the edge the intersection point lies on into two."""
def snip_edge(self, intersection_point, snip_dist=1.0):
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
start_distance = hm.distance(intersection_point, start_point)
end_distance = hm.distance(intersection_point, end_point)
new_edge = None
if start_distance > 0.05 and end_distance > 0.05:
line_length = self.get_length()
created_edge, split_edge = self.split_edge_at_point(intersection_point)
if split_edge.get_length() > snip_dist and created_edge.get_length() > snip_dist:
new_edge = created_edge
self.set_points(split_edge.get_start(), split_edge.get_end())
return new_edge
"""Splits an edge into two if it intersects another edge or is within a reasonable distance from another edge
(i.e. if extending it a small distance based on the length of the edge would cause it to intersect another edge)."""
def split_edge_intersect(self, other_edge, min_wall_length=3.0, percent_proximity=0.2, epsilon_proximity=0.01, debug=False):
intersection_point = self.get_euclidean_intersection_point(other_edge)
start_point = np.array(self.get_start())
end_point = np.array(self.get_end())
new_edges = []
if intersection_point is not None:
start_distance = hm.distance(intersection_point, start_point)
end_distance = hm.distance(intersection_point, end_point)
other_start_dist = hm.distance(intersection_point, other_edge.get_start())
other_end_dist = hm.distance(intersection_point, other_edge.get_end())
#TODO: Make this min_wall_length
snip_proximity = max(percent_proximity * self.get_length(), min_wall_length)
min_dist = min(start_distance, end_distance)
min_other_dist = min(other_start_dist, other_end_dist)
if other_edge.on_line(intersection_point) and min_dist < snip_proximity and min_other_dist < snip_proximity and min_dist > epsilon_proximity:
edge = other_edge.snip_edge(intersection_point)
if edge is not None:
new_edges.append(edge)
start_dist = hm.distance(intersection_point, self.get_start())
end_dist = hm.distance(intersection_point, self.get_end())
if start_dist > end_dist:
self.set_points(self.get_start(), intersection_point)
else:
self.set_points(self.get_end(), intersection_point)
return new_edges
def __eq__(self, other):
    # Proximity-based equality: edges compare equal when their endpoints are within
    # 0.8 units of each other, in either orientation.
    # NOTE(review): __hash__ uses edge_id, so two edges that compare equal here can
    # hash differently — set/dict deduplication is not reliable for this class.
    return self.equals(other, dist=0.8)
def __repr__(self):
    """Debug string listing this edge's id and the ids of its start/end neighbors."""
    start_ids = [neighbor.edge_id for neighbor in self.start_neighbors]
    end_ids = [neighbor.edge_id for neighbor in self.end_neighbors]
    return ("ID: " + str(self.edge_id)
            + ", Start Neighbor IDs: " + str(start_ids)
            + ", End Neighbor IDs: " + str(end_ids))
def __hash__(self):
    # Hash on edge_id (assumed unique per edge).  Note __eq__ is proximity-based,
    # so the usual eq/hash consistency contract does not strictly hold.
    return self.edge_id
|
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator as mpl
import datetime
from datetime import datetime as dtime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
# Plot hourly crowd flow for two adjacent grid cells ((15,30) and (15,31)) on Sep 1, 2017.
data1 = pd.read_csv('../preprocess_data/month9a_reshape.csv',header=0,index_col=0)
data2 = pd.read_csv('../preprocess_data/month9b_reshape.csv',header=0,index_col=0)
data = pd.concat([data1,data2]) # concatenate the two month files (default axis=0)
#print(data.shape[0])
'''
# (15,30)为中心的25个格子
indexs = []; center=pd.Series((15,30))
for i in range(-2,3):
    for j in range(-2,3):
        point = pd.Series((i,j))+center
        indexs.append(53*point[0]+point[1])
#print(indexs)
ndays=7
data_part = data.iloc[:24*ndays,indexs]
result = data_part.apply(lambda x:x.sum(),axis=1) #按行求和
'''
# Plotting
ax = plt.subplot(111)
ndays=1
# Column indices of grid cells (15,30) and (15,31) in the 53-column-wide flattened grid.
index = [53*15+30,53*15+31]
result1 = data.iloc[:24*ndays,index[0]]
result2 = data.iloc[:24*ndays,index[1]]
# Generate the hourly time series from Sep 1 00:00 to Sep `ndays` 23:00
start = dtime.strptime('090100','%m%d%H')
end = dtime.strptime('09'+ '%02d' %ndays +'23','%m%d%H')
print(end)
dates = []
dates.append(start)
while start<end:
    start += datetime.timedelta(hours=+1)
    dates.append(start)
print(dates)
# Format x-axis tick labels as month/day-hour
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d-%H'))
# Major x-axis ticks every 2 hours
ax.xaxis.set_major_locator(mdates.HourLocator(interval = 2))
# Rotate tick labels (original author noted this did not take effect)
for label in ax.get_xticklabels():
    label.set_rotation(30)
    label.set_horizontalalignment('right')
plt.plot(dates, result1.values,c='b')
plt.plot(dates, result2.values,c='r')
plt.legend(labels=['grid(15,30)','grid(15,31)'])
#plt.gcf().autofmt_xdate()
plt.xlabel('Time')
plt.ylabel('Crowd/person')
plt.title('Graph of comparing crowd flow in 2 grids on Sep.1st 2017')
plt.show()
|
|
# pynhanes/data.py
__doc__ = """
Loading NHANES data.
"""
#-----------------------------------------------------------------------------
# Logging
#-----------------------------------------------------------------------------
import logging
_l = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports & Options
#-----------------------------------------------------------------------------
# External Imports
import numpy as np
import os
import pandas as pd
import requests
from collections import defaultdict
#-----------------------------------------------------------------------------
# Globals & Constants
#-----------------------------------------------------------------------------
BASE_URL = "https://wwwn.cdc.gov/Nchs/Nhanes"
YEARS = {
"1999-2000": "",
"2001-2002": "_B",
"2003-2004": "_C",
"2005-2006": "_D",
"2007-2008": "_E",
"2009-2010": "_F",
"2011-2012": "_G",
"2013-2014": "_H",
"2015-2016": "_I",
"2017-2018": "_J",
"2019-2020": "_K",
}
#-----------------------------------------------------------------------------
# Building Data Index
#-----------------------------------------------------------------------------
def load(datasets, years):
    """Loads NHANES datasets into a dictionary of DataFrames.
    Adds a column "year" and concatenates multi-year results.

    Parameters
    ----------
    datasets : iterable of str
        NHANES table names (e.g. "DEMO").
    years : tuple(int, int)
        Half-open (year_start, year_end) range of survey years to fetch.

    Returns a dict mapping each dataset name to one concatenated DataFrame
    (or to a list when no year loaded; the failure is logged, as before).
    Bug fix: an invalid `years` argument previously only logged an error and
    then crashed on tuple unpacking — now it fails fast with an empty result.
    """
    if not (isinstance(years, tuple) and len(years) == 2):
        _l.error('Provide year range as a tuple of ints: (year_start, year_end)')
        return {}
    y0, y1 = years
    visited = set()  # URLs already attempted — membership test only, so a set
    res = defaultdict(list)
    for dataset in datasets:
        for year in range(y0, y1):
            url = nhanes_url(dataset, year) + '.XPT'
            if url in visited:
                # Multiple years map to the same two-year cycle file; fetch once.
                continue
            visited.add(url)
            try:
                df = pd.read_sas(url, encoding='windows-1252')
                _l.info('read {0[0]} rows x {0[1]} cols from {1}'.format(
                    df.shape, url
                ))
                df['year'] = year
                res[dataset].append(df)
            except Exception as e:
                _l.error(f'{url}: {e}')
        try:
            res[dataset] = pd.concat(res[dataset], axis=0, join='outer')
            _l.info('combined {0} datasets: {1[0]} rows x {1[1]} cols'.format(
                dataset, res[dataset].shape
            ))
        except Exception as e:
            # e.g. every year failed to download: res[dataset] stays a (empty) list.
            _l.error(e)
    return res
def nhanes_url(dataset: str, year: int=2018) -> str:
    """Build URL to retrieve an NHANES dataset; empty string when the year maps to
    no known two-year cycle (the error is logged)."""
    matches = [cycle for cycle in YEARS if str(year) in cycle]
    if len(matches) != 1:
        _l.error('No NHANES data for this year')
        return ''
    cycle = matches[0]
    # Dataset files carry a per-cycle letter suffix, e.g. DEMO_J for 2017-2018.
    return f'{BASE_URL}/{cycle}/{dataset.upper() + YEARS[cycle]}'
#-----------------------------------------------------------------------------
# Special Data
#-----------------------------------------------------------------------------
def load_drugs(url='https://wwwn.cdc.gov/Nchs/Nhanes/1999-2000/RXQ_DRUG.xpt'):
    """Fetch the NHANES prescription-drug lookup table (RXQ_DRUG) as a DataFrame."""
    frame = pd.read_sas(url, encoding='windows-1252')
    _l.info('read {0[0]} rows x {0[1]} cols from {1}'.format(
        frame.shape, url
    ))
    return frame
#-----------------------------------------------------------------------------
# Misc
#-----------------------------------------------------------------------------
def test():
    # Smoke test: prints to stdout and emits a DEBUG record to verify logging wiring.
    print('testing')
    _l.debug('testing')
|
|
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, csgraph
import scipy
import igraph as ig
import leidenalg
import time
import hnswlib
import matplotlib.pyplot as plt
import matplotlib
import math
import multiprocessing
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import umap
import scanpy as sc
from MulticoreTSNE import MulticoreTSNE as TSNE
import random
from scipy.sparse.csgraph import connected_components
import pygam as pg
import matplotlib.colors as colors
import matplotlib.cm as cm
import palantir #/home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
# version before translating chinese on Feb13
# jan2020 Righclick->GIT->Repository-> PUSH
def plot_sc_pb(ax, embedding, prob, ti):
    """Scatter the 2-D `embedding` colored by branch probability `prob` (viridis),
    fading low-probability cells via tiered alpha, and title the axes with
    terminal state `ti`."""
    cmap = matplotlib.cm.get_cmap('viridis')
    norm = matplotlib.colors.Normalize(vmin=0, vmax=np.max(prob))
    prob = np.asarray(prob)
    rgba = cmap(norm(prob)).reshape(-1, 4)
    # Alpha tiers: lower probability -> more transparent point.
    rgba[np.where(prob <= 0.3)[0], 3] = 0.2
    rgba[np.where((prob > 0.3) & (prob <= 0.5))[0], 3] = 0.5
    rgba[np.where((prob > 0.5) & (prob <= 0.7))[0], 3] = 0.8
    rgba[np.where(prob > 0.7)[0], 3] = 0.8
    ax.scatter(embedding[:, 0], embedding[:, 1], c=rgba, s=10, cmap='viridis',
               edgecolors='none')
    ax.set_title('Target: ' + str(ti))
def simulate_multinomial(vmultinomial):
    """Sample the next state index from the categorical distribution `vmultinomial`
    using a single uniform draw: returns the index of the CDF interval that the
    draw falls into."""
    r = np.random.uniform(0.0, 1.0)
    # searchsorted on the CDF is equivalent to counting cumulative sums strictly below r.
    return np.searchsorted(np.cumsum(vmultinomial), r, side='left')
def sc_loc_ofsuperCluster_PCAspace(p0, p1,idx):
    """For each super-cluster in `p0`, pick a representative single cell in the full
    PCA space, then return its nearest neighbor's index among the downsampled points.

    p0, p1 : coarse / fine clustering result objects — assumed to expose .labels,
        .data, .knn_struct, .root, .revised_super_terminal_clusters,
        .dict_terminal_super_sub_pairs and .single_cell_pt_markov (TODO confirm).
    idx : indices of the downsampled samples within p0.data.
    Returns: list with one downsampled-space index per super-cluster label.
    """
    # ci_list: single cell location of average location of supercluster based on embedded space hnsw
    #Returns location (index) in unsampled PCA space of the location of the super-cluster or sub-terminal-cluster and root
    print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
    p0_labels = np.asarray(p0.labels)
    p1_labels = np.asarray(p1.labels)
    p1_sc_markov_pt = p1.single_cell_pt_markov
    ci_list = []
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:  # p0.terminal_clusters:
            # Terminal super-cluster: use the cells of its mapped sub-terminal cluster in p1.
            loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            # Percentile 0 currently keeps every cell (the commented 80 was a stricter cut).
            th_pt = np.percentile(val_pt, 0)  # 80
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
            temp = np.mean(p0.data[loc_i], axis=0)
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
        elif ci in p0.root:
            # Root cluster: restrict to the earliest 20% (lowest pseudotime) cells of
            # the matching p1 root before averaging.
            loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', loc_root)
            p1_root_label = p1.root[loc_root]
            loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 20)  # 50
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
            temp = np.mean(p0.data[loc_i], axis=0)
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
        else:
            # Ordinary cluster: nearest cell to the cluster mean in PCA space.
            loc_i = np.where(p0_labels == ci)[0]
            temp = np.mean(p0.data[loc_i], axis=0)
            labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
            ci_list.append(labelsq[0][0])
    # Map each representative full-space index onto the downsampled set via a fresh hnsw index.
    X_ds = p0.data[idx]
    p_ds = hnswlib.Index(space='l2', dim=p0.data.shape[1])
    p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
    p_ds.add_items(X_ds)
    p_ds.set_ef(50)
    new_superclust_index_ds = []
    for item in ci_list:
        labelsq, distances = p_ds.knn_query(p0.data[item, :], k=1)
        new_superclust_index_ds.append(labelsq[0][0])
    return new_superclust_index_ds
def sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx):
    """For each super-cluster in `p0`, find the single cell (among the downsampled
    points `idx`) closest to the cluster's mean position in the 2-D `embedding`.

    Returns (knn_hnsw, ci_list): the hnsw index built over `embedding` (reused by
    callers for further queries) and one embedding-space cell index per
    super-cluster.  p0/p1 attribute expectations match
    sc_loc_ofsuperCluster_PCAspace — TODO confirm against the VIA class.
    """
    # ci_list: single cell location of average location of supercluster based on embedded space hnsw
    # idx is the indices of the subsampled elements
    knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
    knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
    knn_hnsw.add_items(embedding)
    knn_hnsw.set_ef(50)
    p0_labels = np.asarray(p0.labels)[idx]
    p1_labels = np.asarray(p1.labels)[idx]
    p1_sc_markov_pt = list(np.asarray(p1.single_cell_pt_markov)[idx])
    ci_list = []
    for ci in list(set(p0.labels)):
        if ci in p1.revised_super_terminal_clusters:  # p0.terminal_clusters:
            # Terminal super-cluster: average the latest 20% (highest pseudotime) cells
            # of its mapped sub-terminal cluster.
            loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 80)  # 50
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        elif ci in p0.root:
            # Root cluster: average the earliest 20% (lowest pseudotime) cells.
            loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
            print('loc root', loc_root)
            p1_root_label = p1.root[loc_root]
            loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
            val_pt = [p1_sc_markov_pt[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 20)  # 50
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        else:
            # Ordinary cluster: average all of its cells.
            loc_i = np.where(p0_labels == ci)[0]
            x = [embedding[xi, 0] for xi in loc_i]
            y = [embedding[yi, 1] for yi in loc_i]
        # Nearest single cell to the branch's mean embedding location.
        labelsq, distancesq = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
        ci_list.append(labelsq[0][0])
    return knn_hnsw, ci_list
def draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, G, idx, X_data):
# G is the igraph knn (low K) used for shortest path. no idx needed as it's made on full sample
# knn_hnsw is the knn made in the embedded space used for query
# X_data is the PCA space with all samples
# idx is the selected indices of the downsampled samples
y_root = []
x_root = []
root1_list = []
p1_sc_bp = p1.single_cell_bp[idx, :]
p1_labels = np.asarray(p1.labels)[idx]
p1_sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
p1_cc = p1.connected_comp_labels
X_ds = X_data[idx, :]
p_ds = hnswlib.Index(space='l2', dim=X_ds.shape[1])
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for ii, r_i in enumerate(p1.root):
loc_i = np.where(p1_labels == p1.root[ii])[0]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labels_root, distances_root = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # sc location in embedded space of root cell
x_root.append(embedding[labels_root, 0][0])
y_root.append(embedding[labels_root, 1][0])
labelsroot1, distances1 = p1.knn_struct.knn_query(X_ds[labels_root[0][0], :],
k=1) # index of sc-root-cell in the full-PCA space. Need for path
root1_list.append(labelsroot1[0][0])
# single-cell branch probability evolution probability
for i, ti in enumerate(p1.terminal_clusters):
print('i, ti, p1.root, p1.connected', i, ti, p1.root, p1_cc)
print('root1list', root1_list)
root_i = p1.root[p1_cc[ti]]
xx_root = x_root[p1_cc[ti]]
yy_root = y_root[p1_cc[ti]]
fig, ax = plt.subplots()
plot_sc_pb(ax, embedding, p1_sc_bp[:, i], ti)
loc_i = np.where(p1_labels == ti)[0]
val_pt = [p1_sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in
loc_i] # location of sc nearest to average location of terminal clus in the EMBEDDED space
y = [embedding[yi, 1] for yi in loc_i]
labels, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # knn_hnsw is knn of embedded space
x_sc = embedding[labels[0], 0] # terminal sc location in the embedded space
y_sc = embedding[labels[0], 1]
start_time = time.time()
labelsq1, distances1 = p1.knn_struct.knn_query(X_ds[labels[0][0], :],
k=1) # find the nearest neighbor in the PCA-space full graph
print('labels root and labels[0]', root1_list[p1_cc[ti]], labels[0])
## path = G.get_shortest_paths(labels_root[0][0], to=labels[0][0], weights='weight') #G is the knn of all sc points
# path = G.get_shortest_paths(labelsroot1[0][0], to=labelsq1[0][0], weights='weight') # G is the knn of all sc points
path = G.get_shortest_paths(root1_list[p1_cc[ti]], to=labelsq1[0][0],
weights='weight') # G is the knn of all sc points
path_idx = [] # find the single-cell which is nearest to the average-location of a terminal cluster
# get the nearest-neighbor in this downsampled PCA-space graph. These will make the new path-way points
for pii in path[0]:
labelsq, distances = p_ds.knn_query(X_data[pii, :], k=1)
# print('location of pathway point in idx-space', labelsq[0][0])
path_idx.append(labelsq[0][0])
print(f"get_shortest_paths time: {time.time()-start_time}")
print('path', path)
print('new path indices', path_idx)
path = path_idx
n_orange = len(path)
orange_m = np.zeros((n_orange, 3))
for enum_point, point in enumerate(path):
#ax.text(embedding[point, 0], embedding[point, 1], 'D ' + str(enum_point), color='blue', fontsize=8)
orange_m[enum_point, 0] = embedding[point, 0]
orange_m[enum_point, 1] = embedding[point, 1]
orange_m[enum_point, 2] = p1_sc_pt_markov[ point]
from sklearn.neighbors import NearestNeighbors
k_orange = 3 # increasing can smoothen in simple trajectories (Toy)
nbrs = NearestNeighbors(n_neighbors=k_orange, algorithm='ball_tree').fit(orange_m[:, 0:])
distances, indices = nbrs.kneighbors(orange_m[:, 0:])
row_list = []
col_list = []
dist_list = []
for i_or in range(n_orange):
for j_or in range(1, k_orange):
row_list.append(i_or)
col_list.append(indices[i_or, j_or])
dist_list.append(distances[i_or, j_or])
print('target number ' + str(ti))
orange_adjacency_knn = csr_matrix((np.array(dist_list), (np.array(row_list), np.array(col_list))),
shape=(n_orange, n_orange))
print('orange adj knn shape', orange_adjacency_knn.shape)
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False, return_labels=True)
for enum_point, point in enumerate(path): # [0]):
orange_m[enum_point, 2] = p1_sc_pt_markov[point] * p1_sc_pt_markov[
point] * 2 # p1.single_cell_pt_markov[point] * p1.single_cell_pt_markov[point]*2
while n_mst > 1:
comp_root = comp_labels_mst[0]
# print('comp-root', comp_root)
min_ed = 9999999
loc_comp_i = np.where(comp_labels_mst == comp_root)[0]
loc_comp_noti = np.where(comp_labels_mst != comp_root)[0]
# print('compi', loc_comp_i)
# print('comp_noti', loc_comp_noti)
orange_pt_val = [orange_m[cc, 2] for cc in loc_comp_i]
loc_comp_i_revised = [loc_comp_i[cc] for cc in range(len(orange_pt_val)) if
orange_pt_val[cc] >= np.percentile(orange_pt_val, 70)]
for nn_i in loc_comp_i_revised:
ed = euclidean_distances(orange_m[nn_i, :].reshape(1, -1), orange_m[loc_comp_noti])
if np.min(ed) < min_ed:
ed_where_min = np.where(ed[0] == np.min(ed))[0][0]
# print('ed where min', ed_where_min, np.where(ed[0] == np.min(ed)))
min_ed = np.min(ed)
ed_loc_end = loc_comp_noti[ed_where_min]
ed_loc_start = nn_i
# print('min ed', min_ed)
print('Connecting components before sc-bp-GAM: the closest pair of points', ed_loc_start, ed_loc_end)
orange_adjacency_knn[ed_loc_start, ed_loc_end] = min_ed
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False,
return_labels=True)
if n_mst == 1: #if no disconnected components in the graph
(orange_sources, orange_targets) = orange_adjacency_knn.nonzero()
orange_edgelist = list(zip(orange_sources.tolist(), orange_targets.tolist()))
G_orange = ig.Graph(n=orange_adjacency_knn.shape[0], edges=orange_edgelist,
edge_attrs={'weight': orange_adjacency_knn.data.tolist()}, )
path_orange = G_orange.get_shortest_paths(0, to=orange_adjacency_knn.shape[0] - 1, weights='weight')[0]
print('path orange', path_orange)
len_path_orange = len(path_orange)
for path_i in range(len_path_orange - 1):
path_x_start = orange_m[path_orange[path_i], 0]
path_x_end = orange_m[path_orange[path_i + 1], 0]
orange_x = [orange_m[path_orange[path_i], 0], orange_m[path_orange[path_i + 1], 0]]
orange_minx = min(orange_x)
orange_maxx = max(orange_x)
orange_y = [orange_m[path_orange[path_i], 1], orange_m[path_orange[path_i + 1], 1]]
orange_miny = min(orange_y)
orange_maxy = max(orange_y)
orange_embedding_sub = embedding[
((embedding[:, 0] <= orange_maxx) & (embedding[:, 0] >= orange_minx)) & (
(embedding[:, 1] <= orange_maxy) & ((embedding[:, 1] >= orange_miny)))]
print('orange sub size', orange_embedding_sub.shape)
if (orange_maxy - orange_miny > 5) | (orange_maxx - orange_minx > 5):
orange_n_reps = 150
else:
orange_n_reps = 100
or_reps = np.repeat(np.array([[orange_x[0], orange_y[0]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
or_reps = np.repeat(np.array([[orange_x[1], orange_y[1]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
orangeGam = pg.LinearGAM(n_splines=8, spline_order=3, lam=10).fit(orange_embedding_sub[:, 0],
orange_embedding_sub[:, 1])
nx_spacing = 100
orange_GAM_xval = np.linspace(orange_minx, orange_maxx, nx_spacing * 2)
yg_orange = orangeGam.predict(X=orange_GAM_xval)
ax.plot(orange_GAM_xval, yg_orange, color='dimgrey', linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
cur_x1 = orange_GAM_xval[-1]
cur_y1 = yg_orange[-1]
cur_x2 = orange_GAM_xval[0]
cur_y2 = yg_orange[0]
if path_i >= 1:
for mmddi in range(2):
xy11 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy12 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
xy21 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy22 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
mmdd_temp_array = np.asarray([xy11, xy12, xy21, xy22])
mmdd_loc = np.where(mmdd_temp_array == np.min(mmdd_temp_array))[0][0]
if mmdd_loc == 0:
ax.plot([cur_x1, prev_x1], [cur_y1, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 1:
ax.plot([cur_x1, prev_x2], [cur_y1, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 2:
ax.plot([cur_x2, prev_x1], [cur_y2, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 3:
ax.plot([cur_x2, prev_x2], [cur_y2, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if (path_x_start > path_x_end): direction_arrow_orange = -1 # going LEFT
if (path_x_start <= path_x_end): direction_arrow_orange = 1 # going RIGHT
if (abs(
path_x_start - path_x_end) > 2.5): # |(abs(orange_m[path_i, 2] - orange_m[path_i + 1, 1]) > 1)):
if (direction_arrow_orange == -1): # & :
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing - 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing - 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5, color='dimgray', zorder=3)
if (direction_arrow_orange == 1): # &(abs(orange_m[path_i,0]-orange_m[path_i+1,0])>0.5):
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing + 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing + 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5,
color='dimgray', zorder=3)
prev_x1 = cur_x1
prev_y1 = cur_y1
prev_x2 = cur_x2
prev_y2 = cur_y2
ax.scatter(x_sc, y_sc, color='pink', zorder=3, label=str(ti), s=22)
ax.text(x_sc + 0.5, y_sc + 0.5, 'TS ' + str(ti), color='black')
return
def get_biased_weights(edgelist, weights, pt, round_no=1):
    """Bias cluster-graph edge weights by pseudotime direction.

    Each edge weight is multiplied by a generalised logistic factor of the
    pseudotime difference between its endpoints, so edges that move forward
    in pseudotime retain more weight than edges that move backward.

    Parameters (reviewer notes):
    - edgelist: list of (start, end) node-index pairs.
    - weights: per-edge weights parallel to ``edgelist``. NOTE(review): some
      entries are modified in place below before a clipped copy is taken, so
      callers should not rely on ``weights`` being left unchanged.
    - pt: per-node pseudotime values, indexable by node index.
    - round_no: 1 applies a mild bias (b=1); any other round uses b=20.

    Returns a new list of biased weights, parallel to ``edgelist``.
    """
    # print('weights', type(weights), weights)
    # small nu means less biasing (0.5 is quite mild)
    # larger nu (in our case 1/nu) means more aggressive biasing https://en.wikipedia.org/wiki/Generalised_logistic_function
    print(len(edgelist), len(weights))
    bias_weight = []
    if round_no == 1:
        b = 1  # 1 # 0.5
    else:
        b = 20  # 20 twenty is used for all the CD34 Human cells
    # generalised-logistic parameters are fixed: K (asymptote), C, c (shift), nu (shape)
    K = 1
    c = 0
    C = 1
    nu = 1
    high_weights_th = np.mean(weights)
    high_pt_th = np.percentile(np.asarray(pt), 80)
    loc_high_weights = np.where(weights > high_weights_th)[0]
    loc_high_pt = np.where(np.asarray(pt) > high_pt_th)[0]
    print('weight hi th', high_weights_th)
    print('loc hi pt', loc_high_pt)
    # print('loc hi weight', loc_high_weights)
    print('edges of high weight', [edgelist[i] for i in loc_high_weights])
    edgelist_hi = [edgelist[i] for i in loc_high_weights]
    for i in loc_high_weights:
        # print('loc of high weight along edgeweight', i)
        start = edgelist[i][0]
        end = edgelist[i][1]
        # print('start and end node', start, end)
        if (start in loc_high_pt) | (end in loc_high_pt):
            # Damp high-weight edges that touch late-pseudotime nodes.
            # NOTE(review): np.mean(weights) is re-evaluated after earlier
            # in-place edits, so the damped value is order-dependent.
            # print("found a high pt high weight node", (start, end), pt[start], pt[end])
            weights[i] = 0.5 * np.mean(weights)
    # Winsorize weights to the 10th-90th percentile band before biasing.
    upper_lim = np.percentile(weights, 90)  # 80
    lower_lim = np.percentile(weights, 10)  # 20
    weights = [i if i <= upper_lim else upper_lim for i in weights]
    weights = [i if i >= lower_lim else lower_lim for i in weights]
    for i, (start, end) in enumerate(edgelist):
        # print('i, start, end', i, start, end)
        Pt_a = pt[start]
        Pt_b = pt[end]
        P_ab = weights[i]
        t_ab = Pt_a - Pt_b
        # Logistic bias: t_ab < 0 (edge moves forward in pseudotime) gives a
        # factor near K; t_ab > 0 (backward edge) shrinks the factor toward 0.
        Bias_ab = K / ((C + math.exp(b * (t_ab + c)))) ** nu
        new_weight = (Bias_ab * P_ab)
        bias_weight.append(new_weight)
        # print('tab', t_ab, 'pab', P_ab, 'biased_pab', new_weight)
    print('original weights', len(weights), list(enumerate(zip(edgelist, weights))))
    print('bias weights', list(enumerate(zip(edgelist, bias_weight))))
    print('length bias weights', len(bias_weight))
    # bias_weight=np.asarray(bias_weight)
    # bias_weight = (bias_weight-np.min(bias_weight)+0.1)/(np.max(bias_weight)-np.min(bias_weight)+0.1)
    return list(bias_weight)
def expected_num_steps(start_i, N):
    """Expected number of steps before absorption when starting in state ``start_i``.

    ``N`` is the fundamental matrix of an absorbing Markov chain; its row sums
    give the expected step counts, so we return the row sum for ``start_i``.
    """
    num_transient = N.shape[0]
    steps_per_state = np.dot(N, np.ones(num_transient))
    return steps_per_state[start_i]
def absorption_probability(N, R, absorption_state_j):
    """Absorption probabilities of an absorbing Markov chain.

    Computes M = N @ R (fundamental matrix times transient-to-absorbing
    transition matrix) and returns both M and its column for the requested
    absorbing state ``absorption_state_j``.
    """
    absorb_probs = np.dot(N, R)
    prob_end_in_j = absorb_probs[:, absorption_state_j]
    return absorb_probs, prob_end_in_j
def most_likely_path(P_transition_absorbing_markov, start_i, end_i):
    """Return the most probable path from ``start_i`` to ``end_i``.

    The previous implementation was a stub (``graph_absorbing_markov = 0``
    followed by a method call on that int) and always raised AttributeError.
    Maximizing the product of transition probabilities along a path is
    equivalent to minimizing the sum of -log(p), so we run Dijkstra on the
    -log-transformed transition matrix.

    Parameters
    ----------
    P_transition_absorbing_markov : array-like (n, n)
        Transition-probability matrix; entry (i, j) > 0 is an edge i -> j.
    start_i, end_i : int
        Start and end state indices.

    Returns
    -------
    list of int
        State indices of the most likely path (inclusive of both endpoints),
        or an empty list if ``end_i`` is unreachable from ``start_i``.
    """
    from scipy.sparse import csr_matrix as _csr_matrix
    from scipy.sparse.csgraph import dijkstra
    P = np.asarray(P_transition_absorbing_markov, dtype=float)
    # Edge cost = -log(probability); only positive-probability transitions are edges.
    rows, cols = np.nonzero(P > 0)
    costs = -np.log(P[rows, cols])
    graph_absorbing_markov = _csr_matrix((costs, (rows, cols)), shape=P.shape)
    dist, predecessors = dijkstra(graph_absorbing_markov, directed=True,
                                  indices=start_i, return_predecessors=True)
    if np.isinf(dist[end_i]):
        shortest_path = []  # end state unreachable from start state
    else:
        # Walk the predecessor chain backwards from end_i, then reverse.
        shortest_path = [end_i]
        while shortest_path[-1] != start_i:
            shortest_path.append(int(predecessors[shortest_path[-1]]))
        shortest_path.reverse()
    print('the shortest path beginning at ', start_i, 'and ending in ', end_i, 'is:')
    return shortest_path
def draw_trajectory_gams(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                         alpha_teleport,
                         projected_sc_pt, true_label, knn, ncomp, final_super_terminal, sub_terminal_clusters,
                         title_str="hitting times", ):
    """Plot the 2-D embedding side by side: true labels (left) and the inferred
    trajectory (right) with GAM-smoothed curves drawn along each super-cluster
    edge, directional arrows, and terminal clusters highlighted.

    Reviewer notes on inputs (not all verifiable from this block alone):
    - X_dimred: (n_cells, >=2) embedding; only columns 0 and 1 are used.
    - sc_supercluster_nn: per-super-cluster single-cell index into X_dimred
      (iterated in zip below) -- TODO confirm ordering matches super ids.
    - final_super_terminal / sub_terminal_clusters are assumed parallel in
      the order terminals are encountered -- verify against caller.
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(0).astype(
        int)
    print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
    print('final_super_terminal', final_super_terminal)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    print('pt sub', pt_sub[0:20])
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    # Left panel: scatter colored by the ground-truth label of each cell.
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    # Right panel: one smoothed curve per super-cluster edge, oriented so the
    # edge runs from lower to higher mean pseudotime.
    for e_i, (start, end) in enumerate(super_edgelist):
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        x_i_start = df[df['super_cluster'] == start]['x'].values  # groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start]['y'].values  # .groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end]['x'].values  # .groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end]['y'].values  # groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # Keep only cells inside the bounding box of the two endpoint anchors.
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[
            0]  # np.where((X_dimred[:,0]<=maxx) & (X_dimred[:,0]>=minx))#
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[
            0]  # np.where((X_dimred[:,1]<=maxy) & (X_dimred[:,1]>=miny))#
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]  # X_dimred[idx_keep,0]#
        y_val = y_val[idx_keep]  # X_dimred[idx_keep,1]# y_val[idx_keep]
        print('start and end', start, '', end)
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        very_straight = False
        # Anchor points (with small jitter) are replicated many times so the GAM
        # fit is pulled through the endpoints; near-vertical edges get extra
        # replication and a midpoint anchor.
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            # For very straight edges also anchor on the 1-2 cells closest to
            # the geometric midpoint of the edge.
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        x_val = x_val.reshape((len(x_val), -1))
        y_val = y_val.reshape((len(y_val), -1))
        xp = np.linspace(minx, maxx, 500)
        # Fit a 1-D GAM y ~ f(x) through the selected cells plus anchors.
        gam50 = pg.LinearGAM(n_splines=4, spline_order=3, lam=10).gridsearch(x_val, y_val)
        XX = gam50.generate_X_grid(term=0, n=500)
        preds = gam50.predict(XX)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        # cc = ['black', 'red', 'blue', 'yellow', 'pink'][random.randint(0, 4)]
        ax2.plot(XX, preds, linewidth=1, c='dimgray')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # Place the direction arrow at the x-location closest to the mean of
        # the kept grid range.
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      preds[closest_loc + step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      preds[closest_loc - step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=.2, color='dimgray')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # Per-super-cluster marker styling; terminal super clusters get a yellow
    # edge, larger dot and a 'TS<sub-terminal-id>' text label.
    c_edge = []
    width_edge = []
    pen_color = []
    super_cluster_label = []
    terminal_count_ = 0
    dot_size = []
    for i in range(len(set(super_cluster_labels))):
        if i in final_super_terminal:
            print('super cluster', i, 'is a super terminal with sub_terminal cluster',
                  sub_terminal_clusters[terminal_count_])
            width_edge.append(2)
            c_edge.append('yellow')
            pen_color.append('black')
            super_cluster_label.append('TS' + str(sub_terminal_clusters[terminal_count_]))
            dot_size.append(60)
            terminal_count_ = terminal_count_ + 1
        else:
            width_edge.append(0)
            c_edge.append('black')
            pen_color.append('grey')
            super_cluster_label.append('')
            dot_size.append(40)
    # ax2.scatter(x_cluster, y_cluster, c='red') #doesnt visualize as well to just take the embedding cluster-mean x,y values
    # text annotations for the super cluster locations
    # for i, type in enumerate(pt_str):
    #    ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    # for i in range(len(x_cluster)):
    #    ax2.text(x_cluster[i], y_cluster[i], 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    # ax2.set_title('super_knn:' + str(knn) )
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    # ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors = c_edge, linewidth = width_edge)
    count_ = 0
    for i, c, w, pc, dsz in zip(sc_supercluster_nn, c_edge, width_edge, pen_color, dot_size):
        ax2.scatter(X_dimred[i, 0], X_dimred[i, 1], c='black', s=dsz, edgecolors=c, linewidth=w)
        ax2.text(X_dimred[i, 0] + 0.5, X_dimred[i, 1] + 0.5, super_cluster_label[count_],
                 color=pc)  # using the SC_NN location is good
        count_ = count_ + 1
    plt.title(title_str)
    return
def draw_trajectory_dimred(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
                           alpha_teleport,
                           projected_sc_pt, true_label, knn, ncomp, final_super_terminal,
                           title_str="hitting times", ):
    """Plot true labels (left) and the inferred trajectory (right), drawing a
    quadratic-polynomial curve (np.polyfit, degree 2) along each super-cluster
    edge with a directional arrow. Simpler sibling of ``draw_trajectory_gams``
    which uses a GAM instead of a polynomial fit.

    Reviewer notes: assumes X_dimred has >=2 columns (only 0 and 1 are used)
    and that sc_supercluster_nn maps super-cluster ids to single-cell indices
    into X_dimred -- confirm against caller.
    """
    x = X_dimred[:, 0]
    y = X_dimred[:, 1]
    df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
                       'projected_sc_pt': projected_sc_pt},
                      columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
    df_mean = df.groupby('cluster', as_index=False).mean()
    sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
    sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
    sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(1).astype(
        int)
    df_super_mean = df.groupby('super_cluster', as_index=False).mean()
    pt = df_super_mean['projected_sc_pt'].values
    pt_int = [int(i) for i in pt]
    pt_str = [str(i) for i in pt_int]
    pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    num_parc_group = len(set(true_label))
    line = np.linspace(0, 1, num_parc_group)
    # Left panel: scatter colored by ground-truth label.
    for color, group in zip(line, set(true_label)):
        where = np.where(np.array(true_label) == group)[0]
        ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
    ax1.legend(fontsize=6)
    ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
    # Right panel: one curve per super-cluster edge, oriented from lower to
    # higher mean pseudotime.
    for e_i, (start, end) in enumerate(super_edgelist):
        if pt[start] >= pt[end]:
            temp = end
            end = start
            start = temp
        x_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['x'].values
        y_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['y'].values
        x_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['x'].values
        y_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['y'].values
        direction_arrow = 1
        super_start_x = X_dimred[sc_supercluster_nn[start], 0]  # df[df['super_cluster'] == start].mean()['x']
        super_end_x = X_dimred[sc_supercluster_nn[end], 0]  # df[df['super_cluster'] == end].mean()['x']
        super_start_y = X_dimred[sc_supercluster_nn[start], 1]  # df[df['super_cluster'] == start].mean()['y']
        super_end_y = X_dimred[sc_supercluster_nn[end], 1]  # df[df['super_cluster'] == end].mean()['y']
        if super_start_x > super_end_x: direction_arrow = -1
        ext_maxx = False
        minx = min(super_start_x, super_end_x)
        maxx = max(super_start_x, super_end_x)
        miny = min(super_start_y, super_end_y)
        maxy = max(super_start_y, super_end_y)
        x_val = np.concatenate([x_i_start, x_i_end])
        y_val = np.concatenate([y_i_start, y_i_end])
        # Keep cluster means inside the bounding box of the two anchors.
        idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[0]
        idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[0]
        print('len x-val before intersect', len(x_val))
        idx_keep = np.intersect1d(idy_keep, idx_keep)
        x_val = x_val[idx_keep]
        y_val = y_val[idx_keep]
        super_mid_x = (super_start_x + super_end_x) / 2
        super_mid_y = (super_start_y + super_end_y) / 2
        from scipy.spatial import distance
        very_straight = False
        # Replicated, jittered anchor points pull the polynomial fit through
        # the endpoints; near-vertical edges get extra replication + midpoint.
        if abs(minx - maxx) <= 1:
            very_straight = True
            straight_level = 10
            noise = 0.01
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise, super_mid_x])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise, super_mid_y])
        else:
            straight_level = 3
            noise = 0.1  # 0.05
            x_super = np.array(
                [super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
                 super_start_x - noise, super_end_x - noise])
            y_super = np.array(
                [super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
                 super_start_y - noise, super_end_y - noise])
        for i in range(straight_level):  # DO THE SAME FOR A MIDPOINT TOO
            y_super = np.concatenate([y_super, y_super])
            x_super = np.concatenate([x_super, x_super])
        list_selected_clus = list(zip(x_val, y_val))
        if (len(list_selected_clus) >= 1) & (very_straight == True):
            # Also anchor on the 1-2 cluster means nearest the edge midpoint.
            dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
            print('dist', dist)
            if len(list_selected_clus) >= 2:
                k = 2
            else:
                k = 1
            midpoint_loc = dist[0].argsort()[:k]  # np.where(dist[0]==np.min(dist[0]))[0][0]
            print('midpoint loc', midpoint_loc)
            midpoint_xy = []
            for i in range(k):
                midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
            # midpoint_xy = list_selected_clus[midpoint_loc]
            noise = 0.05
            print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
            if k == 1:
                mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
                    0] - noise])  # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
                    1] - noise])  # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            if k == 2:
                mid_x = np.array(
                    [midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
                     midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
                mid_y = np.array(
                    [midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
                     midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
            for i in range(3):
                mid_x = np.concatenate([mid_x, mid_x])
                mid_y = np.concatenate([mid_y, mid_y])
            x_super = np.concatenate([x_super, mid_x])
            y_super = np.concatenate([y_super, mid_y])
        x_val = np.concatenate([x_val, x_super])
        y_val = np.concatenate([y_val, y_super])
        # Degree-2 polynomial fit y ~ p(x) across the selected points.
        z = np.polyfit(x_val, y_val, 2)
        xp = np.linspace(minx, maxx, 500)
        p = np.poly1d(z)
        smooth = p(xp)
        if ext_maxx == False:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # minx+3
        else:
            idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0]  # maxx-3
        ax2.plot(xp[idx_keep], smooth[idx_keep], linewidth=3, c='dimgrey')
        # med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
        # Place the direction arrow at the x closest to the mean of the range.
        mean_temp = np.mean(xp[idx_keep])
        closest_val = xp[idx_keep][0]
        closest_loc = idx_keep[0]
        for i, xp_val in enumerate(xp[idx_keep]):
            if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
                closest_val = xp_val
                closest_loc = idx_keep[i]
        step = 1
        if direction_arrow == 1:  # smooth instead of preds
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc + step] - xp[closest_loc],
                      smooth[closest_loc + step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')  # , head_starts_at_zero = direction_arrow )
        else:
            ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc - step] - xp[closest_loc],
                      smooth[closest_loc - step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
                      head_width=1, color='dimgrey')
    x_cluster = df_mean['x']
    y_cluster = df_mean['y']
    num_parc_group = len(set(cluster_labels))
    # Terminal clusters get a thick yellow marker edge.
    c_edge = []
    width_edge = []
    for i in range(num_parc_group):
        if i in final_super_terminal:
            width_edge.append(2.5)
            c_edge.append('yellow')
        else:
            width_edge.append(0)
            c_edge.append('black')
    ax2.scatter(x_cluster, y_cluster, c='red')
    for i, type in enumerate(pt_str):
        ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
    for i in range(len(x_cluster)):
        ax2.text(x_cluster[i], y_cluster[i], pt_sub[i] + 'c' + str(i))
    ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
    ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
    ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors=c_edge, linewidth=width_edge)
    plt.title(title_str)
    return
def csr_mst(adjacency_matrix):
    """Build a symmetrized minimum spanning tree from a CSR adjacency matrix.

    Edge weights are first inverted (negated, shifted to be strictly >= 1) so
    that the strongest links in the input become the cheapest MST edges; the
    resulting tree is then symmetrized as (T + T.T) * 0.5.
    """
    tree = adjacency_matrix.copy()
    n_comp, _ = connected_components(csgraph=tree, directed=False, return_labels=True)
    print('number of components before mst', n_comp)
    print('len Tcsr data', len(tree.data))
    # Invert weights, keeping them strictly positive for the MST routine.
    inverted = -1 * tree.data
    tree.data = (inverted - np.min(inverted)) + 1
    print('len Tcsr data', len(tree.data))
    tree = minimum_spanning_tree(tree)  # adjacency_matrix)
    n_comp, _ = connected_components(csgraph=tree, directed=False, return_labels=True)
    print('number of components after mst', n_comp)
    tree = (tree + tree.T) * 0.5  # make symmetric
    print('number of components after symmetric mst', n_comp)
    print('len Tcsr data', len(tree.data))
    return tree
def connect_all_components(MSTcsr, cluster_graph_csr, adjacency_matrix):
    """Reconnect a disconnected cluster graph into a single component.

    While more than one connected component remains, the cheapest MST link
    leaving component 0 is located and that edge is added back to
    ``cluster_graph_csr`` with its weight taken from ``adjacency_matrix``.

    Reviewer notes:
    - ``cluster_graph_csr`` is mutated in place and also returned.
    - Assumes MSTcsr spans all nodes so a component-0 -> other link always
      exists; otherwise np.min on empty data would raise -- TODO confirm.
    """
    # connect forest of MSTs (csr)
    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    while n_components > 1:
        # Cheapest MST edge from component 0 to any other component.
        sub_td = MSTcsr[comp_labels == 0, :][:, comp_labels != 0]
        print('minimum value of link connecting components', np.min(sub_td.data))
        locxy = scipy.sparse.find(MSTcsr == np.min(sub_td.data))
        for i in range(len(locxy[0])):
            if (comp_labels[locxy[0][i]] == 0) & (comp_labels[locxy[1][i]] != 0):
                # If several entries tie for the minimum, the last qualifying
                # (x, y) pair wins.
                x = locxy[0][i]
                y = locxy[1][i]
        minval = adjacency_matrix[x, y]
        cluster_graph_csr[x, y] = minval
        n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
        print('number of connected componnents after reconnecting ', n_components)
    return cluster_graph_csr
def local_pruning_clustergraph_mst(adjacency_matrix, global_pruning_std=1, max_outgoing=30, preserve_disconnected=True):
    """Locally and globally prune the cluster graph, then repair connectivity.

    Per node, only the ``max_outgoing`` strongest outgoing edges are kept
    (local pruning); then edges whose normalized weight falls more than
    ``global_pruning_std`` standard deviations below the mean are dropped
    (global pruning). If pruning splits components that were connected in the
    input and ``preserve_disconnected`` is True, MST edges of the original
    graph are added back until the original component structure is restored.

    Fix: the inner loop over candidate reconnection edges previously reused
    the loop variable ``i``, clobbering the outer component index so the wrong
    component was re-examined after reconnection; it now uses a separate name.

    Returns (edgeweights, edgelist, comp_labels) of the pruned graph.
    """
    # larger pruning_std factor means less pruning
    # the mst is only used to reconnect components that become disconnect due to pruning
    from scipy.sparse.csgraph import minimum_spanning_tree
    Tcsr = csr_mst(adjacency_matrix)
    initial_links_n = len(adjacency_matrix.data)
    n_components_0, comp_labels_0 = connected_components(csgraph=adjacency_matrix, directed=False, return_labels=True)
    print('number of components before pruning', n_components_0, comp_labels_0)
    adjacency_matrix = scipy.sparse.csr_matrix.todense(adjacency_matrix)
    row_list = []
    col_list = []
    weight_list = []
    neighbor_array = adjacency_matrix  # not listed in in any order of proximity
    n_cells = neighbor_array.shape[0]
    rowi = 0
    # Local pruning: keep at most max_outgoing strongest outgoing edges per node.
    for i in range(neighbor_array.shape[0]):
        row = np.asarray(neighbor_array[i, :]).flatten()
        n_nonz = np.sum(row > 0)
        n_nonz = min(n_nonz, max_outgoing)
        to_keep_index = np.argsort(row)[::-1][0:n_nonz]  # np.where(row>np.mean(row))[0]#
        updated_nn_weights = list(row[to_keep_index])
        for ik in range(len(to_keep_index)):
            row_list.append(rowi)
            col_list.append(to_keep_index[ik])
            dist = updated_nn_weights[ik]
            weight_list.append(dist)
        rowi = rowi + 1
    final_links_n = len(weight_list)
    print('final links n', final_links_n)
    cluster_graph_csr = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
                                   shape=(n_cells, n_cells))
    sources, targets = cluster_graph_csr.nonzero()
    mask = np.zeros(len(sources), dtype=bool)
    cluster_graph_csr.data = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))  # normalize
    # Global pruning: drop edges below mean - global_pruning_std * std.
    threshold_global = np.mean(cluster_graph_csr.data) - global_pruning_std * np.std(cluster_graph_csr.data)
    mask |= (cluster_graph_csr.data < (threshold_global))  # smaller Jaccard weight means weaker edge
    cluster_graph_csr.data[mask] = 0
    cluster_graph_csr.eliminate_zeros()
    print('shape of cluster graph', cluster_graph_csr.shape)
    n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    print('number of connected components after pruning', n_components)
    if (preserve_disconnected == True) & (n_components > n_components_0):  # preserve initial disconnected components
        Td = Tcsr.todense()
        Td[Td == 0] = 999.999  # sentinel: non-edges must never be chosen as the minimum
        n_components_ = n_components
        while n_components_ > n_components_0:
            for i in range(n_components_0):
                loc_x = np.where(comp_labels_0 == i)[0]
                len_i = len(set(comp_labels[loc_x]))
                print('locx', loc_x, len_i)
                while len_i > 1:
                    s = list(set(comp_labels[loc_x]))
                    loc_notxx = np.intersect1d(loc_x, np.where((comp_labels != s[0]))[0])
                    loc_xx = np.intersect1d(loc_x, np.where((comp_labels == s[0]))[0])
                    sub_td = Td[loc_xx, :][:, loc_notxx]
                    locxy = np.where(Td == np.min(sub_td))
                    # Use a dedicated index name here: reusing `i` clobbered the
                    # outer component index (bug in the original implementation).
                    for eidx in range(len(locxy[0])):
                        if (comp_labels[locxy[0][eidx]] != comp_labels[locxy[1][eidx]]):
                            x = locxy[0][eidx]
                            y = locxy[1][eidx]
                            minval = adjacency_matrix[x, y]
                            print('inside reconnecting components while preserving original ', x, y, minval)
                            cluster_graph_csr[x, y] = minval
                    n_components_, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False,
                                                                      return_labels=True)
                    loc_x = np.where(comp_labels_0 == i)[0]
                    len_i = len(set(comp_labels[loc_x]))
        print('number of connected componnents after reconnecting ', n_components_)
    '''
    if (n_components > 1) & (preserve_disconnected == False):
        cluster_graph_csr = connect_all_components(Tcsr, cluster_graph_csr, adjacency_matrix)
        n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
    '''
    sources, targets = cluster_graph_csr.nonzero()
    edgelist = list(zip(sources, targets))
    edgeweights = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))
    trimmed_n = (initial_links_n - final_links_n) * 100 / initial_links_n
    trimmed_n_glob = (initial_links_n - len(edgeweights)) / initial_links_n
    if global_pruning_std < 0.5:
        print("percentage links trimmed from local pruning relative to start", trimmed_n)
        print("percentage links trimmed from global pruning relative to start", trimmed_n_glob)
    return edgeweights, edgelist, comp_labels
def get_sparse_from_igraph(graph, weight_attr=None):
    """Convert an igraph graph into a scipy.sparse CSR adjacency matrix.

    Parameters
    ----------
    graph : igraph.Graph
        Graph to convert.
    weight_attr : str, optional
        Name of the edge attribute holding weights; when None every edge
        gets weight 1.

    Returns
    -------
    scipy.sparse.csr_matrix
        (vcount, vcount) adjacency matrix; undirected graphs are mirrored
        so the result is symmetric.
    """
    edge_list = graph.get_edgelist()
    weights = [1] * len(edge_list) if weight_attr is None else graph.es[weight_attr]
    if not graph.is_directed():
        # mirror every edge (and its weight) so the matrix comes out symmetric
        edge_list = edge_list + [(dst, src) for src, dst in edge_list]
        weights = list(weights) + list(weights)
    n_vertices = graph.vcount()
    mat_shape = (n_vertices, n_vertices)
    if not edge_list:
        return csr_matrix(mat_shape)
    rows, cols = zip(*edge_list)
    return csr_matrix((weights, (rows, cols)), shape=mat_shape)
class PARC:
def __init__(self, data, true_label=None, anndata=None, dist_std_local=2, jac_std_global='median',
keep_all_local_dist='auto',
too_big_factor=0.4, small_pop=10, jac_weighted_edges=True, knn=30, n_iter_leiden=5, random_seed=42,
num_threads=-1, distance='l2', time_smallpop=15, pseudotime=False,
root=0, path='/home/shobi/Trajectory/', super_cluster_labels=False,
super_node_degree_list=False, super_terminal_cells=False, x_lazy=0.95, alpha_teleport=0.99,
root_user="root_cluster", preserve_disconnected=True, dataset="humanCD34", super_terminal_clusters=[], do_magic=False):
# higher dist_std_local means more edges are kept
# highter jac_std_global means more edges are kept
if keep_all_local_dist == 'auto':
if data.shape[0] > 300000:
keep_all_local_dist = True # skips local pruning to increase speed
else:
keep_all_local_dist = False
self.data = data
self.true_label = true_label
self.anndata = anndata
self.dist_std_local = dist_std_local
self.jac_std_global = jac_std_global ##0.15 is also a recommended value performing empirically similar to 'median'
self.keep_all_local_dist = keep_all_local_dist
self.too_big_factor = too_big_factor ##if a cluster exceeds this share of the entire cell population, then the PARC will be run on the large cluster. at 0.4 it does not come into play
self.small_pop = small_pop # smallest cluster population to be considered a community
self.jac_weighted_edges = jac_weighted_edges
self.knn = knn
self.n_iter_leiden = n_iter_leiden
self.random_seed = random_seed # enable reproducible Leiden clustering
self.num_threads = num_threads # number of threads used in KNN search/construction
self.distance = distance # Euclidean distance 'l2' by default; other options 'ip' and 'cosine'
self.time_smallpop = time_smallpop
self.pseudotime = pseudotime
self.root = root
self.path = path
self.super_cluster_labels = super_cluster_labels
self.super_node_degree_list = super_node_degree_list
self.super_terminal_cells = super_terminal_cells
self.x_lazy = x_lazy # 1-x = probability of staying in same node
self.alpha_teleport = alpha_teleport # 1-alpha is probability of jumping
self.root_user = root_user
self.preserve_disconnected = preserve_disconnected
self.dataset = dataset
self.super_terminal_clusters = super_terminal_clusters
self.do_magic = do_magic
    def get_terminal_clusters(self, A, markov_pt, root_ai):
        """Shortlist terminal (end-state) clusters of the cluster graph.

        Combines three centrality criteria computed on a population-weighted
        graph (low betweenness, low closeness, low out-degree) with a Markov
        pseudotime threshold, then removes candidates that neighbor the root
        or that have 3+ neighboring terminal candidates.

        Parameters
        ----------
        A : np.ndarray
            Cluster-to-cluster adjacency/weight matrix.
        markov_pt : array-like
            Markov pseudotime per cluster.
        root_ai : int
            Index of the root cluster.

        Returns
        -------
        list of int
            Indices of clusters retained as terminal states.
        """
        n_ = A.shape[0]
        # the outlier cutoff (in std devs) used when trimming centrality scores
        # is stricter for larger cluster graphs
        if n_ <= 10: n_outlier_std = 3
        if (n_ <= 40) & (n_ > 10):n_outlier_std = 2
        if n_>=40: n_outlier_std = 1
        pop_list = []
        print('get terminal', set(self.labels), np.where(self.labels == 0))
        for i in list(set(self.labels)):
            pop_list.append(len(np.where(self.labels == i)[0]))
        # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
        A_new = A.copy()
        for i in range(A.shape[0]):
            for j in range(A.shape[0]):
                A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
        # make an igraph graph to compute the closeness
        g_dis = ig.Graph.Adjacency((A_new > 0).tolist())  # need to manually add the weights as igraph treats A>0 as boolean
        g_dis.es['weights'] = 1/A_new[A_new.nonzero()] # we want "distances" not weights for closeness and betweenness
        # clusters whose betweenness is below the mean (computed after removing
        # extreme outliers) are terminal candidates
        betweenness_score = g_dis.betweenness(weights = 'weights')
        betweenness_score_array = np.asarray(betweenness_score)
        betweenness_score_takeout_outlier = betweenness_score_array[betweenness_score_array<(np.mean(betweenness_score_array)+n_outlier_std*np.std(betweenness_score_array))]
        betweenness_list = [ i for i, score in enumerate(betweenness_score) if score < (np.mean(betweenness_score_takeout_outlier) - 0 * np.std(betweenness_score_takeout_outlier))]
        # same trimming scheme applied to closeness centrality
        closeness_score = g_dis.closeness( mode='ALL', cutoff=None, weights='weights', normalized=True)
        closeness_score_array = np.asarray( closeness_score)
        closeness_score_takeout_outlier = closeness_score_array[closeness_score_array < (np.mean( closeness_score_array) + n_outlier_std * np.std( closeness_score_array))]
        closeness_list = [i for i, score in enumerate(closeness_score) if
                          score < (np.mean(closeness_score_takeout_outlier) - 0 * np.std(closeness_score_takeout_outlier))]
        print('closeness_score ', [(i, score) for i, score in enumerate(closeness_score)])
        print('closeness_score shortlist', closeness_list)
        print('betweeness_score ', [(i,score) for i, score in enumerate(betweenness_score)])
        print('betweeness_score shortlist', betweenness_list)
        # (abandoned experiment: eigenvector centrality as a terminal criterion)
        # g_ = ig.Graph.Adjacency( (A_new > 0).tolist())
        # g_.es['weights'] =A_new[A_new.nonzero()]
        # eig_cent_score = g_.evcent(weights='weights',scale = False, directed = True)
        # eig_cent_list = [i for i, score in enumerate(eig_cent_score) if score < (np.mean(eig_cent_score) - 0 * np.std(eig_cent_score))]
        out_deg = A_new.sum(axis=1)
        in_deg = A_new.sum(axis=0)
        out_deg = np.asarray(out_deg)
        print('number of clusters', n_)
        # degree / pseudotime percentile cutoffs scale with the number of clusters
        if n_ <= 10:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
            print('low deg super', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
                0]  # 60 Toy #10 for human but not sure ever in play
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
            print('high pt super', loc_pt)
        if (n_ <= 40) & (n_ > 10):
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
                0]  # 30 for Toy # was 50 for Human
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
            print('low deg super', loc_deg)
            print('low in-deg super', loc_deg_in)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 10))[0]  # 60 Toy
            print('high pt super', loc_pt)
        if n_ > 40:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]  # 15 Toy
            print('low deg', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60Toy
            print('high pt', loc_pt)
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        # candidates must satisfy at least two of the three centrality criteria...
        terminal_clusters_1 = list(set(closeness_list)&set(betweenness_list))
        terminal_clusters_2 = list(set(closeness_list) & set(loc_deg))
        terminal_clusters_3 = list(set(betweenness_list) & set(loc_deg))
        terminal_clusters = list(set(terminal_clusters_1)|set(terminal_clusters_2))
        terminal_clusters = list(set(terminal_clusters)|set(terminal_clusters_3))
        # ...and also lie late enough in pseudotime
        terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
        terminal_org = terminal_clusters.copy()
        print('original terminal clusters', terminal_org)
        # prune candidates that neighbor the root or sit among 3+ other candidates
        for terminal_i in terminal_org:
            removed_terminal_i = False
            count_nn = 0
            neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
            if neigh_terminal.size > 0:
                for item in neigh_terminal:
                    if item in terminal_clusters:
                        print('item and terminal',
                              item, terminal_clusters)
                        count_nn = count_nn + 1
                        if item == root_ai:  # if the terminal state is a neighbor of the root
                            terminal_clusters.remove(terminal_i)
                            print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                            removed_terminal_i = True
                if count_nn >= 3:
                    if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                    print('TS', terminal_i, 'had 3 or more neighboring terminal states')
        print('terminal_clusters', terminal_clusters)
        return terminal_clusters
    def get_terminal_clusters_old(self, A, markov_pt, root_ai):
        """Legacy terminal-cluster selection (superseded by get_terminal_clusters).

        Shortlists terminal clusters as those with low population-weighted
        out-degree OR low in-degree, intersected with high Markov pseudotime,
        then prunes candidates that neighbor the root or that have too many
        neighboring terminal candidates.

        Parameters
        ----------
        A : np.ndarray
            Cluster-to-cluster adjacency/weight matrix.
        markov_pt : array-like
            Markov pseudotime per cluster.
        root_ai : int
            Index of the root cluster.

        Returns
        -------
        list of int
            Indices of clusters retained as terminal states.
        """
        pop_list = []
        print('get terminal', set(self.labels), np.where(self.labels == 0))
        for i in list(set(self.labels)):
            pop_list.append(len(np.where(self.labels == i)[0]))
        # we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
        A_new = A.copy()
        for i in range(A.shape[0]):
            for j in range(A.shape[0]):
                A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
        out_deg = A_new.sum(axis=1)
        in_deg = A_new.sum(axis=0)
        out_deg = np.asarray(out_deg)
        print('out deg', out_deg)
        n_ = A.shape[0]
        print('number of clusters', n_)
        # degree / pseudotime percentile cutoffs scale with the number of clusters
        if n_ <= 10:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
            print('low deg super', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
                0]  # 60 Toy #10 for human but not sure ever in play
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
            print('high pt super', loc_pt)
        if (n_ <= 40) & (n_ > 10):
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
                0]  # 30 for Toy # was 50 for Human
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
            print('low deg super', loc_deg)
            print('low in-deg super', loc_deg_in)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 30))[0]  # 60 Toy
            print('high pt super', loc_pt)
        if n_ > 40:
            loc_deg = np.where(out_deg <= np.percentile(out_deg, 30))[0]  # 15 Toy
            print('low deg', loc_deg)
            loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 40))[0]  # 60Toy
            print('high pt', loc_pt)
            loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
        # candidates: (low out-degree OR low in-degree) AND high pseudotime
        terminal_clusters = list(set(loc_deg) | set(loc_deg_in))
        terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
        # terminal_clusters.reverse()
        terminal_org = terminal_clusters.copy()
        print('original terminal clusters', terminal_org)
        for terminal_i in terminal_org:
            removed_terminal_i = False
            count_nn = 0
            neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
            if neigh_terminal.size > 0:
                for item in neigh_terminal:
                    if item in terminal_clusters:
                        print('item and terminal',
                              item, terminal_clusters)
                        count_nn = count_nn + 1
                        if item == root_ai:  # if the terminal state is a neighbor of the root
                            terminal_clusters.remove(terminal_i)
                            print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
                            removed_terminal_i = True
                if count_nn >= 3:
                    # NOTE(review): the printed message says "4 or more" but the
                    # threshold above fires at 3 — confirm which was intended
                    if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
                    print('TS', terminal_i, 'had 4 or more neighboring terminal states')
        print('terminal_clusters', terminal_clusters)
        return terminal_clusters
    def compute_hitting_time(self, sparse_graph, root, x_lazy, alpha_teleport, number_eig=0):
        """Compute expected hitting times from the root cluster via a spectral
        decomposition of the beta-normalized graph Laplacian.

        Parameters
        ----------
        sparse_graph : scipy.sparse.csr_matrix
            Symmetric cluster-level adjacency matrix.
        root : int
            Index of the root cluster (start of the walk).
        x_lazy : float
            1 - x_lazy is the probability of staying in the current state.
        alpha_teleport : float
            1 - alpha_teleport is the probability of teleporting to a random node.
        number_eig : int
            Number of eigenpairs to use; 0 means use all of them.

        Returns
        -------
        (np.ndarray, np.ndarray)
            Absolute hitting times from the root, and roundtrip commute times.
        """
        # 1- alpha is the probabilty of teleporting
        # 1- x_lazy is the probability of staying in current state (be lazy)
        beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
        N = sparse_graph.shape[0]
        print('start compute hitting')
        A = scipy.sparse.csr_matrix.todense(sparse_graph)  # A is the adjacency matrix
        print('is graph symmetric', (A.transpose() == A).all())
        lap = csgraph.laplacian(sparse_graph,
                                normed=False)  # compute regular laplacian (normed = False) to infer the degree matrix where D = L+A
        # see example and definition in the SciPy ref https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html
        A = scipy.sparse.csr_matrix.todense(lap)
        print('is laplacian symmetric', (A.transpose() == A).all())
        deg = sparse_graph + lap  # Recall that L=D-A (modified for weighted where D_ii is sum of edge weights and A_ij is the weight of particular edge)
        deg.data = 1 / np.sqrt(deg.data)  ## inv sqrt of degree matrix
        deg[deg == np.inf] = 0
        norm_lap = csgraph.laplacian(sparse_graph, normed=True)  # returns symmetric normalized D^-.5 xL x D^-.5
        Id = np.zeros((N, N), float)
        np.fill_diagonal(Id, 1)
        norm_lap = scipy.sparse.csr_matrix.todense(norm_lap)
        eig_val, eig_vec = np.linalg.eig(
            norm_lap)  # eig_vec[:,i] is eigenvector for eigenvalue eig_val[i]; not eigh as this is only for symmetric. the eig vecs are not in descending order
        if number_eig == 0: number_eig = eig_vec.shape[1]
        Greens_matrix = np.zeros((N, N), float)
        beta_norm_lap = np.zeros((N, N), float)
        # indicator matrices used to form the hitting-time difference X_v - X_u
        Xu = np.zeros((N, N))
        Xu[:, root] = 1
        Id_Xv = np.zeros((N, N), int)
        np.fill_diagonal(Id_Xv, 1)
        Xv_Xu = Id_Xv - Xu
        start_ = 0
        if alpha_teleport == 1:
            start_ = 1  # if there are no jumps (alpha_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
        for i in range(start_, number_eig):  # 0 instead of 1th eig
            vec_i = eig_vec[:, i]
            factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
            vec_i = np.reshape(vec_i, (-1, 1))
            eigen_vec_mult = vec_i.dot(vec_i.T)
            Greens_matrix = Greens_matrix + (
                    eigen_vec_mult / factor)  # Greens function is the inverse of the beta-normalized laplacian
            beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor)  # beta-normalized laplacian
        deg = scipy.sparse.csr_matrix.todense(deg)
        # sandwich the Green's function with D^-1/2 and scale by beta
        temp = Greens_matrix.dot(deg)
        temp = deg.dot(temp) * beta_teleport
        hitting_matrix = np.zeros((N, N), float)
        diag_row = np.diagonal(temp)
        for i in range(N):
            hitting_matrix[i, :] = diag_row - temp[i, :]
        roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
        temp = Xv_Xu.dot(temp)
        final_hitting_times = np.diagonal(
            temp)  ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
        roundtrip_times = roundtrip_commute_matrix[root, :]
        return abs(final_hitting_times), roundtrip_times
def pagerank_compute(self, P_bias, max_iterations=200):
x_lazy = self.x_lazy # 1-x is prob lazy
alpha_teleport = self.alpha_teleport
# bias_P is the transition probability matrix
n = P_bias.shape[0]
P_bias = x_lazy * P_bias + (1 - x_lazy) * np.identity(n)
P_bias = alpha_teleport * P_bias + ((1 - alpha_teleport) * (1 / n) * (np.ones((n, n)) - np.identity(n)))
# transition matrix for the lazy, teleporting directed walk
p0 = 1.0 / float(n)
# p0=np.zeros((n,1))
# p0[self.root,0] = 1#np.ones((n,1))*p0
p0 = np.ones((n, 1)) * p0
p0 = p0.T # random uniform initial stationary distribution
for iteration in range(max_iterations):
# old = p0.copy()
p0 = p0.dot(P_bias)
# delta = p0 - old
# delta = math.sqrt(delta.dot(delta.T))
p0 = p0[0] / np.sum(p0[0])
# print('p0 stationary is', [('c' + str(i), pp0) for i, pp0 in enumerate(p0)])
# print([('c' + str(i), pp0) for i, pp0 in enumerate(p0) if pp0>np.mean(p0)])
upperlim = np.percentile(p0, 90)
lowerlim = np.percentile(p0, 10)
# upper_val = p0[p0 >upperlim]
# upperlim = np.mean(upper_val)
# print('upper lim', upperlim)
if self.too_big_factor < 0.3:
p0 = np.array([d if d <= upperlim else upperlim for d in p0])
p0 = p0 / np.sum(p0)
print('final stationary', [(i, pp0) for i, pp0 in enumerate(p0)])
return p0
    def prob_reaching_terminal_state1(self, terminal_state, all_terminal_states, A, root, pt, num_sim,q,cumstateChangeHist, cumstateChangeHist_all,seed):
        """Monte-Carlo worker: count per-cluster visitation on walks to a terminal state.

        Runs `num_sim` random walks from `root` and, for each cluster, tallies
        how often a walk passed through it. Visits on walks that reached
        `terminal_state` go into `cumstateChangeHist`; visits on all walks go
        into `cumstateChangeHist_all`. The pair is appended to the shared
        list `q` for the parent process to aggregate.

        Parameters
        ----------
        terminal_state : int
            Target terminal cluster.
        all_terminal_states : list of int
            All shortlisted terminal clusters.
        A : np.ndarray
            Cluster adjacency matrix (row-normalized into transition probs here).
        root : int
            Starting cluster of each walk.
        pt : array-like
            Cluster pseudotimes.
        num_sim : int
            Number of walks simulated by this worker.
        q : multiprocessing.Manager().list
            Shared output container.
        cumstateChangeHist, cumstateChangeHist_all : np.ndarray (1, n_states)
            Pre-allocated accumulators passed in by the parent.
        seed : int
            Per-worker seed for reproducible walks.
        """
        np.random.seed(seed)
        print('root', root)
        print('terminal state target', terminal_state)
        n_states = A.shape[0]
        n_components, labels = connected_components(csgraph=csr_matrix(A), directed=False)
        A = A / (np.max(A))
        # A[A<=0.05]=0
        jj = 0
        # give absorbing (all-zero) rows a self-loop so row-normalization is well defined
        for row in A:
            if np.all(row == 0): A[jj, jj] = 1
            jj = jj + 1
        P = A / A.sum(axis=1).reshape((n_states, 1))
        n_steps = int(2* n_states)  # walk length cap: 2x the number of states
        currentState = root
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        currentState = root
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        state_root = state.copy()
        neigh_terminal = np.where(A[:, terminal_state] > 0)[0]
        # terminal states that lie "after" the target in pseudotime, or that are
        # not direct neighbors of the target (currently collected but unused below)
        non_nn_terminal_state = []
        for ts_i in all_terminal_states:
            if pt[ts_i] > pt[terminal_state]: non_nn_terminal_state.append(ts_i)
        for ts_i in all_terminal_states:
            if np.all(neigh_terminal != ts_i): non_nn_terminal_state.append(ts_i)
        count_reach_terminal_state = 0
        count_r = 0
        for i in range(num_sim):
            stateChangeHist = np.zeros((n_states, n_states))
            stateChangeHist[root, root] = 1
            state = state_root
            currentState = root
            stateHist = state
            terminal_state_found = False
            non_neighbor_terminal_state_reached = False
            x = 0
            # walk at most n_steps, stopping early once the target terminal state is hit
            while (x < n_steps) & (
                    (terminal_state_found == False)):
                # mask zero-probability transitions before sampling the next state
                currentRow = np.ma.masked_values((P[currentState]), 0.0)
                nextState = simulate_multinomial(currentRow)
                if nextState == terminal_state:
                    terminal_state_found = True
                    count_r = count_r+1
                # Keep track of state changes
                stateChangeHist[currentState, nextState] += 1
                # Keep track of the state vector itself
                state = np.zeros((1, n_states))
                state[0, nextState] = 1.0
                # Keep track of state history
                stateHist = np.append(stateHist, state, axis=0)
                currentState = nextState
                x = x + 1
            if (terminal_state_found == True):
                cumstateChangeHist = cumstateChangeHist + np.any(
                    stateChangeHist > 0, axis=0)
                count_reach_terminal_state = count_reach_terminal_state + 1
            cumstateChangeHist_all = cumstateChangeHist_all + np.any(
                stateChangeHist > 0, axis=0)
        # avoid division by zero on states that were never reached (e.g. terminal states that come after the target terminal state)
        cumstateChangeHist_all[cumstateChangeHist_all == 0] = 1
        prob_ = cumstateChangeHist / cumstateChangeHist_all
        np.set_printoptions(precision=3)
        q.append([cumstateChangeHist, cumstateChangeHist_all])
    def simulate_markov_sub(self, A, num_sim, hitting_array, q, root):
        """Monte-Carlo worker: simulate hitting "distances" from `root`.

        Each of the `num_sim` walks records, per cluster, the accumulated
        sigmoid-transformed edge weights along the path until that cluster is
        first reached (or the dummy value n_steps+1 if it was never reached).
        One column per walk is appended to `hitting_array`, which is then
        pushed onto the shared list `q` for the parent process.

        Parameters
        ----------
        A : np.ndarray
            Cluster transition/affinity matrix (row-normalized into P here).
        num_sim : int
            Number of walks simulated by this worker.
        hitting_array : np.ndarray (n_states, 1)
            Placeholder column; dropped before reporting results.
        q : multiprocessing.Manager().list
            Shared output container.
        root : int
            Starting cluster of each walk.
        """
        n_states = A.shape[0]
        P = A / A.sum(axis=1).reshape((n_states, 1))
        hitting_array_temp = np.zeros((P.shape[0], 1)).astype('float64')
        n_steps = int(2 * n_states)
        hitting_array_final = np.zeros((1, n_states))
        currentState = root
        print('root is', root)
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        state_root = state.copy()
        for i in range(num_sim):
            dist_list = []
            state = state_root
            currentState = root
            stateHist = state
            for x in range(n_steps):
                # mask zero-probability transitions before sampling the next state
                currentRow = np.ma.masked_values((P[currentState]), 0.0)
                nextState = simulate_multinomial(currentRow)
                dist = A[currentState, nextState]
                # squash the edge weight through a logistic so larger weights map to shorter "distances"
                dist = (1 / ((1 + math.exp((dist - 1)))))
                dist_list.append(dist)
                # Keep track of the state vector itself
                state = np.zeros((1, n_states))
                state[0, nextState] = 1.0
                currentState = nextState
                # Keep track of state history
                stateHist = np.append(stateHist, state, axis=0)
            for state_i in range(P.shape[0]):
                first_time_at_statei = np.where(stateHist[:, state_i] == 1)[0]
                if len(first_time_at_statei) == 0:
                    # state never reached in this walk: assign a dummy path length
                    hitting_array_temp[state_i, 0] = n_steps + 1
                else:
                    # sum the transformed edge distances up to the first visit
                    total_dist = 0
                    for ff in range(first_time_at_statei[0]):
                        total_dist = dist_list[ff] + total_dist
                    hitting_array_temp[state_i, 0] = total_dist  # first_time_at_statei[0]
            hitting_array = np.append(hitting_array, hitting_array_temp, axis=1)
        hitting_array = hitting_array[:, 1:]  # drop the initial placeholder column
        q.append(hitting_array)
    def simulate_branch_probability(self, terminal_state, all_terminal_states, A, root, pt, num_sim=300 ):
        """Estimate, per cluster, the probability of reaching `terminal_state`.

        Fans the Monte-Carlo walks of prob_reaching_terminal_state1 out over
        several worker processes, sums their visit counters, and normalizes
        the ratio of successful-walk visits to all-walk visits into a
        branch-probability vector.

        Parameters
        ----------
        terminal_state : int
            Target terminal cluster.
        all_terminal_states : list of int
            All shortlisted terminal clusters.
        A : np.ndarray
            Cluster adjacency matrix.
        root : int
            Starting cluster of each walk.
        pt : array-like
            Cluster pseudotimes.
        num_sim : int
            Total number of walks across all workers.

        Returns
        -------
        list of float
            Branch probabilities (length n_states) for reaching `terminal_state`.
        """
        n_states = A.shape[0]
        ncpu = multiprocessing.cpu_count()
        if (ncpu == 1) | (ncpu == 2):
            n_jobs = 1
        elif ncpu > 2:
            n_jobs = min(ncpu - 1, 5)
        print('njobs', n_jobs)
        num_sim_pp = int(num_sim / n_jobs)  # num of simulations per process
        print('num_sim_pp', num_sim_pp)
        jobs = []
        manager = multiprocessing.Manager()
        q = manager.list()
        seed_list = list(range(n_jobs))  # distinct seed per worker for independent walks
        for i in range(n_jobs):
            cumstateChangeHist = np.zeros((1, n_states))
            cumstateChangeHist_all = np.zeros((1, n_states))
            process = multiprocessing.Process(target=self.prob_reaching_terminal_state1,args=(terminal_state, all_terminal_states, A, root, pt, num_sim_pp,q, cumstateChangeHist, cumstateChangeHist_all, seed_list[i]))
            jobs.append(process)
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
        # aggregate the per-worker accumulators
        cumhistory_vec = q[0][0]
        cumhistory_vec_all = q[0][1]
        count_reached= cumhistory_vec_all[0,terminal_state]
        print('length of q', len(q))
        for i in range(1,len(q)):
            cumhistory_vec = cumhistory_vec + q[i][0]
            cumhistory_vec_all = cumhistory_vec_all+ q[i][1]
            count_reached = count_reached+ q[i][1][0,terminal_state]
        print('accumulated number of times Terminal state',terminal_state, 'is found:',count_reached)
        print('cumhistory_vec', cumhistory_vec)
        print('cumhistory_vec_all', cumhistory_vec_all)
        # avoid division by zero on never-visited states
        cumhistory_vec_all[cumhistory_vec_all == 0] = 1
        prob_ = cumhistory_vec /cumhistory_vec_all
        np.set_printoptions(precision=3)
        print('prob', prob_)
        if count_reached == 0:
            prob_[:, terminal_state] = 0
            print('never reached state', terminal_state)
        else:
            # clusters with probability exactly 1 (e.g. the target itself) are pinned to 1
            # after rescaling the rest by ~1/1.1 of the max
            loc_1 = np.where(prob_ == 1)
            print('loc_1', loc_1)
            loc_1 = loc_1[1]
            print('loc_1', loc_1)
            prob_[0, loc_1] = 0
            prob_ = prob_ / min(1,1.1 * np.max(prob_))
            prob_[0, loc_1] = 1
            print('np.max', np.max(prob_))
            print('scaled prob', prob_)
        return list(prob_)[0]
    def simulate_markov(self, A, root):
        """Estimate hitting times from `root` to every cluster by simulating
        random walks in parallel worker processes.

        Parameters
        ----------
        A : np.ndarray
            Cluster adjacency matrix; rows are normalized into transition
            probabilities before simulation.
        root : int
            Starting cluster.

        Returns
        -------
        np.ndarray
            Hitting-time estimate per cluster (mean of the fastest ~15th
            percentile of walks that reached each cluster; n_steps+1 for
            clusters never reached).
        """
        n_states = A.shape[0]
        P = A / A.sum(axis=1).reshape((n_states, 1))
        x_lazy = self.x_lazy  # 1-x is prob lazy
        alpha_teleport = self.alpha_teleport
        # NOTE(review): the lazy/teleport adjustments are currently commented
        # out, so the walks use the plain row-normalized P
        # P = x_lazy * P + (1 - x_lazy) * np.identity(n_states)
        # P = alpha_teleport * P + ((1 - alpha_teleport) * (1 / n_states) * (np.ones((n_states, n_states))))
        currentState = root
        state = np.zeros((1, n_states))
        state[0, currentState] = 1
        state_root = state.copy()
        stateHist = state
        dfStateHist = pd.DataFrame(state)
        distr_hist = np.zeros([1, n_states])
        num_sim = 1300  # 1000 # 1300
        ncpu = multiprocessing.cpu_count()
        if (ncpu == 1) | (ncpu == 2):
            n_jobs = 1
        elif ncpu > 2:
            n_jobs = min(ncpu - 1, 5)
        print('njobs', n_jobs)
        num_sim_pp = int(num_sim / n_jobs)  # num of simulations per process
        print('num_sim_pp', num_sim_pp)
        n_steps = int(2 * n_states)
        jobs = []
        manager = multiprocessing.Manager()
        q = manager.list()
        for i in range(n_jobs):
            # placeholder first column; simulate_markov_sub drops it before reporting
            hitting_array = np.ones((P.shape[0], 1)) * 1000
            process = multiprocessing.Process(target=self.simulate_markov_sub,
                                              args=(P, num_sim_pp, hitting_array, q, root))
            jobs.append(process)
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
        print('ended all multiprocesses, will retrieve and reshape')
        # concatenate per-worker results: one column per simulated walk
        hitting_array = q[0]
        for qi in q[1:]:
            hitting_array = np.append(hitting_array, qi, axis=1)
        print('finished getting from queue', hitting_array.shape)
        hitting_array_final = np.zeros((1, n_states))
        no_times_state_reached_array = np.zeros((1, n_states))
        for i in range(n_states):
            rowtemp = hitting_array[i, :]
            no_times_state_reached_array[0, i] = np.sum(rowtemp != (n_steps + 1))
        lower_quart = np.percentile(no_times_state_reached_array, 25)
        for i in range(n_states):
            rowtemp = hitting_array[i, :]
            no_times_state_reached = np.sum(rowtemp != (n_steps + 1))
            if no_times_state_reached != 0:
                # average only the fastest (<=15th percentile) successful walks
                perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 15) + 0.001  # 15 for Human and Toy
                hitting_array_final[0, i] = np.mean(rowtemp[rowtemp <= perc])
            else:
                # never reached: keep the dummy maximum path length
                hitting_array_final[0, i] = (n_steps + 1)
        print('hitting from sim markov', [(i, val) for i, val in enumerate(hitting_array_final.flatten())])
        return hitting_array_final[0]
    def compute_hitting_time_onbias(self, laplacian, inv_sqr_deg, root, x_lazy, alpha_teleport, number_eig=0):
        """Hitting times from `root` computed on a pre-biased graph Laplacian.

        Same spectral construction as compute_hitting_time, but operating on a
        caller-supplied (already biased) Laplacian and its inverse-sqrt degree
        matrix instead of deriving them from an adjacency matrix.

        Parameters
        ----------
        laplacian : np.ndarray
            Biased (normalized) graph Laplacian.
        inv_sqr_deg : np.ndarray
            Inverse square-root degree matrix D^-1/2 of the biased graph.
        root : int
            Index of the root cluster.
        x_lazy, alpha_teleport : float
            Laziness and teleport parameters of the walk.
        number_eig : int
            Number of eigenpairs to use; 0 means use all of them.

        Returns
        -------
        (np.ndarray, np.ndarray)
            Absolute hitting times from the root, and roundtrip commute times.
        """
        # 1- alpha is the probabilty of teleporting
        # 1- x_lazy is the probability of staying in current state (be lazy)
        beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
        N = laplacian.shape[0]
        print('is laplacian of biased symmetric', (laplacian.transpose() == laplacian).all())
        Id = np.zeros((N, N), float)
        np.fill_diagonal(Id, 1)
        eig_val, eig_vec = np.linalg.eig(
            laplacian)  # eig_vec[:,i] is eigenvector for eigenvalue eig_val[i]; not eigh as this is only for symmetric. the eig vecs are not in descending order
        print('eig val', eig_val.shape)
        if number_eig == 0: number_eig = eig_vec.shape[1]
        print('number of eig vec', number_eig)
        Greens_matrix = np.zeros((N, N), float)
        beta_norm_lap = np.zeros((N, N), float)
        # indicator matrices used to form the hitting-time difference X_v - X_u
        Xu = np.zeros((N, N))
        Xu[:, root] = 1
        Id_Xv = np.zeros((N, N), int)
        np.fill_diagonal(Id_Xv, 1)
        Xv_Xu = Id_Xv - Xu
        start_ = 0
        if alpha_teleport == 1:
            start_ = 1  # if there are no jumps (alpha_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
        for i in range(start_, number_eig):  # 0 instead of 1th eig
            vec_i = eig_vec[:, i]
            factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
            vec_i = np.reshape(vec_i, (-1, 1))
            eigen_vec_mult = vec_i.dot(vec_i.T)
            Greens_matrix = Greens_matrix + (
                    eigen_vec_mult / factor)  # Greens function is the inverse of the beta-normalized laplacian
            beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor)  # beta-normalized laplacian
        # sandwich the Green's function with D^-1/2 and scale by beta
        temp = Greens_matrix.dot(inv_sqr_deg)
        temp = inv_sqr_deg.dot(temp) * beta_teleport
        hitting_matrix = np.zeros((N, N), float)
        diag_row = np.diagonal(temp)
        for i in range(N):
            hitting_matrix[i, :] = diag_row - temp[i, :]
        roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
        temp = Xv_Xu.dot(temp)
        final_hitting_times = np.diagonal(
            temp)  ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
        roundtrip_times = roundtrip_commute_matrix[root, :]
        return abs(final_hitting_times), roundtrip_times
def project_hittingtimes_sc(self, pt):
if self.data.shape[0] > 1000:
knn_sc = 30
else:
knn_sc = 10
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto sc', neighbor_array.shape)
labels = np.asarray(self.labels)
sc_pt = np.zeros((len(self.labels),))
i = 0
for row in neighbor_array:
mean_weight = 0
# print('row in neighbor array of cells', row, labels.shape)
neighboring_clus = labels[row]
# print('neighbor clusters labels', neighboring_clus)
for clus_i in set(list(neighboring_clus)):
hitting_time_clus_i = pt[clus_i]
num_clus_i = np.sum(neighboring_clus == clus_i)
#if clus_i == self.root[0]: print('root is a neighbor', pt[clus_i], 'num NN cells beloning to root', num_clus_i)
# print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
mean_weight = mean_weight + hitting_time_clus_i * num_clus_i / knn_sc
# print('mean weight',mean_weight)
sc_pt[i] = mean_weight
#if self.root[0] in set(list(neighboring_clus)): print('the mean sc time for root neighbor is', mean_weight)
i = i + 1
return sc_pt
def project_branch_probability_sc(self, bp_array_clus):
if self.data.shape[0] > 1000:
knn_sc = 10 # 30
else:
knn_sc = 10
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto sc', neighbor_array.shape)
labels = np.asarray(self.labels)
weight_array = np.zeros((len(self.labels), len(list(set(self.labels)))))
for irow, row in enumerate(neighbor_array):
mean_weight = 0
#print('row in neighbor array of cells', row, labels.shape)
neighboring_clus = labels[row]
print('neighbor clusters labels', neighboring_clus)
for clus_i in set(list(neighboring_clus)):
# hitting_time_clus_i = df_graph[clus_i]
num_clus_i = np.sum(neighboring_clus == clus_i)
# print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
wi = num_clus_i / knn_sc
weight_array[irow, clus_i] = wi
# print('mean weight',mean_weight)
#print('rowi of weight array', weight_array[irow,:])
#print('shape weight array', weight_array)
print(weight_array)
bp_array_sc = weight_array.dot(bp_array_clus)
bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0) #divide cell by max value in that column
print('column max:',np.max(bp_array_sc, axis=0))
#print('sc bp array max', np.max(bp_array_sc))
#bp_array_sc = bp_array_sc/np.max(bp_array_sc)
for i, label_ts in enumerate(list(self.terminal_clusters)):
print('set labels', set(labels))
print('set terminal clus' ,set(self.terminal_clusters))
loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
if np.max(bp_array_sc[loc_noti,i])==1: bp_array_sc[loc_i,i]=1.2
print('terminal cluster', label_ts, len(loc_i), loc_i)
print('sc bp array', bp_array_sc)
self.single_cell_bp = bp_array_sc
return
def make_knn_struct(self, too_big=False, big_cluster=None):
if self.knn > 190: print('please provide a lower K_in for KNN graph construction')
ef_query = max(100, self.knn + 1) # ef always should be >K. higher ef, more accuate query
if too_big == False:
num_dims = self.data.shape[1]
n_elements = self.data.shape[0]
p = hnswlib.Index(space=self.distance, dim=num_dims) # default to Euclidean distance
p.set_num_threads(self.num_threads) # allow user to set threads used in KNN construction
if n_elements < 10000:
ef_param_const = min(n_elements - 10, 500)
ef_query = ef_param_const
print('setting ef_construction to', )
else:
ef_param_const = 200
if num_dims > 30:
p.init_index(max_elements=n_elements, ef_construction=ef_param_const,
M=48) ## good for scRNA seq where dimensionality is high
else:
p.init_index(max_elements=n_elements, ef_construction=200, M=30, )
p.add_items(self.data)
if too_big == True:
num_dims = big_cluster.shape[1]
n_elements = big_cluster.shape[0]
p = hnswlib.Index(space='l2', dim=num_dims)
p.init_index(max_elements=n_elements, ef_construction=200, M=30)
p.add_items(big_cluster)
p.set_ef(ef_query) # ef should always be > k
return p
def make_csrmatrix_noselfloop(self, neighbor_array, distance_array):
local_pruning_bool = not (self.keep_all_local_dist)
if local_pruning_bool == True: print('commencing local pruning based on minkowski metric at',
self.dist_std_local, 's.dev above mean')
row_list = []
col_list = []
weight_list = []
neighbor_array = neighbor_array # not listed in in any order of proximity
# print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
distance_array = distance_array
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
rowi = 0
count_0dist = 0
discard_count = 0
if local_pruning_bool == True: # do some local pruning based on distance
for row in neighbor_array:
distlist = distance_array[rowi, :]
to_keep = np.where(distlist <= np.mean(distlist) + self.dist_std_local * np.std(distlist))[0] # 0*std
updated_nn_ind = row[np.ix_(to_keep)]
updated_nn_weights = distlist[np.ix_(to_keep)]
discard_count = discard_count + (num_neigh - len(to_keep))
for ik in range(len(updated_nn_ind)):
if rowi != row[ik]: # remove self-loops
row_list.append(rowi)
col_list.append(updated_nn_ind[ik])
dist = np.sqrt(updated_nn_weights[ik])
if dist == 0:
count_0dist = count_0dist + 1
weight_list.append(dist)
rowi = rowi + 1
if local_pruning_bool == False: # dont prune based on distance
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.1)).tolist()
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
csr_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
return csr_graph
def func_mode(self, ll): # return MODE of list
# If multiple items are maximal, the function returns the first one encountered.
return max(set(ll), key=ll.count)
    def run_toobig_subPARC(self, X_data, jac_std_toobig=1,
                           jac_weighted_edges=True):
        """Re-cluster the cells of an oversized cluster with Leiden.

        Builds an hnswlib KNN graph over X_data, prunes extreme-weight edges
        and low-Jaccard-similarity edges, runs Leiden community detection, and
        merges very small clusters into the majority cluster of their original
        KNN neighbors.

        Parameters
        ----------
        X_data : ndarray (n_cells, n_features)
            Data of the oversized cluster's cells.
        jac_std_toobig : float or 'median'
            Global Jaccard pruning threshold: keep edges with similarity above
            mean - jac_std_toobig * std (or above the median).
        jac_weighted_edges : bool
            Whether Leiden uses the Jaccard similarities as edge weights.

        Returns
        -------
        ndarray
            Relabeled (contiguous, 0-based) cluster assignments for X_data.
        """
        n_elements = X_data.shape[0]
        hnsw = self.make_knn_struct(too_big=True, big_cluster=X_data)
        # cap k so the query never asks for (almost) every point as a neighbor
        if self.knn >= 0.8 * n_elements:
            k = int(0.5 * n_elements)
        else:
            k = self.knn
        neighbor_array, distance_array = hnsw.knn_query(X_data, k=k)
        # print('shapes of neigh and dist array', neighbor_array.shape, distance_array.shape)
        csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
        sources, targets = csr_array.nonzero()
        # mask starts all-False, so |= below is effectively an assignment;
        # it flags edges whose weight is an extreme outlier (> mean + 5*std)
        mask = np.zeros(len(sources), dtype=bool)
        mask |= (csr_array.data > (
            np.mean(csr_array.data) + np.std(csr_array.data) * 5))  # smaller distance means stronger edge
        # print('sum of mask', sum(mask))
        csr_array.data[mask] = 0
        csr_array.eliminate_zeros()
        sources, targets = csr_array.nonzero()
        edgelist = list(zip(sources.tolist(), targets.tolist()))
        edgelist_copy = edgelist.copy()
        G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
        sim_list = G.similarity_jaccard(pairs=edgelist_copy)  # list of jaccard weights
        new_edgelist = []
        sim_list_array = np.asarray(sim_list)
        # global pruning: keep only edges with Jaccard similarity above threshold
        if jac_std_toobig == 'median':
            threshold = np.median(sim_list)
        else:
            threshold = np.mean(sim_list) - jac_std_toobig * np.std(sim_list)
        strong_locs = np.where(sim_list_array > threshold)[0]
        for ii in strong_locs: new_edgelist.append(edgelist_copy[ii])
        sim_list_new = list(sim_list_array[strong_locs])
        if jac_weighted_edges == True:
            G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
        else:
            G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist))
        G_sim.simplify(combine_edges='sum')
        resolution_parameter = 1
        if jac_weighted_edges == True:
            partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
                                                 n_iterations=self.n_iter_leiden, seed=self.random_seed)
        else:
            partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
                                                 n_iterations=self.n_iter_leiden, seed=self.random_seed)
        # print('Q= %.2f' % partition.quality())
        PARC_labels_leiden = np.asarray(partition.membership)
        PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
        small_pop_list = []
        small_cluster_list = []
        small_pop_exist = False
        # relabel to contiguous 0-based ids
        dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
        # first pass: collect clusters with fewer than 5 members
        for cluster in set(PARC_labels_leiden):
            population = len(np.where(PARC_labels_leiden == cluster)[0])
            if population < 5:  # <10
                small_pop_exist = True
                small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
                small_cluster_list.append(cluster)
        # reassign each cell of a small cluster to the majority (non-small)
        # cluster among its original KNN neighbors, when one exists
        for small_cluster in small_pop_list:
            for single_cell in small_cluster:
                old_neighbors = neighbor_array[single_cell, :]
                group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
                group_of_old_neighbors = list(group_of_old_neighbors.flatten())
                available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
                if len(available_neighbours) > 0:
                    available_neighbours_list = [value for value in group_of_old_neighbors if
                                                 value in list(available_neighbours)]
                    best_group = max(available_neighbours_list, key=available_neighbours_list.count)
                    PARC_labels_leiden[single_cell] = best_group
        # second pass: keep dissolving clusters with <10 members into their
        # neighbors' majority cluster, giving up after 5 seconds of wall time
        do_while_time = time.time()
        while (small_pop_exist == True) & (time.time() - do_while_time < 5):
            small_pop_list = []
            small_pop_exist = False
            for cluster in set(list(PARC_labels_leiden.flatten())):
                population = len(np.where(PARC_labels_leiden == cluster)[0])
                if population < 10:
                    small_pop_exist = True
                    # print(cluster, ' has small population of', population, )
                    small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
            for small_cluster in small_pop_list:
                for single_cell in small_cluster:
                    old_neighbors = neighbor_array[single_cell, :]
                    group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
                    group_of_old_neighbors = list(group_of_old_neighbors.flatten())
                    best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
                    PARC_labels_leiden[single_cell] = best_group
        # final relabel to contiguous 0-based ids
        dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
        self.labels = PARC_labels_leiden
        print('finished labels')
        # self.anndata.obs['parc_label'] = self.labels
        # cma1_cluster = self.anndata.obs.groupby('parc_label').mean('Cma1')
        return PARC_labels_leiden
def recompute_weights(self, clustergraph_ig, pop_list_raw):
sparse_clustergraph = get_sparse_from_igraph(clustergraph_ig, weight_attr='weight')
n = sparse_clustergraph.shape[0]
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
weights = sparse_clustergraph.data
# print('edgelist of combined clustergraph', edgelist)
# print('edge weights of combined clustergraph', weights)
new_weights = []
i = 0
for s, t in edgelist:
pop_s = pop_list_raw[s]
pop_t = pop_list_raw[t]
w = weights[i]
nw = w * (pop_s + pop_t) / (pop_s * pop_t) # *
new_weights.append(nw)
# print('old and new', w, nw)
i = i + 1
scale_factor = max(new_weights) - min(new_weights)
wmin = min(new_weights)
# wmax = max(new_weights)
# print('weights before scaling', new_weights)
new_weights = [(wi + wmin) / scale_factor for wi in new_weights]
# print('weights after scaling', new_weights)
sparse_clustergraph = csr_matrix((np.array(new_weights), (sources, targets)),
shape=(n, n))
# print('new weights', new_weights)
# print(sparse_clustergraph)
# print('reweighted sparse clustergraph')
# print(sparse_clustergraph)
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
return sparse_clustergraph, edgelist
def find_root_HumanCD34(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_idx]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_bcell(self, graph_dense, PARC_labels_leiden, root_user, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_user]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
print('deg list', deg_list) # locallytrimmed_g.degree()
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
# print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
# print('spr node degree list sub',super_node_degree_list, super_majority_cluster)
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) in majority_truth) & (str(root_user) in str(super_majority_truth)):
if super_node_degree < super_min_deg:
# if deg_list[cluster_i] < min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root, ' with degree', min_deg, 'and super node degree',
super_min_deg)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
print('self.super_cluster_labels', super_cluster_labels_sub, ' foundsuper_cluster_sub and super root',
found_super_and_sub_root)
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
print('cluster', cluster_i, 'set true labels', set(true_labels))
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if (str(root_user) in str(majority_truth)):
print('did not find a super and sub cluster with majority ', root_user)
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree', min_deg)
# print('len graph node label', graph_node_label)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
self.root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
    def full_graph_paths(self, X_data, n_components_original=1):
        """Build a low-K KNN igraph over X_data for shortest-path visualization.

        Starting at k=3, k is increased until the KNN graph has one connected
        component (or, when the original full graph already had several, until
        it has at most n_components_original components or k exceeds 5). Edge
        weights are the raw KNN distances, summed over duplicate edges.

        Returns
        -------
        igraph.Graph (simplified, with 'weight' edge attribute)
        """
        # make igraph object of low-K KNN using the knn_struct PCA-dimension space made in PARC.
        # This is later used by find_shortest_path for sc_bp visual
        # neighbor array is not listed in in any order of proximity
        print('number of components in the original full graph', n_components_original)
        neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=3)
        csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
        n_comp, comp_labels = connected_components(csr_array, return_labels=True)
        k_0 = 3
        if n_components_original == 1:
            # grow k until the graph is fully connected
            while (n_comp > 1):
                k_0 = k_0 + 1
                neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
                csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
                n_comp, comp_labels = connected_components(csr_array, return_labels=True)
        if n_components_original > 1:
            # grow k (capped at 5) until the component count matches the original graph
            while (k_0 <= 5) & (n_comp > n_components_original):
                k_0 = k_0 + 1
                neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
                csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
                n_comp, comp_labels = connected_components(csr_array, return_labels=True)
        row_list = []
        print('size neighbor array in low-KNN in pca-space for visualization', neighbor_array.shape)
        n_neighbors = neighbor_array.shape[1]
        n_cells = neighbor_array.shape[0]
        # repeat each row index once per neighbor so (row, col, weight) triplets align
        row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
        col_list = neighbor_array.flatten().tolist()
        weight_list = (distance_array.flatten()).tolist()
        csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
                                    shape=(n_cells, n_cells))
        sources, targets = csr_full_graph.nonzero()
        edgelist = list(zip(sources.tolist(), targets.tolist()))
        Gr = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
        Gr.simplify(combine_edges='sum')
        return Gr
def get_gene_expression(self, gene_exp, title_gene=""):
fig_0, ax = plt.subplots()
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
# print('cmap', cmap_)
for i in range(n_terminal_states):
sc_bp = sc_bp_original.copy()
loc_terminal_i = np.where(np.asarray(self.labels) == self.terminal_clusters[i])[0]
sc_bp[loc_terminal_i,:] = 1.4
loc_i = np.where(sc_bp[:, i] > 0.8)[0]
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
# max_val_pt = np.percentile(np.asarray(val_pt),90)
max_val_pt = max(val_pt)
#print('gene exp max pt', max_val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] #0.001
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
# print('loc i bp', loc_i_bp)
# print('loc i sc', loc_i_sc)
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
# print('loc_', loc_.shape)
gam_in = np.asarray(sc_pt)[loc_]
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
# print('Gene Expression:', gam_in.shape)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# print('weights',weights)
# print('weights ==0', np.sum(weights == 0))
# print('Gene Expression: setting up subplot number',i)
if len(loc_)>1:
#geneGAM = pg.LinearGAM(n_splines=20, spline_order=5, lam=10).fit(x, y, weights=weights)
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
else: print('loc_ has length zero')
ax.plot(xval, yg, color=cmap_[i], linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round', label='TS:' + str(self.terminal_clusters[i]))
plt.legend()
plt.title('Gene Expression ' + title_gene)
return
def run_subPARC(self):
root_user = self.root_user
X_data = self.data
too_big_factor = self.too_big_factor
small_pop = self.small_pop
jac_std_global = self.jac_std_global
jac_weighted_edges = self.jac_weighted_edges
n_elements = X_data.shape[0]
# if n_elements < 2000: self.knn = 10
n_elements = X_data.shape[0]
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=self.knn)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
#### construct full graph
row_list = []
neighbor_array = neighbor_array # not listed in in any order of proximity
print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (1. / (distance_array.flatten() + 0.05)).tolist()
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
#DO MAGIC IMPUTATION#
if self.do_magic == True:
from sklearn.preprocessing import normalize
magic_steps = 3
Transition_full_graph = normalize(csr_full_graph, norm='l1', axis=1) ** magic_steps
imputed_data = pd.DataFrame(np.dot(Transition_full_graph.todense(), data), index=data.index, columns=data.columns )
n_original_comp, n_original_comp_labels = connected_components(csr_full_graph, directed=False)
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
G = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist) # list of jaccard weights
ig_fullgraph = ig.Graph(list(edgelist), edge_attrs={'weight': sim_list})
ig_fullgraph.simplify(combine_edges='sum')
inv_simlist = [1 - i for i in sim_list]
# full_graph_shortpath = ig.Graph(list(edgelist), edge_attrs={'weight': inv_simlist}) #the weights reflect distances
# full_graph_shortpath.simplify(combine_edges='sum')
# self.full_graph_shortpath = full_graph_shortpath
self.full_graph_shortpath = self.full_graph_paths(X_data, n_original_comp)
####
sources, targets = csr_array.nonzero()
edgelist = list(zip(sources, targets))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
# print('average degree of prejacard graph is %.1f'% (np.mean(G.degree())))
# print('computing Jaccard metric')
sim_list = G.similarity_jaccard(pairs=edgelist_copy)
print('commencing global pruning')
sim_list_array = np.asarray(sim_list)
edge_list_copy_array = np.asarray(edgelist_copy)
if jac_std_global == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_global * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
print('Share of edges kept after Global Pruning %.2f' % (len(strong_locs) / len(sim_list)), '%')
new_edgelist = list(edge_list_copy_array[strong_locs])
sim_list_new = list(sim_list_array[strong_locs])
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
# print('average degree of graph is %.1f' % (np.mean(G_sim.degree())))
G_sim.simplify(combine_edges='sum') # "first"
# print('average degree of SIMPLE graph is %.1f' % (np.mean(G_sim.degree())))
print('commencing community detection')
if jac_weighted_edges == True:
start_leiden = time.time()
# print('call leiden on weighted graph for ', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
else:
start_leiden = time.time()
# print('call leiden on unweighted graph', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
time_end_PARC = time.time()
# print('Q= %.1f' % (partition.quality()))
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1)
too_big = False
# print('labels found after Leiden', set(list(PARC_labels_leiden.T)[0])) will have some outlier clusters that need to be added to a cluster if a cluster has members that are KNN
cluster_i_loc = np.where(PARC_labels_leiden == 0)[
0] # the 0th cluster is the largest one. so if cluster 0 is not too big, then the others wont be too big either
pop_i = len(cluster_i_loc)
print('largest cluster population', pop_i, too_big_factor, n_elements)
if pop_i > too_big_factor * n_elements: # 0.4
too_big = True
print('too big is', too_big)
cluster_big_loc = cluster_i_loc
list_pop_too_bigs = [pop_i]
cluster_too_big = 0
while too_big == True:
X_data_big = X_data[cluster_big_loc, :]
print(X_data_big.shape)
PARC_labels_leiden_big = self.run_toobig_subPARC(X_data_big)
# print('set of new big labels ', set(PARC_labels_leiden_big.flatten()))
PARC_labels_leiden_big = PARC_labels_leiden_big + 1000
# print('set of new big labels +1000 ', set(list(PARC_labels_leiden_big.flatten())))
pop_list = []
for item in set(list(PARC_labels_leiden_big.flatten())):
pop_list.append([item, list(PARC_labels_leiden_big.flatten()).count(item)])
# print('pop of new big labels', pop_list)
jj = 0
print('shape PARC_labels_leiden', PARC_labels_leiden.shape)
for j in cluster_big_loc:
PARC_labels_leiden[j] = PARC_labels_leiden_big[jj]
jj = jj + 1
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
print('new set of labels ')
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1, set(PARC_labels_leiden))
too_big = False
set_PARC_labels_leiden = set(PARC_labels_leiden)
PARC_labels_leiden = np.asarray(PARC_labels_leiden)
for cluster_ii in set_PARC_labels_leiden:
cluster_ii_loc = np.where(PARC_labels_leiden == cluster_ii)[0]
pop_ii = len(cluster_ii_loc)
not_yet_expanded = pop_ii not in list_pop_too_bigs
if pop_ii > too_big_factor * n_elements and not_yet_expanded == True:
too_big = True
print('cluster', cluster_ii, 'is too big and has population', pop_ii)
cluster_big_loc = cluster_ii_loc
cluster_big = cluster_ii
big_pop = pop_ii
if too_big == True:
list_pop_too_bigs.append(big_pop)
print('cluster', cluster_big, 'is too big with population', big_pop, '. It will be expanded')
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop: # 10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
time_smallpop = time.time()
while (small_pop_exist) == True & (time.time() - time_smallpop < 15):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop:
small_pop_exist = True
# print(cluster, ' has small population of', population, )
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
PARC_labels_leiden = list(PARC_labels_leiden.flatten())
# print('final labels allocation', set(PARC_labels_leiden))
pop_list = []
pop_list_raw = []
for item in range(len(set(PARC_labels_leiden))):
pop_item = PARC_labels_leiden.count(item)
pop_list.append((item, pop_item))
pop_list_raw.append(pop_item)
print('list of cluster labels and populations', len(pop_list), pop_list)
self.labels = PARC_labels_leiden # list
n_clus = len(set(self.labels))
##determine majority truth
if self.pseudotime == True:
## Make cluster-graph (1)
vc_graph = ig.VertexClustering(ig_fullgraph,
membership=PARC_labels_leiden) # jaccard weights, bigger is better
vc_graph_old = ig.VertexClustering(G_sim, membership=PARC_labels_leiden)
# print('vc graph G_sim', vc_graph)
vc_graph = vc_graph.cluster_graph(combine_edges='sum')
vc_graph_old = vc_graph_old.cluster_graph(combine_edges='sum')
# print('vc graph G_sim', vc_graph)
# print('vc graph G_sim old', vc_graph_old)
reweighted_sparse_vc, edgelist = self.recompute_weights(vc_graph, pop_list_raw)
print('len old edge list', edgelist) # 0.15 for CD34
if self.dataset == 'toy': # ''humanCD34':# == False:
global_pruning_std = 2
print('Toy: global cluster graph pruning level', global_pruning_std)
# toy data is usually simpler so we dont need to prune the links as the clusters are usually well separated such that spurious links dont exist
elif self.dataset == 'bcell':
global_pruning_std = 0.15
print('Bcell: global cluster graph pruning level', global_pruning_std)
else:
global_pruning_std = 0.15
print('Humancd34: global cluster graph pruning level', global_pruning_std)
edgeweights, edgelist, comp_labels = local_pruning_clustergraph_mst(reweighted_sparse_vc,
global_pruning_std=global_pruning_std,
preserve_disconnected=self.preserve_disconnected) # 0.8 on 20knn and 40ncomp #0.15
self.connected_comp_labels = comp_labels
print('final comp labels set', set(comp_labels))
print('len new edge list', edgelist)
locallytrimmed_g = ig.Graph(edgelist, edge_attrs={'weight': edgeweights.tolist()})
# print('locally trimmed_g', locallytrimmed_g)
locallytrimmed_g = locallytrimmed_g.simplify(combine_edges='sum')
# print('locally trimmed and simplified', locallytrimmed_g)
locallytrimmed_sparse_vc = get_sparse_from_igraph(locallytrimmed_g, weight_attr='weight')
layout = locallytrimmed_g.layout_fruchterman_reingold(
weights='weight') ##final layout based on locally trimmed
# globally trimmed link
sources, targets = locallytrimmed_sparse_vc.nonzero()
edgelist_simple = list(zip(sources.tolist(), targets.tolist()))
edgelist_unique = set(tuple(sorted(l)) for l in edgelist_simple) # keep only one of (0,1) and (1,0)
self.edgelist_unique = edgelist_unique
self.edgelist = edgelist
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
# number of components
graph_dict = {}
n_components, labels = connected_components(csgraph=locallytrimmed_sparse_vc, directed=False,
return_labels=True)
print('there are ', n_components, 'components in the graph')
df_graph = pd.DataFrame(locallytrimmed_sparse_vc.todense())
df_graph['cc'] = labels
df_graph['pt'] = float('NaN')
df_graph['markov_pt'] = float('NaN')
df_graph['majority_truth'] = 'maj truth'
df_graph['graph_node_label'] = 'node label'
set_parc_labels = list(set(PARC_labels_leiden))
set_parc_labels.sort()
print('parc labels', set_parc_labels)
terminal_clus = []
node_deg_list = []
super_terminal_clus_revised = []
pd_columnnames_terminal = []
dict_terminal_super_sub_pairs = {}
self.root = []
for comp_i in range(n_components):
loc_compi = np.where(labels == comp_i)[0]
print('loc_compi', loc_compi)
a_i = df_graph.iloc[loc_compi][loc_compi].values
a_i = csr_matrix(a_i, (a_i.shape[0], a_i.shape[0]))
cluster_labels_subi = [x for x in loc_compi]
sc_labels_subi = [PARC_labels_leiden[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
sc_truelabels_subi = [self.true_label[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
if self.dataset == 'toy':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
print('super node degree', self.super_node_degree_list)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
elif self.dataset == 'humanCD34':
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_HumanCD34(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
elif self.dataset == 'bcell':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
'''
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
'''
else: # if this is p0.run()
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
self.root.append(root_i)
for item in node_deg_list_i:
node_deg_list.append(item)
print('a_i shape, true labels shape', a_i.shape, len(sc_truelabels_subi), len(sc_labels_subi))
new_root_index_found = False
for ii, llabel in enumerate(cluster_labels_subi):
if root_i == llabel:
new_root_index = ii
new_root_index_found = True
print('new root index', new_root_index)
if new_root_index_found == False:
print('cannot find the new root index')
new_root_index = 0
hitting_times, roundtrip_times = self.compute_hitting_time(a_i, root=new_root_index,
x_lazy=x_lazy, alpha_teleport=alpha_teleport)
# rescale hitting times
very_high = np.mean(hitting_times) + 1.5 * np.std(hitting_times)
without_very_high_pt = [iii for iii in hitting_times if iii < very_high]
new_very_high = np.mean(without_very_high_pt) + np.std(without_very_high_pt)
print('very high, and new very high', very_high, new_very_high)
new_hitting_times = [x if x < very_high else very_high for x in hitting_times]
hitting_times = np.asarray(new_hitting_times)
scaling_fac = 10 / max(hitting_times)
hitting_times = hitting_times * scaling_fac
s_ai, t_ai = a_i.nonzero()
edgelist_ai = list(zip(s_ai, t_ai))
edgeweights_ai = a_i.data
# print('edgelist ai', edgelist_ai)
# print('edgeweight ai', edgeweights_ai)
biased_edgeweights_ai = get_biased_weights(edgelist_ai, edgeweights_ai, hitting_times)
# biased_sparse = csr_matrix((biased_edgeweights, (row, col)))
adjacency_matrix_ai = np.zeros((a_i.shape[0], a_i.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix_ai[start, end] = biased_edgeweights_ai[i]
markov_hitting_times_ai = self.simulate_markov(adjacency_matrix_ai,
new_root_index) # +adjacency_matrix.T))
print('markov_hitting times ')
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
very_high = np.mean(markov_hitting_times_ai) + 1.5 * np.std(markov_hitting_times_ai)
very_high = min(very_high, max(markov_hitting_times_ai))
without_very_high_pt = [iii for iii in markov_hitting_times_ai if iii < very_high]
new_very_high = min(np.mean(without_very_high_pt) + np.std(without_very_high_pt), very_high)
print('very high, and new very high', very_high, new_very_high)
new_markov_hitting_times_ai = [x if x < very_high else very_high for x in markov_hitting_times_ai]
for eee, ttt in enumerate(new_markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
markov_hitting_times_ai = np.asarray(new_markov_hitting_times_ai)
scaling_fac = 10 / max(markov_hitting_times_ai)
markov_hitting_times_ai = markov_hitting_times_ai * scaling_fac
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
print('markov hitting times', [(i, j) for i, j in enumerate(markov_hitting_times_ai)])
print('hitting times', [(i, j) for i, j in enumerate(hitting_times)])
markov_hitting_times_ai = (markov_hitting_times_ai )#+ hitting_times)*.5 #consensus
adjacency_matrix_csr_ai = sparse.csr_matrix(adjacency_matrix_ai)
(sources, targets) = adjacency_matrix_csr_ai.nonzero()
edgelist_ai = list(zip(sources, targets))
weights_ai = adjacency_matrix_csr_ai.data
bias_weights_2_ai = get_biased_weights(edgelist_ai, weights_ai, markov_hitting_times_ai, round_no=2)
adjacency_matrix2_ai = np.zeros((adjacency_matrix_ai.shape[0], adjacency_matrix_ai.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix2_ai[start, end] = bias_weights_2_ai[i]
if self.super_terminal_cells == False:
terminal_clus_ai = self.get_terminal_clusters(adjacency_matrix2_ai, markov_hitting_times_ai,
new_root_index)
for i in terminal_clus_ai:
terminal_clus.append(cluster_labels_subi[i])
elif len(self.super_terminal_clusters) > 0:
sub_terminal_clus_temp_ = []
terminal_clus_ai = []
for i in self.super_terminal_clusters:
print('super cluster terminal label', i)
sub_terminal_clus_temp_loc = np.where(np.asarray(self.super_cluster_labels) == i)[0]
# print('sub_terminal_clus_temp_loc', sub_terminal_clus_temp_loc)
temp_set = set(list(np.asarray(self.labels)[sub_terminal_clus_temp_loc]))
# print('temp set', temp_set)
temp_max_pt = 0
most_likely_sub_terminal = False
count_frequency_super_in_sub = 0
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
super_cluster_composition = self.func_mode(
list(np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]))
# print('the composision of sub cluster', j, 'is mostly', super_cluster_composition)
if (markov_hitting_times_ai[j] > temp_max_pt) & (super_cluster_composition == i):
temp_max_pt = markov_hitting_times_ai[j]
print('super, j and temp max pt', i, j, temp_max_pt)
most_likely_sub_terminal = j
if most_likely_sub_terminal == False:
print('no sub cluster has majority made of super-cluster ', i)
for j in temp_set:
count_frequency_super_in_sub_temp = list(
np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]).count(j)
if (markov_hitting_times_ai[j] > temp_max_pt) & (
count_frequency_super_in_sub_temp > count_frequency_super_in_sub):
count_frequency_super_in_sub = count_frequency_super_in_sub_temp
temp_max_pt = markov_hitting_times_ai[j]
most_likely_sub_terminal = j
sub_terminal_clus_temp_.append(most_likely_sub_terminal)
if (markov_hitting_times_ai[most_likely_sub_terminal] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)):
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
super_terminal_clus_revised.append(i)
terminal_clus.append(most_likely_sub_terminal)
terminal_clus_ai.append(
np.where(np.asarray(cluster_labels_subi) == most_likely_sub_terminal)[0][0]) # =i
# terminal_clus_ai.append(most_likely_sub_terminal)
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal)
else:
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal, 'but the pseudotime is too low')
# terminal_clus.append(9999)
# super_terminal_clus_revised.append(9999)
else:
print('super terminal cells', self.super_terminal_cells)
print([self.labels[ti] for ti in
self.super_terminal_cells]) # find the sub-cluster which contains the single-cell-superterminal
temp = [self.labels[ti] for ti in self.super_terminal_cells if
self.labels[ti] in cluster_labels_subi]
terminal_clus_ai = []
for i in temp:
terminal_clus_ai.append(np.where(np.asarray(cluster_labels_subi) == i)[0][0])
terminal_clus.append(i)
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
# for i in temp:
# terminal_clus.append(i)
print('terminal clus in this a_i', terminal_clus_ai)
print('final terminal clus', terminal_clus)
for target_terminal in terminal_clus_ai:
#prob_ai = self.prob_reaching_terminal_state(target_terminal, terminal_clus_ai, adjacency_matrix2_ai, new_root_index, pt=markov_hitting_times_ai, num_sim=500)
prob_ai = self.simulate_branch_probability(target_terminal, terminal_clus_ai, adjacency_matrix2_ai,
new_root_index, pt=markov_hitting_times_ai, num_sim=500) #50 ToDO change back to 500 = numsim
df_graph['terminal_clus' + str(cluster_labels_subi[target_terminal])] = 0.0000000
pd_columnnames_terminal.append('terminal_clus' + str(cluster_labels_subi[target_terminal]))
print('prob ai for target terminal', target_terminal, prob_ai)
for k, prob_ii in enumerate(prob_ai):
df_graph.at[cluster_labels_subi[k], 'terminal_clus' + str(
cluster_labels_subi[target_terminal])] = prob_ii
bp_array = df_graph[pd_columnnames_terminal].values
bp_array[np.isnan(bp_array)]=0.00000001
print('final bp_array NOT normed by rowsum', bp_array)
bp_array = bp_array / bp_array.sum(axis=1)[:, None]
bp_array[np.isnan(bp_array)] = 0.00000001
print('final bp_array normed by rowsum', bp_array)
for ei, ii in enumerate(loc_compi):
df_graph.at[ii, 'pt'] = hitting_times[ei]
df_graph.at[ii, 'graph_node_label'] = graph_node_label[ei]
df_graph.at[ii, 'majority_truth'] = graph_node_label[ei]
df_graph.at[ii, 'markov_pt'] = markov_hitting_times_ai[ei]
locallytrimmed_g.vs["label"] = df_graph['graph_node_label'].values
hitting_times = df_graph['pt'].values
if len(super_terminal_clus_revised) > 0:
self.revised_super_terminal_clusters = super_terminal_clus_revised
else:
self.revised_super_terminal_clusters = self.super_terminal_clusters
self.hitting_times = hitting_times # * 1000
self.markov_hitting_times = df_graph['markov_pt'].values
self.terminal_clusters = terminal_clus
print('terminal clusters', terminal_clus)
self.node_degree_list = node_deg_list
self.project_branch_probability_sc(bp_array)
self.dict_terminal_super_sub_pairs = dict_terminal_super_sub_pairs
hitting_times = self.markov_hitting_times
bias_weights_2_all = get_biased_weights(edgelist, edgeweights, self.markov_hitting_times, round_no=2)
row_list = []
col_list = []
for (rowi, coli) in edgelist:
row_list.append(rowi)
col_list.append(coli)
# print('shape', a_i.shape[0], a_i.shape[0], row_list)
temp_csr = csr_matrix((np.array(bias_weights_2_all), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
if self.dataset == 'toy': # 'humanCD34':#False:
visual_global_pruning_std = 0.15
max_outgoing = 4
else:
visual_global_pruning_std = 1 # 0.15#0 for human
max_outgoing = 2
# glob_std_pruning =0 and max_out = 2 for HumanCD34 to simplify structure
edgeweights_maxout_2, edgelist_maxout_2, comp_labels_2 = local_pruning_clustergraph_mst(temp_csr,
global_pruning_std=visual_global_pruning_std,
max_outgoing=max_outgoing,
preserve_disconnected=self.preserve_disconnected)
row_list = []
col_list = []
for (rowi, coli) in edgelist_maxout_2:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(edgeweights_maxout_2), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
temp_csr = temp_csr.transpose().todense() + temp_csr.todense()
temp_csr = np.tril(temp_csr, -1) # elements along the main diagonal and above are set to zero
temp_csr = csr_matrix(temp_csr)
edgeweights_maxout_2 = temp_csr.data
scale_factor = max(edgeweights_maxout_2) - min(edgeweights_maxout_2)
edgeweights_maxout_2 = [((wi + .1) * 2.5 / scale_factor) + 0.1 for wi in edgeweights_maxout_2]
sources, targets = temp_csr.nonzero()
edgelist_maxout_2 = list(zip(sources.tolist(), targets.tolist()))
self.edgelist_maxout = edgelist_maxout_2
self.edgeweights_maxout = edgeweights_maxout_2
remove_outliers = hitting_times
threshold = np.percentile(remove_outliers, 95) # np.mean(remove_outliers) + 1* np.std(remove_outliers)
th_hitting_times = [x if x < threshold else threshold for x in hitting_times]
remove_outliers_low = hitting_times[hitting_times < (np.mean(hitting_times) - 0.3 * np.std(hitting_times))]
threshold_low = np.mean(remove_outliers_low) - 0.3 * np.std(remove_outliers_low)
threshold_low = np.percentile(remove_outliers_low, 5)
# print('thresh low', threshold_low)
th_hitting_times = [x if x > threshold_low else threshold_low for x in th_hitting_times]
scaled_hitting_times = (th_hitting_times - np.min(th_hitting_times))
scaled_hitting_times = scaled_hitting_times * (1000 / np.max(scaled_hitting_times))
self.scaled_hitting_times = scaled_hitting_times
# self.single_cell_pt = self.project_hittingtimes_sc(self.hitting_times)
# self.single_cell_pt_stationary_bias = self.project_hittingtimes_sc(self.stationary_hitting_times.flatten())
print('markov hitting times to put in single cell project', self.markov_hitting_times)
self.single_cell_pt_markov = self.project_hittingtimes_sc(self.markov_hitting_times)
print('markov hitting times to put in single cell project', self.single_cell_pt_markov)
# self.dijkstra_hitting_times = self.path_length_onbias(edgelist, biased_edgeweights)
# print('dijkstra hitting times', [(i,j) for i,j in enumerate(self.dijkstra_hitting_times)])
# self.single_cell_pt_dijkstra_bias = self.project_hittingtimes_sc(self.dijkstra_hitting_times)
# threshold = np.mean(scaled_hitting_times)+0.25*np.std(scaled_hitting_times)
threshold = int(threshold)
scaled_hitting_times = scaled_hitting_times.astype(int)
# print('scaled hitting times')
# print(scaled_hitting_times)
pal = ig.drawing.colors.AdvancedGradientPalette(['yellow', 'green', 'blue'], n=1001)
all_colors = []
# print('100 scaled hitting', scaled_hitting_times)
for i in scaled_hitting_times:
all_colors.append(pal.get(int(i))[0:3])
# print('extract all colors', zip(scaled_hitting_times,all_colors))
locallytrimmed_g.vs['hitting_times'] = scaled_hitting_times
locallytrimmed_g.vs['color'] = [pal.get(i)[0:3] for i in scaled_hitting_times]
self.group_color = [colors.to_hex(v) for v in locallytrimmed_g.vs['color']] # based on ygb scale
viridis_cmap = cm.get_cmap('viridis_r')
self.group_color_cmap = [colors.to_hex(v) for v in
viridis_cmap(scaled_hitting_times / 1000)] # based on ygb scale
self.graph_node_label = df_graph['graph_node_label'].values
self.edgeweight = [e['weight'] * 1 for e in locallytrimmed_g.es]
print('self edge weight', len(self.edgeweight), self.edgeweight)
print('self edge list', len(self.edgelist_unique), self.edgelist_unique)
self.graph_node_pos = layout.coords
f, ((ax, ax1, ax2)) = plt.subplots(1, 3, sharey=True)
self.draw_piechart_graph(ax, ax1, ax2)
plt.show()
return
    def draw_piechart_graph(self, ax, ax1, ax2, type_pt='original', ):
        """Draw the cluster-level trajectory graph on three matplotlib axes.

        ``ax`` shows each cluster as an inset pie chart whose wedges give the
        composition of reference (true) labels inside that cluster; edges are
        drawn with an arrowhead oriented from the lower- to the
        higher-pseudotime endpoint. ``ax1`` and ``ax2`` show the same graph
        with nodes colored by the Markov-simulation pseudotime and by the
        undirected-graph hitting times respectively; terminal clusters are
        outlined in red.

        Parameters
        ----------
        ax, ax1, ax2 : matplotlib axes to draw on.
        type_pt : str
            Selects the pseudotime vector used to orient edges on ``ax``:
            'original' -> self.scaled_hitting_times,
            'biased_stationary' -> self.biased_hitting_times_stationary,
            'markov' -> self.markov_hitting_times.
            NOTE(review): any other value leaves ``pt`` unbound and raises
            NameError at the first edge -- confirm callers only pass these.
        """
        arrow_head_w = 0.2
        edgeweight_scale = 1
        node_pos = self.graph_node_pos
        edgelist = list(self.edgelist_maxout)
        edgeweight = self.edgeweights_maxout
        node_pos = np.asarray(node_pos)
        graph_node_label = self.graph_node_label
        # Select the pseudotime used to orient the arrows on the reference axis.
        if type_pt == 'original': pt = self.scaled_hitting_times
        if type_pt == 'biased_stationary': pt = self.biased_hitting_times_stationary
        if type_pt == 'markov': pt = self.markov_hitting_times
        import matplotlib.lines as lines
        n_groups = len(set(self.labels))  # node_pos.shape[0]
        n_truegroups = len(set(self.true_label))
        group_pop = np.zeros([n_groups, 1])
        # Per-cluster composition of true labels (rows: clusters, cols: true labels).
        group_frac = pd.DataFrame(np.zeros([n_groups, n_truegroups]), columns=list(set(self.true_label)))
        for group_i in set(self.labels):
            # NOTE(review): the elementwise '==' assumes self.labels is a numpy
            # array; with a plain Python list this comparison is a scalar and
            # np.where would return an empty index -- confirm upstream type.
            loc_i = np.where(self.labels == group_i)[0]
            group_pop[group_i] = len(loc_i)  # np.sum(loc_i) / 1000 + 1
            # NOTE(review): the double-bracket fancy index [[loc_i]] relies on
            # legacy NumPy behaviour equivalent to [loc_i]; on current NumPy it
            # adds a leading axis -- verify against the installed version.
            true_label_in_group_i = list(np.asarray(self.true_label)[[loc_i]])
            for ii in set(true_label_in_group_i):
                group_frac[ii][group_i] = true_label_in_group_i.count(ii)
        # Normalize each row so the wedge fractions sum to 1 per cluster.
        group_frac = group_frac.div(group_frac.sum(axis=1), axis=0)
        line_true = np.linspace(0, 1, n_truegroups)
        color_true_list = [plt.cm.jet(color) for color in line_true]
        sct = ax.scatter(
            node_pos[:, 0], node_pos[:, 1],
            c='white', edgecolors='face', s=group_pop, cmap='jet')
        print('draw triangle edgelist', len(edgelist), edgelist)
        for e_i, (start, end) in enumerate(edgelist):
            # Orient every edge from the earlier to the later pseudotime.
            if pt[start] > pt[end]:
                temp = start
                start = end
                end = temp
            ax.add_line(lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
                                     color='grey', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.2))
            # Fit a straight line through the two endpoints and place an
            # arrowhead at its midpoint (sample 250 of 500) pointing toward the
            # later node; direction_arrow encodes left/right along x.
            z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
            minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
            if (node_pos[start, 0] < node_pos[end, 0]):
                direction_arrow = 1
            else:
                direction_arrow = -1
            maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
            xp = np.linspace(minx, maxx, 500)
            p = np.poly1d(z)
            smooth = p(xp)
            step = 1
            if direction_arrow == 1:
                ax.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250], shape='full',
                         lw=0,
                         length_includes_head=True, head_width=arrow_head_w,
                         color='grey')
                # ax.plot(xp, smooth, linewidth=edgeweight[e_i], c='pink')
            else:
                ax.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
                         smooth[250 - step] - smooth[250], shape='full', lw=0,
                         length_includes_head=True, head_width=arrow_head_w, color='grey')
        # Map each node's data coordinates into figure coordinates so a small
        # inset pie-chart axes can be placed on top of it.
        trans = ax.transData.transform
        bbox = ax.get_position().get_points()
        ax_x_min = bbox[0, 0]
        ax_x_max = bbox[1, 0]
        ax_y_min = bbox[0, 1]
        ax_y_max = bbox[1, 1]
        ax_len_x = ax_x_max - ax_x_min
        ax_len_y = ax_y_max - ax_y_min
        trans2 = ax.transAxes.inverted().transform
        pie_axs = []
        # Pie radius scales with the min-max normalized cluster population.
        pie_size_ar = ((group_pop - np.min(group_pop)) / (np.max(group_pop) - np.min(group_pop)) + 0.5) / 10
        for node_i in range(n_groups):
            pie_size = pie_size_ar[node_i][0]
            x1, y1 = trans(node_pos[node_i])  # data coordinates
            xa, ya = trans2((x1, y1))  # axis coordinates
            xa = ax_x_min + (xa - pie_size / 2) * ax_len_x
            ya = ax_y_min + (ya - pie_size / 2) * ax_len_y
            # clip, the fruchterman layout sometimes places below figure
            # if ya < 0: ya = 0
            # if xa < 0: xa = 0
            rect = [xa, ya, pie_size * ax_len_x, pie_size * ax_len_y]
            frac = group_frac.iloc[node_i].values
            pie_axs.append(plt.axes(rect, frameon=False))
            pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
            pie_axs[node_i].set_xticks([])
            pie_axs[node_i].set_yticks([])
            pie_axs[node_i].set_aspect('equal')
            pie_axs[node_i].text(0.5, 0.5, graph_node_label[node_i])
        # Re-draws the last node's pie only to harvest wedge handles for the legend
        # (uses node_i/frac left over from the loop above).
        patches, texts = pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
        labels = list(set(self.true_label))
        plt.legend(patches, labels, loc=(-5, -5), fontsize=6)
        if self.too_big_factor > 0.1:
            is_sub = ' super clusters'
        else:
            is_sub = ' sub clusters'
        ti = 'Reference Group Membership. K=' + str(self.knn) + '. ncomp = ' + str(self.ncomp) + is_sub
        ax.set_title(ti)
        title_list = ["PT using Markov Simulation", "PT on undirected original graph"]
        # Two companion panels: same graph, nodes colored by the two pseudotime variants.
        for i, ax_i in enumerate([ax1, ax2]):
            print("drawing axis", i)
            if i == 0: pt = self.markov_hitting_times
            if i == 1: pt = self.hitting_times
            for e_i, (start, end) in enumerate(edgelist):
                if pt[start] > pt[end]:
                    temp = start
                    start = end
                    end = temp
                ax_i.add_line(
                    lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
                                 color='black', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.5))
                z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
                minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
                if (node_pos[start, 0] < node_pos[end, 0]):
                    direction_arrow = 1
                else:
                    direction_arrow = -1
                maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
                xp = np.linspace(minx, maxx, 500)
                p = np.poly1d(z)
                smooth = p(xp)
                step = 1
                if direction_arrow == 1:
                    ax_i.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250],
                               shape='full', lw=0,
                               length_includes_head=True, head_width=arrow_head_w,
                               color='grey')
                else:
                    ax_i.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
                               smooth[250 - step] - smooth[250], shape='full', lw=0,
                               length_includes_head=True, head_width=arrow_head_w, color='grey')
            # Outline terminal clusters in red; all other nodes get no edge.
            c_edge = []
            l_width = []
            for ei, pti in enumerate(pt):
                if ei in self.terminal_clusters:
                    c_edge.append('red')
                    l_width.append(1.5)
                else:
                    c_edge.append('gray')
                    l_width.append(0.0)
            gp_scaling = 500 / max(group_pop)
            print(gp_scaling, 'gp_scaline')
            group_pop_scale = group_pop * gp_scaling
            ax_i.scatter(node_pos[:, 0], node_pos[:, 1], s=group_pop_scale, c=pt, cmap='viridis_r', edgecolors=c_edge,
                         alpha=1, zorder=3, linewidth=l_width)
            for ii in range(node_pos.shape[0]):
                ax_i.text(node_pos[ii, 0] + 0.5, node_pos[ii, 1] + 0.5, 'c' + str(ii), color='black', zorder=4)
            title_pt = title_list[i]
            ax_i.set_title(title_pt)
def accuracy(self, onevsall=1):
true_labels = self.true_label
Index_dict = {}
PARC_labels = self.labels
N = len(PARC_labels)
n_cancer = list(true_labels).count(onevsall)
n_pbmc = N - n_cancer
for k in range(N):
Index_dict.setdefault(PARC_labels[k], []).append(true_labels[k])
num_groups = len(Index_dict)
sorted_keys = list(sorted(Index_dict.keys()))
error_count = []
pbmc_labels = []
thp1_labels = []
fp, fn, tp, tn, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0
for kk in sorted_keys:
vals = [t for t in Index_dict[kk]]
majority_val = self.func_mode(vals)
if majority_val == onevsall: print('cluster', kk, ' has majority', onevsall, 'with population', len(vals))
if kk == -1:
len_unknown = len(vals)
print('len unknown', len_unknown)
if (majority_val == onevsall) and (kk != -1):
thp1_labels.append(kk)
fp = fp + len([e for e in vals if e != onevsall])
tp = tp + len([e for e in vals if e == onevsall])
list_error = [e for e in vals if e != majority_val]
e_count = len(list_error)
error_count.append(e_count)
elif (majority_val != onevsall) and (kk != -1):
pbmc_labels.append(kk)
tn = tn + len([e for e in vals if e != onevsall])
fn = fn + len([e for e in vals if e == onevsall])
error_count.append(len([e for e in vals if e != majority_val]))
predict_class_array = np.array(PARC_labels)
PARC_labels_array = np.array(PARC_labels)
number_clusters_for_target = len(thp1_labels)
for cancer_class in thp1_labels:
predict_class_array[PARC_labels_array == cancer_class] = 1
for benign_class in pbmc_labels:
predict_class_array[PARC_labels_array == benign_class] = 0
predict_class_array.reshape((predict_class_array.shape[0], -1))
error_rate = sum(error_count) / N
n_target = tp + fn
tnr = tn / n_pbmc
fnr = fn / n_cancer
tpr = tp / n_cancer
fpr = fp / n_pbmc
if tp != 0 or fn != 0: recall = tp / (tp + fn) # ability to find all positives
if tp != 0 or fp != 0: precision = tp / (tp + fp) # ability to not misclassify negatives as positives
if precision != 0 or recall != 0:
f1_score = precision * recall * 2 / (precision + recall)
majority_truth_labels = np.empty((len(true_labels), 1), dtype=object)
for cluster_i in set(PARC_labels):
cluster_i_loc = np.where(np.asarray(PARC_labels) == cluster_i)[0]
true_labels = np.asarray(true_labels)
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = majority_truth
majority_truth_labels = list(majority_truth_labels.flatten())
accuracy_val = [error_rate, f1_score, tnr, fnr, tpr, fpr, precision,
recall, num_groups, n_target]
return accuracy_val, predict_class_array, majority_truth_labels, number_clusters_for_target
def run_PARC(self):
print('input data has shape', self.data.shape[0], '(samples) x', self.data.shape[1], '(features)')
self.ncomp = self.data.shape[1]
pop_list = []
for item in set(list(self.true_label)):
pop_list.append([item, list(self.true_label).count(item)])
# print("population composition", pop_list)
if self.true_label is None:
self.true_label = [1] * self.data.shape[0]
list_roc = []
time_start_total = time.time()
time_start_knn = time.time()
self.knn_struct = self.make_knn_struct()
time_end_knn_struct = time.time() - time_start_knn
# Query dataset, k - number of closest elements (returns 2 numpy arrays)
self.run_subPARC()
run_time = time.time() - time_start_total
print('time elapsed {:.1f} seconds'.format(run_time))
targets = list(set(self.true_label))
N = len(list(self.true_label))
self.f1_accumulated = 0
self.f1_mean = 0
self.stats_df = pd.DataFrame({'jac_std_global': [self.jac_std_global], 'dist_std_local': [self.dist_std_local],
'runtime(s)': [run_time]})
self.majority_truth_labels = []
if len(targets) > 1:
f1_accumulated = 0
f1_acc_noweighting = 0
for onevsall_val in targets:
print('target is', onevsall_val)
vals_roc, predict_class_array, majority_truth_labels, numclusters_targetval = self.accuracy(
onevsall=onevsall_val)
f1_current = vals_roc[1]
print('target', onevsall_val, 'has f1-score of %.2f' % (f1_current * 100))
f1_accumulated = f1_accumulated + f1_current * (list(self.true_label).count(onevsall_val)) / N
f1_acc_noweighting = f1_acc_noweighting + f1_current
list_roc.append(
[self.jac_std_global, self.dist_std_local, onevsall_val] + vals_roc + [numclusters_targetval] + [
run_time])
f1_mean = f1_acc_noweighting / len(targets)
print("f1-score (unweighted) mean %.2f" % (f1_mean * 100), '%')
print('f1-score weighted (by population) %.2f' % (f1_accumulated * 100), '%')
df_accuracy = pd.DataFrame(list_roc,
columns=['jac_std_global', 'dist_std_local', 'onevsall-target', 'error rate',
'f1-score', 'tnr', 'fnr',
'tpr', 'fpr', 'precision', 'recall', 'num_groups',
'population of target', 'num clusters', 'clustering runtime'])
self.f1_accumulated = f1_accumulated
self.f1_mean = f1_mean
self.stats_df = df_accuracy
self.majority_truth_labels = majority_truth_labels
return
def run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823'):
    """Run the Palantir pipeline on the human CD34 AnnData object and plot results.

    Parameters
    ----------
    ad : AnnData with the expression matrix in ``ad.X`` and gene names in
        ``ad.var_names``.
    ncomps : number of components for PCA / diffusion maps.
    knn : neighborhood size for diffusion maps and Palantir.
    tsne : DataFrame of 2-D coordinates; re-indexed in place to cell ids.
    revised_clus : per-cell cluster labels used for the cluster plot.
    start_cell : cell id used as the Palantir early cell.
    """
    # Expression matrix as a DataFrame with Palantir-style cell ids ('c0', 'c1', ...).
    expr_df = pd.DataFrame(ad.X)
    cell_ids = ['c' + str(idx) for idx in expr_df.index]
    expr_df.index = cell_ids
    expr_df.columns = list(ad.var_names)

    # PCA -> diffusion maps -> multiscale space (n_eigs chosen by eigengap).
    pca_projections, _ = palantir.utils.run_pca(expr_df, n_components=ncomps)
    sc.tl.pca(ad, svd_solver='arpack')
    dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
    ms_data = palantir.utils.determine_multiscale_space(dm_res)
    print('ms data', ms_data.shape)

    # Re-index the caller-supplied tSNE layout to the cell ids (mutates `tsne`).
    tsne.index = cell_ids
    str_true_label = pd.Series(revised_clus, index=expr_df.index)
    palantir.plot.plot_cell_clusters(tsne, str_true_label)

    # Pseudotime / branch probabilities from the chosen start cell.
    pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
    palantir.plot.plot_palantir_results(pr_res, tsne, knn, ncomps)

    # MAGIC imputation, then gene-trend plots for two gene panels.
    imp_df = palantir.utils.run_magic_imputation(expr_df, dm_res)
    gene_panel = ['GATA1', 'GATA2', 'ITGA2B']
    gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, gene_panel])
    palantir.plot.plot_gene_trends(gene_trends)
    gene_panel = ['MPO', 'ITGAX', 'IRF8', 'CSF1R', 'IL3RA']
    gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, gene_panel])
    palantir.plot.plot_gene_trends(gene_trends)
    plt.show()
def slalom_human():
    """Factor analysis with slalom on the human CD34 dataset.

    Fits a slalom factor-analysis model using a custom gene-set annotation,
    regresses out cell-cycle-related terms, and returns a copy of the full
    expression matrix in which the annotated genes are replaced by the
    corrected values.
    """
    import os
    import slalom
    from slalom import plotFactors, plotRelevance, plotLoadings, saveFA, dumpFA

    data_dir = '/home/shobi/Trajectory/Datasets/'
    ad = sc.read(
        '/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')  # 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
    expression_df = pd.DataFrame(ad.X)
    expression_df.columns = list(ad.var_names)

    # Load the (transposed) expression matrix with the custom gene-set annotation.
    annoDB = 'custom'  # ''MSigDB'
    annoFile = os.path.join(data_dir, 'geneset.gmt')
    data_slalom = slalom.utils.load_txt(df=expression_df.T, annoFiles=annoFile, annoDBs=annoDB)
    print("Loaded {:d} cells, {:d} genes".format(data_slalom['Y'].shape[0], data_slalom['Y'].shape[1]))
    print("Annotation: {:d} terms".format(len(data_slalom['terms'])))
    print('data terms', data_slalom['terms'])
    print(data_slalom['genes'])
    print(data_slalom['lab'])

    indicator_matrix = data_slalom['I']  # gene-to-pathway assignment (use 'IMSigDB' when loading from hdf)
    log_expression = data_slalom['Y']  # log expression values
    terms = data_slalom['terms']  # names of the annotation terms
    print("terms", terms)
    gene_ids = data_slalom['genes']  # ids of the genes in Y
    print('gene_ids', gene_ids)
    print(indicator_matrix.shape, log_expression.shape, terms.shape)

    # Fit the factor-analysis model: Gaussian noise, 3 dense hidden factors.
    FA = slalom.initFA(log_expression, terms, indicator_matrix, gene_ids=gene_ids, noise='gauss', nHidden=3,
                       minGenes=1)
    FA.train()
    # print diagnostics
    FA.printDiagnostics()
    fig = plotRelevance(FA, madFilter=0)
    # idx=FA.getTermIndex(['G2m checkpoint', 'P53 pathway'])
    # print('idx',idx)

    # Regress out the cell-cycle-related terms, then splice the corrected
    # values back into a copy of the full expression matrix.
    corrected_data = FA.regressOut(
        terms=['M phase', 'Dna replication', 'Chromosome segregation', 'M phase of mitotic cell cycle',
               'Organelle fission'])
    print('corrected_data.shape', corrected_data.shape)
    full_matrix = expression_df.copy()
    print(full_matrix.head)
    annotated_genes = np.array(data_slalom['genes'])[np.sum(data_slalom['I'], axis=1) != 0]
    print('annotated genes', len(annotated_genes), annotated_genes)
    full_matrix[annotated_genes] = corrected_data
    print('full shape ', full_matrix)
    return full_matrix
def main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func = False):
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
ncomps = ncomps# 40 ncomps and 20KNN works well
knn = knn # 30
p0_random_seed =p0_random_seed
print('ncomp =', ncomps, ' knn=', knn, ' randseed=', p0_random_seed)
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(
ad.X) # slalom_human())#(ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
# sc.pp.recipe_zheng17(adata_counts, n_top_genes=1000, log=True) #using this or the .X scaled version is pretty much the same.
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
if run_palantir_func == True:
run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823')
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = ['ITGAX']#['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
#'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
for gene_name in gene_list:# 'GATA2',
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
print('gene name', gene_name, loc_gata)
#print('xpca',norm_df['X_pca'])
true_label = nover_labels # revised_clus
print('p0 random seed', p0_random_seed)
p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.4,
pseudotime=True, path="/home/shobi/Trajectory/Datasets/HumanCD34/", root=1,
root_user=4823, dataset='humanCD34', preserve_disconnected=True, random_seed=p0_random_seed) # *.4
p0.run_PARC()
super_labels = p0.labels
print('super labels', set(super_labels))
ad.obs['parc0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['parc0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA','ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
print('make the p0 matrix plot')
sc.pl.matrixplot(magic_ad, marker_genes, groupby='parc0_label')
'''
sc.tl.rank_genes_groups(ad, groupby='parc0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="parc0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='parc0_label', n_genes = 3) # plot the result
print('show the matrix plot')
'''
super_edges = p0.edgelist_maxout # p0.edgelist
super_pt = p0.scaled_hitting_times # pseudotime pt
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p0.terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/HumanCD34/", pseudotime=True, root=1,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=4823,
x_lazy=0.99, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=p0.terminal_clusters) # *.4super_terminal_cells = tsi_list
p1.run_PARC()
labels = p1.labels
ad.obs['parc1_label'] = [str(i) for i in labels]
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
'''
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=p1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # p1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
loaded_magic_df.head()
for gene_name in ['ITGA2B','IL3RA','ITGAX','IRF8']:#['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
#DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO':'MPO (Mono)', 'CD79B':'CD79B (B)','IRF8':'IRF8 (DC)', 'SPI1':'PU.1','CD34': 'CD34','CSF1R':'CSF1R (pDC. Up then Down in cDC)','IL3RA':'CD123 (pDC)','IRF4': 'IRF4 (pDC)', 'ITGAX':'ITGAX (cDCs)','CSF2RA':'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
#magic_ad=loaded_magic_df[gene_name]
p1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:15])
# print('tsne input size', adata_counts.obsm['X_pca'].shape)
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace( p0, p1, idx)
draw_trajectory_gams(embedding,super_clus_ds_PCA_loc, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p1.revised_super_terminal_clusters,
sub_terminal_clusters=p1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
# final_super_terminal=p0.terminal clusters
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
lineP0 = np.linspace(0, 1, len(set(p0.labels)))
lineP1 = np.linspace(0, 1, len(set(p1.labels)))
# find the single-cell which is nearest to the average-location of a terminal cluster - for just the sub-set of downsampled points in the corresponding PCA-space
new_tsi_list = []
# find the single-cell which is nearest to the average-location of a terminal cluster
# TODO make a knn in the downsampled PCA-space
X_ds = adata_counts.obsm['X_pca'][:, 0:ncomps][idx]
p_ds = hnswlib.Index(space='l2', dim=ncomps)
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for tsi_item in tsi_list:
labelsq, distances = p_ds.knn_query(adata_counts.obsm['X_pca'][:, 0:ncomps][tsi_item, :], k=1)
new_tsi_list.append(labelsq[0][0])
# for old_tsi_i in tsi_list:
# temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
# labelsq, distances = p1.knn_struct.query(.knn_query(temp, k=1)
# print(labelsq[0])
# tsi_list.append(labelsq[0][0])
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
ff, (ax11, ax22) = plt.subplots(1, 2, sharey=True)
col_i = 0
for color, group in zip(line, set(true_label)):
marker_x = marker[random.randint(0, 5)]
where = np.where(np.asarray(true_label) == group)[0]
# ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=plt.cm.jet(color))
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax1.legend(fontsize=6)
ax1.set_title('true labels')
for color, group in zip(lineP0, set(p0.labels)):
where = np.where(super_labels == group)[0]
ax11.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax11.legend(fontsize=6)
ax11.set_title('p0 labels')
for color, group in zip(lineP1, set(p1.labels)):
where = np.where(labels == group)[0]
ax22.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax22.legend(fontsize=6)
ax22.set_title('p1 labels')
ax3.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
ax2.set_title("terminal clus from P0 super clus:" + str(ncomps) + '. knn:' + str(knn)+ 'randseed' +str( p0_random_seed))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
jj = 0
for ti in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(super_labels == ti)[0]
val_pt = [sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax2.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(ti), c='pink', s=18) # PCs HNSW
# ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(p1.labels[tsi_list[jj]]), c='pink',s=18)
ax2.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1]+ 0.05, 'TS' + str(ti), color='black', zorder=3)
# ax3.text(np.mean(x) + 0.05, np.mean(y) + 0.05, 'TS' + str(ti), color='black', zorder=3)
ax2.legend(fontsize=6)
jj = jj + 1
jj = 0
print('')
for ti in p1.terminal_clusters:
print('terminal ti', ti)
loc_i = np.where(np.asarray(labels) == ti)[0]
#print(np.where(labels == ti), np.where(np.asarray(labels) == ti) ,loc_i)
val_pt = [sc_pt_markov[i] for i in loc_i]
print(val_pt)
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1],
label='TS' + str(ti), c='pink', s=18)
ax3.text(embedding[new_tsi_list[jj], 0]+0.05, embedding[new_tsi_list[jj], 1] + 0.05, 'TS' + str(ti), color='black', zorder=3)
jj = jj + 1
draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def mainToy():
    """
    Run the two-level PARC clustering + Markov-pseudotime trajectory pipeline
    on a locally stored dataset selected by the hard-coded ``dataset`` string:
    the Li germline time course, scanpy's Paul15, or one of several synthetic
    "Toy" CSV datasets (bifurcating / multifurcating / cyclic / disconnected).

    Side effects only: reads CSVs from local disk, fits PCA, runs a coarse
    (p0) and fine (p1) PARC pass, then draws TSNE-based trajectory and
    pseudotime figures with matplotlib.

    NOTE(review): original indentation was lost in extraction; nesting below
    is reconstructed from the data flow (the PARC pipeline uses ``ncomps``/
    ``knn``/``df_counts`` that only the Toy branch defines) — confirm against
    the pristine source.
    """
    dataset = "Toy3"  # ""Toy1" # GermlineLi #Toy1
    ## Dataset Germline Li https://zenodo.org/record/1443566#.XZlhEkEzZ5y
    if dataset == "GermlineLine":
        df_expression_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li.csv", 'rt',
                                        delimiter=",")
        print(df_expression_ids.shape)
        # print(df_expression_ids[['cell_id',"week","ACTG2","STK31"]])[10:12]
        df_counts = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_filteredcounts.csv",
                                'rt', delimiter=",")
        df_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_labels.csv", 'rt',
                             delimiter=",")
        # print(df_counts.shape, df_counts.head() ,df_ids.shape)
        # X_counts = df_counts.values
        # print(X_counts.shape)
        # varnames = pd.Categorical(list(df_counts.columns))
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        print(adata_counts.obs)
        sc.pp.filter_cells(adata_counts, min_counts=1)
        print(adata_counts.n_obs)
        sc.pp.filter_genes(adata_counts, min_counts=1)  # only consider genes with more than 1 count
        print(adata_counts.X.shape)
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata_counts, key_n_counts='n_counts_all')
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata_counts.X, flavor='cell_ranger', n_top_genes=1000, log=False)
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # , list(adata_counts.var_names))
        adata_counts = adata_counts[:, filter_result.gene_subset]
        print(adata_counts.X.shape, len(list(adata_counts.var_names)))  # ,list(adata_counts.var_names))
        # subset the genes
        sc.pp.normalize_per_cell(adata_counts)  # renormalize after filtering
        sc.pp.log1p(adata_counts)  # log transform: adata_counts.X = log(adata_counts.X + 1)
        sc.pp.scale(adata_counts)  # scale to unit variance and shift to zero mean
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=20)
        true_label = list(adata_counts.obs['week'])
        sc.pp.neighbors(adata_counts, n_neighbors=10, n_pcs=20)
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='gender_week', legend_loc='right margin', palette='jet')
    ## Dataset Paul15 https://scanpy-tutorials.readthedocs.io/en/latest/paga-paul15.html
    if dataset == 'Paul15':
        root_user = "8Mk"
        adata_counts = sc.datasets.paul15()
        sc.pp.recipe_zheng17(adata_counts)
        sc.tl.pca(adata_counts, svd_solver='arpack')
        true_label = list(adata_counts.obs['paul15_clusters'])  # PAUL
        adata_counts.obs['group_id'] = true_label
        # sc.pp.neighbors(adata_counts, n_neighbors=10)
        # sc.tl.draw_graph(adata_counts)
        # sc.pl.draw_graph(adata_counts, color=['paul15_clusters', 'Cma1'], legend_loc='on data')
    if dataset.startswith('Toy'):
        root_user = 'M1'  # "T1_M1", "T2_M1"] #"T1_M1"
        if dataset == "Toy1":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000.csv",
                                    'rt', delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000_ids.csv",
                                 'rt', delimiter=",")
        if dataset == "Toy2":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000.csv", 'rt',
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000_ids.csv", 'rt',
                                 delimiter=",")
        if dataset == "Toy3":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv", 'rt',
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000_ids.csv", 'rt',
                                 delimiter=",")
        if dataset == "ToyCyclic":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv", 'rt',
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000_ids.csv", 'rt',
                                 delimiter=",")
        if dataset == "Toy4":
            df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv", 'rt',
                                    delimiter=",")
            df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000_ids.csv", 'rt',
                                 delimiter=",")
        # Toy cell ids look like 'C123': strip the leading letter to get a
        # sortable integer so the label rows line up with the count rows.
        df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
        print("shape", df_counts.shape, df_ids.shape)
        df_counts = df_counts.drop('Unnamed: 0', 1)  # drop the CSV's stray index column
        df_ids = df_ids.sort_values(by=['cell_id_num'])
        df_ids = df_ids.reset_index(drop=True)
        true_label = df_ids['group_id']
        adata_counts = sc.AnnData(df_counts, obs=df_ids)
        # sc.pp.recipe_zheng17(adata_counts, n_top_genes=20) not helpful for toy data
        ncomps = 50
        knn = 30
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
        '''
        print(np.flatnonzero(adata_counts.obs['group_id'] == 'T1_M1')[0])
        adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == 'T1_M1')[0]
        sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)#4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') #force-directed layout
        start_dfmap = time.time()
        sc.tl.diffmap(adata_counts, n_comps=ncomps)
        print('time taken to get diffmap given knn', time.time() - start_dfmap)
        sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')#4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
        sc.tl.leiden(adata_counts, resolution=1.0)
        sc.tl.paga(adata_counts, groups='leiden')
        #sc.pl.paga(adata_counts, color=['louvain','group_id'])
        sc.tl.dpt(adata_counts, n_dcs=ncomps)
        sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'], title=['leiden (knn:'+str(knn)+' ncomps:'+str(ncomps)+')', 'group_id (ncomps:'+str(ncomps)+')','pseudotime (ncomps:'+str(ncomps)+')'])
        #X = df_counts.values
        print(palantir.__file__) #location of palantir source code
        #counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
        counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
        #counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv")
        print('counts',counts)
        str_true_label = true_label.tolist()
        str_true_label = [(i[1:]) for i in str_true_label]
        str_true_label = pd.Series(str_true_label, index=counts.index)
        norm_df = counts#palantir.preprocess.normalize_counts(counts)
        pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps)
        dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
        ms_data = palantir.utils.determine_multiscale_space(dm_res) #n_eigs is determined using eigengap
        tsne = palantir.utils.run_tsne(ms_data)
        palantir.plot.plot_cell_clusters(tsne, str_true_label)
        start_cell = 'C108'#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
        print('ms data', ms_data)
        pr_res = palantir.core.run_palantir(ms_data, start_cell, num_waypoints=500,knn=knn)
        palantir.plot.plot_palantir_results(pr_res, tsne)
        plt.show()
        '''
        # clusters = palantir.utils.determine_cell_clusters(pca_projections)
        from sklearn.decomposition import PCA
        pca = PCA(n_components=ncomps)
        pc = pca.fit_transform(df_counts)
        # Coarse (level-0) PARC pass: large too_big_factor keeps super-clusters big.
        p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
                  too_big_factor=0.3,
                  pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + dataset + "/", root=2,
                  root_user=root_user, preserve_disconnected=True, dataset='toy')  # *.4
        p0.run_PARC()
        super_labels = p0.labels
        super_edges = p0.edgelist
        super_pt = p0.scaled_hitting_times  # pseudotime pt
        # 0.05 for p1 toobig
        # HNSW index over the PCA coordinates: used to snap each terminal
        # cluster's mean location back to a concrete single cell.
        p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
        p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
        p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
        p.set_ef(50)
        tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
        for tsi in p0.terminal_clusters:
            loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
            val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
            th_pt = np.percentile(val_pt, 50)  # 50
            # only average cells at/above the cluster's median pseudotime
            loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
            temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
            labelsq, distances = p.knn_query(temp, k=1)
            print(labelsq[0])
            tsi_list.append(labelsq[0][0])
        # Fine (level-1) PARC pass, guided by the super-cluster structure from p0.
        p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
                  too_big_factor=0.05,
                  path="/home/shobi/Trajectory/Datasets/" + dataset + "/", pseudotime=True, root=1,
                  super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
                  super_terminal_cells=tsi_list, root_user=root_user,
                  x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
                  super_terminal_clusters=p0.terminal_clusters)
        # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
        p1.run_PARC()
        labels = p1.labels
        # p1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
        # p1.run_PARC()
        # labels = p1.labels
        print('start tsne')
        n_downsample = 500
        if len(labels) > n_downsample:
            # Downsample to 900 cells (fixed seed for reproducibility) before TSNE.
            # idx = np.random.randint(len(labels), size=900)
            np.random.seed(2357)
            idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
            super_labels = np.asarray(super_labels)[idx]
            labels = list(np.asarray(labels)[idx])
            true_label = list(np.asarray(true_label[idx]))
            sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov[idx]))
            embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
            print('tsne downsampled size', embedding.shape)
        else:
            embedding = TSNE().fit_transform(pc)  # (adata_counts.obsm['X_pca'])
            print('tsne input size', adata_counts.obsm['X_pca'].shape)
            # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
            idx = np.random.randint(len(labels), size=len(labels))
        print('end tsne')
        knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
        draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                             p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                             final_super_terminal=p0.terminal_clusters, sub_terminal_clusters=p1.terminal_clusters,
                             title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
        plt.show()
        draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
                               p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                               final_super_terminal=p0.terminal_clusters,
                               title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
        plt.show()
        # Side-by-side scatter: true group labels (left) vs Markov pseudotime (right).
        num_group = len(set(true_label))
        line = np.linspace(0, 1, num_group)
        f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
        for color, group in zip(line, set(true_label)):
            where = np.where(np.asarray(true_label) == group)[0]
            ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
                        c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
        ax1.legend(fontsize=6)
        ax1.set_title('true labels')
        ax3.set_title("Markov Sim PT ncomps:" + str(pc.shape[1]) + '. knn:' + str(knn))
        ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
        plt.show()
        # draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
        plt.show()
def main_Bcell():
    """
    Trajectory analysis of a B-cell differentiation time course.

    Loads a cufflinks-style ``genes_count_table.txt`` (cells in columns named
    with an ``Ik<hours>h<cellID>_`` pattern), preprocesses it Zheng17-style,
    runs the two-level PARC pipeline (coarse p0 then fine p1), plots marker
    gene expression against pseudotime, and draws TSNE trajectory figures.
    Nested helpers provide the preprocessing recipe plus PAGA and Palantir
    baselines (the baseline calls are commented out below).

    Side effects only: reads from local disk and shows matplotlib figures.
    """
    def run_zheng(adata, min_counts=3, n_top_genes=500, do_log=True):
        # Zheng17-style preprocessing: filter genes, per-cell normalize,
        # keep top highly-variable genes, optionally log1p, then scale.
        sc.pp.filter_genes(adata, min_counts=min_counts)
        # sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
        sc.pp.normalize_per_cell(  # normalize with total UMI count per cell
            adata, key_n_counts='n_counts_all'
        )
        filter_result = sc.pp.filter_genes_dispersion(  # select highly-variable genes
            adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False
        )
        adata = adata[:, filter_result.gene_subset]  # subset the genes
        sc.pp.normalize_per_cell(adata)  # renormalize after filtering
        if do_log: sc.pp.log1p(adata)  # log transform: adata.X = log(adata.X + 1)
        sc.pp.scale(adata)  # scale to unit variance and shift to zero mean
        return adata

    def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
        # Baseline comparison: scanpy PAGA + DPT pseudotime on a copy of the data.
        # print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
        adata_counts = adata_counts1.copy()
        sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
        adata_counts.uns['iroot'] = 33  # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
        sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')  # force-directed layout
        start_dfmap = time.time()
        sc.tl.diffmap(adata_counts, n_comps=ncomps)
        print('time taken to get diffmap given knn', time.time() - start_dfmap)
        sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')  # 4
        sc.tl.draw_graph(adata_counts)
        sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
        sc.tl.leiden(adata_counts, resolution=1.0)
        sc.tl.paga(adata_counts, groups='leiden')
        # sc.pl.paga(adata_counts, color=['louvain','group_id'])
        sc.tl.dpt(adata_counts, n_dcs=ncomps)
        sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
                   title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
                          'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
        sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
        print('dpt format', adata_counts.obs['dpt_pseudotime'])
        plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
        plt.title('PAGA DPT')
        plt.show()

    def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
        # Baseline comparison: Palantir pseudotime on a copy of the data.
        ad = ad1.copy()
        tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
        norm_df_pal = pd.DataFrame(ad.X)
        # print('norm df', norm_df_pal)
        # Palantir expects string cell names; prefix the integer index with 'c'.
        new = ['c' + str(i) for i in norm_df_pal.index]
        norm_df_pal.index = new
        pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
        sc.tl.pca(ad, svd_solver='arpack')
        dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
        ms_data = palantir.utils.determine_multiscale_space(dm_res)  # n_eigs is determined using eigengap
        print('ms data shape: determined using eigengap', ms_data.shape)
        # tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
        tsne.index = new
        # print(type(tsne))
        str_true_label = pd.Series(true_label, index=norm_df_pal.index)
        palantir.plot.plot_cell_clusters(tsne, str_true_label)
        start_cell = 'c23'  # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
        pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
        palantir.plot.plot_palantir_results(pr_res, tsne, ncomps, knn)
        plt.show()

    def find_time(s):
        # Column names embed the timepoint as 'Ik<hours>h...': return the
        # integer number of hours between the 'Ik' marker and the first 'h'.
        start = s.find("Ik") + len("Ik")
        end = s.find("h")
        return int(s[start:end])

    def find_cellID(s):
        # ...and the cell identifier sits between that 'h' and the next '_'.
        start = s.find("h") + len("h")
        end = s.find("_")
        return s[start:end]

    Bcell = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_count_table.txt', sep='\t')
    gene_name = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_attr_table.txt', sep='\t')
    Bcell_columns = [i for i in Bcell.columns]
    # Transpose: rows are genes in the file, AnnData wants cells x genes.
    adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
    Bcell_columns.remove('tracking_id')
    print(gene_name.shape, gene_name.columns)
    Bcell['gene_short_name'] = gene_name['gene_short_name']
    adata_counts.var_names = gene_name['gene_short_name']
    adata_counts.obs['TimeCellID'] = Bcell_columns
    # for i in Bcell_columns:
    #     print(i)
    # adata_counts.var_names_make_unique()
    time_list = [find_time(s) for s in Bcell_columns]
    ID_list = [find_cellID(s) for s in Bcell_columns]
    adata_counts.obs['group_id'] = [str(i) for i in time_list]
    ID_dict = {}
    color_dict = {}
    for j, i in enumerate(list(set(ID_list))):
        ID_dict.update({i: j})
    for j, i in enumerate(list(set(time_list))):
        color_dict.update({i: j})
    print('shape of raw data', adata_counts.shape)
    # sc.pp.filter_genes(adata_counts, min_counts=3)
    # Keep an unfiltered copy so marker-gene expression can be looked up later.
    adata_counts_unfiltered = adata_counts.copy()
    Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    for gene_name in Bcell_marker_gene_list:
        print('gene name', gene_name)
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
    adata_counts = run_zheng(adata_counts, n_top_genes=1000, min_counts=10, do_log=True)
    print('adata counts shape', adata_counts.shape)
    # sc.pp.recipe_zheng17(adata_counts)
    ncomps = 100  # (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
    knn = 20
    random_seed = 1
    sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
    jet = cm.get_cmap('viridis', len(set(time_list)))
    cmap_ = jet(range(len(set(time_list))))
    jet2 = cm.get_cmap('jet', len(set(ID_list)))
    cmap2_ = jet2(range(len(set(ID_list))))
    # color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
    embedding = umap.UMAP(random_state=42, n_neighbors=12, init='random').fit_transform(
        adata_counts.obsm['X_pca'][:, 0:5])
    '''
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    for i in list(set(time_list)):
        loc = np.where(np.asarray(time_list) == i)
        ax1.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
        ax1.set_title('true labels')
        ax1.legend()
    for i in range(embedding.shape[0]):
        ax2.scatter(embedding[i, 0], embedding[i, 1], c='blue', alpha=0.5)
        ax2.text(embedding[i, 0], embedding[i, 1], str(i))
    '''
    '''
    for i, j in enumerate(list(set(ID_list))):
        loc = np.where(np.asarray(ID_list) == j)
        if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
        else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
    '''
    # plt.show()
    true_label = time_list
    # run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
    # run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
    print('input has shape', adata_counts.obsm['X_pca'].shape)
    # Coarse (level-0) PARC pass.
    p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
              too_big_factor=0.3, dataset='bcell',
              pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + 'bcell' + "/", root=2,
              root_user=0, preserve_disconnected=True, random_seed=random_seed)  # *.4#root_user = 34
    p0.run_PARC()
    super_labels = p0.labels
    '''
    umap_init_ = p0.graph_node_pos
    umap_init_ = np.asarray(umap_init_)
    umap_init = np.random.rand(len(super_labels),2)
    for clus_i in range(umap_init_.shape[0]):
        loc_clus_i = np.where(np.asarray(super_labels) == clus_i)[0]
        umap_init[loc_clus_i,0]=umap_init_[clus_i,0]
        umap_init[loc_clus_i, 1] = umap_init_[clus_i, 1]
    '''
    # HNSW index over the PCA coordinates: used to snap each terminal
    # cluster's mean location back to a concrete single cell.
    p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
    p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=100, M=16)
    p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
    p.set_ef(30)
    tsi_list = []  # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
    for tsi in p0.terminal_clusters:
        loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
        val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
        th_pt = np.percentile(val_pt, 50)  # 50
        # only average cells at/above the cluster's median pseudotime
        loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
        temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
        labelsq, distances = p.knn_query(temp, k=1)
        print(labelsq[0])
        tsi_list.append(labelsq[0][0])
    # Fine (level-1) PARC pass, guided by the super-cluster structure from p0.
    p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=1, dist_std_local=0.15, knn=knn,
              too_big_factor=0.05,
              path="/home/shobi/Trajectory/Datasets/" + "bcell/", pseudotime=True, root=1,
              super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
              super_terminal_cells=tsi_list, root_user=0,
              x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
              super_terminal_clusters=p0.terminal_clusters, random_seed=random_seed)
    # in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
    p1.run_PARC()
    labels = p1.labels
    super_edges = p0.edgelist
    print('p1 markov times', p1.markov_hitting_times)
    print('p1 markov times', p1.single_cell_pt_markov)
    # plot gene expression vs. pseudotime
    Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
    for gene_name in Bcell_marker_gene_list:
        # look the gene up in the UNfiltered matrix (it may have been filtered out)
        loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
        print('loc gata', loc_gata)
        magic_ad = adata_counts_unfiltered.X[:, loc_gata]
        p1.get_gene_expression(magic_ad, gene_name)
    n_downsample = 500
    if len(labels) > n_downsample:
        # Downsample to 900 cells (fixed seed for reproducibility) before TSNE.
        # idx = np.random.randint(len(labels), size=900)
        np.random.seed(2357)
        idx = np.random.choice(a=np.arange(0, len(labels)), size=900, replace=False, p=None)
        super_labels = np.asarray(super_labels)[idx]
        labels = list(np.asarray(labels)[idx])
        true_label = list(np.asarray(true_label[idx]))
        sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov[idx]))
        embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
        print('tsne downsampled size', embedding.shape)
    else:
        # embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
        print('tsne input size', adata_counts.obsm['X_pca'].shape)
        # embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
        idx = np.arange(0, len(labels))  # np.random.randint(len(labels), size=len(labels))
        sc_pt_markov = p1.single_cell_pt_markov
    # embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
    knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
    draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
                         p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                         final_super_terminal=p1.revised_super_terminal_clusters,
                         sub_terminal_clusters=p1.terminal_clusters,
                         title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
    plt.show()
    '''
    draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
                           p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
                           final_super_terminal=p0.terminal_clusters,
                           title_str='Markov Hitting Times (polyfit)', ncomp=ncomps)
    plt.show()
    '''
    draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p1.full_graph_shortpath, idx,
                                          adata_counts.obsm['X_pca'][:, 0:ncomps])
    plt.show()
def main():
    """Entry point: run the pipeline for the hard-coded dataset choice."""
    # Supported values: 'Human', 'bcell'; anything else falls back to toy data.
    dataset = 'Human'
    if dataset == 'bcell':
        main_Bcell()
    elif dataset == 'Human':
        main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func=False)
    else:
        mainToy()
# Script entry point.
if __name__ == '__main__':
    main()
|
|
"""
Utilities for interacting with PubChem.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import re
import time
import urllib
import urllib2
from .pug import PugQuery
class PubChem(object):
    """
    Submit queries to PUG and return PUGQuery objects.

    NOTE(review): this client targets the legacy PubChem Power User Gateway
    (PUG) XML API and is Python 2 code (it uses ``urllib2`` and
    ``urllib.urlencode``).

    Parameters
    ----------
    submit : bool, optional (default True)
        Whether to automatically submit PUGQuery queries.
    delay : int, optional (default 10)
        Number of seconds for PUGQuery objects to wait between status
        checks.
    verbose : bool, optional (default False)
        Whether to create PUG queries in verbose mode.
    """
    def __init__(self, submit=True, delay=10, verbose=False):
        self.submit = submit
        self.delay = delay
        self.verbose = verbose
    def get_query(self, query):
        """
        Create a PUG request.

        Parameters
        ----------
        query : str
            PUG query XML.

        Returns
        -------
        PugQuery
            Query object configured with this client's submit/delay/verbose
            settings.
        """
        return PugQuery(query, submit=self.submit, delay=self.delay,
                        verbose=self.verbose)
    def get_records(self, ids, filename=None, sids=False,
                    download_format='sdf', compression='gzip', use_3d=False,
                    n_conformers=1):
        """
        Download records for substances or compounds identified by
        PubChem substance IDs (SIDs) or compound IDs (CIDs).

        Parameters
        ----------
        ids : iterable
            PubChem substance or compound IDs.
        filename : str, optional
            Output filename. If not provided, a temporary file is created.
        sids : bool, optional (default False)
            Whether ids are SIDs. If False, IDs are assumed to be CIDs.
        download_format : str, optional (default 'sdf')
            Download file format.
        compression : str, optional (default 'gzip')
            Compression type for downloaded structures.
        use_3d : bool, optional (default False)
            Whether to query 3D information. If False, 2D information is
            retrieved.
        n_conformers : int, optional (default 1)
            Number of conformers to download if retrieving 3D structures.
        """
        # The template's exact whitespace is part of the request payload;
        # placeholders are filled via %-formatting from `mapping` below.
        query_template = """
        <PCT-Data>
          <PCT-Data_input>
            <PCT-InputData>
              <PCT-InputData_download>
                <PCT-Download>
                  <PCT-Download_uids>
                    <PCT-QueryUids>
                      <PCT-QueryUids_ids>
                        <PCT-ID-List>
                          <PCT-ID-List_db>%(database)s</PCT-ID-List_db>
                          <PCT-ID-List_uids>
                            %(uids)s
                          </PCT-ID-List_uids>
                        </PCT-ID-List>
                      </PCT-QueryUids_ids>
                    </PCT-QueryUids>
                  </PCT-Download_uids>
                  <PCT-Download_format value="%(download_format)s"/>
                  <PCT-Download_compression value="%(compression)s"/>
                  <PCT-Download_use-3d value="%(use_3d)s"/>
                  <PCT-Download_n-3d-conformers>
                    %(n_conformers)s
                  </PCT-Download_n-3d-conformers>
                </PCT-Download>
              </PCT-InputData_download>
            </PCT-InputData>
          </PCT-Data_input>
        </PCT-Data>
        """
        mapping = {}
        # database
        if sids:
            mapping['database'] = 'pcsubstance'
        else:
            mapping['database'] = 'pccompound'
        # download format
        download_formats = ['text-asn', 'binary-asn', 'xml', 'sdf', 'image',
                            'image-small', 'smiles', 'inchi']
        assert download_format in download_formats, (
            'download_format must be one of ' + str(download_formats))
        mapping['download_format'] = download_format
        # compression ('none' is what PUG expects instead of a null value)
        if compression is None:
            compression = 'none'
        compressions = ['none', 'gzip', 'bzip2']
        assert compression in compressions, (
            'compression must be one of ' + str(compressions))
        mapping['compression'] = compression
        # 3D flag is serialized as a lowercase string for the XML attribute
        if use_3d:
            mapping['use_3d'] = 'true'
        else:
            mapping['use_3d'] = 'false'
        # conformers
        mapping['n_conformers'] = n_conformers
        # create XML for each ID
        xml_uids = ''
        for uid in ids:
            xml_uids += ('<PCT-ID-List_uids_E>{}'.format(uid) +
                         '</PCT-ID-List_uids_E>\n')
        mapping['uids'] = xml_uids
        # construct query and fetch the (possibly compressed) result file
        query = self.get_query(query_template % mapping)
        rval = query.fetch(filename, compression=compression)
        return rval
    def get_parent_cids(self, cids):
        """
        Get IDs of parent compounds. Note that the parent IDs are not
        guaranteed to be returned in the same order as the child IDs, so we
        return a set if there is more than one result.

        Parameters
        ----------
        cids : iterable
            PubChem compound IDs (CIDs).

        Returns
        -------
        set or int
            Set of parent CIDs, collapsed to a bare int when exactly one
            parent is found.
        """
        url_template = ('http://pubchem.ncbi.nlm.nih.gov/rest/pug/compound' +
                        '/cid/%(cids)s/cids/TXT?cids_type=parent')
        mapping = {'cids': ','.join([str(cid) for cid in cids])}
        response = urllib2.urlopen(url_template % mapping)
        parents = set()
        for line in response.readlines():
            cid = int(line)
            if cid: # 0 is not a valid ID
                parents.add(cid)
        # Collapse a singleton set to its sole element for convenience.
        if len(parents) == 1:
            parents = parents.pop()
        return parents
    def get_ids_from_assay(self, aid, sids=False, activity_outcome=None):
        """
        Retrieve substance or compound IDs tested in a PubChem BioAssay
        assay.

        Parameters
        ----------
        aid : int
            PubChem BioAssay assay ID (AID).
        sids : bool, optional (default False)
            Whether ids are SIDs. If False, IDs are assumed to be CIDs.
        activity_outcome : str, optional
            If provided, only retrieve records with this activity outcome,
            such as 'active'.

        Returns
        -------
        numpy.ndarray
            Integer array of SIDs or CIDs.
        """
        url_template = ('https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid' +
                        '/%(aid)s/%(database)s/txt')
        mapping = {'aid': aid}
        if sids:
            mapping['database'] = 'sids'
        else:
            mapping['database'] = 'cids'
        # e.g. '?cids_type=active' restricts the listing by outcome
        if activity_outcome is not None:
            url_template += '?{}_type={}'.format(mapping['database'],
                                                 activity_outcome.lower())
        url = url_template % mapping
        response = urllib2.urlopen(url)
        ids = []
        for this in response.readlines():
            this = this.strip()
            if int(this): # 0 is not a valid ID
                ids.append(this)
        ids = np.asarray(ids, dtype=int)
        return ids
    def get_assay_data(self, aids, filename=None, substance_view=True,
                       concise=False, compression='gzip'):
        """
        Download PubChem BioAssay data table.

        Parameters
        ----------
        aids : array_like
            PubChem BioAssay IDs (AIDs).
        filename : str, optional
            Output filename. If not provided, a temporary file is created.
        substance_view : bool, optional (default True)
            Whether to group results by substance. If False, results will be
            grouped by compound. The default (True) is recommended when
            retrieving data from a single assay.
        compression : str, optional (default 'gzip')
            Compression type for assay data.
        concise : bool, optional (default False)
            Whether to return the concise data table. If False, the complete
            data table is retrieved.
        """
        # %(dataset)s and %(group_by)s are themselves XML fragments built
        # below before the template is filled in.
        query_template = """
        <PCT-Data>
          <PCT-Data_input>
            <PCT-InputData>
              <PCT-InputData_query>
                <PCT-Query>
                  <PCT-Query_type>
                    <PCT-QueryType>
                      <PCT-QueryType_bas>
                        <PCT-QueryAssayData>
                          <PCT-QueryAssayData_output value="csv">4</PCT-QueryAssayData_output>
                          <PCT-QueryAssayData_aids>
                            <PCT-QueryUids>
                              <PCT-QueryUids_ids>
                                <PCT-ID-List>
                                  <PCT-ID-List_db>pcassay</PCT-ID-List_db>
                                  <PCT-ID-List_uids>
                                    %(aids)s
                                  </PCT-ID-List_uids>
                                </PCT-ID-List>
                              </PCT-QueryUids_ids>
                            </PCT-QueryUids>
                          </PCT-QueryAssayData_aids>
                          %(dataset)s
                          <PCT-QueryAssayData_focus>
                            <PCT-Assay-FocusOption>
                              %(group_by)s
                            </PCT-Assay-FocusOption>
                          </PCT-QueryAssayData_focus>
                          <PCT-QueryAssayData_compression value="%(compression)s"/>
                        </PCT-QueryAssayData>
                      </PCT-QueryType_bas>
                    </PCT-QueryType>
                  </PCT-Query_type>
                </PCT-Query>
              </PCT-InputData_query>
            </PCT-InputData>
          </PCT-Data_input>
        </PCT-Data>
        """
        # Build the grouping fragment (numeric codes are PUG enum values).
        group_by = ('<PCT-Assay-FocusOption_group-results-by value="{}">{}' +
                    '</PCT-Assay-FocusOption_group-results-by>')
        if substance_view:
            group_by = group_by.format('substance', 4)
        else:
            group_by = group_by.format('compound', 0)
        dataset = ('<PCT-QueryAssayData_dataset value="{}">{}' +
                   '</PCT-QueryAssayData_dataset>')
        if concise:
            dataset = dataset.format('concise', 1)
        else:
            dataset = dataset.format('complete', 0)
        aid_xml = ''
        for aid in np.atleast_1d(aids):
            aid_xml += ('<PCT-ID-List_uids_E>{}'.format(aid) +
                        '</PCT-ID-List_uids_E>')
        mapping = {'group_by': group_by, 'dataset': dataset, 'aids': aid_xml,
                   'compression': compression}
        query = self.get_query(query_template % mapping)
        rval = query.fetch(filename, compression=compression)
        return rval
    def id_exchange(self, ids, source=None, operation_type='same',
                    output_type='cid'):
        """
        Use the PubChem Identifier exchange service.

        Currently only supports mapping from Registry IDs (e.g. ChEMBL IDs) to
        PubChem IDs.

        Parameters
        ----------
        ids : iterable
            Input identifiers.
        source : str, optional
            Input source. If None, it will be inferred from ids (if possible).
        operation_type : str, optional (default 'same')
            Operation type. Defaults to exact matches.
        output_type : str, optional (default 'cid')
            Output type. Defaults to PubChem CIDs.

        Returns
        -------
        dict
            Maps each input ID to its matched PubChem ID, or to None when
            no match was returned.
        """
        query_template = """
        <PCT-Data>
          <PCT-Data_input>
            <PCT-InputData>
              <PCT-InputData_query>
                <PCT-Query>
                  <PCT-Query_type>
                    <PCT-QueryType>
                      <PCT-QueryType_id-exchange>
                        <PCT-QueryIDExchange>
                          <PCT-QueryIDExchange_input>
                            <PCT-QueryUids>
                              <PCT-QueryUids_source-ids>
                                <PCT-RegistryIDs>
                                  <PCT-RegistryIDs_source-name>%(source)s</PCT-RegistryIDs_source-name>
                                  <PCT-RegistryIDs_source-ids>
                                    %(source_ids)s
                                  </PCT-RegistryIDs_source-ids>
                                </PCT-RegistryIDs>
                              </PCT-QueryUids_source-ids>
                            </PCT-QueryUids>
                          </PCT-QueryIDExchange_input>
                          <PCT-QueryIDExchange_operation-type
                              value="%(operation_type)s"/>
                          <PCT-QueryIDExchange_output-type value="%(output_type)s"/>
                          <PCT-QueryIDExchange_output-method value="file-pair"/>
                          <PCT-QueryIDExchange_compression value="gzip"/>
                        </PCT-QueryIDExchange>
                      </PCT-QueryType_id-exchange>
                    </PCT-QueryType>
                  </PCT-Query_type>
                </PCT-Query>
              </PCT-InputData_query>
            </PCT-InputData>
          </PCT-Data_input>
        </PCT-Data>
        """
        ids = np.atleast_1d(ids)
        # Duplicate inputs would make the returned mapping ambiguous.
        if np.unique(ids).size != len(ids):
            raise ValueError('Source IDs must be unique.')
        if source is None:
            source = self.guess_source(ids[0])
            if source is None:
                raise ValueError('Cannot guess identifier source.')
        mapping = {'source': source, 'operation_type': operation_type,
                   'output_type': output_type}
        source_ids = []
        for source_id in ids:
            id_xml = ('<PCT-RegistryIDs_source-ids_E>{}'.format(source_id) +
                      '</PCT-RegistryIDs_source-ids_E>\n')
            source_ids.append(id_xml)
        mapping['source_ids'] = ''.join(source_ids)
        # construct query
        query = self.get_query(query_template % mapping)
        rval = query.fetch(compression='gzip')
        # identify matched and unmatched IDs; the response is a whitespace-
        # separated (source, destination) pair per line
        id_map = {}
        for line in rval.splitlines():
            source, dest = line.split()
            try:
                dest = int(dest) # try to convert to an int
            except ValueError:
                pass
            if source in id_map and id_map[source] != dest:
                raise ValueError('Nonidentical duplicate mapping.')
            id_map[source] = dest
        # unmatched inputs are mapped to None so callers can tell them apart
        for source_id in ids:
            if source_id not in id_map:
                id_map[source_id] = None
        return id_map
    @staticmethod
    def guess_source(identifier):
        """
        Guess the source for an identifier.

        Only ChEMBL and ZINC prefixes are recognized; anything else
        returns None.

        Parameters
        ----------
        identifier : str
            Identifier.
        """
        source = None
        if str(identifier).startswith('CHEMBL'):
            source = 'ChEMBL'
        elif str(identifier).startswith('ZINC'):
            source = 'ZINC'
        return source
    def structure_search(self, structure, structure_format='smiles'):
        """
        Search PubChem for identical structure and return matching CID.

        Returns None when the search could not be submitted or when polling
        ends without a match.

        Parameters
        ----------
        structure : str
            SMILES or SDF query.
        structure_format : str, optional (default 'smiles')
            Structure format. Can be either 'smiles' or 'sdf'.
        """
        query_template = ('http://pubchem.ncbi.nlm.nih.gov/rest/pug/compound' +
                          '/identity/{}/XML')
        status_template = ('http://pubchem.ncbi.nlm.nih.gov/rest/pug' +
                           '/compound/listkey/{}/cids/XML')
        request_id = None
        post_data = urllib.urlencode({structure_format: structure})
        req = urllib2.Request(query_template.format(structure_format))
        req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = urllib2.urlopen(req, data=post_data)
        # The submission response carries a ListKey used to poll for results.
        # NOTE(review): non-raw regex literals -- '\d' works, but raw strings
        # would be preferred (DeprecationWarning under Python 3).
        for line in response.readlines():
            search = re.search('<ListKey>(\d+)</ListKey>', line)
            if search is not None:
                request_id = search.groups()[0]
        if request_id is None:
            return None
        cid = None
        # Poll the listkey endpoint until a CID appears or the server
        # reports an HTTP error (e.g. the key expired / search failed).
        while True:
            try:
                response = urllib2.urlopen(
                    status_template.format(request_id))
            except urllib2.HTTPError:
                break
            for line in response.readlines():
                search = re.search('<CID>(\d+)</CID>', line)
                if search is not None:
                    cid = int(search.groups()[0])
            if cid is not None:
                break
            time.sleep(self.delay)
        return cid
|
|
import json
import sys
import os
from logging import getLogger
from pathlib import Path
import cv2
# Keep OpenCV single-threaded and CPU-only so it does not contend with
# torch DataLoader worker processes.
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import click
import torch
import pandas as pd
import numpy as np
# todo: make this better
sys.path.append('./') # aa
from aa.pytorch.data_provider import ReadingImageProvider, RawImageType
from aa.pytorch.transforms import get_flips_colors_augmentation
import aa.pytorch.trainer
import aa.cli.sp5r2.util as u
logger = getLogger('aa')
# Touch the GPU once at import time so a missing/broken CUDA setup fails
# immediately instead of mid-training.
torch.randn(10).cuda()
class RawImageTypePad(RawImageType):
    """RawImageType variant that reflect-pads every image border."""

    def finalyze(self, data):
        # Keep the same 22-pixel reflected border the pipeline expects.
        pad = 22
        return self.reflect_border(data, pad)
def train(conf, args_fold):
    """Train the model for a single cross-validation fold.

    ``conf`` is the parsed experiment configuration; ``args_fold`` is the
    index of the fold to train -- all other folds are skipped.
    """
    paths = {
        'masks': conf.train_data_refined_dir_masks,
        'images': conf.train_data_refined_dir_ims,
    }
    # Mask files share the image basename but carry a .tif extension.
    fn_mapping = {
        'masks': lambda name: os.path.splitext(name)[0] + '.tif',
    }
    # NOTE(review): RawImageTypePad is defined above but plain RawImageType
    # is passed here -- confirm whether the padded variant was intended.
    ds = ReadingImageProvider(RawImageType,
                              paths,
                              fn_mapping,
                              image_suffix='',
                              num_channels=conf.num_channels)
    # Validation samples come from `ds` via val_idx; no separate dataset.
    val_ds = None
    logger.info(f'Total Dataset size: {len(ds)}')
    fn_val_splits = conf.folds_save_path
    folds = u.get_csv_folds(fn_val_splits, ds.im_names)
    for fold, (train_idx, val_idx) in enumerate(folds):
        # Only the fold requested on the command line is trained.
        if int(args_fold) != fold:
            continue
        logger.info(f'Fold idx: {args_fold}')
        logger.info(f'Train size: {len(train_idx)}')
        logger.info(f'Val size: {len(val_idx)}')
        transforms = get_flips_colors_augmentation()
        aa.pytorch.trainer.train(ds,
                                 fold,
                                 train_idx,
                                 val_idx,
                                 conf,
                                 transforms=transforms,
                                 val_ds=val_ds)
@click.command()
@click.option('-c', '--config_path', type=str)
@click.option('-f', '--fold', type=int, default=0)
def main(config_path, fold):
    """CLI entry point: load config, attach file logging, train one fold."""
    conf = u.load_config(config_path)
    u.set_filehandler(conf)
    logger.info('ARGV: {}'.format(str(sys.argv)))
    train(conf, fold)
if __name__ == '__main__':
    # Configure console logging before click dispatches to main().
    u.set_logger()
    main()
|
|
import unittest
from unittest.mock import patch
from unittest import mock
import tensorflow as tf
import numpy as np
import numpy.testing as npt
from laplace.curvature import LayerMap, DiagFisher, BlockDiagFisher, KFAC
from tests.testutils.tensorflow import ModelMocker
class LayerMapTest(unittest.TestCase):
    """Behavioural tests for LayerMap built on mocked Keras models."""

    @patch('tensorflow.keras.layers.Dense', autospec=True)
    @patch('tensorflow.keras.models.Model', autospec=True)
    def setUp(self, model, layer) -> None:
        # Assemble a one-layer model whose variables mimic the names and
        # shapes Keras produces for a Dense(1) layer on a 1-d input.
        layer.name = 'dense'
        kernel = mock.create_autospec(tf.Variable)
        kernel.name = 'dense/kernel:0'
        kernel.shape = [1, 1]
        bias = mock.create_autospec(tf.Variable)
        bias.name = 'dense/bias:0'
        bias.shape = [1]
        layer.weights = [kernel, bias]
        model.layers = [layer]
        self.dense_model = model

    @patch('tensorflow.keras.layers.Dense', autospec=True)
    @patch('tensorflow.keras.models.Model', autospec=True)
    def test_considers_dense_layers(self, model, layer):
        layer.name = 'dense'
        model.layers = [layer]
        lmap = LayerMap(model)
        # Dense layers carry parameters, so they are curvature-eligible.
        self.assertTrue(lmap.is_curvature_eligible('dense'))

    @patch('tensorflow.keras.layers.Conv2D', autospec=True)
    @patch('tensorflow.keras.models.Model', autospec=True)
    def test_considers_conv_layers(self, model, layer):
        layer.name = 'conv'
        model.layers = [layer]
        lmap = LayerMap(model)
        # Convolutional layers are curvature-eligible as well.
        self.assertTrue(lmap.is_curvature_eligible('conv'))

    @patch('tensorflow.keras.layers.MaxPooling2D', autospec=True)
    @patch('tensorflow.keras.models.Model', autospec=True)
    def test_does_not_consider_pooling_layers(self, model, layer):
        layer.name = 'pooling'
        model.layers = [layer]
        lmap = LayerMap(model)
        # Parameter-free layers must be excluded.
        self.assertFalse(lmap.is_curvature_eligible('pooling'))

    def test_checks_for_bias(self):
        lmap = LayerMap(self.dense_model)
        self.assertTrue(lmap.has_bias('dense'))

    def test_it_exposes_bias_weights(self):
        lmap = LayerMap(self.dense_model)
        self.assertIn('bias', lmap.get_bias_weights('dense')['name'])

    def test_it_exposes_kernel_weights(self):
        lmap = LayerMap(self.dense_model)
        self.assertIn('kernel', lmap.get_kernel_weights('dense')['name'])

    def test_it_exposes_layer_weights(self):
        lmap = LayerMap(self.dense_model)
        weights = lmap.get_layer_weights('dense')
        self.assertIn('kernel', weights)
        self.assertIn('bias', weights)

    def test_it_exposes_layer_shape(self):
        lmap = LayerMap(self.dense_model)
        # One kernel row plus the bias row, by a single output column.
        self.assertEqual([2, 1], lmap.get_layer_shape('dense'))
class DiagFisherTest(unittest.TestCase):
    """Tests for the diagonal Fisher accumulator (state update and inverse)."""
    def test_update_first_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        # Flat list of per-variable gradients: kernel then bias per layer.
        gradients = [
            [[2., 2., 2.], [2., 2., 2.]],
            [2., 2., 2.],
            [[3., 3.], [3., 3.], [3., 3.]],
            [1., 1.]
        ]
        diagfisher = DiagFisher(model)
        # when
        diagfisher.update(gradients, 1)
        # then
        # Starting from an empty state, entries are the squared gradients
        # with the bias row appended below the kernel rows.
        expected_state = {
            'dense': [[4., 4., 4.], [4., 4., 4.], [4., 4., 4.]],
            'dense_1': [[9., 9.], [9., 9.], [9., 9.], [1., 1.]]
        }
        self.assertEqual(len(diagfisher.state), 2)
        for lname, litem in diagfisher.state.items():
            npt.assert_allclose(litem.numpy(), expected_state[lname])
    def test_update_second_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        gradients = [
            [[2., 2., 2.], [2., 2., 2.]],
            [2., 2., 2.],
            [[3., 3.], [3., 3.], [3., 3.]],
            [1., 1.]
        ]
        diagfisher = DiagFisher(model)
        # Pre-seed the state to verify the update accumulates (adds) rather
        # than overwrites.
        diagfisher.state = {
            'dense': [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]],
            'dense_1': [[1., 1.], [1., 1.], [1., 1.], [1., 1.]]
        }
        # when
        diagfisher.update(gradients, 1)
        # then
        expected_state = {
            'dense': np.array([[5., 5., 5.], [5., 5., 5.], [5., 5., 5.]]),
            'dense_1': np.array([[10., 10.], [10., 10.], [10., 10.], [2., 2.]])
        }
        self.assertEqual(len(diagfisher.state), 2)
        for lname, litem in diagfisher.state.items():
            npt.assert_allclose(litem.numpy(), expected_state[lname])
    def test_invert(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        diagfisher = DiagFisher(model)
        diagfisher.state = {
            'dense': np.array([[4., 4., 4.], [4., 4., 4.], [4., 4., 4.]]),
            'dense_1': np.array([[9., 9.], [9., 9.], [9., 9.], [1., 1.]])
        }
        # when
        inverse = diagfisher.invert(tau=1., n=1.)
        # then
        # Expected values match 1/sqrt(state + tau) elementwise, e.g.
        # 1/sqrt(5) ~= 0.4472136 -- TODO confirm against implementation.
        expected_inverse = {
            'dense': np.array([[0.4472136, 0.4472136, 0.4472136],
                               [0.4472136, 0.4472136, 0.4472136],
                               [0.4472136, 0.4472136, 0.4472136]]),
            'dense_1': np.array([[0.31622777, 0.31622777],
                                 [0.31622777, 0.31622777],
                                 [0.31622777, 0.31622777],
                                 [0.70710678, 0.70710678]]),
        }
        self.assertEqual(len(inverse), 2)
        for lname, litem in inverse.items():
            npt.assert_allclose(litem.numpy(), expected_inverse[lname])
class BlockDiagFisherTest(unittest.TestCase):
    """Tests for the block-diagonal Fisher accumulator (one block per layer)."""
    def test_update_first_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        # Flat list of per-variable gradients: kernel then bias per layer.
        gradients = [
            [[2., 2., 2.], [2., 2., 2.]],
            [2., 2., 2.],
            [[3., 3.], [3., 3.], [3., 3.]],
            [1., 1.]
        ]
        blockfisher = BlockDiagFisher(model)
        # when
        blockfisher.update(gradients, 1)
        # then
        # Each block is the outer product of the flattened layer gradient
        # (kernel concatenated with bias).
        expected_state = {
            'dense': np.ones([9, 9])*4,
            'dense_1': np.repeat([[9., 9., 9., 9., 9., 9., 3., 3.]], 8, axis=0)
        }
        expected_state['dense_1'][6] = expected_state['dense_1'][6]/3
        expected_state['dense_1'][7] = expected_state['dense_1'][7]/3
        self.assertEqual(len(blockfisher.state), 2)
        for lname, litem in blockfisher.state.items():
            npt.assert_allclose(litem.numpy(), expected_state[lname])
    def test_update_second_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        gradients = [
            [[2., 2., 2.], [2., 2., 2.]],
            [2., 2., 2.],
            [[3., 3.], [3., 3.], [3., 3.]],
            [1., 1.]
        ]
        blockfisher = BlockDiagFisher(model)
        # Pre-seed the state to verify the update accumulates.
        blockfisher.state = {
            'dense': np.ones([9, 9])*1,
            'dense_1': np.repeat([[1., 1., 1., 1., 1., 1., 1., 1.]], 8, axis=0)
        }
        # when
        blockfisher.update(gradients, 1)
        # then
        expected_state = {
            'dense': np.ones([9, 9]) * 5,
            'dense_1': np.repeat([[10., 10., 10., 10., 10., 10., 4., 4.]], 8, axis=0)
        }
        expected_state['dense_1'][6] = [4., 4., 4., 4., 4., 4., 2., 2.]
        expected_state['dense_1'][7] = [4., 4., 4., 4., 4., 4., 2., 2.]
        self.assertEqual(len(blockfisher.state), 2)
        for lname, litem in blockfisher.state.items():
            npt.assert_allclose(litem.numpy(), expected_state[lname])
    def test_invert(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        blockfisher = BlockDiagFisher(model)
        blockfisher.state = {
            'dense': np.ones([9, 9]) * 4,
            'dense_1': np.repeat([[9., 9., 9., 9., 9., 9., 3., 3.]], 8, axis=0)
        }
        blockfisher.state['dense_1'][6] = blockfisher.state['dense_1'][6] / 3
        blockfisher.state['dense_1'][7] = blockfisher.state['dense_1'][7] / 3
        # when
        inverse = blockfisher.invert(tau=1., n=1.)
        # then
        # Golden values captured from a reference run of the implementation.
        expected_inverse = {
            'dense': np.array([
                [ 0.89189196, -0.10810812, -0.10810812, -0.10810812, -0.10810812, -0.10810813, -0.10810812, -0.10810813, -0.10810812],
                [-0.10810812,  0.89189196, -0.10810812, -0.10810812, -0.10810812, -0.10810813, -0.10810812, -0.10810813, -0.10810812],
                [-0.10810812, -0.10810812,  0.89189196, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811],
                [-0.10810812, -0.10810812, -0.10810811,  0.89189196, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811],
                [-0.10810812, -0.10810812, -0.10810811, -0.10810811,  0.89189196, -0.10810811, -0.10810811, -0.10810811, -0.10810811],
                [-0.10810813, -0.10810813, -0.10810811, -0.10810811, -0.10810811,  0.89189196, -0.10810811, -0.10810811, -0.10810811],
                [-0.10810812, -0.10810812, -0.10810811, -0.10810811, -0.10810811, -0.10810811,  0.89189196, -0.10810811, -0.10810811],
                [-0.10810813, -0.10810813, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811,  0.89189196, -0.10810811],
                [-0.10810812, -0.10810812, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811, -0.10810811,  0.8918919]
            ], dtype=np.float32),
            'dense_1': np.array([
                [0.8421055, -0.15789483, -0.15789482, -0.1578948, -0.15789478, -0.15789479, -0.05263155, -0.05263155],
                [-0.15789483, 0.8421052, -0.15789473, -0.1578947, -0.15789469, -0.1578947, -0.05263159, -0.05263159],
                [-0.15789482, -0.15789473, 0.84210527, -0.15789473, -0.15789473, -0.15789473, -0.0526316, -0.0526316],
                [-0.15789479, -0.1578947, -0.15789473, 0.84210527, -0.15789473, -0.15789475, -0.0526316, -0.0526316],
                [-0.15789478, -0.15789469, -0.15789473, -0.15789473, 0.8421052, -0.15789473, -0.0526316, -0.0526316],
                [-0.15789479, -0.1578947, -0.15789473, -0.15789475, -0.15789473, 0.84210527, -0.0526316, -0.05263161],
                [-0.05263154, -0.05263159, -0.0526316, -0.0526316, -0.0526316, -0.0526316, 0.98245627, -0.01754383],
                [-0.05263154, -0.05263159, -0.0526316, -0.0526316, -0.0526316, -0.05263161, -0.01754383, 0.9824563]
            ], dtype=np.float32)
        }
        self.assertEqual(len(inverse), 2)
        for lname, litem in inverse.items():
            npt.assert_allclose(litem.numpy(), expected_inverse[lname], rtol=1e-5, atol=1e-6)
class KFACTest(unittest.TestCase):
    """Tests for the KFAC approximation (per-layer Kronecker factors Q and H)."""
    def test_update_first_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        kfac = KFAC(model)
        # Recorded forward-pass inputs per layer (one sample each).
        kfac._layer_inputs['dense'] = [[1., 1.]]
        kfac._layer_inputs['dense_1'] = [[3., 3., 3.]]
        grads_preactivations = {
            'dense': np.array([[18., 18., 18.]]),
            'dense_1': np.array([[9., 9.]]),
        }
        # when
        kfac.update(grads_preactivations, 1)
        # then
        # Per layer: first factor from the (bias-augmented) inputs, second
        # factor from the pre-activation gradients.
        expected_state = {
            'dense': [
                np.ones([3, 3]),
                np.ones([3, 3]) * 324
            ],
            'dense_1': [
                np.array([[9., 9., 9., 3.], [9., 9., 9., 3.], [9., 9., 9., 3.], [3., 3., 3., 1.]]),
                np.ones([2, 2]) * 81
            ]
        }
        self.assertEqual(len(kfac.state), 2)
        for lname, litem in kfac.state.items():
            Q, H = litem[0].numpy(), litem[1].numpy()
            npt.assert_allclose(Q, expected_state[lname][0])
            npt.assert_allclose(H, expected_state[lname][1])
    def test_update_second_iteration(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        kfac = KFAC(model)
        kfac._layer_inputs['dense'] = [[1., 1.]]
        kfac._layer_inputs['dense_1'] = [[3., 3., 3.]]
        grads_preactivations = {
            'dense': np.array([[18., 18., 18.]]),
            'dense_1': np.array([[9., 9.]]),
        }
        # Pre-seed both factors to verify the update accumulates.
        kfac.state = {
            'dense': [np.ones([3, 3]), np.ones([3, 3])],
            'dense_1': [np.ones([4, 4]), np.ones([2, 2])]
        }
        # when
        kfac.update(grads_preactivations, 1)
        # then
        expected_state = {
            'dense': [
                np.ones([3, 3]) * 2,
                np.ones([3, 3]) * 325
            ],
            'dense_1': [
                np.array([[10., 10., 10., 4.], [10., 10., 10., 4.], [10., 10., 10., 4.], [4., 4., 4., 2.]]),
                np.array([[82., 82.], [82., 82.]])
            ]
        }
        self.assertEqual(len(kfac.state), 2)
        for lname, litem in kfac.state.items():
            Q, H = litem[0].numpy(), litem[1].numpy()
            npt.assert_allclose(Q, expected_state[lname][0])
            npt.assert_allclose(H, expected_state[lname][1])
    def test_invert(self):
        # given
        model = ModelMocker.mock_model()
        layer1 = ModelMocker.mock_layer('dense', (2, 3))
        layer2 = ModelMocker.mock_layer('dense_1', (3, 2))
        model.layers = [layer1, layer2]
        kfac = KFAC(model)
        kfac.state = {
            'dense': [
                np.ones([3, 3]),
                np.ones([3, 3]) * 324
            ],
            'dense_1': [
                np.array([[9., 9., 9., 3.], [9., 9., 9., 3.], [9., 9., 9., 3.], [3., 3., 3., 1.]]),
                np.array([[81., 81.], [81., 81.]])
            ]
        }
        # when
        inv = kfac.invert(tau=1., n=1.)
        # then
        # Golden triangular factors captured from a reference run.
        expected_inv = {
            'dense': [
                np.array([[0.8660254, 0., 0.],
                          [-0.28867513, 0.8164966, 0.],
                          [-0.2886751, -0.40824828, 0.70710677]]),
                np.array([[0.81670785, 0., 0.],
                          [-0.40772474, 0.7076513, 0.],
                          [-0.4077247, -0.70547396, 0.05546965]])
            ],
            'dense_1': [
                np.array([[0.8304549, 0., 0., 0.],
                          [-0.3737047, 0.7416198, 0., 0.],
                          [-0.37370473, -0.6067798, 0.42640147, 0.],
                          [-0.12456819, -0.20225996, -0.6396021, 0.7071068]]),
                np.array([[0.7092719, 0.],
                          [-0.7006222, 0.11043184]]),
            ]
        }
        self.assertEqual(len(inv), 2)
        for lname, factors in inv.items():
            Q, H = factors[0].numpy(), factors[1].numpy()
            npt.assert_allclose(Q, expected_inv[lname][0], rtol=1e-5, atol=1e-6)
            npt.assert_allclose(H, expected_inv[lname][1], rtol=1e-5, atol=1e-6)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
"""
Module that provide a classifier template to train a model on embeddings in
order to predict the family of a given protein.
The model is built with pytorch_lightning, a wrapper on top of
pytorch (similar to keras with tensorflow)
"""
from biodatasets import load_dataset
from deepchain.models.utils import (
dataloader_from_numpy,
)
from deepchain.models.torch_model import TorchModel
import torch
import torch.nn.functional as F
from torch import nn
from pytorch_lightning.metrics.functional import accuracy
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from typing import Tuple
# Our custom protein family prediction MLP class
class FamilyMLP(TorchModel):
    """Multi-layer perceptron for protein-family classification.

    The network maps a fixed-size embedding to ``output_shape`` raw scores
    (logits) -- no final activation is applied inside ``_model`` -- so the
    loss functions chosen below must accept logits.

    Args:
        input_shape: dimensionality of the input embedding (default 768).
        output_shape: number of target classes; 1 selects the binary setup.
        **kwargs: forwarded to ``TorchModel``.
    """
    def __init__(self, input_shape: int = 768, output_shape: int = 1, **kwargs):
        super().__init__(**kwargs)
        # Activation *class* (not an instance) used to map logits to
        # probabilities downstream; kept as-is for backward compatibility.
        self.output = nn.Softmax if output_shape > 1 else nn.Sigmoid
        # Bug fix: the model outputs raw logits, so the binary case must use
        # binary_cross_entropy_with_logits -- plain binary_cross_entropy
        # expects probabilities in [0, 1] and is wrong on logits.
        self.loss = (
            F.cross_entropy if output_shape > 1
            else F.binary_cross_entropy_with_logits
        )
        self._model = nn.Sequential(
            nn.Linear(input_shape, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(256, output_shape)
        )
    def forward(self, x):
        """Run the MLP; accepts numpy arrays as well as tensors."""
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x).float()
        return self._model(x)
    def training_step(self, batch, batch_idx):
        """training_step defines the train loop. It is independent of forward."""
        x, y = batch
        y_hat = self._model(x)
        # cross_entropy expects integer class indices.
        # NOTE(review): the binary path (output_shape == 1) would also need
        # float targets shaped like y_hat -- confirm before using it.
        y = y.long()
        loss = self.loss(y_hat, y)
        self.log("train_loss", loss, prog_bar=True)
        return loss
    def validation_step(self, batch, batch_idx):
        """Compute validation loss and accuracy for one batch."""
        x, y = batch
        y_hat = self._model(x)
        y = y.long()
        loss = self.loss(y_hat, y)
        # argmax over the class dimension; assumes the multi-class setup.
        preds = torch.max(y_hat, dim=1)[1]
        acc = accuracy(preds, y)
        # Calling self.log will surface up scalars for you in TensorBoard
        self.log('val_loss', loss, prog_bar=True)
        self.log('val_acc', acc, prog_bar=True)
        return loss
    def save_model(self, path: str):
        """Save entire model with torch"""
        torch.save(self._model, path)
# Load pfam dataset (force=True re-downloads/refreshes the local copy).
pfam_dataset = load_dataset("pfam-32.0", force=True)
_, y = pfam_dataset.to_npy_arrays(input_names=["sequence"], target_names=["family_id"])
# Get embeddings and filter on available embeddings
embeddings = pfam_dataset.get_embeddings("sequence", "protbert", "mean")
available_embeddings_len = len(embeddings)
print(f"We take only the first {available_embeddings_len} sequences as we have only their embeddings available.")
# Keep only targets with a matching embedding (assumes embeddings align with
# the first rows of the sequence array -- TODO confirm).
y = y[0][:available_embeddings_len]
# Process targets
unique_classes = np.unique(y)
num_classes = len(unique_classes)
print(f"There are {num_classes} unique classes for family_id.")
# Encode target classes (families)
le = preprocessing.LabelEncoder()
# NOTE(review): `fit` returns the encoder itself, so `labels` is the fitted
# LabelEncoder instance, not an array of labels -- the name is misleading.
labels = le.fit(unique_classes)
targets = le.transform(y)
print(f"Targets: {targets.shape}, {targets}, {len(labels.classes_)} classes")
# Build train/validation dataloaders from a 70/30 split.
X_train, X_val, y_train, y_val = train_test_split(embeddings, targets, test_size=0.3)
train_dataloader = dataloader_from_numpy(X_train, y_train, batch_size=256)
val_dataloader = dataloader_from_numpy(X_val, y_val, batch_size=256)
# Fit the model and save it
mlp = FamilyMLP(input_shape=X_train.shape[1], output_shape=num_classes)
mlp.fit(train_dataloader, val_dataloader, epochs=100, auto_lr_find=True, auto_scale_batch_size=True)
mlp.save_model("family_model.pt")
# Model evaluation
def model_evaluation_accuracy(dataloader: DataLoader, model) -> Tuple[np.ndarray, np.ndarray]:
    """
    Make predictions for the data in ``dataloader`` and print the accuracy.

    Args:
        dataloader: a torch dataloader containing dataset to be evaluated
        model: a trained model exposing a ``predict`` method that returns
            per-class scores of shape (batch, num_classes)

    Returns:
        Tuple of (predicted labels, ground-truth labels) as 1-d numpy arrays.
    """
    y_pred, y_truth = [], []
    for X, y in dataloader:
        # argmax over the class dimension -> predicted class index per sample
        y_hat = torch.max(model.predict(X), 1)[1]
        # Bug fix: extend with plain Python scalars instead of iterating the
        # tensor (which produced a list of 0-d tensors).
        y_pred += y_hat.detach().numpy().tolist()
        y_truth += y.detach().numpy().flatten().tolist()
    y_pred, y_truth = np.array(y_pred), np.array(y_truth)
    # Fraction of exact label matches (equivalent to sklearn accuracy_score).
    acc_score = float((y_truth == y_pred).mean())
    print(f" Test : accuracy score : {acc_score:0.2f}")
    return y_pred, y_truth
# NOTE(review): evaluation runs on the *training* dataloader -- use
# val_dataloader to measure generalization.
prediction, truth = model_evaluation_accuracy(train_dataloader, mlp)
|
|
"""
The :mod:`fatf.utils.metrics.subgroup_metrics` module holds sub-group metrics.
These functions are mainly used to compute a given performance metric for every
sub population in a data set defined by a grouping on a selected feature.
"""
# Author: Kacper Sokol <k.sokol@bristol.ac.uk>
# License: new BSD
import inspect
from numbers import Number
from typing import Callable, List, Optional, Tuple, Union
from typing import Dict # pylint: disable=unused-import
import numpy as np
import fatf.utils.metrics.metrics as fumm
import fatf.utils.metrics.tools as fumt
__all__ = ['apply_metric_function',
'apply_metric',
'performance_per_subgroup',
'performance_per_subgroup_indexed'] # yapf: disable
Index = Union[int, str] # A column index type
def apply_metric_function(population_confusion_matrix: List[np.ndarray],
                          metric_function: Callable[[np.ndarray], float],
                          *args, **kwargs) -> List[float]:
    """
    Evaluates ``metric_function`` on every confusion matrix in the list.

    The metric function must accept a numpy.ndarray confusion matrix as its
    first (required) argument; any extra unnamed (``*args``) and named
    (``**kwargs``) arguments are forwarded to it unchanged.

    Parameters
    ----------
    population_confusion_matrix : List[numpy.ndarray]
        One confusion matrix per sub-population.
    metric_function : Callable[[numpy.ndarray], Number]
        A callable mapping a confusion matrix (plus optional extra
        arguments) to a single number.
    *args
        Unnamed arguments forwarded to ``metric_function``.
    **kwargs
        Named arguments forwarded to ``metric_function``.

    Raises
    ------
    AttributeError
        ``metric_function`` does not declare at least one required
        parameter.
    IncorrectShapeError
        A confusion matrix is not a square, at-least-2x2, 2-dimensional
        numpy array.
    TypeError
        A confusion matrix is not integer-valued; a metric output is not
        numerical; ``metric_function`` is not callable; or
        ``population_confusion_matrix`` is not a list.
    ValueError
        A confusion matrix is a structured array, or the input list is
        empty.

    Returns
    -------
    metrics : List[numbers]
        The metric value for every sub-population.
    """
    # Validate the confusion matrices (guard-clause style).
    if not isinstance(population_confusion_matrix, list):
        raise TypeError('The population_confusion_matrix parameter has to be '
                        'a list.')
    if not population_confusion_matrix:
        raise ValueError('The population_confusion_matrix parameter '
                         'cannot be an empty list.')
    for confusion_matrix in population_confusion_matrix:
        assert fumt.validate_confusion_matrix(confusion_matrix), \
            'Invalid confusion matrix.'
    # Validate the metric callable: it must take at least one required
    # parameter (the confusion matrix).
    if not callable(metric_function):
        raise TypeError('The metric_function parameter has to be a Python '
                        'callable.')
    required_param_n = sum(
        1 for parameter in inspect.signature(metric_function).parameters.values()
        if parameter.default is parameter.empty)
    if not required_param_n:
        raise AttributeError('The metric_function callable needs to have '
                             'at least one required parameter taking a '
                             'confusion matrix. 0 were found.')
    metrics = [
        metric_function(cmx, *args, **kwargs)  # type: ignore
        for cmx in population_confusion_matrix
    ]
    # Every output must be a plain number.
    for metric_value in metrics:
        if not isinstance(metric_value, Number):
            raise TypeError('One of the metric function outputs is not a '
                            'number: *{}*.'.format(metric_value))
    return metrics
def apply_metric(population_confusion_matrix: List[np.ndarray],
                 metric: Optional[str] = None,
                 label_index: int = 0,
                 **kwargs) -> List[float]:
    """
    Applies a predefined performance metric to every confusion matrix.

    Recognised metric identifiers: ``'true positive rate'``,
    ``'true negative rate'``, ``'false positive rate'``,
    ``'false negative rate'``, ``'positive predictive value'``,
    ``'negative predictive value'``, ``'accuracy'`` and ``'treatment'``.

    Parameters
    ----------
    population_confusion_matrix : List[numpy.ndarray]
        One confusion matrix per sub-population.
    metric : string, optional (default='accuracy')
        Identifier of the performance metric to apply.
    label_index : integer, optional (default=0)
        Index of the label treated as "positive" (all others become
        "negative"); only relevant for multi-class confusion matrices.

    Raises
    ------
    TypeError
        The ``metric`` parameter is not a string.
    ValueError
        The ``metric`` parameter names an unknown metric.

    Returns
    -------
    metrics : List[number]
        The metric value for every sub-population.
    """
    available_metrics = {
        'true positive rate': fumm.multiclass_true_positive_rate,
        'true negative rate': fumm.multiclass_true_negative_rate,
        'false positive rate': fumm.multiclass_false_positive_rate,
        'false negative rate': fumm.multiclass_false_negative_rate,
        'positive predictive value': fumm.multiclass_positive_predictive_value,
        'negative predictive value': fumm.multiclass_negative_predictive_value,
        'accuracy': fumm.accuracy,
        'treatment': fumm.multiclass_treatment
    }  # type: Dict[str, Callable]
    # Default, then validate, guard-clause style.
    if metric is None:
        metric = 'accuracy'
    elif not isinstance(metric, str):
        raise TypeError('The metric parameter has to be a string.')
    elif metric not in available_metrics:
        available_metrics_names = sorted(list(available_metrics.keys()))
        raise ValueError('The selected metric (*{}*) is not recognised. '
                         'The following options are available: '
                         '{}.'.format(metric, available_metrics_names))
    metric_callable = available_metrics[metric]
    # Accuracy takes no label index; every other metric does.
    if metric == 'accuracy':
        return apply_metric_function(population_confusion_matrix,
                                     metric_callable, **kwargs)
    return apply_metric_function(population_confusion_matrix,
                                 metric_callable, label_index, **kwargs)
def performance_per_subgroup(
        dataset: np.ndarray,
        #
        ground_truth: np.ndarray,
        predictions: np.ndarray,
        #
        column_index: Index,
        #
        *args,
        label_index: int = 0,
        #
        groupings: Optional[List[Union[float, Tuple[str]]]] = None,
        numerical_bins_number: int = 5,
        treat_as_categorical: Optional[bool] = None,
        #
        labels: Optional[List[Union[str, float]]] = None,
        #
        metric: Optional[str] = None,
        metric_function: Optional[Callable[[np.ndarray], float]] = None,
        #
        **kwargs) -> Tuple[List[float], List[str]]:
    """
    Computes a chosen metric per sub-population of a data set.

    Thin composition of
    :func:`fatf.utils.metrics.tools.confusion_matrix_per_subgroup` with
    :func:`fatf.utils.metrics.subgroup_metrics.apply_metric` (``metric``
    parameter) or
    :func:`fatf.utils.metrics.subgroup_metrics.apply_metric_function`
    (``metric_function`` parameter). See those functions for parameter
    descriptions, errors and exceptions.

    .. note::
       ``metric_function`` takes precedence over ``metric`` if both are
       provided.

    Returns
    -------
    population_metrics : List[numbers]
        The metric value for every sub-population.
    bin_names : List[strings]
        The name of every sub-population (binning result): feature ranges
        for a numerical feature, feature value sets for a categorical one.
    """
    # pylint: disable=too-many-locals
    population_cmxs, bin_names = fumt.confusion_matrix_per_subgroup(
        dataset, ground_truth, predictions, column_index, groupings,
        numerical_bins_number, treat_as_categorical, labels)
    # A user-supplied callable wins over a named metric.
    if metric_function is None:
        population_metrics = apply_metric(population_cmxs, metric, label_index,
                                          **kwargs)
    else:
        population_metrics = apply_metric_function(
            population_cmxs, metric_function, *args, **kwargs)
    return population_metrics, bin_names
def performance_per_subgroup_indexed(
        indices_per_bin: List[np.ndarray],
        ground_truth: np.ndarray,
        predictions: np.ndarray,
        #
        *args,
        label_index: int = 0,
        #
        labels: Optional[List[Union[str, float]]] = None,
        #
        metric: Optional[str] = None,
        metric_function: Optional[Callable[[np.ndarray], float]] = None,
        #
        **kwargs) -> List[float]:
    """
    Computes a chosen metric per sub-population for index-based grouping.

    Thin composition of
    :func:`fatf.utils.metrics.tools.confusion_matrix_per_subgroup_indexed`
    with :func:`fatf.utils.metrics.subgroup_metrics.apply_metric`
    (``metric`` parameter) or
    :func:`fatf.utils.metrics.subgroup_metrics.apply_metric_function`
    (``metric_function`` parameter). See those functions for parameter
    descriptions, errors and exceptions.

    .. note::
       ``metric_function`` takes precedence over ``metric`` if both are
       provided.

    Returns
    -------
    population_metrics : List[numbers]
        The metric value for every sub-population.
    """
    population_cmxs = fumt.confusion_matrix_per_subgroup_indexed(
        indices_per_bin, ground_truth, predictions, labels)
    # A user-supplied callable wins over a named metric.
    if metric_function is None:
        population_metrics = apply_metric(population_cmxs, metric, label_index,
                                          **kwargs)
    else:
        population_metrics = apply_metric_function(
            population_cmxs, metric_function, *args, **kwargs)
    return population_metrics
|
|
from copy import deepcopy
from pyquaternion import Quaternion
import numpy as np
def interpolate(key0, key1, t=0.5):
    """Interpolate two keyframe meshes at blend parameter ``t``.

    Blends per-edge relative rotations (slerp) and edge lengths between
    ``key0`` and ``key1``, then re-solves the mesh's face and vertex
    systems to reconstruct an embedding. ``key0`` is deep-copied; neither
    input is modified.

    NOTE(review): assumes key0 and key1 share connectivity (same edges,
    edge2face, vertex count) -- confirm at the call site.
    """
    mesh = deepcopy(key0)
    # TODO: Takes too long
    print("Interpolating IICs")
    for eid, fids in enumerate(mesh.edge2face):
        left = fids[0]
        right = fids[1]
        if left is None or right is None:
            # Boundary edge: only one incident face, nothing to blend.
            continue
        # Tangent frames of the two incident faces, in both keyframes.
        q00 = Quaternion(matrix=key0.tangent_frames[left])
        q01 = Quaternion(matrix=key0.tangent_frames[right])
        q10 = Quaternion(matrix=key1.tangent_frames[left])
        q11 = Quaternion(matrix=key1.tangent_frames[right])
        # Slerp the *relative* rotation across the edge (right -> left),
        # not the absolute frames, so the blend is pose-independent.
        q = Quaternion.slerp(q01.inverse*q00, q11.inverse*q10, amount=t)
        mesh.Q[eid] = q.rotation_matrix
        # Edge lengths blend linearly.
        mesh.L[eid] = (1 - t) * key0.L[eid] + t * key1.L[eid]
    # Anchor the reconstruction: blend the absolute frame and position of
    # the first face/vertex so the solved systems have a reference.
    q0 = Quaternion(matrix=key0.tangent_frames[0])
    q1 = Quaternion(matrix=key1.tangent_frames[0])
    q = Quaternion.slerp(q0, q1, amount=t)
    mesh.tangent_frames[0] = q.rotation_matrix
    mesh.verts[0] = (1 - t) * (key0.verts[0]) + t * (key1.verts[0])
    print("Solving the face system.")
    mesh._compute_face_system()
    mesh._prefactor_face_system()
    mesh._solve_face_system()
    print("Solving the vertex system.")
    mesh._compute_vertex_system()
    mesh._prefactor_vertex_system()
    mesh._solve_vertex_system()
    mesh.update()
    return mesh
def evaluate(mesh, key0, key1, t):
    """Relative edge-length error of an interpolated mesh.

    Compares every edge length of ``mesh`` against the linear blend of the
    corresponding edge lengths in ``key0`` and ``key1`` at parameter ``t``
    and returns the per-edge absolute relative error.
    """
    edge_ends = mesh.edges.T
    start, end = edge_ends[0], edge_ends[1]

    def _edge_lengths(m):
        # Euclidean length of every edge of mesh ``m``.
        return np.linalg.norm(m.verts[start] - m.verts[end], axis=1)

    target = (1 - t) * _edge_lengths(key0) + t * _edge_lengths(key1)
    actual = _edge_lengths(mesh)
    return np.abs(actual - target) / target
def refine(mesh):
    """Re-solve the mesh with every face constrained, then return it.

    Runs the face system (all faces constrained) followed by the vertex
    system, and refreshes derived state via ``mesh.update()``. The mesh is
    modified in place and also returned for convenience.
    """
    all_faces = range(mesh.faces.shape[0])
    # Face system first: compute, prefactor, solve with every face pinned.
    mesh._compute_face_system(constrained=all_faces)
    mesh._prefactor_face_system()
    mesh._solve_face_system(constrained=all_faces)
    # Then the vertex system, which depends on the solved face frames.
    mesh._compute_vertex_system()
    mesh._prefactor_vertex_system()
    mesh._solve_vertex_system()
    mesh.update()
    return mesh
|
|
# Creating Quantile RBF network class
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras import regularizers
from tensorflow.keras import layers
from keras.models import Sequential
from keras.engine.input_layer import Input
from keras.layers.core import Dense
from keras.optimizers import RMSprop
from keras.regularizers import l2
from sklearn.model_selection import GridSearchCV
from rbf import RBFLayer, InitCentersRandom
import matplotlib.pyplot as plt
from keras.datasets import boston_housing
import scipy as sc
from keras.wrappers.scikit_learn import KerasRegressor
from scipy import stats
import sklearn as sk
import pandas as p
from sklearn.model_selection import KFold
from Evaluation import mean_squared_error,trimmed_mean_squares
class QuantileNetwork:
    """
    Builds MLP and RBF regression networks trained with the quantile
    (pinball) loss, for estimating upper/lower conditional quantiles.

    Constructor arguments:
        thau: target quantile level in (0, 1); the "lower" models use
            1 - thau.
        betas: width parameter(s) for the RBF layer.
        units: number of RBF units.
        input_shape: number of input features.
        x: training inputs, used to initialise the RBF centers.
    """

    def __init__(self, thau, betas, units, input_shape, x):
        self.thau = thau
        self.betas = betas
        self.units = units
        self.shape = input_shape
        self.x = x

    @staticmethod
    def _pinball_loss(thau):
        """Return a Keras loss computing the pinball (quantile) loss for
        quantile level ``thau``.

        Replaces four identical closures that were previously duplicated
        across the upper/lower MLP and RBF builders.
        """
        def quantile_nonlinear(y_true, y_pred):
            x = y_true - y_pred
            # K.maximum because the operands are Keras tensors, not floats.
            return K.maximum(thau * x, (thau - 1) * x)
        return quantile_nonlinear

    # MLP model: two sigmoid hidden layers and a linear output.
    def MLP_model(self, input_shape, loss):
        inputs = Input(shape=(input_shape,))
        layer = Dense(128, activation=K.sigmoid)(inputs)
        lay = Dense(64, activation=K.sigmoid)(layer)
        out = Dense(1)(lay)
        model = Model(inputs=inputs, outputs=out)
        model.compile(loss=loss, optimizer=RMSprop())
        return model

    # RBF model: one RBF layer (centers sampled from x) and a linear output.
    def RBF_model(self, x, input_shape, units, betas, loss):
        inputs = Input(shape=(input_shape,))
        rbflayer = RBFLayer(output_dim=units,
                            betas=betas,
                            initializer=InitCentersRandom(x))
        rbf = rbflayer(inputs)
        out = Dense(1)(rbf)
        model = Model(inputs=inputs, outputs=out)
        model.compile(loss=loss,
                      optimizer=RMSprop())
        return model

    def upper_mlp(self):
        """MLP estimating the upper (thau) quantile."""
        return self.MLP_model(input_shape=self.shape,
                              loss=self._pinball_loss(self.thau))

    def lower_mlp(self):
        """MLP estimating the lower (1 - thau) quantile."""
        return self.MLP_model(input_shape=self.shape,
                              loss=self._pinball_loss(1 - self.thau))

    def upper_rbf(self):
        """RBF network estimating the upper (thau) quantile."""
        return self.RBF_model(x=self.x, input_shape=self.shape,
                              betas=self.betas, units=self.units,
                              loss=self._pinball_loss(self.thau))

    def lower_rbf(self):
        """RBF network estimating the lower (1 - thau) quantile."""
        return self.RBF_model(x=self.x, input_shape=self.shape,
                              betas=self.betas, units=self.units,
                              loss=self._pinball_loss(1 - self.thau))

    def evaluate(self, func, y_true, y_pred):
        """Evaluate predictions with the named error function.

        NOTE(review): returns None for unrecognised ``func`` values --
        callers relying on that should be confirmed before tightening.
        """
        if func == 'mean_squared_error':
            return mean_squared_error(y_true, y_pred)
        elif func == 'trimmed_mean_squared_error':
            return trimmed_mean_squares(y_true, y_pred, alpha=0.75)
|
|
# This is a first cut at using my own python script to check a student file
import numpy as np
import sys
# Load the student's answer and the reference answer.
y = np.load('product.npy')
ytrue = np.load('true_product.npy')
# Report the shape; stop early when the arrays are not comparable.
print(y.shape)
if y.shape != ytrue.shape:
    sys.exit()
# Check each element against a tight absolute tolerance.
errors = np.abs(y - ytrue)
for i, err in enumerate(errors):
    if err > 1e-14:
        print("Element ", i, " is incorrect: error = ", err)
    else:
        print("Element ", i, " is correct")
|
|
"""
This script should make a big ol' data file with the angular and specular
resolved T, R_f, and R_b for a PV window in a format edible by EnergyPlus
"""
import numpy as np
from wpv import Layer,Stack
import matplotlib.pyplot as plt
# This whole thing uses microns for length
degree = np.pi/180
# 20 incidence angles from normal incidence up to 89 deg, in radians.
inc_angles = np.linspace(0,89,num=20)*degree
num_lams = 50
# Wavelength grid 0.3-2.5 micron.
lams = np.linspace(0.3,2.5,num=num_lams)
lamrange = [min(lams),max(lams)]
# Layer(thickness_um, nk-data name, coherence flag 'c'/'i') -- see wpv.Layer.
Glass = Layer(4000,'nkLowFeGlass','i')
TiO2 = Layer(0.05,'nkTiO2','c')
FTO = Layer(0.3,'nkFTO','c')
MAPI = Layer(0.5,'nkMAPI','c')
ITO = Layer(0.4,'nkITO','c')
SnO2 = Layer(0.5,'nkSnO2','c')
NiO = Layer(0.05,'nkNiO','c')
Ag = Layer(0.01,'nkAg','c')
TiO2lowE = Layer(0.02,'nkTiO2','c')
Bleach = Layer(0.5,'nkTiO2','c')
EVA = Layer(1500,'nkEVA','i')
#MAPI.plotnk(lams)
layers = [Glass,FTO,TiO2,Bleach,NiO,ITO,EVA,Glass,TiO2lowE,Ag,TiO2lowE]
#layers = [MAPI]
stack = Stack(layers)
# Reversed stack gives the back-side reflectance R_b.
stack_b = stack.reverse()
Rfs = []
Rbs = []
Ts = []
outlams = []
outangs = []
# Sweep every (angle, wavelength) pair; rows are emitted in the same
# order so the three output files stay aligned.
for iang in inc_angles:
    for lam in lams:
        outlams.append(lam)
        outangs.append(iang)
        [Rf,A,T] = stack.get_RAT(lam,iang)
        Rfs.append(Rf)
        Ts.append(T)
        [Rb,A,T] = stack_b.get_RAT(lam,iang)
        Rbs.append(Rb)
outlams = np.array(outlams)
outangs = np.array(outangs)
Rfs = np.array(Rfs)
Rbs = np.array(Rbs)
Ts = np.array(Ts)
# Bug fix: the stored angle column is outangs/degree, i.e. *degrees*, but
# the headers previously claimed "[rad]"; label the unit correctly.
X = np.transpose([outangs/degree,outlams,Ts])
np.savetxt('./Output/T_spectral_angular.txt',X,delimiter=',',header="angle [deg], wavelength [micron], T [1]")
Y = np.transpose([outangs/degree,outlams,Rfs])
np.savetxt('./Output/Rf_spectral_angular.txt',Y,delimiter=',',header="angle [deg], wavelength [micron], Rf [1]")
Z = np.transpose([outangs/degree,outlams,Rbs])
np.savetxt('./Output/Rb_spectral_angular.txt',Z,delimiter=',',header="angle [deg], wavelength [micron], Rb [1]")
'''
plt.figure()
plt.plot(inc_angles/degree,Rs,label="$R$")
plt.plot(inc_angles/degree,Ts,label="$T$")
plt.plot(inc_angles/degree, Ts[0]*taubar(inc_angles, *popt),label="$T_{fitted}$")
plt.plot(inc_angles/degree,Rs+As+Ts,label="$R+A+T$")
plt.xlabel(r"$\theta$")
plt.legend()
plt.show()
'''
|
|
#!/usr/bin/env python3.5
# coding=utf-8
'''
@date = '17/12/1'
@author = 'lynnchan'
@email = 'ccchen706@126.com'
'''
import pandas as pd
import os
import random
import numpy as np
from numpy import random as nr
class CsvReader():
    """
    Thin helper around pandas CSV I/O rooted at a fixed directory.

    Args:
        dic: directory holding the CSV files; defaults to the current
            working directory.
    """

    def __init__(self, dic=''):
        # '!=' instead of 'is not': identity comparison against a string
        # literal relies on CPython interning and raises a SyntaxWarning
        # on modern interpreters.
        if dic != '':
            self.csv_dict = dic + '/'
        else:
            self.csv_dict = './'

    def read_data(self, file_name):
        """Read an entire CSV file into a DataFrame."""
        file_path_name = self.csv_dict + file_name
        read_data = pd.read_csv(file_path_name)
        # print(read_data.head())
        return read_data

    def read_data_chunk(self, file_name, chunkSize=5):
        """Return a chunked reader yielding DataFrames of ``chunkSize`` rows."""
        file_path_name = self.csv_dict + file_name
        read_data_chunk = pd.read_csv(file_path_name, chunksize=chunkSize)
        # print(read_data.head())
        return read_data_chunk

    def read_data_with_number(self, file_name, read_num=500000):
        """Read ``read_num`` rows sampled uniformly at random.

        NOTE(review): this materialises a ~200-million element index array
        just to sample skip rows (several GB of RAM) and assumes the file
        has about 200M rows -- confirm before using on other data.
        """
        print('start')
        x = np.arange(1, 200000000)
        skiprow = nr.choice(x, 200000000 - read_num, replace=False)
        print('end')
        file_path_name = self.csv_dict + file_name
        read_data = pd.read_csv(file_path_name, nrows=read_num, skiprows=skiprow)
        print('read data end')
        return read_data

    def read_data_with_random(self, file_name, read_num=2000000):
        """Skip a random prefix of rows (header kept), then read ``read_num`` rows."""
        skiprow = random.randint(100000000, 200000000)
        file_path_name = self.csv_dict + file_name
        read_data = pd.read_csv(file_path_name, nrows=read_num, skiprows=range(1, skiprow))
        return read_data

    def write_data_without_index(self, res_data, file_name, columns=None, index_name='', index_start=0):
        """Write ``res_data`` to ``file_name``; append (no header) if it exists.

        The row index is a RangeIndex starting at ``index_start``.
        """
        file_path_name = self.csv_dict + file_name
        res_data_frame = pd.DataFrame(res_data, columns=(columns))
        # Bug fix: assigning the scalar ``index_start`` to ``.index``
        # raises in pandas; a RangeIndex starting there was clearly meant.
        res_data_frame.index = pd.RangeIndex(
            start=index_start, stop=index_start + len(res_data_frame))
        res_data_frame.index.name = index_name
        # Bug fix: the two branches were inverted relative to
        # write_data_with_index -- the header was only written when
        # appending to an existing file and omitted on first creation.
        if os.path.exists(file_path_name):
            with open(file_path_name, 'a') as f:
                res_data_frame.to_csv(f, header=False)
        else:
            res_data_frame.to_csv(file_path_name)

    def write_data_with_index(self, res_data, index, file_name, columns=None, index_name=''):
        """Write ``res_data`` with an explicit ``index``; append if the file exists."""
        file_path_name = self.csv_dict + file_name
        res_data_frame = pd.DataFrame(res_data, index, columns=(columns))
        res_data_frame.index.name = index_name
        if os.path.exists(file_path_name):
            with open(file_path_name, 'a') as f:
                res_data_frame.to_csv(f, header=False)
        else:
            res_data_frame.to_csv(file_path_name)

    def write_data(self, res_data, file_name, columns=None):
        """Write ``res_data`` to ``file_name``, overwriting any existing file."""
        file_path_name = self.csv_dict + file_name
        res_data_frame = pd.DataFrame(res_data, index=None, columns=(columns))
        # print(res_data_frame.head())
        res_data_frame.to_csv(file_path_name)
|
|
# Authors: Soledad Galli <solegalli@protonmail.com>
# License: BSD 3 clause
from typing import List, Union
import numpy as np
import pandas as pd
from feature_engine.encoding.base_encoder import BaseCategoricalTransformer
from feature_engine.variable_manipulation import _check_input_parameter_variables
class WoEEncoder(BaseCategoricalTransformer):
    """
    The WoERatioCategoricalEncoder() replaces categories by the weight of evidence
    (WoE). The WoE was used primarily in the financial sector to create credit risk
    scorecards.

    The encoder will encode only categorical variables (type 'object'). A list
    of variables can be passed as an argument. If no variables are passed the encoder
    will find and encode all categorical variables (object type).

    The encoder first maps the categories to the weight of evidence for each variable
    (fit). The encoder then transforms the categories into the mapped numbers
    (transform).

    **Note**

    This categorical encoding is exclusive for binary classification.

    **The weight of evidence is given by:**

    .. math::

        log( p(X=xj|Y = 1) / p(X=xj|Y=0) )

    **The WoE is determined as follows:**

    We calculate the percentage positive cases in each category of the total of all
    positive cases. For example 20 positive cases in category A out of 100 total
    positive cases equals 20 %. Next, we calculate the percentage of negative cases in
    each category respect to the total negative cases, for example 5 negative cases in
    category A out of a total of 50 negative cases equals 10%. Then we calculate the
    WoE by dividing the category percentages of positive cases by the category
    percentage of negative cases, and take the logarithm, so for category A in our
    example WoE = log(20/10).

    **Note**

    - If WoE values are negative, negative cases supersede the positive cases.
    - If WoE values are positive, positive cases supersede the negative cases.
    - And if WoE is 0, then there are equal number of positive and negative examples.

    **Encoding into WoE**:

    - Creates a monotonic relationship between the encoded variable and the target
    - Returns variables in a similar scale

    **Note**

    The log(0) is not defined and the division by 0 is not defined. Thus, if any of the
    terms in the WoE equation are 0 for a given category, the encoder will return an
    error. If this happens, try grouping less frequent categories.

    Parameters
    ----------
    variables : list, default=None
        The list of categorical variables that will be encoded. If None, the
        encoder will find and select all object type variables.

    Attributes
    ----------
    encoder_dict_ :
        Dictionary with the WoE per variable.

    Methods
    -------
    fit:
        Learn the WoE per category, per variable.
    transform:
        Encode the categories to numbers.
    fit_transform:
        Fit to the data, then transform it.
    inverse_transform:
        Encode the numbers into the original categories.

    Notes
    -----
    For details on the calculation of the weight of evidence visit:
    https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html

    In credit scoring, continuous variables are also transformed using the WoE. To do
    this, first variables are sorted into a discrete number of bins, and then these
    bins are encoded with the WoE as explained here for categorical variables. You can
    do this by combining the use of the equal width, equal frequency or arbitrary
    discretisers.

    NAN are introduced when encoding categories that were not present in the training
    dataset. If this happens, try grouping infrequent categories using the
    RareLabelEncoder().

    See Also
    --------
    feature_engine.encoding.RareLabelEncoder
    feature_engine.discretisation
    """

    def __init__(
        self, variables: Union[None, int, str, List[Union[str, int]]] = None
    ) -> None:
        self.variables = _check_input_parameter_variables(variables)

    def fit(self, X: pd.DataFrame, y: pd.Series):
        """
        Learn the WoE.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training input samples.
            Can be the entire dataframe, not just the categorical variables.

        y : pandas series.
            Target, must be binary [0,1].

        Raises
        ------
        TypeError
            - If the input is not the Pandas DataFrame.
            - If any user provided variables are not categorical.
        ValueError
            - If there are no categorical variables in df or df is empty
            - If variable(s) contain null values.
            - If y is not binary with values 0 and 1.
            - If p(0) = 0 or p(1) = 0.

        Returns
        -------
        self
        """
        X = self._check_fit_input_and_variables(X)

        # Check that y is binary.
        # Bug fix: the previous check was ``any(x for x in y.unique() if x
        # not in [0, 1])``, which tested the *truthiness of the offending
        # values*, so falsy invalid labels (e.g. None, '', 0.0-like
        # objects) slipped through undetected.
        if any(x not in [0, 1] for x in y.unique()):
            raise ValueError(
                "This encoder is only designed for binary classification, values of y "
                "can be only 0 or 1."
            )

        temp = pd.concat([X, y], axis=1)
        temp.columns = list(X.columns) + ["target"]

        self.encoder_dict_ = {}

        total_pos = temp["target"].sum()
        total_neg = len(temp) - total_pos
        temp["non_target"] = np.where(temp["target"] == 1, 0, 1)

        for var in self.variables:
            # Share of positives / negatives falling in each category.
            pos = temp.groupby([var])["target"].sum() / total_pos
            neg = temp.groupby([var])["non_target"].sum() / total_neg

            t = pd.concat([pos, neg], axis=1)
            t["woe"] = np.log(t["target"] / t["non_target"])

            # log(0) and division by zero are undefined, so fail loudly if
            # either class proportion vanishes for some category.
            if (
                not t.loc[t["target"] == 0, :].empty
                or not t.loc[t["non_target"] == 0, :].empty
            ):
                raise ValueError(
                    "The proportion of one of the classes for a category in "
                    "variable {} is zero, and log of zero is not defined".format(var)
                )

            self.encoder_dict_[var] = t["woe"].to_dict()

        self._check_encoding_dictionary()

        self.input_shape_ = X.shape

        return self

    # Ugly work around to import the docstring for Sphinx, otherwise not necessary
    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        X = super().transform(X)
        return X

    transform.__doc__ = BaseCategoricalTransformer.transform.__doc__

    def inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame:
        X = super().inverse_transform(X)
        return X

    inverse_transform.__doc__ = BaseCategoricalTransformer.inverse_transform.__doc__
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def salinity(path):
    """Water Salinity and River Discharge

    The `salinity` data frame has 28 rows and 4 columns: biweekly averages
    of water salinity and river discharge in Pamlico Sound, North Carolina,
    recorded between 1972 and 1977 (March, April and May only).

    Columns:

    `sal`
        The average salinity of the water over two weeks.
    `lag`
        The average salinity of the water lagged two weeks. Since only
        spring is used, the value of `lag` is not always equal to the
        previous value of `sal`.
    `trend`
        A factor (levels 0-5) indicating in which of the 6 biweekly periods
        between March and May the observations were taken, 0 being the
        first two weeks in March.
    `dis`
        The amount of river discharge during the two weeks for which `sal`
        is the average salinity.

    The data were obtained from Ruppert, D. and Carroll, R.J. (1980)
    Trimmed least squares estimation in the linear model. *Journal of the
    American Statistical Association*, **75**, 828-838.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `salinity.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 28 rows and 4 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'salinity.csv'
    filepath = os.path.join(path, filename)
    # Fetch the CSV on first use only.
    if not os.path.exists(filepath):
        url = 'http://dustintran.com/data/r/boot/salinity.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='salinity.csv',
                                   resume=False)
    data = pd.read_csv(filepath, index_col=0, parse_dates=True)
    return data.values, {'columns': data.columns}
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 13:46:08 2019
@author: Leheng Chen
"""
from binomialTreePricer import asianOptionBinomialTree
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
# NOTE(review): df_uly and df_opt are not defined in this file chunk --
# presumably loaded earlier in the script/notebook; confirm before running
# standalone.
uly_names = ['Crude Oil WTI', 'Ethanol', 'Gold', 'Silver', 'Natural Gas']
# Latest observed spot price per underlying.
uly_init = df_uly[uly_names].tail(1)
# Business days from start to maturity, inclusive of the start day.
df_opt['bdays'] = 1 + np.busday_count(df_opt['Start Date'].values.astype('datetime64[D]'), df_opt['Maturity Date'].values.astype('datetime64[D]'))
# Daily log-return volatility per underlying, scaled by 100 (percent).
df_uly_vol = np.log(df_uly[uly_names].pct_change() + 1).std(skipna=True) * 100
oneOverRho = 3
# Price-unit conversion factors per underlying (quote units -> dollars).
df_units = pd.DataFrame([[0.01, 0.0001, 1, 0.01, 0.001]], columns = uly_names)
bdays_year = 252
bdays_month = 21
# =============================================================================
# Define risk free rate, reference to US treasury yield curve as of 20190322
# https://www.treasury.gov/resource-center/data-chart-center/interest-rates/pages/TextView.aspx?data=yieldYear&year=2019
# 1m, 2m, 3m, 6m, 1y, 2y, 3y, 5y, 7y, 10y, 20y, 30y
# =============================================================================
# Define risk free rate according to US
yieldCurveDict = {
    '2019-04-22': 2.49,
    '2019-05-22': 2.48,
    '2019-06-22': 2.46,
    '2019-09-22': 2.48,
    '2020-03-22': 2.45,
    '2021-03-22': 2.31,
    '2022-03-22': 2.24,
    '2024-03-22': 2.24,
    '2026-03-22': 2.34,
    '2029-03-22': 2.44,
    '2039-03-22': 2.69,
    '2049-03-22': 2.88
}
# Derive forward rates from US treasury yield curve
# curvePoints[0] is the valuation date; the remaining points are the curve
# tenors, so consecutive pairs define each forward period.
curvePoints = ['2019-03-22'] + list(yieldCurveDict.keys())
forwardCurveDict = {}
for i in range(len(yieldCurveDict)):
    datePoint1 = curvePoints[i]
    datePoint2 = curvePoints[i + 1]
    if (datePoint1 == curvePoints[0]):
        # First period: the forward rate equals the spot yield.
        forwardCurveDict[datePoint2] = yieldCurveDict[datePoint2]
    else:
        # Implied forward rate from the two spot yields, weighted by the
        # business-day lengths of their periods.
        yieldAtDate1 = yieldCurveDict[datePoint1]
        yieldAtDate2 = yieldCurveDict[datePoint2]
        busDateDiff1 = np.busday_count(curvePoints[0], datePoint1)
        busDateDiff2 = np.busday_count(curvePoints[0], datePoint2)
        forwardCurveDict[datePoint2] = float((yieldAtDate2 * busDateDiff2 - yieldAtDate1 * busDateDiff1) / (busDateDiff2 - busDateDiff1))
def getRiskFreeRate(inputDate):
    """Piecewise-constant forward rate for a datetime.

    Returns the forward rate of the curve interval containing
    ``inputDate`` (half-open [start, end)), or 0 when the date falls
    outside the curve -- callers should treat 0 as "no rate available".
    """
    target = inputDate.date()
    for lower, upper in zip(curvePoints[:-1], curvePoints[1:]):
        lower_date = datetime.strptime(lower, '%Y-%m-%d').date()
        upper_date = datetime.strptime(upper, '%Y-%m-%d').date()
        if lower_date <= target < upper_date:
            return forwardCurveDict[upper]
    return 0
def getYeildCurveRate(inputDate):
    """Spot yield for the curve interval containing ``inputDate``.

    Same interval lookup as getRiskFreeRate, but returns the raw treasury
    yield instead of the implied forward rate; 0 when the date is outside
    the curve. (Name typo "Yeild" kept: it is the public interface.)
    """
    target = inputDate.date()
    for lower, upper in zip(curvePoints[:-1], curvePoints[1:]):
        lower_date = datetime.strptime(lower, '%Y-%m-%d').date()
        upper_date = datetime.strptime(upper, '%Y-%m-%d').date()
        if lower_date <= target < upper_date:
            return yieldCurveDict[upper]
    return 0
def are(sim, actual):
    """Average absolute relative error between two series, in percent."""
    relative_errors = (sim - actual).abs() / actual
    return relative_errors.sum() * 100 / sim.size
# Price every option in df_opt on the binomial tree and collect results.
sim_calls, sim_puts, diff = [], [], []
call_stds, put_stds = [], []
for row in df_opt.index:
    # Retrieve the name of the underlying
    # (the underlying column presumably ends with an 8-character suffix
    # that is stripped here -- confirm the naming convention).
    tmp_uly = df_opt['Underlying'][row][:-8]
    tmp_strike = df_opt['Strike'][row]
    tmp_maturity = df_opt['Maturity Date'][row]
    tmp_steps = df_opt['bdays'][row]
    # Clamp the tree depth to [one month, one year] of business days.
    if tmp_steps > bdays_year:
        tmp_steps = bdays_year
    elif tmp_steps < bdays_month:
        tmp_steps = bdays_month
    tmp_init = uly_init[tmp_uly][0]
    tmp_time_period = 1 / bdays_year
    tmp_vol = df_uly_vol[tmp_uly]
    # One forward rate per tree step, walking back from maturity.
    tmp_rates = [getRiskFreeRate(tmp_maturity - timedelta(d)) / 100 for d in range(tmp_steps)]
    tmp_call = df_opt['Call'][row]
    tmp_put = df_opt['Put'][row]
    tmp_unit = df_units[tmp_uly][0]
    pricer = asianOptionBinomialTree(tmp_steps, tmp_vol, tmp_time_period, oneOverRho, tmp_rates)
    sim_call, c_std = pricer.getOptionPrice(tmp_init, tmp_strike * tmp_unit, True)
    sim_put, p_std = pricer.getOptionPrice(tmp_init, tmp_strike * tmp_unit, False)
    sim_calls.append(sim_call)
    sim_puts.append(sim_put)
    call_stds.append(c_std)
    put_stds.append(p_std)
    # Moneyness: spot minus (unit-adjusted) strike.
    diff.append(tmp_init - tmp_strike * tmp_unit)
    print('under: %s; bdays: %d, K: %6.3f, S: %6.3f --> sim: call: %6.3f put: %6.3f; actual call: %6.3f, put: %6.3f' \
          % (tmp_uly, tmp_steps, tmp_strike* tmp_unit, tmp_init, sim_call,sim_put, tmp_call, tmp_put
             ))
# Attach simulation results and report the average relative error (ARE)
# per underlying and overall.
df_opt['biTree sim put'] = sim_puts
df_opt['biTree sim call'] = sim_calls
df_opt['biTree sim put std'] = put_stds
df_opt['biTree sim call std'] = call_stds
df_opt['Diff'] = diff
for key in np.unique(df_opt['Underlying']):
    tmpCol = df_opt[df_opt['Underlying']==key]
    callGarch = are(tmpCol['biTree sim call'], tmpCol['Call'])
    putGarch = are(tmpCol['biTree sim put'], tmpCol['Put'])
    print("underlying: ", key)
    print("MC GARCH Put ARE:",putGarch)
    print("MC GARCH Call ARE:",callGarch)
are_call = are(df_opt['biTree sim call'], df_opt['Call'])
are_put = are(df_opt['biTree sim put'], df_opt['Put'])
df_opt.to_csv("../tmp_simulation_biTree.csv", index=False)
print("ARE result: call: %6.3f , put: %6.3f" % (are_call, are_put))
from scipy.stats import norm
def europeanOptionCalculator(asset_price, strike, volatility, interest_rate, maturity):
    """Price a European call and put with the Black-Scholes closed form.

    Args:
        asset_price: current spot price S.
        strike: strike price K.
        volatility: annualized volatility (fraction).
        interest_rate: annualized risk-free rate (fraction).
        maturity: time to expiry in years.

    Returns:
        (call, put) prices.
    """
    sigma_sqrt_t = volatility * np.sqrt(maturity)
    d1 = (np.log(asset_price / strike)
          + (interest_rate + (volatility ** 2) / 2) * maturity) / sigma_sqrt_t
    d2 = d1 - sigma_sqrt_t
    discount = np.exp(-interest_rate * maturity)
    call_price = asset_price * norm.cdf(d1) - strike * discount * norm.cdf(d2)
    put_price = strike * discount * norm.cdf(-d2) - asset_price * norm.cdf(-d1)
    return call_price, put_price
# --- Benchmark: Black-Scholes European prices for the same option book ---
euro_calls, euro_puts = [], []
for row in df_opt.index:
    # Retrieve the name of the underlying
    tmp_uly = df_opt['Underlying'][row][:-8]
    tmp_strike = df_opt['Strike'][row]
    tmp_maturity = df_opt['Maturity Date'][row]
    # NOTE(review): business-day count divided by 365 mixes day-count
    # conventions (the tree section uses bdays_year) -- confirm intended.
    tmp_steps = df_opt['bdays'][row] / 365
    tmp_init = uly_init[tmp_uly][0]
    tmp_vol = df_uly_vol[tmp_uly]
    tmp_unit = df_units[tmp_uly][0]
    tmp_rates = getYeildCurveRate(tmp_maturity) / 100  # percent -> fraction
    c, p = europeanOptionCalculator(tmp_init, tmp_unit * tmp_strike, tmp_vol, tmp_rates, tmp_steps)
    print('S: %6.3f K: %6.3f vol: %6.3f r: %6.3f tau: %d call: %6.3f put: %6.3f' % (tmp_init, tmp_unit * tmp_strike, tmp_vol, tmp_rates, tmp_steps, c, p))
    euro_calls.append(c)
    euro_puts.append(p)
# Count how often the simulated Asian price sits below its European counterpart.
# NOTE: these sums are computed but not bound to a name -- only useful when run
# interactively (notebook-style).
sum(df_opt['biTree sim put'] < euro_puts)
sum(df_opt['biTree sim call'] < euro_calls)
df_euro = pd.DataFrame(data = {'Call': euro_calls, 'Put': euro_puts})
df_euro.to_csv('../tmp_euro.csv', index=False)
|
|
# -*- coding:utf-8 -*-
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from mask.utils import load_tflite_model
from config import config_import as conf
from mask import utils
# Load the TFLite face-mask detector declared in the project configuration.
MODEL_PATH = conf.get_config_data_by_key("mask_detection")["MODEL_PATH"]
interpreter, input_details, output_details = load_tflite_model(MODEL_PATH)
# Configure anchors
feature_map_sizes = utils.get_feature_map_sizes("tf")
anchor_sizes = utils.get_anchor_sizes()
anchor_ratios = utils.get_anchor_ratios()
# Generate anchors
anchors = utils.generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
# For inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
# Class-id to human-readable label mapping used by the drawing utilities.
id2class = {0: "Mask", 1: "NoMask"}
# def inference(image):
# pred = face_mask_detection(image)
# return pred
def inference(
    image,
    conf_thresh=0.5,
    iou_thresh=0.4,
    target_shape=(260, 260),
    show_result=False,
    blur=True,
):
    """
    Driver function for face mask detection inference.

    Runs the module-level TFLite interpreter on a single image, decodes the
    predicted boxes against the precomputed anchors, applies single-class NMS,
    and draws (optionally blurring) the surviving detections.

    Args:
        image (np.array): 3D numpy array of input image
        conf_thresh (float): Min threshold of classification probability
        iou_thresh (float): IOU threshold of NMS
        target_shape (tuple): Model input shape
        show_result (bool): Image display flag (currently unused; the display
            call below is commented out)
        blur (bool): Forwarded to utils.draw_results
    Returns:
        (tuple): (boxes, masks_on) as returned by utils.draw_results
    """
    # Resize to the model's input size and scale pixel values to [0, 1].
    image_resized = cv2.resize(image, target_shape)
    image_np = image_resized / 255.0
    image_exp = np.expand_dims(image_np, axis=0)
    image_exp = tf.cast(image_exp, dtype=tf.float32)
    # Set img_in as tensor in the model's input_details
    interpreter.set_tensor(input_details[0]["index"], image_exp)
    interpreter.invoke()
    # Get the output_details tensors (based on the given input above)
    y_bboxes_output = interpreter.get_tensor(output_details[0]["index"])
    y_cls_output = interpreter.get_tensor(output_details[1]["index"])
    # remove the batch dimension, for batch is always 1 for inference
    y_bboxes = utils.decode_bbox(anchors_exp, y_bboxes_output)[0]
    y_cls = y_cls_output[0]
    # to speed up, do single class NMS, not multiple classes NMS
    bbox_max_scores = np.max(y_cls, axis=1)
    bbox_max_score_classes = np.argmax(y_cls, axis=1)
    # keep_idx is the alive bounding box after nms
    keep_idxs = utils.single_class_non_max_suppression(
        y_bboxes, bbox_max_scores, conf_thresh=conf_thresh, iou_thresh=iou_thresh
    )
    boxes, masks_on = utils.draw_results(
        keep_idxs, image, bbox_max_scores, bbox_max_score_classes, y_bboxes, blur=blur
    )
    # if show_result:
    # Image.fromarray(image).show()
    return boxes, masks_on
|
|
import numpy as np
import math as m
import random
"""
the first 3 coordinates tell which joints are connected in a linear fashion.
the 4 th coordinate tells the allowed angle movement is +ve or -ve in Elbow.
5th and 6th tell in what angles it must lie if a rotation is done.
All rotations in Z axis only
"""
CONNECTED_JOINTS_HANDS = np.array((
[17, 18, 19, 1, 2, 115], #left hand,
[25, 26, 27, 1, 2, 115] # right hand
))
def dotproduct(v1, v2):
    """Return the dot product of two equal-length vectors."""
    return sum(a * b for a, b in zip(v1, v2))
def length(v):
    """Return the Euclidean (L2) norm of vector v."""
    return m.sqrt(sum(component * component for component in v))
def angle_between(v1, v2):
    """Return the (always non-negative) angle between v1 and v2, in degrees."""
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1 = m.sqrt(sum(a * a for a in v1))
    norm2 = m.sqrt(sum(b * b for b in v2))
    angle = m.acos(dot / (norm1 * norm2)) * 180 / m.pi
    return angle
def augment_kinematics(train_set):
    """Apply kinematic hand augmentation to every sequence in train_set.

    Args:
        train_set: dict mapping (subject, action, filename) -> frame data.

    Returns:
        dict keyed like train_set (filename suffixed '.KIH.h5') holding the
        hand-augmented frames.
    """
    print("Kinematics is set to True...")
    augmented_kin = dict()
    for (sub, act, string), data in train_set.items():
        print("For :", sub, act, string)
        # NOTE: elbow-only augmentation (apply_kinematics_elbow) is disabled
        # here; only the whole-hand rotation is applied.
        data_new_hands = apply_kinematics_hands(data)
        print("Kinematicating hands..", len(data_new_hands))
        sname_h = string[:-3] + ".KIH.h5"
        augmented_kin[(sub, act, sname_h)] = data_new_hands
    return augmented_kin
#this method only moves the elbow
#rotate only the wrist around the elbows
#http://doc.aldebaran.com/2-1/family/robots/joints_robot.html
def apply_kinematics_elbow(frames):
    """Augment skeleton frames by rotating each wrist around its elbow.

    For every frame and every arm in CONNECTED_JOINTS_HANDS, picks a random
    rotation of 0-15 degrees (sign taken from the arm spec), rotates the
    elbow->wrist vector around the axis perpendicular to the arm plane, and
    keeps the result only if the new elbow angle stays inside the arm's
    allowed range.  Frames with out-of-range rotations are skipped entirely.

    Args:
        frames: iterable of flattened skeletons, each reshapeable to (-1, 3).

    Returns:
        list of augmented skeletons, each of shape (1, joints*3).
    """
    angles =np.array(([0]))
    data_new = []
    # radomly get how much degree you want to rotate around z
    for frame in frames:
        sk = np.array(frame)
        sk = np.reshape(sk, (-1, 3))
        # bring it to local frame
        for arm in CONNECTED_JOINTS_HANDS:
            root = sk[arm[0]]
            sk = sk - root
            deg = random.randint(0, 15) * arm[3] #determine if angle of rotation is postive or negative too
            #print("degree :", deg)
            angle = deg * m.pi / 180
            # arm will contain the joints
            # find the 2 vectors in the plane
            # both vectors start from the the 0th and
            # we rotate both of them
            v1 = sk[arm[1]] - sk[arm[0]] # upper hand
            v2 = sk[arm[2]] - sk[arm[1]] # elbow
            org_ang = angle_between(v1,v2)
            #print("old angle ", angle_between(v2,v1)* arm[3])
            #axis of rotation is the cross product of the vectors formed
            axis = np.cross(v1, v2)
            Rz = get_rot_matrix_around_vector(axis, angle)
            v2 = np.dot(Rz, v2) # rotating the wrist around the elbow only
            #print("new angle ", angle_between(v2, v1)* arm[3])
            #check the angle in between the v1 and v2 and it
            #should be less than allowed
            angle_bet = angle_between(v1, v2) * arm[3] #angle between always return +ve angle and hence we multiply by the respective sign
            if not(arm[4] <= angle_bet and angle_bet <= arm[5] ):
                #reject these pair of joints and proceed
                #print("more than allowed rotation, not possible. Rejecting..", angle_bet)
                #print("original angle: ", org_ang)
                #print("degree :", deg)
                #np.append(angles, [org_ang])
                continue
            # now add the coordinate back
            v2 = v2 + sk[arm[1]]
            sk[arm[2]] = v2
            # add back to bring it to global
            sk = sk + root
            #import viz
            #viz.plot(frame, connect=True, c=viz.connect_points32())
            #viz.plot(sk.reshape((1, -1)), connect=True, c=viz.connect_points32())
            #viz.plotNoisySkeleton(frame, sk.reshape((1, -1)), [17,18])
        data_new.append(sk.reshape((1, -1)))
    #print(np.amax(angles))
    return data_new
def apply_kinematics_hands(frames):
    """Augment skeleton frames by rotating whole arms around the Z axis.

    For every frame and every arm in CONNECTED_JOINTS_HANDS, rotates both the
    shoulder->elbow and shoulder->wrist vectors by a random angle in
    [-15, 15] degrees (sign flipped by the arm spec) around Z, in a local
    frame centered at joint 0.

    Args:
        frames: iterable of flattened skeletons, each reshapeable to (-1, 3).

    Returns:
        list of augmented skeletons, each of shape (1, joints*3).
    """
    data_new = []
    #radomly get how much degree you want to rotate around z
    for frame in frames:
        sk = np.array(frame)
        sk = np.reshape(sk, (-1,3))
        #bring it to local frame
        root = sk[0]
        sk = sk - root
        for arm in CONNECTED_JOINTS_HANDS:
            deg = random.randint(-15, 15) * arm[3] * -1
            #print("degree :", deg)
            angle = deg * m.pi / 180
            # Standard rotation matrix about the Z axis.
            Rz = np.array((
                [m.cos(angle), -m.sin(angle), 0],
                [m.sin(angle), m.cos(angle), 0],
                [0, 0, 1],
            ))
            #arm will contain the joints
            #find the 2 vectors in the plane
            #both vectors start from the the 0th and
            #we rotate both of them
            v1 = sk[arm[1]] - sk[arm[0]] #upper hand to wrist
            v2 = sk[arm[2]] - sk[arm[0]] #elbow to wrist
            v1 = np.dot(Rz, v1)
            v2 = np.dot(Rz, v2)
            #now add the coordinate back
            v1 = v1 + sk[arm[0]]
            v2 = v2 + sk[arm[0]]
            #new coordinates are sk[arm[0]], v1, v2
            sk[arm[1]] = v1
            sk[arm[2]] = v2
        #add back to bring it to global
        sk = sk + root
        #import viz
        #viz.plot(frame, connect=True, c=viz.connect_points32())
        #viz.plot(sk.reshape((1, -1)), connect=True, c=viz.connect_points32())
        data_new.append(sk.reshape((1,-1)))
    #import viz
    #viz.plotNoisySkeleton(frame, sk.reshape((1, -1)), [17, 18])
    return data_new
#vector around which you are rotating
#refer to https://steve.hollasch.net/cgindex/math/rotvec.html
def get_rot_matrix_around_vector(v, angle):
    """Build the 3x3 matrix rotating by `angle` radians around axis `v`.

    Uses the decomposition R = vv^T + cos(a)(I - vv^T) + sin(a)S, where S is
    the skew-symmetric cross-product matrix of the unit axis.
    Reference: https://steve.hollasch.net/cgindex/math/rotvec.html
    """
    unit = v.T / np.linalg.norm(v)
    ux, uy, uz = unit
    unit = np.reshape(unit, (3, -1))
    # Skew-symmetric cross-product matrix of the unit axis.
    skew = np.array((
        [0, -uz, uy],
        [uz, 0, -ux],
        [-uy, ux, 0]
    ))
    outer = unit * unit.T  # (3,1) * (1,3) broadcast -> outer product
    identity = np.identity(3)
    return outer + m.cos(angle) * (identity - outer) + m.sin(angle) * skew
|
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
class CAAnimate:
    """Renders cellular-automaton state histories as animated GIFs."""

    @staticmethod
    def animate_ca(x: np.ndarray, filepath: str, interval: int = 10):
        """
        Generates a animated gif of the input x.
        Parameters
        ----------
        x: np.ndarray
            A list of x steps over time
        filepath: str
            Filepath of the output file as string
        interval: int
            The interval between frame in ms
        """
        figure = plt.figure(figsize=x[0].shape)
        axes = plt.axes()
        axes.set_axis_off()

        def render_frame(step):
            """Redraw the grid for one time step and return the artist list."""
            axes.clear()
            axes.set_axis_off()
            frame_img = axes.imshow(x[step], interpolation='none', cmap='binary')
            return [frame_img]

        # Drive the animation over all steps and write it out via imagemagick.
        anim = animation.FuncAnimation(figure, render_frame, len(x),
                                       interval=interval, blit=True)
        anim.save(filepath, writer='imagemagick')
|
|
from collections import defaultdict
import os
from scipy import sparse
from tqdm import tqdm
import numpy as np
import pandas as pd
def load_ratings(filename):
    """Load a MovieLens ratings CSV from ./data/ml-latest-small as a DataFrame."""
    base_dir = './data/ml-latest-small'
    return pd.read_csv(os.path.join(base_dir, filename))
def get_user_movie_dictionary(dataframe):
    """Map each unique userId/movieId (in first-appearance order) to an index.

    Returns:
        (user2idx, movie2idx) dicts of id -> contiguous 0-based index.
    """
    user2idx = {}
    for user in dataframe.userId.unique():
        user2idx[user] = len(user2idx)
    movie2idx = {}
    for movie in dataframe.movieId.unique():
        movie2idx[movie] = len(movie2idx)
    return user2idx, movie2idx
def transform_binary_matrix(dataframe, user2idx, movie2idx):
    """Build a sparse user x movie matrix of +1/-1 implicit feedback.

    Ratings >= 2.0 become +1.0, everything else -1.0.

    Args:
        dataframe: ratings frame with 'userId', 'movieId', 'rating' columns.
        user2idx: user id -> row index map.
        movie2idx: movie id -> column index map.

    Returns:
        (csr_matrix, stat) where stat counts 'pos' and 'neg' entries.
    """
    row_indices, col_indices, values = [], [], []
    stat = defaultdict(int)
    triples = zip(dataframe['userId'], dataframe['movieId'], dataframe['rating'])
    for user, movie, rating in triples:
        row_indices.append(user2idx[user])
        col_indices.append(movie2idx[movie])
        if rating >= 2.0:
            values.append(1.0)
            stat['pos'] += 1
        else:
            values.append(-1.0)
            stat['neg'] += 1
    matrix = sparse.csr_matrix(
        (values, (row_indices, col_indices)),
        shape=(len(user2idx), len(movie2idx)),
    )
    return matrix, stat
def split_matrix(original, user2idx, movie2idx):
    """Randomly split the interaction matrix into train/validation CSR matrices.

    Each nonzero entry is assigned to training with probability 0.8 plus a
    small popularity bonus, so entries from active users on widely rated
    movies are slightly more likely to land in train; the rest go to
    validation.  The split is deterministic thanks to the fixed seed.

    Args:
        original: sparse user x movie interaction matrix.
        user2idx: user id -> row index map (fixes output row count).
        movie2idx: movie id -> column index map (fixes output column count).

    Returns:
        (train_matrix, validation_matrix) as scipy csr_matrix instances.
    """
    np.random.seed(2020)  # deterministic split
    N_user = original.shape[0]
    N_movie = original.shape[1]
    rows_tr = list()
    cols_tr = list()
    data_tr = list()
    rows_val = list()
    cols_val = list()
    data_val = list()
    for rdx, cdx in tqdm(zip(*original.nonzero())):
        # How many movies this user rated, and how many users rated this movie.
        rated_movie = len(original[rdx, :].nonzero()[1])
        rated_user = len(original[:, cdx].nonzero()[0])
        threshold = (rated_movie / N_movie) * (rated_user / N_user) + 0.8
        random_number = np.random.rand()
        if random_number <= threshold:
            rows_tr.append(rdx)
            cols_tr.append(cdx)
            data_tr.append(original[rdx, cdx])
        else:
            rows_val.append(rdx)
            cols_val.append(cdx)
            data_val.append(original[rdx, cdx])
    train_matrix = sparse.csr_matrix(
        (data_tr, (rows_tr, cols_tr)), shape=(len(user2idx), len(movie2idx))
    )
    validation_matrix = sparse.csr_matrix(
        (data_val, (rows_val, cols_val)), shape=(len(user2idx), len(movie2idx))
    )
    return train_matrix, validation_matrix
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code has been adapted from 'run_glue.py'
Finetuning the library models for generic sequence classification with cost-weighting (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).
"""
import os
import sys
import csv
import time
import torch
import logging
import dataclasses
import numpy as np
from typing import Dict, List, Optional
from filelock import FileLock
from dataclasses import dataclass, field
from sklearn.metrics import f1_score, recall_score, precision_score
from sklearn.metrics import matthews_corrcoef
from torch.utils.data.dataset import Dataset
from transformers import AutoTokenizer, EvalPrediction
from transformers import AutoConfig, AutoModelForSequenceClassification
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from transformers.data.processors.glue import glue_convert_examples_to_features
from transformers.data.processors.utils import InputFeatures
from transformers.data.processors.utils import DataProcessor, InputExample, InputFeatures
from transformers.tokenization_xlm_roberta import XLMRobertaTokenizer
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
## From /src/transformers/data/metrics/__init__.py
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    matches = (preds == labels)
    return matches.mean()
def acc_and_f1(preds, labels):
    """Compute accuracy, MCC, and F1/precision/recall under several averagings.

    Args:
        preds: predicted class ids.
        labels: gold class ids.

    Returns:
        dict with 'acc', 'mcc', and 'f1_/precision_/recall_' keys for the
        'binary', 'micro', 'macro', and 'weighted' averagings.
    """
    metrics = {
        "acc": simple_accuracy(preds, labels),
        # matthews_corrcoef takes (y_true, y_pred) -- labels first on purpose.
        "mcc": matthews_corrcoef(labels, preds),
    }
    for average in ["binary", "micro", "macro", "weighted"]:
        metrics["f1_" + average] = f1_score(y_true=labels, y_pred=preds, average=average)
        metrics["precision_" + average] = precision_score(y_true=labels, y_pred=preds, average=average)
        metrics["recall_" + average] = recall_score(y_true=labels, y_pred=preds, average=average)
    return metrics
# ColaProcessor from src/transformers/data/processors/glue.py
class Processor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        # Build an InputExample from a TF tensor dict: idx, sentence, no
        # sentence pair, and the label rendered as a string.
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        # Binary task; num_labels=2 is also hardcoded in main().
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            # TSV layout: column 3 holds the sentence, column 1 the label.
            text_a = line[3]
            label = line[1]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
# GlueDataTrainingArguments from: src/transformers/data/datasets/glue.py
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Task name; feature-cache filenames are suffixed with it (see GlueDataset).
    name: str = field(metadata={"help": "A name for this task (Cache folders will be based on this"})
    # Directory that must contain the train.tsv / dev.tsv read by Processor.
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
# From: src/transformers/data/datasets/glue.py
class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach
    soon.
    """

    # Arguments used to locate the data and size the features.
    args: DataTrainingArguments
    # Always 'classification' for this script.
    output_mode: str
    # Tokenized examples served by __getitem__.
    features: List[InputFeatures]

    def __init__(
        self,
        args: DataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        evaluate=False,
    ):
        """Tokenize train/dev examples, caching features on disk.

        Args:
            args: data arguments (data_dir, max_seq_length, name, overwrite_cache).
            tokenizer: tokenizer used to convert examples into features.
            limit_length: if set, keep only the first N examples.
            evaluate: load dev.tsv when True, train.tsv otherwise.
        """
        self.args = args
        processor = Processor()
        self.output_mode = 'classification'
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            args.data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train", tokenizer.__class__.__name__, str(args.max_seq_length), args.name,
            ),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                label_list = processor.get_labels()
                # if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__ in (
                #     RobertaTokenizer,
                #     RobertaTokenizerFast,
                #     XLMRobertaTokenizer,
                # ):
                #     # HACK(label indices are swapped in RoBERTa pretrained model)
                #     label_list[1], label_list[2] = label_list[2], label_list[1]
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if evaluate
                    else processor.get_train_examples(args.data_dir)
                )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode='classification',
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
                )

    def __len__(self):
        """Number of tokenized examples."""
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        """Return the i-th tokenized example."""
        return self.features[i]
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    # Parsed in main() into a torch tensor passed to model.set_class_weights().
    class_weights: Optional[str] = field(
        default=None, metadata={"help": "Comma seperated list of class weights. Weight for Class 0, Weight for Class 1"}
    )
def main():
    """Fine-tune and/or evaluate a sequence-classification model.

    Parses (ModelArguments, DataTrainingArguments, TrainingArguments) from the
    command line (or a single JSON-file argument), optionally applies class
    weights to the loss, trains when --do_train is set and evaluates when
    --do_eval is set, writing metrics and raw predictions to the output dir.

    Returns:
        dict of evaluation metrics (empty when evaluation is skipped).

    Raises:
        ValueError: if the output directory exists, is non-empty, and
            --overwrite_output_dir is not given.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    num_labels = 2  ## Defined in processor. Change labels before changing this (also hardcoded elsewhere) .
    output_mode = 'classification'
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=2,  ## Defined in processor. Change labels before changing this (also hardcoded elsewhere) .
        finetuning_task='cola',  ## Not sure why we need this!
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    #############################################################################
    #                            Set Class Weights                             ##
    #############################################################################
    class_weights = None
    if not model_args.class_weights is None:
        # Parse "w0,w1" into a float tensor on the training device.
        class_weights = model_args.class_weights.lstrip().rstrip().split(',')
        class_weights = [float(i) for i in class_weights]
        logger.info(
            "Using Class Weights {}".format(','.join([str(i) for i in class_weights]))
        )
        class_weights = torch.tensor(class_weights, dtype=torch.float, device=training_args.device)
        model.set_class_weights(class_weights)
    #############################################################################
    # Get datasets
    train_dataset = GlueDataset(data_args, tokenizer=tokenizer) if training_args.do_train else None
    eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None

    def compute_metrics(p: EvalPrediction) -> Dict:
        """Dump raw predictions to CSV and return classification metrics."""
        if output_mode == "classification":
            preds = np.argmax(p.predictions, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(p.predictions)
        # BUG FIX: join the path properly -- the original concatenated
        # `output_dir + 'predictions.csv'`, which writes next to (not inside)
        # the output dir unless it happens to end with a separator.
        with open(os.path.join(training_args.output_dir, 'predictions.csv'), 'w') as fh:
            writer = csv.writer(fh)
            # BUG FIX: wrap rows only for the CSV writer; the original rebound
            # `preds` to a list of single-element lists and then passed that to
            # acc_and_f1, corrupting the metric computation.
            writer.writerows([[i] for i in preds])
        return acc_and_f1(preds, p.label_ids)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval and training_args.local_rank in [-1, 0]:
        logger.info("*** Evaluate ***")
        # Loop to handle MNLI double evaluation (matched, mis-matched)
        eval_datasets = [eval_dataset]
        # if data_args.task_name == "mnli":
        #     mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
        #     eval_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, evaluate=True))
        for eval_dataset in eval_datasets:
            result = trainer.evaluate(eval_dataset=eval_dataset)
            output_eval_file = os.path.join(
                training_args.output_dir, f"eval_results_classification.txt"
            )
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results classification *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    """Per-process entry point for xla_spawn (TPUs); `index` is unused."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
|
|
# load_data.py
import numpy as np
import matplotlib.pyplot as plt
import torch
# Load the pickled (image, label) pairs produced by the preprocessing step.
training_data = np.load('training_data.npy', allow_pickle=True)
print(len(training_data))

# Images live at index 0 of each sample; scale pixel values to [0, 1].
X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
X = X / 255.0
# BUG FIX: labels live at index 1 of each sample -- the original read i[0]
# again, making y a copy of the image data instead of the labels.
y = torch.Tensor([i[1] for i in training_data])

# Sanity check: show the first image with its label.
plt.imshow(X[0], cmap='gray')
print(y[0])
|
|
"""
Created on Feb 28, 2017
@author: Siyuan Qi
Description of the file.
"""
import os
import itertools
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics
import tabulate
import config
import metadata
def plot_segmentation(input_labels_list, endframe):
    """Show per-frame label sequences as stacked horizontal color bands.

    Each element of `input_labels_list` gets its own subplot: a 10-row image
    whose columns repeat that frame's label value, so contiguous identical
    labels appear as colored segments.

    Args:
        input_labels_list: list of per-frame label sequences (indexable by frame).
        endframe: number of frames to display.
    """
    plt_idx = 0
    for input_labels in input_labels_list:
        # 10 identical rows just give the band a visible height.
        seg_image = np.empty((10, endframe))
        for frame in range(endframe):
            seg_image[:, frame] = input_labels[frame]
        plt_idx += 1
        ax = plt.subplot(len(input_labels_list), 1, plt_idx)
        plt.imshow(seg_image)
    plt.show()
def visualize_tpg_labeling(gt_subactivity, gt_affordance, tpg, obj_num, end_frame):
    """Plot ground-truth vs. predicted subactivity/affordance segmentations.

    Flattens the temporal parse graph `tpg` into per-frame subactivity and
    affordance labels, then plots each ground-truth sequence, its prediction,
    and their agreement mask via plot_segmentation.

    Args:
        gt_subactivity: per-frame ground-truth subactivity labels.
        gt_affordance: (obj_num, frames) ground-truth affordance labels.
        tpg: parse graph whose `terminals` span [start_frame, end_frame] each.
        obj_num: number of tracked objects.
        end_frame: cap on the number of frames to visualize.
    """
    # Visualization of segmentation and labeling results for subactivity and affordance
    start_frame = tpg.terminals[0].start_frame
    end_frame = np.min([gt_subactivity.shape[0], tpg.terminals[-1].end_frame-start_frame, end_frame])
    # Get labels for every frame
    subactivity_lables = np.empty(end_frame, dtype=int)
    affordance_labels = np.empty((obj_num, end_frame), dtype=int)
    for spg in tpg.terminals:
        # Note: a spg spans [spg.start_frame, spg.end_frame], hence need to +1 in range()
        for frame in range(spg.start_frame, spg.end_frame+1):
            # print frame, spg.subactivity, metadata.subactivities[spg.subactivity]
            if frame >= end_frame + start_frame:
                break
            subactivity_lables[frame-start_frame] = spg.subactivity
            affordance_labels[:, frame-start_frame] = spg.affordance
    # Add labels to the plot list: for each quantity, ground truth, prediction,
    # and a boolean agreement mask.
    plot_labels = [gt_subactivity[:end_frame], subactivity_lables, (gt_subactivity[:end_frame]-subactivity_lables) == 0]
    for o in range(obj_num):
        plot_labels.append(gt_affordance[o, :end_frame])
        plot_labels.append(affordance_labels[o, :])
        plot_labels.append((gt_affordance[o, :end_frame]-affordance_labels[o, :]) == 0)
    plot_segmentation(plot_labels, end_frame)
def plot_confusion_matrix(cm, classes, filename=None, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: square confusion-matrix array.
        classes: tick labels, one per class.
        filename: if given, save the figure there instead of showing it.
        normalize: if True, normalize each row to sum to 1.
        title: plot title.
        cmap: matplotlib colormap for the heat map.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    # Switch annotation text color at half the max value for contrast.
    thresh = cm.max() / 2.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    # plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45, ha='right')
    plt.yticks(tick_marks, classes)
    ax = plt.gca()
    ax.tick_params(axis=u'both', which=u'both', length=0)
    # matplotlib.rcParams.update({'font.size': 15})
    # Annotate only nonzero cells to keep the plot readable.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if cm[i, j] != 0:
            plt.text(j, i, '{0:.2f}'.format(cm[i, j]), verticalalignment='center', horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    # plt.ylabel('True label')
    # plt.xlabel('Predicted label')
    if not filename:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
def save_results(paths, results):
    """Pickle `results` to <tmp_root>/results/labels.p, creating folders as needed.

    Args:
        paths: config object providing the `tmp_root` attribute.
        results: picklable object holding the labeling results.
    """
    result_folder = os.path.join(paths.tmp_root, 'results')
    # BUG FIX: use exist_ok so a partially created tree (results/ present but
    # results/figs missing) is repaired; the original skipped both makedirs
    # whenever results/ already existed.
    os.makedirs(result_folder, exist_ok=True)
    os.makedirs(os.path.join(result_folder, 'figs'), exist_ok=True)
    with open(os.path.join(result_folder, 'labels.p'), 'wb') as f:
        pickle.dump(results, f)
def load_results(paths):
    """Unpickle and return the results stored under <tmp_root>/results/labels.p."""
    labels_path = os.path.join(paths.tmp_root, 'results', 'labels.p')
    with open(labels_path, 'rb') as f:
        return pickle.load(f)
def print_latex_table(data, row_labels, col_labels):
    """Print `data` (fractional scores) as a LaTeX table in percent.

    Args:
        data: 2-D numpy array of fractional scores; scaled by 100 here.
        row_labels: row header strings, one per data row.
        col_labels: column header strings.
    """
    data = data * 100
    row_labels = np.array(row_labels)
    row_labels = np.reshape(row_labels, [row_labels.shape[0], 1])
    # hstack promotes the numeric data to strings alongside the labels.
    data = np.hstack((row_labels, data))
    # BUG FIX: a bare `print` is a no-op expression in Python 3 (Python 2
    # leftover); call it to emit the intended blank separator line.
    print()
    print(tabulate.tabulate(data, tablefmt="latex", floatfmt=".1f", numalign="center", headers=col_labels))
def analyze_results(paths):
    """Evaluate saved detection results and emit LaTeX tables plus confusion matrices.

    Loads pickled ground-truth/prediction label lists, prints chance-vs-ours
    precision/recall/F1 tables for online (frame 0) and future (frame 40)
    prediction, and saves normalized confusion matrices for affordances,
    subactivities, and activities.

    Args:
        paths: config object providing `tmp_root` and `project_root`.
    """
    # 2 * P * R / (P + R); assumes P + R > 0.
    def get_f1_score(precision, recall):
        return 2 * (precision * recall) / (precision + recall)

    # Builds a 2x8 table: row 0 is chance level, row 1 our micro/macro scores
    # for subactivities (cols 0-3) and affordances (cols 4-7).
    def format_table(predict_frame):
        data = np.empty((2, 8))
        data[0, 0:3] = 1.0/len(metadata.subactivities[:-1])
        data[0, 3] = get_f1_score(data[0, 0], data[0, 0])
        data[0, 4:7] = 1.0/len(metadata.affordances)
        data[0, 7] = get_f1_score(data[0, 4], data[0, 4])
        precision, recall, beta_score, support = sklearn.metrics.precision_recall_fscore_support(gt_s[predict_frame], pred_s[predict_frame], labels=range(len(metadata.subactivities)-1), average='micro')
        data[1, 0] = precision
        precision, recall, beta_score, support = sklearn.metrics.precision_recall_fscore_support(gt_s[predict_frame], pred_s[predict_frame], labels=range(len(metadata.subactivities)-1), average='macro')
        data[1, 1] = precision
        data[1, 2] = recall
        data[1, 3] = get_f1_score(precision, recall)
        precision, recall, beta_score, support = sklearn.metrics.precision_recall_fscore_support(gt_u[predict_frame], pred_u[predict_frame], labels=range(len(metadata.affordances)), average='micro')
        data[1, 4] = precision
        precision, recall, beta_score, support = sklearn.metrics.precision_recall_fscore_support(gt_u[predict_frame], pred_u[predict_frame], labels=range(len(metadata.affordances)), average='macro')
        data[1, 5] = precision
        data[1, 6] = recall
        data[1, 7] = get_f1_score(precision, recall)
        print_latex_table(data, methods, metrics)
    # ====================== Function starts here ======================
    # fig_folder = os.path.join(paths.tmp_root, 'results', 'figs')
    fig_folder = os.path.join(paths.project_root, 'fig', 'raw')
    if not os.path.exists(fig_folder):
        os.makedirs(fig_folder)
    seg_gt_s, seg_pred_s, seg_gt_u, seg_pred_u, gt_s, pred_s, gt_u, pred_u, gt_e, pred_e = load_results(paths)
    methods = ['chance', 'ours']
    metrics = ['P/R', 'Prec.', 'Recall', 'F1-score', 'P/R', 'Prec.', 'Recall', 'F1-score']
    # Evaluation
    # TODO: see if need to exclude "null" class
    # Online detection
    predict_frame = 0
    format_table(predict_frame)
    # Future detection
    predict_frame = 40
    # Pool all prediction horizons up to `predict_frame` into one bucket.
    for i in range(predict_frame):
        gt_s[predict_frame].extend(gt_s[i])
        pred_s[predict_frame].extend(pred_s[i])
        gt_u[predict_frame].extend(gt_u[i])
        pred_u[predict_frame].extend(pred_u[i])
    format_table(predict_frame)
    # Plot confusion matrices
    predict_frame = 0
    confusion_matrix = sklearn.metrics.confusion_matrix(gt_u[predict_frame], pred_u[predict_frame], labels=range(len(metadata.affordances)))
    plot_confusion_matrix(confusion_matrix, metadata.affordances, normalize=True, title='', filename=os.path.join(fig_folder, 'confusion_affordance.pdf'))
    confusion_matrix = sklearn.metrics.confusion_matrix(gt_s[predict_frame], pred_s[predict_frame], labels=range(len(metadata.subactivities) - 1))
    plot_confusion_matrix(confusion_matrix, metadata.subactivities[:-1], normalize=True, title='', filename=os.path.join(fig_folder, 'confusion_subactivity.pdf'))
    confusion_matrix = sklearn.metrics.confusion_matrix(gt_e, pred_e, labels=range(len(metadata.activities)))
    plot_confusion_matrix(confusion_matrix, metadata.activities, normalize=True, title='', filename=os.path.join(fig_folder, 'confusion_event.pdf'))
def main():
    """Entry point: load project paths and run the results analysis."""
    paths = config.Paths()
    analyze_results(paths)
    pass


if __name__ == '__main__':
    main()
|
|
from coranking import coranking_matrix
from coranking.metrics import trustworthiness, continuity, LCMC
from nose import tools as nose
import numpy as np
import numpy.testing as npt
from sklearn import manifold, datasets
def test_coranking_matrix_perfect_case():
    """Identical high/low data must produce a diagonal co-ranking matrix."""
    high = np.eye(3)
    low = np.eye(3)
    q_matrix = coranking_matrix(high, low)
    npt.assert_almost_equal(q_matrix, np.array([[3, 0], [0, 3]]))
def test_trustworthiness_perfect_case():
    """A perfect embedding has trustworthiness exactly 1."""
    high = np.eye(5)
    low = np.eye(5)
    q_matrix = coranking_matrix(high, low).astype(np.int64)
    nose.assert_equal(trustworthiness(q_matrix, min_k=2), 1.)
def test_continuity_perfect_case():
    """A perfect embedding has continuity exactly 1."""
    high = np.eye(5)
    low = np.eye(5)
    q_matrix = coranking_matrix(high, low).astype(np.int64)
    nose.assert_equal(continuity(q_matrix, min_k=2), 1.)
def test_trustworthiness():
    """Trustworthiness of the swiss-roll Isomap embedding for k in [5, 6]."""
    high, low = make_datasets()
    q_matrix = coranking_matrix(high, low).astype(np.int64)
    score = trustworthiness(q_matrix, min_k=5, max_k=6)
    nose.assert_almost_equal(score, 0.89, places=2)
def test_trustworthiness_array():
    """Without k bounds, trustworthiness returns one score per k value."""
    high, low = make_datasets()
    scores = trustworthiness(coranking_matrix(high, low))
    nose.assert_equal(scores.shape, (297, ))
def test_continuity():
    # Continuity of the swiss-roll Isomap embedding for k in [5, 6].
    high_data, low_data = make_datasets()
    Q = coranking_matrix(high_data, low_data)
    c = continuity(Q, 5, 6)
    nose.assert_almost_equal(c, 0.98, places=2)
    c2 = trustworthiness(Q, 5, 6)
    # NOTE(review): assert_true(c, c2) treats c2 as the failure *message*, so
    # this only checks that c is truthy -- it never compares c and c2.  The
    # intended relationship is unclear (c ~= 0.98 vs c2 ~= 0.89 per the test
    # above, so equality would fail); confirm what was meant and replace with
    # an explicit assertion.
    nose.assert_true(c, c2)
def test_continuity_array():
    """Without k bounds, continuity returns one score per k value."""
    high, low = make_datasets()
    scores = continuity(coranking_matrix(high, low))
    nose.assert_equal(scores.shape, (297, ))
def test_LCMC():
    """LCMC of the swiss-roll Isomap embedding for k in [5, 6]."""
    high, low = make_datasets()
    lcmc_score = LCMC(coranking_matrix(high, low), 5, 6)
    nose.assert_almost_equal(lcmc_score, 0.377, places=3)
def test_LCMC_array():
    """Without k bounds, LCMC returns one score per k value."""
    high, low = make_datasets()
    scores = LCMC(coranking_matrix(high, low))
    nose.assert_equal(scores.shape, (297, ))
def make_datasets():
    """Build a swiss-roll point cloud and its 2-d Isomap embedding.

    Returns:
        tuple: (high_data, low_data) -- 300x3 swiss-roll samples and their
        300x2 Isomap projection.
    """
    # The private ``sklearn.datasets.samples_generator`` module was removed in
    # scikit-learn 0.24; ``make_swiss_roll`` is (and always was) re-exported
    # from ``sklearn.datasets`` directly.
    high_data, _color = datasets.make_swiss_roll(n_samples=300, random_state=1)
    isomap = manifold.Isomap(n_neighbors=12, n_components=2)
    low_data = isomap.fit_transform(high_data)
    return high_data, low_data
|
|
import gc
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
from misc.point_utils import transform_point_cloud, npmat2euler
def vcrnetIter(net, src, tgt, iter=1):
    """Refine the src->tgt registration by applying ``net`` iteratively.

    Args:
        net: registration network; returns sampled points, correspondences,
            and predicted ab/ba rotations and translations per call.
        src: source point cloud (transposed to the net's layout below).
        tgt: target point cloud.
        iter (int): number of refinement passes (shadows the builtin name;
            kept because callers pass it as a keyword argument).

    Returns:
        tuple: (srcK, src_corrK, R_ab, t_ab, R_ba, t_ba) where R/t are the
        transforms accumulated over all passes; srcK/src_corrK come from the
        final pass only.
    """
    transformed_src = src
    bFirst = True
    for i in range(iter):
        # The network expects point clouds with the coordinate axis last.
        srcK, src_corrK, rotation_ab_pred, translation_ab_pred, rotation_ba_pred, translation_ba_pred = net(
            transformed_src.transpose(-1, -2), tgt.transpose(-1, -2))
        transformed_src = transform_point_cloud(transformed_src, rotation_ab_pred, translation_ab_pred)
        if bFirst:
            bFirst = False
            rotation_ab_pred_final = rotation_ab_pred.detach()
            translation_ab_pred_final = translation_ab_pred.detach()
        else:
            # Compose the new increment on the left of the accumulated pose:
            # R_final <- R_i @ R_final;  t_final <- R_i @ t_final + t_i.
            rotation_ab_pred_final = torch.matmul(rotation_ab_pred.detach(), rotation_ab_pred_final)
            translation_ab_pred_final = torch.matmul(rotation_ab_pred.detach(),
                                                     translation_ab_pred_final.unsqueeze(2)).squeeze(
                2) + translation_ab_pred.detach()
    # Inverse transform: R_ba = R_ab^T and t_ba = -R_ab^T @ t_ab.
    rotation_ba_pred_final = rotation_ab_pred_final.transpose(2, 1).contiguous()
    translation_ba_pred_final = -torch.matmul(rotation_ba_pred_final, translation_ab_pred_final.unsqueeze(2)).squeeze(2)
    return srcK, src_corrK, rotation_ab_pred_final, translation_ab_pred_final, rotation_ba_pred_final, translation_ba_pred_final
def test_one_epoch(iter, net, test_loader):
    """Run one evaluation epoch (no gradients) and collect pose statistics.

    Args:
        iter (int): refinement passes forwarded to :func:`vcrnetIter`; must
            be > 0 (shadows the builtin name; kept for caller compatibility).
        net: the registration model.
        test_loader: yields (src, target, R_ab, t_ab, R_ba, t_ba, euler_ab,
            euler_ba, label) batches.

    Returns:
        17-tuple: per-example pose loss, cycle loss, mse/mae for both
        directions, the stacked ground-truth and predicted
        rotations/translations for ab and ba, Euler angles, and the
        per-example VCRNet correspondence loss.
    """
    net.eval()
    # Running sums over the epoch; divided by num_examples on return.
    mse_ab = 0
    mae_ab = 0
    mse_ba = 0
    mae_ba = 0
    total_loss_VCRNet = 0
    total_loss = 0
    # NOTE(review): total_cycle_loss is never updated below, so the second
    # return value is always 0 -- confirm whether a cycle loss was intended.
    total_cycle_loss = 0
    num_examples = 0
    rotations_ab = []
    translations_ab = []
    rotations_ab_pred = []
    translations_ab_pred = []
    rotations_ba = []
    translations_ba = []
    rotations_ba_pred = []
    translations_ba_pred = []
    eulers_ab = []
    eulers_ba = []
    torch.cuda.empty_cache()
    with torch.no_grad():
        for src, target, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba, label in tqdm(
                test_loader):
            src = src.cuda()
            target = target.cuda()
            rotation_ab = rotation_ab.cuda()
            translation_ab = translation_ab.cuda()
            rotation_ba = rotation_ba.cuda()
            translation_ba = translation_ba.cuda()
            batch_size = src.size(0)
            num_examples += batch_size
            if iter > 0:
                srcK, src_corrK, rotation_ab_pred, translation_ab_pred, rotation_ba_pred, translation_ba_pred = vcrnetIter(
                    net, src, target, iter=iter)
            elif iter == 0:
                # NOTE(review): vcrnetIcpNet is neither defined nor imported in
                # this module, so iter == 0 raises NameError at this call --
                # confirm the missing helper or remove the branch.
                srcK, src_corrK, rotation_ab_pred, translation_ab_pred, rotation_ba_pred, translation_ba_pred = vcrnetIcpNet(
                    net, src, target)
            else:
                raise RuntimeError('iter')
            ## save rotation and translation
            rotations_ab.append(rotation_ab.detach().cpu().numpy())
            translations_ab.append(translation_ab.detach().cpu().numpy())
            rotations_ab_pred.append(rotation_ab_pred.detach().cpu().numpy())
            translations_ab_pred.append(translation_ab_pred.detach().cpu().numpy())
            eulers_ab.append(euler_ab.numpy())
            ##
            rotations_ba.append(rotation_ba.detach().cpu().numpy())
            translations_ba.append(translation_ba.detach().cpu().numpy())
            rotations_ba_pred.append(rotation_ba_pred.detach().cpu().numpy())
            translations_ba_pred.append(translation_ba_pred.detach().cpu().numpy())
            eulers_ba.append(euler_ba.numpy())
            # Predicted point cloud
            transformed_target = transform_point_cloud(target, rotation_ba_pred, translation_ba_pred)
            # Real point cloud
            transformed_srcK = transform_point_cloud(srcK, rotation_ab, translation_ab)
            ###########################
            identity = torch.eye(3).cuda().unsqueeze(0).repeat(batch_size, 1, 1)
            # Correspondence loss: sampled src points under the GT transform
            # should land on the predicted correspondences.
            loss_VCRNet = torch.nn.functional.mse_loss(transformed_srcK, src_corrK)
            # Pose loss: R_pred^T R_gt should be identity, t_pred ~= t_gt.
            loss_pose = F.mse_loss(torch.matmul(rotation_ab_pred.transpose(2, 1), rotation_ab), identity) \
                        + F.mse_loss(translation_ab_pred, translation_ab)
            total_loss_VCRNet += loss_VCRNet.item() * batch_size
            total_loss += loss_pose.item() * batch_size
            mse_ab += torch.mean((transformed_srcK - src_corrK) ** 2, dim=[0, 1, 2]).item() * batch_size
            mae_ab += torch.mean(torch.abs(transformed_srcK - src_corrK), dim=[0, 1, 2]).item() * batch_size
            mse_ba += torch.mean((transformed_target - src) ** 2, dim=[0, 1, 2]).item() * batch_size
            mae_ba += torch.mean(torch.abs(transformed_target - src), dim=[0, 1, 2]).item() * batch_size
    # Stack all per-batch arrays into epoch-level arrays.
    rotations_ab = np.concatenate(rotations_ab, axis=0)
    translations_ab = np.concatenate(translations_ab, axis=0)
    rotations_ab_pred = np.concatenate(rotations_ab_pred, axis=0)
    translations_ab_pred = np.concatenate(translations_ab_pred, axis=0)
    rotations_ba = np.concatenate(rotations_ba, axis=0)
    translations_ba = np.concatenate(translations_ba, axis=0)
    rotations_ba_pred = np.concatenate(rotations_ba_pred, axis=0)
    translations_ba_pred = np.concatenate(translations_ba_pred, axis=0)
    eulers_ab = np.concatenate(eulers_ab, axis=0)
    eulers_ba = np.concatenate(eulers_ba, axis=0)
    return total_loss * 1.0 / num_examples, total_cycle_loss / num_examples, \
           mse_ab * 1.0 / num_examples, mae_ab * 1.0 / num_examples, \
           mse_ba * 1.0 / num_examples, mae_ba * 1.0 / num_examples, rotations_ab, \
           translations_ab, rotations_ab_pred, translations_ab_pred, rotations_ba, \
           translations_ba, rotations_ba_pred, translations_ba_pred, eulers_ab, eulers_ba, total_loss_VCRNet * 1.0 / num_examples
def train_one_epoch(params, net, train_loader, opt):
    """Train ``net`` for one epoch and collect the same statistics as eval.

    Args:
        params: run configuration (unused in the body; kept for signature
            compatibility with callers).
        net: the registration model.
        train_loader: yields (src, target, R_ab, t_ab, R_ba, t_ba, euler_ab,
            euler_ba, label) batches.
        opt: optimizer driving the update.

    Returns:
        Same 17-tuple layout as :func:`test_one_epoch`.
    """
    net.train()
    # Running sums over the epoch; divided by num_examples on return.
    mse_ab = 0
    mae_ab = 0
    mse_ba = 0
    mae_ba = 0
    total_loss_VCRNet = 0
    total_loss = 0
    # NOTE(review): total_cycle_loss is never updated, so it is returned as 0.
    total_cycle_loss = 0
    num_examples = 0
    rotations_ab = []
    translations_ab = []
    rotations_ab_pred = []
    translations_ab_pred = []
    rotations_ba = []
    translations_ba = []
    rotations_ba_pred = []
    translations_ba_pred = []
    eulers_ab = []
    eulers_ba = []
    torch.cuda.empty_cache()
    for src, target, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba, label in tqdm(
            train_loader):
        src = src.cuda()
        target = target.cuda()
        rotation_ab = rotation_ab.cuda()
        translation_ab = translation_ab.cuda()
        rotation_ba = rotation_ba.cuda()
        translation_ba = translation_ba.cuda()
        batch_size = src.size(0)
        opt.zero_grad()
        num_examples += batch_size
        # Single forward pass (no iterative refinement during training).
        srcK, src_corrK, rotation_ab_pred, translation_ab_pred, rotation_ba_pred, translation_ba_pred = net(
            src.transpose(-1, -2), target.transpose(-1, -2))
        ## save rotation and translation
        rotations_ab.append(rotation_ab.detach().cpu().numpy())
        translations_ab.append(translation_ab.detach().cpu().numpy())
        rotations_ab_pred.append(rotation_ab_pred.detach().cpu().numpy())
        translations_ab_pred.append(translation_ab_pred.detach().cpu().numpy())
        eulers_ab.append(euler_ab.numpy())
        ##
        rotations_ba.append(rotation_ba.detach().cpu().numpy())
        translations_ba.append(translation_ba.detach().cpu().numpy())
        rotations_ba_pred.append(rotation_ba_pred.detach().cpu().numpy())
        translations_ba_pred.append(translation_ba_pred.detach().cpu().numpy())
        eulers_ba.append(euler_ba.numpy())
        # NOTE(review): transformed_src is computed but never used below.
        transformed_src = transform_point_cloud(src, rotation_ab_pred, translation_ab_pred)
        transformed_target = transform_point_cloud(target, rotation_ba_pred, translation_ba_pred)
        transformed_srcK = transform_point_cloud(srcK, rotation_ab, translation_ab)
        ###########################
        identity = torch.eye(3).cuda().unsqueeze(0).repeat(batch_size, 1, 1)
        # Only the VCRNet correspondence loss drives the gradient update;
        # loss_pose below is computed for monitoring only.
        loss_VCRNet = torch.nn.functional.mse_loss(transformed_srcK, src_corrK)
        loss_VCRNet.backward()
        total_loss_VCRNet += loss_VCRNet.item() * batch_size
        loss_pose = F.mse_loss(torch.matmul(rotation_ab_pred.transpose(2, 1), rotation_ab), identity) \
                    + F.mse_loss(translation_ab_pred, translation_ab)
        opt.step()
        total_loss += loss_pose.item() * batch_size
        mse_ab += torch.mean((transformed_srcK - src_corrK) ** 2, dim=[0, 1, 2]).item() * batch_size
        mae_ab += torch.mean(torch.abs(transformed_srcK - src_corrK), dim=[0, 1, 2]).item() * batch_size
        mse_ba += torch.mean((transformed_target - src) ** 2, dim=[0, 1, 2]).item() * batch_size
        mae_ba += torch.mean(torch.abs(transformed_target - src), dim=[0, 1, 2]).item() * batch_size
    # Stack all per-batch arrays into epoch-level arrays.
    rotations_ab = np.concatenate(rotations_ab, axis=0)
    translations_ab = np.concatenate(translations_ab, axis=0)
    rotations_ab_pred = np.concatenate(rotations_ab_pred, axis=0)
    translations_ab_pred = np.concatenate(translations_ab_pred, axis=0)
    rotations_ba = np.concatenate(rotations_ba, axis=0)
    translations_ba = np.concatenate(translations_ba, axis=0)
    rotations_ba_pred = np.concatenate(rotations_ba_pred, axis=0)
    translations_ba_pred = np.concatenate(translations_ba_pred, axis=0)
    eulers_ab = np.concatenate(eulers_ab, axis=0)
    eulers_ba = np.concatenate(eulers_ba, axis=0)
    return total_loss * 1.0 / num_examples, total_cycle_loss / num_examples, \
           mse_ab * 1.0 / num_examples, mae_ab * 1.0 / num_examples, \
           mse_ba * 1.0 / num_examples, mae_ba * 1.0 / num_examples, rotations_ab, \
           translations_ab, rotations_ab_pred, translations_ab_pred, rotations_ba, \
           translations_ba, rotations_ba_pred, translations_ba_pred, eulers_ab, eulers_ba, total_loss_VCRNet * 1.0 / num_examples
def trainVCRNet(params, net, train_loader, test_loader, boardio, textio):
    """Full training driver: epochs, LR scheduling, logging, checkpointing.

    Args:
        params: config providing ``epochs``, ``iter`` and ``log_dir``.
        net: model to optimize (may be wrapped in DataParallel).
        train_loader / test_loader: loaders for the two phases.
        boardio: TensorBoard-style writer (``add_scalar``).
        textio: logger exposing ``cprint``.
    """
    # opt = optim.Adam(net.parameters(), lr=0.01, weight_decay=1e-4)
    opt = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    scheduler = ReduceLROnPlateau(opt, mode='min', factor=0.1, patience=3, verbose=False, threshold=0.0001)
    # Best-so-far test metrics; updated whenever the pose loss improves.
    best_test_loss = np.inf
    best_test_cycle_loss = np.inf
    best_test_mse_ab = np.inf
    best_test_rmse_ab = np.inf
    best_test_mae_ab = np.inf
    best_test_r_mse_ab = np.inf
    best_test_r_rmse_ab = np.inf
    best_test_r_mae_ab = np.inf
    best_test_t_mse_ab = np.inf
    best_test_t_rmse_ab = np.inf
    best_test_t_mae_ab = np.inf
    for epoch in range(params.epochs):
        train_loss_Pose, train_cycle_loss, \
        train_mse_ab, train_mae_ab, train_mse_ba, train_mae_ba, train_rotations_ab, train_translations_ab, \
        train_rotations_ab_pred, \
        train_translations_ab_pred, train_rotations_ba, train_translations_ba, train_rotations_ba_pred, \
        train_translations_ba_pred, train_eulers_ab, train_eulers_ba, train_loss_VCRNet = train_one_epoch(params, net,
                                                                                                          train_loader,
                                                                                                          opt)
        test_loss_Pose, test_cycle_loss_Pose, \
        test_mse_ab, test_mae_ab, test_mse_ba, test_mae_ba, test_rotations_ab, test_translations_ab, \
        test_rotations_ab_pred, \
        test_translations_ab_pred, test_rotations_ba, test_translations_ba, test_rotations_ba_pred, \
        test_translations_ba_pred, test_eulers_ab, test_eulers_ba, test_loss_VCRNet = test_one_epoch(params.iter, net,
                                                                                                     test_loader)
        train_rmse_ab = np.sqrt(train_mse_ab)
        test_rmse_ab = np.sqrt(test_mse_ab)
        # Rotation error is measured in degrees between Euler-angle triples.
        train_rotations_ab_pred_euler = npmat2euler(train_rotations_ab_pred)
        train_r_mse_ab = np.mean((train_rotations_ab_pred_euler - np.degrees(train_eulers_ab)) ** 2)
        train_r_rmse_ab = np.sqrt(train_r_mse_ab)
        train_r_mae_ab = np.mean(np.abs(train_rotations_ab_pred_euler - np.degrees(train_eulers_ab)))
        train_t_mse_ab = np.mean((train_translations_ab - train_translations_ab_pred) ** 2)
        train_t_rmse_ab = np.sqrt(train_t_mse_ab)
        train_t_mae_ab = np.mean(np.abs(train_translations_ab - train_translations_ab_pred))
        test_rotations_ab_pred_euler = npmat2euler(test_rotations_ab_pred)
        test_r_mse_ab = np.mean((test_rotations_ab_pred_euler - np.degrees(test_eulers_ab)) ** 2)
        test_r_rmse_ab = np.sqrt(test_r_mse_ab)
        test_r_mae_ab = np.mean(np.abs(test_rotations_ab_pred_euler - np.degrees(test_eulers_ab)))
        test_t_mse_ab = np.mean((test_translations_ab - test_translations_ab_pred) ** 2)
        test_t_rmse_ab = np.sqrt(test_t_mse_ab)
        test_t_mae_ab = np.mean(np.abs(test_translations_ab - test_translations_ab_pred))
        if best_test_loss >= test_loss_Pose:
            # New best pose loss: remember all metrics and checkpoint.
            best_test_loss = test_loss_Pose
            best_test_cycle_loss = test_cycle_loss_Pose
            best_test_mse_ab = test_mse_ab
            best_test_rmse_ab = test_rmse_ab
            best_test_mae_ab = test_mae_ab
            best_test_r_mse_ab = test_r_mse_ab
            best_test_r_rmse_ab = test_r_rmse_ab
            best_test_r_mae_ab = test_r_mae_ab
            best_test_t_mse_ab = test_t_mse_ab
            best_test_t_rmse_ab = test_t_rmse_ab
            best_test_t_mae_ab = test_t_mae_ab
            import os
            from os.path import join, exists
            model_dir = join(params.log_dir, "model")
            if not exists(model_dir):
                os.makedirs(model_dir)
            # Unwrap DataParallel before saving so the checkpoint loads on 1 GPU.
            if torch.cuda.device_count() > 1:
                torch.save(net.module.state_dict(), join(model_dir, "model.{}.t7".format(epoch)))
            else:
                torch.save(net.state_dict(), join(model_dir, "model.{}.t7".format(epoch)))
        # scheduler.step()
        # NOTE(review): the plateau scheduler is stepped with the best-so-far
        # loss (monotonically non-increasing) rather than the current epoch's
        # test loss -- confirm this is the intended plateau signal.
        scheduler.step(best_test_loss)
        lr = opt.param_groups[0]['lr']
        # Early stop once the LR has decayed below ~1e-6.
        if lr <= 0.0000011:
            break
        textio.cprint('==TRAIN==')
        textio.cprint('A--------->B')
        textio.cprint(
            'EPOCH:: %d, Loss: %f, LossPose: %f, Cycle Loss:, %f, lr: %f, MSE: %f, RMSE: %f, MAE: %f, rot_MSE: %f, rot_RMSE: %f, '
            'rot_MAE: %f, trans_MSE: %f, trans_RMSE: %f, trans_MAE: %f'
            % (
                epoch, train_loss_VCRNet, train_loss_Pose, train_cycle_loss, lr, train_mse_ab, train_rmse_ab,
                train_mae_ab,
                train_r_mse_ab,
                train_r_rmse_ab, train_r_mae_ab, train_t_mse_ab, train_t_rmse_ab, train_t_mae_ab))
        textio.cprint('==TEST==')
        textio.cprint('A--------->B')
        textio.cprint(
            'EPOCH:: %d, Loss: %f, LossPose: %f, Cycle Loss: %f, MSE: %f, RMSE: %f, MAE: %f, rot_MSE: %f, rot_RMSE: %f, '
            'rot_MAE: %f, trans_MSE: %f, trans_RMSE: %f, trans_MAE: %f'
            % (epoch, test_loss_VCRNet, test_loss_Pose, test_cycle_loss_Pose, test_mse_ab, test_rmse_ab, test_mae_ab,
               test_r_mse_ab,
               test_r_rmse_ab, test_r_mae_ab, test_t_mse_ab, test_t_rmse_ab, test_t_mae_ab))
        textio.cprint('==BEST TEST==')
        textio.cprint('A--------->B')
        textio.cprint('EPOCH:: %d, Loss: %f, Cycle Loss: %f, MSE: %f, RMSE: %f, MAE: %f, rot_MSE: %f, rot_RMSE: %f, '
                      'rot_MAE: %f, trans_MSE: %f, trans_RMSE: %f, trans_MAE: %f'
                      % (epoch, best_test_loss, best_test_cycle_loss, best_test_mse_ab, best_test_rmse_ab,
                         best_test_mae_ab, best_test_r_mse_ab, best_test_r_rmse_ab,
                         best_test_r_mae_ab, best_test_t_mse_ab, best_test_t_rmse_ab, best_test_t_mae_ab))
        # boardio.add_scalar('A->B/train/loss', train_loss_VCRNet, epoch)
        # boardio.add_scalar('A->B/train/lossPose', train_loss_Pose, epoch)
        ############TEST
        boardio.add_scalar('A->B/test/loss', test_loss_VCRNet, epoch)
        boardio.add_scalar('A->B/test/lossPose', test_loss_Pose, epoch)
        boardio.add_scalar('A->B/test/rotation/RMSE', test_r_rmse_ab, epoch)
        boardio.add_scalar('A->B/test/translation/MAE', test_t_mae_ab, epoch)
        ############BEST TEST
        boardio.add_scalar('A->B/best_test/lr', lr, epoch)
        boardio.add_scalar('A->B/best_test/loss', best_test_loss, epoch)
        boardio.add_scalar('A->B/best_test/rotation/MAE', best_test_r_mae_ab, epoch)
        boardio.add_scalar('A->B/best_test/translation/MAE', best_test_t_mae_ab, epoch)
        import os
        from os.path import join, exists
        model_dir = join(params.log_dir, "model")
        if not exists(model_dir):
            os.makedirs(model_dir)
        # A checkpoint is also written unconditionally every epoch (best
        # epochs are therefore saved twice with the same filename).
        if torch.cuda.device_count() > 1:
            torch.save(net.module.state_dict(), join(model_dir, "model.{}.t7".format(epoch)))
        else:
            torch.save(net.state_dict(), join(model_dir, "model.{}.t7".format(epoch)))
        gc.collect()
def testVCRNet(iter, net, test_loader):
    """Evaluate ``net`` once on ``test_loader`` and print aggregate metrics.

    Args:
        iter (int): refinement passes forwarded to :func:`test_one_epoch`.
        net: trained registration model.
        test_loader: evaluation data loader.
    """
    test_loss_Pose, test_cycle_loss_Pose, \
    test_mse_ab, test_mae_ab, test_mse_ba, test_mae_ba, test_rotations_ab, test_translations_ab, \
    test_rotations_ab_pred, \
    test_translations_ab_pred, test_rotations_ba, test_translations_ba, test_rotations_ba_pred, \
    test_translations_ba_pred, test_eulers_ab, test_eulers_ba, test_loss_VCRNet = test_one_epoch(iter, net,
                                                                                                 test_loader)
    test_rmse_ab = np.sqrt(test_mse_ab)
    # Rotation error in degrees between predicted and ground-truth Euler angles.
    test_rotations_ab_pred_euler = npmat2euler(test_rotations_ab_pred)
    test_r_mse_ab = np.mean((test_rotations_ab_pred_euler - np.degrees(test_eulers_ab)) ** 2)
    test_r_rmse_ab = np.sqrt(test_r_mse_ab)
    test_r_mae_ab = np.mean(np.abs(test_rotations_ab_pred_euler - np.degrees(test_eulers_ab)))
    test_t_mse_ab = np.mean((test_translations_ab - test_translations_ab_pred) ** 2)
    test_t_rmse_ab = np.sqrt(test_t_mse_ab)
    test_t_mae_ab = np.mean(np.abs(test_translations_ab - test_translations_ab_pred))
    print('==TEST==')
    print('A--------->B')
    print(
        'Loss: %f, LossPose: %f, Cycle Loss: %f, MSE: %f, RMSE: %f, MAE: %f, rot_MSE: %f, rot_RMSE: %f, '
        'rot_MAE: %f, trans_MSE: %f, trans_RMSE: %f, trans_MAE: %f'
        % (test_loss_VCRNet, test_loss_Pose, test_cycle_loss_Pose, test_mse_ab, test_rmse_ab, test_mae_ab,
           test_r_mse_ab,
           test_r_rmse_ab, test_r_mae_ab, test_t_mse_ab, test_t_rmse_ab, test_t_mae_ab))
|
|
"""
Module to parse all the data provided for Telstra Network Disruption
competition. Additionally perform feature engineering.
"""
from __future__ import print_function
import numpy as np
import pandas as pd
def parse_single_attr(attribute=None, frame=None, use_frame=False):
    """One-hot encode a single attribute and aggregate it per ``id``.

    Args:
        attribute (str): column to encode; also names the CSV file
            ("<attribute>.csv") when ``use_frame`` is False.
        frame (pandas.DataFrame): pre-loaded frame with ``id`` and
            ``attribute`` columns; used only when ``use_frame`` is True
            (the ``attribute`` column is dropped from it in place).
        use_frame (bool): read from ``frame`` instead of "<attribute>.csv".

    Returns:
        pandas.DataFrame: one row per ``id`` with summed one-hot counts.
    """
    if use_frame:
        single_attr_df = frame
    else:
        single_attr_df = pd.read_csv(attribute + ".csv")
    # One-hot encode the attribute column.
    if attribute == "volume":
        one_hot_single_attrs = single_attr_df[attribute].str.get_dummies()
        # Prefix the raw volume values so column names stay unambiguous.
        one_hot_single_attrs.columns = [
            "volume_" + col for col in one_hot_single_attrs.columns.tolist()]
    else:
        # (The original chained ``.str.join("")`` first, which is an identity
        # operation on plain strings and has been dropped.)
        one_hot_single_attrs = single_attr_df[attribute].str.get_dummies()
    del single_attr_df[attribute]
    single_attr_df = pd.concat([single_attr_df, one_hot_single_attrs], axis=1)
    # Collapse to one row per id by summing the one-hot columns.
    # groupby with a scalar key (not a list) keeps group_id a scalar on
    # pandas >= 2.0 as well.
    compressed_array = []
    for group_id, group in single_attr_df.groupby("id"):
        summed = np.asarray(group).sum(axis=0)
        # The id column (assumed first, as in the input frames) was summed
        # over the group; restore the key directly instead of dividing --
        # in-place true division on an integer numpy array raises TypeError
        # on modern numpy.
        summed[0] = group_id
        compressed_array.append(summed)
    return pd.DataFrame(compressed_array, columns=single_attr_df.columns.tolist())
def make_one_hot_data():
    """Write one_hot_data.csv: every attribute one-hot encoded, merged on id."""
    event_frame = parse_single_attr(attribute="event_type")
    resource_frame = parse_single_attr(attribute="resource_type")
    severity_frame = parse_single_attr(attribute="severity_type")
    # log_feature.csv carries two attributes; encode each separately.
    log_feature_df = pd.read_csv("log_feature.csv")
    feature_frame = parse_single_attr(
        attribute="log_feature", frame=log_feature_df[["id", "log_feature"]],
        use_frame=True)
    volume_type_frame = parse_single_attr(
        attribute="volume", frame=log_feature_df[["id", "volume"]],
        use_frame=True)
    merged = event_frame
    for attr_frame in (severity_frame, feature_frame, volume_type_frame):
        pass  # placeholder removed below
    merged = pd.merge(event_frame, resource_frame, on="id")
    merged = pd.merge(merged, severity_frame, on="id")
    merged = pd.merge(merged, feature_frame, on="id")
    merged = pd.merge(merged, volume_type_frame, on="id")
    merged.to_csv("one_hot_data.csv", index=False)
def main():
    """Attach one-hot features to the raw train/test rows and save them."""
    # make_one_hot_data()
    train_df = pd.read_csv("train.csv")
    test_df = pd.read_csv("test.csv")

    def location_to_int(loc):
        # "location 123" -> 123
        return int(loc.split()[1])

    train_df["location"] = train_df["location"].apply(location_to_int)
    test_df["location"] = test_df["location"].apply(location_to_int)
    one_hot_df = pd.read_csv("one_hot_data.csv")
    pd.merge(one_hot_df, train_df, on="id").to_csv("parsed_train.csv", index=False)
    pd.merge(one_hot_df, test_df, on="id").to_csv("parsed_test.csv", index=False)
# Script entry point: build the merged train/test feature tables.
if __name__ == "__main__":
    main()
|
|
#!/usr/bin/env python
import sys, os, time, numpy, scipy
def somefunc1(x):  # function of a scalar argument
    """Return sin(x) for x >= 0 and 0 otherwise (scalar x)."""
    if x < 0:
        r = 0
    else:
        # scipy stopped re-exporting numpy's elementwise functions
        # (scipy.sin was deprecated in 1.0 and later removed); call
        # numpy.sin directly.
        r = numpy.sin(x)
    return r
def somefunc2(x):  # function of a scalar argument
    """Return sin(x) for x >= 0 and 0 otherwise (scalar x)."""
    # BUG FIX: ``math`` was never imported at module level (only
    # sys/os/time/numpy/scipy), so the original raised NameError whenever
    # x >= 0; import it locally.
    import math
    if x < 0:
        r = 0
    else:
        r = math.sin(x)
    return r
def somefunc3(x):  # function of a scalar argument
    """Return numpy.sin(x) for a non-negative scalar x, else 0."""
    return 0 if x < 0 else numpy.sin(x)
def somefunc_NumPy2(x):
    """Vectorized variant: elementwise sin(x), with 0 where x < 0.

    Args:
        x (numpy.ndarray): array of values.

    Returns:
        numpy.ndarray: sin(x) with entries for x[i] < 0 replaced by 0.0.
    """
    # BUG FIX: the bare names less/sin/where relied on ``from scipy import *``
    # re-exporting numpy's namespace, which modern scipy no longer does;
    # qualify them with the numpy module imported at the top of the file.
    lt0_indices = numpy.less(x, 0)  # boolean mask of indices i where x[i] < 0
    r = numpy.sin(x)
    # insert 0 for all elements where x[i] < 0:
    r = numpy.where(lt0_indices, 0.0, r)
    return r
#--------------- demonstrate some SciPy functionality ----------------
from scipy import *
def integrate_func():
    # Quadrature demo (Python 2 syntax): integrate sin(x) over [0, pi];
    # the exact value is 2.
    def myfunc(x):
        return sin(x)
    result, error = integrate.quad(myfunc, 0, pi)
    print result, error
    # Same integral with extra parameters passed through ``args`` and a
    # tightened absolute error tolerance.
    def myfunc(x, a, b):
        return a + b*sin(x)
    a=0; b=1
    result, error = integrate.quad(myfunc, 0, pi, args=(a,b), epsabs=1.0e-9)
    print result, error
class Oscillator:
    """Implementation of the oscillator code using SciPy.

    Integrates m*y'' + b*y' + c*f(y) = A*cos(w*t) with y(0)=y0, y'(0)=0,
    written as a first-order system and solved with scipy.integrate.odeint.
    """
    def __init__(self, **kwargs):
        """Initialize parameters from keyword arguments."""
        # Default parameters; any subset can be overridden via kwargs.
        self.p = {'m': 1.0, 'b': 0.7, 'c': 5.0, 'func': 'y',
                  'A': 5.0, 'w': 2*pi, 'y0': 0.2,
                  'tstop': 30.0, 'dt': 0.05}
        self.p.update(kwargs)
    def scan(self):
        """
        Read parameters from standard input in the same
        sequence as the F77 oscillator code.
        """
        for name in 'm', 'b', 'c', 'func', 'A', 'w', \
            'y0', 'tstop', 'dt':
            if name == 'func':  # expect string
                self.p['func'] = sys.stdin.readline().strip()
            else:
                self.p[name] = float(sys.stdin.readline())
    def solve(self):
        """Solve the ODE system and write (t, y) samples to sim.dat."""
        # mapping: name of f(y) to Python function for f(y):
        self._fy = {'y': lambda y: y, 'siny': lambda y: sin(y),
                    'y3': lambda y: y - y**3/6.0}
        # set initial conditions:
        self.y0 = [self.p['y0'], 0.0]
        # call SciPy solver:
        from scitools.numpyutils import seq
        self.t = seq(0, self.p['tstop'], self.p['dt'])
        from scipy.integrate import odeint
        self.yvec = odeint(self.f, self.y0, self.t)
        self.y = self.yvec[:,0]  # y(t)
        # write t and y(t) to sim.dat file:
        f = open('sim.dat', 'w')
        for y, t in zip(self.y, self.t):
            f.write('%g %g\n' % (t, y))
        f.close()
    def f(self, y, t):
        """Right-hand side of 1st-order ODE system."""
        # BUG FIX: the original read ``p[k]`` although only ``self.p`` exists,
        # raising NameError on the first solver step.  The key tuple is also
        # parenthesized: a bare tuple inside a comprehension is Python2-only
        # syntax.
        A, w, b, c, m = [self.p[k] for k in ('A', 'w', 'b', 'c', 'm')]
        f = self._fy[self.p['func']]
        return [y[1], (A*cos(w*t) - b*y[1] - c*f(y[0]))/m]
def test_Oscillator(dt=0.05):
    # Solve with odeint and time it; os.times()[0]/[1] are this process's
    # user and system CPU time.
    s = Oscillator(m=5, dt=dt)
    t1 = os.times()
    s.solve()
    t2 = os.times()
    print 'CPU time of odeint:', t2[0]-t1[0] + t2[1]-t1[1]
    # compare with the oscillator program:
    cmd = './simviz1.py -noscreenplot -case tmp1'
    for option in s.p:  # construct command-line options
        cmd += ' -'+option + ' ' + str(s.p[option])
    # NOTE(review): the ``commands`` module is Python2-only (removed in 3.x);
    # ``subprocess`` is the replacement if this script is ever ported.
    import commands
    t3 = os.times()
    failure, output = commands.getstatusoutput(cmd)
    t4 = os.times()
    # Indices 2/3 of os.times() are the CPU times of *child* processes.
    print 'CPU time of oscillator:', t4[2]-t3[2] + t4[3]-t3[3]
    # plot both solutions for visual comparison (requires scitools):
    from scitools.filetable import readfile
    t, y = readfile(os.path.join('tmp1','sim.dat'))
    from scitools.easyviz import *
    plot(t, y, 'r-', s.t, s.y, 'b-', legend=('RK2', 'LSODE'))
    hardcopy('tmp.ps')
def statistics():
    # Sample a normal distribution via scipy.stats and print summary stats.
    pd = stats.norm(loc=1, scale=0.5)  # normal distribution N(1,0.5)
    n=10000
    r = pd.rvs(n)  # random variates
    # NOTE(review): RandomArray belongs to the ancient Numeric package and is
    # unavailable alongside modern numpy (numpy.random is the successor);
    # this also overwrites the scipy-generated sample above.
    import RandomArray
    r = RandomArray.normal(1, 0.1, n)
    s = stats.stats
    print pd.stats()
    print 'mean=%g stdev=%g skewness=%g kurtosis=%g' % \
          (s.mean(r), s.variation(r), s.skew(r), s.kurtosis(r))
    bin_counts, bin_min, min_width, noutside = s.histogram(r, numbins=50)
def linear_algebra():
    # Build a small dense system A x = b with a known solution x, solve it,
    # and verify the solver's result element-wise.
    # init:
    n = 4
    A = zeros(n*n, float); A.shape = (n,n)
    x = zeros(n, float)
    b = zeros(n, float)
    for i in range(n):
        x[i] = i/2.0  # some prescribed solution
        for j in range(n):
            A[i,j] = 2.0 + float(i+1)/float(j+i+1)
    b = dot(A,x)  # adjust rhs to fit x
    y = linalg.solve(A, b)
    # tight absolute and relative tolerances for the round-trip check:
    if allclose(x, y, atol=1.0E-12, rtol=1.0E-12):
        print 'correct solution'
    else:
        print 'wrong solution', x, y
# test part:
# Command-line dispatcher: sys.argv[1] selects which demo to run.
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        print 'Usage: SciPy.py integrate | Oscillator dt | statistics'
        sys.exit(1)
    test = sys.argv[1]
    if test == 'integrate':
        integrate_func()
    elif test == 'Oscillator':
        # second argument is the time step dt
        test_Oscillator(dt=float(sys.argv[2]))
    elif test == 'statistics':
        statistics()
    elif test == 'linalg':
        linear_algebra()
    else:
        print test, 'not implemented'
|
|
#!/bin/python
"""
A module to handle 3D data with axes.
colorview2d.Data consists of a 2d array and x and y axes.
The class provides methods to rotate, flip, copy and save
the datafile.
Example
-------
::
file = Data(np.random.random(100, 100))
file.rotate_cw()
file.report()
file.save('newdata.dat')
"""
import copy
import logging
import numpy as np
class Data(object):
"""
``Data`` hosts, well, the data and its axes.
Data is stored in a 2d :class:`numpy-ndarray`.
For the axes, only the bounds are stored. We assume linear scaling of the axes.
If no bounds are specified, we use ``(0, n)`` as boundaries, ``n``
being the number of rows and columns, respectively.
"""
def __init__(self, data, range_bounds=None):
"""Initialize a data object.
Args:
data (numpy.array): the two-dimensional array holding the data.
range_bounds (tuple of tuples): y-range boundaries as a tuple (bottom, top),
x-range boundaries as a tuple (left, right)
"""
self._zdata = data
self._xrange_bounds = None
self._yrange_bounds = None
try:
self.xrange_bounds = range_bounds[1]
self.yrange_bounds = range_bounds[0]
except (AssertionError, IndexError, TypeError):
logging.warn('Ranges not specified correctly. '
'Should be ((y_bottom, y_top), (x_left, x_right)). '
'Using index dimensions as ranges.')
self._xrange_bounds = (0., float(self._zdata.shape[1] - 1))
self._yrange_bounds = (0., float(self._zdata.shape[0] - 1))
    @property
    def xleft(self):
        """Left boundary value of the x-axis."""
        return self._xrange_bounds[0]
    @property
    def xright(self):
        """Right boundary value of the x-axis."""
        return self._xrange_bounds[1]
    @property
    def xmin(self):
        """Minimum value of the x-axis range."""
        return min(self._xrange_bounds)
    @property
    def xmax(self):
        """Maximum value of the x-axis range."""
        return max(self._xrange_bounds)
    @property
    def dx(self):
        """Spacing of x-axis values (signed: negative when bounds are reversed,
        e.g. after flip_lr)."""
        return (self._xrange_bounds[1] - self._xrange_bounds[0]) /\
            (self._zdata.shape[1] - 1)
    @property
    def ytop(self):
        """Top boundary value of the y-axis."""
        return self._yrange_bounds[1]
    @property
    def ybottom(self):
        """Bottom boundary value of the y-axis."""
        return self._yrange_bounds[0]
    @property
    def ymin(self):
        """Minimum value of the y-axis range."""
        return min(self._yrange_bounds)
    @property
    def ymax(self):
        """Maximum value of the y-axis range."""
        return max(self._yrange_bounds)
    @property
    def dy(self):
        """Spacing of y-axis values (signed: negative when bounds are reversed,
        e.g. after flip_ud)."""
        return (self._yrange_bounds[1] - self._yrange_bounds[0]) /\
            (self._zdata.shape[0] - 1)
    @property
    def zdata(self):
        """2d :class:`numpy.ndarray`."""
        return self._zdata
    @property
    def zmin(self):
        """Minimum value of the 2d :class:`numpy.ndarray`."""
        return np.amin(self._zdata)
    @property
    def zmax(self):
        """Maximum value of the 2d :class:`numpy.ndarray`."""
        return np.amax(self._zdata)
    @property
    def xwidth(self):
        """Size of the array along the x-axis (number of columns)."""
        return self._zdata.shape[1]
    @property
    def ywidth(self):
        """Size of the array along the y-axis (number of rows)."""
        return self._zdata.shape[0]
    @zdata.setter
    def zdata(self, data):
        """Set a new 2d :class:`numpy.ndarray`."""
        # NOTE: assert-based validation disappears under ``python -O``.
        assert isinstance(data, np.ndarray), \
            'Not a numpy array. Please provide a numpy array for Data creation.'
        assert len(data.shape) == 2, 'Provide a two-dimensional array for Data creation.'
        self._zdata = data
    @property
    def y_range(self):
        """A linear y-range array (one sample per data row)."""
        return np.linspace(
            self._yrange_bounds[0], self._yrange_bounds[1], self.zdata.shape[0])
    @property
    def x_range(self):
        """A linear x-range array (one sample per data column)."""
        return np.linspace(
            self._xrange_bounds[0], self._xrange_bounds[1], self.zdata.shape[1])
    @property
    def xrange_bounds(self):
        """Boundary values on the x-axis as a tuple (left, right)."""
        return self._xrange_bounds
    @xrange_bounds.setter
    def xrange_bounds(self, range_boundaries):
        # Accepts any two-element sequence; boundaries are coerced to float.
        assert len(range_boundaries) == 2, 'Boundaries of x-axis range not specified correctly.'
        self._xrange_bounds = (float(range_boundaries[0]), float(range_boundaries[1]))
    @property
    def yrange_bounds(self):
        """Boundary values on the y-axis as a tuple (bottom, top)."""
        return self._yrange_bounds
    @yrange_bounds.setter
    def yrange_bounds(self, range_boundaries):
        # Accepts any two-element sequence; boundaries are coerced to float.
        assert len(range_boundaries) == 2, 'Boundaries of y-axis range not specified correctly.'
        self._yrange_bounds = (float(range_boundaries[0]), float(range_boundaries[1]))
    def report(self):
        """
        Print a data report to the standard output.
        """
        print(
            "There are {0} lines and {1} columns in the datafile.\n"
            .format(self._zdata.shape[0], self._zdata.shape[1]))
        print(
            "X-axis range from {0} to {1}".format(self.xleft, self.xright),
            "Y-axis range from {0} to {1}".format(self.ybottom, self.ytop))
    def deep_copy(self):
        """
        Deep copy the :class:`colorview2d.Data` object and return the copy.

        Returns:
            A copy of the :class:`Colorview2d.Data` instance.
        """
        tmp = copy.deepcopy(self)
        # Re-copy the array explicitly so the clone never aliases our buffer.
        tmp.zdata = np.copy(self._zdata)
        return tmp
    def rotate_cw(self):
        """
        Rotate the data clockwise. The axes are updated as well.
        """
        # NOTE(review): np.rot90(k=1) rotates counter-clockwise in array-index
        # terms; whether that renders as clockwise depends on the plotting
        # orientation of the y-axis -- confirm against the renderer.
        self.zdata = np.rot90(self._zdata, k=1)
        old_xrange_boundaries = self._xrange_bounds
        old_yrange_boundaries = self._yrange_bounds
        # x takes over the old y-range; y takes the old x-range reversed.
        self._xrange_bounds = old_yrange_boundaries
        self._yrange_bounds = old_xrange_boundaries[::-1]
    def rotate_ccw(self):
        """
        Rotate the data counter-clockwise. The axes are updated as well.
        """
        self.zdata = np.rot90(self._zdata, k=3)
        old_xrange_boundaries = self._xrange_bounds
        old_yrange_boundaries = self._yrange_bounds
        # x takes the old y-range reversed; y takes over the old x-range.
        self._xrange_bounds = old_yrange_boundaries[::-1]
        self._yrange_bounds = old_xrange_boundaries
    def flip_lr(self):
        """
        Flip the left and the right side of the data. The axes are updated as well.
        """
        self.zdata = np.fliplr(self._zdata)
        # Swap the x boundaries so axis labels keep matching the data.
        self._xrange_bounds = self._xrange_bounds[::-1]
    def flip_ud(self):
        """
        Flip the up and the down side of the data. The axes are updated as well.
        """
        self.zdata = np.flipud(self._zdata)
        # Swap the y boundaries so axis labels keep matching the data.
        self._yrange_bounds = self._yrange_bounds[::-1]
def is_within_xbounds(self, val):
"""Check if the given value is within the xrange.
Returns:
a boolean.
"""
return val >= self.xmin or val <= self.xmax
def is_within_ybounds(self, val):
"""Check if the given value is within the yrange.
Returns:
a boolean.
"""
return val >= self.ymin or val <= self.ymax
def is_within_bounds(self, coordinate):
"""Check if the given coordinate (y, x) is within the ranges
of the axes.
Returns:
a boolean.
"""
return self.is_within_xbounds(coordinate[1]) or self.is_within_ybounds(coordinate[0])
    def crop(self, boundaries):
        """
        Crop the data to a subset of the array specifying the corners of the subset in
        units of the axes ranges.

        Args:
            boundaries (tuple): (bottom boundary, top boundary,
                left boundary, right boundary)
        """
        bottom_boundary, top_boundary = (boundaries[0], boundaries[1])
        left_boundary, right_boundary = (boundaries[2], boundaries[3])
        # NOTE: assert-based validation disappears under ``python -O``.
        assert self.is_within_bounds((bottom_boundary, left_boundary)),\
            'crop: Bottom left edge not within boundaries.'
        assert self.is_within_bounds((top_boundary, right_boundary)),\
            'crop: Top right edge not within boundaries.'
        # Translate axis values into array indices (slice is inclusive below).
        xleft_idx = self.x_range_idx_by_val(left_boundary)
        xright_idx = self.x_range_idx_by_val(right_boundary)
        ybottom_idx = self.y_range_idx_by_val(bottom_boundary)
        ytop_idx = self.y_range_idx_by_val(top_boundary)
        # Update the bounds before slicing so axes and data stay consistent.
        self._xrange_bounds = (left_boundary, right_boundary)
        self._yrange_bounds = (bottom_boundary, top_boundary)
        self.zdata = self._zdata[ybottom_idx:ytop_idx + 1, xleft_idx:xright_idx + 1]
def x_range_idx_by_val(self, value):
"""
Return the nearest index of a value within the x axis range.
Args:
value: A value in the range of the x axis
Returns:
The closest index on the x axis range.
"""
assert self.is_within_xbounds(value), 'Value %f out of xrange.' % value
return int(round(abs(self.xleft - value) / self.dx))
def y_range_idx_by_val(self, value):
"""
Return the nearest index of a value within the y axis range.
Args:
value: A value in the range of the y axis
Returns:
The closest index on the y axis range.
"""
assert self.is_within_ybounds(value), 'Value %f out of yrange.' % value
return int(round(abs(self.ybottom - value) / self.dy))
def idx_by_val_coordinate(self, coordinate):
"""Return the nearest index pair for a coordinate pair (y, x) along the
two axes.
Args:
coordinate (tuple): y-axis value, x-axis value (inverse order!)
Returns:
(y-axis index, x-axis index) -- both integer
"""
return (self.y_range_idx_by_val(coordinate[0]), self.x_range_idx_by_val(coordinate[1]))
    def extract_ylinetrace(self, xval, ystartval, ystopval):
        """Extract a linetrace along a given y-axis range for a specific
        value on the x axis.
        Args:
            xval (float): Position of the linecut along the x-axis.
            ystartval (float): First and ...
            ystopval (float): last value of the range along the y-axis.
        Returns:
            numpy array with two rows
                [0] linecutdata
                [1] y-axis range
            Both rows run from ystartval towards ystopval, also when that
            direction is descending on the grid.
        """
        y_start_idx = self.y_range_idx_by_val(ystartval)
        y_stop_idx = self.y_range_idx_by_val(ystopval)
        assert y_start_idx != y_stop_idx,\
            'Startindex and stopindex %d are equal for ylinetrace.' % y_start_idx
        # +1 if the cut ascends in index space, -1 if it descends.
        sign = np.sign(y_stop_idx - y_start_idx)
        if sign == 1:
            return np.vstack(
                (self.zdata[y_start_idx:y_stop_idx + 1, self.x_range_idx_by_val(xval)],
                 self.y_range[y_start_idx:y_stop_idx + 1]))
        else:
            # Descending cut: slice in ascending index order, then reverse both
            # rows so the output still runs from ystartval to ystopval.
            data = self.zdata[y_stop_idx:y_start_idx + 1, self.x_range_idx_by_val(xval)]
            y_range = self.y_range[y_stop_idx:y_start_idx + 1]
            return np.vstack((data[::-1], y_range[::-1]))
    def extract_xlinetrace(self, yval, xstartval, xstopval):
        """Extract a linetrace along a given x-axis range for a specific
        value on the y axis.
        Args:
            yval (float): Position of the linecut along the y-axis.
            xstartval (float): Start and ...
            xstopval (float): stop value of the range along the x-axis.
        Returns:
            numpy array with two rows
                [0] linecutdata
                [1] x-axis range
            Both rows run from xstartval towards xstopval, also when that
            direction is descending on the grid.
        """
        x_start_idx = self.x_range_idx_by_val(xstartval)
        x_stop_idx = self.x_range_idx_by_val(xstopval)
        assert x_start_idx != x_stop_idx,\
            'Startindex and stopindex %d are equal for xlinetrace.' % x_start_idx
        # +1 if the cut ascends in index space, -1 if it descends.
        sign = np.sign(x_stop_idx - x_start_idx)
        if sign == 1:
            return np.vstack(
                (self.zdata[self.y_range_idx_by_val(yval), x_start_idx:x_stop_idx + 1],
                 self.x_range[x_start_idx:x_stop_idx + 1]))
        else:
            # Descending cut: slice in ascending index order, then reverse both
            # rows so the output still runs from xstartval to xstopval.
            data = self.zdata[self.y_range_idx_by_val(yval), x_stop_idx:x_start_idx + 1]
            x_range = self.x_range[x_stop_idx:x_start_idx + 1]
            return np.vstack((data[::-1], x_range[::-1]))
    def extract_ylinetrace_series(self, x_first, x_last, x_interval, ystart, ystop):
        """Extract linetraces along a given y-axis range for
        values on the x axis within a given range and separated by
        a given interval.
        Args:
            x_first (float): value on the x-axis for the first line trace in the series.
            x_last (float): value on the x-axis for the last line trace in the series.
            x_interval (float): the (positive) interval between two linecuts on the x-axis.
            ystart (float): Start and ...
            ystop (float): stop value of the range along the y-axis.
        Returns:
            a numpy array with n + 1 rows with the length equal to the y-dimensions of zdata.
            n is the number of linecuts, i.e., abs(x_last - x_first) / x_interval.
            The last row contains the y-axis range.
        """
        result_array = self.extract_ylinetrace(x_first, ystart, ystop)
        # Degenerate case: first and last cut fall on the same column --
        # return the single two-row trace (data + y-range) unchanged.
        if self.x_range_idx_by_val(x_first) == self.x_range_idx_by_val(x_last):
            return result_array
        result_range = result_array[1]
        result_array = result_array[0]
        # Walk from x_first towards x_last in steps of x_interval; the sign
        # handles descending series. NOTE(review): float accumulation may
        # drop/keep the final cut depending on rounding.
        x_sign = np.sign(x_last - x_first)
        x_pos = x_first + x_interval * x_sign
        while x_pos * x_sign <= x_last * x_sign:
            result_array = np.vstack((result_array, self.extract_ylinetrace(x_pos, ystart, ystop)[0]))
            x_pos += x_interval * x_sign
        return np.vstack((result_array, result_range))
    def extract_xlinetrace_series(self, y_first, y_last, y_interval, xstart, xstop):
        """Extract linetraces along a given x-axis range for
        values on the y axis within a given range and separated by
        a given interval.
        Args:
            y_first (float): value on the y-axis for the first line trace in the series.
            y_last (float): value on the y-axis for the last line trace in the series.
            y_interval (float): the (positive) interval between two linecuts on the y-axis.
            xstart (float): Start and ...
            xstop (float): stop value of the range along the x-axis.
        Returns:
            a numpy array with n + 1 rows with the length equal to the x-dimensions of zdata.
            n is the number of linecuts, i.e., abs(y_last - y_first) / y_interval.
            The last row contains the x-axis range.
        """
        result_array = self.extract_xlinetrace(y_first, xstart, xstop)
        # Degenerate case: first and last cut fall on the same row --
        # return the single two-row trace (data + x-range) unchanged.
        if self.y_range_idx_by_val(y_first) == self.y_range_idx_by_val(y_last):
            return result_array
        # Walk from y_first towards y_last in steps of y_interval; the sign
        # handles descending series.
        y_sign = np.sign(y_last - y_first)
        y_pos = y_first + y_interval * y_sign
        # For now we remove the range axis
        result_range = result_array[1]
        result_array = result_array[0]
        while y_pos * y_sign <= y_last * y_sign:
            # add the next linetrace to the other linetraces
            result_array = np.vstack((result_array, self.extract_xlinetrace(y_pos, xstart, xstop)[0]))
            y_pos += y_interval * y_sign
        return np.vstack((result_array, result_range))
    def extract_arbitrary_linetrace(self, coordinate_one, coordinate_two):
        """Extract a linetrace between two arbitrary points.
        Args:
            coordinate_one (tuple): coordinate in the coordinate system of the axis.
                The order is (yval, xval)!
            coordinate_two (tuple): coordinates in the coordinate system of the
                x and y axes. The order is (yval, xval)!
        Returns:
            Array with the linetrace. No axis range is supplied since it does not make sense
            along any arbitrary direction.
        """
        # we transform to the grid
        idx_one = self.idx_by_val_coordinate(coordinate_one)
        idx_two = self.idx_by_val_coordinate(coordinate_two)
        assert idx_one != idx_two, (
            'Coordinate one and two are equal: (y=%d, x=%d).' % (idx_one[0], idx_one[1]),\
            'Can not extract linetrace of zero length.')
        # if one of the two coordinate axis has zero difference,
        # we call the orthogonal version
        # y axis difference is zero:
        if idx_one[0] == idx_two[0]:
            return self.extract_xlinetrace(
                coordinate_one[0], coordinate_one[1], coordinate_two[1])[0]
        # x axis difference is zero:
        elif idx_one[1] == idx_two[1]:
            return self.extract_ylinetrace(
                coordinate_one[1], coordinate_one[0], coordinate_two[0])[0]
        # which is the primary axis of the linetrace?
        # The primary axis is the one with the larger index span; we step one
        # grid point at a time along it and pick the nearest secondary index.
        if abs(idx_one[0] - idx_two[0]) > abs(idx_one[1] - idx_two[1]):
            primary_axis_index, secondary_axis_index = (0, 1)
        else:
            primary_axis_index, secondary_axis_index = (1, 0)
        # Secondary-index change per unit primary-index step (|slope| <= 1).
        linetrace_slope = float(idx_two[secondary_axis_index] - idx_one[secondary_axis_index]) /\
            float(idx_two[primary_axis_index] - idx_one[primary_axis_index])
        # Note that the linetrace has one more points than its length
        linetrace_size = abs(idx_two[primary_axis_index] - idx_one[primary_axis_index]) + 1
        axis_sign = np.sign(idx_two[primary_axis_index] - idx_one[primary_axis_index])
        # go along primary axis and extract closest point
        # if the primary axis is y-axis
        if primary_axis_index == 0:
            # dy and dx are positive: increment on both axis postive (trivial case)
            # dy > 0, dx < 0: increment on first axis positive, slope negative -> increment
            # on second axis negative.
            # dy < 0, dx > 0: increment on y negative, slope negative -> dx positive
            # dy < 0, dx < 0: increment negative, slope positive -> dx negative
            linetrace = np.array(
                [self.zdata[yidx + idx_one[0], int(round(yidx * linetrace_slope + idx_one[1]))]
                 for yidx in np.arange(linetrace_size) * axis_sign])
        else:
            linetrace = np.array(
                [self.zdata[int(round(xidx * linetrace_slope + idx_one[0])), xidx + idx_one[1]]
                 for xidx in np.arange(linetrace_size) * axis_sign])
        return linetrace
def resize(self, new_ywidth, new_xwidth, order=1):
"""Interpolate the array to a new, larger size.
Uses scipy.misc.imresize.
The ranges are interpolated accordingly.
Args:
new_ywidth (int): new dimensions along the y-axis.
new_xwidth (int): new dimensions along the x-axis.
order (int): order of the interpolation. See ``scipy.misc.imresize()``
"""
# Check if scipy is available
try:
from scipy.ndimage import zoom
except ImportError:
logging.error(
'Module scipy is not available. scipy.misc.imresize is used for interpolation.')
return
xfactor = float(new_xwidth) / self.xwidth
yfactor = float(new_ywidth) / self.ywidth
self._zdata = zoom(self._zdata, (yfactor, xfactor), order=order)
|
|
import brightway2 as bw
import pandas as pd
import numpy as np
import math
def is_method_uncertain(method):
    """Return True if any characterization factor of *method* carries an
    uncertainty dict instead of a plain numeric amount."""
    loaded = bw.Method(method).load()
    return any(isinstance(amount, dict) for _, amount in loaded)
def uncertain_archetype_dict(biosphere_database=None):
    """Map (name, compartment) -> flow key for biosphere flows whose
    archetype (sub-compartment) is not defined.

    Args:
        biosphere_database: a ``bw.Database`` to scan. Defaults to
            ``bw.Database("biosphere3")``, resolved lazily at call time.

    Returns:
        dict: ``{(name, compartment): (database, code)}`` for every flow whose
        ``categories`` tuple does not have exactly two entries.
    """
    # Bug fix: the default used to be ``bw.Database("biosphere3")`` evaluated at
    # import time, which required an active brightway project just to import
    # this module (and froze the database object across calls). Resolve the
    # default lazily instead; explicit arguments behave exactly as before.
    if biosphere_database is None:
        biosphere_database = bw.Database("biosphere3")
    biosphere_dict_unclassified = {}
    for f in biosphere_database:
        compartments = f["categories"]
        compartment = compartments[0]
        if len(compartments) != 2:
            biosphere_dict_unclassified[(f["database"], f["code"])] = (
                f["name"],
                compartment,
            )
    # reverse it to: (name, compartment): key
    biosphere_dict_unclassified_rev = {
        v: k for k, v in biosphere_dict_unclassified.items()
    }
    return biosphere_dict_unclassified_rev
def minmax_archetype(cf_df):
    """
    args:
        cf_df: a pandas dataframe as formatted by the function get_cf_info
    returns: a pandas dataframe with columns:
        - code: elementary flow key
        - name: elementary flow name
        - amount: best guess of CF (the value for the undefined archetype)
        - minimum: minimum value of CF over the defined archetypes
        - maximum: maximum value of CF over the defined archetypes
        only for the flows with uncertainty associated to the archetype.
        Returns None if the method does not have any "uncertain" CF.
    """
    # dict with the keys of flows with unspecified archetype
    biosphere_dict_unclassified_rev = uncertain_archetype_dict()
    assert len(biosphere_dict_unclassified_rev)>0,'the function needs the biosphere database to be defined to work'
    if None not in cf_df.subcompartment.unique():
        # case where there's no undefined subcompartment case
        df_minmax = None
    elif (cf_df.subcompartment.unique() == np.array(None)).all():
        # every CF is for the undefined subcompartment: no range to compute
        df_minmax = None
    else:
        # calculate range: pivot subcompartments into columns, drop the
        # "undefined" column, and take the per-flow min/max over the rest
        df_minmax = (
            cf_df.set_index(["name", "compartment", "subcompartment"])["amount"]
            .unstack("subcompartment")
            .rename({np.nan: "undefined"}, axis=1)
            .drop("undefined", axis=1)
            .apply(["min", "max"], axis=1)
        )
        # if only defined in "undefined" there are nones in the min max
        df_minmax = df_minmax.dropna(axis="index")
        # add default (the CF of the undefined subcompartment) as "amount"
        df_minmax["amount"] = cf_df[cf_df.subcompartment.isna()].set_index(
            ["name", "compartment"]
        )["amount"]
        # min max is undefined because only
        # unspecified subcompartmet exists
        df_minmax = df_minmax.dropna(axis=0, how="all")
        # add the biosphere key code to the dataframe with ranges
        df_minmax = df_minmax.reset_index().rename(
            {"level_0": "name", "level_1": "compartment"}, axis=1
        )
        df_minmax["key"] = list(zip(df_minmax["name"], df_minmax["compartment"]))
        df_minmax["code"] = df_minmax["key"].map(biosphere_dict_unclassified_rev)
        # NOTE(review): drops flows without a matching unclassified key --
        # presumably flows whose undefined archetype is not in the biosphere.
        df_minmax = df_minmax[df_minmax.code.notna()]
        # translate to naming convention expected in the uncertainty dict
        df_minmax = df_minmax.rename({"min": "minimum", "max": "maximum"}, axis=1)
        # eliminate cases where they are nearly the same
        df_minmax = df_minmax[~df_minmax.apply(lambda x:math.isclose(x.minimum,
                                                                     x.maximum,rel_tol=0.001),axis=1)]
        # drop cases with undefined amount
        df_minmax = df_minmax[df_minmax.amount.notna()]
        df_minmax = df_minmax[
            ["code", "name", "amount", "minimum", "maximum", "compartment"]
        ]
        # if just one cf is defined and it is for the undefined archetype
        # this will generate nans, that are not needed
        df_minmax = df_minmax.dropna(axis=1)
    return df_minmax
def get_cf_info(m):
    """extracts info on the characterisation factors of a method
    given the name. Currently prepared only for methods without
    uncertainty, where CF are only a tuple (key,amount)

    Args:
        m: a method identifier present in ``bw.methods``.
    Returns:
        pandas.DataFrame with one row per characterization factor and columns
        database, code, name, amount, unit, type, compartment, subcompartment
        (subcompartment is None when the flow has no archetype defined).
    """
    assert m in bw.methods,f"{m} not in bw.methods"
    assert is_method_uncertain(m) is False,f"{m} has uncertain CF. Not yet supported"
    M = bw.Method(m)
    cfs = M.load()
    info = []
    for cf in cfs:
        key,value = cf
        flow = bw.get_activity(key)
        compartments = flow["categories"]
        compartment = compartments[0]
        # flows without an archetype have a single-element categories tuple
        try:
            subcompartment = compartments[1]
        except IndexError:
            subcompartment = None
        info.append(
            (
                flow["database"],
                flow["code"],
                flow["name"],
                value,
                flow["unit"],
                flow["type"],
                compartment,
                subcompartment,
            )
        )
    df = pd.DataFrame(
        info,
        columns=[
            "database",
            "code",
            "name",
            "amount",
            "unit",
            "type",
            "compartment",
            "subcompartment",
        ],
    )
    return df
def cf_add_uncertainty(method, uncertainty_type=4):
    """returns the cf with the uncertainty associated to the archetype
    if existing. Otherwise it returns a None value

    Args:
        method: a method identifier present in ``bw.methods``.
        uncertainty_type (int): 4 for uniform or 5 for triangular
            (stats_arrays uncertainty-type ids).
    Returns:
        list of (flow key, amount-or-uncertainty-dict) tuples, or None when
        there is nothing to make uncertain.
    """
    cf_df = get_cf_info(method)
    number_cf = len(cf_df)
    df_minmax = minmax_archetype(cf_df)
    # cond 1: method does not have uncertain CF
    cond1 = pd.api.types.is_object_dtype(cf_df.amount)
    # cond 2,3 and 4 validate if there are cases to be calculated
    cond2 = cf_df.subcompartment.isna().sum() == 0
    cond3 = df_minmax is None
    if cond3:
        cond4 = False
    else:
        cond4 = len(df_minmax) == 0
    if cond1 or cond2 or cond3 or cond4:
        cflist = None
    else:
        cf_df["key"] = list(zip(cf_df["database"], cf_df["code"]))
        # eliminate the static cf for the cases where an uncertain cf
        # is defined
        cf_df = cf_df.set_index("key").drop(df_minmax["code"].unique())
        cflist_certain = list(zip(cf_df.index, cf_df["amount"]))
        if uncertainty_type == 4:
            # uniform distribution: needs amount, minimum, maximum
            df_minmax["uncertainty type"] = 4
            df_minmax = df_minmax.set_index("code")[
                ["amount", "maximum", "minimum", "uncertainty type"]
            ]
            cflist_uncertain = list(
                zip(df_minmax.index, df_minmax.to_dict(orient="records"))
            )
        elif uncertainty_type == 5:
            # triangular distribution: additionally needs the mode ("loc")
            df_minmax["uncertainty type"] = 5
            df_minmax.loc[:, "loc"] = df_minmax.loc[:, "amount"]
            df_minmax = df_minmax.set_index("code")[
                ["amount", "maximum", "minimum", "loc", "uncertainty type"]
            ]
            cflist_uncertain = list(
                zip(df_minmax.index, df_minmax.to_dict(orient="records"))
            )
        else:
            raise ValueError(
                "uncertainty types only defined for uniform (4) or triangular (5)"
            )
        # add both
        cflist = cflist_certain + cflist_uncertain
        # sanity check: no CF was lost or duplicated in the split
        assert len(cflist_uncertain) + len(cflist_certain) == number_cf
    return cflist
|
|
import shutil
from pathlib import Path
import pytest
import numpy as np
from spikeinterface.extractors import *
@pytest.mark.skip('')
def test_klustaextractors():
    # Not tested here; the extractor is exercised by the run_klusta sorter tests.
    pass
    # klusta_folder = '/home/samuel/Documents/SpikeInterface/spikeinterface/spikeinterface/sorters/tests/klusta_output/'
    # sorting = KlustaSortingExtractor(klusta_folder)
    # print(sorting)
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_klustaextractors()
|
|
#!/usr/bin/env python3
import torch, random, sys, os, pickle, argparse
import numpy as np, pathos.multiprocessing as mp
import gym_util.common_util as cou
import polnet as pnet, util_bwopt as u
from collections import defaultdict
from poleval_pytorch import get_rpi_s, get_Ppi_ss, get_ppisteady_s
def main():
    """Entry point: parse CLI args and run the exact single-objective maximization."""
    arg = parse_arg(); cfg = vars(arg)
    cfg['ij'] = None # dummy ij coord, used in making mesh
    maximize_singleobj_exactly(cfg)
def maximize_singleobj_exactly(cfg):
    """Exactly maximize a single policy objective (gain, bias, or discounted
    value) over a 2-parameter policy network via preconditioned gradient
    ascent with a backtracking line search, using exact (tabular) policy
    evaluation rather than sampling.

    Args:
        cfg (dict): run configuration (see ``parse_arg``); must also carry an
            ``'ij'`` entry (mesh coordinate, may be None).

    Returns:
        dict: summary of the final iterate (fval, grad_norm, gain, bias, disc,
        param, iteration count, log directory, objective gaps vs the optimal
        deterministic policy, ...).
    """
    def fn(param, compute_derivative=True):
        """Evaluate the selected objective at ``param`` (a length-2 vector).

        Returns the scalar objective when ``compute_derivative`` is False,
        otherwise the tuple (fval, grad, hess, conditioner, info-dict).
        """
        # Load the two scalar parameters into the policy network.
        param_dict = {pi_net.weight_x_name: param[0], pi_net.weight_y_name: param[1]}
        for n, p in pi_net.named_parameters():
            p.data.fill_(param_dict[n])
            p.data = p.data.double()
        PI = pnet.policy_net2tabular(allstatefea, pi_net, requires_grad=True)
        rpi = get_rpi_s(Rsa, PI); Ppi = get_Ppi_ss(Psas, PI)
        ppi_steady = get_ppisteady_s(Ppi, PI)
        Ppi_steady = torch.vstack([ppi_steady]*nS) # unichain: same rows
        I = torch.eye(nS).double()
        Dpi = torch.inverse(I - gamma*Ppi) # IMPROPER discounted state distrib
        Zpi = torch.inverse(I - Ppi + Ppi_steady) # fundamental matrix
        Hpi = torch.matmul(Zpi, I - Ppi_steady) # deviation matrix
        g = torch.dot(ppi_steady, rpi) # gain
        b = torch.matmul(Hpi, rpi) # bias
        d = torch.matmul(Dpi, rpi) # discounted
        if fval_mode=='gain':
            fval = g
        elif fval_mode=='bias':
            fval = b[s0]
        elif fval_mode=='disc':
            fval = (1 - gamma)*d[s0] # the scaled value for sampling-enabler expression
        # elif fval_mode=='pena':
        #     # `create_graph` needs to be `True` so that `grad_g_norm.requires_grad= True`
        #     grad_g = torch.autograd.grad(g, pi_net.parameters(),
        #         allow_unused=False, create_graph=True, retain_graph=True)
        #     grad_g = torch.vstack(grad_g).squeeze()
        #     assert torch.isfinite(grad_g).all()
        #     grad_g_norm = torch.linalg.norm(grad_g, ord=None)
        #     fval = b[s0] - 0.5*pen*(grad_g_norm**2)
        else:
            raise NotImplementedError(fval_mode)
        if compute_derivative:
            # Exact gradient and (row-by-row) Hessian of fval wrt the 2 params.
            grad = torch.autograd.grad(fval, pi_net.parameters(),
                allow_unused=False, create_graph=True, retain_graph=True)
            grad = torch.vstack(grad).squeeze()
            assert torch.isfinite(grad).all()
            hess = []
            for pidx in range(n_param):
                mask = torch.zeros(n_param); mask[pidx] = 1.
                hess_i = torch.autograd.grad(grad, pi_net.parameters(),
                    grad_outputs=mask, allow_unused=False, create_graph=False, retain_graph=True)
                hess.append(torch.vstack(hess_i).squeeze())
            hess = torch.vstack(hess); assert torch.isfinite(hess).all()
            if 'fisher' in conditioner_mode:
                # Per-state Fisher information of the policy, then weight the
                # states according to the chosen conditioner variant below.
                fisher_atallstate = {s: torch.zeros(n_param, n_param).double() for s in range(nS)}
                for s in range(nS):
                    for a in range(nA_list[s]):
                        # Do NOT use torch.log(PI[s, a]): unstable grad on extreme values (with sigmoid fn)
                        pi = pi_net(torch.from_numpy(env.get_statefeature([s], cfg['sfx'])))
                        prob_a = pi.probs.squeeze(dim=u.sample_dimth)[a]
                        logprob_a = pi.log_prob(torch.tensor([a]))
                        grad_logpi = torch.autograd.grad(logprob_a, pi_net.parameters(),
                            allow_unused=False, create_graph=False, retain_graph=True)
                        grad_logpi = torch.vstack(grad_logpi).squeeze()
                        fisher_atallstate[s] += prob_a*torch.outer(grad_logpi, grad_logpi)
                fisher = torch.zeros(n_param, n_param).double()
                rtol = env.tmix_cfg['rtol']; atol = env.tmix_cfg['atol']
                if conditioner_mode=='fisher_steady' and fval_mode=='gain':
                    for s in range(nS):
                        fisher += Ppi_steady[s0, s]*fisher_atallstate[s]
                elif conditioner_mode=='fisher_disc' and fval_mode=='disc':
                    for s in range(nS):
                        fisher += (1 - gamma)*Dpi[s0, s]*fisher_atallstate[s]
                elif conditioner_mode=='fisher_disc_unnormalized' and fval_mode=='disc':
                    for s in range(nS):
                        fisher += Dpi[s0, s]*fisher_atallstate[s]
                elif conditioner_mode=='fisher_devmat' and fval_mode=='bias':
                    for s in range(nS):
                        fisher += torch.abs(Hpi[s0, s])*fisher_atallstate[s]
                elif conditioner_mode=='fisher_probtransient' and fval_mode=='bias':
                    # Weight by the transient part |P^t - P_steady| summed until mixing.
                    t_begin = 0; t_end = tmax_xep
                    for t in range(t_begin, t_end + 1, 1):
                        Ppi_pwr = torch.matrix_power(Ppi, t)
                        for s in range(nS):
                            pb_s = torch.abs(Ppi_pwr[s0, s] - Ppi_steady[s0, s])
                            fisher += pb_s*fisher_atallstate[s]
                        if torch.allclose(Ppi_steady[s0,:], Ppi_pwr[s0,:], rtol=rtol, atol=atol):
                            break
                    assert t <= tmax_xep
                elif ('fisher_transient_withsteadymul_upto_t' in conditioner_mode) and fval_mode=='bias' :
                    t_begin = 0; t_end = tmax_xep
                    t_transient_max = int(conditioner_mode.replace('fisher_transient_withsteadymul_upto_t', ''))
                    for t in range(t_begin, t_end + 1, 1):
                        Ppi_pwr = torch.matrix_power(Ppi, t)
                        if (t <= t_transient_max): # equiv to `t < tabs`
                            for s in range(nS):
                                fisher += Ppi_pwr[s0, s]*fisher_atallstate[s]
                        if torch.allclose(Ppi_steady[s0,:], Ppi_pwr[s0,:], rtol=rtol, atol=atol):
                            tau = (t_transient_max + 1) # +1: index begins at 0
                            for s in range(nS):
                                fisher += (tau*Ppi_steady[s0, s])*fisher_atallstate[s]
                            break
                    assert t <= tmax_xep
                # elif conditioner_mode=='fisher_pena' and fval_mode=='pena':
                #     # gain part
                #     fisher_g = torch.zeros(n_param, n_param).double()
                #     for s in range(nS):
                #         fisher_g += Ppi_steady[s0, s]*fisher_atallstate[s]
                #     # bias part
                #     t_begin = 0; t_end = tmax_xep; t_transient_max = 1
                #     fisher_b = torch.zeros(n_param, n_param).double()
                #     for t in range(t_begin, t_end + 1, 1):
                #         Ppi_pwr = torch.matrix_power(Ppi, t)
                #         if (t <= t_transient_max):
                #             for s in range(nS):
                #                 fisher_b += Ppi_pwr[s0, s]*fisher_atallstate[s]
                #         if torch.allclose(Ppi_steady[s0,:], Ppi_pwr[s0,:], rtol=rtol, atol=atol):
                #             tau = (t_transient_max + 1) # +1: index gins at 0
                #             for s in range(nS):
                #                 fisher_b += (tau*Ppi_steady[s0, s])*fisher_atallstate[s]
                #             break
                #     assert t <= tmax_xep
                #     # fisher for the penalized obj
                #     fisher = fisher_b - pen*fisher_g
                else:
                    raise NotImplementedError(conditioner_mode, fval_mode)
                cond = fisher
            elif conditioner_mode=='hess':
                def modify_hess_to_negativedefinite(H):
                    # Shift eigenvalues down until -H is positive definite
                    # (Cholesky succeeds), so -H_mod is a valid conditioner.
                    kappa_min = 1e-2; kappa_multiplier = 2; kappa = 0.
                    for k in range(100):
                        try:
                            H_mod = H - kappa*torch.eye(n_param)
                            torch.cholesky(-H_mod) # test for positive definiteness
                            return H_mod
                        except:
                            # print('{} cholesky: failed using kappa {:.3f}'.format(k, kappa))
                            kappa = max(kappa_multiplier*kappa, kappa_min)
                    raise RuntimeError
                cond = -1.*modify_hess_to_negativedefinite(hess)
            elif conditioner_mode=='identity':
                cond = torch.eye(n_param).double()
            else:
                raise NotImplementedError
            assert torch.isfinite(cond).all()
            return (fval, grad, hess, cond, {'gain': g, 'bias': b[s0], 'disc': d[s0]})
        else:
            return fval
    ############################################################# fn: end ######
    envid = cfg['env']; seed = cfg['seed']; envid_short = u.get_shortenvid(envid)
    tag = ['traj_exactopt', cfg['obj'], cfg['cond'], cfg['pnet'], envid_short]
    logdir = os.path.join(cfg['logdir'], u.make_stamp(tag, timestamp=''))
    log = defaultdict(list); os.makedirs(logdir, exist_ok=True)
    torch.manual_seed(seed); np.random.seed(seed); random.seed(seed) # not used in exact
    env = cou.make_single_env(envid, seed)
    Psas = torch.tensor(env.get_Psas()).double()
    Rsa = torch.tensor(env.get_Rsa()).double()
    allstatefea = torch.from_numpy(env.get_allstatefeature(cfg['sfx']))
    s0 = env.reset(); nS, nA = env.nS, env.nA; nA_list = env.nA_list
    tmax_xep = env.tmax_xep if cfg['txep'] is None else cfg['txep']
    PolicyNetClass = pnet.policynetclass_dict[cfg['pnet']]
    pi_net = PolicyNetClass(nA_list)
    n_param = sum([i.numel() for i in pi_net.parameters()])
    p = torch.tensor(cfg['par']); assert len(cfg['par'])==2
    conditioner_mode=cfg['cond']; eps = float(cfg['eps'])
    fval_mode = cfg['obj'][0:4] # 4 letters: gain, bias, disc (eg `disc_0.99`)
    gamma = float(cfg['obj'][5:]) if fval_mode=='disc' else float('Nan') # discount factor
    pen, pen_mul = [float(i) for i in cfg['obj'][5:].split('_')] \
        if fval_mode=='pena' else [float('Nan')]*2 # objective with penalty
    for iterx_idx in range(cfg['niterx']): # outer optim loop
        for iter_idx in range(cfg['niter']): # inner optim loop
            # Evaluate
            fval, grad, hess, cond, info = fn(p)
            grad_norm = torch.linalg.norm(grad)
            hess_eigval = torch.eig(hess, eigenvectors=False).eigenvalues[:, 0] # real part @idx=0
            log['param'].append(p.detach().clone())
            log['fval'].append(fval.item()); log['grad_norm'].append(grad_norm.item())
            log['gain'].append(info['gain'].item()); log['bias'].append(info['bias'].item())
            log['disc'].append(info['disc'].item()); log['pen'].append(pen)
            if cfg['print']:
                msgs = ['fval {:.5f}'.format(fval.item()), 'grad_norm {:.5f}'.format(grad_norm.item()),
                    'hess_eigval ({:.5f}, {:.5f})'.format(hess_eigval[0], hess_eigval[1]),
                    'gain {:.5f}'.format(info['gain'].item()), 'bias {:.5f}'.format(info['bias'].item()),
                    'disc {:.5f}'.format(info['disc'].item()), 'pen {:.5f}'.format(pen),
                    'xy ({:.5f}, {:.5f})'.format(p[0], p[1])]
                print('{} {} {} {}: '.format(iterx_idx, iter_idx, fval_mode, cfg['cond']) + ' '.join(msgs))
            # Converged: small gradient at a (local) maximum.
            if (grad_norm <= eps) and (hess_eigval <= 0.).all():
                break
            # Step (ascent) direction
            # Using psuedo inverse, accomodating zero fisher_steady on bias-optim
            stepdir = torch.matmul(torch.pinverse(cond), grad)
            log['stepdir'].append(stepdir.detach().clone())
            # Step size
            steplen = u.backtracking_linesearch_ascent(
                p=p.detach().clone().numpy(),
                fval=fval.item(), grad=grad.detach().clone().numpy(),
                stepdir=stepdir.detach().clone().numpy(),
                fn=fn, niter=100)
            log['steplen'].append(steplen)
            # Update parameters
            p += steplen*stepdir
        # update penalty param (outer optim loop)
        pen = pen*pen_mul
    # Closure
    log = dict(log); log['cfg'] = cfg
    final_info = {'niter': iter_idx+1, 'ij': cfg['ij'], 'logdir': logdir, 'discountfactor': gamma}
    for key in ['param', 'fval', 'grad_norm', 'gain', 'bias', 'disc']:
        if len(log[key]) > 0:
            final_info[key] = log[key][-1]
        else:
            final_info[key] = None
    # Gaps relative to the environment's optimal deterministic policy.
    final_info['bias_diff'] = final_info['bias'] - env.cfg['deterministic_policy']['bs0max_gainmax']
    final_info['gain_diff'] = final_info['gain'] - env.cfg['deterministic_policy']['gain_max']
    initpar_str = '_'.join([str(p) for p in cfg['par']])
    fname = '__'.join(['traj_exactopt', fval_mode, cfg['cond'], initpar_str, envid_short])
    if cfg['write']:
        fname += '.pkl'
        with open(os.path.join(logdir, fname), 'wb') as f:
            pickle.dump(log, f)
    else:
        fname += '.txt'
        with open(os.path.join(logdir, fname), 'w') as f:
            f.write('') # empty! just for indicating "done"
    return final_info
def parse_arg():
    """Parse command-line arguments for the exact single-objective optimization run.

    Returns:
        argparse.Namespace: parsed arguments; ``logdir`` has any leading
        ``file://`` scheme stripped.
    """
    def str2bool(v):
        # Bug fix: the original used ``type=bool``, which converts every
        # non-empty string (including "False") to True. Parse common textual
        # booleans explicitly instead; defaults (True) are unchanged.
        if isinstance(v, bool):
            return v
        return v.strip().lower() not in ('false', 'f', '0', 'no', 'n', '')
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--env', help='env id', type=str, default=None, required=True)
    parser.add_argument('--obj', help='optimization objective', type=str, default=None, required=True)
    parser.add_argument('--par', help='init param [x, y]', type=float, nargs='+', default=None, required=True)
    parser.add_argument('--pnet', help='policy network mode id', type=str, default=None, required=True)
    parser.add_argument('--sfx', help='state feature extractor id', type=str, default=None, required=True)
    parser.add_argument('--eps', help='epsilon for grad norm', type=float, default=None, required=True)
    parser.add_argument('--cond', help='preconditioning matrix mode', type=str, default=None, required=True)
    parser.add_argument('--niter', help='max number of any inner optim iteration', type=int, default=None, required=True)
    parser.add_argument('--niterx', help='max number of outer optim iteration', type=int, default=None, required=True)
    parser.add_argument('--seed', help='rng seed', type=int, default=None, required=True)
    parser.add_argument('--logdir', help='log dir path', type=str, default=None, required=True)
    parser.add_argument('--print', help='msg printing', type=str2bool, default=True)
    parser.add_argument('--write', help='trajectory writing', type=str2bool, default=True)
    arg = parser.parse_args()
    arg.logdir = arg.logdir.replace('file://','')
    return arg
# Script entry point.
if __name__ == '__main__':
    main()
|
|
# coding : utf-8
"""
ResFGB for multiclass classification problems.
"""
from __future__ import print_function, absolute_import, division, unicode_literals
from logging import getLogger, ERROR
import time
from tqdm import tqdm
import sys
import numpy as np
import theano
from resfgb.models import LogReg, SVM, ResGrad
logger = getLogger(__name__)
class ResFGB(object):
    """Residual functional-gradient boosting for multiclass classification.

    Alternates between fitting a linear base model (logistic regression or a
    smooth-hinge SVM) and growing residual blocks that transform the features
    along the functional gradient.
    """

    def __init__(self, model_type=u'logreg', model_hparams={}, resblock_hparams={},
                 fg_eta=None, max_iters=10, seed=99, proc_batch_size=10000):
        """Set up the base model and the functional-gradient machinery.

        Args:
            model_type: u'logistic' (alias u'logreg') or u'smooth_hinge'.
            model_hparams (dict): base-model hyper-parameters; must contain
                'tune_eta', 'max_epoch' and 'early_stop', which are consumed
                here and not forwarded to the model constructor.
            resblock_hparams (dict): hyper-parameters of the residual blocks.
            fg_eta: functional-gradient step size.
            max_iters (int): number of boosting iterations (residual blocks).
            seed (int): rng seed forwarded to the submodels.
            proc_batch_size (int): batch size for block-wise processing.
        """
        # Bug fix: work on a copy so the ``del``s below do not mutate the
        # caller's dict (or the shared mutable default) across instantiations.
        model_hparams = dict(model_hparams)
        self.show_param(model_type,
                        model_hparams['tune_eta'],
                        model_hparams['max_epoch'],
                        model_hparams['early_stop'],
                        max_iters)
        self.__tune_eta__ = model_hparams['tune_eta']
        self.__max_epoch__ = model_hparams['max_epoch']
        self.__early_stop__ = model_hparams['early_stop']
        del model_hparams['tune_eta']
        del model_hparams['max_epoch']
        del model_hparams['early_stop']
        # Bug fix: the default model_type u'logreg' was rejected by this
        # dispatch (only u'logistic' was accepted), so constructing with
        # defaults exited the process. Accept it as an alias.
        if model_type in (u'logistic', u'logreg'):
            self.__model__ = LogReg(seed=seed, **model_hparams)
        elif model_type == u'smooth_hinge':
            self.__model__ = SVM(seed=seed, **model_hparams)
        else:
            logger.log(ERROR, 'invalid model_type: {0}'.format(model_type))
            sys.exit(-1)
        self.__max_iters__ = max_iters
        self.__fg__ = ResGrad(self.__model__, eta=fg_eta,
                              resblock_hparams=resblock_hparams,
                              seed=seed, proc_batch_size=proc_batch_size)

    def show_param(self, model_type, tune_eta, max_epoch, early_stop, max_iters):
        """Log the main configuration values in a fixed-width table."""
        logger.info('{0:<5}{1:^26}{2:>5}'.format('-' * 5, 'ResFGB setting', '-' * 5))
        logger.info('{0:<15}{1:>21}'.format('model_type', model_type))
        logger.info('{0:<15}{1:>21}'.format('tune_eta', tune_eta))
        logger.info('{0:<15}{1:>21}'.format('max_epoch', max_epoch))
        logger.info('{0:<15}{1:>21}'.format('early_stop', early_stop))
        logger.info('{0:<15}{1:>21}'.format('max_iters', max_iters))

    def evaluate(self, X, Y, sample_f=True, eval_metric=None):
        """Return (loss, accuracy) of the model on (X, Y).

        When ``sample_f`` is True, X is first transformed through the learned
        residual blocks.
        """
        if sample_f:
            Z = self.__fg__.predict(X)
            loss, acc = self.__model__.evaluate(Z, Y, eval_metric=eval_metric)
        else:
            loss, acc = self.__model__.evaluate(X, Y, eval_metric=eval_metric)
        return loss, acc

    def predict(self, X, sample_f=True):
        """Return class predictions; transforms X through the residual blocks
        first unless ``sample_f`` is False."""
        if sample_f:
            Z = self.__fg__.predict(X)
        else:
            Z = X
        pred = self.__model__.predict(Z)
        return pred

    def predict_proba(self, X, sample_f=True):
        """Return class probabilities; transforms X through the residual blocks
        first unless ``sample_f`` is False."""
        if sample_f:
            Z = self.__fg__.predict(X)
        else:
            Z = X
        pred = self.__model__.predict_proba(Z)
        return pred

    def fit(self, X, Y, Xv=None, Yv=None, use_best_iter=False, eval_metric=None):
        """Train the boosted model on (X, Y), optionally monitoring (Xv, Yv).

        Args:
            X, Y: training data and labels.
            Xv, Yv: optional validation data; enables monitoring.
            use_best_iter (bool): if True (and monitoring), truncate the model
                to the iteration with the best validation accuracy.
            eval_metric: forwarded to the model's evaluate().

        Returns:
            tuple: (best_n_layers, val_loss, val_acc) when monitoring,
            otherwise (None, None, None).
        """
        logger.info('{0:<5}{1:^26}{2:>5}'.format('-' * 5, 'Training ResFGB', '-' * 5))
        best_val_acc = None
        best_val_loss = 1e+10
        best_param = None
        best_n_layers = None
        total_time = 0.
        Z = np.array(X)
        if Xv is not None:
            monitor = True
            Zv = np.array(Xv)
        else:
            monitor = False
            Zv = None
        for n_iter in range(self.__max_iters__):
            logger.info('resfgb epoch: %s / %s' % (n_iter, self.__max_iters__))
            # ----- apply functional gradient -----
            stime = time.time()
            if n_iter >= 1:
                Z = self.__fg__.apply(Z, lfrom=n_iter - 1)
                if monitor:
                    Zv = self.__fg__.apply(Zv, lfrom=n_iter - 1)
            # ----- fit and evaluate -----
            self.__model__.optimizer.reset_func()
            if self.__tune_eta__ and (n_iter == 0):
                self.__model__.determine_eta(Z, Y)
            self.__model__.fit(Z, Y, self.__max_epoch__, early_stop=self.__early_stop__)
            etime = time.time()
            total_time += etime - stime
            train_loss, train_acc = self.evaluate(Z, Y, sample_f=False, eval_metric=eval_metric)
            logger.info('layer: {0:4}, time:{1:>14.1f} sec'
                        .format(n_iter, total_time))
            logger.info('train_loss: {0:5.4f}, train_acc: {1:4.3f}'
                        .format(train_loss, train_acc))
            if monitor:
                val_loss, val_acc = self.evaluate(Zv, Yv, sample_f=False, eval_metric=eval_metric)
                logger.info('val_loss: {0:8.4f}, val_acc: {1:7.3f}'
                            .format(val_loss, val_acc))
                if best_val_acc is None or val_acc > best_val_acc:
                    best_n_layers = n_iter
                    best_val_acc = val_acc
                    best_val_loss = val_loss
                    best_param = self.__model__.get_params(real_f=True)
            # ----- compute weight matrix -----
            stime = time.time()
            self.__fg__.compute_weight(Z, Y)
            etime = time.time()
            total_time += etime - stime
        # ----- apply functional gradient -----
        stime = time.time()
        if self.__max_iters__ >= 1:
            Z = self.__fg__.apply(Z, lfrom=self.__max_iters__ - 1)
            if monitor:
                Zv = self.__fg__.apply(Zv, lfrom=self.__max_iters__ - 1)
        # ----- fit and evaluate -----
        self.__model__.optimizer.reset_func()
        self.__model__.fit(Z, Y, self.__max_epoch__, early_stop=self.__early_stop__)
        etime = time.time()
        total_time += etime - stime
        train_loss, train_acc = self.evaluate(Z, Y, sample_f=False, eval_metric=eval_metric)
        logger.info('layer: {0:4}, time:{1:>14.1f} sec'
                    .format(self.__max_iters__, total_time))
        logger.info('train_loss: {0:5.4f}, train_acc: {1:4.3f}'
                    .format(train_loss, train_acc))
        if monitor:
            val_loss, val_acc = self.evaluate(Zv, Yv, sample_f=False, eval_metric=eval_metric)
            logger.info('val_loss: {0:8.4f}, val_acc: {1:7.3f}'
                        .format(val_loss, val_acc))
            # Bug fix: best_val_acc is None when max_iters == 0, which made
            # this comparison raise a TypeError; guard against it.
            if best_val_acc is None or val_acc > best_val_acc:
                best_n_layers = self.__max_iters__
                best_val_acc = val_acc
                best_val_loss = val_loss
                best_param = self.__model__.get_params(real_f=True)
        # ----- finalize -----
        if monitor and use_best_iter is True:
            if best_n_layers < self.__max_iters__:
                del self.__fg__.params[best_n_layers:]
                self.__model__.set_params(best_param)
        if monitor:
            if use_best_iter is True:
                return (best_n_layers, best_val_loss, best_val_acc)
            else:
                return (self.__max_iters__, val_loss, val_acc)
        else:
            return (None, None, None)
|
|
import numpy as np
from typing import Union, Tuple
class Rotation:
    """A 3-D rotation stored internally as a 3x3 direction-cosine matrix.

    The elementary matrices follow the coordinate-transformation (passive)
    convention: ``RotationMatrix2(t)`` has ``+sin(t)`` above the diagonal,
    i.e. it maps vector coordinates into a frame rotated by ``t`` about z.
    """

    def __init__(self, input_type: str, parameters: Union[np.ndarray, Tuple[Union[str, np.ndarray], np.ndarray]]):
        """Build a rotation from one of several parameterizations.

        input_type selects the parameterization:
          - 'matrix': parameters is a (3, 3) ndarray used as-is.
          - 'quaternion': parameters is a length-4 unit-quaternion ndarray,
            assumed scalar-first (w, x, y, z).
          - 'eulerangles': parameters is (sequence, angles); sequence is a
            3-char string of axis digits '1'/'2'/'3', angles a 3-element
            ndarray of radians applied in sequence order.
          - 'axisangle': parameters is (axis, angle); axis a 3-element
            ndarray, angle a float in radians.
        """
        assert type(input_type) is str, 'input_type must be a string'
        assert len(parameters) >= 1, 'parameters must be non-empty'
        kind = input_type.lower()
        if kind == 'matrix':
            assert type(parameters) is np.ndarray, 'matrix must be an ndarray'
            assert parameters.shape == (3, 3), 'matrix must be 3x3'
            self._matrix = parameters
        elif kind == 'quaternion':
            assert type(parameters) is np.ndarray, 'quaternion must be an ndarray'
            assert parameters.shape == (4, 1) or parameters.shape == (4,), 'quaternion must have 4 elements'
            self._matrix = Rotation.quaternion_to_matrix(parameters)
        elif kind == 'eulerangles':
            assert type(parameters) is tuple, 'parameters must be (sequence, angles)'
            assert len(parameters) == 2, 'parameters must be (sequence, angles)'
            sequence, angles = parameters
            assert type(sequence) is str and len(sequence) == 3, 'sequence must be a 3-char string'
            assert type(angles) is np.ndarray, 'angles must be an ndarray'
            assert angles.size == 3, 'angles must have 3 elements'
            self._matrix = Rotation.eulerangles_to_matrix(sequence, angles)
        elif kind == 'axisangle':
            assert type(parameters) is tuple, 'parameters must be (axis, angle)'
            assert len(parameters) == 2, 'parameters must be (axis, angle)'
            axis, angle = parameters
            # BUG FIX: the original additionally asserted `angle.size == 1`,
            # which raises AttributeError because `angle` is a plain float.
            assert type(axis) is np.ndarray and type(angle) is float, 'axis must be an ndarray and angle a float'
            assert axis.size == 3, 'axis must have 3 elements'
            self._matrix = Rotation.axisangle_to_matrix(axis, angle)
        else:
            # BUG FIX: the original silently left self._matrix unset for an
            # unrecognized input_type.
            raise ValueError('unknown input_type: {}'.format(input_type))

    @staticmethod
    def quaternion_to_matrix(q):
        """Convert a unit quaternion to a (passive) rotation matrix.

        NOTE(review): assumes scalar-first (w, x, y, z) layout -- the
        original referenced this method without defining it; confirm the
        quaternion convention against the callers.
        """
        w, x, y, z = np.asarray(q, dtype=float).reshape(4)
        # Passive (coordinate-transformation) matrix: the transpose of the
        # usual active rotation matrix, matching RotationMatrix0/1/2 below.
        return np.array([
            [1 - 2 * (y * y + z * z), 2 * (x * y + w * z), 2 * (x * z - w * y)],
            [2 * (x * y - w * z), 1 - 2 * (x * x + z * z), 2 * (y * z + w * x)],
            [2 * (x * z + w * y), 2 * (y * z - w * x), 1 - 2 * (x * x + y * y)],
        ])

    @staticmethod
    def axisangle_to_matrix(axis, angle):
        """Rodrigues formula (passive form) for a rotation about `axis` by `angle`.

        The axis is normalized; `angle` is in radians. The original class
        referenced this method without defining it.
        """
        n = np.asarray(axis, dtype=float).reshape(3)
        n = n / np.linalg.norm(n)
        K = np.array([[0.0, -n[2], n[1]],
                      [n[2], 0.0, -n[0]],
                      [-n[1], n[0], 0.0]])
        # Passive convention: transpose of the active Rodrigues matrix,
        # consistent with the elementary matrices below.
        return np.eye(3) - np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)

    @staticmethod
    def eulerangles_to_matrix(sequence, angles):
        """Compose three elementary rotations given an axis sequence.

        sequence is e.g. '321': rotate first about axis 3, then 2, then 1;
        the matrices therefore compose as R(seq[2]) @ R(seq[1]) @ R(seq[0]).
        """
        assert type(sequence) is str, 'sequence must be a string'
        assert len(sequence) == 3, 'sequence must have 3 characters'
        assert type(angles) is np.ndarray, 'angles must be an ndarray'
        assert angles.size == 3, 'angles must have 3 elements'
        rotations = [Rotation.RotationMatrix0,
                     Rotation.RotationMatrix1,
                     Rotation.RotationMatrix2]
        R0 = rotations[int(sequence[0]) - 1]
        R1 = rotations[int(sequence[1]) - 1]
        R2 = rotations[int(sequence[2]) - 1]
        # BUG FIX: the original used `*`, which is element-wise multiplication
        # on ndarrays; composing rotations requires matrix multiplication.
        return R2(angles[2]) @ R1(angles[1]) @ R0(angles[0])

    @staticmethod
    def RotationMatrix0(t):
        """Elementary (passive) rotation about axis 1 (x) by t radians."""
        return np.array([[1, 0, 0],
                         [0, np.cos(t), np.sin(t)],
                         [0, -np.sin(t), np.cos(t)]])

    @staticmethod
    def RotationMatrix1(t):
        """Elementary (passive) rotation about axis 2 (y) by t radians."""
        return np.array([[np.cos(t), 0, -np.sin(t)],
                         [0, 1, 0],
                         [np.sin(t), 0, np.cos(t)]])

    @staticmethod
    def RotationMatrix2(t):
        """Elementary (passive) rotation about axis 3 (z) by t radians."""
        return np.array([[np.cos(t), np.sin(t), 0],
                         [-np.sin(t), np.cos(t), 0],
                         [0, 0, 1]])
|
|
import io
import json
import math
import numpy
import os
import os.path
import skimage.io
import struct
import sys
import skyhook.ffmpeg as ffmpeg
def eprint(s):
    """Write *s* to stderr, newline-terminated, and flush immediately."""
    print(s, file=sys.stderr, flush=True)
def non_null_list(l):
    """Return *l* unchanged, substituting a fresh empty list when it is None.

    JSON input sometimes encodes empty lists as null (=> None); this helper
    restores real lists where lists are expected.
    """
    return [] if l is None else l
def data_index(t, data, i):
    """Select element *i* of *data* for data type *t*.

    Wrapped types ('shape', 'detection', 'int') are dicts holding a list
    under a type-specific key plus shared 'Metadata'; anything else is a
    plain sequence indexed directly.
    """
    wrapped = {'shape': 'Shapes', 'detection': 'Detections', 'int': 'Ints'}
    key = wrapped.get(t)
    if key is None:
        return data[i]
    return {
        key: data[key][i],
        'Metadata': data['Metadata'],
    }
def data_stack(t, datas):
    """Stack individual elements (as produced by data_index) back together.

    Arrays are stacked along a new leading axis; wrapped dict types collect
    their per-element payloads into a list, keeping the first element's
    Metadata; other types are returned as the plain list.
    """
    if t in ('image', 'video', 'array'):
        return numpy.stack(datas)
    wrapped = {'shape': 'Shapes', 'detection': 'Detections', 'int': 'Ints'}
    key = wrapped.get(t)
    if key is None:
        return datas
    return {
        key: [d[key] for d in datas],
        'Metadata': datas[0].get('Metadata', {}),
    }
def data_concat(t, datas):
    """Concatenate several regular data objects of type *t* into one.

    With a single element the object is returned as-is (which also covers
    non-sequence data). Arrays concatenate along axis 0; wrapped dict types
    flatten their per-object payload lists, keeping the first object's
    Metadata; other types flatten into one list.
    """
    if len(datas) == 1:
        return datas[0]
    if t in ('image', 'video', 'array'):
        return numpy.concatenate(datas, axis=0)
    wrapped = {'shape': 'Shapes', 'detection': 'Detections', 'int': 'Ints'}
    key = wrapped.get(t)
    if key is None:
        return [elem for data in datas for elem in data]
    return {
        key: [elem for data in datas for elem in data[key]],
        'Metadata': datas[0].get('Metadata', {}),
    }
def data_len(t, data):
    """Number of elements in *data* for data type *t*."""
    key = {'shape': 'Shapes', 'detection': 'Detections', 'int': 'Ints'}.get(t)
    return len(data[key]) if key is not None else len(data)
# Load data from disk.
# The output corresponds to what we would get from input_datas.
# It can be passed to data_index, data_concat, data_len, etc.
def load_item(dataset, item):
    """Read one item from data/items/<dataset ID>/<item Key>.<item Ext> and
    return it in the same in-memory layout that input_datas() produces.

    Raises for 'video' items, which must be streamed via load_video().
    """
    fname = 'data/items/{}/{}.{}'.format(dataset['ID'], item['Key'], item['Ext'])
    t = dataset['DataType']
    metadata, format = item['Metadata'], item['Format']
    if t == 'image':
        im = skimage.io.imread(fname)
        # Wrap in a list so the result behaves like a length-1 sequence for
        # data_index/data_len.
        return [im]
    elif t == 'video':
        raise Exception('load_item cannot handle video data')
    elif t == 'array':
        metadata = json.loads(metadata)
        # Arrays are stored big-endian; dtype and dimensions come from the
        # item metadata.
        dt = numpy.dtype(metadata['Type'])
        dt = dt.newbyteorder('>')
        return numpy.fromfile(fname, dtype=dt).reshape(-1, metadata['Height'], metadata['Width'], metadata['Channels'])
    else:
        with open(fname, 'r') as f:
            data = json.load(f)
        # transform to stream JSON format if needed
        if t == 'shape':
            # JSON may encode empty lists as null; restore real lists.
            data = [non_null_list(l) for l in data]
            data = {
                'Shapes': data,
                'Metadata': json.loads(metadata),
            }
        elif t == 'detection':
            data = [non_null_list(l) for l in data]
            data = {
                'Detections': data,
                'Metadata': json.loads(metadata),
            }
        elif t == 'int':
            data = {
                'Ints': data,
                'Metadata': json.loads(metadata),
            }
        return data
def load_video(dataset, item):
    """Open the on-disk video item as a streaming ffmpeg reader.

    Dimensions and framerate come from the item's JSON metadata.
    """
    path = 'data/items/{}/{}.{}'.format(dataset['ID'], item['Key'], item['Ext'])
    info = json.loads(item['Metadata'])
    return ffmpeg.Ffmpeg(path, info['Dims'], info['Framerate'])
def per_frame_decorate(f):
    """Wrap a per-element function *f* as a run() callback.

    For each 'job' the wrapper splits the stacked inputs into individual
    elements, applies *f* element-by-element, re-stacks the outputs, and
    emits them; a 'finish' packet just signals completion. *f* may return
    either a single output or a tuple with one entry per output dataset.
    """
    def wrap(*args):
        job_desc = args[0]
        if job_desc['type'] == 'finish':
            output_data_finish(job_desc['key'], job_desc['key'])
            return
        elif job_desc['type'] != 'job':
            return
        args = args[1:]
        # All input datasets carry the same number of elements; use the
        # first one to drive iteration.
        input_len = data_len(meta['InputTypes'][0], args[0])
        outputs = []
        for i in range(input_len):
            inputs = [data_index(meta['InputTypes'][ds_idx], arg, i) for ds_idx, arg in enumerate(args)]
            output = f(*inputs)
            if not isinstance(output, tuple):
                output = (output,)
            outputs.append(output)
        # Re-stack the per-element outputs into one object per output dataset.
        stack_outputs = []
        for i, t in enumerate(meta['OutputTypes']):
            stacked = data_stack(t, [output[i] for output in outputs])
            stack_outputs.append(stacked)
        output_datas(job_desc['key'], job_desc['key'], stack_outputs)
    return wrap
def all_decorate(f):
    """Wrap a whole-stream function *f* as a run() callback.

    Inputs from each 'job' packet are buffered in the per-key state; on
    'finish' the buffered pieces are concatenated, *f* is invoked once with
    the full inputs, and its outputs (a tuple, or a single value treated as
    a 1-tuple) are emitted followed by the finish marker.
    """
    def wrap(*args):
        job_desc = args[0]
        all_inputs = job_desc['state']
        if job_desc['type'] == 'job':
            args = args[1:]
            if all_inputs is None:
                all_inputs = [[arg] for arg in args]
            else:
                for i, arg in enumerate(args):
                    all_inputs[i].append(arg)
            # The return value is stored by run() as this key's state.
            return all_inputs
        elif job_desc['type'] == 'finish':
            all_inputs = [data_concat(meta['InputTypes'][ds_idx], datas) for ds_idx, datas in enumerate(all_inputs)]
            outputs = f(*all_inputs)
            if not isinstance(outputs, tuple):
                outputs = (outputs,)
            # BUG FIX: removed a dead statement that computed data_len() of
            # the first output but never used the result.
            output_datas(job_desc['key'], job_desc['key'], outputs)
            output_data_finish(job_desc['key'], job_desc['key'])
    return wrap
# Binary stdin/stdout handles and the parsed job metadata header.
# All three are populated by run() before any packet is processed.
stdin = None
stdout = None
meta = None
def input_json():
    """Read one length-prefixed JSON message from stdin; None on EOF.

    Wire format: a 4-byte big-endian length followed by UTF-8 JSON.
    """
    prefix = stdin.read(4)
    if not prefix:
        return None
    (length,) = struct.unpack('>I', prefix[0:4])
    payload = stdin.read(length)
    return json.loads(payload.decode('utf-8'))
def input_array(channels=None, dt=None):
    """Read one array input: a JSON header followed by raw big-endian bytes.

    :param channels: channel-count override; defaults to the header's
        'Channels' field.
    :param dt: numpy dtype override; defaults to the header's 'Type'.
    :return: ndarray of shape (Length, Height, Width, channels).
    """
    header = input_json()
    if channels is None:
        channels = header['Channels']
    if dt is None:
        dt = numpy.dtype(header['Type'])
    # The wire format is always big-endian regardless of host byte order.
    dt = dt.newbyteorder('>')
    size = header['Length']*header['Width']*header['Height']*channels*dt.itemsize
    buf = stdin.read(size)
    return numpy.frombuffer(buf, dtype=dt).reshape((header['Length'], header['Height'], header['Width'], channels))
def input_datas():
    """Read one input object per dataset listed in meta['InputTypes'].

    Images/videos arrive as uint8 RGB arrays, 'array' as a typed array,
    everything else as a JSON message.
    """
    inputs = []
    for dtype in meta['InputTypes']:
        if dtype in ('image', 'video'):
            inputs.append(input_array(channels=3, dt=numpy.dtype('uint8')))
        elif dtype == 'array':
            inputs.append(input_array())
        else:
            inputs.append(input_json())
    return inputs
def output_json(x):
    """Write *x* as a length-prefixed (4-byte big-endian) JSON message."""
    encoded = json.dumps(x).encode()
    stdout.write(struct.pack('>I', len(encoded)) + encoded)
def output_array(x):
    """Write a 4-D array: a JSON header, then raw big-endian bytes."""
    length, height, width, channels = x.shape
    output_json({
        'Length': length,
        'Width': width,
        'Height': height,
        'Channels': channels,
        'Type': x.dtype.name,
    })
    # Emit the payload big-endian to match the wire format.
    big_endian = numpy.dtype(x.dtype.name).newbyteorder('>')
    stdout.write(x.astype(big_endian, copy=False).tobytes())
def output_datas(in_key, key, datas):
    """Emit a 'data_data' packet followed by one output per dataset type."""
    output_json({
        'Type': 'data_data',
        'Key': in_key,
        'OutputKey': key,
    })
    for i, dtype in enumerate(meta['OutputTypes']):
        if dtype in ('image', 'video', 'array'):
            output_array(datas[i])
        else:
            output_json(datas[i])
    stdout.flush()
def output_data_finish(in_key, key):
    """Emit the 'data_finish' packet marking the end of a key's outputs."""
    output_json({
        'Type': 'data_finish',
        'Key': in_key,
        'OutputKey': key,
    })
    stdout.flush()
def run(callback_func, meta_func=None):
    """Main loop: read the metadata header, then dispatch packets from stdin.

    Packet protocol (all length-prefixed JSON; see input_json):
      - 'init':   allocate empty per-key state.
      - 'job':    read the input datasets and invoke callback_func with a
                  job descriptor plus the datasets; its return value becomes
                  the key's new state.
      - 'finish': invoke callback_func with a finish descriptor (input slots
                  are None), drop the state, and acknowledge with a 'finish'
                  message.

    :param callback_func: handler, typically produced by per_frame_decorate
        or all_decorate.
    :param meta_func: optional hook invoked once with the parsed metadata.
    """
    global stdin, stdout, meta
    # Use binary stdio; on Python 3 the text wrappers must be unwrapped.
    if sys.version_info[0] >= 3:
        stdin = sys.stdin.detach()
        stdout = sys.stdout.buffer
    else:
        stdin = sys.stdin
        stdout = sys.stdout
    meta = input_json()
    if meta_func:
        meta_func(meta)
    states = {}
    while True:
        packet = input_json()
        if packet is None:
            break
        if packet['Type'] == 'init':
            states[packet['Key']] = None
        elif packet['Type'] == 'job':
            # job packet
            key = packet['Key']
            datas = input_datas()
            inputs = [{
                'type': 'job',
                'length': packet['Length'],
                'key': key,
                'state': states[key],
            }] + datas
            states[key] = callback_func(*inputs)
        elif packet['Type'] == 'finish':
            key = packet['Key']
            inputs = [{
                'type': 'finish',
                'key': key,
                'state': states[key],
            }]
            # Pad with one None per input dataset so the callback's
            # signature matches the 'job' case.
            inputs.extend([None]*len(meta['InputTypes']))
            callback_func(*inputs)
            del states[key]
            output_json({
                'Type': 'finish',
                'Key': key,
            })
            stdout.flush()
|
|
import models, torch, copy
import numpy as np
from server import Server
class Client(object):
    """Federated-learning client that trains a logistic-regression model on
    Paillier-encrypted weights, so plaintext weights are never seen locally.
    """

    def __init__(self, conf, public_key, weights, data_x, data_y):
        # conf: dict providing at least 'local_epochs', 'batch_size', 'lr'.
        self.conf = conf
        # Paillier public key, used to encrypt constants for homomorphic math.
        self.public_key = public_key
        self.local_model = models.LR_Model(public_key=self.public_key, w=weights, encrypted=True)
        #print(type(self.local_model.encrypt_weights))
        self.data_x = data_x
        self.data_y = data_y
        #print(self.data_x.shape, self.data_y.shape)

    def local_train(self, weights):
        """Run local mini-batch SGD on the encrypted weights and return the
        encrypted weight delta (updated weights minus the incoming weights).

        NOTE(review): original_w aliases the incoming `weights` object; if
        set_encrypt_weights() stores the same list that the update loop below
        mutates in place, the returned deltas would be zero -- confirm that
        set_encrypt_weights() makes a copy.
        """
        original_w = weights
        self.local_model.set_encrypt_weights(weights)
        # Encrypted constant -1, needed because homomorphic subtraction of a
        # plaintext label is expressed as addition of y * Enc(-1).
        neg_one = self.public_key.encrypt(-1)
        for e in range(self.conf["local_epochs"]):
            print("start epoch ", e)
            #if e > 0 and e%2 == 0:
            #    print("re encrypt")
            #    self.local_model.encrypt_weights = Server.re_encrypt(self.local_model.encrypt_weights)
            # Draw one random mini-batch (without replacement) per epoch.
            idx = np.arange(self.data_x.shape[0])
            batch_idx = np.random.choice(idx, self.conf['batch_size'], replace=False)
            #print(batch_idx)
            x = self.data_x[batch_idx]
            # Append a bias column of ones to the features.
            x = np.concatenate((x, np.ones((x.shape[0], 1))), axis=1)
            y = self.data_y[batch_idx].reshape((-1, 1))
            #print((0.25 * x.dot(self.local_model.encrypt_weights) + 0.5 * y.transpose() * neg_one).shape)
            #print(x.transpose().shape)
            #assert(False)
            # Gradient kept affine in the encrypted weights via the 0.25/0.5
            # coefficients (presumably the linear approximation of the
            # sigmoid -- TODO confirm against models.LR_Model).
            batch_encrypted_grad = x.transpose() * (0.25 * x.dot(self.local_model.encrypt_weights) + 0.5 * y.transpose() * neg_one)
            encrypted_grad = batch_encrypted_grad.sum(axis=1) / y.shape[0]
            for j in range(len(self.local_model.encrypt_weights)):
                self.local_model.encrypt_weights[j] -= self.conf["lr"] * encrypted_grad[j]
        # Report only the (still encrypted) weight difference to the server.
        weight_accumulators = []
        #print(models.decrypt_vector(Server.private_key, weights))
        for j in range(len(self.local_model.encrypt_weights)):
            weight_accumulators.append(self.local_model.encrypt_weights[j] - original_w[j])
        return weight_accumulators
|
|
'''
Factory for dataloaders
Author: Filippo Aleotti
Mail: filippo.aleotti2@unibo.it
'''
import tensorflow as tf
import numpy as np
from sceneflow.training.dataloader import Loader as SYNTH_LOADER
from kitti.training.dataloader import Loader as KITTI_LOADER
from sceneflow.test.dataloader import Loader as SF_TESTING_LOADER
from kitti.test.dataloader import Loader as KITTI_TESTING_LOADER
# Registries mapping a factory name (params['experiment']['factory']) to the
# dataloader class for each experiment mode.
DATALOADER_FACTORY_TRAIN = {
    'sceneflow': SYNTH_LOADER,
    'kitti': KITTI_LOADER,
}
DATALOADER_FACTORY_TEST = {
    'sceneflow': SF_TESTING_LOADER ,
    'kitti': KITTI_TESTING_LOADER,
}
# Valid factory names per mode (live dict key views).
AVAILABLE_DATALOADER_TRAIN = DATALOADER_FACTORY_TRAIN.keys()
AVAILABLE_DATALOADER_TEST = DATALOADER_FACTORY_TEST.keys()
def get_dataloader(params):
    '''Return the dataloader class registered for this experiment.

    Looks up params['experiment']['factory'] in the registry selected by
    params['experiment']['mode'] ('training' or 'testing').
    '''
    experiment = params['experiment']
    name = experiment['factory']
    mode = experiment['mode']
    registries = {
        'training': (DATALOADER_FACTORY_TRAIN, AVAILABLE_DATALOADER_TRAIN),
        'testing': (DATALOADER_FACTORY_TEST, AVAILABLE_DATALOADER_TEST),
    }
    if mode not in registries:
        raise ValueError('Not valid mode. Expected training or testing')
    factory, available = registries[mode]
    assert(name in available)
    return factory[name]
|
|
# Copyright (c) Open-MMLab. All rights reserved.
import cv2
import numpy as np
def _scale_size(size, scale):
"""Rescale a size by a ratio.
Args:
size (tuple[int]): (w, h).
scale (float): Scaling factor.
Returns:
tuple[int]: scaled size.
"""
w, h = size
return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
# Map from interpolation-method name to the corresponding OpenCV flag,
# used by imresize() and friends below.
interp_codes = {
    'nearest': cv2.INTER_NEAREST,
    'bilinear': cv2.INTER_LINEAR,
    'bicubic': cv2.INTER_CUBIC,
    'area': cv2.INTER_AREA,
    'lanczos': cv2.INTER_LANCZOS4
}
def imresize(img,
             size,
             return_scale=False,
             interpolation='bilinear',
             out=None):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple[int]): Target size (w, h).
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos".
        out (ndarray): The output destination.

    Returns:
        tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    h, w = img.shape[:2]
    # cv2.resize takes the target as (w, h), matching `size` directly.
    resized_img = cv2.resize(
        img, size, dst=out, interpolation=interp_codes[interpolation])
    if not return_scale:
        return resized_img
    else:
        w_scale = size[0] / w
        h_scale = size[1] / h
        return resized_img, w_scale, h_scale
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
    """Resize image to the same size of a given image.

    Args:
        img (ndarray): The input image.
        dst_img (ndarray): The target image.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Same as :func:`resize`.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    target_h, target_w = dst_img.shape[:2]
    return imresize(img, (target_w, target_h), return_scale, interpolation)
def rescale_size(old_size, scale, return_scale=False):
    """Calculate the new size to be rescaled to.

    Args:
        old_size (tuple[int]): The old size (w, h) of image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by this
            factor, else if it is a tuple of 2 integers, then the image will
            be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides the
            rescaled image size.

    Returns:
        tuple[int]: The new rescaled image size.
    """
    width, height = old_size
    if isinstance(scale, (float, int)):
        if scale <= 0:
            raise ValueError(f'Invalid scale {scale}, must be positive.')
        factor = scale
    elif isinstance(scale, tuple):
        # Largest factor that keeps both edges within the given bounds.
        long_edge, short_edge = max(scale), min(scale)
        factor = min(long_edge / max(height, width),
                     short_edge / min(height, width))
    else:
        raise TypeError(
            f'Scale must be a number or tuple of int, but got {type(scale)}')
    new_size = _scale_size((width, height), factor)
    return (new_size, factor) if return_scale else new_size
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
    """Resize image while keeping the aspect ratio.

    Args:
        img (ndarray): The input image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by this
            factor, else if it is a tuple of 2 integers, then the image will
            be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides the
            rescaled image.
        interpolation (str): Same as :func:`resize`.

    Returns:
        ndarray: The rescaled image.
    """
    height, width = img.shape[:2]
    target_size, factor = rescale_size((width, height), scale, return_scale=True)
    result = imresize(img, target_size, interpolation=interpolation)
    return (result, factor) if return_scale else result
def imflip(img, direction='horizontal'):
    """Flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped.
        direction (str): The flip direction, either "horizontal" or "vertical".

    Returns:
        ndarray: The flipped image.
    """
    assert direction in ['horizontal', 'vertical']
    # Horizontal flips reverse columns (axis 1); vertical flips rows (axis 0).
    axis = 1 if direction == 'horizontal' else 0
    return np.flip(img, axis=axis)
def imflip_(img, direction='horizontal'):
    """Inplace flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped.
        direction (str): The flip direction, either "horizontal" or "vertical".

    Returns:
        ndarray: The flipped image (inplace).
    """
    assert direction in ['horizontal', 'vertical']
    # Passing img as cv2.flip's third argument makes it the destination,
    # so the flip happens in place.
    if direction == 'horizontal':
        return cv2.flip(img, 1, img)
    else:
        return cv2.flip(img, 0, img)
def imrotate(img,
             angle,
             center=None,
             scale=1.0,
             border_value=0,
             auto_bound=False):
    """Rotate an image.

    Args:
        img (ndarray): Image to be rotated.
        angle (float): Rotation angle in degrees, positive values mean
            clockwise rotation.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used.
        scale (float): Isotropic scale factor.
        border_value (int): Border value.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image.

    Returns:
        ndarray: The rotated image.
    """
    if center is not None and auto_bound:
        raise ValueError('`auto_bound` conflicts with `center`')
    h, w = img.shape[:2]
    if center is None:
        center = ((w - 1) * 0.5, (h - 1) * 0.5)
    assert isinstance(center, tuple)
    # Negate the angle: getRotationMatrix2D treats positive as
    # counter-clockwise, while this API documents positive as clockwise.
    matrix = cv2.getRotationMatrix2D(center, -angle, scale)
    if auto_bound:
        # Grow the canvas to the rotated image's bounding box and shift the
        # affine translation so the content stays centered.
        cos = np.abs(matrix[0, 0])
        sin = np.abs(matrix[0, 1])
        new_w = h * sin + w * cos
        new_h = h * cos + w * sin
        matrix[0, 2] += (new_w - w) * 0.5
        matrix[1, 2] += (new_h - h) * 0.5
        w = int(np.round(new_w))
        h = int(np.round(new_h))
    rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
    return rotated
def bbox_clip(bboxes, img_shape):
    """Clip bboxes to fit the image shape.

    Args:
        bboxes (ndarray): Shape (..., 4*k)
        img_shape (tuple[int]): (height, width) of the image.

    Returns:
        ndarray: Clipped bboxes.
    """
    assert bboxes.shape[-1] % 4 == 0
    # Per-coordinate upper bound: x coordinates capped at w-1, y at h-1.
    upper = np.empty(bboxes.shape[-1], dtype=bboxes.dtype)
    upper[0::2] = img_shape[1] - 1
    upper[1::2] = img_shape[0] - 1
    return np.minimum(bboxes, upper).clip(min=0)
def bbox_scaling(bboxes, scale, clip_shape=None):
    """Scaling bboxes w.r.t the box center.

    Args:
        bboxes (ndarray): Shape(..., 4).
        scale (float): Scaling factor.
        clip_shape (tuple[int], optional): If specified, bboxes that exceed the
            boundary will be clipped according to the given shape (h, w).

    Returns:
        ndarray: Scaled bboxes.
    """
    if float(scale) == 1.0:
        scaled = bboxes.copy()
    else:
        # +1 because boxes use inclusive pixel coordinates.
        widths = bboxes[..., 2] - bboxes[..., 0] + 1
        heights = bboxes[..., 3] - bboxes[..., 1] + 1
        half_dw = widths * (scale - 1) * 0.5
        half_dh = heights * (scale - 1) * 0.5
        scaled = bboxes + np.stack((-half_dw, -half_dh, half_dw, half_dh), axis=-1)
    if clip_shape is None:
        return scaled
    return bbox_clip(scaled, clip_shape)
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
    """Crop image patches.

    3 steps: scale the bboxes -> clip bboxes -> crop and pad.

    Args:
        img (ndarray): Image to be cropped.
        bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
        scale (float, optional): Scale ratio of bboxes, the default value
            1.0 means no padding.
        pad_fill (Number | list[Number]): Value to be filled for padding.
            Default: None, which means no padding.

    Returns:
        list[ndarray] | ndarray: The cropped image patches.
    """
    chn = 1 if img.ndim == 2 else img.shape[2]
    if pad_fill is not None:
        if isinstance(pad_fill, (int, float)):
            # Broadcast a scalar fill value to every channel.
            pad_fill = [pad_fill for _ in range(chn)]
        assert len(pad_fill) == chn
    # Normalize a single (4,) box to shape (1, 4) so the loop below is uniform.
    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
    patches = []
    for i in range(clipped_bbox.shape[0]):
        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
        if pad_fill is None:
            patch = img[y1:y2 + 1, x1:x2 + 1, ...]
        else:
            # Padded crop: build a fill-valued patch sized to the (unclipped)
            # scaled box, then paste the in-image region at its offset.
            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
            if chn == 1:
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
            else:
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
            patch = np.array(
                pad_fill, dtype=img.dtype) * np.ones(
                    patch_shape, dtype=img.dtype)
            # Offsets inside the patch where the visible image region starts.
            x_start = 0 if _x1 >= 0 else -_x1
            y_start = 0 if _y1 >= 0 else -_y1
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            patch[y_start:y_start + h, x_start:x_start + w,
                  ...] = img[y1:y1 + h, x1:x1 + w, ...]
        patches.append(patch)
    if bboxes.ndim == 1:
        return patches[0]
    else:
        return patches
def impad(img, shape, pad_val=0):
    """Pad an image to a certain shape.

    Args:
        img (ndarray): Image to be padded.
        shape (tuple[int]): Expected padding shape (h, w).
        pad_val (Number | Sequence[Number]): Values to be filled in padding
            areas. Default: 0.

    Returns:
        ndarray: The padded image.
    """
    if not isinstance(pad_val, (int, float)):
        # A sequence fill value must provide one entry per channel.
        assert len(pad_val) == img.shape[-1]
    if len(shape) < len(img.shape):
        # Extend a 2-D target shape with the image's channel dimension.
        shape = shape + (img.shape[-1], )
    assert len(shape) == len(img.shape)
    for target, current in zip(shape, img.shape):
        assert target >= current
    padded = np.empty(shape, dtype=img.dtype)
    padded[...] = pad_val
    # Paste the original image into the top-left corner.
    padded[:img.shape[0], :img.shape[1], ...] = img
    return padded
def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image to ensure each edge to be multiple to some number.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Padded image edges will be multiple to divisor.
        pad_val (Number | Sequence[Number]): Same as :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    # Round each spatial edge up to the next multiple of divisor.
    target_h = int(np.ceil(img.shape[0] / divisor)) * divisor
    target_w = int(np.ceil(img.shape[1] / divisor)) * divisor
    return impad(img, (target_h, target_w), pad_val)
|
|
import os.path as op
import numpy as np
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import ephypype
from ephypype.nodes import create_iterator
from ephypype.datasets import fetch_omega_dataset
#base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')
#data_path = fetch_omega_dataset(base_path)
# Root of the BIDS-formatted SAflow MEG dataset.
data_path = op.join('/scratch/hyruuk/saflow_data/saflow_bids')
#### PARAMETERS
import json # noqa
import pprint # noqa
# All pipeline settings come from params.json next to this script.
params = json.load(open("params.json"))
pprint.pprint({'experiment parameters': params["general"]})
subject_ids = params["general"]["subject_ids"] # sub-003
session_ids = params["general"]["session_ids"]
run_ids = params["general"]["run_ids"] # ses-0001
NJOBS = params["general"]["NJOBS"]
pprint.pprint({'inverse parameters': params["inverse"]})
spacing = params["inverse"]['spacing'] # ico-5 vs oct-6
snr = params["inverse"]['snr'] # use smaller SNR for raw data
inv_method = params["inverse"]['img_method'] # sLORETA, MNE, dSPM, LCMV
parc = params["inverse"]['parcellation'] # parcellation to use: 'aparc' vs 'aparc.a2009s' # noqa
# noise covariance matrix filename template
noise_cov_fname = params["inverse"]['noise_cov_fname']
# set sbj dir path, i.e. where the FS folfers are
subjects_dir = op.join(data_path, params["general"]["subjects_dir"])
########
# workflow directory within the `base_dir`
src_reconstruction_pipeline_name = 'source_reconstruction_' + \
    inv_method + '_' + parc.replace('.', '')
main_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)
main_workflow.base_dir = data_path
# Iterate the workflow over every (subject, session, run) combination.
infosource = create_iterator(['subject_id', 'session_id', 'run_id'],
                             [subject_ids, session_ids, run_ids])
############
# DataGrabber resolving the epoched .fif file and its coregistration
# (-trans) file from the BIDS-like tree.
# BUG FIX: 'session_id' and 'run_id' must be declared as infields alongside
# 'subject_id' -- the workflow connects all three into this node and the
# template_args consume them, but only 'subject_id' was declared, so the
# other two inputs would not exist on the node.
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id', 'session_id', 'run_id'],
                                               outfields=['raw_file', 'trans_file']),  # noqa
                     name='datasource')
datasource.inputs.base_directory = data_path
datasource.inputs.template = '*%s/%s/meg/%s_%s_task-gradCPT_%s_meg_%s.fif'
datasource.inputs.template_args = dict(
    raw_file=[['subject_id', 'session_id', 'subject_id', 'session_id', 'run_id', '-epo']],
    trans_file=[['subject_id', 'session_id', 'subject_id', 'session_id', 'run_id', '-epotrans']])
datasource.inputs.sort_filelist = True
###########
from ephypype.pipelines import create_pipeline_source_reconstruction # noqa
# NOTE(review): event_id is defined but the pipeline below receives
# events_id={} -- confirm whether ignoring the event dict is intentional.
event_id = {'Freq': 21, 'Rare': 31}
inv_sol_workflow = create_pipeline_source_reconstruction(
    data_path, subjects_dir, spacing=spacing, inv_method=inv_method, parc=parc,
    noise_cov_fname=noise_cov_fname, is_epoched=True, events_id={}, ROIs_mean=False, all_src_space=True)
###########
# Feed the iterated ids into the data grabber.
main_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')
main_workflow.connect(infosource, 'session_id', datasource, 'session_id')
main_workflow.connect(infosource, 'run_id', datasource, 'run_id')
##########
# Wire subject id and the grabbed files into the inverse-solution pipeline.
main_workflow.connect(infosource, 'subject_id',
                      inv_sol_workflow, 'inputnode.sbj_id')
main_workflow.connect(datasource, 'raw_file',
                      inv_sol_workflow, 'inputnode.raw')
main_workflow.connect(datasource, 'trans_file',
                      inv_sol_workflow, 'inputnode.trans_file')
##########
#main_workflow.write_graph(graph2use='colored')  # colored
#########
#import matplotlib.pyplot as plt # noqa
#img = plt.imread(op.join(data_path, src_reconstruction_pipeline_name, 'graph.png')) # noqa
#plt.figure(figsize=(8, 8))
#plt.imshow(img)
#plt.axis('off')
#########
main_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}
# Run workflow locally on 1 CPU
main_workflow.run(plugin='MultiProc', plugin_args={'n_procs': NJOBS})
|
|
import unittest
import skimage.io
import numpy as np
from detector import Detector
# Shared detector instance: loads the Mask R-CNN weights once for all tests.
detector = Detector("../weight/mask_rcnn_fashion.h5", "detection")
# Box fixture used by the geometry tests below (10x10 per get_width/get_height).
image_input = [10,10,20,20]
class TestDetector(unittest.TestCase):
    """Integration tests for Detector; they require the model weights plus
    test.jpg / wall.jpg fixtures on disk."""

    def test_detection(self):
        """A fashion image should yield at least one detected region."""
        # Test with image fashion
        image = skimage.io.imread("test.jpg")
        detection_result = detector.detection(image)
        output_length = len(detection_result['rois'])
        self.assertGreaterEqual(output_length, 1)

    def test_detection_error(self):
        """An image without fashion content should yield zero detections."""
        image = skimage.io.imread("wall.jpg")
        detection_result = detector.detection(image)
        output_length = len(detection_result['rois'])
        self.assertEqual(output_length, 0)

    def test_wrong_input(self):
        """Non-image input must raise ValueError."""
        self.assertRaises(ValueError, detector.detection, "Wrong input")

    def test_get_width_of_object(self):
        """Width of the fixture box is 10."""
        width = detector.get_width(image_input)
        self.assertEqual(width, 10)

    def test_get_height_of_object(self):
        """Height of the fixture box is 10."""
        height = detector.get_height(image_input)
        self.assertEqual(height, 10)

    def test_get_area_of_object(self):
        """Area of the fixture box is width * height = 100."""
        area = detector.get_area(image_input)
        self.assertEqual(area, 100)

    def test_get_biggest_box_of_object(self):
        """get_biggest_box returns an ndarray box from real detections."""
        image = skimage.io.imread("test.jpg")
        detection_result = detector.detection(image)
        biggest_box = detector.get_biggest_box(detection_result['rois'])
        self.assertIsInstance(biggest_box, np.ndarray)

    def test_crop_object(self):
        """crop_object resizes the largest detection crop to 224x224x3."""
        image = skimage.io.imread("test.jpg")
        detection_result = detector.detection(image)
        biggest_box = detector.get_biggest_box(detection_result['rois'])
        resized = detector.crop_object(image, biggest_box)
        self.assertEqual(resized.shape, (224,224,3))
|
|
import cv2
import numpy as np
# Demo script comparing OpenCV thresholding variants on a book-page photo.
img = cv2.imread('images/bookpage.jpg')
# Binary threshold directly on the BGR image (applied per channel).
retval, threshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
## different kinds of threshold
# grayscale
grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# normal threshold
retval2, threshold2 = cv2.threshold(grayscaled, 12, 255, cv2.THRESH_BINARY)
# adaptive threshold: per-pixel threshold from a 115x115 Gaussian-weighted
# neighbourhood, minus the constant 1.
gaus = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
# otsu threshold: the 125 is ignored; Otsu picks the threshold automatically.
retval3, otsu = cv2.threshold(grayscaled, 125, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Show all variants side by side until a key is pressed.
cv2.imshow('original', img)
cv2.imshow('threshold', threshold)
cv2.imshow('threshold2', threshold2)
cv2.imshow('gaus', gaus)
cv2.imshow('otsu', otsu)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
|
#!/usr/bin/env python3
import csv
import numpy as np
import pandas as pd
import os
import logging
import tqdm
import math
from data_class import data_util
from info import data_info
from util import io_util
from type import OpUnit, Target, ExecutionFeature
def write_extended_data(output_path, symbol, index_value_list, data_map):
    """Rewrite output_path with a header row plus one row per map entry.

    :param output_path: destination CSV file (existing content is discarded)
    :param symbol: label for the header row
    :param index_value_list: values of the header row
    :param data_map: mapping of row label -> row values
    """
    # Truncate any stale content before appending the rows below.
    open(output_path, 'w').close()
    io_util.write_csv_result(output_path, symbol, index_value_list)
    for label, values in data_map.items():
        io_util.write_csv_result(output_path, label, values)
def get_mini_runner_data(filename, model_results_path, txn_sample_interval, model_map=None, predict_cache=None, trim=0.2):
    """Get the training data from the mini runner

    :param filename: the input data file
    :param model_results_path: results log directory
    :param txn_sample_interval: sampling interval for the transaction OUs
    :param model_map: the map from OpUnit to the mini model (default: empty)
    :param predict_cache: cache for the mini model prediction (default: empty)
    :param trim: % of too high/too low anomalies to prune
    :return: the list of Data for execution operating units
    """
    # BUG FIX: the defaults were mutable dict literals ({}), which are shared
    # across calls and can silently accumulate state; use the None-sentinel
    # idiom so each call gets a fresh dict unless the caller supplies one.
    if model_map is None:
        model_map = {}
    if predict_cache is None:
        predict_cache = {}
    if "txn" in filename:
        # Cannot handle the transaction manager data yet
        return _txn_get_mini_runner_data(filename, model_results_path, txn_sample_interval)
    if "execution" in filename:
        # Handle the execution data
        return _execution_get_mini_runner_data(filename, model_map, predict_cache, trim)
    if "gc" in filename or "log" in filename:
        # Handle of the gc or log data with interval-based conversion
        return _interval_get_mini_runner_data(filename, model_results_path)
    return _default_get_mini_runner_data(filename)
def _default_get_mini_runner_data(filename):
    """Load an OU data file that needs no preprocessing.

    The file name (without extension) names the OpUnit. The trailing
    METRICS_OUTPUT_NUM columns are metrics; the last MINI_MODEL_TARGET_NUM
    of them are the prediction targets.
    """
    # In the default case, the data does not need any pre-processing and the file name indicates the opunit
    df = pd.read_csv(filename, skipinitialspace=True)
    headers = list(df.columns.values)
    data_info.instance.parse_csv_header(headers, False)
    file_name = os.path.splitext(os.path.basename(filename))[0]
    x = df.iloc[:, :-data_info.instance.METRICS_OUTPUT_NUM].values
    y = df.iloc[:, -data_info.instance.MINI_MODEL_TARGET_NUM:].values
    return [OpUnitData(OpUnit[file_name.upper()], x, y)]
def _txn_get_mini_runner_data(filename, model_results_path, txn_sample_interval):
    """Load transaction-manager OU data and aggregate it per time interval.

    Rows are grouped by start time rounded to CONTENDING_OPUNIT_INTERVAL;
    features are summed per interval (plus a distinct-CPU count), scaled by
    the sampling interval, and targets are averaged. The converted rows are
    also logged to <model_results_path>/<name>_txn_converted_data.csv.
    """
    # In the default case, the data does not need any pre-processing and the file name indicates the opunit
    df = pd.read_csv(filename)
    file_name = os.path.splitext(os.path.basename(filename))[0]
    # prepending a column of ones as the base transaction data feature
    base_x = pd.DataFrame(data=np.ones((df.shape[0], 1), dtype=int))
    df = pd.concat([base_x, df], axis=1)
    x = df.iloc[:, :-data_info.instance.METRICS_OUTPUT_NUM].values
    y = df.iloc[:, -data_info.instance.MINI_MODEL_TARGET_NUM:].values
    # NOTE(review): this uses data_info.instance.Target while
    # _interval_get_mini_runner_data uses the imported Target enum --
    # confirm both resolve to the same enumeration.
    start_times = df.iloc[:, data_info.instance.TARGET_CSV_INDEX[data_info.instance.Target.START_TIME]].values
    cpu_ids = df.iloc[:, data_info.instance.TARGET_CSV_INDEX[data_info.instance.Target.CPU_ID]].values
    logging.info("Loaded file: {}".format(OpUnit[file_name.upper()]))
    # change the data based on the interval for the periodically invoked operating units
    prediction_path = "{}/{}_txn_converted_data.csv".format(model_results_path, file_name)
    io_util.create_csv_file(prediction_path, [""])
    interval = data_info.instance.CONTENDING_OPUNIT_INTERVAL
    # Map from interval start time to the data in this interval
    interval_x_map = {}
    interval_y_map = {}
    interval_id_map = {}
    n = x.shape[0]
    for i in tqdm.tqdm(list(range(n)), desc="Group data by interval"):
        rounded_time = data_util.round_to_interval(start_times[i], interval)
        if rounded_time not in interval_x_map:
            interval_x_map[rounded_time] = []
            interval_y_map[rounded_time] = []
            interval_id_map[rounded_time] = set()
        interval_x_map[rounded_time].append(x[i])
        interval_y_map[rounded_time].append(y[i])
        interval_id_map[rounded_time].add(cpu_ids[i])
    # Construct the new data
    x_list = []
    y_list = []
    for rounded_time in interval_x_map:
        # Sum the features
        x_new = np.sum(interval_x_map[rounded_time], axis=0)
        # Concatenate the number of different threads
        x_new = np.concatenate((x_new, [len(interval_id_map[rounded_time])]))
        # Compensate for sampling: each recorded row stands for
        # txn_sample_interval + 1 actual transactions.
        x_new *= txn_sample_interval + 1
        x_list.append(x_new)
        # The prediction is the average behavior
        y_list.append(np.average(interval_y_map[rounded_time], axis=0))
        io_util.write_csv_result(prediction_path, rounded_time, np.concatenate((x_list[-1], y_list[-1])))
    return [OpUnitData(OpUnit[file_name.upper()], np.array(x_list), np.array(y_list))]
def _interval_get_mini_runner_data(filename, model_results_path):
    """Load mini-runner data for a periodically invoked opunit and aggregate it per time interval.

    :param filename: input CSV; its basename (without extension) names the OpUnit
    :param model_results_path: directory where the converted-data CSV is written
    :return: single-element list containing the aggregated OpUnitData
    """
    # In the default case, the data does not need any pre-processing and the file name indicates the opunit
    df = pd.read_csv(filename, skipinitialspace=True)
    headers = list(df.columns.values)
    data_info.instance.parse_csv_header(headers, False)
    file_name = os.path.splitext(os.path.basename(filename))[0]
    # Leading columns are features; the trailing MINI_MODEL_TARGET_NUM columns are targets
    x = df.iloc[:, :-data_info.instance.METRICS_OUTPUT_NUM].values
    y = df.iloc[:, -data_info.instance.MINI_MODEL_TARGET_NUM:].values
    start_times = df.iloc[:, data_info.instance.RAW_TARGET_CSV_INDEX[Target.START_TIME]].values
    logging.info("Loaded file: {}".format(OpUnit[file_name.upper()]))
    # change the data based on the interval for the periodically invoked operating units
    prediction_path = "{}/{}_interval_converted_data.csv".format(model_results_path, file_name)
    io_util.create_csv_file(prediction_path, [""])
    interval = data_info.instance.PERIODIC_OPUNIT_INTERVAL
    # Map from interval start time to the data in this interval
    interval_x_map = {}
    interval_y_map = {}
    n = x.shape[0]
    for i in tqdm.tqdm(list(range(n)), desc="Group data by interval"):
        rounded_time = data_util.round_to_interval(start_times[i], interval)
        if rounded_time not in interval_x_map:
            interval_x_map[rounded_time] = []
            interval_y_map[rounded_time] = []
        interval_x_map[rounded_time].append(x[i])
        interval_y_map[rounded_time].append(y[i])
    # Construct the new data: one aggregated training sample per interval
    x_list = []
    y_list = []
    for rounded_time in interval_x_map:
        # Sum the features
        x_new = np.sum(interval_x_map[rounded_time], axis=0)
        # Keep the interval parameter the same (it was summed above, so divide it back out)
        # TODO: currently the interval parameter is always the last. Change the hard-coding later
        x_new[-1] /= len(interval_x_map[rounded_time])
        x_list.append(x_new)
        # The prediction is the average behavior
        y_list.append(np.average(interval_y_map[rounded_time], axis=0))
        io_util.write_csv_result(prediction_path, rounded_time, np.concatenate((x_list[-1], y_list[-1])))
    return [OpUnitData(OpUnit[file_name.upper()], np.array(x_list), np.array(y_list))]
def _execution_get_mini_runner_data(filename, model_map, predict_cache, trim):
    """Get the training data from the mini runner
    :param filename: the input data file
    :param model_map: the map from OpUnit to the mini model
    :param predict_cache: cache for the mini model prediction
    :param trim: % of too high/too low anomalies to prune
    :return: the list of Data for execution operating units
    """
    # Get the mini runner data for the execution engine
    data_map = {}  # OpUnit -> rows of [features, residual targets]
    raw_data_map = {}  # (opunit, *features) -> list of residual target vectors
    input_output_boundary = math.nan
    with open(filename, "r") as f:
        reader = csv.reader(f, delimiter=",", skipinitialspace=True)
        indexes = next(reader)
        data_info.instance.parse_csv_header(indexes, True)
        features_vector_index = data_info.instance.raw_features_csv_index[ExecutionFeature.FEATURES]
        raw_boundary = data_info.instance.raw_features_csv_index[data_info.instance.INPUT_OUTPUT_BOUNDARY]
        input_output_boundary = len(data_info.instance.input_csv_index)
        for line in reader:
            # drop query_id, pipeline_id, num_features, features_vector
            record = [d for i, d in enumerate(line) if i >= raw_boundary]
            data = list(map(data_util.convert_string_to_numeric, record))
            x_multiple = data[:input_output_boundary]
            y_merged = np.array(data[-data_info.instance.MINI_MODEL_TARGET_NUM:])
            # Get the opunits located within
            opunits = []
            features = line[features_vector_index].split(';')
            for idx, feature in enumerate(features):
                opunit = OpUnit[feature]
                # Per-opunit feature slice (list-valued columns carry one entry per opunit)
                x_loc = [v[idx] if type(v) == list else v for v in x_multiple]
                if opunit in model_map:
                    # Subtract the modelled opunit's predicted cost from the merged targets
                    key = [opunit] + x_loc
                    if tuple(key) not in predict_cache:
                        predict = model_map[opunit].predict(np.array(x_loc).reshape(1, -1))[0]
                        predict_cache[tuple(key)] = predict
                        assert len(predict) == len(y_merged)
                        y_merged = y_merged - predict
                    else:
                        predict = predict_cache[tuple(key)]
                        assert len(predict) == len(y_merged)
                        y_merged = y_merged - predict
                        # NOTE(review): residuals are clipped at 0 only on this cached
                        # branch, not on the fresh-prediction branch above — confirm
                        # whether the asymmetry is intentional
                        y_merged = np.clip(y_merged, 0, None)
                else:
                    opunits.append((opunit, x_loc))
            if len(opunits) > 1:
                raise Exception('Unmodelled OperatingUnits detected: {}'.format(opunits))
            # Record into predict_cache
            # NOTE(review): assumes exactly one unmodelled opunit remains per record;
            # if every opunit was already modelled this raises IndexError — verify callers
            key = tuple([opunits[0][0]] + opunits[0][1])
            if key not in raw_data_map:
                raw_data_map[key] = []
            raw_data_map[key].append(y_merged)
    # Postprocess the raw_data_map -> data_map
    # We need to do this here since we need to have seen all the data
    # before we can start pruning. This step is done here so dropped
    # data don't actually become a part of the model.
    for key in raw_data_map:
        len_vec = len(raw_data_map[key])
        # Sort by the last target so trimming drops the extremes
        raw_data_map[key].sort(key=lambda x: x[-1])
        # compute how much to trim
        trim_side = trim * len_vec
        low = int(math.ceil(trim_side))
        high = len_vec - low
        if low >= high:
            # if bounds are bad, just take the median
            raw_data_map[key] = np.median(raw_data_map[key], axis=0)
        else:
            # otherwise, x% trimmed mean
            raw_data_map[key] = np.average(raw_data_map[key][low:high], axis=0)
        # Expose the singular data point
        opunit = key[0]
        if opunit not in data_map:
            data_map[opunit] = []
        predict = raw_data_map[key]
        predict_cache[key] = predict
        data_map[opunit].append(list(key[1:]) + list(predict))
    data_list = []
    for opunit, values in data_map.items():
        np_value = np.array(values)
        x = np_value[:, :input_output_boundary]
        y = np_value[:, -data_info.instance.MINI_MODEL_TARGET_NUM:]
        data_list.append(OpUnitData(opunit, x, y))
    return data_list
class OpUnitData:
    """Training-data bundle for a single operating unit.

    Attributes:
        opunit: the operating unit this data belongs to
        x: input feature matrix
        y: output/target matrix
    """

    def __init__(self, opunit, x, y):
        """Bind the operating unit to its features and targets.

        :param opunit: The opunit that the data is related to
        :param x: The input feature
        :param y: The outputs
        """
        self.x = x
        self.y = y
        self.opunit = opunit
|
|
"""
Compute
Inception Score (IS),
Frechet Inception Distance (FID), ref "https://github.com/mseitzer/pytorch-fid/blob/master/fid_score.py"
Maximum Mean Discrepancy (MMD)
for a set of fake images
use numpy array
Xr: high-level features for real images; nr by d array
Yr: labels for real images
Xg: high-level features for fake images; ng by d array
Yg: labels for fake images
IMGSr: real images
IMGSg: fake images
"""
import os
import gc
import numpy as np
# from numpy import linalg as LA
from scipy import linalg
import torch
import torch.nn as nn
from scipy.stats import entropy
from torch.nn import functional as F
from torchvision.utils import save_image
from utils import SimpleProgressBar, IMGs_dataset
##############################################################################
# FID scores
##############################################################################
# compute FID based on extracted features
def FID(Xr, Xg, eps=1e-10):
    '''
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    :param Xr: real features, shape (nr, d)
    :param Xg: generated features, shape (ng, d)
    :param eps: value added to the covariance diagonals when the product is singular
    :return: scalar FID score
    '''
    # sample means
    MUr = np.mean(Xr, axis = 0)
    MUg = np.mean(Xg, axis = 0)
    mean_diff = MUr - MUg
    # sample covariances (d x d); np.cov expects variables in rows
    SIGMAr = np.cov(Xr.transpose())
    SIGMAg = np.cov(Xg.transpose())
    # Product might be almost singular
    covmean, _ = linalg.sqrtm(SIGMAr.dot(SIGMAg), disp=False)  # square root of a matrix
    if not np.isfinite(covmean).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(SIGMAr.shape[0]) * eps
        # BUG FIX: the fallback previously used disp=True (prints/raises on poor
        # accuracy) and never discarded the imaginary component, so a singular
        # product could yield a complex FID score.
        covmean, _ = linalg.sqrtm((SIGMAr + offset).dot(SIGMAg + offset), disp=False)
    # drop the negligible imaginary part introduced by numerical error
    covmean = covmean.real
    # fid score
    fid_score = mean_diff.dot(mean_diff) + np.trace(SIGMAr + SIGMAg - 2*covmean)
    return fid_score
##test
#Xr = np.random.rand(10000,1000)
#Xg = np.random.rand(10000,1000)
#print(FID(Xr, Xg))
# compute FID from raw images
def cal_FID(PreNetFID, IMGSr, IMGSg, batch_size = 500, resize = None):
    """Compute FID between two raw image sets using PreNetFID features.

    :param PreNetFID: pre-trained feature extractor (evaluated on CUDA)
    :param IMGSr: real images, shape (nr, nc, img_size, img_size)
    :param IMGSg: fake images, shape (ng, nc, img_size, img_size)
    :param batch_size: feature-extraction batch size (shrunk to min(nr, ng) if larger)
    :param resize: if None, do not resize; if resize = (H, W), resize images to nc x H x W
    :return: scalar FID score
    """
    PreNetFID.eval()
    nr = IMGSr.shape[0]
    ng = IMGSg.shape[0]
    nc = IMGSr.shape[1]  # IMGSr is nr x nc x img_size x img_size
    img_size = IMGSr.shape[2]
    if batch_size > min(nr, ng):
        batch_size = min(nr, ng)

    # Probe one image to learn the length of the extracted feature vector
    with torch.no_grad():
        test_img = torch.from_numpy(IMGSr[0].reshape((1, nc, img_size, img_size))).type(torch.float).cuda()
        if resize is not None:
            test_img = nn.functional.interpolate(test_img, size=resize, scale_factor=None, mode='bilinear', align_corners=False)
        test_features = PreNetFID(test_img)
        d = test_features.shape[1]  # length of extracted features

    def _extract_features(imgs, out, n):
        """Fill out[0:n] with PreNetFID features of imgs, batching on the GPU."""
        pb = SimpleProgressBar()
        start = 0
        with torch.no_grad():
            while start < n:
                # BUG FIX: the old range(n // batch_size) loop silently left the
                # final partial batch as all-zero features; process every image.
                stop = min(start + batch_size, n)
                batch = torch.from_numpy(imgs[start:stop]).type(torch.float).cuda()
                if resize is not None:
                    batch = nn.functional.interpolate(batch, size=resize, scale_factor=None, mode='bilinear', align_corners=False)
                feats = PreNetFID(batch)
                out[start:stop] = feats.detach().cpu().numpy()
                start = stop
                # BUG FIX: progress was min(max(p, 100), 100), i.e. always 100
                pb.update(min(start / n * 100, 100))
                del feats, batch
                gc.collect()
        torch.cuda.empty_cache()

    Xr = np.zeros((nr, d))
    Xg = np.zeros((ng, d))
    _extract_features(IMGSr, Xr, nr)
    _extract_features(IMGSg, Xg, ng)

    fid_score = FID(Xr, Xg, eps=1e-6)
    return fid_score
##############################################################################
# label_score
# difference between assigned label and predicted label
##############################################################################
def cal_labelscore(PreNet, images, labels_assi, min_label_before_shift, max_label_after_shift, batch_size = 500, resize = None, num_workers=0):
    '''
    Label score: mean and std of |predicted label - assigned label| after
    undoing the [0, 1] label normalization.

    PreNet: pre-trained CNN (evaluated on CUDA); returns (labels, features)
    images: fake images, shape (n, nc, img_size, img_size)
    labels_assi: assigned labels
    resize: if None, do not resize; if resize = (H,W), resize images to 3 x H x W
    '''
    PreNet.eval()
    num_imgs = images.shape[0]
    labels_assi = labels_assi.reshape(-1)

    loader = torch.utils.data.DataLoader(
        IMGs_dataset(images, labels_assi, normalize=False),
        batch_size=batch_size, shuffle=False, num_workers=num_workers)

    # over-allocate by one batch so the last (possibly partial) batch fits
    labels_pred = np.zeros(num_imgs + batch_size)
    filled = 0
    bar = SimpleProgressBar()
    for batch_images, batch_labels in loader:
        batch_images = batch_images.type(torch.float).cuda()
        batch_labels = batch_labels.type(torch.float).cuda()
        cur = len(batch_labels)
        preds, _ = PreNet(batch_images)
        labels_pred[filled:(filled + cur)] = preds.detach().cpu().numpy().reshape(-1)
        filled += cur
        bar.update((float(filled) / num_imgs) * 100)
        del batch_images
        gc.collect()
        torch.cuda.empty_cache()
    labels_pred = labels_pred[0:num_imgs]

    # undo the normalization applied when labels were shifted into [0, 1]
    shift = np.abs(min_label_before_shift)
    labels_pred = labels_pred * max_label_after_shift - shift
    labels_assi = labels_assi * max_label_after_shift - shift

    abs_err = np.abs(labels_pred - labels_assi)
    return np.mean(abs_err), np.std(abs_err)
|
|
"""
This module contains code that creates n-dimensional arrays
"""
import numpy as np
mat = np.array([[1, 2], [3, 4]])
vec = np.array([1, 2])
mat.shape # (2, 2)
vec.shape # (2,)
mat.reshape(4,)
# array([1, 2, 3, 4])
mat1 = [[1, 2], [3, 4]]
mat2 = [[5, 6], [7, 8]]
mat3 = [[9, 10], [11, 12]]
arr_3d = np.array([mat1, mat2, mat3])
arr_3d.shape # (3, 2, 2)
mat[0, 0] # 1 - top left element
mat[1, 1] # 4 - bottom right element
mat[:, 0] # array([1, 3])
print(arr_3d)
|
|
from resizeimage import resizeimage
from PIL import Image,ImageDraw
from skimage import measure
import matplotlib.pyplot as plt
import numpy as np
import cv2
import csv
import os
import sys
# directory holding the 26 16x16 tile template images (1.png ... 26.png)
specs_path = "../level1specs/"
im_array = []
# symbol written to the CSV for template i+1 (same order as im_array)
unique_symbols = ["=","E","=","C","C","F","P","?","C","#","-","P","P","P","#","?","=","=","E","P","P","P","P","P","P","-"]
folder_levels = "../Edited/"
level_name = sys.argv[1]  # level image filename, passed on the command line
original_path = "../levels_CSV/"
transposed_path = "../levels_transposed/"
ssim = []  # per-template similarity scores for the tile currently being matched
maximum_value = 0
maximum_matrix = []  # best-matching template index (1-based), one entry per tile
level_list=[]
# level grid is 223 columns ("weight" is used as width throughout) by 13 rows
weight, height = 223, 13;
Matrix = [[0 for x in range(weight)] for y in range(height)]
trans = [[0 for x in range(height)] for y in range(weight)]
# template image paths, in template-number order
for i in range(1,27):
    name = specs_path + str(i) + ".png"
    im_array.append(name)
def imgComp(imageA):
    """Match one 16x16 tile image against every template via SSIM.

    Appends the 1-based index of the best-matching template to the global
    maximum_matrix, or 11 when no template scores at least 0.5.
    Side effects: resizes and OVERWRITES each template image on disk on every
    call, and writes the tile to ../level1specs/temp.png so cv2 can read it.
    """
    global ssim
    for i in range(0,len(im_array)):
        M=Image.open(im_array[i])
        match = resizeimage.resize_cover(M, [16, 16])
        match.save(im_array[i])
        # round-trip the PIL tile through disk so cv2.imread can load it
        xyz= resizeimage.resize_cover(imageA, [16, 16])
        xyz.save("../level1specs/temp.png")
        xyz=cv2.imread("../level1specs/temp.png")
        match=cv2.imread(im_array[i])
        # SSIM is computed on grayscale versions of both images
        match=cv2.cvtColor(match, cv2.COLOR_BGR2GRAY)
        xyz=cv2.cvtColor(xyz, cv2.COLOR_BGR2GRAY)
        # NOTE(review): measure.compare_ssim was removed in scikit-image 0.18;
        # newer versions require skimage.metrics.structural_similarity
        ssim_ = measure.compare_ssim(xyz,match)
        ssim.append(ssim_)
    maximum_value = max(ssim)
    # below 0.5 similarity, fall back to template 11
    if(maximum_value < 0.5): maximum_matrix.append(11)
    else:
        maximum_matrix.append(ssim.index(max(ssim))+1)
    ssim = []
im = Image.open(folder_levels+level_name)
count = 0
# Slide a 16x16 window over the level image; imgComp appends the best-matching
# template index for each tile to maximum_matrix (row-major order).
for y in range (7,215,16):
    for x in range (16,3584,16):
        if (x > 3584 or y > 215):
            break
        img2 =im.crop((x,y ,x+16, y+16))
        imgComp(img2)
        count += 1

string = ""
# BUG FIX: `os.specs_path.splitext` raised AttributeError (specs_path is a local
# variable, not an os member) -> os.path.splitext; and "weight" is not a valid
# file mode (the width variable was confused with the mode string) -> "w".
f = open(original_path + os.path.splitext(level_name)[0] + ".csv", "w")
iterator = 0
# Write the 13x223 symbol grid, one row per line.
for i in range (0,13):
    for j in range (0,223):
        Matrix[i][j] = unique_symbols[maximum_matrix[iterator]-1]
        string += (unique_symbols[maximum_matrix[iterator]-1])
        iterator += 1
    print(string)
    f.write(string)
    string = ""
    f.write("\n")
f.close()

# Also write the transposed grid (223 lines of 13 symbols).
r = open(transposed_path + os.path.splitext(level_name)[0] + "_trans.txt", "w")
temp = ""
for i in range(0, 223):
    for j in range(0, 13):
        trans[i][j] = Matrix[j][i]
        temp += trans[i][j]
    r.write(temp)
    temp = ""
    r.write("\n")
r.close()
#print trans
|
|
import math
import numpy as np
import pytest
from skspatial.objects import Vector, Line, Plane, Circle, Sphere
@pytest.mark.parametrize(
    "point, point_line, vector_line, point_expected, dist_expected",
    [
        # (point, point on line, line direction, expected projection, expected distance)
        ([0, 5], [0, 0], [0, 1], [0, 5], 0),
        ([0, 5], [0, 0], [0, 100], [0, 5], 0),
        ([1, 5], [0, 0], [0, 100], [0, 5], 1),
        ([0, 1], [0, 0], [1, 1], [0.5, 0.5], math.sqrt(2) / 2),
        ([1, 0], [0, 0], [1, 1], [0.5, 0.5], math.sqrt(2) / 2),
        ([0, 2], [0, 0], [1, 1], [1, 1], math.sqrt(2)),
        ([-15, 5], [0, 0], [0, 100], [0, 5], 15),
        ([50, 10], [1, -5], [0, 3], [1, 10], 49),
    ],
)
def test_project_point_line(point, point_line, vector_line, point_expected, dist_expected):
    """Projecting a point onto a line yields the closest point; distance matches."""
    line = Line(point_line, vector_line)
    point_projected = line.project_point(point)
    distance = line.distance_point(point)
    assert point_projected.is_close(point_expected)
    assert math.isclose(distance, dist_expected)
@pytest.mark.parametrize(
    "point, point_plane, normal_plane, point_expected, dist_expected",
    [
        # The signed distance follows the direction of the plane normal.
        ([0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], 0),
        ([0, 0, 0], [0, 0, 0], [0, 0, -1], [0, 0, 0], 0),
        ([0, 0, 1], [0, 0, 0], [0, 0, 1], [0, 0, 0], 1),
        ([0, 0, 1], [0, 0, 0], [0, 0, -1], [0, 0, 0], -1),
        ([0, 0, 1], [0, 0, 0], [0, 0, 50], [0, 0, 0], 1),
        ([0, 0, 1], [0, 0, 0], [0, 0, -50], [0, 0, 0], -1),
        ([0, 0, 5], [0, 0, 0], [0, 0, 50], [0, 0, 0], 5),
        ([0, 0, 5], [0, 0, 0], [0, 0, -50], [0, 0, 0], -5),
        ([5, -4, 1], [0, 0, 0], [0, 0, 1], [5, -4, 0], 1),
    ],
)
def test_project_point_plane(point, point_plane, normal_plane, point_expected, dist_expected):
    """Projecting a point onto a plane gives the foot of the perpendicular."""
    plane = Plane(point_plane, normal_plane)
    point_projected = plane.project_point(point)
    distance_signed = plane.distance_point_signed(point)
    assert point_projected.is_close(point_expected)
    assert math.isclose(distance_signed, dist_expected)
@pytest.mark.parametrize(
    "vector_u, vector_v, vector_expected",
    [
        # Cases are (u, v, expected projection of u onto v).
        ([1, 1], [1, 0], [1, 0]),
        ([1, 5], [1, 0], [1, 0]),
        ([5, 5], [1, 0], [5, 0]),
        # Scaling v by a non-zero scalar doesn't change the projection.
        ([0, 1], [0, 1], [0, 1]),
        ([0, 1], [0, -5], [0, 1]),
        ([0, 1], [0, 15], [0, 1]),
        # The projection is the zero vector if u and v are perpendicular.
        ([1, 0], [0, 1], [0, 0]),
        ([5, 0], [0, 9], [0, 0]),
        # The projection of the zero vector onto v is the zero vector.
        ([0, 0], [0, 1], [0, 0]),
    ],
)
def test_project_vector(vector_u, vector_v, vector_expected):
    """Test projecting vector u onto vector v."""
    vector_u_projected = Vector(vector_v).project_vector(vector_u)
    assert vector_u_projected.is_close(vector_expected)
@pytest.mark.parametrize(
    "line, vector, vector_expected",
    [
        # The projection depends only on the line direction, not its base point.
        (Line([0, 0], [1, 0]), [1, 1], [1, 0]),
        (Line([-56, 72], [1, 0]), [1, 1], [1, 0]),
        (Line([-56, 72], [200, 0]), [5, 9], [5, 0]),
        (Line([-56, 72], [200, 0]), [-5, 9], [-5, 0]),
    ],
)
def test_project_vector_line(line, vector, vector_expected):
    """Projecting a vector onto a line keeps only the component along its direction."""
    vector_projected = line.project_vector(vector)
    assert vector_projected.is_close(vector_expected)
@pytest.mark.parametrize(
    "plane, vector, vector_expected",
    [
        # Projection removes the component along the plane normal; normal scaling is irrelevant.
        (Plane([0, 0, 0], [0, 0, 1]), [1, 1, 0], [1, 1, 0]),
        (Plane([0, 0, 0], [0, 0, 1]), [1, 1, 1], [1, 1, 0]),
        (Plane([0, 0, 0], [0, 0, 1]), [7, -5, 20], [7, -5, 0]),
        (Plane([0, 0, 0], [0, 0, -10]), [7, -5, 20], [7, -5, 0]),
    ],
)
def test_project_vector_plane(plane, vector, vector_expected):
    """Projecting a vector onto a plane drops its normal component."""
    vector_projected = plane.project_vector(vector)
    assert vector_projected.is_close(vector_expected)
@pytest.mark.parametrize(
    "circle, point, point_expected",
    [
        # The projection lies on the circle, along the ray from the center to the point.
        (Circle([0, 0], 1), [1, 0], [1, 0]),
        (Circle([0, 0], 1), [2, 0], [1, 0]),
        (Circle([0, 0], 1), [-2, 0], [-1, 0]),
        (Circle([0, 0], 1), [0, 2], [0, 1]),
        (Circle([0, 0], 1), [0, -2], [0, -1]),
        (Circle([0, 0], 5), [0, -2], [0, -5]),
        (Circle([0, 1], 5), [0, -2], [0, -4]),
        (Circle([0, 0], 1), [1, 1], math.sqrt(2) / 2 * np.ones(2)),
        (Circle([0, 0], 2), [1, 1], math.sqrt(2) * np.ones(2)),
    ],
)
def test_project_point_circle(circle, point, point_expected):
    """Projecting a point onto a circle scales it radially onto the circumference."""
    point_projected = circle.project_point(point)
    assert point_projected.is_close(point_expected)
@pytest.mark.parametrize(
    "sphere, point, point_expected",
    [
        # The projection lies on the sphere surface, along the ray from the center.
        (Sphere([0, 0, 0], 1), [1, 0, 0], [1, 0, 0]),
        (Sphere([0, 0, 0], 2), [1, 0, 0], [2, 0, 0]),
        (Sphere([0, 0, 0], 0.1), [1, 0, 0], [0.1, 0, 0]),
        (Sphere([-1, 0, 0], 1), [1, 0, 0], [0, 0, 0]),
        (Sphere([0, 0, 0], 1), [1, 1, 1], math.sqrt(3) / 3 * np.ones(3)),
        (Sphere([0, 0, 0], 3), [1, 1, 1], math.sqrt(3) * np.ones(3)),
    ],
)
def test_project_point_sphere(sphere, point, point_expected):
    """Projecting a point onto a sphere scales it radially onto the surface."""
    point_projected = sphere.project_point(point)
    assert point_projected.is_close(point_expected)
@pytest.mark.parametrize(
    "circle_or_sphere, point",
    [
        # The point to project cannot be the center of the circle/sphere.
        (Circle([0, 0], 1), [0, 0]),
        (Circle([0, 0], 5), [0, 0]),
        (Circle([7, -1], 5), [7, -1]),
        (Sphere([0, 0, 0], 1), [0, 0, 0]),
        (Sphere([0, 0, 0], 5), [0, 0, 0]),
        (Sphere([5, 2, -6], 5), [5, 2, -6]),
    ],
)
def test_project_point_circle_sphere_failure(circle_or_sphere, point):
    """Projecting the center of a circle/sphere is undefined and must raise."""
    # NOTE(review): pytest.raises(Exception) is very broad — tighten to the
    # specific exception type if the skspatial API guarantees one.
    with pytest.raises(Exception):
        circle_or_sphere.project_point(point)
|
|
import argparse
import os
from functools import lru_cache
from glob import glob
import albumentations as albu
import cv2
import numpy as np
import pandas as pd
import torch
from torch.jit import load
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
BATCH_SIZE = 32  # source images per DataLoader batch (each image yields 4 crops)
# Input sizes are fixed, so let cuDNN benchmark and pick the fastest kernels.
torch.backends.cudnn.benchmark = True
def make_crops(img, target, idx):
    """Return one of the four `target`-sized corner crops of a square image.

    img is an HxWxC array with H == W; idx selects the corner:
    0 = top-left, 1 = top-right, 2 = bottom-right, 3 = bottom-left.
    Raises IndexError for idx outside 0..3.
    """
    side, other, _ = img.shape
    assert side == other
    off = side - target
    # row slice and column slice for each corner, in the order above
    rows = (slice(None, -off), slice(None, -off), slice(off, None), slice(off, None))
    cols = (slice(None, -off), slice(off, None), slice(off, None), slice(None, -off))
    return img[rows[idx], cols[idx], :]
def get_normalize():
    """Return a callable that applies albumentations' default normalization to an image."""
    transform = albu.Normalize()

    def _apply(image):
        return transform(image=image)['image']

    return _apply
NORM_FN = get_normalize()
@lru_cache(8)
def read_img(x, target=384):
    """Load the image at path `x`, resize to (target, target), and normalize.

    Cached on the path (last 8 entries) because the dataset requests the same
    image once per crop. Callers receive the SAME cached array object — treat
    it as read-only. Note cv2.imread returns BGR channel order.
    """
    x = cv2.imread(x)
    x = cv2.resize(x, (target, target))
    x = NORM_FN(x)
    return x
class TestAntispoofDataset(Dataset):
    """Dataset that yields four 256x256 corner crops per source image.

    Index i maps to image i // 4 and crop i % 4; items are
    (image id, CHW float array).
    """

    def __init__(self, paths):
        self.paths = paths
        self.n_crops = 4

    def __getitem__(self, index):
        img_idx, crop_idx = divmod(index, self.n_crops)
        image_info = self.paths[img_idx]
        crop = make_crops(read_img(image_info['path']), target=256, idx=crop_idx)
        # HWC -> CHW for torch
        return image_info['id'], np.transpose(crop, (2, 0, 1))

    def __len__(self):
        return self.n_crops * len(self.paths)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--path-images-csv', type=str, required=True)
    parser.add_argument('--path-test-dir', type=str, required=True)
    parser.add_argument('--path-submission-csv', type=str, required=True)
    args = parser.parse_args()

    # prepare image paths
    test_dataset_paths = pd.read_csv(args.path_images_csv)
    path_test_dir = args.path_test_dir
    paths = [{'id': row.id,
              'frame': row.frame,
              'path': os.path.join(path_test_dir, row.path)}
             for _, row in test_dataset_paths.iterrows()]
    dataset = TestAntispoofDataset(paths=paths)
    dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # predict
    samples, probabilities = [], []
    # every TorchScript checkpoint (*.trcd) in the working directory joins the ensemble
    models = [load(x).to(device) for x in glob('*.trcd')]
    with torch.no_grad():
        for video, batch in tqdm(dataloader):
            batch = batch.to(device)
            for model in models:
                proba = torch.softmax(model(batch), dim=1).cpu().numpy()
                # sum of every class probability except the last
                # NOTE(review): presumably the last class is "real" and the rest
                # are attack types — confirm against the training label layout
                proba = proba[:, :-1].sum(axis=1)
                samples.extend(video)
                probabilities.extend(proba)

    # save: ids repeat once per crop and per model, so the groupby-mean
    # averages over the 4 crops and the whole model ensemble
    predictions = pd.DataFrame.from_dict({
        'id': samples,
        'probability': probabilities})
    predictions = predictions.groupby('id').mean().reset_index()
    predictions['prediction'] = predictions.probability
    predictions[['id', 'prediction']].to_csv(args.path_submission_csv, index=False)
|
|
import numpy as np
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
# Must be set before any hash-randomized operation runs for it to take effect.
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(1337)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(7331)
import logging
from keras import layers, regularizers
from keras.models import Model, load_model
from data.datasets import *
from eval import keras_metrics, metrics
from nlp import chunker, tokenizer as tk
from utils import info, preprocessing, postprocessing, plots
# LOGGING CONFIGURATION
logging.basicConfig(
    format='%(asctime)s\t%(levelname)s\t%(message)s',
    level=logging.DEBUG)
info.log_versions()
# END LOGGING CONFIGURATION

# GLOBAL VARIABLES
SAVE_MODEL = False  # if True, persist the trained model at MODEL_PATH (and load it when present)
MODEL_PATH = "models/answerrnn2test.h5"
SHOW_PLOTS = False  # if True, display accuracy/loss/PRF plots after training
SAMPLE_SIZE = -1  # training set will be restricted to SAMPLE_SIZE. Set to -1 to disable
KP_CLASS_WEIGHT = 1.  # weight of positives samples while training the model. NOTE: MUST be a float
# END GLOBAL VARIABLES

# Dataset and hyperparameters for each dataset
DATASET = Hulth  # dataset class to use; must match one of the branches below
if DATASET == Semeval2017:
    tokenizer = tk.tokenizers.nltk
    DATASET_FOLDER = "data/Semeval2017"
    MAX_DOCUMENT_LENGTH = 400
    MAX_VOCABULARY_SIZE = 20000
    MAX_ANSWER_LENGTH = 16
    EMBEDDINGS_SIZE = 300
    BATCH_SIZE = 256
    PREDICT_BATCH_SIZE = 256
    EPOCHS = 10
elif DATASET == Hulth:
    tokenizer = tk.tokenizers.nltk
    DATASET_FOLDER = "data/Hulth2003"
    MAX_DOCUMENT_LENGTH = 540
    MAX_VOCABULARY_SIZE = 20000
    MAX_ANSWER_LENGTH = 12
    EMBEDDINGS_SIZE = 50
    BATCH_SIZE = 256
    PREDICT_BATCH_SIZE = 2048
    EPOCHS = 9
else:
    raise NotImplementedError("Can't set the hyperparameters: unknown dataset")
# END PARAMETERS
# Loss function
def cos_distance(y_true, y_pred):
    """Cosine-distance loss: mean over the batch of (1 - cos_sim(y_true, y_pred)).

    Defined for use as a Keras loss. NOTE(review): the model below currently
    compiles with 'mse', so this function is available but unused.
    """
    import keras.backend as K
    # (Removed a dead inner l2_normalize helper that shadowed K.l2_normalize
    # and was never called.)
    y_true = K.l2_normalize(y_true, axis=-1)
    y_pred = K.l2_normalize(y_pred, axis=-1)
    return K.mean(1 - K.sum((y_true * y_pred), axis=-1))
# End loss
logging.info("Loading dataset...")
data = DATASET(DATASET_FOLDER)
train_doc_str, train_answer_str = data.load_train()
test_doc_str, test_answer_str = data.load_test()
val_doc_str, val_answer_str = data.load_validation()
train_doc, train_answer = tk.tokenize_set(train_doc_str, train_answer_str, tokenizer)
test_doc, test_answer = tk.tokenize_set(test_doc_str, test_answer_str, tokenizer)
val_doc, val_answer = tk.tokenize_set(val_doc_str, val_answer_str, tokenizer)
logging.info("Dataset loaded. Generating candidate keyphrases...")
train_candidates = chunker.extract_candidates_from_set(train_doc_str, tokenizer)
test_candidates = chunker.extract_candidates_from_set(test_doc_str, tokenizer)
val_candidates = chunker.extract_candidates_from_set(val_doc_str, tokenizer)
logging.debug("Candidates recall on training set : %.4f", metrics.recall(train_answer, train_candidates))
logging.debug("Candidates recall on test set : %.4f", metrics.recall(test_answer, test_candidates))
logging.debug("Candidates recall on validation set : %.4f", metrics.recall(val_answer, val_candidates))
logging.info("Candidates generated. Preprocessing data...")
train_x, train_y, test_x, test_y, val_x, val_y, val_x_b, val_y_b, embedding_matrix, dictionary = preprocessing. \
prepare_answer_2(train_doc, train_answer, train_candidates,
test_doc, test_answer, test_candidates,
val_doc, val_answer, val_candidates,
max_document_length=MAX_DOCUMENT_LENGTH,
max_answer_length=MAX_ANSWER_LENGTH,
max_vocabulary_size=MAX_VOCABULARY_SIZE,
embeddings_size=EMBEDDINGS_SIZE)
# Finalize the ys: remove one-hot
train_y = np.argmax(train_y, axis=1)
test_y = np.argmax(test_y, axis=1)
val_y = np.argmax(val_y, axis=1)
val_y_b = np.argmax(val_y_b,axis=1)
logging.info("Data preprocessing complete.")
if not SAVE_MODEL or not os.path.isfile(MODEL_PATH):
# Dataset sampling
if 0 < SAMPLE_SIZE < np.shape(train_x[0])[0]:
logging.warning("Training network on %s samples" % SAMPLE_SIZE)
samples_indices = rn.sample(range(np.shape(train_x[0])[0]), SAMPLE_SIZE)
train_x_doc_sample = np.zeros((SAMPLE_SIZE, MAX_DOCUMENT_LENGTH))
train_x_answer_sample = np.zeros((SAMPLE_SIZE, MAX_ANSWER_LENGTH))
shape_y = list(np.shape(train_y))
shape_y[0] = SAMPLE_SIZE
train_y_sample = np.zeros(tuple(shape_y))
i = 0
for j in samples_indices:
train_x_doc_sample[i] = train_x[0][j]
train_x_answer_sample[i] = train_x[1][j]
train_y_sample[i] = train_y[j]
i += 1
train_x = [train_x_doc_sample, train_x_answer_sample]
train_y = train_y_sample
logging.debug("Sampled Training set documents size : %s", np.shape(train_x[0]))
logging.debug("Sampled Training set answers size : %s", np.shape(train_x[1]))
# end sampling.
# Class weights
class_weights = {0: 1.,
1: KP_CLASS_WEIGHT}
logging.debug("Building the network...")
document = layers.Input(shape=(MAX_DOCUMENT_LENGTH,))
encoded_document = layers.Embedding(np.shape(embedding_matrix)[0],
EMBEDDINGS_SIZE,
weights=[embedding_matrix],
input_length=MAX_DOCUMENT_LENGTH,
trainable=False)(document)
# Size of the output layer for a Convolutional Layer
# (from http://cs231n.github.io/convolutional-networks/)
# We can compute the spatial size of the output volume as a function of the input volume size (W),
# the receptive field size of the Conv Layer neurons (F), the stride with which they are applied (S),
# and the amount of zero padding used (P) on the border.
# You can convince yourself that the correct formula for calculating how many neurons “fit” is given by ((W−F+2P)/S)+1.
#encoded_document = layers.Bidirectional(
# layers.LSTM(int(EMBEDDINGS_SIZE),
# activation='hard_sigmoid',
# recurrent_activation='hard_sigmoid',
# return_sequences=True))\
# (encoded_document)
encoded_document = layers.Conv1D(filters=128, kernel_size=32, strides=4, activation='relu')(encoded_document)
# Size: 131
encoded_document = layers.MaxPool1D(pool_size=2)(encoded_document)
encoded_document = layers.Activation('relu')(encoded_document)
# Size: 65
encoded_document = layers.Conv1D(filters=128, kernel_size=8, strides=2, activation='relu')(encoded_document)
# # Size: 29
encoded_document = layers.MaxPool1D(pool_size=2)(encoded_document)
encoded_document = layers.Activation('relu')(encoded_document)
# # Size: 14
encoded_document = layers.Conv1D(filters=128, kernel_size=4, strides=1, activation='relu')(encoded_document)
# # Size: 11
encoded_document = layers.MaxPool1D(pool_size=2)(encoded_document)
encoded_document = layers.Activation('relu')(encoded_document)
# # Size: 5
#encoded_document = layers.TimeDistributed(layers.Dense(10, activation='softmax'))(encoded_document)
encoded_document = layers.Flatten()(encoded_document)
#print((Model(document, encoded_document)).summary())
candidate = layers.Input(shape=(MAX_ANSWER_LENGTH,))
encoded_candidate = layers.Embedding(np.shape(embedding_matrix)[0],
EMBEDDINGS_SIZE,
weights=[embedding_matrix],
input_length=MAX_ANSWER_LENGTH,
trainable=False)(candidate)
#encoded_candidate = layers.Bidirectional(
# layers.LSTM(int(EMBEDDINGS_SIZE),
# activation='hard_sigmoid',
# recurrent_activation='hard_sigmoid',
# return_sequences=True))\
# (encoded_candidate)
encoded_candidate = layers.Conv1D(filters=128, kernel_size=2, activation='relu')(encoded_candidate)
encoded_candidate = layers.MaxPool1D(pool_size=2)(encoded_candidate)
encoded_candidate = layers.Activation('relu')(encoded_candidate)
encoded_candidate = layers.Flatten()(encoded_candidate)
#print((Model(candidate, encoded_candidate)).summary())
prediction = layers.dot([encoded_document, encoded_candidate], axes=-1, normalize=True)
model = Model([document, candidate], prediction)
logging.info("Compiling the network...")
model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
#merged = layers.add([encoded_document, encoded_candidate])
#prediction = layers.Dense(int(EMBEDDINGS_SIZE / 4), activation='relu',kernel_regularizer=regularizers.l2(0.01))(merged)
#prediction = layers.Dropout(0.25)(prediction)
#prediction = layers.Dense(2, activation='softmax')(prediction)
#model = Model([document, candidate], prediction)
#logging.info("Compiling the network...")
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
metrics_callback = keras_metrics.MetricsCallbackQA(val_x, val_y, batch_size=PREDICT_BATCH_SIZE)
logging.info("Fitting the network...")
history = model.fit(train_x, train_y,
validation_data=(val_x_b, val_y_b),
epochs=EPOCHS,
batch_size=BATCH_SIZE,
class_weight=class_weights,
callbacks=[metrics_callback])
if SHOW_PLOTS:
plots.plot_accuracy(history)
plots.plot_loss(history)
plots.plot_prf(metrics_callback)
if SAVE_MODEL:
model.save(MODEL_PATH)
logging.info("Model saved in %s", MODEL_PATH)
else:
logging.info("Loading existing model from %s...", MODEL_PATH)
model = load_model(MODEL_PATH)
logging.info("Predicting on test set...")
output = model.predict(x=test_x, verbose=1, batch_size=PREDICT_BATCH_SIZE)
logging.debug("Shape of output array: %s", np.shape(output))
obtained_words = postprocessing.get_answers(test_candidates, test_x, output, dictionary)
precision = metrics.precision(test_answer, obtained_words)
recall = metrics.recall(test_answer, obtained_words)
f1 = metrics.f1(precision, recall)
print("### Obtained Scores ###")
print("### (full dataset) ###")
print("###")
print("### Precision : %.4f" % precision)
print("### Recall : %.4f" % recall)
print("### F1 : %.4f" % f1)
print("### ###")
keras_precision = keras_metrics.keras_precision_qa(test_y, output)
keras_recall = keras_metrics.keras_recall_qa(test_y, output)
keras_f1 = keras_metrics.keras_f1_qa(test_y, output)
print("### Obtained Scores ###")
print("### (fixed dataset) ###")
print("###")
print("### Precision : %.4f" % keras_precision)
print("### Recall : %.4f" % keras_recall)
print("### F1 : %.4f" % keras_f1)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,5)
precision_top = metrics.precision(test_answer, obtained_words_top)
recall_top = metrics.recall(test_answer, obtained_words_top)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 5) ###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,10)
precision_top = metrics.precision(test_answer, obtained_words_top)
recall_top = metrics.recall(test_answer, obtained_words_top)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 10)###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,15)
precision_top = metrics.precision(test_answer, obtained_words_top)
recall_top = metrics.recall(test_answer, obtained_words_top)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 15)###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
print("### ###")
print("### ###")
print("### STEMMING ###")
print("### ###")
print("### ###")
STEM_MODE = metrics.stemMode.both
precision = metrics.precision(test_answer, obtained_words,STEM_MODE)
recall = metrics.recall(test_answer, obtained_words,STEM_MODE)
f1 = metrics.f1(precision, recall)
print("### Obtained Scores ###")
print("### (full dataset) ###")
print("###")
print("### Precision : %.4f" % precision)
print("### Recall : %.4f" % recall)
print("### F1 : %.4f" % f1)
print("### ###")
clean_words = postprocessing.get_valid_patterns(obtained_words)
precision = metrics.precision(test_answer, clean_words,STEM_MODE)
recall = metrics.recall(test_answer, clean_words,STEM_MODE)
f1 = metrics.f1(precision, recall)
print("### Obtained Scores ###")
print("### (full dataset, ###")
print("### pos patterns filter) ###")
print("###")
print("### Precision : %.4f" % precision)
print("### Recall : %.4f" % recall)
print("### F1 : %.4f" % f1)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,5)
precision_top = metrics.precision(test_answer, obtained_words_top,STEM_MODE)
recall_top = metrics.recall(test_answer, obtained_words_top,STEM_MODE)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 5) ###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,10)
precision_top = metrics.precision(test_answer, obtained_words_top,STEM_MODE)
recall_top = metrics.recall(test_answer, obtained_words_top,STEM_MODE)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 10)###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
obtained_words_top = postprocessing.get_top_answers(test_candidates, test_x, output, dictionary,15)
precision_top = metrics.precision(test_answer, obtained_words_top,STEM_MODE)
recall_top = metrics.recall(test_answer, obtained_words_top,STEM_MODE)
f1_top = metrics.f1(precision_top, recall_top)
print("### Obtained Scores ###")
print("### (full dataset, top 15)###")
print("###")
print("### Precision : %.4f" % precision_top)
print("### Recall : %.4f" % recall_top)
print("### F1 : %.4f" % f1_top)
print("### ###")
if DATASET == Semeval2017:
from eval import anno_generator
anno_generator.write_anno("/tmp/simplernn", test_doc_str, obtained_words)
from data.Semeval2017 import eval
eval.calculateMeasures("data/Semeval2017/test", "/tmp/simplernn", remove_anno=["types"])
|
|
import os
import cv2
import numpy as np
import torch
from torch import nn
import torch.fft
"""
# --------------------------------------------
# Sobel Filter
# --------------------------------------------
# Jiahao Huang (j.huang21@imperial.ac.uk)
# 30/Jan/2022
# --------------------------------------------
"""
# Sobel
def sobel(src, device):
    """Edge map of *src* using 3x3 Sobel filters.

    The input is clamped to [0, 1], convolved with the horizontal and
    vertical Sobel kernels, and the two responses are averaged and
    rectified, so the result is non-negative.

    :param src: (n, c, h, w) float tensor.
    :param device: torch.device the convolutions run on.
    :return: (n, c, h, w) non-negative tensor of edge responses.
    """
    src = torch.clamp(src, 0, 1)
    n_ch = src.shape[1]

    # 3x3 Sobel kernels for the x- and y-gradients.
    kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype='float32')
    kernel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype='float32')

    def _make_conv(kernel):
        # Tile the kernel to a (c, c, 3, 3) weight, so every output channel
        # sums the filter responses of all input channels.
        conv = nn.Conv2d(n_ch, n_ch, kernel_size=3, padding=1, bias=False).to(device)
        weight = np.tile(kernel.reshape((1, 1, 3, 3)), (n_ch, n_ch, 1, 1))
        conv.weight.data = torch.from_numpy(weight).to(device)
        return conv

    grad_x = _make_conv(kernel_x)(src)
    grad_y = _make_conv(kernel_y)(src)
    # Mean of the two directional responses, rectified to [0, +inf).
    return torch.abs(grad_x / 2 + grad_y / 2)
if __name__ == "__main__":
    # Smoke test: load three grayscale slices, run the Sobel filter on the
    # batch, and write the selected response map back to disk (normalized).
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    slice_idx = 0  # which batch slice to save at the end
    # slice 1 in batch
    # NOTE(review): the space in 'GT_ 1024.png' looks accidental -- confirm filename.
    x1 = cv2.imread('../tmp/GT_ 1024.png', cv2.IMREAD_GRAYSCALE)
    # (w, h) --> (n, c, w, h)
    x1 = x1[np.newaxis, np.newaxis, :, :]
    x1= x1/255
    # slice 2 in batch
    x2 = cv2.imread('../tmp/GT_1043.png', cv2.IMREAD_GRAYSCALE)
    # (w, h) --> (n, c, w, h)
    x2 = x2[np.newaxis, np.newaxis, :, :]
    x2 = x2/255
    # slice 3 in batch
    # NOTE(review): same file as slice 2 -- confirm this duplication is intended.
    x3 = cv2.imread('../tmp/GT_1043.png', cv2.IMREAD_GRAYSCALE)
    # (w, h) --> (n, c, w, h)
    x3 = x3[np.newaxis, np.newaxis, :, :]
    x3 = x3/255
    x = np.concatenate((x1, x2, x3), axis=0)
    #
    # values are in 0~1 after the /255 above
    x = torch.Tensor(x).to(device)
    #
    # sobel edge map
    x_sobel = sobel(x, device)
    x_sobel = x_sobel.cpu().squeeze().detach().numpy()
    print(x_sobel.shape)
    x_sobel = x_sobel[slice_idx, :, :]
    # Min-max normalize to [0, 255] before writing the PNG.
    cv2.imwrite("../tmp/Sobel.png", 255 * (x_sobel-x_sobel.min())/(x_sobel.max()-x_sobel.min()))
|
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import TruncatedSVD
## data load ##
# NOTE(review): hard-coded Windows paths; 'euc-kr' encoding for Korean CSVs.
rent = pd.read_csv("d:/data/KNN_data_rent.csv",
                   encoding='euc-kr')
all_data = pd.read_csv("d:/data/KK_k150_2021.csv",
                       encoding='euc-kr')
data_new = pd.read_csv("d:/data/test.csv",
                       encoding='euc-kr')
# Join key: name + address (presumably identifies a listing uniquely -- verify).
all_data['add_name'] = all_data['Name'] + all_data['Add']
data_new['add_name'] = data_new['Name'] + data_new['Add']
def data_concat(data1, data2):
    """Merge the estimated rent data into the base data and split X/y.

    Maps ``data2['predK25']`` onto ``data1`` via the name+address key
    (``add_name``) and returns the feature matrix and the name column.

    :param data1: base DataFrame; must contain 'add_name' plus the feature columns.
    :param data2: rent-estimate DataFrame with 'Name', 'Add' and 'predK25'.
    :return: (X, y) where X holds the six feature columns (the mapped rent
        estimate is named 'rent') and y holds the 'Name' column.
    """
    # .copy() avoids mutating a slice of data2 (SettingWithCopyWarning).
    rent_name = data2[['Name', 'Add', 'predK25']].copy()
    rent_name['add_name'] = rent_name['Name'] + rent_name['Add']
    # rename instead of assigning .columns on a slice.
    rent_lookup = rent_name[['add_name', 'predK25']].rename(columns={'predK25': 'rent'})
    data1 = pd.merge(data1, rent_lookup, on='add_name')
    X = data1[['predK25', 'center_access', 'people_access',
               'center_access_2', 'people_access_2', 'rent']]
    y = data1[['Name']]
    return X, y
def scale_svd(X, y):
    """Scale + SVD + mean: collapse the feature matrix into one score per row.

    Min-max scales X, projects it onto 3 truncated-SVD components (fixed
    random_state for reproducibility), and stores the per-row mean of those
    components as a 'new' column on y.

    NOTE(review): y is mutated in place and also returned; the Series
    assignment aligns positionally only because both frames carry a default
    RangeIndex after data_concat's merge -- confirm if callers change.

    :param X: numeric feature DataFrame.
    :param y: DataFrame holding 'Name'; receives the 'new' score column.
    :return: y with the added 'new' column.
    """
    ms = MinMaxScaler()
    X_scale = ms.fit_transform(X)
    ## svd ##
    svd = TruncatedSVD(n_components=3, random_state=77)
    X_svd = svd.fit_transform(X_scale)
    # (The discarded np.sum(svd.explained_variance_ratio_) statement was
    # removed -- it had no effect.)
    X_svd = pd.DataFrame(X_svd)
    # Mean over the 3 components gives one scalar score per row.
    avg = np.mean(X_svd, axis=1)
    y['new'] = avg
    return y
# Score the full 2021 dataset.
X_old, y_old = data_concat(all_data, rent)
y_old_svd = scale_svd(X_old, y_old)
def sort_rank(y):
    """Return *y* sorted ascending by 'new', with a normalized 'rank' column in (0, 1]."""
    ordered = y.sort_values(by=['new'])
    n_rows = len(ordered)
    # rank() / number of rows gives the percentile-style rank.
    ordered = ordered.assign(rank=ordered['new'].rank().div(n_rows))
    return ordered
y_old_rank = sort_rank(y_old_svd)
# Repeat the same pipeline for the new/test data.
X_new, y_new = data_concat(data_new, rent)
y_new_svd = scale_svd(X_new, y_new)
y_new_rank = sort_rank(y_new_svd)
|
|
import datetime
import numpy as np
import pandas as pd
from util import log, timeit
from CONSTANT import *
@timeit
def clean_df(df):
    """Clean *df* in place; currently this only imputes missing values."""
    fillna(df)
@timeit
def fillna(df):
    """Impute missing values in place, one sentinel per column family.

    Sentinels: numerical -> -1; categorical and multi-categorical -> "0";
    time -> the Unix epoch (1970-01-01).

    :param df: DataFrame whose column names carry the *_PREFIX markers.
    """
    # Assigning the filled column back (instead of chained
    # df[c].fillna(..., inplace=True)) is reliable under pandas copy-on-write.
    for c in [c for c in df if c.startswith(NUMERICAL_PREFIX)]:
        df[c] = df[c].fillna(-1)
    for c in [c for c in df if c.startswith(CATEGORY_PREFIX)]:
        df[c] = df[c].fillna("0")
    for c in [c for c in df if c.startswith(TIME_PREFIX)]:
        df[c] = df[c].fillna(datetime.datetime(1970, 1, 1))
    for c in [c for c in df if c.startswith(MULTI_CAT_PREFIX)]:
        df[c] = df[c].fillna("0")
@timeit
def feature_engineer(df):
    """Feature engineering entry point; currently only converts categoricals."""
    transform_categorical_hash(df)
    # transform_datetime(df)
@timeit
def transform_categorical_hash(df):
    """Convert (multi-)categorical string columns to integer codes in place.

    Assumes fillna() already ran, so every value is a parseable string
    (missing values were filled with "0").

    :param df: DataFrame whose column names carry the *_PREFIX markers.
    """
    for c in [c for c in df if c.startswith(CATEGORY_PREFIX)]:
        # apply(int) instead of a needless lambda wrapper.
        df[c] = df[c].apply(int)
    for c in [c for c in df if c.startswith(MULTI_CAT_PREFIX)]:
        # Keep only the first entry of the comma-separated value list.
        df[c] = df[c].apply(lambda x: int(x.split(',')[0]))
|
|
"""
##
Code modified by Yuhan Helena Liu, PhD Candidate, University of Washington
Modified to keep adjacency matrix, i.e. disable stochastic rewiring by Deep R, for better biological plausibility
Modified from https://github.com/IGITUGraz/LSNN-official
with the following copyright message retained from the original code:
##
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
import numpy.linalg as la
import matplotlib.pyplot as plt
def balance_matrix_per_neuron(M):
    """Return a copy of M whose columns each sum to (approximately) zero.

    Column k is read as the incoming synaptic weights of neuron k
    (positive entries excitatory, negative inhibitory).  The residual
    imbalance of each column is spread uniformly over the non-zero
    synapses of the dominant sign, so zero entries stay zero.  Columns
    with only one sign cannot be balanced and are zeroed out (with a
    warning).

    :param M: 2-D numpy array (n_in, n_out); not modified.
    :return: balanced copy of M.
    """
    M = M.copy()
    n_in, n_out = M.shape
    for k in range(n_out):
        # Change only non zero synapses to keep as much zeros as possible
        e_act = M[:, k] > 0
        i_act = M[:, k] < 0
        if np.sum(i_act) == 0:
            M[:, k] = 0
            print(
                'Warning: Neuron {} has not incoming synpases from inhibitory neurons. Setting all incoming weights to 0 to avoid un-balanced behaviour.'.format(
                    k))
            # Skip the compensation step: it would divide by the (empty)
            # inhibitory count and emit a spurious 0/0 RuntimeWarning.
            continue
        if np.sum(e_act) == 0:
            M[:, k] = 0
            print(
                'Warning: Neuron {} has not incoming synpases from excitatory neurons. Setting all incoming weights to 0 to avoid un-balanced behaviour.'.format(
                    k))
            continue
        s_e = M[e_act, k].sum()
        s_i = M[i_act, k].sum()
        # Add a small portion to compensate if the mean is not balanced
        if s_e + s_i < 0:
            M[e_act, k] += np.abs(s_e + s_i) / np.sum(e_act)
        else:
            M[i_act, k] -= np.abs(s_e + s_i) / np.sum(i_act)
        sum_check = M[:, k].sum()
        assert sum_check ** 2 < 1e-5, 'Mismatch of row balancing for neuron {}, sum is {} with on exci {} and inhib {}'.format(
            k, sum_check, s_e, s_i)
    return M
def max_eigen_value_on_unit_circle(w):
    """Rescale *w* so its spectral radius becomes 1; return (scaled_w, factor)."""
    spectral_radius = np.max(np.abs(la.eig(w)[0]))
    factor = 1. / spectral_radius
    return factor * w, factor
def random_sparse_signed_matrix(neuron_sign, p=1., balance_zero_mean_per_neuron=True, n_out=None):
    '''
    Provide a good initialization for a matrix with restricted sign.
    This is a personal recipe.

    :param neuron_sign: vector of +1/-1, the fixed sign of each input row.
    :param p: connection probability.
    :param balance_zero_mean_per_neuron: rebalance each column to zero sum.
    :param n_out: number of output columns (defaults to len(neuron_sign)).
    :return: tuple (w, sign, theta, is_con).
    '''
    E = neuron_sign > 0
    I = neuron_sign < 0
    n = neuron_sign.__len__()
    if n_out is None:
        n_out = n
    # Random numbers
    is_con = rd.rand(n, n) < p
    theta = np.abs(rd.randn(n, n))
    theta = (2 * is_con - 1) * theta
    sign = np.tile(np.expand_dims(neuron_sign, 1), (1, n))
    # Effective weight: positive theta entries only, carrying the row sign.
    w = lambda theta, sign: (theta) * (theta > 0) * sign
    _w = w(theta, sign)
    if (np.sum(I) > 0):
        # Normalize a first time, but this is obsolete if the stabilization happens also on a single neuron basis
        val_E = np.sum(_w[E, :])
        val_I = - np.sum(_w[I, :])
        assert val_I > 0 and val_E > 0, 'Sign error'
        theta[I, :] *= val_E / val_I
        _w = w(theta, sign)
        if balance_zero_mean_per_neuron:
            w_balanced = balance_matrix_per_neuron(_w)
            theta[theta > 0] = np.abs(w_balanced[theta > 0])
            _w = w(theta, sign)
            assert (_w[np.logical_not(is_con)] == 0).all(), 'Balancing the neurons procuded a sign error'
    else:
        print("Warning: no inhibitory neurons detected, no balancing is performed")
    # Normalize to scale the eigenvalues
    _, factor = max_eigen_value_on_unit_circle(_w)
    theta *= factor
    _w = w(theta, sign)
    assert (_w[E] >= 0).all(), 'Found negative excitatory weights'
    assert (_w[I] <= 0).all(), 'Found positive inhibitory weights'
    # (An unreachable "if n_out is None: return w, ..." branch was removed:
    # n_out was normalized above, and the branch would have returned the
    # lambda `w` instead of a matrix.)
    if n < n_out:
        sel = np.random.choice(n, size=n_out)
    else:
        sel = np.arange(n_out)
    theta = theta[:, sel]
    sign = sign[:, sel]
    is_con = is_con[:, sel]
    return w(theta, sign), sign, theta, is_con
def test_random_sparse_signed_matrix():
    """Visual sanity check of the sign-constrained initialization.

    Plots the eigenvalue spectra of (1) a matrix balanced per neuron and
    (2) a matrix whose sparsity mask is applied after scaling.  Blocks on
    plt.show(); not an automated test.
    """
    # Define parameter
    p = .33
    p_e = .75
    mean_E = .4  # NOTE(review): mean_E and std_E are defined but unused here.
    std_E = 0
    n_in = 400
    neuron_sign = rd.choice([1, -1], n_in, p=[p_e, 1 - p_e])
    M1, M1_sign, M1_theta, M1_is_con = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=p,
                                                                   balance_zero_mean_per_neuron=True)
    s1, _ = la.eig(M1)
    # Internal consistency of the returned tuple.
    assert np.all(np.abs(M1[M1_is_con]) == M1_theta[M1_is_con])
    assert np.all(np.sign(M1) == M1_sign * M1_is_con)
    assert np.all(M1_is_con == (M1_theta > 0))
    M2, _, _, _ = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=1., balance_zero_mean_per_neuron=True)
    M2 = M2 * (rd.rand(n_in, n_in) < p)
    s2, _ = la.eig(M2)
    fig, ax_list = plt.subplots(2)
    ax_list[0].set_title('Random sign constrained without neuron specific balance (p={:.3g})'.format(p))
    ax_list[1].set_title('Random sign constrained, probability mask taken after scaling')
    ax_list[0].scatter(s1.real, s1.imag)
    ax_list[1].scatter(s2.real, s2.imag)
    # Unit-circle overlay: eigenvalues should sit on/inside it after scaling.
    c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
    ax_list[0].add_artist(c)
    c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
    ax_list[1].add_artist(c)
    for ax in ax_list:
        ax.set_xlim([-2, 2])
        ax.set_ylim([-2, 2])
    plt.show()
def sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(theta_list, ps, upper_bound_check=False):
    """Draw how many dormant synapses to re-activate in each theta matrix.

    The global connection budget is sum_i size(theta_i) * p_i.  The missing
    number of connections (budget minus currently active count, floored at 0)
    is split across the matrices by sampling a categorical distribution whose
    probabilities are proportional to each matrix's share of the budget.

    :param theta_list: tf Variables of connection parameters (>0 == connected).
    :param ps: per-matrix connectivity fractions.
    :param upper_bound_check: if True, assert active count <= budget.
    :return: list of scalar int tensors, one reconnection count per matrix.
    """
    with tf.name_scope('NBreconnectGenerator'):
        theta_vals = [theta.read_value() for theta in theta_list]
        # Compute size and probability of connections
        nb_possible_connections_list = [tf.cast(tf.size(th), dtype=tf.float32) * p for th, p in zip(theta_list, ps)]
        total_possible_connections = tf.reduce_sum(nb_possible_connections_list)
        max_total_connections = tf.cast(total_possible_connections, dtype=tf.int32)
        sampling_probs = [nb_possible_connections / total_possible_connections \
                          for nb_possible_connections in nb_possible_connections_list]
        def nb_connected(theta_val):
            # An entry is an active synapse iff its parameter is strictly positive.
            is_con = tf.greater(theta_val, 0)
            n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
            return n_connected
        total_connected = tf.reduce_sum([nb_connected(theta) for theta in theta_vals])
        if upper_bound_check:
            assert_upper_bound_check = tf.Assert(tf.less_equal(total_connected, max_total_connections),
                                                 data=[max_total_connections, total_connected],
                                                 name='RewiringUpperBoundCheck')
        else:
            assert_upper_bound_check = tf.Assert(True,
                                                 data=[max_total_connections, total_connected],
                                                 name='SkippedRewiringUpperBoundCheck')
        with tf.control_dependencies([assert_upper_bound_check]):
            nb_reconnect = tf.maximum(0, max_total_connections - total_connected)
            # Split the reconnection budget across matrices proportionally to size*p.
            sample_split = tf.distributions.Categorical(probs=sampling_probs).sample(nb_reconnect)
            is_class_i_list = [tf.equal(sample_split, i) for i in range(len(theta_list))]
            counts = [tf.reduce_sum(tf.cast(is_class_i, dtype=tf.int32)) for is_class_i in is_class_i_list]
            return counts
def compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list):
    """Compute (grad, var) pairs, substituting gradients for rewired variables.

    Identical to compute_gradients_with_rewiring_variables except that the
    gradients of the first two rewired weight matrices are replaced by the
    externally supplied tensors dEdWi and dEdWr (presumably input- and
    recurrent-weight gradients from a custom learning rule -- confirm with
    callers).
    """
    rewiring_w_list = tf.get_collection('Rewiring/Weights')
    rewiring_sign_list = tf.get_collection('Rewiring/Signs')
    rewiring_var_list = tf.get_collection('Rewiring/Variables')
    # generate the two sets of variables
    grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
    # compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
    rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
    # Override the first two entries with the caller-provided gradients.
    rewiring_gradient_list[0] = dEdWi
    rewiring_gradient_list[1] = dEdWr
    # Map gradients w.r.t. the effective weights back onto theta via the signs.
    rewiring_gradient_list = [g * s if g is not None else None for g, s in
                              zip(rewiring_gradient_list, rewiring_sign_list)]
    rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
    # OP to apply all gradient descent updates
    gathered_grads_and_vars = []
    for (g, v) in grads_and_vars:
        if v not in rewiring_var_list:
            gathered_grads_and_vars.append((g, v))
        else:
            gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
    return gathered_grads_and_vars
def compute_gradients_with_rewiring_variables(opt, loss, var_list):
    """Compute (grad, var) pairs for var_list, replacing the gradients of the
    rewired theta variables with sign-corrected gradients taken with respect
    to the effective weights.  (Disconnected entries keep non-zero gradients
    to avoid irregularities in optimizers with momentum.)
    """
    weight_list = tf.get_collection('Rewiring/Weights')
    sign_list = tf.get_collection('Rewiring/Signs')
    theta_list = tf.get_collection('Rewiring/Variables')

    grads_and_vars = opt.compute_gradients(loss, var_list=var_list)

    # Gradients w.r.t. the effective weights, mapped back onto theta via the sign.
    weight_grads = tf.gradients(loss, weight_list)
    theta_grads = [None if g is None else g * s
                   for g, s in zip(weight_grads, sign_list)]
    grad_of_theta = dict(zip(theta_list, theta_grads))

    # Substitute the rewiring gradients where applicable, keep the rest as-is.
    return [(grad_of_theta[v], v) if v in theta_list else (g, v)
            for g, v in grads_and_vars]
def get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities):
    """Build a tf.Assert checking the total number of active connections
    (theta > 0) stays below the budget sum_i size(theta_i) * p_i."""
    if np.isscalar(rewiring_connectivities):
        rewiring_connectivities = [rewiring_connectivities] * len(rewiring_var_list)

    is_pos_list = [tf.greater(th.read_value(), 0) for th in rewiring_var_list]
    total_connected = tf.reduce_sum(
        [tf.reduce_sum(tf.cast(is_pos, dtype=tf.float32)) for is_pos in is_pos_list])
    limit_connected = tf.reduce_sum(
        [tf.cast(tf.size(is_pos), dtype=tf.float32) * p
         for is_pos, p in zip(is_pos_list, rewiring_connectivities)])

    return tf.Assert(total_connected <= limit_connected,
                     [total_connected, limit_connected],
                     name='CheckRewiringConnectivityBound')
def rewiring_optimizer_wrapper(opt, loss, learning_rate, l1s, temperatures,
                               rewiring_connectivities, global_step=None,
                               var_list=None,
                               grads_and_vars=None):
    """Wrap optimizer `opt` so each training step also applies Deep-R style
    L1 regularization, noise, and rewiring to the variables registered in the
    'Rewiring/*' collections.

    The returned op chains, via nested control dependencies:
    connectivity check -> gradient NaN/Inf checks -> apply_gradients ->
    zero-clamp of disconnected synapses -> L1 + noise update on theta ->
    re-activation of sampled dormant synapses.

    :param opt: a tf.train optimizer.
    :param loss: scalar loss tensor.
    :param learning_rate: rate used in the regularization/noise term.
    :param l1s: scalar or per-rewired-variable list of L1 coefficients.
    :param temperatures: scalar or per-variable list of noise temperatures.
    :param rewiring_connectivities: scalar or per-variable connectivity fractions.
    :param global_step: forwarded to apply_gradients.
    :param var_list: variables to train (defaults to all trainable variables).
    :param grads_and_vars: optional precomputed gradients, aligned with var_list.
    :return: a tf.no_op training step (or apply_gradients if nothing to rewire).
    """
    if var_list is None:
        var_list = tf.trainable_variables()
    # Select the rewired variable in the given list of variable to train
    rewiring_var_list = []
    rewiring_con_list = []
    for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
        if v in var_list:
            rewiring_var_list.append(v)
            rewiring_con_list.append(c)
    if grads_and_vars is None:
        grads_and_vars = compute_gradients_with_rewiring_variables(opt, loss, var_list)
    else:
        grads_and_vars = grads_and_vars
    assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
    for v, gv in zip(var_list, grads_and_vars):
        assert v == gv[1]
    # Broadcast scalar hyper-parameters to one entry per rewired variable.
    if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
    if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
    if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
                                                                       range(len(rewiring_var_list))]
    is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
    with tf.control_dependencies(is_positive_theta_list):
        check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
        with tf.control_dependencies([check_connectivity]):
            gradient_check_list = [
                tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
                (g, v) in grads_and_vars]
            with tf.control_dependencies(gradient_check_list):
                apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
                if len(rewiring_var_list) == 0:
                    print('Warning: No variable to rewire are found by the rewiring optimizer wrapper')
                    return apply_gradients
                with tf.control_dependencies([apply_gradients]):
                    # This is to make sure that the algorithms does not reconnect synapses by mistakes,
                    # This can happen with optimizers like Adam
                    disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
                                            zip(rewiring_var_list, is_positive_theta_list)]
                    with tf.control_dependencies(disconnection_guards):
                        rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
                        # L1 shrinkage plus temperature-scaled Gaussian noise,
                        # applied only to currently-connected entries.
                        mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
                        noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
                        apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
                                                              + tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
                                                for th, th_, l1, temp in
                                                zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
                        with tf.control_dependencies(apply_regularization):
                            number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
                                rewiring_var_list, rewiring_connectivities)
                            apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
                                              zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
                            with tf.control_dependencies(apply_rewiring):
                                train_step = tf.no_op('Train')
                                return train_step
def rewiring_optimizer_wrapper_NP(dEdWi, dEdWr, opt, loss, learning_rate, l1s, temperatures,
                                  rewiring_connectivities, global_step=None,
                                  var_list=None,
                                  grads_and_vars=None):
    """Variant of rewiring_optimizer_wrapper that uses externally supplied
    gradients dEdWi / dEdWr for the first two rewired weight matrices (via
    compute_gradients_with_rewiring_variables_NP); otherwise identical.
    """
    if var_list is None:
        var_list = tf.trainable_variables()
    # Select the rewired variable in the given list of variable to train
    rewiring_var_list = []
    rewiring_con_list = []
    for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
        if v in var_list:
            rewiring_var_list.append(v)
            rewiring_con_list.append(c)
    if grads_and_vars is None:
        grads_and_vars = compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list)
    else:
        grads_and_vars = grads_and_vars
    assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
    for v, gv in zip(var_list, grads_and_vars):
        assert v == gv[1]
    # Broadcast scalar hyper-parameters to one entry per rewired variable.
    if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
    if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
    if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
                                                                       range(len(rewiring_var_list))]
    is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
    with tf.control_dependencies(is_positive_theta_list):
        check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
        with tf.control_dependencies([check_connectivity]):
            gradient_check_list = [
                tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
                (g, v) in grads_and_vars]
            with tf.control_dependencies(gradient_check_list):
                apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
                if len(rewiring_var_list) == 0:
                    print('Warning: No variable to rewire are found by the rewiring optimizer wrapper')
                    return apply_gradients
                with tf.control_dependencies([apply_gradients]):
                    # This is to make sure that the algorithms does not reconnect synapses by mistakes,
                    # This can happen with optimizers like Adam
                    disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
                                            zip(rewiring_var_list, is_positive_theta_list)]
                    with tf.control_dependencies(disconnection_guards):
                        rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
                        # Noise is masked to connected entries (zero elsewhere).
                        mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
                        noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
                        apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
                                                              + tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
                                                for th, th_, l1, temp in
                                                zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
                        with tf.control_dependencies(apply_regularization):
                            number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
                                rewiring_var_list, rewiring_connectivities)
                            apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
                                              zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
                            with tf.control_dependencies(apply_rewiring):
                                train_step = tf.no_op('Train')
                                return train_step
def weight_sampler(n_in, n_out, p, dtype=tf.float32, neuron_sign=None, w_scale=1., eager=False):
    '''
    Returns a weight matrix and its underlying variables and sign matrices needed for rewiring.

    :param n_in: number of input neurons (rows).
    :param n_out: number of output neurons (columns).
    :param p: initial connection probability.
    :param dtype: tensorflow dtype of the created variables.
    :param neuron_sign: optional vector of +1/-1 per input row (fixed signs);
        random signs are drawn when None.
    :param w_scale: scale factor applied to the initial weights.
    :param eager: use eager Variables when True.
    :return: (w, w_sign, th, ini_con)
    '''
    if eager:
        Variable = tf.contrib.eager.Variable
    else:
        Variable = tf.Variable
    with tf.name_scope('SynapticSampler'):
        nb_non_zero = int(n_in * n_out * p)
        # NOTE(review): indices are drawn with replacement, so duplicates
        # collapse and the realized connection count can be < nb_non_zero.
        is_con_0 = np.zeros((n_in, n_out), dtype=bool)
        ind_in = rd.choice(np.arange(n_in), size=nb_non_zero)
        ind_out = rd.choice(np.arange(n_out), size=nb_non_zero)
        is_con_0[ind_in, ind_out] = True
        # Generate random signs
        if neuron_sign is None:
            theta_0 = np.abs(rd.randn(n_in, n_out) / np.sqrt(n_in))  # initial weight values
            theta_0 = theta_0 * is_con_0
            sign_0 = np.sign(rd.randn(n_in, n_out))
        else:
            assert np.size(neuron_sign) == n_in, 'Size of neuron_sign vector {}, for n_in {} expected'.format(
                np.size(neuron_sign), n_in)
            _, sign_0, theta_0, _ = random_sparse_signed_matrix(neuron_sign, n_out=n_out)  # p=1
            theta_0 *= is_con_0
        # Define the tensorflow matrices
        th = Variable(theta_0 * w_scale, dtype=dtype, name='theta')
        w_sign = Variable(sign_0, dtype=dtype, trainable=False, name='sign')
        is_connected = tf.greater(th, 0, name='mask')
        w = tf.where(condition=is_connected, x=w_sign * th, y=tf.zeros((n_in, n_out), dtype=dtype), name='weight')
        # Initial connectivity mask; rewiring is restricted to these positions.
        ini_con = tf.greater(theta_0 * w_scale, 0)
        # Add to collections to by pass and fetch them in the rewiring wrapper function
        tf.add_to_collection('Rewiring/Variables', th)
        tf.add_to_collection('Rewiring/Signs', w_sign)
        tf.add_to_collection('Rewiring/Weights', w)
        tf.add_to_collection('Rewiring/ini_con', ini_con)
        return w, w_sign, th, ini_con
def assert_connection_number(theta, targeted_number):
    """Return a tf.Assert verifying that `theta` has exactly `targeted_number`
    strictly-positive (i.e. connected) entries after a simulation step."""
    live = tf.cast(tf.greater(theta.read_value(), 0), tf.int32)
    n_live = tf.reduce_sum(live)
    return tf.Assert(tf.equal(n_live, targeted_number),
                     data=[n_live, targeted_number],
                     name='NumberOfConnectionCheck')
def rewiring(theta, ini_con, target_nb_connection=None, nb_reconnect=None, epsilon=1e-12, check_zero_numbers=False):
    '''
    The rewiring operation to use after each iteration.

    Re-activates (sets to `epsilon`) a random subset of currently dormant
    synapses, restricted to positions that were connected initially
    (`ini_con`) -- per the file header, this keeps the adjacency matrix
    fixed instead of performing Deep R's stochastic rewiring.

    :param theta: tf Variable of connection parameters (>0 means connected).
    :param ini_con: boolean tensor, the initial connectivity mask.
    :param target_nb_connection: desired active-connection count (used when
        nb_reconnect is None).
    :param nb_reconnect: explicit number of synapses to re-activate.
    :param epsilon: value written into re-activated entries.
    :param check_zero_numbers: also assert the final connection count.
    :return: a tf.no_op that triggers the update via control dependencies.
    '''
    with tf.name_scope('rewiring'):
        th = theta.read_value()
        is_con = tf.greater(th, 0)
        #reconnect_candidate_coord = tf.where(tf.logical_not(is_con), name='CandidateCoord')
        # Candidates = dormant AND initially-connected positions only.
        reconnect_candidate_coord = tf.where(tf.logical_and(tf.logical_not(is_con),ini_con), name='CandidateCoord')
        n_candidates = tf.shape(reconnect_candidate_coord)[0]
        if nb_reconnect is None:
            n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
            nb_reconnect = target_nb_connection - n_connected
        nb_reconnect = tf.clip_by_value(nb_reconnect, 0, n_candidates)
        # Uniformly sample nb_reconnect candidate coordinates.
        reconnect_sample_id = tf.random_shuffle(tf.range(n_candidates))[:nb_reconnect]
        reconnect_sample_coord = tf.gather(reconnect_candidate_coord, reconnect_sample_id, name='SelectedCoord')
        # Apply the rewiring
        reconnect_vals = tf.fill(dims=[nb_reconnect], value=epsilon, name='InitValues')
        reconnect_op = tf.scatter_nd_update(theta, reconnect_sample_coord, reconnect_vals, name='Reconnect')
        with tf.control_dependencies([reconnect_op]):
            if check_zero_numbers and target_nb_connection is not None:
                connection_check = assert_connection_number(theta=theta, targeted_number=target_nb_connection)
                with tf.control_dependencies([connection_check]):
                    return tf.no_op('Rewiring')
            else:
                return tf.no_op('Rewiring')
if __name__ == '__main__':
    # Manual visual check of the initialization spectra (opens a plot window).
    test_random_sparse_signed_matrix()
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
import numpy as np
import torch
from torch import nn
from typing import Dict
def mem2str(num_bytes):
    """Format a non-negative byte count as a human-readable size string."""
    assert num_bytes >= 0
    # Largest-first so the first matching unit wins.
    for threshold, fmt in ((2 ** 30, "%.3f GB"), (2 ** 20, "%.3f MB"), (2 ** 10, "%.3f KB")):
        if num_bytes >= threshold:
            return fmt % (float(num_bytes) / threshold)
    return "%d bytes" % num_bytes
def sec2str(seconds):
    """Format a duration in seconds as "<H>H <MM>M <SS>S".

    Hours are NOT wrapped at 24 (e.g. 90000 s -> "25H 00M 00S"), matching the
    original behavior. The original also contained a misleading dead statement
    (``seconds % (24 * 3600)``) that suggested day-wrapping but had no effect;
    the divmod form below is equivalent and unambiguous.

    :param seconds: duration in seconds (fractions are truncated).
    :return: formatted string with zero-padded minutes and seconds.
    """
    seconds = int(seconds)
    hour, rem = divmod(seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    return "%dH %02dM %02dS" % (hour, minutes, seconds)
def get_mem_usage():
    """Return a one-line summary of system virtual-memory usage.

    NOTE: depends on the third-party ``psutil`` package (imported lazily)
    and the module-level ``mem2str`` helper.
    """
    import psutil
    stats = psutil.virtual_memory()
    fields = [
        "available: %s, " % (mem2str(stats.available)),
        "used: %s, " % (mem2str(stats.used)),
        "free: %s" % (mem2str(stats.free)),
    ]
    # result += "active: %s\t" % (mem2str(mem.active))
    # result += "inactive: %s\t" % (mem2str(mem.inactive))
    # result += "buffers: %s\t" % (mem2str(mem.buffers))
    # result += "cached: %s\t" % (mem2str(mem.cached))
    # result += "shared: %s\t" % (mem2str(mem.shared))
    # result += "slab: %s\t" % (mem2str(mem.slab))
    return "".join(fields)
def flatten_first2dim(batch):
    """Merge the first two dimensions of a tensor, recursing into dicts.

    A tensor of shape (a, b, *rest) becomes (a*b, *rest); for a dict, every
    value is flattened the same way. Any other type is a hard error.
    """
    if isinstance(batch, dict):
        return {k: flatten_first2dim(v) for k, v in batch.items()}
    if isinstance(batch, torch.Tensor):
        trailing = batch.size()[2:]
        return batch.view(-1, *trailing)
    assert False, "unsupported type: %s" % type(batch)
def _tensor_slice(t, dim, b, e):
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
def tensor_slice(t, dim, b, e):
    """Recursively slice ``t`` (a tensor or a dict of tensors) along ``dim``.

    Tensor results are made contiguous; dict values are sliced one by one.
    """
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, b, e).contiguous()
    if isinstance(t, dict):
        return {k: tensor_slice(v, dim, b, e) for k, v in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def tensor_index(t, dim, i):
    """Select index ``i`` along ``dim``, dropping that dimension (tensor or dict).

    Implemented as a width-1 slice followed by a squeeze, so the result is
    contiguous; dicts are handled recursively.
    """
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()
    if isinstance(t, dict):
        return {k: tensor_index(v, dim, i) for k, v in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def one_hot(x, n):
    """Convert a (batch, 1) column of class indices into a (batch, n) one-hot float tensor."""
    assert x.dim() == 2 and x.size(1) == 1
    out = torch.zeros(x.size(0), n, device=x.device)
    # scatter_ writes 1.0 at each row's index column and returns `out` itself.
    return out.scatter_(1, x, 1)
def set_all_seeds(rand_seed):
    """Seed the python, numpy, and torch (CPU + CUDA) RNGs from one base seed.

    Each generator gets a distinct offset so the streams are not trivially
    identical to one another.
    """
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
    for offset, seed_fn in enumerate(seeders):
        seed_fn(rand_seed + offset)
def weights_init(m):
    """Custom weight initialization: orthogonal init for Linear and Conv2d layers.

    Intended for use with ``module.apply(weights_init)``; any other layer type
    is reported on stdout and left untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        # nn.init.kaiming_normal(m.weight.data)
        nn.init.orthogonal_(m.weight.data)
    else:
        print("%s is not custom-initialized." % m.__class__)
def init_net(net, net_file):
    """Initialize ``net``: load a state dict from ``net_file`` if given,
    otherwise apply the module-level ``weights_init`` scheme."""
    if not net_file:
        net.apply(weights_init)
    else:
        net.load_state_dict(torch.load(net_file))
def count_output_size(input_shape, model):
    """Return the flattened element count of ``model``'s output for one
    (uninitialized) input of shape ``input_shape``."""
    dummy = torch.FloatTensor(*input_shape)
    return model.forward(dummy).view(-1).size()[0]
def write_frame_to_image(frame, path, size=(300, 300)):
    """Render the first 4 channels of a single-sample frame tensor to a 2x2 image grid.

    :param frame: tensor of shape (1, C, H, W) with C >= 4; only sample 0 is drawn.
    :param path: output image file path passed to plt.savefig.
    :param size: unused; kept for interface compatibility.

    NOTE(review): relies on matplotlib's ``plt`` being imported elsewhere in
    this module -- no import is visible here, confirm before use.
    """
    batchsize = frame.size(0)
    assert batchsize == 1
    frame = frame[0].cpu().numpy()
    rows = 2
    cols = 2
    fig, ax = plt.subplots(rows, cols, figsize=(cols * 10, rows * 10))
    for i in range(rows * cols):
        r = i // cols
        c = i % cols
        data = frame[i]
        # data = data / 255.0
        ax[r, c].axis("off")
        if data.shape[0] == 3:
            # Channel-first RGB -> channel-last (H, W, 3) for imshow.
            data = data.swapaxes(0, 1).swapaxes(1, 2)
            ax[r, c].imshow(data, vmin=0, vmax=1)
            continue
        # print('>>>', data.shape)
        # if data.shape[0] > 3:
        # data = data[0]
        # print(data.shape)
        ax[r, c].imshow(data, vmin=0, vmax=1, cmap="gray")
        # ax[r, c].set_title('c%d_%s' % (i, channel_names[i]), fontsize=50)
        # break
    plt.tight_layout()
    plt.savefig(path)
    plt.close()
def write_frame_to_image2(frame, path, size=(300, 300)):
    """Render channel 3 of the first 16 samples of a frame batch to a 4x4 image grid.

    :param frame: tensor of shape (N, C, H, W) with N >= 16 and C >= 4.
    :param path: output image file path passed to plt.savefig.
    :param size: unused; kept for interface compatibility.

    NOTE(review): like write_frame_to_image, relies on matplotlib's ``plt``
    being imported elsewhere in this module -- confirm before use.
    """
    # batchsize = frame.size(0)
    # assert(batchsize == 1)
    frame = frame.cpu().numpy()
    rows = 4
    cols = 4
    fig, ax = plt.subplots(rows, cols, figsize=(cols * 10, rows * 10))
    for i in range(rows * cols):
        r = i // cols
        c = i % cols
        data = frame[i][3]
        # data = data / 255.0
        ax[r, c].axis("off")
        if data.shape[0] == 3:
            # Channel-first RGB -> channel-last (H, W, 3) for imshow.
            data = data.swapaxes(0, 1).swapaxes(1, 2)
            ax[r, c].imshow(data, vmin=0, vmax=1)
            continue
        # print('>>>', data.shape)
        # if data.shape[0] > 3:
        # data = data[0]
        # print(data.shape)
        ax[r, c].imshow(data, vmin=0, vmax=1, cmap="gray")
        # ax[r, c].set_title('c%d_%s' % (i, channel_names[i]), fontsize=50)
        # break
    plt.tight_layout()
    plt.savefig(path)
    plt.close()
def num2str(n):
    """Format a number compactly with K/M suffixes.

    Values below 1e3 are rendered with ``str``; larger values are scaled to
    thousands ("K") or millions ("M") with up to three decimals and trailing
    zeros removed (e.g. 1500 -> "1.5K", 2000000 -> "2M").

    Bug fix: the original stripped trailing zeros unconditionally, which
    mangled round integers (num2str(100) returned "1"). Stripping now only
    applies to a decimal fraction.
    """
    if n < 1e3:
        s = str(n)
        unit = ""
    elif n < 1e6:
        s = "%.3f" % (n / 1e3)
        unit = "K"
    else:
        s = "%.3f" % (n / 1e6)
        unit = "M"
    if "." in s:
        s = s.rstrip("0").rstrip(".")
    return s + unit
|
|
import unittest
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
import dymos as dm
from dymos.utils.lgl import lgl
from dymos.models.eom import FlightPathEOM2D
import numpy as np
class TestInputParameterConnections(unittest.TestCase):
    """Verify that phase input parameters reach ODE targets with the right shape.

    Each test builds a small ODE group whose 'sum' component receives a (2, 2)
    mass parameter and checks that the value arriving at the ODE is either
    broadcast across nodes (dynamic) or left as-is (static), for both the
    Radau and Gauss-Lobatto transcriptions.

    NOTE(review): these tests require the optional pyOptSparse driver.
    """
    def test_dynamic_input_parameter_connections_radau(self):
        """Dynamic (2, 2) parameter should be broadcast to (num_nodes, 2, 2) with Radau."""
        class TrajectoryODE(om.Group):
            def initialize(self):
                self.options.declare('num_nodes', types=int)
            def setup(self):
                nn = self.options['num_nodes']
                # Dynamic case: the summing component expects one (2, 2) matrix per node.
                self.add_subsystem('sum', om.ExecComp('m_tot = sum(m)',
                                                      m={'value': np.zeros((nn, 2, 2)),
                                                         'units': 'kg'},
                                                      m_tot={'value': np.zeros(nn),
                                                             'units': 'kg'}))
                self.add_subsystem('eom', FlightPathEOM2D(num_nodes=nn))
                self.connect('sum.m_tot', 'eom.m')
        optimizer = 'SLSQP'
        num_segments = 1
        transcription_order = 5
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.options['optimizer'] = optimizer
        p.driver.declare_coloring()
        seg_ends, _ = lgl(num_segments + 1)
        # @dm.declare_time(units='s')
        # @dm.declare_state('v', rate_source='eom.v_dot', units='m/s')
        # @dm.declare_state('h', rate_source='eom.h_dot', units='m')
        # @dm.declare_parameter('m', targets='sum.m', units='kg', shape=(2, 2))
        phase = dm.Phase(ode_class=TrajectoryODE,
                         transcription=dm.Radau(num_segments=num_segments, order=transcription_order,
                                                segment_ends=seg_ends))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(initial_bounds=(0.0, 100.0), duration_bounds=(0., 100.), units='s')
        phase.add_state('h', fix_initial=True, fix_final=True, lower=0.0, units='m', rate_source='eom.h_dot')
        phase.add_state('v', fix_initial=True, fix_final=False, units='m/s', rate_source='eom.v_dot')
        phase.add_input_parameter('m', val=[[1, 2], [3, 4]], units='kg', targets='sum.m')
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True, force_alloc_complex=True)
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 100.0
        p['phase0.states:h'] = phase.interpolate(ys=[20, 0], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, -5], nodes='state_input')
        p.run_model()
        # The dynamic parameter must be tiled across all transcription nodes.
        expected = np.broadcast_to(np.array([[1, 2], [3, 4]]),
                                   (p.model.phase0.options['transcription'].grid_data.num_nodes, 2, 2))
        assert_near_equal(p.get_val('phase0.rhs_all.sum.m'), expected)
    def test_static_input_parameter_connections_radau(self):
        """Static (dynamic=False) parameter should arrive un-broadcast with Radau."""
        class TrajectoryODE(om.Group):
            def initialize(self):
                self.options.declare('num_nodes', types=int)
            def setup(self):
                nn = self.options['num_nodes']
                # Static case: a single (2, 2) matrix shared by all nodes.
                self.add_subsystem('sum', om.ExecComp('m_tot = sum(m)',
                                                      m={'value': np.zeros((2, 2)),
                                                         'units': 'kg'},
                                                      m_tot={'value': np.zeros(nn),
                                                             'units': 'kg'}))
                self.add_subsystem('eom', FlightPathEOM2D(num_nodes=nn))
                self.connect('sum.m_tot', 'eom.m')
        optimizer = 'SLSQP'
        num_segments = 1
        transcription_order = 5
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.options['optimizer'] = optimizer
        p.driver.declare_coloring()
        seg_ends, _ = lgl(num_segments + 1)
        phase = dm.Phase(ode_class=TrajectoryODE,
                         transcription=dm.Radau(num_segments=num_segments,
                                                order=transcription_order,
                                                segment_ends=seg_ends))
        p.model.add_subsystem('phase0', phase)
        # NOTE(review): unlike the sibling tests, no units='s' here -- confirm intended.
        phase.set_time_options(initial_bounds=(0.0, 100.0), duration_bounds=(0., 100.))
        phase.add_state('h', fix_initial=True, fix_final=True, lower=0.0, units='m', rate_source='eom.h_dot')
        phase.add_state('v', fix_initial=True, fix_final=False, units='m/s', rate_source='eom.v_dot')
        phase.add_input_parameter('m', val=[[1, 2], [3, 4]], units='kg', targets='sum.m', dynamic=False)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True, force_alloc_complex=True)
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 100.0
        p['phase0.states:h'] = phase.interpolate(ys=[20, 0], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, -5], nodes='state_input')
        p.run_model()
        expected = np.array([[1, 2], [3, 4]])
        assert_near_equal(p.get_val('phase0.rhs_all.sum.m'), expected)
    def test_dynamic_input_parameter_connections_gl(self):
        """Dynamic parameter should be broadcast to both disc and col node sets with Gauss-Lobatto."""
        class TrajectoryODE(om.Group):
            def initialize(self):
                self.options.declare('num_nodes', types=int)
            def setup(self):
                nn = self.options['num_nodes']
                self.add_subsystem('sum', om.ExecComp('m_tot = sum(m)',
                                                      m={'value': np.zeros((nn, 2, 2)),
                                                         'units': 'kg'},
                                                      m_tot={'value': np.zeros(nn),
                                                             'units': 'kg'}))
                self.add_subsystem('eom', FlightPathEOM2D(num_nodes=nn))
                self.connect('sum.m_tot', 'eom.m')
        optimizer = 'SLSQP'
        num_segments = 1
        transcription_order = 5
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.options['optimizer'] = optimizer
        p.driver.declare_coloring()
        seg_ends, _ = lgl(num_segments + 1)
        phase = dm.Phase(ode_class=TrajectoryODE,
                         transcription=dm.GaussLobatto(num_segments=num_segments,
                                                       order=transcription_order,
                                                       segment_ends=seg_ends))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(initial_bounds=(0.0, 100.0), duration_bounds=(0., 100.), units='s')
        phase.add_state('h', fix_initial=True, fix_final=True, lower=0.0, units='m', rate_source='eom.h_dot')
        phase.add_state('v', fix_initial=True, fix_final=False, units='m/s', rate_source='eom.v_dot')
        phase.add_input_parameter('m', val=[[1, 2], [3, 4]], units='kg', targets='sum.m')
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True, force_alloc_complex=True)
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 100.0
        p['phase0.states:h'] = phase.interpolate(ys=[20, 0], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, -5], nodes='state_input')
        p.run_model()
        # Gauss-Lobatto evaluates the ODE at discretization and collocation nodes separately.
        gd = p.model.phase0.options['transcription'].grid_data
        expected = np.broadcast_to(np.array([[1, 2], [3, 4]]),
                                   (gd.subset_num_nodes['state_disc'], 2, 2))
        assert_near_equal(p.get_val('phase0.rhs_disc.sum.m'), expected)
        expected = np.broadcast_to(np.array([[1, 2], [3, 4]]),
                                   (gd.subset_num_nodes['col'], 2, 2))
        assert_near_equal(p.get_val('phase0.rhs_col.sum.m'), expected)
    def test_static_input_parameter_connections_gl(self):
        """Static parameter should arrive un-broadcast at both GL ODE instances."""
        class TrajectoryODE(om.Group):
            def initialize(self):
                self.options.declare('num_nodes', types=int)
            def setup(self):
                nn = self.options['num_nodes']
                self.add_subsystem('sum', om.ExecComp('m_tot = sum(m)',
                                                      m={'value': np.zeros((2, 2)),
                                                         'units': 'kg'},
                                                      m_tot={'value': np.zeros(nn),
                                                             'units': 'kg'}))
                self.add_subsystem('eom', FlightPathEOM2D(num_nodes=nn))
                self.connect('sum.m_tot', 'eom.m')
        optimizer = 'SLSQP'
        num_segments = 1
        transcription_order = 5
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.options['optimizer'] = optimizer
        p.driver.declare_coloring()
        seg_ends, _ = lgl(num_segments + 1)
        phase = dm.Phase(ode_class=TrajectoryODE,
                         transcription=dm.GaussLobatto(num_segments=num_segments,
                                                       order=transcription_order,
                                                       segment_ends=seg_ends))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(initial_bounds=(0.0, 100.0), duration_bounds=(0., 100.), units='s')
        phase.add_state('h', fix_initial=True, fix_final=True, lower=0.0, units='m', rate_source='eom.h_dot')
        phase.add_state('v', fix_initial=True, fix_final=False, units='m/s', rate_source='eom.v_dot')
        phase.add_input_parameter('m', val=[[1, 2], [3, 4]], units='kg', targets='sum.m', dynamic=False)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True, force_alloc_complex=True)
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 100.0
        p['phase0.states:h'] = phase.interpolate(ys=[20, 0], nodes='state_input')
        p['phase0.states:v'] = phase.interpolate(ys=[0, -5], nodes='state_input')
        p.run_model()
        expected = np.array([[1, 2], [3, 4]])
        assert_near_equal(p.get_val('phase0.rhs_disc.sum.m'), expected)
        assert_near_equal(p.get_val('phase0.rhs_col.sum.m'), expected)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__': # pragma: no cover
    unittest.main()
|
|
import sys
sys.path.append("./helpers/")
import json
import pyspark
import helpers
import postgres
import numpy as np
from pyspark.streaming.kafka import KafkaUtils, TopicAndPartition
####################################################################
class SparkStreamerFromKafka:
    """
    class that streams messages from Kafka topic and cleans up the message content

    NOTE(review): written for Python 2 / legacy Spark Streaming -- it uses the
    Python 2 ``long`` builtin and the removed ``KafkaUtils.createDirectStream``
    API; porting to Python 3 / Structured Streaming requires changes.
    """
    def __init__(self, kafka_configfile, schema_configfile, stream_configfile, start_offset):
        """
        class constructor that initializes the instance according to the configurations
        of Kafka (brokers, topic, offsets), data schema and batch interval for streaming
        :type kafka_configfile: str     path to s3 config file
        :type schema_configfile: str     path to schema config file
        :type stream_configfile: str     path to stream config file
        :type start_offset:      int     offset from which to read from partitions of Kafka topic
        """
        self.kafka_config = helpers.parse_config(kafka_configfile)
        self.stream_config = helpers.parse_config(stream_configfile)
        self.schema = helpers.parse_config(schema_configfile)
        self.start_offset = start_offset
        self.sc = pyspark.SparkContext().getOrCreate()
        # Micro-batch interval (seconds) comes from the stream config.
        self.ssc = pyspark.streaming.StreamingContext(self.sc, self.stream_config["INTERVAL"])
        self.sc.setLogLevel("ERROR")
    def initialize_stream(self):
        """
        initializes stream from Kafka topic

        Builds a direct (receiver-less) stream; if explicit start offsets cannot
        be constructed, falls back to the broker-default offsets.
        """
        topic, n = self.kafka_config["TOPIC"], self.kafka_config["PARTITIONS"]
        try:
            fromOffsets = {TopicAndPartition(topic, i): long(self.start_offset) for i in range(n)}
        except:
            # NOTE(review): deliberate best-effort fallback -- ``long`` is absent
            # on Python 3 and an invalid offset also lands here; fromOffsets=None
            # means "start from the default offsets".
            fromOffsets = None
        self.dataStream = KafkaUtils.createDirectStream(self.ssc, [topic],
                                                        {"metadata.broker.list": self.kafka_config["BROKERS_IP"]},
                                                        fromOffsets=fromOffsets)
    def process_stream(self):
        """
        cleans the streamed data

        Parses each Kafka message as JSON, enriches it with block/time-slot
        fields via the helpers module, and re-keys each record as
        ((time_slot, block_latid, block_lonid), (vehicle_id, lon, lat, datetime)).
        """
        self.initialize_stream()
        partitions = self.stream_config["PARTITIONS"]
        self.dataStream = (self.dataStream
                           .repartition(partitions)
                           .map(lambda x: json.loads(x[1]))
                           .map(helpers.add_block_fields)
                           .map(helpers.add_time_slot_field)
                           .filter(lambda x: x is not None)
                           .map(lambda x: ((x["time_slot"], x["block_latid"], x["block_lonid"]),
                                           (x["vehicle_id"], x["longitude"], x["latitude"], x["datetime"]))))
    def run(self):
        """
        starts streaming and blocks until the streaming context terminates
        """
        self.process_stream()
        self.ssc.start()
        self.ssc.awaitTermination()
####################################################################
class TaxiStreamer(SparkStreamerFromKafka):
    """
    class that provides each taxi driver with the top-n pickup spots

    Joins the live driver-location stream against precomputed historical pickup
    hot spots (loaded from PostgreSQL) and writes personalized suggestions back
    to PostgreSQL for each micro-batch.

    NOTE(review): Python 2 only -- uses the ``print`` statement and relies on
    Python 2 ``map``/``zip`` returning lists.
    """
    def __init__(self, kafka_configfile, schema_configfile, stream_configfile, psql_configfile, start_offset=0):
        """
        class constructor that initializes the instance according to the configurations
        of Kafka (brokers, topic, offsets), PostgreSQL database, data schema and batch interval for streaming
        :type kafka_configfile:  str     path to s3 config file
        :type schema_configfile: str     path to schema config file
        :type stream_configfile: str     path to stream config file
        :type psql_configfile:   str     path to psql config file
        :type start_offset:      int     offset from which to read from partitions of Kafka topic
        """
        SparkStreamerFromKafka.__init__(self, kafka_configfile, schema_configfile, stream_configfile, start_offset)
        self.psql_config = helpers.parse_config(psql_configfile)
        self.sqlContext = pyspark.sql.SQLContext(self.sc)
        self.load_batch_data()
        # Counts processed micro-batches; used to create the DB index exactly once.
        self.psql_n = 0
    def load_batch_data(self):
        """
        reads result of batch transformation from PostgreSQL database, splits it into BATCH_PARTS parts
        by time_slot field value and caches them

        Each part holds the time_slot range [total/parts*tsl, total/parts*(tsl+1)-1],
        re-keyed as ((time_slot, block_latid, block_lonid), (lon, lat, passengers))
        and persisted with 2x replication in memory.
        """
        self.parts = self.stream_config["BATCH_PARTS"]
        self.total = self.stream_config["MAX_PARTS"]
        self.hdata = {}
        query = "(SELECT * FROM %s WHERE time_slot BETWEEN {} AND {}) tmp" % self.psql_config["dbtable_batch"]
        for tsl in range(self.parts):
            # Python 2 integer division: contiguous, non-overlapping slot ranges.
            tmin, tmax = self.total/self.parts*tsl, self.total/self.parts*(tsl+1)-1
            configs = {key: self.psql_config[key] for key in ["url", "driver", "user", "password"]}
            configs["dbtable"] = query.format(tmin, tmax)
            self.hdata[tsl] = postgres.read_from_postgresql(self.sqlContext, configs)
            self.hdata[tsl] = (self.hdata[tsl].rdd.repartition(self.stream_config["PARTITIONS"])
                                               .map(lambda x: x.asDict())
                                               .map(lambda x: ((x["time_slot"], x["block_latid"], x["block_lonid"]),
                                                               (x["longitude"], x["latitude"], x["passengers"]))))
            self.hdata[tsl].persist(pyspark.StorageLevel.MEMORY_ONLY_2)
            print "loaded batch {}/{} with {} rows".format(tsl+1, self.parts, self.hdata[tsl].count())
    def process_each_rdd(self, time, rdd):
        """
        for every record in rdd, queries database historic_data for the answer
        :type time: datetime     timestamp for each RDD batch
        :type rdd:  RDD          Spark RDD from the stream
        """
        def my_join(x):
            """
            joins the record from table with historical data with the records of the taxi drivers' locations
            on the key (time_slot, block_latid, block_lonid)
            schema for x: ((time_slot, block_latid, block_lonid), (longitude, latitude, passengers))
            schema for el: (vehicle_id, longitude, latitude, datetime)
            :type x: tuple( tuple(int, int, int), tuple(float, float, int) )
            """
            try:
                return map(lambda el: ( el[0],
                                        (el[1], el[2]),
                                        zip( x[1][0], x[1][1]),
                                        x[1][2],
                                        el[3] ), rdd_bcast.value[x[0]])
            except:
                # Key absent from the broadcast batch: yield a single None,
                # filtered out downstream.
                return [None]
        def select_customized_spots(x):
            """
            chooses no more than 3 pickup spots from top-n,
            based on the total number of rides from that spot
            and on the order in which the drivers send their location data
            schema for x: (vehicle_id, (longitude, latitude), [list of spots (lon, lat)], [list of passenger pickups], datetime)
            :type x: tuple( str, tuple(float, float), list[tuple(float, float)], tuple(int, list[int]), str )
            """
            try:
                length, total = len(x[3]), sum(x[3])
                # Per-vehicle seed keeps the choice deterministic for a given driver.
                np.random.seed(4040 + int(x[0]))
                choices = np.random.choice(length, min(3, length), p=np.array(x[3])/float(total), replace=False)
                return {"vehicle_id": x[0], "vehicle_pos": list(x[1]),
                        "spot_lon": [x[2][c][0] for c in choices],
                        "spot_lat": [x[2][c][1] for c in choices],
                        "datetime": x[4]}
            except:
                # Degenerate input (e.g. zero pickups): return no suggestions.
                return {"vehicle_id": x[0], "vehicle_pos": list(x[1]),
                        "spot_lon": [], "spot_lat": [], "datetime": x[4]}
        global iPass
        try:
            iPass += 1
        except:
            iPass = 1
        print("========= RDD Batch Number: {0} - {1} =========".format(iPass, str(time)))
        try:
            parts, total = self.parts, self.total
            # calculate list of distinct time_slots in current RDD batch
            tsl_list = rdd.map(lambda x: x[0][0]*parts/total).distinct().collect()
            # transform rdd and broadcast to workers
            # rdd_bcast has the following schema
            # rdd_bcast = {key: [list of value]}
            # key = (time_slot, block_latid, block_lonid)
            # value = (vehicle_id, longitude, latitude, datetime)
            rdd_bcast = (rdd.groupByKey()
                            .mapValues(lambda x: sorted(x, key=lambda el: el[3]))
                            .collect())
            if len(rdd_bcast) == 0:
                return
            rdd_bcast = self.sc.broadcast({x[0]:x[1] for x in rdd_bcast})
            # join the batch dataset with rdd_bcast, filter None values,
            # and from all the spot suggestions select specific for the driver to ensure no competition
            resDF = self.sc.union([(self.hdata[tsl]
                                    .flatMap(my_join, preservesPartitioning=True)
                                    .filter(lambda x: x is not None)
                                    .map(select_customized_spots)) for tsl in tsl_list])
            # save data
            self.psql_n += 1
            configs = {key: self.psql_config[key] for key in ["url", "driver", "user", "password"]}
            configs["dbtable"] = self.psql_config["dbtable_stream"]
            postgres.save_to_postgresql(resDF, self.sqlContext, configs, self.stream_config["mode_stream"])
            if self.psql_n == 1:
                postgres.add_index_postgresql(configs["dbtable"], "vehicle_id", self.psql_config)
        except:
            # NOTE(review): blanket swallow keeps the stream alive on any batch
            # failure, but hides errors -- consider logging the exception.
            pass
    def process_stream(self):
        """
        processes each RDD in the stream by attaching process_each_rdd as the
        per-batch callback after the base-class cleanup pipeline
        """
        SparkStreamerFromKafka.process_stream(self)
        process = self.process_each_rdd
        self.dataStream.foreachRDD(process)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.