code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import sys
# import libraries
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import pickle
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
nltk.download(['punkt','stopwords','wordnet'])
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.tree import DecisionTreeClassifier
def load_data(database_filepath):
    """
    Read the disaster-messages table out of a SQLite database.

    INPUT
    database_filepath: path to the SQLite database file
    OUTPUT
    X: Series holding the "message" column
    Y: df holding the category label columns (everything after column 3)
    category_names: list with the name of each category to classify
    """
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql_table('Messages', engine)
    messages = df['message']
    # first four columns are metadata; the rest are the label columns
    labels = df.iloc[:, 4:]
    return messages, labels, list(labels.columns)
def tokenize(text):
    """
    Normalize, tokenize and lemmatize a message string.

    Steps: lowercase and strip punctuation, split into word tokens,
    drop English stop-words, lemmatize the remainder.

    INPUT
    text: string containing the text of the message
    OUTPUT
    list of processed tokens
    """
    # keep only alphanumerics; everything else becomes a space
    normalized = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    tokens = word_tokenize(normalized)
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(tok) for tok in tokens if tok not in stop_words]
def build_model():
    """
    Build a GridSearchCV-wrapped ML pipeline for multi-output message
    classification.

    OUTPUT
    cv: GridSearchCV object containing the pipeline and the hyperparameter
        grid used to tune the model

    NOTES
    The pipeline consists of:
        - CountVectorizer (bag of words, using the module's tokenize())
        - TfidfTransformer
        - MultiOutputClassifier wrapping a balanced RandomForestClassifier
    """
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier(class_weight='balanced'))),
    ])
    # grid over forest size and split granularity of the wrapped estimator
    parameters = {
        'clf__estimator__n_estimators': [40, 100],
        'clf__estimator__min_samples_split': [2, 3],
    }
    print('Training pipeline in GridSearchCV')
    cv = GridSearchCV(pipeline, param_grid=parameters, cv=3,
                      scoring='f1_weighted', verbose=3)
    return cv
def display_results(category_names, Y, y_test, y_pred):
    """
    Build a dataframe of per-category model performance metrics.

    INPUT
    category_names: array of names for the categories in the multioutput
        classifier
    Y: full label df, used to report each category's positive rate ('%df')
    y_test: subset of data used to test the model's performance
    y_pred: predictions made by the model (2-D array, one column per category)
    OUTPUT
    metrics_df: df indexed by category containing 'Accuracy', 'Precision',
        'Recall', 'F1' and '%df' columns
    """
    metric = []
    for i, col in enumerate(category_names):
        true_col = y_test.iloc[:, i].values
        pred_col = y_pred[:, i]
        accuracy = accuracy_score(true_col, pred_col)
        precision = precision_score(true_col, pred_col, average='weighted')
        recall = recall_score(true_col, pred_col, average='weighted')
        f1_sco = f1_score(true_col, pred_col, average='weighted')
        # fraction of the whole dataset labeled positive for this category
        perc_df = Y[col].sum() / Y.shape[0]
        metric.append([accuracy, precision, recall, f1_sco, perc_df])
    # Create dataframe containing metrics
    metric = np.array(metric)
    metrics_df = pd.DataFrame(data=metric, index=category_names,
                              columns=['Accuracy', 'Precision', 'Recall', 'F1', '%df'])
    return metrics_df
def evaluate_model(model, X_test, Y_test, Y, category_names):
    """
    Predict on the test set and print per-category performance metrics.

    INPUT
    model: trained model
    X_test: df containing test data except the label features
    Y_test: df containing the label features for the test data
    Y: full label df (used for per-category positive rates)
    category_names: list of category names
    OUTPUT
    None; prints the metrics dataframe built by display_results()
    """
    predictions = model.predict(X_test)
    print(display_results(category_names, Y, Y_test, predictions))
def save_model(model, model_filepath):
    """
    Serialize a trained model to disk with pickle.

    INPUT
    model: trained model
    model_filepath: destination path for the pickle file
    OUTPUT
    None
    """
    with open(model_filepath, 'wb') as out_file:
        pickle.dump(model, out_file)
def main():
    """
    Command-line entry point: load data, train, evaluate, and save the model.

    Expects exactly two CLI arguments: the database filepath and the output
    pickle filepath. Prints usage instructions otherwise.
    """
    if len(sys.argv) != 3:
        # adjacent string literals concatenate into the single usage message
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, Y, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')
if __name__ == '__main__':
main() | [
"sklearn.model_selection.GridSearchCV",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.metrics.f1_score",
"pickle.dump",
"nltk.corpus.stopwords.words",
"nltk.download",
"pandas.DataFrame",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
... | [((308, 356), 'nltk.download', 'nltk.download', (["['punkt', 'stopwords', 'wordnet']"], {}), "(['punkt', 'stopwords', 'wordnet'])\n", (321, 356), False, 'import nltk\n'), ((1358, 1395), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""Messages"""', 'engine'], {}), "('Messages', engine)\n", (1375, 1395), True, 'import pandas as pd\n'), ((1958, 1984), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1973, 1984), False, 'from nltk.corpus import stopwords\n'), ((2002, 2021), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2019, 2021), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((2055, 2074), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2068, 2074), False, 'from nltk.tokenize import word_tokenize\n'), ((2959, 3048), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipeline'], {'param_grid': 'parameters', 'cv': '(3)', 'scoring': '"""f1_weighted"""', 'verbose': '(3)'}), "(pipeline, param_grid=parameters, cv=3, scoring='f1_weighted',\n verbose=3)\n", (2971, 3048), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3447, 3494), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3478, 3494), False, 'from sklearn.metrics import precision_recall_fscore_support, accuracy_score\n'), ((4084, 4100), 'numpy.array', 'np.array', (['metric'], {}), '(metric)\n', (4092, 4100), True, 'import numpy as np\n'), ((4118, 4227), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'metric', 'index': 'category_names', 'columns': "['Accuracy', 'Precision', 'Recall', 'F1', '%df']"}), "(data=metric, index=category_names, columns=['Accuracy',\n 'Precision', 'Recall', 'F1', '%df'])\n", (4130, 4227), True, 'import pandas as pd\n'), ((3575, 3629), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test.iloc[:, i].values', 'y_pred[:, i]'], 
{}), '(y_test.iloc[:, i].values, y_pred[:, i])\n', (3589, 3629), False, 'from sklearn.metrics import precision_recall_fscore_support, accuracy_score\n'), ((3648, 3723), 'sklearn.metrics.precision_score', 'precision_score', (['y_test.iloc[:, i].values', 'y_pred[:, i]'], {'average': '"""weighted"""'}), "(y_test.iloc[:, i].values, y_pred[:, i], average='weighted')\n", (3663, 3723), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((3740, 3812), 'sklearn.metrics.recall_score', 'recall_score', (['y_test.iloc[:, i].values', 'y_pred[:, i]'], {'average': '"""weighted"""'}), "(y_test.iloc[:, i].values, y_pred[:, i], average='weighted')\n", (3752, 3812), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((3829, 3897), 'sklearn.metrics.f1_score', 'f1_score', (['y_test.iloc[:, i].values', 'y_pred[:, i]'], {'average': '"""weighted"""'}), "(y_test.iloc[:, i].values, y_pred[:, i], average='weighted')\n", (3837, 3897), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((5006, 5030), 'pickle.dump', 'pickle.dump', (['model', 'file'], {}), '(model, file)\n', (5017, 5030), False, 'import pickle\n'), ((5399, 5436), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (5415, 5436), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((2589, 2624), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'tokenizer': 'tokenize'}), '(tokenizer=tokenize)\n', (2604, 2624), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2647, 2665), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2663, 2665), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2706, 2753), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': 
'"""balanced"""'}), "(class_weight='balanced')\n", (2728, 2753), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n')] |
# -*- coding: utf-8 -*-
"""
TODO: separate out the tests and make this file just generate the demo data
"""
import logging
import itertools as it
import numpy as np
import utool as ut
from wbia.algo.graph.state import POSTV, NEGTV, INCMP, UNREV
from wbia.algo.graph.state import SAME, DIFF, NULL # NOQA
# Inject utool's enhanced print / reload (rrr) / profiling helpers into this
# module namespace (standard wbia module boilerplate).
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
def make_dummy_infr(annots_per_name):
    """
    Build an AnnotInference whose name ids follow ``annots_per_name``.

    Each entry of ``annots_per_name`` is the number of annotations assigned
    to that (1-indexed) name; annotation ids are sequential from 0.
    """
    import wbia

    name_ids = []
    for name_id, count in enumerate(annots_per_name, start=1):
        name_ids.extend([name_id] * count)
    annot_ids = range(len(name_ids))
    return wbia.AnnotInference(None, annot_ids, nids=name_ids, autoinit=True, verbose=1)
def demodata_mtest_infr(state='empty'):
    """
    Build an AnnotInference over half of the PZ_MTEST names.

    Names are shuffled deterministically (seed 321) and every other name's
    annotations are used; the inference is reset to ``state``.
    """
    import wbia

    ibs = wbia.opendb(db='PZ_MTEST')
    annots = ibs.annots()
    name_groups = list(annots.group_items(annots.nids).values())
    ut.shuffle(name_groups, rng=321)
    # every other shuffled name goes into the test set
    test_aids = ut.flatten(name_groups[1::2])
    infr = wbia.AnnotInference(ibs, test_aids, autoinit=True)
    infr.reset(state=state)
    return infr
def demodata_infr2(defaultdb='PZ_MTEST'):
    """
    Build a partially reviewed AnnotInference from the PZ_MTEST database.

    The first 20 names are used; names past the fourth get their within-name
    pairs reviewed as positive and one cross-name pair each reviewed negative.
    NOTE: ``defaultdb`` is overwritten to 'PZ_MTEST' (kept for compatibility).
    """
    defaultdb = 'PZ_MTEST'
    import wbia

    ibs = wbia.opendb(defaultdb=defaultdb)
    annots = ibs.annots()
    names = list(annots.group_items(annots.nids).values())[0:20]

    def dummy_phi(c, n):
        # normalized discrete derivative of the saturating curve c*x/(c*x+1)
        xs = np.arange(n)
        curve = c * xs / (c * xs + 1)
        curve = curve / curve.sum()
        return np.diff(curve)

    phis = {c: dummy_phi(c, 30) for c in range(1, 4)}
    aids = ut.flatten(names)
    infr = wbia.AnnotInference(ibs, aids, autoinit=True)
    infr.init_termination_criteria(phis)
    infr.init_refresh_criteria()
    # Partially review: within-name positives for all but the first 4 names
    n1, n2, n3, n4 = names[0:4]
    for name in names[4:]:
        for a, b in ut.itertwo(name.aids):
            infr.add_feedback((a, b), POSTV)
    for name1, name2 in it.combinations(names[4:], 2):
        infr.add_feedback((name1.aids[0], name2.aids[0]), NEGTV)
    return infr
def demo2():
    """
    Run an interactive/scripted demonstration of the graph review loop.

    Builds a small synthetic inference problem, simulates an oracle reviewer
    (with one deliberate early mistake), and optionally renders each review
    step to ~/Desktop/demo as numbered PNGs when --viz is passed.

    CommandLine:
        python -m wbia.algo.graph.demo demo2 --viz
        python -m wbia.algo.graph.demo demo2
    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.graph.demo import * # NOQA
        >>> result = demo2()
        >>> print(result)
    """
    import wbia.plottool as pt
    from wbia.scripts.thesis import TMP_RC
    import matplotlib as mpl

    mpl.rcParams.update(TMP_RC)
    # ---- Synthetic data params
    params = {
        'redun.pos': 2,
        'redun.neg': 2,
    }
    # oracle_accuracy = .98
    # oracle_accuracy = .90
    # oracle_accuracy = (.8, 1.0)
    oracle_accuracy = (0.85, 1.0)
    # oracle_accuracy = 1.0
    # --- draw params (all controlled from the command line)
    VISUALIZE = ut.get_argflag('--viz')
    # QUIT_OR_EMEBED = 'embed'
    QUIT_OR_EMEBED = 'quit'
    TARGET_REVIEW = ut.get_argval('--target', type_=int, default=None)
    START = ut.get_argval('--start', type_=int, default=None)
    END = ut.get_argval('--end', type_=int, default=None)
    # ------------------
    # rng = np.random.RandomState(42)
    # infr = demodata_infr(num_pccs=4, size=3, size_std=1, p_incon=0)
    # infr = demodata_infr(num_pccs=6, size=7, size_std=1, p_incon=0)
    # infr = demodata_infr(num_pccs=3, size=5, size_std=.2, p_incon=0)
    infr = demodata_infr(pcc_sizes=[5, 2, 4])
    infr.verbose = 100
    # apply_dummy_viewpoints(infr)
    # infr.ensure_cliques()
    infr.ensure_cliques()
    infr.ensure_full()
    # infr.apply_edge_truth()
    # Dummy scoring
    infr.init_simulation(oracle_accuracy=oracle_accuracy, name='demo2')
    # infr_gt = infr.copy()
    dpath = ut.ensuredir(ut.truepath('~/Desktop/demo'))
    ut.remove_files_in_dir(dpath)
    fig_counter = it.count(0)

    def show_graph(infr, title, final=False, selected_edges=None):
        # Render the current graph state and save it as the next numbered PNG.
        # No-op unless --viz was passed.
        if not VISUALIZE:
            return
        # TODO: rich colored text?
        latest = '\n'.join(infr.latest_logs())
        showkw = dict(
            # fontsize=infr.graph.graph['fontsize'],
            # fontname=infr.graph.graph['fontname'],
            show_unreviewed_edges=True,
            show_inferred_same=False,
            show_inferred_diff=False,
            outof=(len(infr.aids)),
            # show_inferred_same=True,
            # show_inferred_diff=True,
            selected_edges=selected_edges,
            show_labels=True,
            simple_labels=True,
            # show_recent_review=not final,
            show_recent_review=False,
            # splines=infr.graph.graph['splines'],
            reposition=False,
            # with_colorbar=True
        )
        verbose = infr.verbose
        infr.verbose = 0
        # NOTE(review): infr_ is immediately rebound to infr, so the copy on
        # the previous line looks vestigial — confirm before removing.
        infr_ = infr.copy()
        infr_ = infr
        infr_.verbose = verbose
        infr_.show(pickable=True, verbose=0, **showkw)
        infr.verbose = verbose
        # logger.info('status ' + ut.repr4(infr_.status()))
        # infr.show(**showkw)
        ax = pt.gca()
        pt.set_title(title, fontsize=20)
        fig = pt.gcf()
        fontsize = 22
        if True:
            # postprocess xlabel: shrink the font as the log text grows
            lines = []
            for line in latest.split('\n'):
                if False and line.startswith('ORACLE ERROR'):
                    lines += ['ORACLE ERROR']
                else:
                    lines += [line]
            latest = '\n'.join(lines)
            if len(lines) > 10:
                fontsize = 16
            if len(lines) > 12:
                fontsize = 14
            if len(lines) > 14:
                fontsize = 12
            if len(lines) > 18:
                fontsize = 10
            if len(lines) > 23:
                fontsize = 8
        if True:
            # Reserve the bottom of the figure for the log text as an xlabel.
            pt.adjust_subplots(top=0.95, left=0, right=1, bottom=0.45, fig=fig)
            ax.set_xlabel('\n' + latest)
            xlabel = ax.get_xaxis().get_label()
            xlabel.set_horizontalalignment('left')
            # xlabel.set_x(.025)
            xlabel.set_x(-0.6)
            # xlabel.set_fontname('CMU Typewriter Text')
            xlabel.set_fontname('Inconsolata')
            xlabel.set_fontsize(fontsize)
        ax.set_aspect('equal')
        # ax.xaxis.label.set_color('red')
        from os.path import join

        fpath = join(dpath, 'demo_{:04d}.png'.format(next(fig_counter)))
        fig.savefig(
            fpath,
            dpi=300,
            # transparent=True,
            edgecolor='none',
        )
        # pt.save_figure(dpath=dpath, dpi=300)
        infr.latest_logs()

    if VISUALIZE:
        infr.update_visual_attrs(groupby='name_label')
        infr.set_node_attrs('pin', 'true')
        node_dict = ut.nx_node_dict(infr.graph)
        logger.info(ut.repr4(node_dict[1]))
    if VISUALIZE:
        infr.latest_logs()
    # Pin Nodes into the target groundtruth position
    show_graph(infr, 'target-gt')
    logger.info(ut.repr4(infr.status()))
    # Wipe all review state so the simulation starts from scratch.
    infr.clear_feedback()
    infr.clear_name_labels()
    infr.clear_edges()
    logger.info(ut.repr4(infr.status()))
    infr.latest_logs()
    if VISUALIZE:
        infr.update_visual_attrs()
    infr.prioritize('prob_match')
    if VISUALIZE or TARGET_REVIEW is None or TARGET_REVIEW == 0:
        show_graph(infr, 'initial state')

    def on_new_candidate_edges(infr, edges):
        # hack updateing visual attrs as a callback
        infr.update_visual_attrs()

    infr.on_new_candidate_edges = on_new_candidate_edges
    infr.params.update(**params)
    infr.refresh_candidate_edges()
    VIZ_ALL = VISUALIZE and TARGET_REVIEW is None and START is None
    logger.info('VIZ_ALL = %r' % (VIZ_ALL,))
    if VIZ_ALL or TARGET_REVIEW == 0:
        show_graph(infr, 'find-candidates')
    # _iter2 = enumerate(infr.generate_reviews(**params))
    # _iter2 = list(_iter2)
    # assert len(_iter2) > 0
    # prog = ut.ProgIter(_iter2, label='demo2', bs=False, adjust=False,
    #                    enabled=False)
    count = 1
    first = 1
    # Main simulated review loop: pop prioritized edges, ask the oracle,
    # apply feedback, and optionally render before/after each review.
    for edge, priority in infr._generate_reviews(data=True):
        msg = 'review #%d, priority=%.3f' % (count, priority)
        logger.info('\n----------')
        infr.print('pop edge {} with priority={:.3f}'.format(edge, priority))
        # logger.info('remaining_reviews = %r' % (infr.remaining_reviews()),)
        # Make the next review
        if START is not None:
            VIZ_ALL = count >= START
        if END is not None and count >= END:
            break
        infr.print(msg)
        if ut.allsame(infr.pos_graph.node_labels(*edge)) and first:
            # Have oracle make a mistake early
            feedback = infr.request_oracle_review(edge, accuracy=0)
            first -= 1
        else:
            feedback = infr.request_oracle_review(edge)
        AT_TARGET = TARGET_REVIEW is not None and count >= TARGET_REVIEW - 1
        SHOW_CANDIATE_POP = True
        if SHOW_CANDIATE_POP and (VIZ_ALL or AT_TARGET):
            # import utool
            # utool.embed()
            infr.print(
                ut.repr2(infr.task_probs['match_state'][edge], precision=4, si=True)
            )
            infr.print('len(queue) = %r' % (len(infr.queue)))
            # Show edge selection
            infr.print('Oracle will predict: ' + feedback['evidence_decision'])
            show_graph(infr, 'pre' + msg, selected_edges=[edge])
        if count == TARGET_REVIEW:
            infr.EMBEDME = QUIT_OR_EMEBED == 'embed'
        infr.add_feedback(edge, **feedback)
        infr.print('len(queue) = %r' % (len(infr.queue)))
        # infr.apply_nondynamic_update()
        # Show the result
        if VIZ_ALL or AT_TARGET:
            show_graph(infr, msg)
            # import sys
            # sys.exit(1)
        if count == TARGET_REVIEW:
            break
        count += 1
    infr.print('status = ' + ut.repr4(infr.status(extended=False)))
    show_graph(infr, 'post-review (#reviews={})'.format(count), final=True)
    # ROUND 2 FIGHT
    # if TARGET_REVIEW is None and round2_params is not None:
    #     # HACK TO GET NEW THINGS IN QUEUE
    #     infr.params = round2_params
    #     _iter2 = enumerate(infr.generate_reviews(**params))
    #     prog = ut.ProgIter(_iter2, label='round2', bs=False, adjust=False,
    #                        enabled=False)
    #     for count, (aid1, aid2) in prog:
    #         msg = 'reviewII #%d' % (count)
    #         logger.info('\n----------')
    #         logger.info(msg)
    #         logger.info('remaining_reviews = %r' % (infr.remaining_reviews()),)
    #         # Make the next review evidence_decision
    #         feedback = infr.request_oracle_review(edge)
    #         if count == TARGET_REVIEW:
    #             infr.EMBEDME = QUIT_OR_EMEBED == 'embed'
    #         infr.add_feedback(edge, **feedback)
    #         # Show the result
    #         if PRESHOW or TARGET_REVIEW is None or count >= TARGET_REVIEW - 1:
    #             show_graph(infr, msg)
    #         if count == TARGET_REVIEW:
    #             break
    # show_graph(infr, 'post-re-review', final=True)
    if not getattr(infr, 'EMBEDME', False):
        if ut.get_computer_name().lower() in ['hyrule', 'ooo']:
            pt.all_figures_tile(monitor_num=0, percent_w=0.5)
        else:
            pt.all_figures_tile()
        ut.show_if_requested()
# Discrete viewpoint codes: Left, Front, Right, Back.
valid_views = ['L', 'F', 'R', 'B']
# Map each view to itself and its two circular neighbors; used by
# get_edge_truth to decide whether two annotations are comparable.
adjacent_views = {
    v: [valid_views[(count + i) % len(valid_views)] for i in [-1, 0, 1]]
    for count, v in enumerate(valid_views)
}
def get_edge_truth(infr, n1, n2):
    """
    Return the synthetic truth for a node pair.

    OUTPUT
    2 when the stored viewpoints are not adjacent (incomparable), otherwise
    1 when the original name labels match and 0 when they differ. Missing
    viewpoint attributes are treated as comparable.
    """
    node_dict = ut.nx_node_dict(infr.graph)
    nid1 = node_dict[n1]['orig_name_label']
    nid2 = node_dict[n2]['orig_name_label']
    try:
        view1 = node_dict[n1]['viewpoint']
        view2 = node_dict[n2]['viewpoint']
        comparable = view1 in adjacent_views[view2]
    except KeyError:
        # no viewpoint annotation: assume the pair is comparable
        comparable = True
        # raise
    if not comparable:
        return 2
    return int(nid1 == nid2)
def apply_dummy_viewpoints(infr):
    """
    Assign synthetic 'viewpoint' node attributes to every annotation.

    Views are generated by a small Markov walk that bounces back and forth
    over ``valid_views``; with ``transition_rate`` set to 0 every node in
    practice receives the same view ('L'), but the mechanism is kept for
    experimentation.
    """
    # NOTE: a dead store `transition_rate = 0.5` used to shadow this value;
    # 0 is the value that was actually in effect.
    transition_rate = 0
    valid_views = ['L', 'F', 'R', 'B']
    rng = np.random.RandomState(42)

    class MarkovView(object):
        # Ping-pong walker over valid_views; advances with prob transition_rate.
        def __init__(self):
            self.dir_ = +1
            self.state = 0

        def __call__(self):
            return self.next_state()

        def next_state(self):
            # bounce the direction at either end of the view list
            if self.dir_ == -1 and self.state <= 0:
                self.dir_ = +1
            if self.dir_ == +1 and self.state >= len(valid_views) - 1:
                self.dir_ = -1
            if rng.rand() < transition_rate:
                self.state += self.dir_
            return valid_views[self.state]

    mkv = MarkovView()
    nid_to_aids = ut.group_pairs(
        [(n, d['name_label']) for n, d in infr.graph.nodes(data=True)]
    )
    grouped_nodes = list(nid_to_aids.values())
    node_to_view = {node: mkv() for nodes in grouped_nodes for node in nodes}
    infr.set_node_attrs('viewpoint', node_to_view)
def make_demo_infr(ccs, edges=[], nodes=[], infer=True):
    """
    Build an AnnotInference from explicit positive components and extra edges.

    Each cc in ``ccs`` becomes a path of positively-reviewed edges; ``edges``
    and ``nodes`` are added verbatim. Deprecated in favor of demodata_infr.
    """
    import wbia
    import networkx as nx

    # networkx 1.x exposed add_path as a Graph method rather than a function
    if nx.__version__.startswith('1'):
        nx.add_path = nx.Graph.add_path
    graph = wbia.AnnotInference._graph_cls()
    graph.add_nodes_from(nodes)
    for cc in ccs:
        if len(cc) == 1:
            graph.add_nodes_from(cc)
        nx.add_path(graph, cc, evidence_decision=POSTV, meta_decision=NULL)
    # for edge in edges:
    #     u, v, d = edge if len(edge) == 3 else tuple(edge) + ({},)
    graph.add_edges_from(edges)
    infr = wbia.AnnotInference.from_netx(graph, infer=infer)
    infr.verbose = 3
    infr.relabel_using_reviews(rectify=False)
    infr.graph.graph['dark_background'] = False
    infr.graph.graph['ignore_labels'] = True
    infr.set_node_attrs('width', 40)
    infr.set_node_attrs('height', 40)
    # infr.set_node_attrs('fontsize', fontsize)
    # infr.set_node_attrs('fontname', fontname)
    infr.set_node_attrs('fixed_size', True)
    return infr
@profile
def demodata_infr(**kwargs):
    """
    Generate a synthetic AnnotInference with controllable structure.

    Builds ``num_pccs`` positive connected components (sizes drawn from a
    normal distribution or given explicitly), sprinkles in inconsistent /
    unreviewed / incomparable edges within each PCC, and adds negative /
    incomparable / unreviewed edges between PCCs, all from a fixed RNG seed.

    kwargs = {}
    CommandLine:
        python -m wbia.algo.graph.demo demodata_infr --show
        python -m wbia.algo.graph.demo demodata_infr --num_pccs=25
        python -m wbia.algo.graph.demo demodata_infr --profile --num_pccs=100
    Ignore:
        >>> from wbia.algo.graph.demo import * # NOQA
        >>> from wbia.algo.graph import demo
        >>> import networkx as nx
        >>> kwargs = dict(num_pccs=6, p_incon=.5, size_std=2)
        >>> kwargs = ut.argparse_dict(kwargs)
        >>> infr = demo.demodata_infr(**kwargs)
        >>> pccs = list(infr.positive_components())
        >>> assert len(pccs) == kwargs['num_pccs']
        >>> nonfull_pccs = [cc for cc in pccs if len(cc) > 1 and nx.is_empty(nx.complement(infr.pos_graph.subgraph(cc)))]
        >>> expected_n_incon = len(nonfull_pccs) * kwargs['p_incon']
        >>> n_incon = len(list(infr.inconsistent_components()))
        >>> # TODO can test that we our sample num incon agrees with pop mean
        >>> #sample_mean = n_incon / len(nonfull_pccs)
        >>> #pop_mean = kwargs['p_incon']
        >>> print('status = ' + ut.repr4(infr.status(extended=True)))
        >>> ut.quit_if_noshow()
        >>> infr.show(pickable=True, groupby='name_label')
        >>> ut.show_if_requested()
    Ignore:
        kwargs = {
            'ccs': [[1, 2, 3], [4, 5]]
        }
    """
    import networkx as nx
    import vtool as vt
    from wbia.algo.graph import nx_utils

    def kwalias(*args):
        # Look up the first of several alias keys present in kwargs;
        # the last positional argument is the default.
        params = args[0:-1]
        default = args[-1]
        for key in params:
            if key in kwargs:
                return kwargs[key]
        return default

    num_pccs = kwalias('num_pccs', 16)
    size_mean = kwalias('pcc_size_mean', 'pcc_size', 'size', 5)
    size_std = kwalias('pcc_size_std', 'size_std', 0)
    # p_pcc_incon = kwargs.get('p_incon', .1)
    p_pcc_incon = kwargs.get('p_incon', 0)
    p_pcc_incomp = kwargs.get('p_incomp', 0)
    pcc_sizes = kwalias('pcc_sizes', None)
    pos_redun = kwalias('pos_redun', [1, 2, 3])
    pos_redun = ut.ensure_iterable(pos_redun)
    # number of maximum inconsistent edges per pcc
    max_n_incon = kwargs.get('n_incon', 3)
    # fixed seed: the whole synthetic dataset is deterministic
    rng = np.random.RandomState(0)
    counter = 1
    if pcc_sizes is None:
        pcc_sizes = [
            int(randn(size_mean, size_std, rng=rng, a_min=1)) for _ in range(num_pccs)
        ]
    else:
        num_pccs = len(pcc_sizes)
    if 'ccs' in kwargs:
        # Overwrites other options
        pcc_sizes = list(map(len, kwargs['ccs']))
        num_pccs = len(pcc_sizes)
        size_mean = None
        size_std = 0
    new_ccs = []
    pcc_iter = list(enumerate(pcc_sizes))
    pcc_iter = ut.ProgIter(pcc_iter, enabled=num_pccs > 20, label='make pos-demo')
    for i, size in pcc_iter:
        p = 0.1
        want_connectivity = rng.choice(pos_redun)
        want_connectivity = min(size - 1, want_connectivity)
        # Create basic graph of positive edges with desired connectivity
        g = nx_utils.random_k_edge_connected_graph(
            size, k=want_connectivity, p=p, rng=rng
        )
        nx.set_edge_attributes(g, name='evidence_decision', values=POSTV)
        nx.set_edge_attributes(g, name='truth', values=POSTV)
        # nx.set_node_attributes(g, name='orig_name_label', values=i)
        assert nx.is_connected(g)
        # Relabel graph with non-conflicting names
        if 'ccs' in kwargs:
            g = nx.relabel_nodes(g, dict(enumerate(kwargs['ccs'][i])))
        else:
            # Make sure nodes do not conflict with others
            g = nx.relabel_nodes(g, dict(enumerate(range(counter, len(g) + counter + 1))))
            counter += len(g)
        # The probability any edge is inconsistent is `p_incon`
        # This is 1 - P(all edges consistent)
        # which means p(edge is consistent) = (1 - p_incon) / N
        complement_edges = ut.estarmap(nx_utils.e_, nx_utils.complement_edges(g))
        if len(complement_edges) > 0:
            # compute probability that any particular edge is inconsistent
            # to achieve probability the PCC is inconsistent
            p_edge_inconn = 1 - (1 - p_pcc_incon) ** (1 / len(complement_edges))
            p_edge_unrev = 0.1
            p_edge_notcomp = 1 - (1 - p_pcc_incomp) ** (1 / len(complement_edges))
            probs = np.array([p_edge_inconn, p_edge_unrev, p_edge_notcomp])
            # if the total probability is greater than 1 the parameters
            # are invalid, so we renormalize to "fix" it.
            # if probs.sum() > 1:
            #     warnings.warn('probabilities sum to more than 1')
            #     probs = probs / probs.sum()
            pcumsum = probs.cumsum()
            # Determine which mutually exclusive state each complement edge is in
            # logger.info('pcumsum = %r' % (pcumsum,))
            states = np.searchsorted(pcumsum, rng.rand(len(complement_edges)))
            incon_idxs = np.where(states == 0)[0]
            if len(incon_idxs) > max_n_incon:
                logger.info('max_n_incon = %r' % (max_n_incon,))
                chosen = rng.choice(incon_idxs, max_n_incon, replace=False)
                # push the surplus inconsistent edges into the "skip" state
                states[np.setdiff1d(incon_idxs, chosen)] = len(probs)
            grouped_edges = ut.group_items(complement_edges, states)
            for state, edges in grouped_edges.items():
                truth = POSTV
                if state == 0:
                    # Add in inconsistent edges
                    evidence_decision = NEGTV
                    # TODO: truth could be INCMP or POSTV
                    # new_edges.append((u, v, {'evidence_decision': NEGTV}))
                elif state == 1:
                    evidence_decision = UNREV
                    # TODO: truth could be INCMP or POSTV
                    # new_edges.append((u, v, {'evidence_decision': UNREV}))
                elif state == 2:
                    evidence_decision = INCMP
                    truth = INCMP
                else:
                    continue
                # Add in candidate edges
                attrs = {'evidence_decision': evidence_decision, 'truth': truth}
                for (u, v) in edges:
                    g.add_edge(u, v, **attrs)
        new_ccs.append(g)
        # (list(g.nodes()), new_edges))
    pos_g = nx.union_all(new_ccs)
    assert len(new_ccs) == len(list(nx.connected_components(pos_g)))
    assert num_pccs == len(new_ccs)
    # Add edges between the PCCS
    neg_edges = []
    if not kwalias('ignore_pair', False):
        logger.info('making pairs')
        pair_attrs_lookup = {
            0: {'evidence_decision': NEGTV, 'truth': NEGTV},
            1: {'evidence_decision': INCMP, 'truth': INCMP},
            2: {'evidence_decision': UNREV, 'truth': NEGTV},  # could be incomp or neg
        }
        # These are the probabilities that one edge has this state
        p_pair_neg = kwalias('p_pair_neg', 0.4)
        p_pair_incmp = kwalias('p_pair_incmp', 0.2)
        p_pair_unrev = kwalias('p_pair_unrev', 0)
        # p_pair_neg = 1
        cc_combos = (
            (list(g1.nodes()), list(g2.nodes()))
            for (g1, g2) in it.combinations(new_ccs, 2)
        )
        valid_cc_combos = [(cc1, cc2) for cc1, cc2 in cc_combos if len(cc1) and len(cc2)]
        for cc1, cc2 in ut.ProgIter(valid_cc_combos, label='make neg-demo'):
            possible_edges = ut.estarmap(nx_utils.e_, it.product(cc1, cc2))
            # probability that any edge between these PCCs is negative
            n_edges = len(possible_edges)
            p_edge_neg = 1 - (1 - p_pair_neg) ** (1 / n_edges)
            p_edge_incmp = 1 - (1 - p_pair_incmp) ** (1 / n_edges)
            p_edge_unrev = 1 - (1 - p_pair_unrev) ** (1 / n_edges)
            # Create event space with sizes proportional to probabilities
            pcumsum = np.cumsum([p_edge_neg, p_edge_incmp, p_edge_unrev])
            # Roll dice for each of the edge to see which state it lands on
            possible_pstate = rng.rand(len(possible_edges))
            states = np.searchsorted(pcumsum, possible_pstate)
            # edges whose roll landed past the last state get no edge at all
            flags = states < len(pcumsum)
            stateful_states = states.compress(flags)
            stateful_edges = ut.compress(possible_edges, flags)
            unique_states, groupxs_list = vt.group_indices(stateful_states)
            for state, groupxs in zip(unique_states, groupxs_list):
                # logger.info('state = %r' % (state,))
                # Add in candidate edges
                edges = ut.take(stateful_edges, groupxs)
                attrs = pair_attrs_lookup[state]
                for (u, v) in edges:
                    neg_edges.append((u, v, attrs))
        logger.info('Made {} neg_edges between PCCS'.format(len(neg_edges)))
    else:
        logger.info('ignoring pairs')
    import wbia

    G = wbia.AnnotInference._graph_cls()
    G.add_nodes_from(pos_g.nodes(data=True))
    G.add_edges_from(pos_g.edges(data=True))
    G.add_edges_from(neg_edges)
    infr = wbia.AnnotInference.from_netx(G, infer=kwargs.get('infer', True))
    infr.verbose = 3
    infr.relabel_using_reviews(rectify=False)
    # fontname = 'Ubuntu'
    fontsize = 12
    fontname = 'sans'
    splines = 'spline'
    # splines = 'ortho'
    # splines = 'line'
    infr.set_node_attrs('shape', 'circle')
    infr.graph.graph['ignore_labels'] = True
    infr.graph.graph['dark_background'] = False
    infr.graph.graph['fontname'] = fontname
    infr.graph.graph['fontsize'] = fontsize
    infr.graph.graph['splines'] = splines
    infr.set_node_attrs('width', 29)
    infr.set_node_attrs('height', 29)
    infr.set_node_attrs('fontsize', fontsize)
    infr.set_node_attrs('fontname', fontname)
    infr.set_node_attrs('fixed_size', True)
    # Set synthetic ground-truth attributes for testing
    # infr.apply_edge_truth()
    infr.edge_truth = infr.get_edge_attrs('truth')
    # Make synthetic verif
    infr.dummy_verif = DummyVerif(infr)
    infr.verifiers = {}
    infr.verifiers['match_state'] = infr.dummy_verif
    infr.demokw = kwargs
    return infr
def randn(mean=0, std=1, shape=(), a_max=None, a_min=None, rng=None):
    """
    Draw normally distributed samples, optionally clipped to [a_min, a_max].

    INPUT
    mean, std: parameters of the normal distribution
    shape: output shape; an empty shape returns a float scalar
    a_max, a_min: optional clip bounds (either side may be None)
    rng: a np.random.RandomState; falls back to the global np.random module
    OUTPUT
    float scalar or ndarray of the requested shape
    """
    # () replaces the old mutable-default shape=[]; rng.randn(*()) is identical
    if rng is None:
        rng = np.random
    a = (rng.randn(*shape) * std) + mean
    if a_max is not None or a_min is not None:
        # np.clip tolerates a one-sided bound when the other side is None
        a = np.clip(a, a_min, a_max)
    return a
class DummyVerif(object):
"""
generates dummy scores between edges (not necesarilly in the graph)
CommandLine:
python -m wbia.algo.graph.demo DummyVerif:1
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.graph.demo import * # NOQA
>>> from wbia.algo.graph import demo
>>> import networkx as nx
>>> kwargs = dict(num_pccs=6, p_incon=.5, size_std=2)
>>> infr = demo.demodata_infr(**kwargs)
>>> infr.dummy_verif.predict_edges([(1, 2)])
>>> infr.dummy_verif.predict_edges([(1, 21)])
>>> assert len(infr.dummy_verif.infr.task_probs['match_state']) == 2
"""
def __init__(verif, infr):
verif.rng = np.random.RandomState(4033913)
verif.dummy_params = {
NEGTV: {'mean': 0.2, 'std': 0.25},
POSTV: {'mean': 0.85, 'std': 0.2},
INCMP: {'mean': 0.15, 'std': 0.1},
}
verif.score_dist = randn
verif.infr = infr
verif.orig_nodes = set(infr.aids)
verif.orig_labels = infr.get_node_attrs('orig_name_label')
verif.orig_groups = ut.invert_dict(verif.orig_labels, False)
verif.orig_groups = ut.map_vals(set, verif.orig_groups)
def show_score_probs(verif):
"""
CommandLine:
python -m wbia.algo.graph.demo DummyVerif.show_score_probs --show
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.algo.graph.demo import * # NOQA
>>> import wbia
>>> infr = wbia.AnnotInference(None)
>>> verif = DummyVerif(infr)
>>> verif.show_score_probs()
>>> ut.show_if_requested()
"""
import wbia.plottool as pt
dist = verif.score_dist
n = 100000
for key in verif.dummy_params.keys():
probs = dist(
shape=[n], rng=verif.rng, a_max=1, a_min=0, **verif.dummy_params[key]
)
color = verif.infr._get_truth_colors()[key]
pt.plt.hist(probs, bins=100, label=key, alpha=0.8, color=color)
pt.legend()
    def dummy_ranker(verif, u, K=10):
        """
        simulates the ranking algorithm. Order is defined using the dummy vsone
        scores, but tests are only applied to randomly selected gt and gf
        pairs. So, you usually will get a gt result, but you might not if all
        the scores are bad.

        Returns up to K edges incident to node ``u``, ordered by descending
        dummy match probability.
        """
        infr = verif.infr
        nid = verif.orig_labels[u]
        others = verif.orig_groups[nid]
        # gt = same-name candidates, gf = different-name candidates
        others_gt = sorted(others - {u})
        others_gf = sorted(verif.orig_nodes - others)
        # rng = np.random.RandomState(u + 4110499444 + len(others))
        rng = verif.rng
        vs_list = []
        # sample a handful of same-name and a larger pool of different-name nodes
        k_gt = min(len(others_gt), max(1, K // 2))
        k_gf = min(len(others_gf), max(1, K * 4))
        if k_gt > 0:
            gt = rng.choice(others_gt, k_gt, replace=False)
            vs_list.append(gt)
        if k_gf > 0:
            gf = rng.choice(others_gf, k_gf, replace=False)
            vs_list.append(gf)
        u_edges = [infr.e_(u, v) for v in it.chain.from_iterable(vs_list)]
        u_probs = np.array(infr.dummy_verif.predict_edges(u_edges))
        # infr.set_edge_attrs('prob_match', ut.dzip(u_edges, u_probs))
        # Need to determenistically sort here
        # sortx = np.argsort(u_probs)[::-1][0:K]
        # take the K highest-probability edges
        sortx = np.argsort(u_probs)[::-1][0:K]
        ranked_edges = ut.take(u_edges, sortx)
        # assert len(ranked_edges) == K
        return ranked_edges
def find_candidate_edges(verif, K=10):
    """
    Pool the top-K ranked edges from every graph node into one candidate set.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.graph.demo import *  # NOQA
        >>> from wbia.algo.graph import demo
        >>> import networkx as nx
        >>> kwargs = dict(num_pccs=40, size=2)
        >>> infr = demo.demodata_infr(**kwargs)
        >>> edges = list(infr.dummy_verif.find_candidate_edges(K=100))
        >>> scores = np.array(infr.dummy_verif.predict_edges(edges))
    """
    graph_nodes = list(verif.infr.graph.nodes())
    # Rank edges for each node in turn and deduplicate the union.
    candidate_edges = {
        edge for node in graph_nodes for edge in verif.dummy_ranker(node, K=K)
    }
    # logger.info('new_edges = %r' % (ut.hash_data(new_edges),))
    return candidate_edges
def _get_truth(verif, edge):
    """Return the ground-truth review state for ``edge`` (POSTV or NEGTV)."""
    infr = verif.infr
    # Prefer an explicitly annotated truth value when one exists.
    if edge in infr.edge_truth:
        return infr.edge_truth[edge]
    node_dict = ut.nx_node_dict(infr.graph)
    u, v = edge
    same_name = (
        node_dict[u]['orig_name_label'] == node_dict[v]['orig_name_label']
    )
    return POSTV if same_name else NEGTV
def predict_proba_df(verif, edges):
    """
    Simulate match-state probabilities for a set of edges.

    Returns a pandas DataFrame indexed by (aid1, aid2) with one column per
    match state (POSTV / NEGTV / INCMP).  Probabilities are generated once
    per edge (seeded by ``verif.rng``) and cached in
    ``infr.task_probs['match_state']`` so repeated calls are deterministic.

    CommandLine:
        python -m wbia.algo.graph.demo DummyVerif.predict_edges

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.graph.demo import *  # NOQA
        >>> from wbia.algo.graph import demo
        >>> import networkx as nx
        >>> kwargs = dict(num_pccs=40, size=2)
        >>> infr = demo.demodata_infr(**kwargs)
        >>> verif = infr.dummy_verif
        >>> edges = list(infr.graph.edges())
        >>> probs = verif.predict_proba_df(edges)
        >>> #print('scores = %r' % (scores,))
        >>> #hashid = ut.hash_data(scores)
        >>> #print('hashid = %r' % (hashid,))
        >>> #assert hashid == 'cdlkytilfeqgmtsihvhqwffmhczqmpil'
    """
    infr = verif.infr
    # Canonicalize edge orientation so the cache keys are stable.
    edges = list(it.starmap(verif.infr.e_, edges))
    prob_cache = infr.task_probs['match_state']
    is_miss = np.array([e not in prob_cache for e in edges])
    # is_hit = ~is_miss
    if np.any(is_miss):
        # Only generate probabilities for edges not yet in the cache.
        miss_edges = ut.compress(edges, is_miss)
        miss_truths = [verif._get_truth(edge) for edge in miss_edges]
        grouped_edges = ut.group_items(miss_edges, miss_truths, sorted_=False)
        # Need to make this deterministic too: iterate groups in sorted key
        # order so the RNG is consumed in a reproducible sequence.
        states = [POSTV, NEGTV, INCMP]
        for key in sorted(grouped_edges.keys()):
            group = grouped_edges[key]
            # Primary probability drawn from the truth-state distribution.
            probs0 = randn(
                shape=[len(group)],
                rng=verif.rng,
                a_max=1,
                a_min=0,
                **verif.dummy_params[key],
            )
            # Just randomly assign other probs; the three must sum to 1.
            probs1 = verif.rng.rand(len(group)) * (1 - probs0)
            probs2 = 1 - (probs0 + probs1)
            for edge, probs in zip(group, zip(probs0, probs1, probs2)):
                prob_cache[edge] = ut.dzip(states, probs)
    from wbia.algo.graph import nx_utils as nxu
    import pandas as pd
    # Assemble cached rows into a DataFrame keyed by the annotation pair.
    probs = pd.DataFrame(
        ut.take(prob_cache, edges),
        index=nxu.ensure_multi_index(edges, ('aid1', 'aid2')),
    )
    return probs
def predict_edges(verif, edges):
    """Return the simulated positive-match probability for each edge."""
    return verif.predict_proba_df(edges)[POSTV]
| [
"logging.getLogger",
"numpy.clip",
"wbia.algo.graph.nx_utils.ensure_multi_index",
"utool.truepath",
"wbia.AnnotInference",
"utool.invert_dict",
"numpy.argsort",
"networkx.union_all",
"utool.take",
"networkx.set_edge_attributes",
"numpy.array",
"numpy.cumsum",
"wbia.plottool.all_figures_tile"... | [((327, 347), 'utool.inject2', 'ut.inject2', (['__name__'], {}), '(__name__)\n', (337, 347), True, 'import utool as ut\n'), ((357, 382), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (374, 382), False, 'import logging\n'), ((568, 636), 'wbia.AnnotInference', 'wbia.AnnotInference', (['None', 'aids'], {'nids': 'nids', 'autoinit': '(True)', 'verbose': '(1)'}), '(None, aids, nids=nids, autoinit=True, verbose=1)\n', (587, 636), False, 'import wbia\n'), ((722, 748), 'wbia.opendb', 'wbia.opendb', ([], {'db': '"""PZ_MTEST"""'}), "(db='PZ_MTEST')\n", (733, 748), False, 'import wbia\n'), ((838, 864), 'utool.shuffle', 'ut.shuffle', (['names'], {'rng': '(321)'}), '(names, rng=321)\n', (848, 864), True, 'import utool as ut\n'), ((881, 904), 'utool.flatten', 'ut.flatten', (['names[1::2]'], {}), '(names[1::2])\n', (891, 904), True, 'import utool as ut\n'), ((916, 966), 'wbia.AnnotInference', 'wbia.AnnotInference', (['ibs', 'test_aids'], {'autoinit': '(True)'}), '(ibs, test_aids, autoinit=True)\n', (935, 966), False, 'import wbia\n'), ((1109, 1141), 'wbia.opendb', 'wbia.opendb', ([], {'defaultdb': 'defaultdb'}), '(defaultdb=defaultdb)\n', (1120, 1141), False, 'import wbia\n'), ((1460, 1477), 'utool.flatten', 'ut.flatten', (['names'], {}), '(names)\n', (1470, 1477), True, 'import utool as ut\n'), ((1489, 1534), 'wbia.AnnotInference', 'wbia.AnnotInference', (['ibs', 'aids'], {'autoinit': '(True)'}), '(ibs, aids, autoinit=True)\n', (1508, 1534), False, 'import wbia\n'), ((1805, 1834), 'itertools.combinations', 'it.combinations', (['names[4:]', '(2)'], {}), '(names[4:], 2)\n', (1820, 1834), True, 'import itertools as it\n'), ((2324, 2351), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['TMP_RC'], {}), '(TMP_RC)\n', (2343, 2351), True, 'import matplotlib as mpl\n'), ((2647, 2670), 'utool.get_argflag', 'ut.get_argflag', (['"""--viz"""'], {}), "('--viz')\n", (2661, 2670), True, 'import utool as ut\n'), ((2750, 
2800), 'utool.get_argval', 'ut.get_argval', (['"""--target"""'], {'type_': 'int', 'default': 'None'}), "('--target', type_=int, default=None)\n", (2763, 2800), True, 'import utool as ut\n'), ((2813, 2862), 'utool.get_argval', 'ut.get_argval', (['"""--start"""'], {'type_': 'int', 'default': 'None'}), "('--start', type_=int, default=None)\n", (2826, 2862), True, 'import utool as ut\n'), ((2873, 2920), 'utool.get_argval', 'ut.get_argval', (['"""--end"""'], {'type_': 'int', 'default': 'None'}), "('--end', type_=int, default=None)\n", (2886, 2920), True, 'import utool as ut\n'), ((3592, 3621), 'utool.remove_files_in_dir', 'ut.remove_files_in_dir', (['dpath'], {}), '(dpath)\n', (3614, 3621), True, 'import utool as ut\n'), ((3641, 3652), 'itertools.count', 'it.count', (['(0)'], {}), '(0)\n', (3649, 3652), True, 'import itertools as it\n'), ((11419, 11446), 'utool.nx_node_dict', 'ut.nx_node_dict', (['infr.graph'], {}), '(infr.graph)\n', (11434, 11446), True, 'import utool as ut\n'), ((11980, 12005), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (12001, 12005), True, 'import numpy as np\n'), ((13006, 13036), 'networkx.__version__.startswith', 'nx.__version__.startswith', (['"""1"""'], {}), "('1')\n", (13031, 13036), True, 'import networkx as nx\n'), ((13087, 13119), 'wbia.AnnotInference._graph_cls', 'wbia.AnnotInference._graph_cls', ([], {}), '()\n', (13117, 13119), False, 'import wbia\n'), ((13431, 13476), 'wbia.AnnotInference.from_netx', 'wbia.AnnotInference.from_netx', (['G'], {'infer': 'infer'}), '(G, infer=infer)\n', (13460, 13476), False, 'import wbia\n'), ((15963, 15992), 'utool.ensure_iterable', 'ut.ensure_iterable', (['pos_redun'], {}), '(pos_redun)\n', (15981, 15992), True, 'import utool as ut\n'), ((16099, 16123), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (16120, 16123), True, 'import numpy as np\n'), ((16595, 16662), 'utool.ProgIter', 'ut.ProgIter', (['pcc_iter'], {'enabled': '(num_pccs > 20)', 
'label': '"""make pos-demo"""'}), "(pcc_iter, enabled=num_pccs > 20, label='make pos-demo')\n", (16606, 16662), True, 'import utool as ut\n'), ((20209, 20230), 'networkx.union_all', 'nx.union_all', (['new_ccs'], {}), '(new_ccs)\n', (20221, 20230), True, 'import networkx as nx\n'), ((22747, 22779), 'wbia.AnnotInference._graph_cls', 'wbia.AnnotInference._graph_cls', ([], {}), '()\n', (22777, 22779), False, 'import wbia\n'), ((1271, 1283), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1280, 1283), True, 'import numpy as np\n'), ((1362, 1374), 'numpy.diff', 'np.diff', (['phi'], {}), '(phi)\n', (1369, 1374), True, 'import numpy as np\n'), ((1712, 1733), 'utool.itertwo', 'ut.itertwo', (['name.aids'], {}), '(name.aids)\n', (1722, 1733), True, 'import utool as ut\n'), ((3557, 3586), 'utool.truepath', 'ut.truepath', (['"""~/Desktop/demo"""'], {}), "('~/Desktop/demo')\n", (3568, 3586), True, 'import utool as ut\n'), ((4844, 4852), 'wbia.plottool.gca', 'pt.gca', ([], {}), '()\n', (4850, 4852), True, 'import wbia.plottool as pt\n'), ((4861, 4893), 'wbia.plottool.set_title', 'pt.set_title', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (4873, 4893), True, 'import wbia.plottool as pt\n'), ((4908, 4916), 'wbia.plottool.gcf', 'pt.gcf', ([], {}), '()\n', (4914, 4916), True, 'import wbia.plottool as pt\n'), ((6545, 6572), 'utool.nx_node_dict', 'ut.nx_node_dict', (['infr.graph'], {}), '(infr.graph)\n', (6560, 6572), True, 'import utool as ut\n'), ((11170, 11192), 'utool.show_if_requested', 'ut.show_if_requested', ([], {}), '()\n', (11190, 11192), True, 'import utool as ut\n'), ((13233, 13296), 'networkx.add_path', 'nx.add_path', (['G', 'cc'], {'evidence_decision': 'POSTV', 'meta_decision': 'NULL'}), '(G, cc, evidence_decision=POSTV, meta_decision=NULL)\n', (13244, 13296), True, 'import networkx as nx\n'), ((16905, 16984), 'wbia.algo.graph.nx_utils.random_k_edge_connected_graph', 'nx_utils.random_k_edge_connected_graph', (['size'], {'k': 'want_connectivity', 'p': 
'p', 'rng': 'rng'}), '(size, k=want_connectivity, p=p, rng=rng)\n', (16943, 16984), False, 'from wbia.algo.graph import nx_utils\n'), ((17015, 17080), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['g'], {'name': '"""evidence_decision"""', 'values': 'POSTV'}), "(g, name='evidence_decision', values=POSTV)\n", (17037, 17080), True, 'import networkx as nx\n'), ((17089, 17142), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['g'], {'name': '"""truth"""', 'values': 'POSTV'}), "(g, name='truth', values=POSTV)\n", (17111, 17142), True, 'import networkx as nx\n'), ((17228, 17246), 'networkx.is_connected', 'nx.is_connected', (['g'], {}), '(g)\n', (17243, 17246), True, 'import networkx as nx\n'), ((21213, 21264), 'utool.ProgIter', 'ut.ProgIter', (['valid_cc_combos'], {'label': '"""make neg-demo"""'}), "(valid_cc_combos, label='make neg-demo')\n", (21224, 21264), True, 'import utool as ut\n'), ((24156, 24180), 'numpy.clip', 'np.clip', (['a', 'a_min', 'a_max'], {}), '(a, a_min, a_max)\n', (24163, 24180), True, 'import numpy as np\n'), ((24903, 24933), 'numpy.random.RandomState', 'np.random.RandomState', (['(4033913)'], {}), '(4033913)\n', (24924, 24933), True, 'import numpy as np\n'), ((25313, 25353), 'utool.invert_dict', 'ut.invert_dict', (['verif.orig_labels', '(False)'], {}), '(verif.orig_labels, False)\n', (25327, 25353), True, 'import utool as ut\n'), ((25382, 25417), 'utool.map_vals', 'ut.map_vals', (['set', 'verif.orig_groups'], {}), '(set, verif.orig_groups)\n', (25393, 25417), True, 'import utool as ut\n'), ((26282, 26293), 'wbia.plottool.legend', 'pt.legend', ([], {}), '()\n', (26291, 26293), True, 'import wbia.plottool as pt\n'), ((27636, 27659), 'utool.take', 'ut.take', (['u_edges', 'sortx'], {}), '(u_edges, sortx)\n', (27643, 27659), True, 'import utool as ut\n'), ((28680, 28707), 'utool.nx_node_dict', 'ut.nx_node_dict', (['infr.graph'], {}), '(infr.graph)\n', (28695, 28707), True, 'import utool as ut\n'), ((29827, 29875), 'numpy.array', 
'np.array', (['[(e not in prob_cache) for e in edges]'], {}), '([(e not in prob_cache) for e in edges])\n', (29835, 29875), True, 'import numpy as np\n'), ((29913, 29928), 'numpy.any', 'np.any', (['is_miss'], {}), '(is_miss)\n', (29919, 29928), True, 'import numpy as np\n'), ((5600, 5667), 'wbia.plottool.adjust_subplots', 'pt.adjust_subplots', ([], {'top': '(0.95)', 'left': '(0)', 'right': '(1)', 'bottom': '(0.45)', 'fig': 'fig'}), '(top=0.95, left=0, right=1, bottom=0.45, fig=fig)\n', (5618, 5667), True, 'import wbia.plottool as pt\n'), ((6593, 6615), 'utool.repr4', 'ut.repr4', (['node_dict[1]'], {}), '(node_dict[1])\n', (6601, 6615), True, 'import utool as ut\n'), ((11064, 11113), 'wbia.plottool.all_figures_tile', 'pt.all_figures_tile', ([], {'monitor_num': '(0)', 'percent_w': '(0.5)'}), '(monitor_num=0, percent_w=0.5)\n', (11083, 11113), True, 'import wbia.plottool as pt\n'), ((11140, 11161), 'wbia.plottool.all_figures_tile', 'pt.all_figures_tile', ([], {}), '()\n', (11159, 11161), True, 'import wbia.plottool as pt\n'), ((17818, 17846), 'wbia.algo.graph.nx_utils.complement_edges', 'nx_utils.complement_edges', (['g'], {}), '(g)\n', (17843, 17846), False, 'from wbia.algo.graph import nx_utils\n'), ((18237, 18292), 'numpy.array', 'np.array', (['[p_edge_inconn, p_edge_unrev, p_edge_notcomp]'], {}), '([p_edge_inconn, p_edge_unrev, p_edge_notcomp])\n', (18245, 18292), True, 'import numpy as np\n'), ((19161, 19201), 'utool.group_items', 'ut.group_items', (['complement_edges', 'states'], {}), '(complement_edges, states)\n', (19175, 19201), True, 'import utool as ut\n'), ((21749, 21800), 'numpy.cumsum', 'np.cumsum', (['[p_edge_neg, p_edge_incmp, p_edge_unrev]'], {}), '([p_edge_neg, p_edge_incmp, p_edge_unrev])\n', (21758, 21800), True, 'import numpy as np\n'), ((21958, 21999), 'numpy.searchsorted', 'np.searchsorted', (['pcumsum', 'possible_pstate'], {}), '(pcumsum, possible_pstate)\n', (21973, 21999), True, 'import numpy as np\n'), ((22125, 22159), 'utool.compress', 
'ut.compress', (['possible_edges', 'flags'], {}), '(possible_edges, flags)\n', (22136, 22159), True, 'import utool as ut\n'), ((22203, 22236), 'vtool.group_indices', 'vt.group_indices', (['stateful_states'], {}), '(stateful_states)\n', (22219, 22236), True, 'import vtool as vt\n'), ((26210, 26273), 'wbia.plottool.plt.hist', 'pt.plt.hist', (['probs'], {'bins': '(100)', 'label': 'key', 'alpha': '(0.8)', 'color': 'color'}), '(probs, bins=100, label=key, alpha=0.8, color=color)\n', (26221, 26273), True, 'import wbia.plottool as pt\n'), ((29723, 29755), 'itertools.starmap', 'it.starmap', (['verif.infr.e_', 'edges'], {}), '(verif.infr.e_, edges)\n', (29733, 29755), True, 'import itertools as it\n'), ((29955, 29982), 'utool.compress', 'ut.compress', (['edges', 'is_miss'], {}), '(edges, is_miss)\n', (29966, 29982), True, 'import utool as ut\n'), ((30085, 30139), 'utool.group_items', 'ut.group_items', (['miss_edges', 'miss_truths'], {'sorted_': '(False)'}), '(miss_edges, miss_truths, sorted_=False)\n', (30099, 30139), True, 'import utool as ut\n'), ((30986, 31012), 'utool.take', 'ut.take', (['prob_cache', 'edges'], {}), '(prob_cache, edges)\n', (30993, 31012), True, 'import utool as ut\n'), ((8895, 8963), 'utool.repr2', 'ut.repr2', (["infr.task_probs['match_state'][edge]"], {'precision': '(4)', 'si': '(True)'}), "(infr.task_probs['match_state'][edge], precision=4, si=True)\n", (8903, 8963), True, 'import utool as ut\n'), ((18850, 18871), 'numpy.where', 'np.where', (['(states == 0)'], {}), '(states == 0)\n', (18858, 18871), True, 'import numpy as np\n'), ((20267, 20297), 'networkx.connected_components', 'nx.connected_components', (['pos_g'], {}), '(pos_g)\n', (20290, 20297), True, 'import networkx as nx\n'), ((21061, 21088), 'itertools.combinations', 'it.combinations', (['new_ccs', '(2)'], {}), '(new_ccs, 2)\n', (21076, 21088), True, 'import itertools as it\n'), ((21320, 21340), 'itertools.product', 'it.product', (['cc1', 'cc2'], {}), '(cc1, cc2)\n', (21330, 21340), True, 
'import itertools as it\n'), ((22425, 22457), 'utool.take', 'ut.take', (['stateful_edges', 'groupxs'], {}), '(stateful_edges, groupxs)\n', (22432, 22457), True, 'import utool as ut\n'), ((27297, 27328), 'itertools.chain.from_iterable', 'it.chain.from_iterable', (['vs_list'], {}), '(vs_list)\n', (27319, 27328), True, 'import itertools as it\n'), ((27582, 27601), 'numpy.argsort', 'np.argsort', (['u_probs'], {}), '(u_probs)\n', (27592, 27601), True, 'import numpy as np\n'), ((31032, 31079), 'wbia.algo.graph.nx_utils.ensure_multi_index', 'nxu.ensure_multi_index', (['edges', "('aid1', 'aid2')"], {}), "(edges, ('aid1', 'aid2'))\n", (31054, 31079), True, 'from wbia.algo.graph import nx_utils as nxu\n'), ((10999, 11021), 'utool.get_computer_name', 'ut.get_computer_name', ([], {}), '()\n', (11019, 11021), True, 'import utool as ut\n'), ((19085, 19117), 'numpy.setdiff1d', 'np.setdiff1d', (['incon_idxs', 'chosen'], {}), '(incon_idxs, chosen)\n', (19097, 19117), True, 'import numpy as np\n'), ((30839, 30861), 'utool.dzip', 'ut.dzip', (['states', 'probs'], {}), '(states, probs)\n', (30846, 30861), True, 'import utool as ut\n')] |
## @package device_checker
# Module caffe2.python.device_checker
import numpy as np
import copy
from caffe2.python import workspace
from caffe2.python.core import InferOpBlobDevicesAsDict
from future.utils import viewitems
class DeviceChecker(object):
    """A device checker in Python to check consistency across multiple devices.

    This is not the most efficient way to check devices, as the Python interface
    will involve a lot of copies back and forth operations. Use at your own risk.
    """

    def __init__(self, threshold, device_options):
        """
        Inputs:
            threshold: numeric tolerance passed to np.allclose (used as both
                atol and rtol) when comparing outputs across devices.
            device_options: list of DeviceOption protos, one per device to run.
        """
        self._threshold = threshold
        self._device_options = device_options

    def CheckSimple(self, op, inputs, outputs_to_check,
                    input_device_options=None):
        """Checks the operator with different device implementations.
        Inputs:
            op: the operator to be checked.
            inputs: the input data in numpy arrays.
            outputs_to_check: the outputs to check between devices.
            input_device_options: a mapping from input name to a device to use
              (instead of self._device_options)
        Outputs:
            boolean: True if it passes, False if it does not pass.
        """
        # Deep-copy so the caller's operator proto is not mutated when we
        # rewrite its device_option below.
        op = copy.deepcopy(op)
        # Entering the checker workspace
        old_ws_name = workspace.CurrentWorkspace()
        results = []
        workspace.SwitchWorkspace("_device_check_", True)
        for i, device_option in enumerate(self._device_options):
            op.device_option.CopyFrom(device_option)
            _input_device_options = input_device_options or \
                InferOpBlobDevicesAsDict(op)[0]
            # Fix: use a distinct loop variable here; the original code reused
            # `i`, shadowing the outer device-option index.
            for input_idx, arr in enumerate(inputs):
                workspace.FeedBlob(
                    op.input[input_idx], np.array(arr),
                    _input_device_options.get(op.input[input_idx],
                                              device_option)
                )
            workspace.RunOperatorOnce(op)
            results.append(
                [workspace.FetchBlob(op.output[idx])
                 for idx in outputs_to_check])
            # Everything is done, reset the workspace.
            workspace.ResetWorkspace()
        # After running on all devices, check correctness against device 0.
        success = True
        for i in range(1, len(self._device_options)):
            for j in range(len(outputs_to_check)):
                x = results[i][j]
                y = results[0][j]
                if not np.allclose(x, y,
                                   atol=self._threshold, rtol=self._threshold):
                    print('Failure in checking device option {}'
                          ' and output {}. The outputs are:'
                          .format(i, op.output[outputs_to_check[j]]))
                    print(x.flatten())
                    print(y.flatten())
                    print(np.max(np.abs(x - y)))
                    success = False
                # else:
                #     print ('Passed device pair (0, %d), %s %s' %
                #            (i, outputs_to_check[j], y.shape))
        workspace.SwitchWorkspace(old_ws_name)
        return success

    def CheckNet(self, net, inputs=None, blobs_to_check=None, ignore=None):
        """Checks a network by inspecting all of its intermediate results, and
        see if things match.
        """
        if inputs is None:
            inputs = {}
        if ignore is None:
            ignore = set()
        old_ws_name = workspace.CurrentWorkspace()
        results = []
        if blobs_to_check is None:
            # Default: check every output of every op in the net.
            blobs_to_check = sum([list(op.output) for op in net.op], [])
        blobs_to_check = [b for b in blobs_to_check if b not in ignore]
        workspace.SwitchWorkspace("_device_check_", True)
        for device_option in self._device_options:
            for name, arr in viewitems(inputs):
                # print 'feeding', name
                workspace.FeedBlob(name, arr, device_option)
            for op in net.op:
                op.device_option.CopyFrom(device_option)
            workspace.RunNetOnce(net)
            results.append(
                [workspace.FetchBlob(name) for name in blobs_to_check]
            )
        # After running on all devices, check correctness against device 0.
        success = True
        for i in range(1, len(results)):
            for j in range(len(blobs_to_check)):
                x = results[i][j]
                y = results[0][j]
                if not np.allclose(x, y,
                                   atol=self._threshold, rtol=self._threshold):
                    print('Failure in checking device option {}'
                          ' and output {}. The outputs are:'
                          .format(i, blobs_to_check[j]))
                    print(x.flatten())
                    print(y.flatten())
                    print(np.max(np.abs(x - y)))
                    success = False
                # else:
                #     print ('Passed device pair (%d, %d), %s %s: %s' %
                #            (i, j, blobs_to_check[j], y.shape,
                #             str(y.flatten())))
        workspace.SwitchWorkspace(old_ws_name)
        return success
| [
"caffe2.python.workspace.ResetWorkspace",
"numpy.abs",
"numpy.allclose",
"caffe2.python.workspace.RunOperatorOnce",
"caffe2.python.workspace.SwitchWorkspace",
"caffe2.python.workspace.FetchBlob",
"caffe2.python.workspace.RunNetOnce",
"numpy.array",
"caffe2.python.workspace.CurrentWorkspace",
"futu... | [((1220, 1237), 'copy.deepcopy', 'copy.deepcopy', (['op'], {}), '(op)\n', (1233, 1237), False, 'import copy\n'), ((1301, 1329), 'caffe2.python.workspace.CurrentWorkspace', 'workspace.CurrentWorkspace', ([], {}), '()\n', (1327, 1329), False, 'from caffe2.python import workspace\n'), ((1359, 1408), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['"""_device_check_"""', '(True)'], {}), "('_device_check_', True)\n", (1384, 1408), False, 'from caffe2.python import workspace\n'), ((3060, 3098), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['old_ws_name'], {}), '(old_ws_name)\n', (3085, 3098), False, 'from caffe2.python import workspace\n'), ((3446, 3474), 'caffe2.python.workspace.CurrentWorkspace', 'workspace.CurrentWorkspace', ([], {}), '()\n', (3472, 3474), False, 'from caffe2.python import workspace\n'), ((3684, 3733), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['"""_device_check_"""', '(True)'], {}), "('_device_check_', True)\n", (3709, 3733), False, 'from caffe2.python import workspace\n'), ((5095, 5133), 'caffe2.python.workspace.SwitchWorkspace', 'workspace.SwitchWorkspace', (['old_ws_name'], {}), '(old_ws_name)\n', (5120, 5133), False, 'from caffe2.python import workspace\n'), ((1911, 1940), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', (['op'], {}), '(op)\n', (1936, 1940), False, 'from caffe2.python import workspace\n'), ((2136, 2162), 'caffe2.python.workspace.ResetWorkspace', 'workspace.ResetWorkspace', ([], {}), '()\n', (2160, 2162), False, 'from caffe2.python import workspace\n'), ((3814, 3831), 'future.utils.viewitems', 'viewitems', (['inputs'], {}), '(inputs)\n', (3823, 3831), False, 'from future.utils import viewitems\n'), ((4033, 4058), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', (['net'], {}), '(net)\n', (4053, 4058), False, 'from caffe2.python import workspace\n'), ((3889, 3933), 'caffe2.python.workspace.FeedBlob', 
'workspace.FeedBlob', (['name', 'arr', 'device_option'], {}), '(name, arr, device_option)\n', (3907, 3933), False, 'from caffe2.python import workspace\n'), ((1605, 1633), 'caffe2.python.core.InferOpBlobDevicesAsDict', 'InferOpBlobDevicesAsDict', (['op'], {}), '(op)\n', (1629, 1633), False, 'from caffe2.python.core import InferOpBlobDevicesAsDict\n'), ((1792, 1805), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (1800, 1805), True, 'import numpy as np\n'), ((1986, 2021), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['op.output[idx]'], {}), '(op.output[idx])\n', (2005, 2021), False, 'from caffe2.python import workspace\n'), ((2440, 2501), 'numpy.allclose', 'np.allclose', (['x', 'y'], {'atol': 'self._threshold', 'rtol': 'self._threshold'}), '(x, y, atol=self._threshold, rtol=self._threshold)\n', (2451, 2501), True, 'import numpy as np\n'), ((4104, 4129), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', (['name'], {}), '(name)\n', (4123, 4129), False, 'from caffe2.python import workspace\n'), ((4434, 4495), 'numpy.allclose', 'np.allclose', (['x', 'y'], {'atol': 'self._threshold', 'rtol': 'self._threshold'}), '(x, y, atol=self._threshold, rtol=self._threshold)\n', (4445, 4495), True, 'import numpy as np\n'), ((2845, 2858), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (2851, 2858), True, 'import numpy as np\n'), ((4826, 4839), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (4832, 4839), True, 'import numpy as np\n')] |
from functools import partial
import math
import os
from tempfile import TemporaryDirectory
from pymbtiles import MBtiles
import rasterio
import numpy as np
from tilecutter.rgb import hex_to_rgb
from tilecutter.png import to_smallest_png, to_paletted_png
from tilecutter.tiles import read_tiles
from tilecutter.raster import get_mbtiles_meta, to_indexed_tif
def tif_to_mbtiles(
    infilename,
    outfilename,
    min_zoom,
    max_zoom,
    tile_size=256,
    metadata=None,
    tile_renderer=to_smallest_png,
    resampling="nearest",
):
    """Convert a tif to mbtiles, rendering each tile using tile_renderer.

    By default, this renders tiles as data using the smallest PNG image type
    for the data type of infilename.

    Parameters
    ----------
    infilename : path to input GeoTIFF file
    outfilename : path to output mbtiles file
    min_zoom : int
    max_zoom : int
    tile_size : int, optional (default: 256)
    metadata : dict, optional
        metadata dictionary to add to the mbtiles metadata
    tile_renderer : function, optional (default: to_smallest_png)
        function that takes as input the data array for the tile and returns
        a PNG or None
    resampling : str, optional (default 'nearest')
        Must be a supported value of rasterio.enums.Resampling
    """
    with rasterio.Env():
        with rasterio.open(infilename) as src:
            with MBtiles(outfilename, mode="w") as mbtiles:
                # Base tileset metadata, extended from the raster itself and
                # any caller-supplied entries.
                tileset_meta = {
                    "tilejson": "2.0.0",
                    "version": "1.0.0",
                    "minzoom": min_zoom,
                    "maxzoom": max_zoom,
                }
                tileset_meta.update(get_mbtiles_meta(src, min_zoom))
                if metadata is not None:
                    tileset_meta.update(metadata)
                mbtiles.meta = tileset_meta

                tiles = read_tiles(
                    src,
                    min_zoom=min_zoom,
                    max_zoom=max_zoom,
                    tile_size=tile_size,
                    resampling=resampling,
                )
                for tile, data in tiles:
                    # Skip empty / all-nodata tiles entirely.
                    if data is None or np.all(data == src.nodata):
                        continue
                    png = tile_renderer(data)
                    if png is None:
                        continue
                    # flip tile Y to match xyz scheme
                    flipped_y = (2 ** tile.z) - tile.y - 1
                    mbtiles.write_tile(tile.z, tile.x, flipped_y, png)
def render_tif_to_mbtiles(
    infilename,
    outfilename,
    colormap,
    min_zoom,
    max_zoom,
    metadata=None,
    tile_size=256,
    resampling="nearest",
):
    """Convert a tif to mbtiles, rendered according to the colormap.

    The tif is first converted into an indexed image that matches the number
    of colors in the colormap, and all values not in the colormap are masked
    out.

    Parameters
    ----------
    infilename : path to input GeoTIFF file
    outfilename : path to output mbtiles file
    colormap : dict of values to hex color codes
    min_zoom : int
    max_zoom : int
    metadata : dict, optional
        metadata dictionary to add to the mbtiles metadata
    tile_size : int, optional (default: 256)
    resampling : str, optional (default 'nearest')
        Must be a supported value of rasterio.enums.Resampling
    """
    # palette is created as a series of r,g,b values. Positions correspond to
    # the index of each value in the image.
    values = sorted(colormap.keys())
    palette = np.array([hex_to_rgb(colormap[value]) for value in values], dtype="uint8")
    with TemporaryDirectory() as tmpdir:
        with rasterio.open(infilename) as src:
            if src.count > 1:
                raise ValueError("tif must be single band")

            # Convert the image to indexed, if necessary
            print("Inspecting unique values")
            nodata = src.nodatavals[0]
            data = src.read(1)
            unique_values = np.unique(data[data != nodata])

            if len(set(unique_values).difference(values)):
                # convert the image to indexed
                print("Converting tif to indexed tif")
                indexedfilename = os.path.join(tmpdir, "indexed.tif")
                to_indexed_tif(infilename, indexedfilename, values)
            else:
                # Already restricted to the colormap values; use as-is.
                indexedfilename = infilename

            # Render each tile through the palette; nodata pixels stay masked.
            paletted_renderer = partial(to_paletted_png, palette=palette, nodata=src.nodata)
            tif_to_mbtiles(
                indexedfilename,
                outfilename,
                min_zoom,
                max_zoom,
                tile_size,
                metadata=metadata,
                tile_renderer=paletted_renderer,
                resampling=resampling,
            )
| [
"tempfile.TemporaryDirectory",
"pymbtiles.MBtiles",
"tilecutter.raster.to_indexed_tif",
"numpy.all",
"numpy.unique",
"math.pow",
"rasterio.open",
"tilecutter.raster.get_mbtiles_meta",
"rasterio.Env",
"os.path.join",
"functools.partial",
"tilecutter.tiles.read_tiles",
"tilecutter.rgb.hex_to_r... | [((1316, 1330), 'rasterio.Env', 'rasterio.Env', ([], {}), '()\n', (1328, 1330), False, 'import rasterio\n'), ((3735, 3755), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (3753, 3755), False, 'from tempfile import TemporaryDirectory\n'), ((4531, 4591), 'functools.partial', 'partial', (['to_paletted_png'], {'palette': 'palette', 'nodata': 'src.nodata'}), '(to_paletted_png, palette=palette, nodata=src.nodata)\n', (4538, 4591), False, 'from functools import partial\n'), ((1352, 1377), 'rasterio.open', 'rasterio.open', (['infilename'], {}), '(infilename)\n', (1365, 1377), False, 'import rasterio\n'), ((3660, 3687), 'tilecutter.rgb.hex_to_rgb', 'hex_to_rgb', (['colormap[value]'], {}), '(colormap[value])\n', (3670, 3687), False, 'from tilecutter.rgb import hex_to_rgb\n'), ((3780, 3805), 'rasterio.open', 'rasterio.open', (['infilename'], {}), '(infilename)\n', (3793, 3805), False, 'import rasterio\n'), ((4106, 4137), 'numpy.unique', 'np.unique', (['data[data != nodata]'], {}), '(data[data != nodata])\n', (4115, 4137), True, 'import numpy as np\n'), ((1403, 1433), 'pymbtiles.MBtiles', 'MBtiles', (['outfilename'], {'mode': '"""w"""'}), "(outfilename, mode='w')\n", (1410, 1433), False, 'from pymbtiles import MBtiles\n'), ((1869, 1970), 'tilecutter.tiles.read_tiles', 'read_tiles', (['src'], {'min_zoom': 'min_zoom', 'max_zoom': 'max_zoom', 'tile_size': 'tile_size', 'resampling': 'resampling'}), '(src, min_zoom=min_zoom, max_zoom=max_zoom, tile_size=tile_size,\n resampling=resampling)\n', (1879, 1970), False, 'from tilecutter.tiles import read_tiles\n'), ((4334, 4369), 'os.path.join', 'os.path.join', (['tmpdir', '"""indexed.tif"""'], {}), "(tmpdir, 'indexed.tif')\n", (4346, 4369), False, 'import os\n'), ((4386, 4437), 'tilecutter.raster.to_indexed_tif', 'to_indexed_tif', (['infilename', 'indexedfilename', 'values'], {}), '(infilename, indexedfilename, values)\n', (4400, 4437), False, 'from tilecutter.raster import 
get_mbtiles_meta, to_indexed_tif\n'), ((1680, 1711), 'tilecutter.raster.get_mbtiles_meta', 'get_mbtiles_meta', (['src', 'min_zoom'], {}), '(src, min_zoom)\n', (1696, 1711), False, 'from tilecutter.raster import get_mbtiles_meta, to_indexed_tif\n'), ((2191, 2217), 'numpy.all', 'np.all', (['(data == src.nodata)'], {}), '(data == src.nodata)\n', (2197, 2217), True, 'import numpy as np\n'), ((2442, 2461), 'math.pow', 'math.pow', (['(2)', 'tile.z'], {}), '(2, tile.z)\n', (2450, 2461), False, 'import math\n')] |
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import constants
def evaluate_next_state_1nn(dataset, next_state_predictions):
    """Score next-state predictions with a 1-nearest-neighbor classifier.

    Fits 1-NN on the dataset embeddings / state labels, predicts a label for
    every predicted next-state embedding, and reports per-class accuracy plus
    the balanced (macro-averaged) accuracy.  Class -1 is skipped, and terminal
    transitions (DONES) are excluded from every class mask.
    """
    classifier = KNeighborsClassifier(n_neighbors=1)
    classifier.fit(dataset[constants.EMBEDDINGS], dataset[constants.STATE_LABELS])
    predicted_labels = classifier.predict(next_state_predictions)

    true_labels = dataset[constants.NEXT_STATE_LABELS]
    not_done = np.logical_not(dataset[constants.DONES])

    per_class_accuracies = []
    for state in np.unique(true_labels):
        if state == -1:
            # -1 marks a missing / invalid next state.
            continue
        selected = np.logical_and(true_labels == state, not_done)
        class_accuracy = np.mean(predicted_labels[selected] == true_labels[selected])
        per_class_accuracies.append(class_accuracy)

    balanced_accuracy = np.mean(per_class_accuracies)
    return per_class_accuracies, balanced_accuracy
def get_perplexities(log_distribution):
    """Compute the per-row perplexity of a batch of log-probability vectors.

    Perplexity is ``2 ** H(p)`` where ``H`` is the Shannon entropy in bits,
    i.e. ``2 ** (-sum_i p_i * log2(p_i))`` computed along axis 1.
    """
    probabilities = np.exp(log_distribution)
    # Entropy in nats, converted to bits by dividing by ln(2).
    entropy_bits = -np.sum(probabilities * log_distribution, axis=1) / np.log(2)
    return 2 ** entropy_bits
| [
"numpy.mean",
"numpy.unique",
"numpy.logical_not",
"numpy.log",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.exp"
] | [((161, 196), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (181, 196), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((368, 415), 'numpy.unique', 'np.unique', (['dataset[constants.NEXT_STATE_LABELS]'], {}), '(dataset[constants.NEXT_STATE_LABELS])\n', (377, 415), True, 'import numpy as np\n'), ((771, 794), 'numpy.mean', 'np.mean', (['cls_accuracies'], {}), '(cls_accuracies)\n', (778, 794), True, 'import numpy as np\n'), ((624, 699), 'numpy.mean', 'np.mean', (['(nearest_labels[mask] == dataset[constants.NEXT_STATE_LABELS][mask])'], {}), '(nearest_labels[mask] == dataset[constants.NEXT_STATE_LABELS][mask])\n', (631, 699), True, 'import numpy as np\n'), ((915, 924), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (921, 924), True, 'import numpy as np\n'), ((550, 590), 'numpy.logical_not', 'np.logical_not', (['dataset[constants.DONES]'], {}), '(dataset[constants.DONES])\n', (564, 590), True, 'import numpy as np\n'), ((951, 975), 'numpy.exp', 'np.exp', (['log_distribution'], {}), '(log_distribution)\n', (957, 975), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 10:13:41 2017
@author: <NAME> (Weizmann Institute of Science)
@details: An example for a simple run of the simulation code
Physical Parameters:
G: gravity constant
m1: mass of body no. 1
m2: mass of body no. 2
m3: mass of body no. 3
a: inner semi major axis
e: inner eccentricity
M0_in: inner mean anomaly
M0_out: outer mean anomaly
inclination: mutual inclination
Omega: longitude of ascending node
omega: argument of periapsis
rper_over_a: hierarchy (outer pericenter over inner semi major axis)
eper: outer eccentricity
Simulation Parameters:
samples_per_Pcirc: number of sampling points per circular inner orbit
max_periods: maximal periods to stop (-1 for don't care)
dump_every: how many iterations between two state dump to file (heavy operation, better be a big number)
save_every: how many iterations between two state saves (if too small, result file might be very big)
save_every_P: how many periods between two state saves (instead of save_every; 0 for don't care)
rmax: the simulation stops if one of the bodies is unbound to the other two, and its distance is larger
than rmax times the distance between the other two.
save_last: how many last iterations to save
ca_saveall: boolean. save all close approaches or only the smallest one until each time.
dt00: different method to initialize the time-step (np.nan for don't care)
"""
# append code directory to sys_path, so that imports would work
import os
import sys
CODE_DIR = os.path.join(os.sep, 'home', 'path', 'to', 'code', 'dir')
sys.path.append(CODE_DIR)
import numpy as np
import time
from sim.run_simulation import run_simulation
# Physical configuration of the hierarchical triple system (see module docstring).
params_phys = {
    'G': 1.0,              # Gravity constant
    'm1': 0.5,             # mass of body no. 1
    'm2': 0.5,             # mass of body no. 2
    'm3': 0.5,             # mass of body no. 3
    'a': np.double(1),     # inner semi major axis
    'e': 0.1,              # inner eccentricity
    'M0_in': 2.649,        # inner mean anomaly
    'M0_out': 3.89,        # outer mean anomaly
    'inclination': 1.489,  # mutual inclination
    'Omega': 1.34,         # longitude of ascending node
    'omega': 0.932,        # argument of periapsis
    'rper_over_a': 5.0,    # hierarchy (outer pericenter over inner semi major axis)
    'eper': 0.425,         # outer eccentricity
}

# Numerical settings: sampling density, stopping criteria and output cadence.
params_sim = {
    'samples_per_Pcirc': np.int64(500),
    'max_periods': np.int64(1000),     # -1 for don't care
    'dump_every': np.int64(10000000),
    'save_every': np.int64(20),
    'save_every_P': np.int64(0),       # 0 for don't care
    'rmax': 50 * params_phys['a'],     # unbound-body stopping radius, in units of a
    'save_last': np.int64(5),
    'ca_saveall': np.int64(0),
    'dt00': np.nan,                    # np.nan for don't care
}

# Results are written next to this script in MATLAB .mat format.
save_to = os.path.join('.', 'sim.mat')

# Run the simulation and report the wall-clock time it took.
t_start = time.time()
run_simulation(N=np.int64(1e8), params_phys=params_phys, params_sim=params_sim,
               save_to=save_to, dump_to=save_to, save_as='mat',
               post_process=False)
print('time:', time.time() - t_start)
| [
"numpy.int64",
"numpy.double",
"os.path.join",
"time.time",
"sys.path.append"
] | [((1733, 1790), 'os.path.join', 'os.path.join', (['os.sep', '"""home"""', '"""path"""', '"""to"""', '"""code"""', '"""dir"""'], {}), "(os.sep, 'home', 'path', 'to', 'code', 'dir')\n", (1745, 1790), False, 'import os\n'), ((1792, 1817), 'sys.path.append', 'sys.path.append', (['CODE_DIR'], {}), '(CODE_DIR)\n', (1807, 1817), False, 'import sys\n'), ((3112, 3140), 'os.path.join', 'os.path.join', (['"""."""', '"""sim.mat"""'], {}), "('.', 'sim.mat')\n", (3124, 3140), False, 'import os\n'), ((3170, 3181), 'time.time', 'time.time', ([], {}), '()\n', (3179, 3181), False, 'import time\n'), ((2129, 2141), 'numpy.double', 'np.double', (['(1)'], {}), '(1)\n', (2138, 2141), True, 'import numpy as np\n'), ((2668, 2681), 'numpy.int64', 'np.int64', (['(500)'], {}), '(500)\n', (2676, 2681), True, 'import numpy as np\n'), ((2703, 2717), 'numpy.int64', 'np.int64', (['(1000)'], {}), '(1000)\n', (2711, 2717), True, 'import numpy as np\n'), ((2764, 2782), 'numpy.int64', 'np.int64', (['(10000000)'], {}), '(10000000)\n', (2772, 2782), True, 'import numpy as np\n'), ((2803, 2815), 'numpy.int64', 'np.int64', (['(20)'], {}), '(20)\n', (2811, 2815), True, 'import numpy as np\n'), ((2838, 2849), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (2846, 2849), True, 'import numpy as np\n'), ((2932, 2943), 'numpy.int64', 'np.int64', (['(5)'], {}), '(5)\n', (2940, 2943), True, 'import numpy as np\n'), ((2964, 2975), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (2972, 2975), True, 'import numpy as np\n'), ((3200, 3221), 'numpy.int64', 'np.int64', (['(100000000.0)'], {}), '(100000000.0)\n', (3208, 3221), True, 'import numpy as np\n'), ((3380, 3391), 'time.time', 'time.time', ([], {}), '()\n', (3389, 3391), False, 'import time\n')] |
import sys
import random
from dqn import Agent
import numpy as np
class MockActionSpace:
    """Stand-in for a gym discrete action space: exposes only ``n``."""
    def __init__(self, n):
        self.n = n
class MockObservationSpace:
    """Stand-in for a gym observation space: exposes only ``shape``."""
    def __init__(self, shape):
        self.shape = shape
class MockEnv:
    """Minimal gym-like environment that always emits zero observations."""
    def __init__(self, env_name):
        self.action_space = MockActionSpace(10)
        self.observation_space = MockObservationSpace((1, 1, 1))
    def reset(self):
        """Begin an episode and return its initial observation."""
        return self.random_observation()
    def step(self, action):
        """Advance one frame: fixed reward, episode ends with probability 1/1001."""
        print("stepping")
        episode_over = random.randint(0, 1000) == 555
        return self.random_observation(), 5, episode_over, None
    def random_observation(self):
        """Observation placeholder: a zero array with a leading batch axis."""
        return np.zeros((1, 1, 1, 1))
# Smoke-test driver: run the DQN agent against the mock environment for a
# fixed number of episodes, logging per-episode reward.
num_episodes = 20
env_name = sys.argv[1] if len(sys.argv) > 1 else "MsPacman-v0"
env = MockEnv(env_name)
agent = Agent(state_size=env.observation_space.shape,
              number_of_actions=env.action_space.n,
              save_name=env_name)
for e in range(num_episodes):
    observation = env.reset()
    agent.new_episode()
    total_cost, total_reward = 0.0, 0.0
    frame = 0
    done = False
    while not done:
        frame += 1
        # agent picks an action from the current observation
        action, values = agent.act(observation)
        print(action)
        observation, reward, done, info = env.step(action)
        # accumulate the training cost reported by the agent and the raw reward
        total_cost += agent.observe(reward)
        total_reward += reward
    print("total reward {}".format(total_reward))
print("mean cost {}".format(total_cost/frame)) | [
"numpy.zeros",
"random.randint",
"dqn.Agent"
] | [((838, 946), 'dqn.Agent', 'Agent', ([], {'state_size': 'env.observation_space.shape', 'number_of_actions': 'env.action_space.n', 'save_name': 'env_name'}), '(state_size=env.observation_space.shape, number_of_actions=env.\n action_space.n, save_name=env_name)\n', (843, 946), False, 'from dqn import Agent\n'), ((499, 521), 'numpy.zeros', 'np.zeros', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (507, 521), True, 'import numpy as np\n'), ((409, 432), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (423, 432), False, 'import random\n')] |
## @ingroup Methods-Aerodynamics-Supersonic_Zero-Drag
# parasite_drag_fuselage.py
#
# Created: Aug 2014, <NAME>
# Modified: Nov 2016, <NAME>
# Feb 2019, <NAME>
# Jan 2020, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Helper_Functions import compressible_turbulent_flat_plate
from SUAVE.Core import Data
from SUAVE.Methods.Utilities.Cubic_Spline_Blender import Cubic_Spline_Blender
import numpy as np
# ----------------------------------------------------------------------
# Parasite Drag Fuselage
# ----------------------------------------------------------------------
## @ingroup Methods-Aerodynamics-Supersonic_Zero-Drag
def parasite_drag_fuselage(state,settings,geometry):
    """Computes the parasite drag due to the fuselage

    Assumptions:
    Basic fit

    Source:
    http://aerodesign.stanford.edu/aircraftdesign/aircraftdesign.html (Stanford AA241 A/B Course Notes)

    Inputs:
    state.conditions.freestream.
      mach_number                                [Unitless]
      temperature                                [K]
      reynolds_number                            [Unitless]
    settings.fuselage_parasite_drag_form_factor  [Unitless]
    geometry.fuselage.
      areas.front_projected                      [m^2]
      areas.wetted                               [m^2]
      lengths.total                              [m]
      effective_diameter                         [m]

    Outputs:
    fuselage_parasite_drag                       [Unitless]

    Properties Used:
    N/A
    """

    # unpack inputs
    configuration = settings
    form_factor   = configuration.fuselage_parasite_drag_form_factor
    low_cutoff    = configuration.fuselage_parasite_drag_begin_blend_mach
    high_cutoff   = configuration.fuselage_parasite_drag_end_blend_mach
    fuselage      = geometry

    freestream = state.conditions.freestream
    Sref       = fuselage.areas.front_projected
    Swet       = fuselage.areas.wetted
    l_fus      = fuselage.lengths.total
    d_fus      = fuselage.effective_diameter

    # conditions
    Mc = freestream.mach_number
    Tc = freestream.temperature
    re = freestream.reynolds_number

    # reynolds number based on the total fuselage length
    Re_fus = re*(l_fus)

    # skin friction coefficient (compressible turbulent flat plate)
    cf_fus, k_comp, k_reyn = compressible_turbulent_flat_plate(Re_fus,Mc,Tc)

    # fineness ratio of the fuselage
    d_d = float(d_fus)/float(l_fus)

    # low/high speed branches of the form factor, blended across the cutoff range
    D_low         = np.zeros(np.shape(Mc))
    a_low         = np.zeros(np.shape(Mc))
    du_max_u_low  = np.zeros(np.shape(Mc))
    D_high        = np.zeros(np.shape(Mc))
    a_high        = np.zeros(np.shape(Mc))
    du_max_u_high = np.zeros(np.shape(Mc))

    low_inds  = Mc < high_cutoff
    high_inds = Mc > low_cutoff

    # subsonic branch (Prandtl-Glauert corrected geometry term)
    D_low[low_inds] = np.sqrt(1 - (1-Mc[low_inds]**2) * d_d**2)
    a_low[low_inds] = 2 * (1-Mc[low_inds]**2) * (d_d**2) *(np.arctanh(D_low[low_inds])-D_low[low_inds]) / (D_low[low_inds]**3)
    du_max_u_low[low_inds] = a_low[low_inds] / ( (2-a_low[low_inds]) * (1-Mc[low_inds]**2)**0.5 )

    # supersonic branch (no compressibility correction on the geometry term)
    D_high[high_inds] = np.sqrt(1 - d_d**2)
    a_high[high_inds] = 2 * (d_d**2) *(np.arctanh(D_high[high_inds])-D_high[high_inds]) / (D_high[high_inds]**3)
    du_max_u_high[high_inds] = a_high[high_inds] / ( (2-a_high[high_inds]) )

    # smooth blend of the two branches across [low_cutoff, high_cutoff]
    spline = Cubic_Spline_Blender(low_cutoff,high_cutoff)
    blend  = spline.compute(Mc)  # evaluated once instead of twice
    du_max_u = du_max_u_low*blend + du_max_u_high*(1-blend)

    k_fus = (1 + form_factor*du_max_u)**2

    fuselage_parasite_drag = k_fus * cf_fus * Swet / Sref

    # dump data to conditions
    fuselage_result = Data(
        wetted_area               = Swet   ,
        reference_area            = Sref   ,
        parasite_drag_coefficient = fuselage_parasite_drag ,
        skin_friction_coefficient = cf_fus ,
        compressibility_factor    = k_comp ,
        reynolds_factor           = k_reyn ,
        form_factor               = k_fus  ,
    )
    # drag-polar mode carries no drag_breakdown structure; in that case skip the
    # dump (narrowed from a bare `except:` which also swallowed KeyboardInterrupt)
    try:
        state.conditions.aerodynamics.drag_breakdown.parasite[fuselage.tag] = fuselage_result
    except AttributeError:
        print("Drag Polar Mode fuse parasite")
return fuselage_parasite_drag | [
"numpy.sqrt",
"SUAVE.Methods.Utilities.Cubic_Spline_Blender.Cubic_Spline_Blender",
"SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Helper_Functions.compressible_turbulent_flat_plate",
"SUAVE.Core.Data",
"numpy.arctanh"
] | [((2481, 2530), 'SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Helper_Functions.compressible_turbulent_flat_plate', 'compressible_turbulent_flat_plate', (['Re_fus', 'Mc', 'Tc'], {}), '(Re_fus, Mc, Tc)\n', (2514, 2530), False, 'from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Helper_Functions import compressible_turbulent_flat_plate\n'), ((3024, 3071), 'numpy.sqrt', 'np.sqrt', (['(1 - (1 - Mc[low_inds] ** 2) * d_d ** 2)'], {}), '(1 - (1 - Mc[low_inds] ** 2) * d_d ** 2)\n', (3031, 3071), True, 'import numpy as np\n'), ((3320, 3341), 'numpy.sqrt', 'np.sqrt', (['(1 - d_d ** 2)'], {}), '(1 - d_d ** 2)\n', (3327, 3341), True, 'import numpy as np\n'), ((3549, 3594), 'SUAVE.Methods.Utilities.Cubic_Spline_Blender.Cubic_Spline_Blender', 'Cubic_Spline_Blender', (['low_cutoff', 'high_cutoff'], {}), '(low_cutoff, high_cutoff)\n', (3569, 3594), False, 'from SUAVE.Methods.Utilities.Cubic_Spline_Blender import Cubic_Spline_Blender\n'), ((3871, 4081), 'SUAVE.Core.Data', 'Data', ([], {'wetted_area': 'Swet', 'reference_area': 'Sref', 'parasite_drag_coefficient': 'fuselage_parasite_drag', 'skin_friction_coefficient': 'cf_fus', 'compressibility_factor': 'k_comp', 'reynolds_factor': 'k_reyn', 'form_factor': 'k_fus'}), '(wetted_area=Swet, reference_area=Sref, parasite_drag_coefficient=\n fuselage_parasite_drag, skin_friction_coefficient=cf_fus,\n compressibility_factor=k_comp, reynolds_factor=k_reyn, form_factor=k_fus)\n', (3875, 4081), False, 'from SUAVE.Core import Data\n'), ((3125, 3152), 'numpy.arctanh', 'np.arctanh', (['D_low[low_inds]'], {}), '(D_low[low_inds])\n', (3135, 3152), True, 'import numpy as np\n'), ((3380, 3409), 'numpy.arctanh', 'np.arctanh', (['D_high[high_inds]'], {}), '(D_high[high_inds])\n', (3390, 3409), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
"""Functions related to facet recognition of nanocrystals."""
from scipy.ndimage.measurements import center_of_mass
from scipy.signal import convolve
from scipy.interpolate import RegularGridInterpolator
from scipy.interpolate import griddata
from scipy import stats
from scipy import ndimage
from skimage.feature import corner_peaks
from skimage.morphology import watershed
from numbers import Real
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
import sys
from bcdi.graph import graph_utils as gu
from bcdi.utils import utilities as util
from bcdi.utils import validation as valid
# module-wide default colormap, shared by the plotting helpers below
colormap = gu.Colormap()
default_cmap = colormap.cmap
def calc_stereoproj_facet(projection_axis, vectors, radius_mean, stereo_center):
    """
    Calculate the coordinates of normals in the stereographic projection.

    The calculation depends on the reference axis. See: Nanoscale 10, 4833 (2018).

    :param projection_axis: the projection is performed on q plane perpendicular to
     that axis (0, 1 or 2)
    :param vectors: array of vectors to be projected (nb_vectors rows x 3 columns)
    :param radius_mean: q radius from which the projection will be done
    :param stereo_center: offset of the projection plane along the reflection axis,
     in the same unit as radius_mean. If stereo_center = 0, the projection plane will
     be the equator.
    :return: the coordinates of the stereographic projection for the projection from
     the South pole(1st and 2nd columns) and from the North pole (3rd and 4th
     columns) projection, rescaled from radius_mean to 90 degrees
    """
    if projection_axis not in [0, 1, 2]:
        raise ValueError(
            "reflection_axis should be a basis axis of the reconstructed array"
        )

    # in-plane axes (u, v) for each choice of the projection axis:
    # axis 0 -> (1, 2), axis 1 -> (0, 2), axis 2 -> (0, 1)
    u_axis, v_axis = [(1, 2), (0, 2), (0, 1)][projection_axis]
    # axes corresponding to u and v respectively, used in plots
    uv_labels = (f"axis {u_axis}", f"axis {v_axis}")

    # coordinate along the projection axis and the two in-plane coordinates
    proj_coord = vectors[:, projection_axis]
    u_coord = vectors[:, u_axis]
    v_coord = vectors[:, v_axis]

    # denominators of the projections from the South and North poles
    denom_south = radius_mean + proj_coord - stereo_center
    denom_north = radius_mean + stereo_center - proj_coord

    # stereo_proj columns: [u_south, v_south, u_north, v_north]
    # (filled in place to preserve the dtype of `vectors`, as before)
    stereo_proj = np.zeros((vectors.shape[0], 4), dtype=vectors.dtype)
    stereo_proj[:, 0] = radius_mean * u_coord / denom_south  # u_s
    stereo_proj[:, 1] = radius_mean * v_coord / denom_south  # v_s
    stereo_proj[:, 2] = radius_mean * u_coord / denom_north  # u_n
    stereo_proj[:, 3] = radius_mean * v_coord / denom_north  # v_n

    stereo_proj = stereo_proj / radius_mean * 90  # rescale from radius_mean to 90

    return stereo_proj, uv_labels
def detect_edges(faces):
    """
    Find indices of vertices defining non-shared edges.

    :param faces: ndarray of m*3 faces
    :return: 1D list of indices of vertices defining non-shared edges (near hole...)
    """
    # stack the three edges of every triangle into a single (3m, 2) array,
    # sorting each pair so that (a, b) and (b, a) compare equal
    all_edges = np.vstack((faces[:, [0, 1]], faces[:, [0, 2]], faces[:, [1, 2]]))
    all_edges = np.sort(all_edges, axis=1)
    # an interior edge is shared by two triangles and appears twice;
    # a boundary edge (near a hole) appears exactly once
    edge_list, edge_counts = np.unique(all_edges, return_counts=True, axis=0)
    return edge_list[edge_counts == 1].flatten()
def distance_threshold(fit, indices, plane_shape, max_distance=0.90):
    """
    Filter out pixels depending on their distance to a fit plane.

    :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0
    :param indices: tuple or array of plane indices, x being the 1st tuple element or
     array row, y the 2nd tuple element or array row and z the third tuple element or
     array row
    :param plane_shape: shape of the initial plane array
    :param max_distance: max distance allowed from the fit plane in pixels
    :return: the updated plane, a stop flag
    """
    indices = np.asarray(indices)
    plane = np.zeros(plane_shape, dtype=int)
    if len(indices[0]) == 0:
        # no input points: return the empty plane with the stop flag set
        return plane, True

    # point-to-plane distance |a*x + b*y + c*z + d| / ||(a, b, c)||, vectorized
    # (the norm is loop-invariant and computed once)
    plane_normal = np.array(
        [fit[0], fit[1], fit[2]]
    )  # normal is [a, b, c] if ax+by+cz+d=0
    dist = (
        np.abs(fit[0] * indices[0] + fit[1] * indices[1] + fit[2] * indices[2] + fit[3])
        / np.linalg.norm(plane_normal)
    )
    # keep only the voxels closer to the fit plane than max_distance
    close_enough = dist < max_distance
    plane[
        indices[0][close_enough], indices[1][close_enough], indices[2][close_enough]
    ] = 1

    if not plane.any():
        print("Distance_threshold: no points for plane")
        return plane, True
    return plane, False
def equirectangular_proj(
    normals,
    intensity,
    cmap=default_cmap,
    bw_method=0.03,
    min_distance=10,
    background_threshold=-0.35,
    debugging=False,
):
    """
    Detect facets in an object.

    It uses an equirectangular projection of normals to mesh triangles and watershed
    segmentation.

    :param normals: normals array
    :param intensity: intensity array, used only to colour the first diagnostic plot
    :param cmap: colormap used for plotting
    :param bw_method: bw_method of gaussian_kde
    :param min_distance: min_distance of corner_peaks()
    :param background_threshold: threshold for background determination
     (depth of the KDE)
    :param debugging: if True, show plots for debugging
    :return: ndarray of labelled regions, and the (longitude, latitude) coordinates
     of the normals
    """
    # check normals for nan
    # NOTE(review): taking every 3rd row assumes a NaN normal is NaN in all three
    # components (argwhere yields one row per NaN component) -- TODO confirm
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    # calculate latitude and longitude from xyz,
    # this is equal to the equirectangular flat square projection
    # normals with both first components equal to 0 are skipped and stay at (0, 0)
    long_lat = np.zeros((normals.shape[0], 2), dtype=normals.dtype)
    for i in range(normals.shape[0]):
        if normals[i, 1] == 0 and normals[i, 0] == 0:
            continue
        long_lat[i, 0] = np.arctan2(normals[i, 1], normals[i, 0])  # longitude
        long_lat[i, 1] = np.arcsin(normals[i, 2])  # latitude
    # diagnostic plot: raw projected point cloud coloured by intensity
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(long_lat[:, 0], long_lat[:, 1], c=intensity, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the weighted point densities before KDE")
    plt.pause(0.1)
    # kernel density estimation
    kde = stats.gaussian_kde(long_lat.T, bw_method=bw_method)
    # input should be a 2D array with shape (# of dims, # of data)
    # Create a regular 3D grid
    yi, xi = np.mgrid[
        -np.pi / 2 : np.pi / 2 : 150j, -np.pi : np.pi : 300j
    ]  # vertical, horizontal
    # Evaluate the KDE on a regular grid...
    coords = np.vstack([item.ravel() for item in [xi, yi]])
    # coords is a contiguous flattened array of coordinates of shape (2, size(xi))
    density = -1 * kde(coords).reshape(
        xi.shape
    )  # inverse density for later watershed segmentation
    # diagnostic plot: inverted KDE map
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap, vmin=-1.5, vmax=0)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("Equirectangular projection of the KDE")
    plt.pause(0.1)
    # identification of local minima
    density[density > background_threshold] = 0  # define the background
    mask = np.copy(density)
    mask[mask != 0] = 1
    plt.figure()
    plt.imshow(mask, cmap=cmap, interpolation="nearest")
    plt.title("Background mask")
    plt.gca().invert_yaxis()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(xi, yi, c=density, cmap=cmap)
    ax.set_xlim(-np.pi, np.pi)
    ax.set_ylim(-np.pi / 2, np.pi / 2)
    fig.colorbar(scatter)
    plt.axis("scaled")
    plt.title("KDE after background definition")
    plt.pause(0.1)
    # Generate the markers as local minima of the distance to the background
    distances = ndimage.distance_transform_edt(density)
    if debugging:
        plt.figure()
        plt.imshow(distances, cmap=cmap, interpolation="nearest")
        plt.title("Distances")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # find peaks
    local_maxi = corner_peaks(
        distances, exclude_border=False, min_distance=min_distance, indices=False
    )  #
    if debugging:
        plt.figure()
        plt.imshow(local_maxi, interpolation="nearest")
        plt.title("local_maxi")
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # define markers for each peak
    markers = ndimage.label(local_maxi)[0]
    if debugging:
        plt.figure()
        plt.imshow(markers, interpolation="nearest")
        plt.title("markers")
        plt.colorbar()
        plt.gca().invert_yaxis()
        plt.pause(0.1)
    # watershed segmentation on the inverted distance map: each marker grows
    # into one labelled facet region within the background mask
    labels = watershed(-1 * distances, markers, mask=mask)
    print("There are", str(labels.max()), "facets")  # label 0 is the background
    plt.figure()
    plt.imshow(labels, cmap=cmap, interpolation="nearest")
    plt.title("Separated objects")
    plt.colorbar()
    plt.gca().invert_yaxis()
    plt.pause(0.1)
    return labels, long_lat
def find_facet(
    refplane_indices,
    surf_indices,
    original_shape,
    step_shift,
    plane_label,
    plane_coeffs,
    min_points,
    debugging=False,
):
    """
    Shift a fit plane along its normal until it reaches the surface of a faceted object.

    The plane is translated step by step along its normal; at each step the number
    of voxels shared with the object surface is counted. The search first scans in
    one direction (up to 5 steps), then reverses the sign of the shift, and stops
    once the surface has been crossed or after the fall-back iteration limit.

    :param refplane_indices: a tuple of 3 arrays (1D, length N) describing the
     coordinates of the plane voxels, x values being the 1st tuple element, y values
     the 2nd tuple element and z values the 3rd tuple element (output of np.nonzero)
    :param surf_indices: a tuple of 3 arrays (1D, length N) describing the coordinates
     of the surface voxels, x values being the 1st tuple element, y values the 2nd
     tuple element and z values the 3rd tuple element (output of np.nonzero)
    :param original_shape: the shape of the full dataset (amplitude object,
     eventually upsampled)
    :param step_shift: the amplitude of the shift to be applied to the plane
     along its normal
    :param plane_label: the label of the plane, used in comments
    :param plane_coeffs: a tuple of coefficient (a, b, c, d) such that ax+by+cz+d=0
    :param min_points: threshold, minimum number of points that should coincide
     between the fit plane and the object surface
    :param debugging: True to see debugging plots
    :return: the shift that needs to be applied to the fit plane in order to best
     match with the object surface
    """
    if not isinstance(refplane_indices, tuple):
        raise ValueError("refplane_indices should be a tuple of 3 1D ndarrays")
    if not isinstance(surf_indices, tuple):
        raise ValueError("surf_indices should be a tuple of 3 1D ndarrays")

    surf0, surf1, surf2 = surf_indices

    plane_normal = np.array(
        [plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]]
    )  # normal is [a, b, c] if ax+by+cz+d=0

    # loop until the surface is crossed or the iteration limit is reached
    # state variables:
    #   common_previous - shared-voxel count of the previous iteration
    #   crossed_surface - set to 1 once the plane has intersected the surface
    #   shift_direction - 0 while scanning forward, 1 after reversing step_shift
    common_previous = 0
    found_plane = 0
    nbloop = 1
    crossed_surface = 0
    shift_direction = 0
    while found_plane == 0:
        common_points = 0
        nb_points = len(surf0)
        # shift indices: translate the reference plane by nbloop steps along
        # its normal (offset_plane helper)
        plane_newindices0, plane_newindices1, plane_newindices2 = offset_plane(
            indices=refplane_indices,
            offset=nbloop * step_shift,
            plane_normal=plane_normal,
        )

        # brute-force count of voxels shared by the shifted plane and the surface
        nb_newpoints = len(plane_newindices0)
        for point in range(nb_newpoints):
            for point2 in range(nb_points):
                if (
                    plane_newindices0[point] == surf0[point2]
                    and plane_newindices1[point] == surf1[point2]
                    and plane_newindices2[point] == surf2[point2]
                ):
                    common_points = common_points + 1

        if debugging:
            # report the mean surface-to-plane distance and plot the overlay
            temp_coeff3 = plane_coeffs[3] - nbloop * step_shift
            dist = np.zeros(nb_points)
            for point in range(nb_points):
                dist[point] = (
                    plane_coeffs[0] * surf0[point]
                    + plane_coeffs[1] * surf1[point]
                    + plane_coeffs[2] * surf2[point]
                    + temp_coeff3
                ) / np.linalg.norm(plane_normal)
            temp_mean_dist = dist.mean()
            plane = np.zeros(original_shape)
            plane[plane_newindices0, plane_newindices1, plane_newindices2] = 1

            # plot plane points overlaid with the support
            gu.scatter_plot_overlaid(
                arrays=(
                    np.concatenate(
                        (
                            plane_newindices0[:, np.newaxis],
                            plane_newindices1[:, np.newaxis],
                            plane_newindices2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                    np.concatenate(
                        (
                            surf0[:, np.newaxis],
                            surf1[:, np.newaxis],
                            surf2[:, np.newaxis],
                        ),
                        axis=1,
                    ),
                ),
                markersizes=(8, 2),
                markercolors=("b", "r"),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Plane"
                + str(plane_label)
                + " after shifting - iteration"
                + str(nbloop),
            )

            print(
                "(while) iteration ",
                nbloop,
                "- Mean distance of the plane to outer shell = "
                + str("{:.2f}".format(temp_mean_dist))
                + "\n pixels - common_points = ",
                common_points,
            )

        if common_points != 0:  # some plane points are in commun with the surface layer
            if common_points >= common_previous:
                # still gaining surface points: keep shifting in this direction
                found_plane = 0
                common_previous = common_points
                print(
                    "(while, common_points != 0), iteration ",
                    nbloop,
                    " - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                )
                nbloop = nbloop + 1
                crossed_surface = 1
            elif (
                common_points < min_points
            ):  # try to keep enough points for statistics, half step back
                found_plane = 1
                print(
                    "(while, common_points != 0), "
                    "exiting while loop after threshold reached - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            else:
                # count decreased but is still above min_points: keep going
                found_plane = 0
                common_previous = common_points
                print(
                    "(while, common_points != 0), iteration ",
                    nbloop,
                    " - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                )
                nbloop = nbloop + 1
                crossed_surface = 1
        else:  # no commun points, the plane is not intersecting the surface layer
            if crossed_surface == 1:  # found the outer shell, which is 1 step before
                found_plane = 1
                print(
                    "(while, common_points = 0), exiting while loop - ",
                    common_previous,
                    "points belonging to the facet for plane ",
                    plane_label,
                    "- next step common points=",
                    common_points,
                )
            elif not shift_direction:
                if nbloop < 5:  # continue to scan
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:  # scan in the other direction
                    shift_direction = 1
                    print("Shift scanning direction")
                    step_shift = -1 * step_shift
                    nbloop = 1
            else:  # shift_direction = 1
                if nbloop < 10:
                    print(
                        "(while, common_points = 0), iteration ",
                        nbloop,
                        " - ",
                        common_previous,
                        "points belonging to the facet for plane ",
                        plane_label,
                    )
                    nbloop = nbloop + 1
                else:  # we were already unsuccessfull in the other direction, give up
                    print(
                        "(while, common_points = 0),"
                        " no point from support is intersecting the plane ",
                        plane_label,
                    )
                    break

    # the last executed shift (one step back from the exit condition)
    return (nbloop - 1) * step_shift
def find_neighbours(vertices, faces):
    """
    Get the list of neighbouring vertices for each vertex.

    Vertices which do not belong to any face get an empty neighbour list.
    (The previous implementation left their entry as None and then crashed
    with a TypeError when iterating it during deduplication.)

    :param vertices: ndarray of n*3 vertices
    :param faces: ndarray of m*3 faces
    :return: list of lists of indices
    """
    # accumulate neighbours in sets so duplicate indices are discarded for free
    neighbour_sets = [set() for _ in range(vertices.shape[0])]
    for vert_a, vert_b, vert_c in faces:
        # each vertex of a triangle is a neighbour of the other two
        neighbour_sets[vert_a].update((vert_b, vert_c))
        neighbour_sets[vert_b].update((vert_a, vert_c))
        neighbour_sets[vert_c].update((vert_a, vert_b))
    # keep the original return type: one list of neighbour indices per vertex
    return [list(neighbours) for neighbours in neighbour_sets]
def fit_plane(plane, label, debugging=False):
    """
    Fit a plane to labelled indices using the equation a*x+ b*y + c*z + d = 0.

    Before fitting, two passes of outlier removal are applied (each pass runs
    twice): (1) voxels with fewer than 5 neighbours in a 5x5x5 box are removed,
    then (2) voxels farther from the center of mass than the median distance
    are removed. The input ``plane`` array is modified in place.

    :param plane: 3D binary array, where the voxels belonging to the plane are set
     to 1 and others are set to 0.
    :param label: int, label of the plane used for the title in plots
    :param debugging: show plots for debugging
    :return: fit parameters (a, b, c, d), plane indices after filtering,
     errors associated, a stop flag
    """
    indices = np.asarray(np.nonzero(plane))
    no_points = False
    if len(indices[0]) == 0:
        # empty plane: nothing to fit
        no_points = True
        return 0, indices, 0, no_points

    # two iterations of the two-stage outlier filtering
    for idx in range(2):
        # remove isolated points, which probably do not belong to the plane
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        for point in range(indices.shape[1]):
            # coordination number: occupied voxels in the 5x5x5 box around the point
            neighbors = plane[
                indices[0, point] - 2 : indices[0, point] + 3,
                indices[1, point] - 2 : indices[1, point] + 3,
                indices[2, point] - 2 : indices[2, point] + 3,
            ].sum()
            if neighbors < 5:
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points isolated, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after coordination threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        # update plane indices
        indices = np.asarray(np.nonzero(plane))
        if len(indices[0]) == 0:
            no_points = True
            return 0, indices, 0, no_points

        # remove also points farther away than the median distance to the COM
        dist = np.zeros(indices.shape[1])
        x_com, y_com, z_com = center_of_mass(plane)
        for point in range(indices.shape[1]):
            dist[point] = np.sqrt(
                (indices[0, point] - x_com) ** 2
                + (indices[1, point] - y_com) ** 2
                + (indices[2, point] - z_com) ** 2
            )
        median_dist = np.median(dist)
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points before distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        for point in range(indices.shape[1]):
            if dist[point] > median_dist:
                plane[indices[0, point], indices[1, point], indices[2, point]] = 0
        print(
            "Fit plane",
            label,
            ", ",
            str(indices.shape[1] - plane[plane == 1].sum()),
            "points too far from COM, ",
            str(plane[plane == 1].sum()),
            "remaining",
        )
        if debugging:
            gu.scatter_plot(
                np.asarray(np.nonzero(plane)).transpose(),
                labels=("axis 0", "axis 1", "axis 2"),
                title="Points after distance threshold plane "
                + str(label)
                + f"\niteration {idx}",
            )
        # update plane indices and check if enough points remain
        indices = np.asarray(np.nonzero(plane))
        if len(indices[0]) < 5:
            no_points = True
            return 0, indices, 0, no_points

    # the fit parameters are (a, b, c, d) such that a*x + b*y + c*z + d = 0
    params, std_param, valid_plane = util.plane_fit(
        indices=indices, label=label, threshold=1, debugging=debugging
    )
    if not valid_plane:
        # fit rejected: clear the plane voxels and flag the failure
        plane[indices] = 0
        no_points = True
    return params, indices, std_param, no_points
def grow_facet(fit, plane, label, support, max_distance=0.90, debugging=True):
    """
    Find voxels of the object which belong to a facet.

    It uses the facet plane equation and the distance to the plane to find such voxels.

    :param fit: coefficients of the plane (a, b, c, d) such that a*x + b*y + c*z + d = 0
    :param plane: 3D binary support of the plane, with shape of the full dataset
    :param label: the label of the plane processed
    :param support: 3D binary support of the reconstructed object,
     with shape of the full dataset
    :param max_distance: in pixels, maximum allowed distance to the facet plane
     of a voxel
    :param debugging: set to True to see plots
    :return: the updated plane, a stop flag
    """
    nbz, nby, nbx = plane.shape
    indices = np.nonzero(plane)
    if len(indices[0]) == 0:
        # empty plane, nothing to grow: tell the caller to stop
        no_points = True
        return plane, no_points
    kernel = np.ones((3, 3, 3))
    # crop a padded box around the plane (20-pixel padding, clamped to the array
    # bounds) so the convolution below runs on a small array
    start_z = max(indices[0].min() - 20, 0)
    stop_z = min(indices[0].max() + 21, nbz)
    start_y = max(indices[1].min() - 20, 0)
    stop_y = min(indices[1].max() + 21, nby)
    start_x = max(indices[2].min() - 20, 0)
    stop_x = min(indices[2].max() + 21, nbx)
    # find nearby voxels using the coordination number
    obj = np.copy(plane[start_z:stop_z, start_y:stop_y, start_x:stop_x])
    coord = np.rint(convolve(obj, kernel, mode="same"))
    coord = coord.astype(int)
    # binarize: keep every voxel that has at least one neighbour in the plane
    coord[np.nonzero(coord)] = 1
    if debugging:
        gu.scatter_plot_overlaid(
            arrays=(np.asarray(np.nonzero(coord)).T, np.asarray(np.nonzero(obj)).T),
            markersizes=(2, 8),
            markercolors=("b", "r"),
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " before facet growing and coord matrix",
        )
    # update plane with new voxels
    temp_plane = np.copy(plane)
    temp_plane[start_z:stop_z, start_y:stop_y, start_x:stop_x] = coord
    # remove voxels not belonging to the support
    temp_plane[support == 0] = 0
    # check distance of new voxels to the plane, voxels too far from the fitted
    # plane are discarded by distance_threshold
    plane, no_points = distance_threshold(
        fit=fit,
        indices=np.nonzero(temp_plane),
        plane_shape=temp_plane.shape,
        max_distance=max_distance,
    )
    plane_normal = fit[:-1]  # normal is [a, b, c] if ax+by+cz+d=0
    # calculate the local gradient for each point of the plane,
    # gradients is a list of arrays of 3 vector components
    indices = np.nonzero(plane)
    gradients = surface_gradient(
        list(zip(indices[0], indices[1], indices[2])), support=support
    )
    count_grad = 0
    nb_indices = len(indices[0])
    # keep only voxels whose outward surface gradient roughly agrees with the
    # plane normal (dot product of two unit-like vectors >= 0.75)
    for idx in range(nb_indices):
        if np.dot(plane_normal, gradients[idx]) < 0.75:
            # 0.85 is too restrictive checked CH4760 S11 plane 1
            plane[indices[0][idx], indices[1][idx], indices[2][idx]] = 0
            count_grad += 1
    indices = np.nonzero(plane)
    if debugging and len(indices[0]) != 0:
        gu.scatter_plot(
            array=np.asarray(indices).T,
            labels=("x", "y", "z"),
            title="Plane" + str(label) + " after 1 cycle of facet growing",
        )
        print(f"{count_grad} points excluded by gradient filtering")
        print(str(len(indices[0])) + " after 1 cycle of facet growing")
    return plane, no_points
def offset_plane(indices, offset, plane_normal):
    """
    Shift plane indices by the offset value in order to scan perpendicular to the plane.

    :param indices: tuple of 3 1D ndarrays (array shape = nb_points)
    :param offset: offset to be applied to the indices (offset of the plane)
    :param plane_normal: ndarray of 3 elements, normal to the plane
    :return: offseted indices (tuple of 3 1D ndarrays of int)
    :raises ValueError: if indices is not a tuple
    """
    if not isinstance(indices, tuple):
        raise ValueError("indices should be a tuple of 3 1D ndarrays")
    # normalize the plane normal once instead of recomputing the norm per axis;
    # the shift along axis k is simply offset * (k-th component of the unit normal),
    # which is what np.dot(basis_vector, unit_normal) evaluated to in the old code
    unit_normal = plane_normal / np.linalg.norm(plane_normal)
    new_indices0 = np.rint(indices[0] + offset * unit_normal[0]).astype(int)
    new_indices1 = np.rint(indices[1] + offset * unit_normal[1]).astype(int)
    new_indices2 = np.rint(indices[2] + offset * unit_normal[2]).astype(int)
    return new_indices0, new_indices1, new_indices2
def remove_duplicates(vertices, faces, debugging=False):
    """
    Remove duplicates in a list of vertices and faces.

    A face is a triangle made of three vertices.

    :param vertices: a ndarray of vertices, shape (N, 3)
    :param faces: a ndarray of vertex indices, shape (M, 3)
    :param debugging: True to see which vertices are duplicated and how lists are
     modified
    :return: the updated vertices and faces with duplicates removed in place
    """
    # find indices which are duplicated
    uniq_vertices, uniq_inverse = np.unique(vertices, axis=0, return_inverse=True)
    indices, count = np.unique(uniq_inverse, return_counts=True)
    duplicated_indices = indices[count != 1]  # list of vertices which are not unique
    # for each duplicated vertex, build the list of the corresponding identical
    # vertices using the inverse mapping of np.unique. This guarantees a full-row
    # match: the previous element-wise comparison (vertices == uniq_vertices[value])
    # also matched vertices equal in only one or two coordinates, which broke the
    # assumption of three consecutive True entries per duplicate.
    list_duplicated = [
        list(np.flatnonzero(uniq_inverse == value)) for value in duplicated_indices
    ]
    # remove duplicates in vertices, keeping the first occurrence of each group
    remove_vertices = [value for sublist in list_duplicated for value in sublist[1:]]
    vertices = np.delete(vertices, remove_vertices, axis=0)
    print(len(remove_vertices), "duplicated vertices removed")
    # remove duplicated_vertices in faces
    for temp_array in list_duplicated:
        # NOTE: the index loop is required because temp_array is re-bound inside
        # the loop; each iteration must read the updated (shifted) value
        for idy in range(1, len(temp_array)):
            duplicated_value = temp_array[idy]
            faces[faces == duplicated_value] = temp_array[0]
            # temp_array[0] is the unique value, others are duplicates
            # all indices above duplicated_value have to be decreased by 1
            # to keep the match with the number of vertices
            faces[faces > duplicated_value] = faces[faces > duplicated_value] - 1
            # update accordingly all indices above temp_array[idy]
            if debugging:
                print("temp_array before", temp_array)
                print("list_duplicated before", list_duplicated)
            temp_array = [
                (value - 1) if value > duplicated_value else value
                for value in temp_array
            ]
            list_duplicated = [
                [
                    (value - 1) if value > duplicated_value else value
                    for value in sublist
                ]
                for sublist in list_duplicated
            ]
            if debugging:
                print("temp_array after", temp_array)
                print("list_duplicated after", list_duplicated)
    # look for faces with 2 identical vertices
    # (cannot define later a normal to these faces)
    remove_faces = []
    for idx in range(faces.shape[0]):
        if np.unique(faces[idx, :], axis=0).shape[0] != faces[idx, :].shape[0]:
            remove_faces.append(idx)
    faces = np.delete(faces, remove_faces, axis=0)
    print(len(remove_faces), "faces with identical vertices removed")
    return vertices, faces
def surface_indices(surface, plane_indices, margin=3):
    """
    Find surface indices potentially belonging to a plane.

    It crops the surface around the plane with a certain margin, and find corresponding
    surface indices.

    :param surface: the 3D surface binary array
    :param plane_indices: tuple of 3 1D-arrays of plane indices
    :param margin: margin to include around plane indices, in pixels
    :return: 3*1D arrays of surface indices
    """
    valid.valid_ndarray(surface, ndim=3)
    if not isinstance(plane_indices, tuple):
        plane_indices = tuple(plane_indices)
    # clamp the crop origin at 0: when the plane touches the array border,
    # min() - margin goes negative and a negative slice start wraps around in
    # Python, silently returning voxels from the opposite side of the array
    starts = [max(plane_indices[idx].min() - margin, 0) for idx in range(3)]
    surf_indices = np.nonzero(
        surface[
            starts[0] : plane_indices[0].max() + margin,
            starts[1] : plane_indices[1].max() + margin,
            starts[2] : plane_indices[2].max() + margin,
        ]
    )
    # shift the indices of the cropped array back to the frame of the full array,
    # using the same (clamped) crop origin in each dimension
    surf0 = surf_indices[0] + starts[0]
    surf1 = surf_indices[1] + starts[1]
    surf2 = surf_indices[2] + starts[2]
    return surf0, surf1, surf2
def stereographic_proj(
    normals,
    intensity,
    max_angle,
    savedir,
    voxel_size,
    projection_axis,
    min_distance=10,
    background_south=-1000,
    background_north=-1000,
    save_txt=False,
    cmap=default_cmap,
    planes_south=None,
    planes_north=None,
    plot_planes=True,
    scale="linear",
    comment_fig="",
    debugging=False,
):
    """
    Detect facets in an object.

    It uses a stereographic projection of normals to mesh triangles and watershed
    segmentation.

    :param normals: array of normals to mesh triangles (nb_normals rows x 3 columns)
    :param intensity: array of intensities (nb_normals rows x 1 column)
    :param max_angle: maximum angle in degree of the stereographic projection
     (should be larger than 90)
    :param savedir: directory for saving figures
    :param voxel_size: tuple of three numbers corresponding to the real-space
     voxel size in each dimension
    :param projection_axis: the projection is performed on a plane perpendicular to
     that axis (0, 1 or 2)
    :param min_distance: min_distance of corner_peaks()
    :param background_south: threshold for background determination in the projection
     from South
    :param background_north: threshold for background determination in the projection
     from North
    :param save_txt: if True, will save coordinates in a .txt file
    :param cmap: colormap used for plotting pole figures
    :param planes_south: dictionary of crystallographic planes, e.g.
     {'111':angle_with_reflection}
    :param planes_north: dictionary of crystallographic planes, e.g.
     {'111':angle_with_reflection}
    :param plot_planes: if True, will draw circles corresponding to crystallographic
     planes in the pole figure
    :param scale: 'linear' or 'log', scale for the colorbar of the plot
    :param comment_fig: string, comment for the filename when saving figures
    :param debugging: show plots for debugging
    :return:
     - labels_south and labels_north as 2D arrays for each projection from South and
       North
     - a (Nx4) array: projected coordinates of normals from South (u column 0,
       v column 1) and North (u column2 , v column 3). The coordinates are in
       degrees, not indices.
     - the list of rows to remove
    """
    # nested closure: reads density_south/density_north etc. defined later in the
    # enclosing scope (the callback is only connected after they exist)
    def mouse_move(event):
        """Write the density value at the position of the mouse pointer."""
        nonlocal density_south, density_north, u_grid, v_grid, ax0, ax1
        if event.inaxes == ax0:
            index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
            index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
            sys.stdout.write(
                "\rKDE South:" + str("{:.0f}".format(density_south[index_v, index_u]))
            )
            sys.stdout.flush()
        elif event.inaxes == ax1:
            index_u = util.find_nearest(u_grid[0, :], event.xdata, width=None)
            index_v = util.find_nearest(v_grid[:, 0], event.ydata, width=None)
            sys.stdout.write(
                "\rKDE North:" + str("{:.0f}".format(density_north[index_v, index_u]))
            )
            sys.stdout.flush()
        else:
            pass
    # ensure the comment ends with an underscore so filenames stay readable
    if comment_fig and comment_fig[-1] != "_":
        comment_fig = comment_fig + "_"
    radius_mean = 1  # normals are normalized
    stereo_center = 0  # COM of the weighted point density,
    # where the projection plane intersects the reference axis
    # since the normals have their origin at 0,
    # the projection plane is the equator and stereo_center=0
    # check normals for nan
    list_nan = np.argwhere(np.isnan(normals))
    # each nan normal contributes 3 rows to list_nan (one per coordinate),
    # hence the [::3, 0] stride to get one row index per normal
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    # recalculate normals considering the anisotropy of voxel sizes
    # (otherwise angles are wrong)
    # the stereographic projection is in reciprocal space,
    # therefore we need to use the reciprocal voxel sizes
    iso_normals = np.copy(normals)
    iso_normals[:, 0] = iso_normals[:, 0] * 2 * np.pi / voxel_size[0]
    iso_normals[:, 1] = iso_normals[:, 1] * 2 * np.pi / voxel_size[1]
    iso_normals[:, 2] = iso_normals[:, 2] * 2 * np.pi / voxel_size[2]
    # normalize iso_normals
    iso_normals_length = np.sqrt(
        iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] ** 2
    )
    iso_normals = iso_normals / iso_normals_length[:, np.newaxis]
    # calculate the normalized Euclidian metric coordinates u and v from xyz
    stereo_proj, uv_labels = calc_stereoproj_facet(
        projection_axis=projection_axis,
        vectors=iso_normals,
        radius_mean=radius_mean,
        stereo_center=stereo_center,
    )
    # stereo_proj[:, 0] is the euclidian u_south,
    # stereo_proj[:, 1] is the euclidian v_south
    # stereo_proj[:, 2] is the euclidian u_north,
    # stereo_proj[:, 3] is the euclidian v_north
    # remove intensity where stereo_proj is infinite
    list_bad = np.argwhere(
        np.isinf(stereo_proj) | np.isnan(stereo_proj)
    )  # elementwise or
    remove_row = list(set(list_bad[:, 0]))  # remove duplicated row indices
    print(
        "remove_row indices (the stereographic projection is infinite or nan): ",
        remove_row,
        "\n",
    )
    stereo_proj = np.delete(stereo_proj, remove_row, axis=0)
    intensity = np.delete(intensity, remove_row, axis=0)
    # plot the projections from both poles before any segmentation
    fig, _ = gu.contour_stereographic(
        euclidian_u=stereo_proj[:, 0],
        euclidian_v=stereo_proj[:, 1],
        color=intensity,
        radius_mean=radius_mean,
        planes=planes_south,
        max_angle=max_angle,
        scale=scale,
        title="Projection from\nSouth pole",
        plot_planes=plot_planes,
        uv_labels=uv_labels,
        debugging=debugging,
    )
    fig.savefig(savedir + comment_fig + "South pole_" + scale + ".png")
    fig, _ = gu.contour_stereographic(
        euclidian_u=stereo_proj[:, 2],
        euclidian_v=stereo_proj[:, 3],
        color=intensity,
        radius_mean=radius_mean,
        planes=planes_north,
        max_angle=max_angle,
        scale=scale,
        title="Projection from\nNorth pole",
        plot_planes=plot_planes,
        uv_labels=uv_labels,
        debugging=debugging,
    )
    fig.savefig(savedir + comment_fig + "North pole_" + scale + ".png")
    # regrid stereo_proj
    # stereo_proj[:, 0] is the euclidian u_south,
    # stereo_proj[:, 1] is the euclidian v_south
    # stereo_proj[:, 2] is the euclidian u_north,
    # stereo_proj[:, 3] is the euclidian v_north
    nb_points = 4 * max_angle + 1  # 2 grid points per degree in each direction
    v_grid, u_grid = np.mgrid[
        -max_angle : max_angle : (nb_points * 1j),
        -max_angle : max_angle : (nb_points * 1j),
    ]
    # v_grid changes vertically, u_grid horizontally
    nby, nbx = u_grid.shape
    # interpolate the scattered projected points onto the regular (u, v) grid
    density_south = griddata(
        (stereo_proj[:, 0], stereo_proj[:, 1]),
        intensity,
        (u_grid, v_grid),
        method="linear",
    )  # S
    density_north = griddata(
        (stereo_proj[:, 2], stereo_proj[:, 3]),
        intensity,
        (u_grid, v_grid),
        method="linear",
    )  # N
    # normalize for plotting
    density_south = density_south / density_south[density_south > 0].max() * 10000
    density_north = density_north / density_north[density_north > 0].max() * 10000
    if save_txt:
        # save metric coordinates in text file
        density_south[np.isnan(density_south)] = 0.0
        density_north[np.isnan(density_north)] = 0.0
        with open(savedir + "CDI_poles.dat", "w") as file:
            for ii in range(len(v_grid)):
                for jj in range(len(u_grid)):
                    file.write(
                        str(v_grid[ii, 0])
                        + "\t"
                        + str(u_grid[0, jj])
                        + "\t"
                        + str(density_south[ii, jj])
                        + "\t"
                        + str(v_grid[ii, 0])
                        + "\t"
                        + str(u_grid[0, jj])
                        + "\t"
                        + str(density_north[ii, jj])
                        + "\n"
                    )
    # inverse densities for watershed segmentation
    density_south = -1 * density_south
    density_north = -1 * density_north
    # interactive step: the user reads density values with the mouse to choose
    # the background thresholds, then clicks to resume execution
    fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))
    img0 = ax0.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
    ax0.set_xlim(-max_angle, max_angle)
    ax0.set_ylim(-max_angle, max_angle)
    ax0.axis("scaled")
    gu.colorbar(img0)
    ax0.set_title("KDE \nSouth pole")
    img1 = ax1.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
    ax1.set_xlim(-max_angle, max_angle)
    ax1.set_ylim(-max_angle, max_angle)
    ax1.axis("scaled")
    gu.colorbar(img1)
    ax1.set_title("KDE \nNorth pole")
    fig.text(0.32, 0.90, "Read the threshold value in the console", size=16)
    fig.text(0.32, 0.85, "Click on the figure to resume the execution", size=16)
    fig.tight_layout()
    cid = plt.connect("motion_notify_event", mouse_move)
    fig.waitforbuttonpress()
    plt.disconnect(cid)
    print("\n")
    # identification of local minima
    density_south[
        density_south > background_south
    ] = 0  # define the background in the density of normals
    mask_south = np.copy(density_south)
    mask_south[mask_south != 0] = 1
    density_north[
        density_north > background_north
    ] = 0  # define the background in the density of normals
    mask_north = np.copy(density_north)
    mask_north[mask_north != 0] = 1
    # show the resulting background masks and the thresholded densities
    fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, figsize=(12, 9))
    ax0.imshow(mask_south, cmap=cmap, interpolation="nearest")
    ax0.set_title("Background mask South")
    ax0.invert_yaxis()
    img1 = ax1.scatter(u_grid, v_grid, c=density_south, cmap=cmap)
    ax1.set_xlim(-max_angle, max_angle)
    ax1.set_ylim(-max_angle, max_angle)
    ax1.axis("scaled")
    gu.colorbar(img1)
    ax1.set_title("KDE South pole\nafter background definition")
    circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
    ax1.add_artist(circle)
    ax2.imshow(mask_north, cmap=cmap, interpolation="nearest")
    ax2.set_title("Background mask North")
    ax2.invert_yaxis()
    img3 = ax3.scatter(u_grid, v_grid, c=density_north, cmap=cmap)
    ax3.set_xlim(-max_angle, max_angle)
    ax3.set_ylim(-max_angle, max_angle)
    ax3.axis("scaled")
    gu.colorbar(img3)
    ax3.set_title("KDE North pole\nafter background definition")
    circle = patches.Circle((0, 0), 90, color="w", fill=False, linewidth=1.5)
    ax3.add_artist(circle)
    fig.tight_layout()
    plt.pause(0.1)
    ##########################################################################
    # Generate the markers as local maxima of the distance to the background #
    ##########################################################################
    distances_south = ndimage.distance_transform_edt(density_south)
    distances_north = ndimage.distance_transform_edt(density_north)
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        img0 = ax0.imshow(distances_south, cmap=cmap, interpolation="nearest")
        ax0.set_title("Distances South")
        gu.colorbar(img0)
        ax0.invert_yaxis()
        img1 = ax1.imshow(distances_north, cmap=cmap, interpolation="nearest")
        ax1.set_title("Distances North")
        gu.colorbar(img1)
        ax1.invert_yaxis()
        fig.tight_layout()
        plt.pause(0.1)
    # peaks of the distance transform become the watershed seeds
    local_maxi_south = corner_peaks(
        distances_south, exclude_border=False, min_distance=min_distance, indices=False
    )
    local_maxi_north = corner_peaks(
        distances_north, exclude_border=False, min_distance=min_distance, indices=False
    )
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        ax0.imshow(local_maxi_south, interpolation="nearest")
        ax0.set_title("local_maxi South before filtering")
        ax0.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax0.add_artist(circle)
        ax1.imshow(local_maxi_north, interpolation="nearest")
        ax1.set_title("local_maxi North before filtering")
        ax1.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax1.add_artist(circle)
        fig.tight_layout()
        plt.pause(0.1)
    # define the marker for each peak
    markers_south = ndimage.label(local_maxi_south)[0]  # range from 0 to nb_peaks
    # define non overlaping markers for the North projection:
    # the first marker value is (markers_south.max()+1)
    markers_north = ndimage.label(local_maxi_north)[0] + markers_south.max(initial=None)
    # markers_north.min() is 0 since it is the background
    markers_north[markers_north == markers_south.max(initial=None)] = 0
    if debugging:
        fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
        ax0.imshow(
            markers_south, interpolation="nearest", cmap="binary", vmin=0, vmax=1
        )
        ax0.set_title("markers South")
        ax0.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax0.add_artist(circle)
        ax1.imshow(
            markers_north, interpolation="nearest", cmap="binary", vmin=0, vmax=1
        )
        ax1.set_title("markers North")
        ax1.invert_yaxis()
        circle = patches.Ellipse(
            (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
        )
        ax1.add_artist(circle)
        fig.tight_layout()
        plt.pause(0.1)
    ##########################
    # watershed segmentation #
    ##########################
    labels_south = watershed(-1 * distances_south, markers_south, mask=mask_south)
    labels_north = watershed(-1 * distances_north, markers_north, mask=mask_north)
    fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, figsize=(12, 9))
    img0 = ax0.imshow(labels_south, cmap=cmap, interpolation="nearest")
    ax0.set_title("Labels South")
    ax0.invert_yaxis()
    circle = patches.Ellipse(
        (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
    )
    ax0.add_artist(circle)
    gu.colorbar(img0, numticks=int(labels_south.max() + 1))
    img1 = ax1.imshow(labels_north, cmap=cmap, interpolation="nearest")
    ax1.set_title("Labels North")
    ax1.invert_yaxis()
    circle = patches.Ellipse(
        (nbx // 2, nby // 2), 361, 361, color="r", fill=False, linewidth=1.5
    )
    ax1.add_artist(circle)
    gu.colorbar(img1, numticks=int(labels_north.max() + 1))
    fig.tight_layout()
    plt.pause(0.1)
    fig.savefig(savedir + comment_fig + "labels.png")
    return labels_south, labels_north, stereo_proj, remove_row
def surface_gradient(points, support, width=2):
    """
    Calculate the support gradient at point.

    :param points: tuple or list of tuples of 3 integers (z, y, x), position where
     to calculate the gradient vector
    :param support: 3D numpy binary array, being 1 in the crystal and 0 outside
    :param width: half-width of the window where the gradient will be calculated
     (the support gradient is nonzero on a single layer, it avoids missing it)
    :return: a list of normalized vector(s) (array(s) of 3 numbers) oriented
     towards the exterior of the crystal
    """
    gradz, grady, gradx = np.gradient(support, 1)  # support

    def _window_mean(gradient, point):
        # mean of the nonzero gradient values in a cube of half-width `width`
        # centered on point (gradient is nonzero only on a single surface layer)
        window = gradient[
            point[0] - width : point[0] + width + 1,
            point[1] - width : point[1] + width + 1,
            point[2] - width : point[2] + width + 1,
        ]
        nb_nonzero = (window != 0).sum()
        if nb_nonzero == 0:
            return 0
        return window.sum() / nb_nonzero

    vectors = []
    if not isinstance(points, list):
        points = [points]
    for point in points:
        # round the point to integer numbers
        point = [int(np.rint(point[idx])) for idx in range(3)]
        vector_z = _window_mean(gradz, point)
        vector_y = _window_mean(grady, point)
        vector_x = _window_mean(gradx, point)
        # support was 1 inside, 0 outside,
        # the vector needs to be flipped to point towards the outside
        vectors.append(
            [-vector_z, -vector_y, -vector_x]
            / np.linalg.norm([-vector_z, -vector_y, -vector_x])
        )
    return vectors
def taubin_smooth(
    faces,
    vertices,
    cmap=default_cmap,
    iterations=10,
    lamda=0.33,
    mu=0.34,
    radius=0.1,
    debugging=False,
):
    """
    Perform Taubin's smoothing of a mesh.

    It performs a back and forward Laplacian smoothing "without shrinking" of a
    triangulated mesh, as described by Taubin (ICCV '95).

    :param faces: m*3 ndarray of m faces defined by 3 indices of vertices
    :param vertices: n*3 ndarray of n vertices defined by 3 positions
    :param cmap: colormap used for plotting
    :param iterations: number of iterations for smoothing
    :param lamda: smoothing variable 0 < lambda < mu < 1
    :param mu: smoothing variable 0 < lambda < mu < 1
    :param radius: radius around which the normals are integrated in the calculation
     of the density of normals
    :param debugging: show plots for debugging
    :return: smoothened vertices (ndarray n*3), normals to triangle (ndarray m*3),
     weighted density of normals, updated faces, errors
    """
    from mpl_toolkits.mplot3d import Axes3D
    plt.ion()
    print("Original number of vertices:", vertices.shape[0])
    print("Original number of faces:", faces.shape[0])
    new_vertices = np.copy(vertices)
    for k in range(iterations):
        # --- forward (lambda) step: move each vertex towards the weighted mean
        # of its neighbours ---
        # check the unicity of vertices otherwise 0 distance would happen
        if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
            print("\nTaubin smoothing / lambda: duplicated vertices at iteration", k)
            new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
        vertices = np.copy(new_vertices)
        neighbours = find_neighbours(
            vertices, faces
        )  # get the indices of neighboring vertices for each vertex
        indices_edges = detect_edges(
            faces
        )  # find indices of vertices defining non-shared edges (near hole...)
        for i in range(vertices.shape[0]):
            indices = neighbours[i]  # list of indices
            # inverse-distance weights: closer neighbours pull harder
            distances = np.sqrt(
                np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
            )
            weights = distances ** (-1)
            vectoren = weights[:, np.newaxis] * vertices[indices, :]
            totaldist = sum(weights)
            new_vertices[i, :] = vertices[i, :] + lamda * (
                np.sum(vectoren, axis=0) / totaldist - vertices[i, :]
            )
        # vertices on a boundary edge are pinned so holes do not deform
        if indices_edges.size != 0:
            new_vertices[indices_edges, :] = vertices[indices_edges, :]
        # --- backward (mu) step: push back to compensate shrinkage ---
        # check the unicity of vertices otherwise 0 distance would happen
        if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
            print("\nTaubin smoothing / mu: duplicated vertices at iteration", k)
            new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
        vertices = np.copy(new_vertices)
        neighbours = find_neighbours(
            vertices, faces
        )  # get the indices of neighboring vertices for each vertex
        indices_edges = detect_edges(
            faces
        )  # find indices of vertices defining non-shared edges (near hole...)
        for i in range(vertices.shape[0]):
            indices = neighbours[i]  # list of indices
            distances = np.sqrt(
                np.sum((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)
            )
            weights = distances ** (-1)
            vectoren = weights[:, np.newaxis] * vertices[indices, :]
            totaldist = sum(weights)
            new_vertices[i, :] = vertices[i, :] - mu * (
                sum(vectoren) / totaldist - vertices[i, :]
            )
        if indices_edges.size != 0:
            new_vertices[indices_edges, :] = vertices[indices_edges, :]
    # check the unicity of vertices otherwise 0 distance would happen
    if np.unique(new_vertices, axis=0).shape[0] != new_vertices.shape[0]:
        print("\nTaubin smoothing / exiting loop: duplicated vertices")
        new_vertices, faces = remove_duplicates(vertices=new_vertices, faces=faces)
    nan_vertices = np.argwhere(np.isnan(new_vertices[:, 0]))
    print(
        "Number of nan in new_vertices:",
        nan_vertices.shape[0],
        "; Total number of vertices:",
        new_vertices.shape[0],
    )
    # Create an indexed view into the vertex array using
    # the array of three indices for triangles
    tris = new_vertices[faces]
    # Calculate the normal for all the triangles,
    # by taking the cross product of the vectors v1-v0,
    # and v2-v0 in each triangle
    normals = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[::, 0])
    areas = np.array([1 / 2 * np.linalg.norm(normal) for normal in normals])
    normals_length = np.sqrt(
        normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2
    )
    normals = -1 * normals / normals_length[:, np.newaxis]  # flip and normalize normals
    # n is now an array of normalized normals, one per triangle.
    # calculate the colormap for plotting
    # the weighted point density of normals on a sphere
    intensity = np.zeros(normals.shape[0], dtype=normals.dtype)
    for i in range(normals.shape[0]):
        distances = np.sqrt(
            np.sum((normals - normals[i, :]) ** 2, axis=1)
        )  # ndarray of normals.shape[0]
        intensity[i] = np.multiply(
            areas[distances < radius], distances[distances < radius]
        ).sum()
        # normals are weighted by the area of mesh triangles
    intensity = intensity / max(intensity)
    if debugging:
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.scatter(normals[:, 0], normals[:, 1], normals[:, 2], c=intensity, cmap=cmap)
        ax.set_xlim(-1, 1)
        ax.set_xlabel("z")
        ax.set_ylim(-1, 1)
        ax.set_ylabel("y")
        ax.set_zlim(-1, 1)
        ax.set_zlabel("x")
        plt.title("Weighted point densities before KDE")
        plt.pause(0.1)
    # replace each nan normal with its predecessor so downstream code gets
    # finite values; err_normals records which rows were patched
    err_normals = np.argwhere(np.isnan(normals[:, 0]))
    normals[err_normals, :] = normals[err_normals - 1, :]
    plt.ioff()
    # check normals for nan
    list_nan = np.argwhere(np.isnan(normals))
    normals = np.delete(normals, list_nan[::3, 0], axis=0)
    intensity = np.delete(intensity, list_nan[::3, 0], axis=0)
    return new_vertices, normals, areas, intensity, faces, err_normals
def update_logfile(
    support,
    strain_array,
    summary_file,
    allpoints_file,
    label=0,
    angle_plane=np.nan,
    plane_coeffs=(0, 0, 0, 0),
    plane_normal=(0, 0, 0),
):
    """
    Update log files used in the facet_strain.py script.

    :param support: the 3D binary support defining voxels to be saved in the logfile
    :param strain_array: the 3D strain array
    :param summary_file: the handle for the file summarizing strain statistics per facet
    :param allpoints_file: the handle for the file giving the strain and the label
     for each voxel
    :param label: the label of the plane
    :param angle_plane: the angle of the plane with the measurement direction
    :param plane_coeffs: the fit coefficients (a,b,c,d) of the plane such
     that ax+by+cz+d=0
    :param plane_normal: the normal to the plane
    :return: nothing
    """
    if support.ndim != 3 or strain_array.ndim != 3:
        raise ValueError("The support and the strain arrays should be 3D arrays")

    def _column(text):
        # left-justified, 10-character wide column (same layout for both files)
        return "{0: <10}".format(str(text))

    ind_z, ind_y, ind_x = np.nonzero(support == 1)
    nb_points = len(ind_z)
    label_column = _column(label)
    angle_column = _column("{:.3f}".format(angle_plane))
    # one line per voxel of the support, tab-separated columns
    for piz, piy, pix in zip(ind_z, ind_y, ind_x):
        strain_value = strain_array[piz, piy, pix]
        if strain_value == 0:
            # remove the artefact from YY reconstrutions at the bottom facet
            continue
        row = (
            label_column,
            angle_column,
            _column("{:.7f}".format(strain_value)),
            _column(piz),
            _column(piy),
            _column(pix),
        )
        allpoints_file.write("\t".join(row) + "\n")
    # strain statistics over the facet, ignoring zero-valued voxels
    facet_strain = strain_array[support == 1]
    facet_strain[
        facet_strain == 0
    ] = np.nan  # remove the artefact from YY reconstrutions at the bottom facet
    finite_values = facet_strain[~np.isnan(facet_strain)]
    support_strain = np.mean(finite_values)
    support_deviation = np.std(finite_values)
    summary_row = (
        label_column,
        angle_column,
        _column(nb_points),
        _column("{:.7f}".format(support_strain)),
        _column("{:.7f}".format(support_deviation)),
        _column("{:.5f}".format(plane_coeffs[0])),
        _column("{:.5f}".format(plane_coeffs[1])),
        _column("{:.5f}".format(plane_coeffs[2])),
        _column("{:.5f}".format(plane_coeffs[3])),
        _column("{:.5f}".format(plane_normal[0])),
        _column("{:.5f}".format(plane_normal[1])),
        _column("{:.5f}".format(plane_normal[2])),
    )
    summary_file.write("\t".join(summary_row) + "\n")
def upsample(array, upsampling_factor, voxelsizes=None, title="", debugging=False):
    """
    Upsample array using a factor of upsampling.

    :param array: the real array to be upsampled
    :param upsampling_factor: int, the upsampling factor
    :param voxelsizes: list, the voxel sizes of array
    :param title: title for the debugging plot
    :param debugging: True to see plots
    :return: the upsampled array and the list of new voxel sizes
    """
    valid.valid_ndarray(array, ndim=(2, 3))
    ndim = array.ndim
    valid.valid_item(
        value=upsampling_factor,
        allowed_types=int,
        min_included=1,
        name="utils.upsample",
    )
    # default to unit voxel sizes when none are provided
    if voxelsizes is None:
        voxelsizes = (1,) * ndim
    valid.valid_container(
        voxelsizes,
        container_types=(list, tuple, np.ndarray),
        length=ndim,
        item_types=Real,
        min_excluded=0,
        name="utils.upsample",
    )
    # keep the original color scale for the before/after debugging plots
    vmin, vmax = array.min(), array.max()
    if ndim == 3:
        if debugging:
            gu.multislices_plot(
                array,
                sum_frames=False,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        nbz, nby, nbx = array.shape
        numz, numy, numx = (
            nbz * upsampling_factor,
            nby * upsampling_factor,
            nbx * upsampling_factor,
        )
        # the physical extent is preserved: more voxels, each smaller
        newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]
        # target grid in physical coordinates, centered on the array center
        newz, newy, newx = np.meshgrid(
            np.arange(-numz // 2, numz // 2, 1) * newvoxelsizes[0],
            np.arange(-numy // 2, numy // 2, 1) * newvoxelsizes[1],
            np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[2],
            indexing="ij",
        )
        # interpolator defined on the original grid in the same physical frame
        rgi = RegularGridInterpolator(
            (
                np.arange(-nbz // 2, nbz // 2) * voxelsizes[0],
                np.arange(-nby // 2, nby // 2) * voxelsizes[1],
                np.arange(-nbx // 2, nbx // 2) * voxelsizes[2],
            ),
            array,
            method="linear",
            bounds_error=False,
            fill_value=0,
        )
        # evaluate on the flattened target grid (one (npoints, 3) array)
        obj = rgi(
            np.concatenate(
                (
                    newz.reshape((1, newz.size)),
                    newy.reshape((1, newz.size)),
                    newx.reshape((1, newz.size)),
                )
            ).transpose()
        )
        obj = obj.reshape((numz, numy, numx)).astype(array.dtype)
        if debugging:
            gu.multislices_plot(
                obj,
                sum_frames=False,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
    else:  # 2D case
        if debugging:
            gu.imshow_plot(
                array,
                title=title + " before upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
        nby, nbx = array.shape
        numy, numx = nby * upsampling_factor, nbx * upsampling_factor
        newvoxelsizes = [voxsize / upsampling_factor for voxsize in voxelsizes]
        newy, newx = np.meshgrid(
            np.arange(-numy // 2, numy // 2, 1) * newvoxelsizes[0],
            np.arange(-numx // 2, numx // 2, 1) * newvoxelsizes[1],
            indexing="ij",
        )
        rgi = RegularGridInterpolator(
            (
                np.arange(-nby // 2, nby // 2) * voxelsizes[0],
                np.arange(-nbx // 2, nbx // 2) * voxelsizes[1],
            ),
            array,
            method="linear",
            bounds_error=False,
            fill_value=0,
        )
        obj = rgi(
            np.concatenate(
                (newy.reshape((1, newy.size)), newx.reshape((1, newy.size)))
            ).transpose()
        )
        obj = obj.reshape((numy, numx)).astype(array.dtype)
        if debugging:
            gu.imshow_plot(
                obj,
                title=title + " after upsampling",
                vmin=vmin,
                vmax=vmax,
                scale="linear",
                plot_colorbar=True,
                reciprocal_space=False,
                is_orthogonal=True,
            )
    return obj, newvoxelsizes
| [
"scipy.signal.convolve",
"numpy.sqrt",
"bcdi.utils.validation.valid_ndarray",
"numpy.array",
"numpy.arctan2",
"scipy.ndimage.measurements.center_of_mass",
"numpy.linalg.norm",
"bcdi.graph.graph_utils.Colormap",
"numpy.gradient",
"numpy.arange",
"matplotlib.pyplot.imshow",
"bcdi.graph.graph_uti... | [((881, 894), 'bcdi.graph.graph_utils.Colormap', 'gu.Colormap', ([], {}), '()\n', (892, 894), True, 'from bcdi.graph import graph_utils as gu\n'), ((2077, 2129), 'numpy.zeros', 'np.zeros', (['(vectors.shape[0], 4)'], {'dtype': 'vectors.dtype'}), '((vectors.shape[0], 4), dtype=vectors.dtype)\n', (2085, 2129), True, 'import numpy as np\n'), ((5784, 5806), 'numpy.copy', 'np.copy', (['faces[:, 0:2]'], {}), '(faces[:, 0:2])\n', (5791, 5806), True, 'import numpy as np\n'), ((6066, 6111), 'numpy.concatenate', 'np.concatenate', (['(edge1, edge2, edge3)'], {'axis': '(0)'}), '((edge1, edge2, edge3), axis=0)\n', (6080, 6111), True, 'import numpy as np\n'), ((6145, 6208), 'numpy.unique', 'np.unique', (['edges'], {'return_index': '(True)', 'return_counts': '(True)', 'axis': '(0)'}), '(edges, return_index=True, return_counts=True, axis=0)\n', (6154, 6208), True, 'import numpy as np\n'), ((6961, 6980), 'numpy.asarray', 'np.asarray', (['indices'], {}), '(indices)\n', (6971, 6980), True, 'import numpy as np\n'), ((6993, 7025), 'numpy.zeros', 'np.zeros', (['plane_shape'], {'dtype': 'int'}), '(plane_shape, dtype=int)\n', (7001, 7025), True, 'import numpy as np\n'), ((7214, 7248), 'numpy.array', 'np.array', (['[fit[0], fit[1], fit[2]]'], {}), '([fit[0], fit[1], fit[2]])\n', (7222, 7248), True, 'import numpy as np\n'), ((8666, 8710), 'numpy.delete', 'np.delete', (['normals', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(normals, list_nan[::3, 0], axis=0)\n', (8675, 8710), True, 'import numpy as np\n'), ((8727, 8773), 'numpy.delete', 'np.delete', (['intensity', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(intensity, list_nan[::3, 0], axis=0)\n', (8736, 8773), True, 'import numpy as np\n'), ((8905, 8957), 'numpy.zeros', 'np.zeros', (['(normals.shape[0], 2)'], {'dtype': 'normals.dtype'}), '((normals.shape[0], 2), dtype=normals.dtype)\n', (8913, 8957), True, 'import numpy as np\n'), ((9222, 9234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9232, 9234), 
True, 'from matplotlib import pyplot as plt\n'), ((9410, 9428), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (9418, 9428), True, 'from matplotlib import pyplot as plt\n'), ((9433, 9520), 'matplotlib.pyplot.title', 'plt.title', (['"""Equirectangular projection of the weighted point densities before KDE"""'], {}), "(\n 'Equirectangular projection of the weighted point densities before KDE')\n", (9442, 9520), True, 'from matplotlib import pyplot as plt\n'), ((9520, 9534), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (9529, 9534), True, 'from matplotlib import pyplot as plt\n'), ((9578, 9629), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['long_lat.T'], {'bw_method': 'bw_method'}), '(long_lat.T, bw_method=bw_method)\n', (9596, 9629), False, 'from scipy import stats\n'), ((10158, 10170), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10168, 10170), True, 'from matplotlib import pyplot as plt\n'), ((10375, 10393), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (10383, 10393), True, 'from matplotlib import pyplot as plt\n'), ((10398, 10448), 'matplotlib.pyplot.title', 'plt.title', (['"""Equirectangular projection of the KDE"""'], {}), "('Equirectangular projection of the KDE')\n", (10407, 10448), True, 'from matplotlib import pyplot as plt\n'), ((10453, 10467), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (10462, 10467), True, 'from matplotlib import pyplot as plt\n'), ((10590, 10606), 'numpy.copy', 'np.copy', (['density'], {}), '(density)\n', (10597, 10606), True, 'import numpy as np\n'), ((10636, 10648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10646, 10648), True, 'from matplotlib import pyplot as plt\n'), ((10653, 10705), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {'cmap': 'cmap', 'interpolation': '"""nearest"""'}), "(mask, cmap=cmap, interpolation='nearest')\n", (10663, 10705), True, 'from matplotlib import 
pyplot as plt\n'), ((10710, 10738), 'matplotlib.pyplot.title', 'plt.title', (['"""Background mask"""'], {}), "('Background mask')\n", (10719, 10738), True, 'from matplotlib import pyplot as plt\n'), ((10778, 10790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10788, 10790), True, 'from matplotlib import pyplot as plt\n'), ((10976, 10994), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (10984, 10994), True, 'from matplotlib import pyplot as plt\n'), ((10999, 11043), 'matplotlib.pyplot.title', 'plt.title', (['"""KDE after background definition"""'], {}), "('KDE after background definition')\n", (11008, 11043), True, 'from matplotlib import pyplot as plt\n'), ((11048, 11062), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (11057, 11062), True, 'from matplotlib import pyplot as plt\n'), ((11157, 11196), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['density'], {}), '(density)\n', (11187, 11196), False, 'from scipy import ndimage\n'), ((11424, 11515), 'skimage.feature.corner_peaks', 'corner_peaks', (['distances'], {'exclude_border': '(False)', 'min_distance': 'min_distance', 'indices': '(False)'}), '(distances, exclude_border=False, min_distance=min_distance,\n indices=False)\n', (11436, 11515), False, 'from skimage.feature import corner_peaks\n'), ((12034, 12079), 'skimage.morphology.watershed', 'watershed', (['(-1 * distances)', 'markers'], {'mask': 'mask'}), '(-1 * distances, markers, mask=mask)\n', (12043, 12079), False, 'from skimage.morphology import watershed\n'), ((12166, 12178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12176, 12178), True, 'from matplotlib import pyplot as plt\n'), ((12183, 12237), 'matplotlib.pyplot.imshow', 'plt.imshow', (['labels'], {'cmap': 'cmap', 'interpolation': '"""nearest"""'}), "(labels, cmap=cmap, interpolation='nearest')\n", (12193, 12237), True, 'from matplotlib import pyplot as plt\n'), ((12242, 12272), 
'matplotlib.pyplot.title', 'plt.title', (['"""Separated objects"""'], {}), "('Separated objects')\n", (12251, 12272), True, 'from matplotlib import pyplot as plt\n'), ((12277, 12291), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (12289, 12291), True, 'from matplotlib import pyplot as plt\n'), ((12325, 12339), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (12334, 12339), True, 'from matplotlib import pyplot as plt\n'), ((14094, 14155), 'numpy.array', 'np.array', (['[plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]]'], {}), '([plane_coeffs[0], plane_coeffs[1], plane_coeffs[2]])\n', (14102, 14155), True, 'import numpy as np\n'), ((26139, 26217), 'bcdi.utils.utilities.plane_fit', 'util.plane_fit', ([], {'indices': 'indices', 'label': 'label', 'threshold': '(1)', 'debugging': 'debugging'}), '(indices=indices, label=label, threshold=1, debugging=debugging)\n', (26153, 26217), True, 'from bcdi.utils import utilities as util\n'), ((27156, 27173), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (27166, 27173), True, 'import numpy as np\n'), ((27273, 27291), 'numpy.ones', 'np.ones', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (27280, 27291), True, 'import numpy as np\n'), ((27626, 27688), 'numpy.copy', 'np.copy', (['plane[start_z:stop_z, start_y:stop_y, start_x:stop_x]'], {}), '(plane[start_z:stop_z, start_y:stop_y, start_x:stop_x])\n', (27633, 27688), True, 'import numpy as np\n'), ((28196, 28210), 'numpy.copy', 'np.copy', (['plane'], {}), '(plane)\n', (28203, 28210), True, 'import numpy as np\n'), ((28798, 28815), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (28808, 28815), True, 'import numpy as np\n'), ((29251, 29268), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (29261, 29268), True, 'import numpy as np\n'), ((31272, 31320), 'numpy.unique', 'np.unique', (['vertices'], {'axis': '(0)', 'return_inverse': '(True)'}), '(vertices, axis=0, return_inverse=True)\n', (31281, 31320), True, 'import 
numpy as np\n'), ((31342, 31385), 'numpy.unique', 'np.unique', (['uniq_inverse'], {'return_counts': '(True)'}), '(uniq_inverse, return_counts=True)\n', (31351, 31385), True, 'import numpy as np\n'), ((32041, 32085), 'numpy.delete', 'np.delete', (['vertices', 'remove_vertices'], {'axis': '(0)'}), '(vertices, remove_vertices, axis=0)\n', (32050, 32085), True, 'import numpy as np\n'), ((33726, 33764), 'numpy.delete', 'np.delete', (['faces', 'remove_faces'], {'axis': '(0)'}), '(faces, remove_faces, axis=0)\n', (33735, 33764), True, 'import numpy as np\n'), ((34335, 34371), 'bcdi.utils.validation.valid_ndarray', 'valid.valid_ndarray', (['surface'], {'ndim': '(3)'}), '(surface, ndim=3)\n', (34354, 34371), True, 'from bcdi.utils import validation as valid\n'), ((38834, 38878), 'numpy.delete', 'np.delete', (['normals', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(normals, list_nan[::3, 0], axis=0)\n', (38843, 38878), True, 'import numpy as np\n'), ((38895, 38941), 'numpy.delete', 'np.delete', (['intensity', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(intensity, list_nan[::3, 0], axis=0)\n', (38904, 38941), True, 'import numpy as np\n'), ((39181, 39197), 'numpy.copy', 'np.copy', (['normals'], {}), '(normals)\n', (39188, 39197), True, 'import numpy as np\n'), ((39461, 39546), 'numpy.sqrt', 'np.sqrt', (['(iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] ** 2)'], {}), '(iso_normals[:, 0] ** 2 + iso_normals[:, 1] ** 2 + iso_normals[:, 2] **\n 2)\n', (39468, 39546), True, 'import numpy as np\n'), ((40484, 40526), 'numpy.delete', 'np.delete', (['stereo_proj', 'remove_row'], {'axis': '(0)'}), '(stereo_proj, remove_row, axis=0)\n', (40493, 40526), True, 'import numpy as np\n'), ((40543, 40583), 'numpy.delete', 'np.delete', (['intensity', 'remove_row'], {'axis': '(0)'}), '(intensity, remove_row, axis=0)\n', (40552, 40583), True, 'import numpy as np\n'), ((40598, 40908), 'bcdi.graph.graph_utils.contour_stereographic', 'gu.contour_stereographic', ([], {'euclidian_u': 
'stereo_proj[:, 0]', 'euclidian_v': 'stereo_proj[:, 1]', 'color': 'intensity', 'radius_mean': 'radius_mean', 'planes': 'planes_south', 'max_angle': 'max_angle', 'scale': 'scale', 'title': '"""Projection from\nSouth pole"""', 'plot_planes': 'plot_planes', 'uv_labels': 'uv_labels', 'debugging': 'debugging'}), '(euclidian_u=stereo_proj[:, 0], euclidian_v=\n stereo_proj[:, 1], color=intensity, radius_mean=radius_mean, planes=\n planes_south, max_angle=max_angle, scale=scale, title=\n """Projection from\nSouth pole""", plot_planes=plot_planes, uv_labels=\n uv_labels, debugging=debugging)\n', (40622, 40908), True, 'from bcdi.graph import graph_utils as gu\n'), ((41066, 41376), 'bcdi.graph.graph_utils.contour_stereographic', 'gu.contour_stereographic', ([], {'euclidian_u': 'stereo_proj[:, 2]', 'euclidian_v': 'stereo_proj[:, 3]', 'color': 'intensity', 'radius_mean': 'radius_mean', 'planes': 'planes_north', 'max_angle': 'max_angle', 'scale': 'scale', 'title': '"""Projection from\nNorth pole"""', 'plot_planes': 'plot_planes', 'uv_labels': 'uv_labels', 'debugging': 'debugging'}), '(euclidian_u=stereo_proj[:, 2], euclidian_v=\n stereo_proj[:, 3], color=intensity, radius_mean=radius_mean, planes=\n planes_north, max_angle=max_angle, scale=scale, title=\n """Projection from\nNorth pole""", plot_planes=plot_planes, uv_labels=\n uv_labels, debugging=debugging)\n', (41090, 41376), True, 'from bcdi.graph import graph_utils as gu\n'), ((42019, 42118), 'scipy.interpolate.griddata', 'griddata', (['(stereo_proj[:, 0], stereo_proj[:, 1])', 'intensity', '(u_grid, v_grid)'], {'method': '"""linear"""'}), "((stereo_proj[:, 0], stereo_proj[:, 1]), intensity, (u_grid, v_grid\n ), method='linear')\n", (42027, 42118), False, 'from scipy.interpolate import griddata\n'), ((42178, 42277), 'scipy.interpolate.griddata', 'griddata', (['(stereo_proj[:, 2], stereo_proj[:, 3])', 'intensity', '(u_grid, v_grid)'], {'method': '"""linear"""'}), "((stereo_proj[:, 2], stereo_proj[:, 3]), intensity, (u_grid, 
v_grid\n ), method='linear')\n", (42186, 42277), False, 'from scipy.interpolate import griddata\n'), ((43508, 43555), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 9)'}), '(nrows=1, ncols=2, figsize=(12, 9))\n', (43520, 43555), True, 'from matplotlib import pyplot as plt\n'), ((43730, 43747), 'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img0'], {}), '(img0)\n', (43741, 43747), True, 'from bcdi.graph import graph_utils as gu\n'), ((43960, 43977), 'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img1'], {}), '(img1)\n', (43971, 43977), True, 'from bcdi.graph import graph_utils as gu\n'), ((44207, 44253), 'matplotlib.pyplot.connect', 'plt.connect', (['"""motion_notify_event"""', 'mouse_move'], {}), "('motion_notify_event', mouse_move)\n", (44218, 44253), True, 'from matplotlib import pyplot as plt\n'), ((44287, 44306), 'matplotlib.pyplot.disconnect', 'plt.disconnect', (['cid'], {}), '(cid)\n', (44301, 44306), True, 'from matplotlib import pyplot as plt\n'), ((44499, 44521), 'numpy.copy', 'np.copy', (['density_south'], {}), '(density_south)\n', (44506, 44521), True, 'import numpy as np\n'), ((44697, 44719), 'numpy.copy', 'np.copy', (['density_north'], {}), '(density_north)\n', (44704, 44719), True, 'import numpy as np\n'), ((44793, 44840), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(12, 9)'}), '(nrows=2, ncols=2, figsize=(12, 9))\n', (44805, 44840), True, 'from matplotlib import pyplot as plt\n'), ((45144, 45161), 'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img1'], {}), '(img1)\n', (45155, 45161), True, 'from bcdi.graph import graph_utils as gu\n'), ((45240, 45304), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)', '(90)'], {'color': '"""w"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((0, 0), 90, color='w', fill=False, linewidth=1.5)\n", (45254, 45304), False, 'from matplotlib import patches\n'), ((45635, 45652), 
'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img3'], {}), '(img3)\n', (45646, 45652), True, 'from bcdi.graph import graph_utils as gu\n'), ((45731, 45795), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)', '(90)'], {'color': '"""w"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((0, 0), 90, color='w', fill=False, linewidth=1.5)\n", (45745, 45795), False, 'from matplotlib import patches\n'), ((45850, 45864), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (45859, 45864), True, 'from matplotlib import pyplot as plt\n'), ((46125, 46170), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['density_south'], {}), '(density_south)\n', (46155, 46170), False, 'from scipy import ndimage\n'), ((46193, 46238), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['density_north'], {}), '(density_north)\n', (46223, 46238), False, 'from scipy import ndimage\n'), ((46734, 46832), 'skimage.feature.corner_peaks', 'corner_peaks', (['distances_south'], {'exclude_border': '(False)', 'min_distance': 'min_distance', 'indices': '(False)'}), '(distances_south, exclude_border=False, min_distance=\n min_distance, indices=False)\n', (46746, 46832), False, 'from skimage.feature import corner_peaks\n'), ((46865, 46963), 'skimage.feature.corner_peaks', 'corner_peaks', (['distances_north'], {'exclude_border': '(False)', 'min_distance': 'min_distance', 'indices': '(False)'}), '(distances_north, exclude_border=False, min_distance=\n min_distance, indices=False)\n', (46877, 46963), False, 'from skimage.feature import corner_peaks\n'), ((49071, 49134), 'skimage.morphology.watershed', 'watershed', (['(-1 * distances_south)', 'markers_south'], {'mask': 'mask_south'}), '(-1 * distances_south, markers_south, mask=mask_south)\n', (49080, 49134), False, 'from skimage.morphology import watershed\n'), ((49154, 49217), 'skimage.morphology.watershed', 'watershed', (['(-1 * distances_north)', 'markers_north'], {'mask': 
'mask_north'}), '(-1 * distances_north, markers_north, mask=mask_north)\n', (49163, 49217), False, 'from skimage.morphology import watershed\n'), ((49240, 49287), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 9)'}), '(nrows=1, ncols=2, figsize=(12, 9))\n', (49252, 49287), True, 'from matplotlib import pyplot as plt\n'), ((49430, 49519), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (49445, 49519), False, 'from matplotlib import patches\n'), ((49759, 49848), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (49774, 49848), False, 'from matplotlib import patches\n'), ((49973, 49987), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (49982, 49987), True, 'from matplotlib import pyplot as plt\n'), ((50723, 50746), 'numpy.gradient', 'np.gradient', (['support', '(1)'], {}), '(support, 1)\n', (50734, 50746), True, 'import numpy as np\n'), ((53479, 53488), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (53486, 53488), True, 'from matplotlib import pyplot as plt\n'), ((53625, 53642), 'numpy.copy', 'np.copy', (['vertices'], {}), '(vertices)\n', (53632, 53642), True, 'import numpy as np\n'), ((56990, 57048), 'numpy.cross', 'np.cross', (['(tris[:, 1] - tris[:, 0])', '(tris[:, 2] - tris[:, 0])'], {}), '(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[:, 0])\n', (56998, 57048), True, 'import numpy as np\n'), ((57148, 57217), 'numpy.sqrt', 'np.sqrt', (['(normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2)'], {}), '(normals[:, 0] ** 2 + normals[:, 1] ** 2 + normals[:, 2] ** 2)\n', (57155, 57217), True, 'import numpy as np\n'), 
((57501, 57548), 'numpy.zeros', 'np.zeros', (['normals.shape[0]'], {'dtype': 'normals.dtype'}), '(normals.shape[0], dtype=normals.dtype)\n', (57509, 57548), True, 'import numpy as np\n'), ((58460, 58470), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (58468, 58470), True, 'from matplotlib import pyplot as plt\n'), ((58560, 58604), 'numpy.delete', 'np.delete', (['normals', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(normals, list_nan[::3, 0], axis=0)\n', (58569, 58604), True, 'import numpy as np\n'), ((58621, 58667), 'numpy.delete', 'np.delete', (['intensity', 'list_nan[::3, 0]'], {'axis': '(0)'}), '(intensity, list_nan[::3, 0], axis=0)\n', (58630, 58667), True, 'import numpy as np\n'), ((59772, 59796), 'numpy.nonzero', 'np.nonzero', (['(support == 1)'], {}), '(support == 1)\n', (59782, 59796), True, 'import numpy as np\n'), ((62619, 62658), 'bcdi.utils.validation.valid_ndarray', 'valid.valid_ndarray', (['array'], {'ndim': '(2, 3)'}), '(array, ndim=(2, 3))\n', (62638, 62658), True, 'from bcdi.utils import validation as valid\n'), ((62686, 62789), 'bcdi.utils.validation.valid_item', 'valid.valid_item', ([], {'value': 'upsampling_factor', 'allowed_types': 'int', 'min_included': '(1)', 'name': '"""utils.upsample"""'}), "(value=upsampling_factor, allowed_types=int, min_included=1,\n name='utils.upsample')\n", (62702, 62789), True, 'from bcdi.utils import validation as valid\n'), ((62889, 63038), 'bcdi.utils.validation.valid_container', 'valid.valid_container', (['voxelsizes'], {'container_types': '(list, tuple, np.ndarray)', 'length': 'ndim', 'item_types': 'Real', 'min_excluded': '(0)', 'name': '"""utils.upsample"""'}), "(voxelsizes, container_types=(list, tuple, np.ndarray),\n length=ndim, item_types=Real, min_excluded=0, name='utils.upsample')\n", (62910, 63038), True, 'from bcdi.utils import validation as valid\n'), ((8633, 8650), 'numpy.isnan', 'np.isnan', (['normals'], {}), '(normals)\n', (8641, 8650), True, 'import numpy as np\n'), ((9096, 9136), 
'numpy.arctan2', 'np.arctan2', (['normals[i, 1]', 'normals[i, 0]'], {}), '(normals[i, 1], normals[i, 0])\n', (9106, 9136), True, 'import numpy as np\n'), ((9175, 9199), 'numpy.arcsin', 'np.arcsin', (['normals[i, 2]'], {}), '(normals[i, 2])\n', (9184, 9199), True, 'import numpy as np\n'), ((11223, 11235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11233, 11235), True, 'from matplotlib import pyplot as plt\n'), ((11244, 11301), 'matplotlib.pyplot.imshow', 'plt.imshow', (['distances'], {'cmap': 'cmap', 'interpolation': '"""nearest"""'}), "(distances, cmap=cmap, interpolation='nearest')\n", (11254, 11301), True, 'from matplotlib import pyplot as plt\n'), ((11310, 11332), 'matplotlib.pyplot.title', 'plt.title', (['"""Distances"""'], {}), "('Distances')\n", (11319, 11332), True, 'from matplotlib import pyplot as plt\n'), ((11374, 11388), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (11383, 11388), True, 'from matplotlib import pyplot as plt\n'), ((11555, 11567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11565, 11567), True, 'from matplotlib import pyplot as plt\n'), ((11576, 11623), 'matplotlib.pyplot.imshow', 'plt.imshow', (['local_maxi'], {'interpolation': '"""nearest"""'}), "(local_maxi, interpolation='nearest')\n", (11586, 11623), True, 'from matplotlib import pyplot as plt\n'), ((11632, 11655), 'matplotlib.pyplot.title', 'plt.title', (['"""local_maxi"""'], {}), "('local_maxi')\n", (11641, 11655), True, 'from matplotlib import pyplot as plt\n'), ((11697, 11711), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (11706, 11711), True, 'from matplotlib import pyplot as plt\n'), ((11762, 11787), 'scipy.ndimage.label', 'ndimage.label', (['local_maxi'], {}), '(local_maxi)\n', (11775, 11787), False, 'from scipy import ndimage\n'), ((11817, 11829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11827, 11829), True, 'from matplotlib import pyplot as plt\n'), ((11838, 11882), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['markers'], {'interpolation': '"""nearest"""'}), "(markers, interpolation='nearest')\n", (11848, 11882), True, 'from matplotlib import pyplot as plt\n'), ((11891, 11911), 'matplotlib.pyplot.title', 'plt.title', (['"""markers"""'], {}), "('markers')\n", (11900, 11911), True, 'from matplotlib import pyplot as plt\n'), ((11920, 11934), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11932, 11934), True, 'from matplotlib import pyplot as plt\n'), ((11976, 11990), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (11985, 11990), True, 'from matplotlib import pyplot as plt\n'), ((22594, 22611), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (22604, 22611), True, 'import numpy as np\n'), ((24392, 24418), 'numpy.zeros', 'np.zeros', (['indices.shape[1]'], {}), '(indices.shape[1])\n', (24400, 24418), True, 'import numpy as np\n'), ((24449, 24470), 'scipy.ndimage.measurements.center_of_mass', 'center_of_mass', (['plane'], {}), '(plane)\n', (24463, 24470), False, 'from scipy.ndimage.measurements import center_of_mass\n'), ((24739, 24754), 'numpy.median', 'np.median', (['dist'], {}), '(dist)\n', (24748, 24754), True, 'import numpy as np\n'), ((27709, 27743), 'scipy.signal.convolve', 'convolve', (['obj', 'kernel'], {'mode': '"""same"""'}), "(obj, kernel, mode='same')\n", (27717, 27743), False, 'from scipy.signal import convolve\n'), ((27785, 27802), 'numpy.nonzero', 'np.nonzero', (['coord'], {}), '(coord)\n', (27795, 27802), True, 'import numpy as np\n'), ((31664, 31712), 'numpy.argwhere', 'np.argwhere', (['(vertices == uniq_vertices[value, :])'], {}), '(vertices == uniq_vertices[value, :])\n', (31675, 31712), True, 'import numpy as np\n'), ((38801, 38818), 'numpy.isnan', 'np.isnan', (['normals'], {}), '(normals)\n', (38809, 38818), True, 'import numpy as np\n'), ((46283, 46313), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', 
(46295, 46313), True, 'from matplotlib import pyplot as plt\n'), ((46442, 46459), 'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img0'], {}), '(img0)\n', (46453, 46459), True, 'from bcdi.graph import graph_utils as gu\n'), ((46615, 46632), 'bcdi.graph.graph_utils.colorbar', 'gu.colorbar', (['img1'], {}), '(img1)\n', (46626, 46632), True, 'from bcdi.graph import graph_utils as gu\n'), ((46695, 46709), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (46704, 46709), True, 'from matplotlib import pyplot as plt\n'), ((47017, 47047), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (47029, 47047), True, 'from matplotlib import pyplot as plt\n'), ((47213, 47302), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (47228, 47302), False, 'from matplotlib import patches\n'), ((47517, 47606), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (47532, 47606), False, 'from matplotlib import patches\n'), ((47691, 47705), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (47700, 47705), True, 'from matplotlib import pyplot as plt\n'), ((47765, 47796), 'scipy.ndimage.label', 'ndimage.label', (['local_maxi_south'], {}), '(local_maxi_south)\n', (47778, 47796), False, 'from scipy import ndimage\n'), ((48209, 48239), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (48221, 48239), True, 'from matplotlib import pyplot as plt\n'), ((48435, 48524), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 
'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (48450, 48524), False, 'from matplotlib import patches\n'), ((48769, 48858), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(nbx // 2, nby // 2)', '(361)', '(361)'], {'color': '"""r"""', 'fill': '(False)', 'linewidth': '(1.5)'}), "((nbx // 2, nby // 2), 361, 361, color='r', fill=False,\n linewidth=1.5)\n", (48784, 48858), False, 'from matplotlib import patches\n'), ((48943, 48957), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (48952, 48957), True, 'from matplotlib import pyplot as plt\n'), ((54021, 54042), 'numpy.copy', 'np.copy', (['new_vertices'], {}), '(new_vertices)\n', (54028, 54042), True, 'import numpy as np\n'), ((55277, 55298), 'numpy.copy', 'np.copy', (['new_vertices'], {}), '(new_vertices)\n', (55284, 55298), True, 'import numpy as np\n'), ((56511, 56539), 'numpy.isnan', 'np.isnan', (['new_vertices[:, 0]'], {}), '(new_vertices[:, 0])\n', (56519, 56539), True, 'import numpy as np\n'), ((57975, 57987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (57985, 57987), True, 'from matplotlib import pyplot as plt\n'), ((58001, 58012), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (58007, 58012), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((58271, 58319), 'matplotlib.pyplot.title', 'plt.title', (['"""Weighted point densities before KDE"""'], {}), "('Weighted point densities before KDE')\n", (58280, 58319), True, 'from matplotlib import pyplot as plt\n'), ((58328, 58342), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (58337, 58342), True, 'from matplotlib import pyplot as plt\n'), ((58373, 58396), 'numpy.isnan', 'np.isnan', (['normals[:, 0]'], {}), '(normals[:, 0])\n', (58381, 58396), True, 'import numpy as np\n'), ((58527, 58544), 'numpy.isnan', 'np.isnan', (['normals'], {}), '(normals)\n', (58535, 58544), True, 'import numpy as np\n'), ((7517, 7545), 
'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (7531, 7545), True, 'import numpy as np\n'), ((10743, 10752), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10750, 10752), True, 'from matplotlib import pyplot as plt\n'), ((12296, 12305), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12303, 12305), True, 'from matplotlib import pyplot as plt\n'), ((15233, 15252), 'numpy.zeros', 'np.zeros', (['nb_points'], {}), '(nb_points)\n', (15241, 15252), True, 'import numpy as np\n'), ((15629, 15653), 'numpy.zeros', 'np.zeros', (['original_shape'], {}), '(original_shape)\n', (15637, 15653), True, 'import numpy as np\n'), ((24173, 24190), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (24183, 24190), True, 'import numpy as np\n'), ((24543, 24658), 'numpy.sqrt', 'np.sqrt', (['((indices[0, point] - x_com) ** 2 + (indices[1, point] - y_com) ** 2 + (\n indices[2, point] - z_com) ** 2)'], {}), '((indices[0, point] - x_com) ** 2 + (indices[1, point] - y_com) ** 2 +\n (indices[2, point] - z_com) ** 2)\n', (24550, 24658), True, 'import numpy as np\n'), ((25901, 25918), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (25911, 25918), True, 'import numpy as np\n'), ((28489, 28511), 'numpy.nonzero', 'np.nonzero', (['temp_plane'], {}), '(temp_plane)\n', (28499, 28511), True, 'import numpy as np\n'), ((29025, 29061), 'numpy.dot', 'np.dot', (['plane_normal', 'gradients[idx]'], {}), '(plane_normal, gradients[idx])\n', (29031, 29061), True, 'import numpy as np\n'), ((37695, 37751), 'bcdi.utils.utilities.find_nearest', 'util.find_nearest', (['u_grid[0, :]', 'event.xdata'], {'width': 'None'}), '(u_grid[0, :], event.xdata, width=None)\n', (37712, 37751), True, 'from bcdi.utils import utilities as util\n'), ((37774, 37830), 'bcdi.utils.utilities.find_nearest', 'util.find_nearest', (['v_grid[:, 0]', 'event.ydata'], {'width': 'None'}), '(v_grid[:, 0], event.ydata, width=None)\n', (37791, 37830), True, 'from bcdi.utils 
import utilities as util\n'), ((37974, 37992), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (37990, 37992), False, 'import sys\n'), ((40187, 40208), 'numpy.isinf', 'np.isinf', (['stereo_proj'], {}), '(stereo_proj)\n', (40195, 40208), True, 'import numpy as np\n'), ((40211, 40232), 'numpy.isnan', 'np.isnan', (['stereo_proj'], {}), '(stereo_proj)\n', (40219, 40232), True, 'import numpy as np\n'), ((42600, 42623), 'numpy.isnan', 'np.isnan', (['density_south'], {}), '(density_south)\n', (42608, 42623), True, 'import numpy as np\n'), ((42653, 42676), 'numpy.isnan', 'np.isnan', (['density_north'], {}), '(density_north)\n', (42661, 42676), True, 'import numpy as np\n'), ((47966, 47997), 'scipy.ndimage.label', 'ndimage.label', (['local_maxi_north'], {}), '(local_maxi_north)\n', (47979, 47997), False, 'from scipy import ndimage\n'), ((57628, 57674), 'numpy.sum', 'np.sum', (['((normals - normals[i, :]) ** 2)'], {'axis': '(1)'}), '((normals - normals[i, :]) ** 2, axis=1)\n', (57634, 57674), True, 'import numpy as np\n'), ((63186, 63376), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['array'], {'sum_frames': '(False)', 'title': "(title + ' before upsampling')", 'vmin': 'vmin', 'vmax': 'vmax', 'scale': '"""linear"""', 'plot_colorbar': '(True)', 'reciprocal_space': '(False)', 'is_orthogonal': '(True)'}), "(array, sum_frames=False, title=title +\n ' before upsampling', vmin=vmin, vmax=vmax, scale='linear',\n plot_colorbar=True, reciprocal_space=False, is_orthogonal=True)\n", (63205, 63376), True, 'from bcdi.graph import graph_utils as gu\n'), ((64825, 65012), 'bcdi.graph.graph_utils.multislices_plot', 'gu.multislices_plot', (['obj'], {'sum_frames': '(False)', 'title': "(title + ' after upsampling')", 'vmin': 'vmin', 'vmax': 'vmax', 'scale': '"""linear"""', 'plot_colorbar': '(True)', 'reciprocal_space': '(False)', 'is_orthogonal': '(True)'}), "(obj, sum_frames=False, title=title +\n ' after upsampling', vmin=vmin, vmax=vmax, scale='linear',\n 
plot_colorbar=True, reciprocal_space=False, is_orthogonal=True)\n", (64844, 65012), True, 'from bcdi.graph import graph_utils as gu\n'), ((65220, 65388), 'bcdi.graph.graph_utils.imshow_plot', 'gu.imshow_plot', (['array'], {'title': "(title + ' before upsampling')", 'vmin': 'vmin', 'vmax': 'vmax', 'scale': '"""linear"""', 'plot_colorbar': '(True)', 'reciprocal_space': '(False)', 'is_orthogonal': '(True)'}), "(array, title=title + ' before upsampling', vmin=vmin, vmax=\n vmax, scale='linear', plot_colorbar=True, reciprocal_space=False,\n is_orthogonal=True)\n", (65234, 65388), True, 'from bcdi.graph import graph_utils as gu\n'), ((66482, 66646), 'bcdi.graph.graph_utils.imshow_plot', 'gu.imshow_plot', (['obj'], {'title': "(title + ' after upsampling')", 'vmin': 'vmin', 'vmax': 'vmax', 'scale': '"""linear"""', 'plot_colorbar': '(True)', 'reciprocal_space': '(False)', 'is_orthogonal': '(True)'}), "(obj, title=title + ' after upsampling', vmin=vmin, vmax=vmax,\n scale='linear', plot_colorbar=True, reciprocal_space=False,\n is_orthogonal=True)\n", (66496, 66646), True, 'from bcdi.graph import graph_utils as gu\n'), ((5829, 5849), 'numpy.copy', 'np.copy', (['faces[:, 0]'], {}), '(faces[:, 0])\n', (5836, 5849), True, 'import numpy as np\n'), ((5851, 5871), 'numpy.copy', 'np.copy', (['faces[:, 2]'], {}), '(faces[:, 2])\n', (5858, 5871), True, 'import numpy as np\n'), ((5898, 5918), 'numpy.copy', 'np.copy', (['faces[:, 1]'], {}), '(faces[:, 1])\n', (5905, 5918), True, 'import numpy as np\n'), ((5920, 5940), 'numpy.copy', 'np.copy', (['faces[:, 2]'], {}), '(faces[:, 2])\n', (5927, 5940), True, 'import numpy as np\n'), ((11341, 11350), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11348, 11350), True, 'from matplotlib import pyplot as plt\n'), ((11664, 11673), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11671, 11673), True, 'from matplotlib import pyplot as plt\n'), ((11943, 11952), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11950, 11952), True, 
'from matplotlib import pyplot as plt\n'), ((38049, 38105), 'bcdi.utils.utilities.find_nearest', 'util.find_nearest', (['u_grid[0, :]', 'event.xdata'], {'width': 'None'}), '(u_grid[0, :], event.xdata, width=None)\n', (38066, 38105), True, 'from bcdi.utils import utilities as util\n'), ((38128, 38184), 'bcdi.utils.utilities.find_nearest', 'util.find_nearest', (['v_grid[:, 0]', 'event.ydata'], {'width': 'None'}), '(v_grid[:, 0], event.ydata, width=None)\n', (38145, 38184), True, 'from bcdi.utils import utilities as util\n'), ((38328, 38346), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (38344, 38346), False, 'import sys\n'), ((50944, 50963), 'numpy.rint', 'np.rint', (['point[idx]'], {}), '(point[idx])\n', (50951, 50963), True, 'import numpy as np\n'), ((52338, 52387), 'numpy.linalg.norm', 'np.linalg.norm', (['[-vector_z, -vector_y, -vector_x]'], {}), '([-vector_z, -vector_y, -vector_x])\n', (52352, 52387), True, 'import numpy as np\n'), ((54461, 54521), 'numpy.sum', 'np.sum', (['((vertices[indices, :] - vertices[i, :]) ** 2)'], {'axis': '(1)'}), '((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)\n', (54467, 54521), True, 'import numpy as np\n'), ((55718, 55778), 'numpy.sum', 'np.sum', (['((vertices[indices, :] - vertices[i, :]) ** 2)'], {'axis': '(1)'}), '((vertices[indices, :] - vertices[i, :]) ** 2, axis=1)\n', (55724, 55778), True, 'import numpy as np\n'), ((56256, 56287), 'numpy.unique', 'np.unique', (['new_vertices'], {'axis': '(0)'}), '(new_vertices, axis=0)\n', (56265, 56287), True, 'import numpy as np\n'), ((57080, 57102), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (57094, 57102), True, 'import numpy as np\n'), ((57740, 57809), 'numpy.multiply', 'np.multiply', (['areas[distances < radius]', 'distances[distances < radius]'], {}), '(areas[distances < radius], distances[distances < radius])\n', (57751, 57809), True, 'import numpy as np\n'), ((61017, 61036), 'numpy.isnan', 'np.isnan', (['str_array'], {}), 
'(str_array)\n', (61025, 61036), True, 'import numpy as np\n'), ((61081, 61100), 'numpy.isnan', 'np.isnan', (['str_array'], {}), '(str_array)\n', (61089, 61100), True, 'import numpy as np\n'), ((63847, 63882), 'numpy.arange', 'np.arange', (['(-numz // 2)', '(numz // 2)', '(1)'], {}), '(-numz // 2, numz // 2, 1)\n', (63856, 63882), True, 'import numpy as np\n'), ((63915, 63950), 'numpy.arange', 'np.arange', (['(-numy // 2)', '(numy // 2)', '(1)'], {}), '(-numy // 2, numy // 2, 1)\n', (63924, 63950), True, 'import numpy as np\n'), ((63983, 64018), 'numpy.arange', 'np.arange', (['(-numx // 2)', '(numx // 2)', '(1)'], {}), '(-numx // 2, numx // 2, 1)\n', (63992, 64018), True, 'import numpy as np\n'), ((65751, 65786), 'numpy.arange', 'np.arange', (['(-numy // 2)', '(numy // 2)', '(1)'], {}), '(-numy // 2, numy // 2, 1)\n', (65760, 65786), True, 'import numpy as np\n'), ((65819, 65854), 'numpy.arange', 'np.arange', (['(-numx // 2)', '(numx // 2)', '(1)'], {}), '(-numx // 2, numx // 2, 1)\n', (65828, 65854), True, 'import numpy as np\n'), ((15539, 15567), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (15553, 15567), True, 'import numpy as np\n'), ((29355, 29374), 'numpy.asarray', 'np.asarray', (['indices'], {}), '(indices)\n', (29365, 29374), True, 'import numpy as np\n'), ((33608, 33640), 'numpy.unique', 'np.unique', (['faces[idx, :]'], {'axis': '(0)'}), '(faces[idx, :], axis=0)\n', (33617, 33640), True, 'import numpy as np\n'), ((53761, 53792), 'numpy.unique', 'np.unique', (['new_vertices'], {'axis': '(0)'}), '(new_vertices, axis=0)\n', (53770, 53792), True, 'import numpy as np\n'), ((55021, 55052), 'numpy.unique', 'np.unique', (['new_vertices'], {'axis': '(0)'}), '(new_vertices, axis=0)\n', (55030, 55052), True, 'import numpy as np\n'), ((64146, 64176), 'numpy.arange', 'np.arange', (['(-nbz // 2)', '(nbz // 2)'], {}), '(-nbz // 2, nbz // 2)\n', (64155, 64176), True, 'import numpy as np\n'), ((64210, 64240), 'numpy.arange', 
'np.arange', (['(-nby // 2)', '(nby // 2)'], {}), '(-nby // 2, nby // 2)\n', (64219, 64240), True, 'import numpy as np\n'), ((64274, 64304), 'numpy.arange', 'np.arange', (['(-nbx // 2)', '(nbx // 2)'], {}), '(-nbx // 2, nbx // 2)\n', (64283, 64304), True, 'import numpy as np\n'), ((65982, 66012), 'numpy.arange', 'np.arange', (['(-nby // 2)', '(nby // 2)'], {}), '(-nby // 2, nby // 2)\n', (65991, 66012), True, 'import numpy as np\n'), ((66046, 66076), 'numpy.arange', 'np.arange', (['(-nbx // 2)', '(nbx // 2)'], {}), '(-nbx // 2, nbx // 2)\n', (66055, 66076), True, 'import numpy as np\n'), ((15875, 16006), 'numpy.concatenate', 'np.concatenate', (['(plane_newindices0[:, np.newaxis], plane_newindices1[:, np.newaxis],\n plane_newindices2[:, np.newaxis])'], {'axis': '(1)'}), '((plane_newindices0[:, np.newaxis], plane_newindices1[:, np.\n newaxis], plane_newindices2[:, np.newaxis]), axis=1)\n', (15889, 16006), True, 'import numpy as np\n'), ((16205, 16300), 'numpy.concatenate', 'np.concatenate', (['(surf0[:, np.newaxis], surf1[:, np.newaxis], surf2[:, np.newaxis])'], {'axis': '(1)'}), '((surf0[:, np.newaxis], surf1[:, np.newaxis], surf2[:, np.\n newaxis]), axis=1)\n', (16219, 16300), True, 'import numpy as np\n'), ((22910, 22927), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (22920, 22927), True, 'import numpy as np\n'), ((23875, 23892), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (23885, 23892), True, 'import numpy as np\n'), ((24833, 24850), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (24843, 24850), True, 'import numpy as np\n'), ((25573, 25590), 'numpy.nonzero', 'np.nonzero', (['plane'], {}), '(plane)\n', (25583, 25590), True, 'import numpy as np\n'), ((27891, 27908), 'numpy.nonzero', 'np.nonzero', (['coord'], {}), '(coord)\n', (27901, 27908), True, 'import numpy as np\n'), ((27924, 27939), 'numpy.nonzero', 'np.nonzero', (['obj'], {}), '(obj)\n', (27934, 27939), True, 'import numpy as np\n'), ((30261, 30280), 
'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (30269, 30280), True, 'import numpy as np\n'), ((30426, 30445), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (30434, 30445), True, 'import numpy as np\n'), ((30591, 30610), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (30599, 30610), True, 'import numpy as np\n'), ((54758, 54782), 'numpy.sum', 'np.sum', (['vectoren'], {'axis': '(0)'}), '(vectoren, axis=0)\n', (54764, 54782), True, 'import numpy as np\n'), ((30297, 30325), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (30311, 30325), True, 'import numpy as np\n'), ((30462, 30490), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (30476, 30490), True, 'import numpy as np\n'), ((30627, 30655), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (30641, 30655), True, 'import numpy as np\n')] |
"""
Utilities for testing
"""
from os.path import join as pjoin
from pkg_resources import resource_filename
import numpy as np
from peakdet import io, operations
from peakdet.utils import _get_call
def get_call_func(arg1, arg2, *, kwarg1=10, kwarg2=20,
                  exclude=['exclude', 'serializable'],
                  serializable=True):
    """Fixture function for testing `peakdet.utils._get_call()`.

    Conditionally reassigns its keyword arguments and then delegates to
    `_get_call`, which serializes the call state. NOTE(review): `_get_call`
    presumably inspects the caller's frame/signature, so this body is kept
    free of extra locals and the mutable list default is left as-is on
    purpose (it is never mutated here) -- confirm against `peakdet.utils`
    before refactoring.
    """
    # bump the kwargs so tests can observe non-default captured values
    if arg1 > 10:
        kwarg1 = kwarg1 + arg1
    if arg2 > 20:
        kwarg2 = kwarg2 + arg2
    return _get_call(exclude=exclude, serializable=serializable)
def get_test_data_path(fname=None):
    """Return the `peakdet` test-data directory, or a file inside it.

    When `fname` is given, the full path to that file is returned;
    otherwise the directory path itself is returned.
    """
    data_dir = resource_filename('peakdet', 'tests/data')
    if fname is None:
        return data_dir
    return pjoin(data_dir, fname)
def get_sample_data():
    """Generate a tiny sine waveform plus known peak/trough indices for tests.

    Returns
    -------
    tuple of (np.ndarray, np.ndarray, np.ndarray)
        The 40-sample waveform, the peak indices, and the trough indices.
    """
    waveform = np.sin(np.linspace(0, 20, 40))
    peak_idx = np.array([3, 15, 28])
    trough_idx = np.array([9, 21, 34])
    return waveform, peak_idx, trough_idx
def get_peak_data():
    """Load the bundled ECG recording, band-pass filter it, and find peaks.

    Returns the `Peaks` physio object produced by
    `peakdet.operations.peakfind_physio`.
    """
    raw = io.load_physio(get_test_data_path('ECG.csv'), fs=1000)
    filtered = operations.filter_physio(raw, [5., 15.], 'bandpass')
    return operations.peakfind_physio(filtered)
| [
"peakdet.operations.peakfind_physio",
"os.path.join",
"pkg_resources.resource_filename",
"numpy.array",
"numpy.linspace",
"peakdet.utils._get_call",
"peakdet.operations.filter_physio"
] | [((519, 572), 'peakdet.utils._get_call', '_get_call', ([], {'exclude': 'exclude', 'serializable': 'serializable'}), '(exclude=exclude, serializable=serializable)\n', (528, 572), False, 'from peakdet.utils import _get_call\n'), ((680, 722), 'pkg_resources.resource_filename', 'resource_filename', (['"""peakdet"""', '"""tests/data"""'], {}), "('peakdet', 'tests/data')\n", (697, 722), False, 'from pkg_resources import resource_filename\n'), ((1183, 1240), 'peakdet.operations.filter_physio', 'operations.filter_physio', (['physio', '[5.0, 15.0]', '"""bandpass"""'], {}), "(physio, [5.0, 15.0], 'bandpass')\n", (1207, 1240), False, 'from peakdet import io, operations\n'), ((1251, 1283), 'peakdet.operations.peakfind_physio', 'operations.peakfind_physio', (['filt'], {}), '(filt)\n', (1277, 1283), False, 'from peakdet import io, operations\n'), ((734, 752), 'os.path.join', 'pjoin', (['path', 'fname'], {}), '(path, fname)\n', (739, 752), True, 'from os.path import join as pjoin\n'), ((895, 917), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(40)'], {}), '(0, 20, 40)\n', (906, 917), True, 'import numpy as np\n'), ((940, 961), 'numpy.array', 'np.array', (['[3, 15, 28]'], {}), '([3, 15, 28])\n', (948, 961), True, 'import numpy as np\n'), ((963, 984), 'numpy.array', 'np.array', (['[9, 21, 34]'], {}), '([9, 21, 34])\n', (971, 984), True, 'import numpy as np\n')] |
import random
import itertools
import numpy as np
import scipy
from scipy.special import wofz
import pandas as pd
import plotly.express as px
import collections
from functools import partial
def G(x, alpha):
    """Gaussian line shape at `x` with HWHM `alpha`, normalized to unit area."""
    ln2 = np.log(2)
    return np.sqrt(ln2 / np.pi) / alpha * np.exp(-ln2 * (x / alpha) ** 2)
def L(x, gamma):
    """Lorentzian line shape at `x` with HWHM `gamma`, normalized to unit area."""
    denom = x ** 2 + gamma ** 2
    return gamma / np.pi / denom
def V(x, alpha, gamma):
    """Voigt line shape at `x`.

    Convolution of a Gaussian (HWHM `alpha`) and a Lorentzian (HWHM `gamma`),
    evaluated through the Faddeeva function `wofz`.
    """
    sigma = alpha / np.sqrt(2 * np.log(2))
    z = (x + 1j * gamma) / sigma / np.sqrt(2)
    return np.real(wofz(z)) / sigma / np.sqrt(2 * np.pi)
def Poly(x, a, b):
    """Quadratic background ``10**a * (x - b)**2`` shifted to vertex `b`."""
    scale = 10 ** a
    return scale * (x - b) ** 2
# ------- Peaks -----------
def generate_example_raman(
    x,
    n=9,
    scale_range=(4, 5.4),
    shift_range=(-100, 190),
    alpha_range=(0.2, 4),
    gamma_range=(0.2, 4),
):
    """Generate a synthetic Raman spectrum as a sum of `n` random Voigt peaks.

    Each peak's intensity exponent, wavenumber shift, Gaussian HWHM, and
    Lorentzian HWHM are drawn uniformly from the corresponding ``*_range``.

    Parameters
    ----------
    x
        Input array. Usually np.linspace array.
    n
        Number of peaks.
    scale_range
        Exponent range for the ``10**scale`` intensity multiplier.
    shift_range
        Range for the random wavenumber shift.
    alpha_range
        Range for the Gaussian HWHM of each Voigt profile.
    gamma_range
        Range for the Lorentzian HWHM of each Voigt profile.

    Returns
    -------
    np.array
        Summed peak values over x.
    """
    # Draw each parameter list in turn so the sequence of random.uniform
    # calls (scales, shifts, alphas, gammas) stays stable under a fixed seed.
    scales = [random.uniform(*scale_range) for _ in range(n)]
    shifts = [random.uniform(*shift_range) for _ in range(n)]
    alphas = [random.uniform(*alpha_range) for _ in range(n)]
    gammas = [random.uniform(*gamma_range) for _ in range(n)]
    peaks = [
        (10 ** sc) * V(x + sh, al, ga)
        for sc, sh, al, ga in zip(scales, shifts, alphas, gammas)
    ]
    # element-wise sum across peaks
    return np.array([sum(col) for col in zip(*peaks)])
# ------ Background ----------
def generate_example_background_gaussians(
    x, n=5, scale_range=(6, 7), shift_range=(-100, 190), alpha_range=(100, 300)
):
    """Generate a synthetic background signal of `n` broad Gaussian humps.

    Parameters
    ----------
    x
        Input array. Usually np.linspace array.
    n
        Number of gaussian peaks.
    scale_range
        Exponent range for the ``10**scale`` intensity multiplier.
    shift_range
        Range for the random wavenumber shift.
    alpha_range
        Range for the FWHM of the gaussians.

    Returns
    -------
    np.array
        Background values over x.
    """
    # per-list draws keep the random.uniform call order deterministic
    scales = [random.uniform(*scale_range) for _ in range(n)]
    shifts = [random.uniform(*shift_range) for _ in range(n)]
    alphas = [random.uniform(*alpha_range) for _ in range(n)]
    humps = [
        (10 ** sc) * G(x + sh, al)
        for sc, sh, al in zip(scales, shifts, alphas)
    ]
    # element-wise sum across humps
    return np.array([sum(col) for col in zip(*humps)])
def generate_example_background_polynomial(x, a_range=(-0.4, -0.3), b_range=(150, 300)):
    """Generate a random quadratic background ``10**a * (x - b)**2``.

    Parameters
    ----------
    x
        Input array. Usually np.linspace array.
    a_range
        Exponent range for the ``10**a`` multiplier of the polynomial.
    b_range
        Range for the shift of the polynomial.

    Returns
    -------
    np.array
        Background values over x.
    """
    exponent = random.uniform(*a_range)
    center = random.uniform(*b_range)
    return np.array(Poly(x, exponent, center))
def generate_example_background(x, *args, **kwargs):
    """Combine a random quadratic polynomial with broad Gaussian humps.

    Extra positional/keyword arguments are forwarded to both helpers
    (`generate_example_background_polynomial` and
    `generate_example_background_gaussians`).

    Parameters
    ----------
    x
        Input array. Usually np.linspace array.

    Returns
    -------
    np.array
        Background values over x.
    """
    # polynomial first, then gaussians: keeps the RNG consumption order
    polynomial = generate_example_background_polynomial(x, *args, **kwargs)
    gaussians = generate_example_background_gaussians(x, *args, **kwargs)
    return np.array(polynomial) + np.array(gaussians)
# ------ Noise -----------
def generate_example_noise(spectra, *args, **kwargs):
    """Add Poisson (shot) noise to an input spectrum.

    Parameters
    ----------
    spectra : np.array
        Input array; its values are used as the Poisson rates, so they must
        be non-negative.
    *args, **kwargs
        Forwarded to ``np.random.poisson``.

    Returns
    -------
    np.array
        Noisy version of the input spectra.
    """
    noise_mask = np.random.poisson(spectra, *args, **kwargs)
    # BUG FIX: an unreachable `return np.array(noisy_raman)` (referencing an
    # undefined name) used to follow the return below; it was dead code.
    return spectra + noise_mask
def generate_raman_example(x, *args, **kwargs):
    """Generate a full synthetic spectrum: random peaks + background + noise."""
    peaks = generate_example_raman(x, *args, **kwargs)
    background = generate_example_background(x, *args, **kwargs)
    return generate_example_noise(peaks + background, *args, **kwargs)
def generate_single_raman_example(
    x,
    scale,
    shift,
    alpha,
    gamma,
    a,
    b,
    c,
    scale_background,
    alpha_background,
    shift_background,
    *args,
    **kwargs
):
    """Generate one noisy synthetic Raman spectrum over `x`.

    NOTE(review): despite the detailed signature, every parameter other than
    `x` is currently ignored -- the peak and background parameters are drawn
    randomly inside the helper functions. The old single-profile
    implementation sat here commented out and has been removed as dead code.
    The signature is kept unchanged for backward compatibility; either wire
    the parameters through or simplify the signature in a follow-up.
    """
    raman_signal = generate_example_raman(x)
    background_signal = generate_example_background(x)
    combined_signal = raman_signal + background_signal
    return generate_example_noise(combined_signal)
def generate_training_set(x, num_base_examples=1):
    """Build (noisy input, clean target) training pairs.

    Crosses `num_base_examples` random Raman signals with
    `num_base_examples` random backgrounds, so ``num_base_examples**2``
    pairs are produced in total.

    Returns
    -------
    tuple of (inputs, targets)
        `inputs` are noisy spectra, `targets` the corresponding clean peaks.
    """
    raman_pool = [generate_example_raman(x) for _ in range(num_base_examples)]
    background_pool = [
        generate_example_background(x) for _ in range(num_base_examples)
    ]
    inputs = []
    targets = []
    for raman, background in itertools.product(raman_pool, background_pool):
        inputs.append(generate_example_noise(raman + background))
        targets.append(raman)
    return tuple(inputs), tuple(targets)
| [
"random.uniform",
"numpy.sqrt",
"numpy.random.poisson",
"itertools.product",
"numpy.log",
"numpy.array"
] | [((2227, 2242), 'numpy.array', 'np.array', (['f_sum'], {}), '(f_sum)\n', (2235, 2242), True, 'import numpy as np\n'), ((3373, 3401), 'numpy.array', 'np.array', (['combined_gaussians'], {}), '(combined_gaussians)\n', (3381, 3401), True, 'import numpy as np\n'), ((3986, 4010), 'random.uniform', 'random.uniform', (['*a_range'], {}), '(*a_range)\n', (4000, 4010), False, 'import random\n'), ((4019, 4043), 'random.uniform', 'random.uniform', (['*b_range'], {}), '(*b_range)\n', (4033, 4043), False, 'import random\n'), ((4094, 4108), 'numpy.array', 'np.array', (['poly'], {}), '(poly)\n', (4102, 4108), True, 'import numpy as np\n'), ((5518, 5561), 'numpy.random.poisson', 'np.random.poisson', (['spectra', '*args'], {}), '(spectra, *args, **kwargs)\n', (5535, 5561), True, 'import numpy as np\n'), ((5642, 5663), 'numpy.array', 'np.array', (['noisy_raman'], {}), '(noisy_raman)\n', (5650, 5663), True, 'import numpy as np\n'), ((7076, 7130), 'itertools.product', 'itertools.product', (['raman_examples', 'background_examples'], {}), '(raman_examples, background_examples)\n', (7093, 7130), False, 'import itertools\n'), ((783, 801), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (790, 801), True, 'import numpy as np\n'), ((1780, 1808), 'random.uniform', 'random.uniform', (['*scale_range'], {}), '(*scale_range)\n', (1794, 1808), False, 'import random\n'), ((1841, 1869), 'random.uniform', 'random.uniform', (['*shift_range'], {}), '(*shift_range)\n', (1855, 1869), False, 'import random\n'), ((1903, 1931), 'random.uniform', 'random.uniform', (['*alpha_range'], {}), '(*alpha_range)\n', (1917, 1931), False, 'import random\n'), ((1964, 1992), 'random.uniform', 'random.uniform', (['*gamma_range'], {}), '(*gamma_range)\n', (1978, 1992), False, 'import random\n'), ((2958, 2986), 'random.uniform', 'random.uniform', (['*scale_range'], {}), '(*scale_range)\n', (2972, 2986), False, 'import random\n'), ((3019, 3047), 'random.uniform', 'random.uniform', (['*shift_range'], {}), 
'(*shift_range)\n', (3033, 3047), False, 'import random\n'), ((3081, 3109), 'random.uniform', 'random.uniform', (['*alpha_range'], {}), '(*alpha_range)\n', (3095, 3109), False, 'import random\n'), ((5122, 5136), 'numpy.array', 'np.array', (['poly'], {}), '(poly)\n', (5130, 5136), True, 'import numpy as np\n'), ((5139, 5167), 'numpy.array', 'np.array', (['combined_gaussians'], {}), '(combined_gaussians)\n', (5147, 5167), True, 'import numpy as np\n'), ((348, 357), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (354, 357), True, 'import numpy as np\n'), ((671, 680), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (677, 680), True, 'import numpy as np\n'), ((290, 299), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (296, 299), True, 'import numpy as np\n'), ((744, 754), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (751, 754), True, 'import numpy as np\n')] |
import time
import os
import getopt
import sys
import datetime
import numpy as np
from milvus import *
import config
import logging
import random
milvus = Milvus()
def is_normalized():
    """Print the squared L2 norm of the first 10 vectors of the first data file.

    For unit-normalized vectors each printed value should be close to 1.0.

    NOTE(review): `NL_FOLDER_NAME` is not defined anywhere in this module --
    it is presumably meant to be `config.NL_FOLDER_NAME`; calling this
    function as-is raises NameError. Confirm and fix before use.
    """
    filenames = os.listdir(NL_FOLDER_NAME)
    filenames.sort()
    vetors = load_vec_list(NL_FOLDER_NAME+'/'+filenames[0])
    for i in range(10):
        # sum of squared components, i.e. the squared Euclidean norm
        # (despite the name, this is not a square root)
        sqrt_sum = np.sum(np.power(vetors[i], 2))
        print(sqrt_sum)
def connect_server():
    """Connect the module-level `milvus` client using host/port from `config`.

    Failures are logged and swallowed, so callers proceed and fail loudly on
    their first real operation instead.
    """
    try:
        milvus.connect(host=config.MILVUS_HOST, port=config.MILVUS_PORT)
    except Exception as e:
        logging.error(e)
def build_collection(collection_name, it):
    """Create an index of type `it` on `collection_name`.

    `it` is one of: flat, ivf_flat, sq8, sq8h, pq, nsg, hnsw. Index
    parameters are taken from the `config` module. Unknown index types print
    an error and exit with status 2.
    """
    connect_server()
    # dispatch table: short name -> (IndexType, parameter dict)
    index_options = {
        'flat': (IndexType.FLAT, {'nlist': config.NLIST}),
        'ivf_flat': (IndexType.IVF_FLAT, {'nlist': config.NLIST}),
        'sq8': (IndexType.IVF_SQ8, {'nlist': config.NLIST}),
        'sq8h': (IndexType.IVF_SQ8H, {'nlist': config.NLIST}),
        'pq': (IndexType.IVF_PQ, {'nlist': config.NLIST, 'm': config.PQ_M}),
        'nsg': (IndexType.RNSG, {'search_length': config.SEARCH_LENGTH,
                                 'out_degree': config.OUT_DEGREE,
                                 'candidate_pool_size': config.CANDIDATE_POOL,
                                 'knng': config.KNNG}),
        'hnsw': (IndexType.HNSW, {'M': config.HNSW_M,
                                  'efConstruction': config.EFCONSTRUCTION}),
    }
    if it not in index_options:
        print("error index_type, only support these index: flat, ivf_flat, sq8, sq8h, pq, nsg, hnsw")
        print("please try again!")
        sys.exit(2)
    index_type, index_param = index_options[it]
    print(collection_name, " ", index_type, " ", index_param)
    status = milvus.create_index(collection_name, index_type, index_param)
    print(status)
def search(collection_name,search_param):
    """Benchmark search latency over the nq/topk grid from `config`.

    For every combination of `config.nq_scope` x `config.topk_scope`, runs a
    search on `collection_name` and appends a CSV row
    "nq,topk,total_time,avg_time" to
    `<PERFORMANCE_FILE_NAME>/<collection>_<search_param>_performance.csv`.
    """
    connect_server()
    performance_file = config.PERFORMANCE_FILE_NAME
    nq_scope = config.nq_scope
    topk_scope = config.topk_scope
    # make sure the output directory exists before opening the CSV
    if not os.path.exists(performance_file):
        os.mkdir(performance_file)
    filename = performance_file + '/' + collection_name + '_' + str(search_param) + '_performance.csv'
    search_params = get_search_params(collection_name,search_param)
    with open(filename,'w+') as f:
        f.write("nq,topk,total_time,avg_time"+'\n')
        for nq in nq_scope:
            time_start = time.time()
            # vector loading is timed separately from the search itself
            query_list = load_nq_vec(nq)
            print("load query:", len(query_list), "time_load = ", time.time() - time_start)
            for topk in topk_scope:
                time_start = time.time()
                status,result = milvus.search(collection_name=collection_name, query_records=query_list, top_k=topk, params=search_params)
                time_cost = time.time() - time_start
                line = str(nq) + ',' + str(topk) + ',' + str(round(time_cost, 4)) + ',' + str(round(time_cost / nq, 4)) + '\n'
                f.write(line)
                print(nq, topk, time_cost)
            # blank line separates the per-nq result groups
            f.write('\n')
    print("search_vec_list done !")
def get_search_params(collection_name, search_param):
    """Map `search_param` onto the search-parameter dict for the collection's index.

    RNSG indexes use `search_length`, HNSW uses `ef`, and every other
    (IVF-family) index uses `nprobe`.
    """
    index_type = str(milvus.describe_index(collection_name)[1]._index_type)
    if index_type == 'RNSG':
        search_params = {'search_length': search_param}
    elif index_type == 'HNSW':
        # BUG FIX: this branch used `==` (comparison) instead of `=`
        # (assignment), so `search_params` was never bound and the function
        # raised UnboundLocalError for HNSW collections.
        search_params = {'ef': search_param}
    else:
        search_params = {'nprobe': search_param}
    return search_params
def load_nq_vec(nq):
    """Collect the first `nq` query vectors from the files in `config.NQ_FOLDER_NAME`.

    Files are read in sorted name order and concatenated; the last file read
    is truncated so exactly `nq` vectors are returned.

    Returns
    -------
    list or None
        The `nq` vectors, or None when the folder holds fewer than `nq`
        vectors in total (preserving the previous implicit-None behavior).
    """
    vectors = []
    length = 0
    filenames = os.listdir(config.NQ_FOLDER_NAME)
    filenames.sort()
    for filename in filenames:
        vec_list = load_vec_list(config.NQ_FOLDER_NAME + '/' + filename)
        length += len(vec_list)
        if length > nq:
            # BUG FIX: the old code kept `nq % len(vec_list)` vectors, which
            # is only correct when every file holds the same number of
            # vectors; keep exactly the number still missing instead.
            keep = len(vec_list) - (length - nq)
            vec_list = vec_list[0:keep]
        vectors += vec_list
        if len(vectors) == nq:
            return vectors
    return None  # not enough vectors on disk
def load_vec_list(file_name):
    """Load vectors from `file_name` as a list of lists.

    Reads CSV (no header) when `config.IS_CSV` is set, otherwise a NumPy
    `.npy` file.
    """
    if config.IS_CSV:
        import pandas as pd
        frame = pd.read_csv(file_name, header=None)
        array = np.array(frame)
    else:
        array = np.load(file_name)
    return array.tolist()
def recall_test(collection_name,search_param):
    """Run a recall benchmark with `config.recall_nq` random query vectors.

    Samples queries from `config.recall_vec_fname`, searches
    `collection_name` with top_k = `config.recall_topk`, saves the raw
    results via `save_re_to_file`, and computes recall via `compute_recall`.
    """
    connect_server()
    vectors = load_vec_list(config.recall_vec_fname)
    nq = config.recall_nq
    query_list = []
    # sample `nq` distinct indices, sorted so output files are reproducible
    # to compare against the ground-truth ordering
    rand = sorted(random.sample(range(0, len(vectors)), nq))
    for i in rand:
        query_list.append(vectors[i])
    search_params = get_search_params(collection_name,search_param)
    print("collection name:", collection_name, "query list:", len(query_list), "topk:", config.recall_topk, "search_params:", search_params)
    time_start = time.time()
    status, results = milvus.search_vectors(collection_name=collection_name, query_records=query_list, top_k=config.recall_topk, params=search_params)
    time_cost = time.time() - time_start
    print("time_search = ", time_cost)
    save_re_to_file(collection_name, rand, results, search_param,nq)
    compute_recall(collection_name,nq,results,search_param,rand)
def save_re_to_file(collection_name, rand, results, search_param, nq):
    """Write raw search results to `<recall_res_fname>/<collection>_<param>_<nq>_recall.txt`.

    Each line holds "query_index result_id result_distance"; the result
    group of each query is followed by a blank separator line. The output
    directory is created if missing.
    """
    if not os.path.exists(config.recall_res_fname):
        os.mkdir(config.recall_res_fname)
    file_name = config.recall_res_fname + '/' + collection_name + '_' + str(search_param) + '_' + str(nq) + '_recall.txt'
    with open(file_name, 'w') as f:
        for i in range(len(results)):
            for j in range(len(results[i])):
                line = str(rand[i]) + ' ' + str(results[i][j].id) + ' ' + str(results[i][j].distance)
                f.write(line + '\n')
            f.write('\n')
        # FIX: removed a redundant `f.close()` that sat inside this `with`
        # block -- the context manager already closes the file.
def compute_recall(collection_name,nq,results,search_param,rand):
    """Compute and save recall of `results` against the ground-truth file.

    For each top_k in `config.compute_recall_topk`, compares the retrieved
    ids to the ground truth (via `compare_correct`) and writes a per-query
    recall CSV to `config.recall_out_fname`.

    NOTE(review): unlike `save_re_to_file`, the `recall_out_fname` directory
    is not created here -- confirm it exists before running. The header
    string "avarage" is a typo in the output format, left unchanged to avoid
    breaking downstream parsers.
    """
    # flatten the result objects into plain id lists, one per query
    ids = []
    for nq_result in (results):
        temp = []
        for result in (nq_result):
            temp.append(result.id)
        ids.append(temp)
    gt_ids = load_gt_ids()
    for top_k in config.compute_recall_topk:
        recalls, count_all = compare_correct(nq, top_k, rand, gt_ids, ids)
        fname = config.recall_out_fname+ '/' + collection_name + '_' + str(search_param) + '_' + str(nq) + "_" + str(top_k) + ".csv"
        with open(fname,'w') as f:
            f.write('nq,topk,recall\n')
            for i in range(nq):
                line = str(i + 1) + ',' + str(top_k) + ',' + str(recalls[i] * 100) + "%"
                f.write(line + '\n')
            f.write("max, avarage, min\n")
            f.write( str(max(recalls) * 100) + "%," + str(round(count_all / nq / top_k, 3) * 100) + "%," + str(min(recalls) * 100) + "%\n")
            print("top_k=", top_k, ", total accuracy", round(count_all / nq / top_k, 3) * 100, "%")
def load_gt_ids():
    """Parse the ground-truth file into per-query id lists.

    The file (`config.GT_FNAME_NAME`) holds groups of lines -- the first
    whitespace-separated token of each line is an id -- with a blank line
    terminating every group.
    """
    groups = []
    current = []
    with open(config.GT_FNAME_NAME, 'r') as f:
        for line in f:
            tokens = line.split()
            if not tokens:
                # blank line closes the current group
                groups.append(current)
                current = []
            else:
                current.append(int(tokens[0]))
    return groups
def compare_correct(nq, top_k, rand, gt_ids, ids):
    """Compute per-query recall@top_k of retrieved ids against ground truth.

    Parameters
    ----------
    nq : int
        Number of queries to evaluate.
    top_k : int
        Number of results compared per query.
    rand : sequence
        Indices into `gt_ids` identifying each query's ground-truth entry.
    gt_ids : sequence of sequences
        Ground-truth id lists.
    ids : sequence of sequences
        Retrieved id lists, one per query.

    Returns
    -------
    (recalls, count_all)
        `recalls` is the per-query recall fraction list; `count_all` is the
        total number of matched ids across all queries.
    """
    recalls = []
    count_all = 0
    for i in range(nq):
        retrieved = set(ids[i][:top_k])
        truth = set(gt_ids[int(rand[i])][:top_k])
        hits = len(retrieved & truth)
        recalls.append(hits / top_k)
        count_all += hits
    return recalls, count_all
"os.path.exists",
"os.listdir",
"pandas.read_csv",
"numpy.power",
"numpy.array",
"os.mkdir",
"sys.exit",
"numpy.load",
"time.time",
"logging.error"
] | [((204, 230), 'os.listdir', 'os.listdir', (['NL_FOLDER_NAME'], {}), '(NL_FOLDER_NAME)\n', (214, 230), False, 'import os\n'), ((3617, 3650), 'os.listdir', 'os.listdir', (['config.NQ_FOLDER_NAME'], {}), '(config.NQ_FOLDER_NAME)\n', (3627, 3650), False, 'import os\n'), ((4903, 4914), 'time.time', 'time.time', ([], {}), '()\n', (4912, 4914), False, 'import time\n'), ((2096, 2128), 'os.path.exists', 'os.path.exists', (['performance_file'], {}), '(performance_file)\n', (2110, 2128), False, 'import os\n'), ((2138, 2164), 'os.mkdir', 'os.mkdir', (['performance_file'], {}), '(performance_file)\n', (2146, 2164), False, 'import os\n'), ((4091, 4126), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'header': 'None'}), '(file_name, header=None)\n', (4102, 4126), True, 'import pandas as pd\n'), ((4142, 4156), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4150, 4156), True, 'import numpy as np\n'), ((4182, 4200), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (4189, 4200), True, 'import numpy as np\n'), ((5111, 5122), 'time.time', 'time.time', ([], {}), '()\n', (5120, 5122), False, 'import time\n'), ((5395, 5434), 'os.path.exists', 'os.path.exists', (['config.recall_res_fname'], {}), '(config.recall_res_fname)\n', (5409, 5434), False, 'import os\n'), ((5444, 5477), 'os.mkdir', 'os.mkdir', (['config.recall_res_fname'], {}), '(config.recall_res_fname)\n', (5452, 5477), False, 'import os\n'), ((362, 384), 'numpy.power', 'np.power', (['vetors[i]', '(2)'], {}), '(vetors[i], 2)\n', (370, 384), True, 'import numpy as np\n'), ((584, 600), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (597, 600), False, 'import logging\n'), ((2476, 2487), 'time.time', 'time.time', ([], {}), '()\n', (2485, 2487), False, 'import time\n'), ((2686, 2697), 'time.time', 'time.time', ([], {}), '()\n', (2695, 2697), False, 'import time\n'), ((2595, 2606), 'time.time', 'time.time', ([], {}), '()\n', (2604, 2606), False, 'import time\n'), ((2865, 2876), 'time.time', 
'time.time', ([], {}), '()\n', (2874, 2876), False, 'import time\n'), ((1734, 1745), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1742, 1745), False, 'import sys\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
from abc import ABC
import numpy as np
from matplotlib.gridspec import GridSpec
from .._doc import ds
logger = getLogger(__name__)
@ds.get_sectionsf('PlotGUI')
class PlotGUI(ABC):
    '''abstract base class for heatmap GUI.
    The grid of subplots looks like this:
    +------------------------------------+---+----------------+
    | sample color bar                   |   |        |       |
    |------------------------------------+---+------------+---|
    |                                    | f |            |   |
    |                                    | e |            |   |
    |                                    | a |            | c |
    |                                    | t |            | o |
    |              heatmap               | u |            | l |
    |                                    | r |            | o |
    |                                    | e |            | r |
    |                                    |   |    tree    |   |
    |                                    | c | (optional  | l |
    |                                    | o |  column)   | e |
    |                                    | l |            | g |
    |                                    | o |            | e |
    |                                    | r |            | n |
    |                                    |   |            | d |
    |                                    | b |            |   |
    |                                    | a |            |   |
    |                                    | r |            |   |
    +------------------------------------+---+------------+---+
    Attributes
    ----------
    exp : Experiment
        the experiment associated with this gui
    selected_features : dict of matplotlib.lines.Line2D
        used to track the selected features and plot horizontal lines for each selection
        keys - the selected features indices, values - the line id for each selected feature
    selected_samples : dict of matplotlib.lines.Line2D
        used to track the selected samples and plot vertical lines for each selection
        keys - the selected sample indices, values - the line id for each selected samples
    current_select : tuple of (int, int)
        current selected point
    zoom_scale : numeric
        the scaling factor for zooming
    scroll_offset : numeric, optional
        The amount of columns/rows to scroll when arrow key pressed
        0 (default) to scroll one full screen every keypress
        >0 : scroll that constant number of columns/rows per keypress
    figure : matplotlib.figure.Figure
        The figure where the heatmap and other axes will be plotted into.
    ax_hm : matplotlib.axes.Axes
        Axes for the heatmap.
    ax_sbar : matplotlib.axes.Axes
        Axes for the sample colorbar
    ax_fbar : matplotlib.axes.Axes
        Axes for the feature colorbar
    ax_tre : matplotlib.axes.Axes
        Axes for the dendrogram/tree
    ax_legend : matplotlib.axes.Axes
        Axes for the color legend
    databases : list
        the databases to interact with
    Parameters
    ----------
    exp : Experiment
        object associated with this GUI
    zoom_scale : float or int
        the scaling factor for zooming
    scroll_offset : float
        The amount of columns/rows to scroll when arrow key pressed
    databases : the databases to interact with
    tree_size : int (>= 0)
        the width of the axes to plot a tree. 7 is a good value to start.
    '''

    def __init__(self, exp, zoom_scale=2, scroll_offset=0, databases=None, tree_size=0):
        # the Experiment being plotted
        self.exp = exp
        # how much zooming in on key press
        self.zoom_scale = zoom_scale
        # how much to scroll on key press
        self.scroll_offset = scroll_offset
        # list of selected features
        self.selected_features = {}
        # list of selected samples
        self.selected_samples = {}
        # current selected point
        self.current_select = None
        # list of databases to interface with.
        # BUGFIX: the original only set self.databases when the argument was
        # None, so passing an actual database list left the attribute unset
        # (AttributeError later in get_database_annotations).
        self.databases = [] if databases is None else list(databases)
        # the default database used when annotating features
        self._annotation_db = None
        # NOTE(review): tree_size is accepted here but only used by
        # _set_figure; it is intentionally not stored on the instance.

    def _set_figure(self, figure=None, tree_size=0):
        '''Create the figure and the grid of axes (heatmap, colorbars,
        optional tree column and legend).

        Parameters
        ----------
        figure : matplotlib.figure.Figure, optional
            existing figure to draw into; a new one is created if None
        tree_size : int
            width ratio of the tree axes; 0 means no tree column
        '''
        import matplotlib.pyplot as plt
        if figure is None:
            self.figure = plt.figure()
        else:
            self.figure = figure
        if tree_size == 0:
            gs = GridSpec(2, 3, width_ratios=[12, 1, 0.5], height_ratios=[1, 12])
            hm_ax = self.figure.add_subplot(gs[3])
            self.ax_sbar = self.figure.add_subplot(gs[0], sharex=hm_ax)
            self.ax_sbar.axis('off')
            self.ax_fbar = self.figure.add_subplot(gs[4], sharey=hm_ax)
            self.ax_fbar.axis('off')
            self.ax_hm = hm_ax
            self.ax_legend = self.figure.add_subplot(gs[5])
            self.gridspec = gs
        else:
            gs = GridSpec(2, 4, width_ratios=[12, 1, tree_size, 0.5], height_ratios=[1, 12])
            hm_ax = self.figure.add_subplot(gs[4])
            self.ax_sbar = self.figure.add_subplot(gs[0], sharex=hm_ax)
            self.ax_sbar.axis('off')
            self.ax_fbar = self.figure.add_subplot(gs[5], sharey=hm_ax)
            self.ax_fbar.axis('off')
            self.ax_hm = hm_ax
            self.ax_tre = self.figure.add_subplot(gs[6], sharey=hm_ax)
            self.ax_tre.axis('off')
            self.ax_legend = self.figure.add_subplot(gs[7])
            self.gridspec = gs

    def save_figure(self, *args, **kwargs):
        '''Save the figure to file.
        Parameters
        ----------
        args, kwargs: tuple, dict
            arguments passing to ``matplotlib.Figure.savefig`` function.
        '''
        # try:
        #     # create color bar for the heatmap before saving
        #     self.figure.colorbar(self.ax_hm.images[0])
        # except IndexError:
        #     logger.warning('no heatmap are plotted')
        self.figure.savefig(*args, **kwargs)

    def get_selection_info(self):
        '''Get the current selection information
        Returns
        -------
        tuple of (str, str, numeric)
            sample id, feature id, abundance
        '''
        if self.current_select is None:
            return 'na', 'na', 0
        row, col = self.current_select
        fid = self.exp.feature_metadata.index[col]
        sid = self.exp.sample_metadata.index[row]
        abd = self.exp.data[row, col]
        return sid, fid, abd

    def get_database_annotations(self, feature):
        '''Get database annotations about a feature
        Parameters
        ----------
        feature : str
            The featureID to get info about
        Returns
        -------
        list of tuple of (dict, str)
            dict : annotation key/value pairs
            str : a string summarizing the annotations
        '''
        annt = []
        for cdatabase in self.databases:
            try:
                cannt = cdatabase.get_seq_annotation_strings(feature)
                if len(cannt) == 0:
                    cannt = [[{'annotationtype': 'not found'},
                              'No annotation found in database %s' % cdatabase.database_name]]
                else:
                    for cannotation in cannt:
                        cannotation[0]['_db_interface'] = cdatabase
            except Exception:
                # BUGFIX: the original used a bare except (which also
                # swallows KeyboardInterrupt/SystemExit) and assigned a plain
                # string, which annt.extend() then added one CHARACTER at a
                # time. Keep the (dict, str) entry shape instead.
                cannt = [[{'annotationtype': 'error'},
                          'error connecting to db %s' % cdatabase.database_name]]
            annt.extend(cannt)
        return annt

    def get_info(self):
        '''Get info for the selected feature/sample
        Returns
        -------
        tuple of (str, str, numeric, dict)
            sample id, feature id, abundance, taxonomy, annotation
        '''
        sid, fid, abd = self.get_selection_info()
        annt = self.get_database_annotations(fid)
        return sid, fid, abd, annt

    def show_info(self):
        '''Print info about the current selection (see get_info).'''
        print(self.get_info())

    def __call__(self):
        '''Run the GUI.'''
        self.connect_functions()
        self.figure.tight_layout()
        # the following line does not work since makes the colorbars far away so commented
        # self.gridspec.tight_layout(self.figure, pad=1, h_pad=0, w_pad=0)
        # squeeze color bars close to the heatmap
        self.figure.subplots_adjust(hspace=0.01, wspace=0.01)

    def connect_functions(self):
        '''Connect to the matplotlib callbacks for key and mouse '''
        canvas = self.figure.canvas
        # comment out scroll event for now
        # canvas.mpl_connect('scroll_event', self.scroll_zoom_callback)
        canvas.mpl_connect('button_press_event', self.button_press_callback)
        canvas.mpl_connect('key_press_event', self.key_press_callback)

    def scroll_zoom_callback(self, event):
        '''Zoom upon mouse scroll event'''
        logger.debug(repr(event))
        ax = event.inaxes
        # ax is None when scrolled outside the heatmap
        if ax is None:
            return
        cur_xlim = ax.get_xlim()
        cur_ylim = ax.get_ylim()
        xdata = event.xdata  # get event x location
        ydata = event.ydata  # get event y location
        x_left = xdata - cur_xlim[0]
        x_right = cur_xlim[1] - xdata
        y_top = ydata - cur_ylim[0]
        y_bottom = cur_ylim[1] - ydata
        if event.button == 'up':
            scale_factor = 1. / self.zoom_scale
        elif event.button == 'down':
            scale_factor = self.zoom_scale
        else:
            # deal with something that should never happen
            logger.warning('unknow scroll movement')
            return
        # set new limits
        ax.set_xlim([xdata - x_left * scale_factor,
                     xdata + x_right * scale_factor])
        ax.set_ylim([ydata - y_top * scale_factor,
                     ydata + y_bottom * scale_factor])
        self.figure.canvas.draw()  # force re-draw

    def button_press_callback(self, event):
        '''Select upon mouse button press.
        button only: empty the previous selection and select the current point
        button + shift: select all the features in the rectangle between
        current selection and last selecton.
        button + super: add current selected features to the selected list
        '''
        logger.debug(repr(event))
        ax = event.inaxes
        # ax is None when clicked outside the heatmap
        if ax is None:
            return
        rx = int(round(event.xdata))
        ry = int(round(event.ydata))
        if event.key is None:
            # add selection to list
            self.clear_selection()
            self.update_selection(samplepos=[rx], featurepos=[ry])
        elif event.key == 'shift':
            try:
                last_selected_feature = self.current_select[1]
            except (TypeError, IndexError):
                # BUGFIX: current_select is None before any selection, so
                # subscripting it raises TypeError, not IndexError - the
                # original guard could never fire.
                logger.critical('You have not selected any previously.')
                return
            if last_selected_feature > ry:
                features = np.arange(last_selected_feature, ry - 1, -1)
            else:
                features = np.arange(last_selected_feature, ry + 1, 1)
            self.clear_selection()
            self.update_selection(featurepos=features)
        elif event.key == 'super':
            self.update_selection(featurepos=[ry])
        self.current_select = (rx, ry)
        # and show the selected info
        self.show_info()

    def key_press_callback(self, event):
        '''Move/zoom upon key pressing.'''
        logger.debug('%r: %s key pressed' % (event, event.key))
        ax = event.inaxes
        if ax is None:
            return
        ylim_lower, ylim_upper = ax.get_ylim()
        xlim_lower, xlim_upper = ax.get_xlim()
        # set the scroll offset
        if self.scroll_offset > 0:
            x_offset = self.scroll_offset
            y_offset = self.scroll_offset
        else:
            x_offset = xlim_upper - xlim_lower
            y_offset = ylim_upper - ylim_lower
        if event.key == 'shift+up' or event.key == '=':
            ax.set_ylim(
                ylim_lower,
                ylim_lower + (ylim_upper - ylim_lower) / self.zoom_scale)
        elif event.key == 'shift+down' or event.key == '-':
            ax.set_ylim(
                ylim_lower,
                ylim_lower + (ylim_upper - ylim_lower) * self.zoom_scale)
        elif event.key == 'shift+right' or event.key == '+':
            ax.set_xlim(
                xlim_lower,
                xlim_lower + (xlim_upper - xlim_lower) / self.zoom_scale)
        elif event.key == 'shift+left' or event.key == '_':
            ax.set_xlim(
                xlim_lower,
                xlim_lower + (xlim_upper - xlim_lower) * self.zoom_scale)
        elif event.key == 'down':
            max_y = self.exp.data.shape[1] - 0.5
            if ylim_lower - y_offset > max_y:
                y_offset = ylim_lower - max_y
            ax.set_ylim(ylim_lower - y_offset, ylim_upper - y_offset)
        elif event.key == 'up':
            if ylim_upper + y_offset < -0.5:
                y_offset = -0.5 - ylim_upper
            ax.set_ylim(ylim_lower + y_offset, ylim_upper + y_offset)
        elif event.key == 'left':
            if xlim_lower - x_offset < -0.5:
                x_offset = xlim_lower + 0.5
            ax.set_xlim(xlim_lower - x_offset, xlim_upper - x_offset)
        elif event.key == 'right':
            max_x = self.exp.data.shape[0] - 0.5
            if xlim_upper + x_offset > max_x:
                x_offset = xlim_upper - max_x
            ax.set_xlim(xlim_lower + x_offset, xlim_upper + x_offset)
        elif event.key in {'.', ',', '<', '>'}:
            shift = {'.': (0, 1),
                     ',': (0, -1),
                     '<': (-1, 0),
                     '>': (1, 0)}
            try:
                self.current_select = (self.current_select[0] + shift[event.key][0],
                                       self.current_select[1] + shift[event.key][1])
            except (TypeError, IndexError):
                # BUGFIX: same as in button_press_callback - subscripting a
                # None current_select raises TypeError.
                logger.warning('You have not selected any previously.')
                return
            self.clear_selection()
            self.update_selection(
                samplepos=[self.current_select[0]], featurepos=[self.current_select[1]])
            self.show_info()
        else:
            logger.debug('Unrecoginzed key: %s' % event.key)
            return
        self.figure.canvas.draw()

    def clear_selection(self):
        '''Delete all shown selection lines '''
        for cline in self.selected_samples.values():
            self.ax_hm.lines.remove(cline)
            logger.debug('remove sample selection %r' % cline)
        self.selected_samples = {}
        for cline in self.selected_features.values():
            self.ax_hm.lines.remove(cline)
            logger.debug('remove sample selection %r' % cline)
        self.selected_features = {}

    def update_selection(self, samplepos=(), featurepos=(), toggle=True):
        '''Update the selection
        Parameters
        ----------
        samplepos : iterable of int, optional
            positions of samples to be added
        featurepos : iterable of int, optional
            positions of features to be added
        toggle: bool, optional
            True (default) to remove lines in the lists that are already selected.
            False to ignore
        '''
        for cpos in samplepos:
            if cpos not in self.selected_samples:
                self.selected_samples[cpos] = self.ax_hm.axvline(
                    x=cpos, color='white', linestyle='dotted', alpha=0.7, linewidth=0.7)
                logger.debug('add sample selection %r' % cpos)
            else:
                if toggle:
                    self.ax_hm.lines.remove(self.selected_samples[cpos])
                    del self.selected_samples[cpos]
        for cpos in featurepos:
            if cpos not in self.selected_features:
                self.selected_features[cpos] = self.ax_hm.axhline(
                    y=cpos, color='white', linestyle='dotted', alpha=0.7, linewidth=0.7)
                logger.debug('add sample selection %r' % cpos)
            else:
                if toggle:
                    self.ax_hm.lines.remove(self.selected_features[cpos])
                    del self.selected_features[cpos]
        self.figure.canvas.draw()

    def get_selected_seqs(self):
        '''Get the list of selected sequences
        Parameters
        ----------
        Returns
        -------
        seqs : list of str
            The selected sequences ('ACGT')
        '''
        return list(self.exp.feature_metadata.index[list(self.selected_features.keys())])
| [
"logging.getLogger",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.arange"
] | [((495, 514), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (504, 514), False, 'from logging import getLogger\n'), ((4753, 4765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4763, 4765), True, 'import matplotlib.pyplot as plt\n'), ((4857, 4921), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(3)'], {'width_ratios': '[12, 1, 0.5]', 'height_ratios': '[1, 12]'}), '(2, 3, width_ratios=[12, 1, 0.5], height_ratios=[1, 12])\n', (4865, 4921), False, 'from matplotlib.gridspec import GridSpec\n'), ((5344, 5419), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(4)'], {'width_ratios': '[12, 1, tree_size, 0.5]', 'height_ratios': '[1, 12]'}), '(2, 4, width_ratios=[12, 1, tree_size, 0.5], height_ratios=[1, 12])\n', (5352, 5419), False, 'from matplotlib.gridspec import GridSpec\n'), ((11399, 11443), 'numpy.arange', 'np.arange', (['last_selected_feature', '(ry - 1)', '(-1)'], {}), '(last_selected_feature, ry - 1, -1)\n', (11408, 11443), True, 'import numpy as np\n'), ((11489, 11532), 'numpy.arange', 'np.arange', (['last_selected_feature', '(ry + 1)', '(1)'], {}), '(last_selected_feature, ry + 1, 1)\n', (11498, 11532), True, 'import numpy as np\n')] |
""" Example showing how to compute a family of basis polynomials """
# -------------------------------------------------------------------------------------------------------------------- #
# Importing packages
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import nurbspy as nrb
import matplotlib.pyplot as plt
# -------------------------------------------------------------------------------------------------------------------- #
# Basis polynomials and derivatives example
# -------------------------------------------------------------------------------------------------------------------- #

# Maximum index of the basis polynomials (counting from zero)
n = 4

# Define the order of the basis polynomials
p = 3

# Define the knot vector (clamped spline)
# p+1 zeros, n-p equispaced points between 0 and 1, and p+1 ones. In total r+1 points where r=n+p+1
U = np.concatenate((np.zeros(p), np.linspace(0, 1, n - p + 2), np.ones(p)))

# Define a new u-parametrization suitable for finite differences
h = 1e-5
hh = h + h**2
Nu = 1000
u = np.linspace(0.00 + hh, 1.00 - hh, Nu)  # Make sure that the limits [0, 1] also work when making changes

# Compute the basis polynomials and derivatives
N_basis = nrb.compute_basis_polynomials(n, p, U, u)
dN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=1)
ddN_basis = nrb.compute_basis_polynomials_derivatives(n, p, U, u, derivative_order=2)


# -------------------------------------------------------------------------------------------------------------------- #
# Plot the basis polynomials
# -------------------------------------------------------------------------------------------------------------------- #
def _plot_basis_family(ax, values, title):
    """Plot every basis polynomial (one row of `values` per index) on `ax`.

    Factored out of the original script, which repeated this exact block
    verbatim for the zeroth, first and second derivatives.
    """
    ax.set_title(title, fontsize=12, color='k', pad=12)
    ax.set_xlabel('$u$ parameter', fontsize=12, color='k', labelpad=12)
    ax.set_ylabel('Function value', fontsize=12, color='k', labelpad=12)
    for i in range(n + 1):
        line, = ax.plot(u, values[i, :])
        line.set_linewidth(1.25)
        line.set_linestyle("-")
        line.set_marker(" ")
        line.set_markersize(3.5)
        line.set_markeredgewidth(1)
        line.set_markeredgecolor("k")
        line.set_markerfacecolor("w")
        line.set_label('index ' + str(i))


# Create the figure with one subplot per derivative order
fig = plt.figure(figsize=(15, 5))
_plot_basis_family(fig.add_subplot(131), N_basis, 'Zeroth derivative')
_plot_basis_family(fig.add_subplot(132), dN_basis, 'First derivative')
ax3 = fig.add_subplot(133)
_plot_basis_family(ax3, ddN_basis, 'Second derivative')

# Create legend (anchored next to the last subplot)
ax3.legend(ncol=1, loc='right', bbox_to_anchor=(1.60, 0.50), fontsize=10, edgecolor='k', framealpha=1.0)

# Adjust pad
plt.tight_layout(pad=5.0, w_pad=None, h_pad=None)

# Show the figure
plt.show()


# # -------------------------------------------------------------------------------------------------------------------- #
# # Check that the computations are correct
# # -------------------------------------------------------------------------------------------------------------------- #
# # Check that the sum of the basis polynomials is equal to one (partition of unity property)
# print('The two-norm of partition of unity error is      :  ', np.sum((np.sum(N_basis, axis=0) - 1.00) ** 2) ** (1 / 2))
#
# # Check the first derivative against a finite difference aproximation
# a = -1/2*compute_basis_polynomials(n, p, U, u - h)
# b = +1/2*compute_basis_polynomials(n, p, U, u + h)
# dN_fd = (a+b)/h
# print('The two-norm of the first derivative error is    :  ', np.sum((dN_basis-dN_fd)**2)**(1/2)/Nu)
#
# # Check the second derivative against a finite difference aproximation
# a = +1*compute_basis_polynomials(n, p, U, u - h)
# b = -2*compute_basis_polynomials(n, p, U, u)
# c = +1*compute_basis_polynomials(n, p, U, u + h)
# ddN_fd = (a+b+c)/h**2
# print('The two-norm of the second derivative error is   :  ', np.sum((ddN_basis-ddN_fd)**2)**(1/2)/Nu)
| [
"nurbspy.compute_basis_polynomials",
"numpy.ones",
"nurbspy.compute_basis_polynomials_derivatives",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show"
] | [((1137, 1172), 'numpy.linspace', 'np.linspace', (['(0.0 + hh)', '(1.0 - hh)', 'Nu'], {}), '(0.0 + hh, 1.0 - hh, Nu)\n', (1148, 1172), True, 'import numpy as np\n'), ((1307, 1348), 'nurbspy.compute_basis_polynomials', 'nrb.compute_basis_polynomials', (['n', 'p', 'U', 'u'], {}), '(n, p, U, u)\n', (1336, 1348), True, 'import nurbspy as nrb\n'), ((1361, 1434), 'nurbspy.compute_basis_polynomials_derivatives', 'nrb.compute_basis_polynomials_derivatives', (['n', 'p', 'U', 'u'], {'derivative_order': '(1)'}), '(n, p, U, u, derivative_order=1)\n', (1402, 1434), True, 'import nurbspy as nrb\n'), ((1447, 1520), 'nurbspy.compute_basis_polynomials_derivatives', 'nrb.compute_basis_polynomials_derivatives', (['n', 'p', 'U', 'u'], {'derivative_order': '(2)'}), '(n, p, U, u, derivative_order=2)\n', (1488, 1520), True, 'import nurbspy as nrb\n'), ((1820, 1847), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (1830, 1847), True, 'import matplotlib.pyplot as plt\n'), ((3782, 3831), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(5.0)', 'w_pad': 'None', 'h_pad': 'None'}), '(pad=5.0, w_pad=None, h_pad=None)\n', (3798, 3831), True, 'import matplotlib.pyplot as plt\n'), ((3851, 3861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3859, 3861), True, 'import matplotlib.pyplot as plt\n'), ((978, 989), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (986, 989), True, 'import numpy as np\n'), ((991, 1019), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n - p + 2)'], {}), '(0, 1, n - p + 2)\n', (1002, 1019), True, 'import numpy as np\n'), ((1021, 1031), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (1028, 1031), True, 'import numpy as np\n')] |
import pathlib
import helpers
import numpy as np
import pytest
import meshio
@pytest.mark.parametrize(
    "mesh",
    [
        # helpers.empty_mesh,
        helpers.tri_mesh
    ],
)
def test_neuroglancer(mesh):
    """Round-trip a mesh through the neuroglancer writer/reader.

    The format stores 32-bit floats only, hence the loose 1.0e-8 tolerance.
    """
    # The original wrapped meshio.neuroglancer.write in a closure that only
    # forwarded *args/**kwargs - the function can be passed directly.
    helpers.write_read(meshio.neuroglancer.write, meshio.neuroglancer.read, mesh, 1.0e-8)
@pytest.mark.parametrize("filename, ref_sum, ref_num_cells", [("simple1", 20, 4)])
def test_reference_file(filename, ref_sum, ref_num_cells):
    """Read a checked-in neuroglancer reference mesh and verify its
    coordinate sum and cell count against known values."""
    mesh_dir = pathlib.Path(__file__).resolve().parent / "meshes" / "neuroglancer"
    mesh = meshio.read(mesh_dir / filename, "neuroglancer")
    tol = 1.0e-5
    point_sum = np.sum(mesh.points)
    assert abs(point_sum - ref_sum) < tol * abs(ref_sum)
    assert len(mesh.cells[0].data) == ref_num_cells
| [
"pathlib.Path",
"numpy.sum",
"pytest.mark.parametrize",
"meshio.read",
"helpers.write_read",
"meshio.neuroglancer.write"
] | [((82, 133), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mesh"""', '[helpers.tri_mesh]'], {}), "('mesh', [helpers.tri_mesh])\n", (105, 133), False, 'import pytest\n'), ((401, 486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filename, ref_sum, ref_num_cells"""', "[('simple1', 20, 4)]"], {}), "('filename, ref_sum, ref_num_cells', [('simple1', 20,\n 4)])\n", (424, 486), False, 'import pytest\n'), ((331, 396), 'helpers.write_read', 'helpers.write_read', (['writer', 'meshio.neuroglancer.read', 'mesh', '(1e-08)'], {}), '(writer, meshio.neuroglancer.read, mesh, 1e-08)\n', (349, 396), False, 'import helpers\n'), ((672, 709), 'meshio.read', 'meshio.read', (['filename', '"""neuroglancer"""'], {}), "(filename, 'neuroglancer')\n", (683, 709), False, 'import meshio\n'), ((735, 754), 'numpy.sum', 'np.sum', (['mesh.points'], {}), '(mesh.points)\n', (741, 754), True, 'import numpy as np\n'), ((266, 308), 'meshio.neuroglancer.write', 'meshio.neuroglancer.write', (['*args'], {}), '(*args, **kwargs)\n', (291, 308), False, 'import meshio\n'), ((557, 579), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (569, 579), False, 'import pathlib\n')] |
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import speed_of_light
from simphony.elements import Model
def plot_model(
    model: Model,
    pin_in: str = "o1",
    pins: Tuple[str, ...] = None,
    wavelengths=None,
    logscale: bool = True,
    fig=None,
    phase: bool = False,
) -> None:
    """Plot simphony Sparameters for a model.

    Args:
        model: simphony model class or instance (a class is instantiated here)
        pin_in: input pin name
        pins: output pins to plot; defaults to all pins of the model
        wavelengths (m): wavelength array; defaults to model.wavelengths or
            a 1520-1580 nm linspace with 2000 points
        logscale: plot magnitude in dB instead of linear |S|^2
        fig: axes-like object to draw into (see NOTE below); a new one is
            created with plt.subplot() if None
        phase: plot phase (rad) instead of magnitude

    .. plot::
        :include-source:
        import gdsfactory.simulation.simphony as gs
        import gdsfactory.simulation.simphony.components as gc
        c = gc.mmi1x2()
        gs.plot_model(c)
    """
    # accept either a model class/factory or an already-built instance
    m = model() if callable(model) else model
    if wavelengths is None:
        if hasattr(m, "wavelengths"):
            wavelengths = m.wavelengths
        else:
            wavelengths = np.linspace(1520e-9, 1580e-9, 2000)
    # S-parameters are sampled in frequency space
    f = speed_of_light / wavelengths
    s = m.s_parameters(freq=f)
    pins = pins or m.pins
    if not isinstance(pins, (tuple, set, list)):
        raise ValueError(f"pins {pins} need to be a tuple, set or list")
    # validate requested pins before touching the data
    for pin in pins:
        if pin not in m.pins:
            raise ValueError(f"{pin} not in {m.pins}")
    if pin_in not in m.pins:
        raise ValueError(f"pin_in = `{pin_in}` not in {m.pins}")
    pin_in_index = m.pins.index(pin_in)
    # NOTE(review): plt.subplot() returns an Axes, and Axes.axes is the Axes
    # itself, so `ax` works in the default case. If a matplotlib Figure is
    # passed as `fig`, Figure.axes is a *list* and ax.plot below would fail -
    # confirm the intended type of `fig`.
    fig = fig or plt.subplot()
    ax = fig.axes
    for pin_out in pins:
        pin_out_index = m.pins.index(pin_out)
        if phase:
            y = np.angle(s[:, pin_out_index, pin_in_index])
            ylabel = "angle (rad)"
        else:
            # |S|^2 power transmission, optionally in dB
            y = np.abs(s[:, pin_out_index, pin_in_index]) ** 2
            y = 10 * np.log10(y) if logscale else y
            ylabel = "|S (dB)|" if logscale else "|S|"
        ax.plot(wavelengths * 1e9, y, label=pin_out)
    ax.set_xlabel("wavelength (nm)")
    ax.set_ylabel(ylabel)
    plt.legend()
    plt.show()
    return ax
if __name__ == "__main__":
from simphony.library import siepic
from gdsfactory.simulation.simphony.components.straight import straight
w = np.linspace(1520, 1570, 1024) * 1e-9
coupler = siepic.ebeam_dc_halfring_straight(
gap=200e-9, radius=10e-6, width=500e-9, thickness=220e-9, couple_length=0.0
)
# plot_model(coupler, pin_in="n1")
# plt.legend()
# plt.show()
m = straight()
plot_model(m, phase=False)
plt.show()
| [
"numpy.abs",
"gdsfactory.simulation.simphony.components.straight.straight",
"numpy.log10",
"matplotlib.pyplot.legend",
"numpy.angle",
"numpy.linspace",
"matplotlib.pyplot.subplot",
"simphony.library.siepic.ebeam_dc_halfring_straight",
"matplotlib.pyplot.show"
] | [((2024, 2036), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2034, 2036), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2049, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2385), 'simphony.library.siepic.ebeam_dc_halfring_straight', 'siepic.ebeam_dc_halfring_straight', ([], {'gap': '(2e-07)', 'radius': '(1e-05)', 'width': '(5e-07)', 'thickness': '(2.2e-07)', 'couple_length': '(0.0)'}), '(gap=2e-07, radius=1e-05, width=5e-07,\n thickness=2.2e-07, couple_length=0.0)\n', (2305, 2385), False, 'from simphony.library import siepic\n'), ((2482, 2492), 'gdsfactory.simulation.simphony.components.straight.straight', 'straight', ([], {}), '()\n', (2490, 2492), False, 'from gdsfactory.simulation.simphony.components.straight import straight\n'), ((2528, 2538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2536, 2538), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1516), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (1514, 1516), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2250), 'numpy.linspace', 'np.linspace', (['(1520)', '(1570)', '(1024)'], {}), '(1520, 1570, 1024)\n', (2232, 2250), True, 'import numpy as np\n'), ((990, 1027), 'numpy.linspace', 'np.linspace', (['(1.52e-06)', '(1.58e-06)', '(2000)'], {}), '(1.52e-06, 1.58e-06, 2000)\n', (1001, 1027), True, 'import numpy as np\n'), ((1641, 1684), 'numpy.angle', 'np.angle', (['s[:, pin_out_index, pin_in_index]'], {}), '(s[:, pin_out_index, pin_in_index])\n', (1649, 1684), True, 'import numpy as np\n'), ((1750, 1791), 'numpy.abs', 'np.abs', (['s[:, pin_out_index, pin_in_index]'], {}), '(s[:, pin_out_index, pin_in_index])\n', (1756, 1791), True, 'import numpy as np\n'), ((1818, 1829), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (1826, 1829), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
import copy
import logging
import sys
from collections import Counter
from sys import platform
import numpy as np
import ludwig.contrib
from ludwig.constants import TRAINING, VALIDATION
logger = logging.getLogger(__name__)
# Visualization dependencies are optional extras; fail with an actionable
# message (and a nonzero exit code) if they are missing.
try:
    import matplotlib as mpl
    if platform == "darwin":  # OS X
        # the default macOS backend can hang outside a framework build
        mpl.use('TkAgg')
    import matplotlib.patches as patches
    import matplotlib.path as path
    import matplotlib.patheffects as PathEffects
    import matplotlib.pyplot as plt
    import seaborn as sns
    from matplotlib import ticker
    from matplotlib.lines import Line2D
    from mpl_toolkits.mplot3d import Axes3D
except ImportError:
    logger.error(
        ' matplotlib or seaborn are not installed. '
        'In order to install all visualization dependencies run '
        'pip install ludwig[viz]'
    )
    sys.exit(-1)
# plt.rc('xtick', labelsize='x-large')
# plt.rc('ytick', labelsize='x-large')
# plt.rc('axes', labelsize='x-large')
def learning_curves_plot(
        train_values,
        vali_values,
        metric,
        algorithm_names=None,
        title=None,
        filename=None
):
    """Plot training and validation curves of `metric` over epochs.

    One training line (and, when available, one validation line) is drawn
    per algorithm. The figure is shown, or saved when `filename` is given.
    """
    num_algorithms = len(train_values)
    max_len = max(len(curve) for curve in train_values)

    fig, ax = plt.subplots()
    sns.set_style('whitegrid')
    if title is not None:
        ax.set_title(title)

    # tab10 for a single algorithm, tab20 (paired colors) otherwise so each
    # algorithm gets a train/validation color pair
    palette_name = 'tab10' if num_algorithms == 1 else 'tab20'
    colors = plt.get_cmap(palette_name).colors

    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xlabel('epochs')
    ax.set_ylabel(metric.replace('_', ' '))

    epochs = list(range(1, max_len + 1))
    for idx, train_curve in enumerate(train_values):
        if algorithm_names is not None and idx < len(algorithm_names):
            prefix = algorithm_names[idx] + ' '
        else:
            prefix = ''
        ax.plot(epochs[:len(train_curve)], train_curve,
                label=prefix + TRAINING,
                color=colors[idx * 2], linewidth=3)
        has_vali = (idx < len(vali_values)
                    and vali_values[idx] is not None
                    and len(vali_values[idx]) > 0)
        if has_vali:
            vali_curve = vali_values[idx]
            ax.plot(epochs[:len(vali_curve)], vali_curve,
                    label=prefix + VALIDATION,
                    color=colors[idx * 2 + 1], linewidth=3)

    ax.legend()
    plt.tight_layout()

    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def compare_classifiers_plot(
        scores,
        metrics,
        algoritm_names=None,
        adaptive=False,
        decimals=4,
        title=None,
        filename=None
):
    """Horizontal grouped bar chart comparing algorithms across metrics.

    :param scores: list with one entry per metric; each entry holds one
        score per algorithm
    :param metrics: metric names, same length as ``scores``
    :param algoritm_names: y-axis labels, one per algorithm (parameter name
        keeps the original public spelling for backward compatibility)
    :param adaptive: if True, scale the x-axis to the max score instead of [0, 1]
    :param decimals: number of decimals printed inside each bar
    :param title: optional plot title
    :param filename: if given, save the figure there instead of showing it
    """
    assert len(scores) == len(metrics)
    assert len(scores) > 0
    num_metrics = len(metrics)
    sns.set_style('whitegrid')
    fig, ax = plt.subplots()
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xticklabels([], minor=True)
    if title is not None:
        ax.set_title(title)
    # each algorithm's row is shared by num_metrics bars
    width = 0.8 / num_metrics if num_metrics > 1 else 0.4
    ticks = np.arange(len(scores[0]))
    colors = plt.get_cmap('tab10').colors
    if adaptive:
        maximum = max([max(score) for score in scores])
    else:
        ax.set_xlim([0, 1])
        ax.set_xticks(np.linspace(0.0, 1.0, num=21), minor=True)
        ax.set_xticks(np.linspace(0.0, 1.0, num=11))
        maximum = 1
    half_total_width = 0.4 if num_metrics > 1 else 0.2
    ax.set_yticks(ticks + half_total_width - width / 2)
    ax.set_yticklabels(algoritm_names if algoritm_names is not None else '')
    ax.invert_yaxis()  # labels read top-to-bottom
    for i, metric in enumerate(metrics):
        ax.barh(ticks + (i * width), scores[i], width, label=metric,
                color=colors[i])
        for j, v in enumerate(scores[i]):
            # put the value outside the bar when it would not fit inside;
            # the threshold grows with the number of printed decimals
            if v < maximum * (0.025 * decimals + 0.1):
                x = v + maximum * 0.01
                horizontal_alignment = 'left'
            else:
                x = v - maximum * 0.01
                horizontal_alignment = 'right'
            txt = ax.text(x, ticks[j] + (i * width),
                          ('{:.' + str(decimals) + 'f}').format(v),
                          color='white',
                          fontweight='bold', verticalalignment='center',
                          horizontalalignment=horizontal_alignment)
            # black outline keeps white digits readable on any bar color
            txt.set_path_effects(
                [PathEffects.withStroke(linewidth=3, foreground='black')])
    plt.setp(ax.get_xminorticklabels(), visible=False)
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def compare_classifiers_line_plot(
        xs,
        scores,
        metric,
        algorithm_names=None,
        title=None,
        filename=None
):
    """Line plot of one metric for several algorithms over the values `xs`
    (e.g. k for top-k accuracy). Shows the figure, or saves it when
    `filename` is given.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors

    fig, ax = plt.subplots()
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    if title is not None:
        ax.set_title(title)

    ax.set_xticks(xs)
    ax.set_xticklabels(xs)
    ax.set_xlabel('k')
    ax.set_ylabel(metric)

    for idx, curve in enumerate(scores):
        if algorithm_names is not None and idx < len(algorithm_names):
            curve_label = algorithm_names[idx]
        else:
            curve_label = 'Algorithm {}'.format(idx)
        ax.plot(xs, curve, label=curve_label,
                color=palette[idx], linewidth=3, marker='o')

    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.tight_layout()

    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def compare_classifiers_multiclass_multimetric_plot(
        scores,
        metrics,
        labels=None,
        title=None,
        filename=None
):
    """Grouped bar chart: one group per class on the x-axis, one bar per
    metric inside each group.

    :param scores: list of per-class score sequences, one per metric.
    :param metrics: legend names, one per entry of ``scores``.
    :param labels: optional class names for the x-axis; defaults to the
        class indices.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(scores) > 0
    sns.set_style('whitegrid')
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    # Split 90% of each group's horizontal slot evenly among the metrics.
    bar_width = 0.9 / len(scores)
    positions = np.arange(len(scores[0]))
    palette = plt.get_cmap('tab10').colors
    ax.set_xlabel('class')
    ax.set_xticks(positions + bar_width)
    tick_labels = labels if labels is not None else positions
    ax.set_xticklabels(tick_labels, rotation=90)
    for idx, score in enumerate(scores):
        ax.bar(positions + idx * bar_width, score, bar_width,
               label=metrics[idx], color=palette[idx])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def radar_chart(
        ground_truth,
        predictions,
        algorithms=None,
        log_scale=False,
        title=None,
        filename=None
):
    """Radar (spider) chart comparing per-class predicted probabilities
    of several algorithms against the ground-truth distribution.

    Only the first 10 classes are drawn, re-ordered by decreasing
    ground-truth probability so the largest values start at the top.

    :param ground_truth: 1d array of per-class probabilities.
    :param predictions: list of 1d probability arrays, one per algorithm.
    :param algorithms: legend names for each entry of ``predictions``.
    :param log_scale: currently unused (the radial log scale is
        commented out below).
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    if title is not None:
        plt.title(title)
    # Keep only the first 10 classes of every series.
    ground_truth = ground_truth[0:10]
    predictions = [pred[0:10] for pred in predictions]
    gt_argsort = np.argsort(-ground_truth)  # indices sorted by decreasing ground truth
    logger.info(gt_argsort)
    # Apply the same ordering to the ground truth and every prediction.
    ground_truth = ground_truth[gt_argsort]
    predictions = [pred[gt_argsort] for pred in predictions]
    # Radial axis limit: the largest value across all series.
    maximum = max(max(ground_truth), max([max(p) for p in predictions]))
    ax = plt.subplot(111, polar=True)
    # Zero angle at the top, angles increasing clockwise.
    ax.set_theta_zero_location('N')
    ax.set_theta_direction(-1)
    ax.set_rmax(maximum)
    ax.set_rlabel_position(305)
    ax.set_ylabel('Probability')
    # ax.set_rscale('log')
    ax.grid(True)
    colors = plt.get_cmap('tab10').colors
    num_classes = len(ground_truth)
    # Set ticks to the number of properties (in radians)
    t = np.arange(0, 2 * np.pi, 2 * np.pi / num_classes)
    ax.set_xticks(t, [])
    ax.set_xticklabels(np.arange(0, num_classes))
    # Set yticks from 0 to 10
    # ax.set_yticks(np.linspace(0, 10, 11))
    # Set axes limits
    # ax.set_rlim(0, 1)
    # ax.set_rscale('log')

    def draw_polygon(values, label, color='grey'):
        # Build the closed polygon (angle, radius) vertex list; repeating
        # the first point closes the shape.
        points = [(x, y) for x, y in zip(t, values)]
        points.append(points[0])
        points = np.array(points)
        codes = [path.Path.MOVETO, ] + \
                [path.Path.LINETO, ] * (len(values) - 1) + \
                [path.Path.CLOSEPOLY]
        _path = path.Path(points, codes)
        # Filled translucent interior first, then a solid outline on top.
        _patch = patches.PathPatch(_path, fill=True, color=color, linewidth=0,
                                   alpha=.2)
        ax.add_patch(_patch)
        _patch = patches.PathPatch(_path, fill=False, color=color, linewidth=3)
        ax.add_patch(_patch)
        # Draw circles at value points
        # line = ax.scatter(points[:, 0], points[:, 1], linewidth=3,
        #                  s=50, color='white', edgecolor=color, zorder=10)
        ax.plot(points[:, 0], points[:, 1], linewidth=3, marker='o',
                fillstyle='full',
                markerfacecolor='white',
                markeredgecolor=color,
                markeredgewidth=2,
                color=color, zorder=10, label=label)

    draw_polygon(ground_truth, 'Ground Truth')
    # Draw polygon representing values
    for i, alg_predictions in enumerate(predictions):
        draw_polygon(alg_predictions, algorithms[i], colors[i])
    ax.legend(frameon=True, loc='upper left')
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def pie(ax, values, **kwargs):
    """Draw a pie chart of *values* on *ax* and return its wedge patches.

    Each non-empty slice is annotated with its absolute value and its
    percentage on two lines; zero-sized slices get no annotation.
    Extra keyword arguments are forwarded to ``ax.pie``.
    """
    total = sum(values)

    def formatter(pct):
        # pct is the slice percentage; recover the absolute value from it.
        if pct <= 0:
            return ''
        return '{:0.0f}\n({:0.1f}%)'.format(pct * total / 100, pct)

    wedges, _, labels = ax.pie(values, autopct=formatter, **kwargs)
    return wedges
def donut(
        inside_values,
        inside_labels,
        outside_values,
        outside_labels,
        outside_groups,
        title=None,
        filename=None
):
    """Two-ring donut chart: an inner ring of groups and an outer ring of
    their subdivisions, colored so each group's outer wedges use shades
    adjacent to the group's base color.

    :param inside_values: sizes of the inner-ring wedges (one per group).
    :param inside_labels: legend labels for the inner wedges.
    :param outside_values: sizes of the outer-ring wedges.
    :param outside_labels: legend labels for the outer wedges.
    :param outside_groups: group index of each outer wedge, aligning outer
        wedges with inner groups.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    ax.axis('equal')
    width = 0.35
    # Source palettes combined into one list of 4-color bands, one band
    # per group: colors[g * 4] is the group base, the following three are
    # the shades used for that group's outer wedges.
    colors_tab20c = list(plt.get_cmap('tab20c').colors)
    colors_set2 = list(plt.get_cmap('Set2').colors)
    colors_set3 = list(plt.get_cmap('Set3').colors)
    colors_pastel1 = list(plt.get_cmap('Pastel1').colors)
    # swap green and red
    # for i in range(4):
    #     tmp = colors[4 + i]
    #     colors[4 + i] = colors[8 + i]
    #     colors[8 + i] = tmp
    colors = []
    colors.extend(colors_tab20c[8:12])
    colors.append(colors_set2[5])
    colors.append(colors_set3[11])
    colors.append(colors_set3[1])
    colors.append(colors_pastel1[5])
    colors.extend(colors_tab20c[4:8])
    inside_colors = [colors[x * 4] for x in range(len(inside_values))]
    group_count = Counter(outside_groups)
    # For each group i, cycle through the 3 shades after its base color.
    # NOTE(review): iteration over list(set(outside_groups)) relies on set
    # ordering; deterministic for small ints in CPython but not guaranteed
    # in general — verify against callers.
    outside_colors = [colors[(i * 4) + ((j % 3) + 1)]
                      for i in list(set(outside_groups))
                      for j in range(group_count[i])]
    outside = pie(ax, outside_values, radius=1, pctdistance=1 - width / 2,
                  colors=outside_colors, startangle=90, counterclock=False,
                  textprops={'color': 'w', 'weight': 'bold',
                             'path_effects': [
                                 PathEffects.withStroke(linewidth=3,
                                                        foreground='black')]})
    inside = pie(ax, inside_values, radius=1 - width,
                 pctdistance=1 - (width / 2) / (1 - width),
                 colors=inside_colors, startangle=90, counterclock=False,
                 textprops={'color': 'w', 'weight': 'bold',
                            'path_effects': [PathEffects.withStroke(linewidth=3,
                                                                    foreground='black')]})
    # Shrink both pies into rings of the same thickness.
    plt.setp(inside + outside, width=width, edgecolor='white')
    # Interleave legend entries: each group's inner wedge followed by its
    # outer wedges, consuming outer wedges sequentially via so_far.
    wedges = []
    labels = []
    so_far = 0
    for i in list(set(outside_groups)):
        wedges.append(inside[i])
        labels.append(inside_labels[i])
        for j in range(group_count[i]):
            wedges.append(outside[so_far])
            labels.append(outside_labels[so_far])
            so_far += 1
    ax.legend(wedges, labels, frameon=True)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def confidence_fitlering_plot(
        thresholds,
        accuracies,
        dataset_kepts,
        algorithm_names=None,
        title=None,
        filename=None
):
    """Plot accuracy and data coverage against confidence threshold, two
    curves (accuracy + coverage) per algorithm on mirrored twin y-axes.

    :param thresholds: confidence thresholds (x-axis values).
    :param accuracies: per-algorithm accuracy sequences over thresholds.
    :param dataset_kepts: per-algorithm fraction-of-data-kept sequences.
    :param algorithm_names: optional names used as legend-label prefixes.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(accuracies) == len(dataset_kepts)
    num_algorithms = len(accuracies)
    sns.set_style('whitegrid')
    # tab20 provides the color pairs (accuracy, coverage) needed when
    # several algorithms are drawn; tab10 suffices for a single one.
    if num_algorithms == 1:
        colors = plt.get_cmap('tab10').colors
    else:  # num_algorithms > 1
        colors = plt.get_cmap('tab20').colors
    y_ticks_minor = np.linspace(0.0, 1.0, num=21)
    y_ticks_major = np.linspace(0.0, 1.0, num=11)
    y_ticks_major_labels = ['{:3.0f}%'.format(y * 100) for y in y_ticks_major]
    fig, ax1 = plt.subplots()
    if title is not None:
        ax1.set_title(title)
    ax1.grid(which='both')
    ax1.grid(which='minor', alpha=0.5)
    ax1.grid(which='major', alpha=0.75)
    # Label every other threshold as a major tick, all of them as minor.
    ax1.set_xticks([x for idx, x in enumerate(thresholds) if idx % 2 == 0])
    ax1.set_xticks(thresholds, minor=True)
    ax1.set_xlim(-0.05, 1.05)
    ax1.set_xlabel('confidence threshold')
    ax1.set_ylim(0, 1.05)
    ax1.set_yticks(y_ticks_major)
    ax1.set_yticklabels(y_ticks_major_labels)
    ax1.set_yticks(y_ticks_minor, minor=True)
    # Mirror the same percentage scale on a right-hand axis for
    # readability; both curves are actually drawn on ax1.
    ax2 = ax1.twinx()
    ax2.set_ylim(0, 1.05)
    ax2.set_yticks(y_ticks_major)
    ax2.set_yticklabels(y_ticks_major_labels)
    ax2.set_yticks(y_ticks_minor, minor=True)
    for i in range(len(accuracies)):
        # Prefix with the algorithm name (plus a separating space) when one
        # is available for this index.
        algorithm_name = algorithm_names[
            i] + ' ' if algorithm_names is not None and i < len(
            algorithm_names) else ''
        ax1.plot(thresholds, accuracies[i],
                 label='{} accuracy'.format(algorithm_name),
                 color=colors[i * 2],
                 linewidth=3)
        ax1.plot(thresholds, dataset_kepts[i],
                 label='{} data coverage'.format(algorithm_name),
                 color=colors[i * 2 + 1], linewidth=3)
    ax1.legend(frameon=True, loc=3)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def confidence_fitlering_data_vs_acc_plot(
        accuracies,
        dataset_kepts,
        model_names=None,
        dotted=False,
        decimal_digits=0,
        y_label='accuracy',
        title=None,
        filename=None
):
    """Plot a metric against the fraction of data kept, one curve per
    model.

    :param accuracies: per-model metric sequences (y values).
    :param dataset_kepts: per-model data-coverage sequences (x values).
    :param model_names: optional legend names.
    :param dotted: bool, or a list/tuple of bools per model, selecting a
        dotted line style for the corresponding curve(s).
    :param decimal_digits: decimal places in the x-axis percent labels.
    :param y_label: y-axis caption (the metric being plotted).
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(accuracies) == len(dataset_kepts)
    sns.set_style('whitegrid')
    colors = plt.get_cmap('tab10').colors
    # The x range is bounded by the largest coverage seen in any model.
    max_dataset_kept = max(
        [max(dataset_kept) for dataset_kept in dataset_kepts])
    x_ticks_minor = np.linspace(0.0, max_dataset_kept, num=21)
    x_ticks_major = np.linspace(0.0, max_dataset_kept, num=11)
    x_ticks_major_labels = [
        '{value:3.{decimal_digits}f}%'.format(
            decimal_digits=decimal_digits,
            value=x * 100
        ) for x in x_ticks_major
    ]
    y_ticks_minor = np.linspace(0.0, 1.0, num=21)
    y_ticks_major = np.linspace(0.0, 1.0, num=11)
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xticks(x_ticks_major)
    ax.set_xticks(x_ticks_minor, minor=True)
    ax.set_xticklabels(x_ticks_major_labels)
    ax.set_xlim(0, max_dataset_kept)
    ax.set_xlabel('data coverage')
    ax.set_ylim(0, 1)
    ax.set_yticks(y_ticks_major)
    ax.set_yticks(y_ticks_minor, minor=True)
    ax.set_ylabel(y_label)
    for i in range(len(accuracies)):
        # dotted may be a single bool applied to all curves or a per-curve
        # list/tuple; out-of-range indices fall back to the scalar value.
        curr_dotted = dotted[i] if isinstance(dotted,
                                              (list, tuple)) and i < len(
            dotted) else dotted
        algorithm_name = model_names[
            i] + ' ' if model_names is not None and i < len(
            model_names) else ''
        ax.plot(dataset_kepts[i], accuracies[i], label=algorithm_name,
                color=colors[i],
                linewidth=3, linestyle=':' if curr_dotted else '-')
    ax.legend(frameon=True, loc=3)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def confidence_fitlering_data_vs_acc_multiline_plot(
        accuracies,
        dataset_kepts,
        models_names,
        title=None,
        filename=None
):
    """Spaghetti plot of accuracy versus data coverage for many models.

    Every curve shares the same translucent color; a single proxy line in
    the legend stands in for all of them.

    :param accuracies: per-model accuracy sequences (y values).
    :param dataset_kepts: per-model data-coverage sequences (x values).
    :param models_names: labels passed to the legend.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(accuracies) == len(dataset_kepts)
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab20').colors
    curve_color = palette[0]
    max_dataset_kept = max(max(kept) for kept in dataset_kepts)
    x_ticks_minor = np.linspace(0.0, max_dataset_kept, num=21)
    x_ticks_major = np.linspace(0.0, max_dataset_kept, num=11)
    x_ticks_major_labels = ['{:3.0f}%'.format(x * 100) for x in x_ticks_major]
    y_ticks_minor = np.linspace(0.0, 1.0, num=21)
    y_ticks_major = np.linspace(0.0, 1.0, num=11)
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xticks(x_ticks_major)
    ax.set_xticks(x_ticks_minor, minor=True)
    ax.set_xticklabels(x_ticks_major_labels)
    ax.set_xlim(0, max_dataset_kept)
    ax.set_xlabel('data coverage')
    ax.set_ylim(0, 1)
    ax.set_yticks(y_ticks_major)
    ax.set_yticks(y_ticks_minor, minor=True)
    ax.set_ylabel('accuracy')
    for kept, acc in zip(dataset_kepts, accuracies):
        ax.plot(kept, acc, color=curve_color, linewidth=1.0, alpha=0.35)
    # One opaque proxy line represents all curves in the legend.
    legend_elements = [Line2D([0], [0], linewidth=1.0, color=curve_color)]
    ax.legend(legend_elements, models_names)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def confidence_fitlering_3d_plot(
        thresholds_1,
        thresholds_2,
        accuracies,
        dataset_kepts,
        threshold_output_feature_names=None,
        title=None,
        filename=None
):
    """3D surface plot of accuracy and data coverage over a grid of two
    confidence thresholds, one threshold per output feature.

    :param thresholds_1: thresholds for the first output feature.
    :param thresholds_2: thresholds for the second output feature.
    :param accuracies: accuracy values over the threshold grid.
    :param dataset_kepts: data-coverage values over the threshold grid.
    :param threshold_output_feature_names: names of the two output
        features, used for the x/y axis labels (indices 0 and 1 are read).
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(accuracies) == len(dataset_kepts)
    assert len(thresholds_1) == len(thresholds_2)
    # Expand the two 1d threshold vectors into 2d grid coordinates.
    thresholds_1, thresholds_2 = np.meshgrid(thresholds_1, thresholds_2)
    colors = plt.get_cmap('tab10').colors
    sns.set_style('white')
    z_ticks_minor = np.linspace(0.0, 1.0, num=21)
    z_ticks_major = np.linspace(0.0, 1.0, num=11)
    z_ticks_major_labels = ['{:3.0f}%'.format(z * 100) for z in z_ticks_major]
    fig = plt.figure()
    # The bare `Axes3D` reference keeps the mpl_toolkits import "used";
    # the actual axes object is created on the next line.
    ax = Axes3D
    ax = fig.add_subplot(111, projection='3d')
    if title is not None:
        ax.set_title(title)
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xlabel('{} probability'.format(threshold_output_feature_names[0]))
    ax.set_ylabel('{} probability'.format(threshold_output_feature_names[1]))
    ax.set_xlim(np.min(thresholds_1), np.max(thresholds_1))
    ax.set_ylim(np.min(thresholds_2), np.max(thresholds_2))
    ax.set_zlim(0, 1)
    ax.set_zticks(z_ticks_major)
    ax.set_zticklabels(z_ticks_major_labels)
    ax.set_zticks(z_ticks_minor, minor=True)
    # ORRIBLE HACK, IT'S THE ONLY WAY TO REMOVE PADDING
    # Monkey-patch Axis._get_coord_info once (guarded by the saved-original
    # attribute) to shrink the axis extents and remove the default padding.
    from mpl_toolkits.mplot3d.axis3d import Axis
    if not hasattr(Axis, '_get_coord_info_old'):
        def _get_coord_info_new(self, renderer):
            mins, maxs, centers, deltas, tc, highs = self._get_coord_info_old(
                renderer)
            mins += deltas / 4
            maxs -= deltas / 4
            return mins, maxs, centers, deltas, tc, highs

        Axis._get_coord_info_old = Axis._get_coord_info
        Axis._get_coord_info = _get_coord_info_new
    # END OF HORRIBLE HACK
    surf_1 = ax.plot_surface(thresholds_1, thresholds_2, accuracies,
                             alpha=0.5,
                             label='accuracy',
                             cmap=plt.get_cmap('winter'),
                             edgecolor='none')
    surf_2 = ax.plot_surface(thresholds_1, thresholds_2, dataset_kepts,
                             alpha=0.5,
                             label='data coverage',
                             cmap=plt.get_cmap('autumn'),
                             edgecolor='none')
    # 3D surfaces cannot be used as legend handles directly: copy them,
    # force a flat color, and backfill the 2d color attributes the legend
    # machinery expects (private mpl attributes — fragile across versions).
    handle_1 = copy.copy(surf_1)
    handle_2 = copy.copy(surf_2)
    handle_1.set_color(colors[0])
    handle_2.set_color(colors[1])
    handle_1._edgecolors2d = handle_1._edgecolors3d
    handle_2._edgecolors2d = handle_2._edgecolors3d
    handle_1._facecolors2d = handle_1._facecolors3d
    handle_2._facecolors2d = handle_2._facecolors3d
    ax.legend(frameon=True, loc=3, handles=[handle_1, handle_2])
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def threshold_vs_metric_plot(
        thresholds,
        scores,
        algorithm_names=None,
        title=None,
        filename=None
):
    """Plot one metric curve per algorithm against confidence threshold.

    :param thresholds: x-axis threshold values.
    :param scores: per-algorithm metric sequences over the thresholds.
    :param algorithm_names: optional legend names (a separating space is
        appended, matching the sibling plots in this module).
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    fig, ax1 = plt.subplots()
    if title is not None:
        ax1.set_title(title)
    ax1.grid(which='both')
    ax1.grid(which='minor', alpha=0.5)
    ax1.grid(which='major', alpha=0.75)
    # Major ticks on every other threshold, minor ticks on all of them.
    ax1.set_xticks([t for pos, t in enumerate(thresholds) if pos % 2 == 0])
    ax1.set_xticks(thresholds, minor=True)
    ax1.set_xlabel('confidence threshold')
    for idx in range(len(scores)):
        has_name = algorithm_names is not None and idx < len(algorithm_names)
        curve_label = algorithm_names[idx] + ' ' if has_name else ''
        ax1.plot(thresholds, scores[idx], label=curve_label,
                 color=palette[idx], linewidth=3, marker='o')
    ax1.legend(frameon=True)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def roc_curves(
        fpr_tprs,
        algorithm_names=None,
        title=None,
        graded_color=False,
        filename=None
):
    """Draw one ROC curve per algorithm plus the chance diagonal.

    :param fpr_tprs: list of pairs; element i provides the false-positive
        rates at index 0 and true-positive rates at index 1.
    :param algorithm_names: optional legend names.
    :param title: optional figure title.
    :param graded_color: when true, color curves along the RdYlGn colormap
        by their position instead of the categorical tab10 palette.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    gradient = plt.get_cmap('RdYlGn')
    minor_ticks = np.linspace(0.0, 1.0, num=21)
    major_ticks = np.linspace(0.0, 1.0, num=11)
    fig, ax = plt.subplots()
    if title is not None:
        ax.set_title(title)
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    ax.set_xlim(0, 1)
    ax.set_xlabel('False positive rate')
    ax.set_ylim(0, 1)
    ax.set_yticks(major_ticks)
    ax.set_yticks(minor_ticks, minor=True)
    ax.set_ylabel('True positive rate')
    # Chance-level reference diagonal.
    plt.plot([0, 1], [0, 1], color='black', linewidth=3, linestyle='--')
    num_curves = len(fpr_tprs)
    for idx in range(num_curves):
        has_name = algorithm_names is not None and idx < len(algorithm_names)
        curve_label = algorithm_names[idx] + ' ' if has_name else ''
        if graded_color:
            curve_color = gradient(idx / num_curves)
        else:
            curve_color = palette[idx]
        pair = fpr_tprs[idx]
        ax.plot(pair[0], pair[1], label=curve_label, color=curve_color,
                linewidth=3)
    ax.legend(frameon=True)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def calibration_plot(
        fraction_positives,
        mean_predicted_values,
        algorithm_names=None,
        filename=None
):
    """Reliability (calibration) curve: observed vs. predicted probability,
    one regression-smoothed curve per algorithm plus the perfect diagonal.

    :param fraction_positives: per-algorithm observed positive fractions.
    :param mean_predicted_values: per-algorithm mean predicted values,
        aligned element-wise with ``fraction_positives``.
    :param algorithm_names: optional legend names.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(fraction_positives) == len(mean_predicted_values)
    sns.set_style('whitegrid')
    colors = plt.get_cmap('tab10').colors
    num_algorithms = len(fraction_positives)
    plt.figure(figsize=(9, 9))
    plt.grid(which='both')
    plt.grid(which='minor', alpha=0.5)
    plt.grid(which='major', alpha=0.75)
    plt.plot([0, 1], [0, 1], 'k:', label='Perfectly calibrated')
    for i in range(num_algorithms):
        # ax1.plot(mean_predicted_values[i], fraction_positives[i],
        #         label=algorithms[i] if algorithm_names is not None and i < len(algorithms) else '')
        # sns.tsplot(mean_predicted_values[i], fraction_positives[i], ax=ax1, color=colors[i])
        assert len(mean_predicted_values[i]) == len(fraction_positives[i])
        # Polynomial order capped by the number of points minus one.
        order = min(3, len(mean_predicted_values[i]) - 1)
        # NOTE(review): regplot is called with positional x/y data, which
        # newer seaborn releases deprecate in favor of keyword arguments —
        # confirm against the pinned seaborn version.
        sns.regplot(mean_predicted_values[i], fraction_positives[i],
                    order=order, x_estimator=np.mean, color=colors[i],
                    marker='o', scatter_kws={'s': 40},
                    label=algorithm_names[
                        i] if algorithm_names is not None and i < len(
                        algorithm_names) else '')
    ticks = np.linspace(0.0, 1.0, num=11)
    plt.xlim([-0.05, 1.05])
    plt.xticks(ticks)
    plt.xlabel('Predicted probability')
    plt.ylabel('Observed probability')
    plt.ylim([-0.05, 1.05])
    plt.yticks(ticks)
    plt.legend(loc='lower right')
    plt.title('Calibration (reliability curve)')
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def brier_plot(
        brier_scores,
        algorithm_names=None,
        title=None,
        filename=None
):
    """Plot per-class Brier scores, one line per algorithm.

    :param brier_scores: 2d array; column ``i`` holds the per-class Brier
        scores of algorithm ``i`` (``shape[1]`` columns are plotted).
    :param algorithm_names: optional legend labels, one per column.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    if title is not None:
        plt.title(title)
    colors = plt.get_cmap('tab10').colors
    plt.grid(which='both')
    plt.grid(which='minor', alpha=0.5)
    plt.grid(which='major', alpha=0.75)
    plt.xlabel('class')
    plt.ylabel('brier')
    # Fixed: dropped the unused `x = np.array(range(...))` local and the
    # stray trailing space the original appended to each legend label
    # (copied from the prefix-style labels used elsewhere in this module).
    for i in range(brier_scores.shape[1]):
        label = (algorithm_names[i]
                 if algorithm_names is not None and i < len(algorithm_names)
                 else '')
        plt.plot(brier_scores[:, i], label=label,
                 color=colors[i], linewidth=3)
    plt.legend()
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def predictions_distribution_plot(
        probabilities,
        algorithm_names=None,
        filename=None
):
    """Overlaid histograms of predicted probabilities, one per algorithm.

    :param probabilities: list of probability samples, one per algorithm.
    :param algorithm_names: optional legend names.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    plt.figure(figsize=(9, 9))
    plt.grid(which='both')
    plt.grid(which='minor', alpha=0.5)
    plt.grid(which='major', alpha=0.75)
    for idx, probs in enumerate(probabilities):
        if algorithm_names is not None and idx < len(algorithm_names):
            hist_label = algorithm_names[idx]
        else:
            hist_label = ''
        # Translucent step-filled histograms so overlaps remain visible.
        plt.hist(probs, range=(0, 1), bins=41, color=palette[idx],
                 label=hist_label, histtype='stepfilled', alpha=0.5, lw=2)
    plt.xlabel('Mean predicted value')
    plt.xlim([0, 1])
    plt.xticks(np.linspace(0.0, 1.0, num=21))
    plt.ylabel('Count')
    plt.legend(loc='upper center', ncol=2)
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def confusion_matrix_plot(
        confusion_matrix,
        labels=None,
        output_feature_name=None,
        filename=None
):
    """Heat-map rendering of a confusion matrix with per-class tick labels
    and a colorbar.

    :param confusion_matrix: 2d matrix of counts (actual x predicted).
    :param labels: class names used for both axes' tick labels.
    :param output_feature_name: feature name interpolated into the axis
        captions.
    :param filename: if given, save the figure there instead of showing it.
    """
    mpl.rcParams.update({'figure.autolayout': True})
    fig, ax = plt.subplots()
    ax.invert_yaxis()
    # Predicted classes along the top edge.
    ax.xaxis.tick_top()
    ax.xaxis.set_label_position('top')
    cax = ax.matshow(confusion_matrix, cmap='viridis')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    # The leading '' pads the label list so labels align with the ticks
    # produced by MultipleLocator(1).
    ax.set_xticklabels([''] + labels, rotation=45, ha='left')
    ax.set_yticklabels([''] + labels)
    ax.grid(False)
    # Hide tick marks, keep only the labels.
    ax.tick_params(axis='both', which='both', length=0)
    fig.colorbar(cax, ax=ax, extend='max')
    ax.set_xlabel('Predicted {}'.format(output_feature_name))
    ax.set_ylabel('Actual {}'.format(output_feature_name))
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def double_axis_line_plot(
        y1_sorted,
        y2,
        y1_name,
        y2_name,
        labels=None,
        title=None,
        filename=None
):
    """Plot two series over the class index with independent y-axes.

    ``y1_sorted`` goes on the left axis, ``y2`` on a twin right axis; each
    axis label and its ticks are tinted with the matching curve's color.

    :param y1_sorted: left-axis series, assumed sorted by its own value.
    :param y2: right-axis series.
    :param y1_name: left-axis / first-curve name.
    :param y2_name: right-axis / second-curve name.
    :param labels: optional per-class x tick labels.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    y1_color = palette[1]
    y2_color = palette[0]
    fig, ax1 = plt.subplots()
    if title is not None:
        ax1.set_title(title)
    ax1.set_xlabel('class (sorted by {})'.format(y1_name))
    ax1.set_xlim(0, len(y1_sorted) - 1)
    if labels is not None:
        # NOTE(review): labels are applied before the tick positions; the
        # usual matplotlib order is ticks first — verify rendering.
        ax1.set_xticklabels(labels, rotation=45, ha='right')
        ax1.set_xticks(np.arange(len(labels)))
    ax1.set_ylabel(y1_name, color=y1_color)
    ax1.tick_params('y', colors=y1_color)
    ax1.set_ylim(min(y1_sorted), max(y1_sorted))
    ax2 = ax1.twinx()
    ax2.set_ylabel(y2_name, color=y2_color)
    ax2.tick_params('y', colors=y2_color)
    ax2.set_ylim(min(y2), max(y2))
    ax1.plot(y1_sorted, label=y1_name, color=y1_color,
             linewidth=4)
    ax2.plot(y2, label=y2_name, color=y2_color,
             linewidth=3)
    fig.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def plot_matrix(
        matrix,
        cmap='hot',
        filename=None
):
    """Render *matrix* as a heat map with the given colormap.

    :param matrix: 2d array-like to visualize.
    :param cmap: matplotlib colormap name.
    :param filename: if given, save the figure there instead of showing it.
    """
    plt.matshow(matrix, cmap=cmap)
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if not filename:
        plt.show()
    else:
        plt.savefig(filename)
def plot_distributions(
        distributions,
        labels=None,
        title=None,
        filename=None
):
    """Line plot of several per-class probability distributions.

    :param distributions: list of probability sequences to plot.
    :param labels: optional legend names; missing entries fall back to
        'Distribution {i}'.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    fig, ax1 = plt.subplots()
    if title is not None:
        ax1.set_title(title)
    ax1.grid(which='both')
    ax1.grid(which='minor', alpha=0.5)
    ax1.grid(which='major', alpha=0.75)
    ax1.set_xlabel('class')
    ax1.set_ylabel('p')
    ax1.tick_params('y')
    for idx, dist in enumerate(distributions):
        if labels is not None and idx < len(labels):
            dist_label = labels[idx]
        else:
            dist_label = 'Distribution {}'.format(idx)
        ax1.plot(dist, color=palette[idx], alpha=0.6, label=dist_label)
    ax1.legend(frameon=True)
    fig.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def plot_distributions_difference(
        distribution,
        labels=None,
        title=None,
        filename=None
):
    """Plot a single per-class distribution-difference curve.

    :param distribution: sequence of per-class values to plot.
    :param labels: accepted for interface symmetry with
        ``plot_distributions`` but not used here.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    sns.set_style('whitegrid')
    palette = plt.get_cmap('tab10').colors
    fig, ax1 = plt.subplots()
    if title is not None:
        ax1.set_title(title)
    ax1.grid(which='both')
    ax1.grid(which='minor', alpha=0.5)
    ax1.grid(which='major', alpha=0.75)
    ax1.set_xlabel('class')
    ax1.set_ylabel('p')
    ax1.tick_params('y')
    ax1.plot(distribution, color=palette[0])
    fig.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
def bar_plot(
        xs,
        ys,
        decimals=4,
        labels=None,
        title=None,
        filename=None
):
    """Horizontal bar chart with each bar annotated by its value.

    :param xs: per-bar identifiers, used as y tick labels when ``labels``
        is not given.
    :param ys: bar lengths; must support ``.max()`` (e.g. a numpy array).
    :param decimals: decimal places in the printed bar values.
    :param labels: optional y tick labels overriding ``xs``.
    :param title: optional figure title.
    :param filename: if given, save the figure there instead of showing it.
    """
    assert len(xs) == len(ys)
    assert len(xs) > 0
    sns.set_style('whitegrid')
    fig, ax = plt.subplots()
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.5)
    ax.grid(which='major', alpha=0.75)
    if title is not None:
        ax.set_title(title)
    colors = plt.get_cmap('tab10').colors
    ax.invert_yaxis()  # labels read top-to-bottom
    maximum = ys.max()
    ticks = np.arange(len(xs))
    ax.set_yticks(ticks)
    if labels is None:
        ax.set_yticklabels(xs)
    else:
        ax.set_yticklabels(labels)
    ax.barh(ticks, ys, color=colors[0], align='center')
    for i, v in enumerate(ys):
        # Short bars get their value printed just outside the bar end
        # (left-aligned); longer bars get it inside (right-aligned). The
        # threshold scales with `decimals` since longer numbers need more
        # room inside the bar.
        if v < maximum * (0.025 * decimals + 0.1):
            x = v + maximum * 0.01
            horizontal_alignment = 'left'
        else:
            x = v - maximum * 0.01
            horizontal_alignment = 'right'
        txt = ax.text(x, ticks[i], ('{:.' + str(decimals) + 'f}').format(v),
                      color='white',
                      fontweight='bold', verticalalignment='center',
                      horizontalalignment=horizontal_alignment)
        # Black outline keeps the white text readable on any background.
        txt.set_path_effects(
            [PathEffects.withStroke(linewidth=3, foreground='black')])
    plt.tight_layout()
    ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
| [
"logging.getLogger",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.argsort",
"numpy.array",
"sys.exit",
"copy.copy",
"matplotlib.patheffects.withStroke",
"matplotlib.lines.Line2D",
"numpy.arange",
"matplotlib.path.Path",
"matplotlib.pyplot.xlabel",
"matp... | [((980, 1007), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (997, 1007), False, 'import logging\n'), ((2010, 2024), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2022, 2024), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2056), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (2043, 2056), True, 'import seaborn as sns\n'), ((3129, 3147), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3145, 3147), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3602), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (3589, 3602), True, 'import seaborn as sns\n'), ((3618, 3632), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3630, 3632), True, 'import matplotlib.pyplot as plt\n'), ((5422, 5440), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5438, 5440), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5769), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (5756, 5769), True, 'import seaborn as sns\n'), ((5827, 5841), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5839, 5841), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6480), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6478, 6480), True, 'import matplotlib.pyplot as plt\n'), ((6809, 6835), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (6822, 6835), True, 'import seaborn as sns\n'), ((6851, 6865), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6863, 6865), True, 'import matplotlib.pyplot as plt\n'), ((7426, 7444), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7442, 7444), True, 'import matplotlib.pyplot as plt\n'), ((7748, 7774), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (7761, 7774), True, 'import seaborn as 
sns\n'), ((7939, 7964), 'numpy.argsort', 'np.argsort', (['(-ground_truth)'], {}), '(-ground_truth)\n', (7949, 7964), True, 'import numpy as np\n'), ((8202, 8230), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'polar': '(True)'}), '(111, polar=True)\n', (8213, 8230), True, 'import matplotlib.pyplot as plt\n'), ((8579, 8627), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / num_classes)'], {}), '(0, 2 * np.pi, 2 * np.pi / num_classes)\n', (8588, 8627), True, 'import numpy as np\n'), ((10174, 10192), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10190, 10192), True, 'import matplotlib.pyplot as plt\n'), ((10822, 10836), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10834, 10836), True, 'import matplotlib.pyplot as plt\n'), ((11623, 11646), 'collections.Counter', 'Counter', (['outside_groups'], {}), '(outside_groups)\n', (11630, 11646), False, 'from collections import Counter\n'), ((12644, 12702), 'matplotlib.pyplot.setp', 'plt.setp', (['(inside + outside)'], {'width': 'width', 'edgecolor': '"""white"""'}), "(inside + outside, width=width, edgecolor='white')\n", (12652, 12702), True, 'import matplotlib.pyplot as plt\n'), ((13070, 13088), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13086, 13088), True, 'import matplotlib.pyplot as plt\n'), ((13493, 13519), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (13506, 13519), True, 'import seaborn as sns\n'), ((13694, 13723), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (13705, 13723), True, 'import numpy as np\n'), ((13744, 13773), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (13755, 13773), True, 'import numpy as np\n'), ((13869, 13883), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13881, 13883), True, 'import matplotlib.pyplot as plt\n'), ((15150, 15168), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15166, 15168), True, 'import matplotlib.pyplot as plt\n'), ((15600, 15626), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (15613, 15626), True, 'import seaborn as sns\n'), ((15783, 15825), 'numpy.linspace', 'np.linspace', (['(0.0)', 'max_dataset_kept'], {'num': '(21)'}), '(0.0, max_dataset_kept, num=21)\n', (15794, 15825), True, 'import numpy as np\n'), ((15846, 15888), 'numpy.linspace', 'np.linspace', (['(0.0)', 'max_dataset_kept'], {'num': '(11)'}), '(0.0, max_dataset_kept, num=11)\n', (15857, 15888), True, 'import numpy as np\n'), ((16093, 16122), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (16104, 16122), True, 'import numpy as np\n'), ((16143, 16172), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (16154, 16172), True, 'import numpy as np\n'), ((16188, 16202), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (16200, 16202), True, 'import matplotlib.pyplot as plt\n'), ((17244, 17262), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17260, 17262), True, 'import matplotlib.pyplot as plt\n'), ((17624, 17650), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (17637, 17650), True, 'import seaborn as sns\n'), ((17807, 17849), 'numpy.linspace', 'np.linspace', (['(0.0)', 'max_dataset_kept'], {'num': '(21)'}), '(0.0, max_dataset_kept, num=21)\n', (17818, 17849), True, 'import numpy as np\n'), ((17870, 17912), 'numpy.linspace', 'np.linspace', (['(0.0)', 'max_dataset_kept'], {'num': '(11)'}), '(0.0, max_dataset_kept, num=11)\n', (17881, 17912), True, 'import numpy as np\n'), ((18012, 18041), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (18023, 18041), True, 'import numpy as np\n'), ((18062, 18091), 'numpy.linspace', 'np.linspace', 
(['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (18073, 18091), True, 'import numpy as np\n'), ((18107, 18121), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18119, 18121), True, 'import matplotlib.pyplot as plt\n'), ((18877, 18895), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18893, 18895), True, 'import matplotlib.pyplot as plt\n'), ((19384, 19423), 'numpy.meshgrid', 'np.meshgrid', (['thresholds_1', 'thresholds_2'], {}), '(thresholds_1, thresholds_2)\n', (19395, 19423), True, 'import numpy as np\n'), ((19471, 19493), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (19484, 19493), True, 'import seaborn as sns\n'), ((19515, 19544), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (19526, 19544), True, 'import numpy as np\n'), ((19565, 19594), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (19576, 19594), True, 'import numpy as np\n'), ((19685, 19697), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19695, 19697), True, 'import matplotlib.pyplot as plt\n'), ((21454, 21471), 'copy.copy', 'copy.copy', (['surf_1'], {}), '(surf_1)\n', (21463, 21471), False, 'import copy\n'), ((21487, 21504), 'copy.copy', 'copy.copy', (['surf_2'], {}), '(surf_2)\n', (21496, 21504), False, 'import copy\n'), ((21855, 21873), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21871, 21873), True, 'import matplotlib.pyplot as plt\n'), ((22163, 22189), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (22176, 22189), True, 'import seaborn as sns\n'), ((22435, 22449), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22447, 22449), True, 'import matplotlib.pyplot as plt\n'), ((23309, 23327), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23325, 23327), True, 'import matplotlib.pyplot as plt\n'), 
((23613, 23639), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (23626, 23639), True, 'import seaborn as sns\n'), ((23698, 23720), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdYlGn"""'], {}), "('RdYlGn')\n", (23710, 23720), True, 'import matplotlib.pyplot as plt\n'), ((23742, 23771), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (23753, 23771), True, 'import numpy as np\n'), ((23792, 23821), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (23803, 23821), True, 'import numpy as np\n'), ((23837, 23851), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23849, 23851), True, 'import matplotlib.pyplot as plt\n'), ((24221, 24289), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""black"""', 'linewidth': '(3)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='black', linewidth=3, linestyle='--')\n", (24229, 24289), True, 'import matplotlib.pyplot as plt\n'), ((24723, 24741), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24739, 24741), True, 'import matplotlib.pyplot as plt\n'), ((25092, 25118), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (25105, 25118), True, 'import seaborn as sns\n'), ((25213, 25239), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (25223, 25239), True, 'import matplotlib.pyplot as plt\n'), ((25244, 25266), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (25252, 25266), True, 'import matplotlib.pyplot as plt\n'), ((25271, 25305), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'alpha': '(0.5)'}), "(which='minor', alpha=0.5)\n", (25279, 25305), True, 'import matplotlib.pyplot as plt\n'), ((25310, 25345), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'alpha': '(0.75)'}), 
"(which='major', alpha=0.75)\n", (25318, 25345), True, 'import matplotlib.pyplot as plt\n'), ((25351, 25411), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k:"""'], {'label': '"""Perfectly calibrated"""'}), "([0, 1], [0, 1], 'k:', label='Perfectly calibrated')\n", (25359, 25411), True, 'import matplotlib.pyplot as plt\n'), ((26223, 26252), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (26234, 26252), True, 'import numpy as np\n'), ((26257, 26280), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (26265, 26280), True, 'import matplotlib.pyplot as plt\n'), ((26285, 26302), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (26295, 26302), True, 'import matplotlib.pyplot as plt\n'), ((26307, 26342), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted probability"""'], {}), "('Predicted probability')\n", (26317, 26342), True, 'import matplotlib.pyplot as plt\n'), ((26347, 26381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Observed probability"""'], {}), "('Observed probability')\n", (26357, 26381), True, 'import matplotlib.pyplot as plt\n'), ((26386, 26409), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (26394, 26409), True, 'import matplotlib.pyplot as plt\n'), ((26414, 26431), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks'], {}), '(ticks)\n', (26424, 26431), True, 'import matplotlib.pyplot as plt\n'), ((26436, 26465), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (26446, 26465), True, 'import matplotlib.pyplot as plt\n'), ((26470, 26514), 'matplotlib.pyplot.title', 'plt.title', (['"""Calibration (reliability curve)"""'], {}), "('Calibration (reliability curve)')\n", (26479, 26514), True, 'import matplotlib.pyplot as plt\n'), ((26520, 26538), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (26536, 26538), True, 
'import matplotlib.pyplot as plt\n'), ((26800, 26826), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (26813, 26826), True, 'import seaborn as sns\n'), ((26927, 26949), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (26935, 26949), True, 'import matplotlib.pyplot as plt\n'), ((26954, 26988), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'alpha': '(0.5)'}), "(which='minor', alpha=0.5)\n", (26962, 26988), True, 'import matplotlib.pyplot as plt\n'), ((26993, 27028), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'alpha': '(0.75)'}), "(which='major', alpha=0.75)\n", (27001, 27028), True, 'import matplotlib.pyplot as plt\n'), ((27033, 27052), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""class"""'], {}), "('class')\n", (27043, 27052), True, 'import matplotlib.pyplot as plt\n'), ((27057, 27076), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""brier"""'], {}), "('brier')\n", (27067, 27076), True, 'import matplotlib.pyplot as plt\n'), ((27424, 27436), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27434, 27436), True, 'import matplotlib.pyplot as plt\n'), ((27441, 27459), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27457, 27459), True, 'import matplotlib.pyplot as plt\n'), ((27721, 27747), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (27734, 27747), True, 'import seaborn as sns\n'), ((27837, 27863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (27847, 27863), True, 'import matplotlib.pyplot as plt\n'), ((27868, 27890), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (27876, 27890), True, 'import matplotlib.pyplot as plt\n'), ((27895, 27929), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'alpha': '(0.5)'}), "(which='minor', alpha=0.5)\n", (27903, 27929), True, 
'import matplotlib.pyplot as plt\n'), ((27934, 27969), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'alpha': '(0.75)'}), "(which='major', alpha=0.75)\n", (27942, 27969), True, 'import matplotlib.pyplot as plt\n'), ((28299, 28333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean predicted value"""'], {}), "('Mean predicted value')\n", (28309, 28333), True, 'import matplotlib.pyplot as plt\n'), ((28338, 28354), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (28346, 28354), True, 'import matplotlib.pyplot as plt\n'), ((28405, 28424), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (28415, 28424), True, 'import matplotlib.pyplot as plt\n'), ((28429, 28467), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'ncol': '(2)'}), "(loc='upper center', ncol=2)\n", (28439, 28467), True, 'import matplotlib.pyplot as plt\n'), ((28473, 28491), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (28489, 28491), True, 'import matplotlib.pyplot as plt\n'), ((28773, 28821), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (28792, 28821), True, 'import matplotlib as mpl\n'), ((28836, 28850), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (28848, 28850), True, 'import matplotlib.pyplot as plt\n'), ((29454, 29472), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29470, 29472), True, 'import matplotlib.pyplot as plt\n'), ((29779, 29805), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (29792, 29805), True, 'import seaborn as sns\n'), ((29865, 29879), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29877, 29879), True, 'import matplotlib.pyplot as plt\n'), ((30976, 31006), 'matplotlib.pyplot.matshow', 'plt.matshow', (['matrix'], {'cmap': 'cmap'}), '(matrix, cmap=cmap)\n', (30987, 31006), 
True, 'import matplotlib.pyplot as plt\n'), ((31268, 31294), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (31281, 31294), True, 'import seaborn as sns\n'), ((31354, 31368), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (31366, 31368), True, 'import matplotlib.pyplot as plt\n'), ((32179, 32205), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (32192, 32205), True, 'import seaborn as sns\n'), ((32265, 32279), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32277, 32279), True, 'import matplotlib.pyplot as plt\n'), ((32917, 32943), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (32930, 32943), True, 'import seaborn as sns\n'), ((32959, 32973), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32971, 32973), True, 'import matplotlib.pyplot as plt\n'), ((34069, 34087), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34085, 34087), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1105), 'matplotlib.use', 'mpl.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (1096, 1105), True, 'import matplotlib as mpl\n'), ((1612, 1624), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1620, 1624), False, 'import sys\n'), ((3203, 3212), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3210, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3239, 3260), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3250, 3260), True, 'import matplotlib.pyplot as plt\n'), ((3279, 3289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3287, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3942, 3963), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (3954, 3963), True, 'import matplotlib.pyplot as plt\n'), ((5496, 5505), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5503, 5505), True, 'import matplotlib.pyplot as plt\n'), 
((5532, 5553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (5543, 5553), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5580, 5582), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5804), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (5795, 5804), True, 'import matplotlib.pyplot as plt\n'), ((6536, 6545), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6543, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6593), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (6583, 6593), True, 'import matplotlib.pyplot as plt\n'), ((6612, 6622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6620, 6622), True, 'import matplotlib.pyplot as plt\n'), ((7004, 7025), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (7016, 7025), True, 'import matplotlib.pyplot as plt\n'), ((7500, 7509), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7507, 7509), True, 'import matplotlib.pyplot as plt\n'), ((7536, 7557), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (7547, 7557), True, 'import matplotlib.pyplot as plt\n'), ((7576, 7586), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7584, 7586), True, 'import matplotlib.pyplot as plt\n'), ((7810, 7826), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7819, 7826), True, 'import matplotlib.pyplot as plt\n'), ((8447, 8468), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (8459, 8468), True, 'import matplotlib.pyplot as plt\n'), ((8676, 8701), 'numpy.arange', 'np.arange', (['(0)', 'num_classes'], {}), '(0, num_classes)\n', (8685, 8701), True, 'import numpy as np\n'), ((9006, 9022), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (9014, 9022), True, 'import numpy as np\n'), ((9180, 9204), 
'matplotlib.path.Path', 'path.Path', (['points', 'codes'], {}), '(points, codes)\n', (9189, 9204), True, 'import matplotlib.path as path\n'), ((9222, 9294), 'matplotlib.patches.PathPatch', 'patches.PathPatch', (['_path'], {'fill': '(True)', 'color': 'color', 'linewidth': '(0)', 'alpha': '(0.2)'}), '(_path, fill=True, color=color, linewidth=0, alpha=0.2)\n', (9239, 9294), True, 'import matplotlib.patches as patches\n'), ((9375, 9437), 'matplotlib.patches.PathPatch', 'patches.PathPatch', (['_path'], {'fill': '(False)', 'color': 'color', 'linewidth': '(3)'}), '(_path, fill=False, color=color, linewidth=3)\n', (9392, 9437), True, 'import matplotlib.patches as patches\n'), ((10248, 10257), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10255, 10257), True, 'import matplotlib.pyplot as plt\n'), ((10284, 10305), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (10295, 10305), True, 'import matplotlib.pyplot as plt\n'), ((10324, 10334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10332, 10334), True, 'import matplotlib.pyplot as plt\n'), ((13144, 13153), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13151, 13153), True, 'import matplotlib.pyplot as plt\n'), ((13180, 13201), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (13191, 13201), True, 'import matplotlib.pyplot as plt\n'), ((13220, 13230), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13228, 13230), True, 'import matplotlib.pyplot as plt\n'), ((15224, 15233), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (15231, 15233), True, 'import matplotlib.pyplot as plt\n'), ((15260, 15281), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (15271, 15281), True, 'import matplotlib.pyplot as plt\n'), ((15300, 15310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15308, 15310), True, 'import matplotlib.pyplot as plt\n'), ((15641, 15662), 'matplotlib.pyplot.get_cmap', 
'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (15653, 15662), True, 'import matplotlib.pyplot as plt\n'), ((17318, 17327), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (17325, 17327), True, 'import matplotlib.pyplot as plt\n'), ((17354, 17375), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (17365, 17375), True, 'import matplotlib.pyplot as plt\n'), ((17394, 17404), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17402, 17404), True, 'import matplotlib.pyplot as plt\n'), ((17665, 17686), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (17677, 17686), True, 'import matplotlib.pyplot as plt\n'), ((18778, 18826), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'linewidth': '(1.0)', 'color': 'colors[0]'}), '([0], [0], linewidth=1.0, color=colors[0])\n', (18784, 18826), False, 'from matplotlib.lines import Line2D\n'), ((18951, 18960), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (18958, 18960), True, 'import matplotlib.pyplot as plt\n'), ((18987, 19008), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (18998, 19008), True, 'import matplotlib.pyplot as plt\n'), ((19027, 19037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19035, 19037), True, 'import matplotlib.pyplot as plt\n'), ((19438, 19459), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (19450, 19459), True, 'import matplotlib.pyplot as plt\n'), ((20094, 20114), 'numpy.min', 'np.min', (['thresholds_1'], {}), '(thresholds_1)\n', (20100, 20114), True, 'import numpy as np\n'), ((20116, 20136), 'numpy.max', 'np.max', (['thresholds_1'], {}), '(thresholds_1)\n', (20122, 20136), True, 'import numpy as np\n'), ((20154, 20174), 'numpy.min', 'np.min', (['thresholds_2'], {}), '(thresholds_2)\n', (20160, 20174), True, 'import numpy as np\n'), ((20176, 20196), 'numpy.max', 'np.max', (['thresholds_2'], {}), '(thresholds_2)\n', (20182, 
20196), True, 'import numpy as np\n'), ((21929, 21938), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21936, 21938), True, 'import matplotlib.pyplot as plt\n'), ((21965, 21986), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (21976, 21986), True, 'import matplotlib.pyplot as plt\n'), ((22005, 22015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22013, 22015), True, 'import matplotlib.pyplot as plt\n'), ((22204, 22225), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (22216, 22225), True, 'import matplotlib.pyplot as plt\n'), ((23383, 23392), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (23390, 23392), True, 'import matplotlib.pyplot as plt\n'), ((23419, 23440), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (23430, 23440), True, 'import matplotlib.pyplot as plt\n'), ((23459, 23469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23467, 23469), True, 'import matplotlib.pyplot as plt\n'), ((23654, 23675), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (23666, 23675), True, 'import matplotlib.pyplot as plt\n'), ((24797, 24806), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (24804, 24806), True, 'import matplotlib.pyplot as plt\n'), ((24833, 24854), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (24844, 24854), True, 'import matplotlib.pyplot as plt\n'), ((24873, 24883), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24881, 24883), True, 'import matplotlib.pyplot as plt\n'), ((25133, 25154), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (25145, 25154), True, 'import matplotlib.pyplot as plt\n'), ((26594, 26603), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26601, 26603), True, 'import matplotlib.pyplot as plt\n'), ((26630, 26651), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), 
'(filename)\n', (26641, 26651), True, 'import matplotlib.pyplot as plt\n'), ((26670, 26680), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26678, 26680), True, 'import matplotlib.pyplot as plt\n'), ((26862, 26878), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (26871, 26878), True, 'import matplotlib.pyplot as plt\n'), ((26893, 26914), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (26905, 26914), True, 'import matplotlib.pyplot as plt\n'), ((27515, 27524), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (27522, 27524), True, 'import matplotlib.pyplot as plt\n'), ((27551, 27572), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (27562, 27572), True, 'import matplotlib.pyplot as plt\n'), ((27591, 27601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27599, 27601), True, 'import matplotlib.pyplot as plt\n'), ((27762, 27783), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (27774, 27783), True, 'import matplotlib.pyplot as plt\n'), ((28370, 28399), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (28381, 28399), True, 'import numpy as np\n'), ((28547, 28556), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28554, 28556), True, 'import matplotlib.pyplot as plt\n'), ((28583, 28604), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (28594, 28604), True, 'import matplotlib.pyplot as plt\n'), ((28623, 28633), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28631, 28633), True, 'import matplotlib.pyplot as plt\n'), ((29025, 29050), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (29047, 29050), False, 'from matplotlib import ticker\n'), ((29083, 29108), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (29105, 29108), False, 'from matplotlib import 
ticker\n'), ((29528, 29537), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (29535, 29537), True, 'import matplotlib.pyplot as plt\n'), ((29564, 29585), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (29575, 29585), True, 'import matplotlib.pyplot as plt\n'), ((29604, 29614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29612, 29614), True, 'import matplotlib.pyplot as plt\n'), ((29820, 29841), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (29832, 29841), True, 'import matplotlib.pyplot as plt\n'), ((30805, 30814), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (30812, 30814), True, 'import matplotlib.pyplot as plt\n'), ((30841, 30862), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (30852, 30862), True, 'import matplotlib.pyplot as plt\n'), ((30881, 30891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30889, 30891), True, 'import matplotlib.pyplot as plt\n'), ((31062, 31071), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31069, 31071), True, 'import matplotlib.pyplot as plt\n'), ((31098, 31119), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (31109, 31119), True, 'import matplotlib.pyplot as plt\n'), ((31138, 31148), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31146, 31148), True, 'import matplotlib.pyplot as plt\n'), ((31309, 31330), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (31321, 31330), True, 'import matplotlib.pyplot as plt\n'), ((31963, 31972), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31970, 31972), True, 'import matplotlib.pyplot as plt\n'), ((31999, 32020), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (32010, 32020), True, 'import matplotlib.pyplot as plt\n'), ((32039, 32049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32047, 32049), True, 'import 
matplotlib.pyplot as plt\n'), ((32220, 32241), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (32232, 32241), True, 'import matplotlib.pyplot as plt\n'), ((32646, 32655), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32653, 32655), True, 'import matplotlib.pyplot as plt\n'), ((32682, 32703), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (32693, 32703), True, 'import matplotlib.pyplot as plt\n'), ((32722, 32732), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32730, 32732), True, 'import matplotlib.pyplot as plt\n'), ((33147, 33168), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (33159, 33168), True, 'import matplotlib.pyplot as plt\n'), ((34143, 34152), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (34150, 34152), True, 'import matplotlib.pyplot as plt\n'), ((34179, 34200), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (34190, 34200), True, 'import matplotlib.pyplot as plt\n'), ((34219, 34229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34227, 34229), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2179), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (2170, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2257), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (2248, 2257), True, 'import matplotlib.pyplot as plt\n'), ((4104, 4133), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(21)'}), '(0.0, 1.0, num=21)\n', (4115, 4133), True, 'import numpy as np\n'), ((4169, 4198), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)'}), '(0.0, 1.0, num=11)\n', (4180, 4198), True, 'import numpy as np\n'), ((10957, 10979), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20c"""'], {}), "('tab20c')\n", (10969, 10979), True, 'import matplotlib.pyplot as plt\n'), ((11011, 11031), 
'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Set2"""'], {}), "('Set2')\n", (11023, 11031), True, 'import matplotlib.pyplot as plt\n'), ((11063, 11083), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Set3"""'], {}), "('Set3')\n", (11075, 11083), True, 'import matplotlib.pyplot as plt\n'), ((11118, 11141), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Pastel1"""'], {}), "('Pastel1')\n", (11130, 11141), True, 'import matplotlib.pyplot as plt\n'), ((13566, 13587), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (13578, 13587), True, 'import matplotlib.pyplot as plt\n'), ((13644, 13665), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (13656, 13665), True, 'import matplotlib.pyplot as plt\n'), ((21098, 21120), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""winter"""'], {}), "('winter')\n", (21110, 21120), True, 'import matplotlib.pyplot as plt\n'), ((21367, 21389), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""autumn"""'], {}), "('autumn')\n", (21379, 21389), True, 'import matplotlib.pyplot as plt\n'), ((34006, 34061), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""black"""'}), "(linewidth=3, foreground='black')\n", (34028, 34061), True, 'import matplotlib.patheffects as PathEffects\n'), ((5245, 5300), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""black"""'}), "(linewidth=3, foreground='black')\n", (5267, 5300), True, 'import matplotlib.patheffects as PathEffects\n'), ((12105, 12160), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""black"""'}), "(linewidth=3, foreground='black')\n", (12127, 12160), True, 'import matplotlib.patheffects as PathEffects\n'), ((12513, 12568), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(3)', 'foreground': '"""black"""'}), 
"(linewidth=3, foreground='black')\n", (12535, 12568), True, 'import matplotlib.patheffects as PathEffects\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 11:59, 17/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import abs, exp, cos, pi, ones, where
from numpy.random import uniform
from copy import deepcopy
from mealpy.optimizer import Root
class BaseMFO(Root):
    """
    My modified version of: Moth-Flame Optimization (MFO)
    (Moth-flame optimization algorithm: A novel nature-inspired heuristic paradigm)

    Notes:
        + Changed the flow of the algorithm
        + Updates the old solution only when the new one is better
        + Removed the inner per-dimension loop (vectorized with numpy) for speed
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch          # number of iterations of the main loop
        self.pop_size = pop_size    # number of moths in the population

    def train(self):
        """Run the modified MFO loop and return (best position, best fitness, loss history)."""
        pop_moths = [self.create_solution() for _ in range(self.pop_size)]
        # Flames are the population sorted by fitness; g_best is the best flame so far.
        pop_flames, g_best = self.get_sorted_pop_and_global_best_solution(pop_moths, self.ID_FIT, self.ID_MIN_PROB)
        for epoch in range(self.epoch):
            # Eq.(3.14) in the paper: number of flames, linearly decreased over the run
            num_flame = round(self.pop_size - (epoch + 1) * ((self.pop_size - 1) / self.epoch))
            # a decreases linearly from -1 to -2; used to compute t in Eq.(3.12)
            a = -1 + (epoch + 1) * ((-1) / self.epoch)
            for idx in range(self.pop_size):
                # D in Eq.(3.13): element-wise distance between moth and its paired flame
                dist = abs(pop_flames[idx][self.ID_POS] - pop_moths[idx][self.ID_POS])
                t = (a - 1) * uniform(0, 1, self.problem_size) + 1
                b = 1
                # Common logarithmic-spiral term of Eq.(3.12)
                spiral = dist * exp(b * t) * cos(t * 2 * pi)
                # Candidate around the paired flame ...
                pos_close = spiral + pop_flames[idx][self.ID_POS]
                # ... and (modification) around the global best flame instead of the
                # num_flame-th one as in the original matlab code.
                pos_best = spiral + g_best[self.ID_POS]
                flags = idx * ones(self.problem_size)
                pos_new = where(flags < num_flame, pos_close, pos_best)
                # Greedy selection: keep the moth's old position unless the new one is better.
                fit_new = self.get_fitness_position(pos_new)
                if fit_new < pop_moths[idx][self.ID_FIT]:
                    pop_moths[idx] = [pos_new, fit_new]
            # Merge moths and flames, keep the best pop_size individuals as new flames.
            pop_flames = pop_flames + pop_moths
            pop_flames, g_best = self.update_sorted_population_and_global_best_solution(pop_flames, self.ID_MIN_PROB, g_best)
            pop_flames = pop_flames[:self.pop_size]
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class OriginalMFO(BaseMFO):
    """
    The original version of: Moth-flame optimization (MFO)
    (Moth-flame optimization algorithm: A novel nature-inspired heuristic paradigm)

    Link:
        + https://www.mathworks.com/matlabcentral/fileexchange/52269-moth-flame-optimization-mfo-algorithm?s_tid=FX_rc1_behav
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):
        # Bug fix: forward the caller's keyword arguments unchanged with **kwargs.
        # The previous call used ``kwargs=kwargs``, which nested the settings one
        # level deep ({'kwargs': {...}}), so the Root base class never saw them.
        BaseMFO.__init__(self, obj_func, lb, ub, verbose, epoch, pop_size, **kwargs)

    def train(self):
        """Run the original (per-dimension) MFO loop.

        Returns:
            tuple: (best position, best fitness, list of best fitness per epoch)
        """
        pop_moths = [self.create_solution() for _ in range(self.pop_size)]
        # Flames are the population sorted by fitness; g_best is the best flame so far.
        pop_flames, g_best = self.get_sorted_pop_and_global_best_solution(pop_moths, self.ID_FIT, self.ID_MIN_PROB)
        for epoch in range(self.epoch):
            # Eq.(3.14) in the paper: number of flames, linearly decreased over the run
            num_flame = round(self.pop_size - (epoch + 1) * ((self.pop_size - 1) / self.epoch))
            # a decreases linearly from -1 to -2 to calculate t in Eq.(3.12)
            a = -1 + (epoch + 1) * ((-1) / self.epoch)
            for i in range(self.pop_size):
                temp = deepcopy(pop_moths[i][self.ID_POS])
                for j in range(self.problem_size):
                    # D in Eq.(3.13): distance between the moth and its flame (one dimension)
                    distance_to_flame = abs(pop_flames[i][self.ID_POS][j] - pop_moths[i][self.ID_POS][j])
                    t = (a - 1) * uniform() + 1
                    b = 1
                    if i <= num_flame:
                        # Eq.(3.12): spiral flight around the corresponding flame
                        temp[j] = distance_to_flame * exp(b * t) * cos(t * 2 * pi) + pop_flames[i][self.ID_POS][j]
                    else:
                        # Eq.(3.12): surplus moths all spiral around the num_flame-th flame
                        temp[j] = distance_to_flame * exp(b * t) * cos(t * 2 * pi) + pop_flames[num_flame][self.ID_POS][j]
                # Unconditional replacement (no greedy selection in the original version)
                fit = self.get_fitness_position(temp)
                pop_moths[i] = [temp, fit]
            # Merge moths and flames, keep the best pop_size individuals as new flames.
            pop_flames = pop_flames + pop_moths
            pop_flames, g_best = self.update_sorted_population_and_global_best_solution(pop_flames, self.ID_MIN_PROB, g_best)
            pop_flames = pop_flames[:self.pop_size]
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| [
"numpy.abs",
"copy.deepcopy",
"numpy.ones",
"numpy.where",
"numpy.exp",
"numpy.cos",
"numpy.random.uniform"
] | [((2175, 2234), 'numpy.abs', 'abs', (['(pop_flames[i][self.ID_POS] - pop_moths[i][self.ID_POS])'], {}), '(pop_flames[i][self.ID_POS] - pop_moths[i][self.ID_POS])\n', (2178, 2234), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((2918, 2961), 'numpy.where', 'where', (['(list_idx < num_flame)', 'temp_1', 'temp_2'], {}), '(list_idx < num_flame, temp_1, temp_2)\n', (2923, 2961), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5051, 5086), 'copy.deepcopy', 'deepcopy', (['pop_moths[i][self.ID_POS]'], {}), '(pop_moths[i][self.ID_POS])\n', (5059, 5086), False, 'from copy import deepcopy\n'), ((2868, 2891), 'numpy.ones', 'ones', (['self.problem_size'], {}), '(self.problem_size)\n', (2872, 2891), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5217, 5282), 'numpy.abs', 'abs', (['(pop_flames[i][self.ID_POS][j] - pop_moths[i][self.ID_POS][j])'], {}), '(pop_flames[i][self.ID_POS][j] - pop_moths[i][self.ID_POS][j])\n', (5220, 5282), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((2265, 2297), 'numpy.random.uniform', 'uniform', (['(0)', '(1)', 'self.problem_size'], {}), '(0, 1, self.problem_size)\n', (2272, 2297), False, 'from numpy.random import uniform\n'), ((2485, 2500), 'numpy.cos', 'cos', (['(t * 2 * pi)'], {}), '(t * 2 * pi)\n', (2488, 2500), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((2798, 2813), 'numpy.cos', 'cos', (['(t * 2 * pi)'], {}), '(t * 2 * pi)\n', (2801, 2813), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((2472, 2482), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (2475, 2482), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((2785, 2795), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (2788, 2795), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5317, 5326), 'numpy.random.uniform', 'uniform', ([], {}), '()\n', (5324, 5326), False, 'from numpy.random import uniform\n'), ((5574, 5589), 'numpy.cos', 'cos', (['(t * 2 * 
pi)'], {}), '(t * 2 * pi)\n', (5577, 5589), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5944, 5959), 'numpy.cos', 'cos', (['(t * 2 * pi)'], {}), '(t * 2 * pi)\n', (5947, 5959), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5561, 5571), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (5564, 5571), False, 'from numpy import abs, exp, cos, pi, ones, where\n'), ((5931, 5941), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (5934, 5941), False, 'from numpy import abs, exp, cos, pi, ones, where\n')] |
from typing import Generator
from typing import Iterator
from typing import Tuple
import numpy
from ..Spectrum import Spectrum
def parse_msp_file(filename: str) -> Generator[dict, None, None]:
    """Read an msp file and yield one dict per spectrum.

    Each yielded dict has keys 'params' (metadata), 'm/z array' and
    'intensity array' (numpy arrays).  A spectrum is emitted as soon as the
    number of collected peaks equals the 'num peaks' metadata entry.
    """
    metadata = {}
    mz_list = []
    intensity_list = []
    # Running count of peaks read for the spectrum currently being parsed
    peaks_seen = 0
    with open(filename, 'r', encoding='utf-8') as msp_file:
        for raw_line in msp_file:
            stripped = raw_line.rstrip()
            if not stripped:
                continue
            if contains_metadata(stripped):
                parse_metadata(stripped, metadata)
                continue
            # Peak line: may hold several ';'-separated (mz, intensity) pairs
            for peak in get_peak_tuples(stripped):
                mz, intensity = get_peak_values(peak)
                peaks_seen += 1
                mz_list.append(mz)
                intensity_list.append(intensity)
            if int(metadata['num peaks']) == peaks_seen:
                # Spectrum complete: emit it and reset the accumulators
                peaks_seen = 0
                yield {
                    'params': (metadata),
                    'm/z array': numpy.array(mz_list),
                    'intensity array': numpy.array(intensity_list)
                }
                metadata = {}
                mz_list = []
                intensity_list = []
def get_peak_values(peak: str) -> Tuple[float, float]:
    """Extract the (m/z, intensity) pair from one peak entry string."""
    fields = peak.split(maxsplit=2)
    return float(fields[0].strip()), float(fields[1].strip())
def get_peak_tuples(rline: str) -> Iterator[str]:
    """Split a peak line at ';' and yield each non-empty entry stripped."""
    return (chunk.strip() for chunk in rline.split(";") if chunk)
def parse_metadata(rline: str, params: dict):
    """Store the metadata of one 'key: value' line into the params dict.

    The 'comments' field packs several '"key=value"' entries; those are
    unpacked individually.  A second SMILES entry is kept under 'smiles_2'
    instead of overwriting the first one.
    """
    parts = rline.split(":", 1)
    field = parts[0].lower()
    if field == 'comments':
        # Strip the leading ' "' and trailing '"', then split on '" "'
        for entry in parts[1][2:-1].split('" "'):
            pieces = entry.split("=", 1)
            sub_key = pieces[0].lower()
            if sub_key in params.keys() and sub_key == 'smiles':
                params[sub_key + "_2"] = pieces[1].strip()
            else:
                params[sub_key] = pieces[1].strip()
    else:
        params[field] = parts[1].strip()
def contains_metadata(rline: str) -> bool:
    """Return True when the line holds 'key: value' spectrum metadata."""
    return rline.find(':') >= 0
def load_from_msp(filename: str) -> Generator[Spectrum, None, None]:
    """Convert an msp file into :py:class:`~matchms.Spectrum.Spectrum` objects.

    Parameters
    ----------
    filename:
        Path of the msp file.

    Yields
    ------
    One Spectrum object per spectrum in the msp file, with peaks sorted
    by m/z.

    Example:

    .. code-block:: python

        from matchms.importing import load_from_msp

        # Download msp file from MassBank of North America repository at https://mona.fiehnlab.ucdavis.edu/
        file_msp = "MoNA-export-GC-MS-first10.msp"
        spectrums = list(load_from_msp(file_msp))
    """
    for spectrum_dict in parse_msp_file(filename):
        mz = spectrum_dict["m/z array"]
        intensities = spectrum_dict["intensity array"]
        metadata = spectrum_dict.get("params", None)
        # Peaks are only sorted when they are not already in m/z order
        if not numpy.all(mz[:-1] <= mz[1:]):
            order = numpy.argsort(mz)
            mz = mz[order]
            intensities = intensities[order]
        yield Spectrum(mz=mz, intensities=intensities, metadata=metadata)
| [
"numpy.argsort",
"numpy.array",
"numpy.all"
] | [((3927, 3955), 'numpy.all', 'numpy.all', (['(mz[:-1] <= mz[1:])'], {}), '(mz[:-1] <= mz[1:])\n', (3936, 3955), False, 'import numpy\n'), ((3982, 3999), 'numpy.argsort', 'numpy.argsort', (['mz'], {}), '(mz)\n', (3995, 3999), False, 'import numpy\n'), ((1360, 1379), 'numpy.array', 'numpy.array', (['masses'], {}), '(masses)\n', (1371, 1379), False, 'import numpy\n'), ((1424, 1448), 'numpy.array', 'numpy.array', (['intensities'], {}), '(intensities)\n', (1435, 1448), False, 'import numpy\n')] |
from pathlib import Path
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
class Volume:
    """A NIfTI volume padded to a power-of-two grid for pyramid processing."""

    def __init__(self, path):
        self.path = Path(path)
        self.nifti = nib.load(str(self.path))
        # Work on the raw voxel data, dropping singleton dimensions
        self.data = self.nifti.get_fdata().squeeze()
        self.current_data = self.pad(self.data)
        self.current_downsample_factor = 1
        # Shape of the ORIGINAL (unpadded) data
        self.shape = self.data.shape

    def downsample(self):
        """Halve the resolution of the working data along the first two axes."""
        self.current_data = self.current_data[::2, ::2]
        self.current_downsample_factor *= 2

    def pad(self, array):
        """Pad `array` at the end of each axis up to the next power of two.

        The smallest power of two >= the largest dimension is used as the
        target size for every axis, so repeated halving stays on integer
        sizes.  Returns the input unchanged when no padding is needed.
        """
        target = self.closest_powerof_two(max(array.shape), smaller=False)
        missing = target - np.array(array.shape)
        if not missing.any():  # already at max power of 2
            return array
        # Bug fix: pad each axis separately with (0, extra) amounts.  The
        # original passed the flat per-axis deficits directly to np.pad,
        # which interprets a flat pair as (before, after) padding applied to
        # EVERY axis and therefore produced non-power-of-two shapes.
        pad_width = [(0, int(extra)) for extra in missing]
        return np.pad(array, pad_width, mode='minimum')

    def closest_powerof_two(self, n, smaller=True):
        """Return the power of two closest to n from below (or above).

        closest_powerof_two(513) = 512
        closest_powerof_two(512) = 256  (an exact power of two steps down)
        """
        p = np.log(n) / np.log(2)
        if p % 1 == 0:
            # n is itself a power of two; step one level down when `smaller`
            if smaller:
                p -= 1
            result = 2 ** p
        else:
            if smaller:
                result = 2 ** np.floor(p)
            else:
                result = 2 ** np.ceil(p)
        return int(result)

    def get_pyramid_shapes_map(self):
        """Return {pyramid level: shape}, halving until the max dim is 32.

        NOTE(review): assumes the volume is larger than 32 voxels in at
        least one dimension; smaller inputs would never reach the stop
        condition — confirm upstream guarantees.
        """
        shape = list(self.shape)
        level = 0
        shapes_map = {level: shape}
        last_level = False
        while not last_level:
            old_shape = shapes_map[level]
            max_dim = max(old_shape)
            closest_power = self.closest_powerof_two(max_dim)
            new_shape = [min(closest_power, n) for n in old_shape]
            level += 1
            shapes_map[level] = new_shape
            if max(new_shape) == 32:
                last_level = True
        return shapes_map

    def plot(self):
        """Display the current working data."""
        plt.imshow(self.current_data)
        plt.show()
if __name__ == "__main__":
    # Quick demo: build and print the pyramid shape map for a reference slice.
    path = 'ref_slice.nii.gz'
    volume = Volume(path)
    print(volume.get_pyramid_shapes_map())
| [
"matplotlib.pyplot.imshow",
"numpy.ceil",
"pathlib.Path",
"numpy.log",
"numpy.floor",
"numpy.array",
"numpy.pad",
"matplotlib.pyplot.show"
] | [((165, 175), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (169, 175), False, 'from pathlib import Path\n'), ((787, 827), 'numpy.pad', 'np.pad', (['array', 'pad_width'], {'mode': '"""minimum"""'}), "(array, pad_width, mode='minimum')\n", (793, 827), True, 'import numpy as np\n'), ((1892, 1921), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.current_data'], {}), '(self.current_data)\n', (1902, 1921), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1938, 1940), True, 'import matplotlib.pyplot as plt\n'), ((662, 683), 'numpy.array', 'np.array', (['array.shape'], {}), '(array.shape)\n', (670, 683), True, 'import numpy as np\n'), ((1018, 1027), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (1024, 1027), True, 'import numpy as np\n'), ((1030, 1039), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1036, 1039), True, 'import numpy as np\n'), ((1206, 1217), 'numpy.floor', 'np.floor', (['p'], {}), '(p)\n', (1214, 1217), True, 'import numpy as np\n'), ((1266, 1276), 'numpy.ceil', 'np.ceil', (['p'], {}), '(p)\n', (1273, 1276), True, 'import numpy as np\n')] |
import random
import csv
import os
from pathlib import Path
from tabulate import tabulate
from abc import abstractmethod
import keras.layers as Kl
import keras.models as Km
import numpy as np
import matplotlib.pyplot as plt
class TicTacToe():
    """Tic-tac-toe match runner for two players/agents.

    The board is a 9-character string: a digit marks a free square, 'X'/'O'
    a taken one.  Player classes are looked up by NAME in globals(), so
    player1/player2 must be strings naming classes defined in this module.
    """

    def __init__(self, player1, player2, exp1=1, exp2=1):
        self.state = '123456789'
        # Resolve the class names and instantiate: player1 plays 'X'
        player1 = globals()[player1]
        self.player1 = player1(tag='X', exploration_factor=exp1)
        player2 = globals()[player2]
        self.player2 = player2(tag='O', exploration_factor=exp2)
        self.winner = None
        self.turn = 'X'
        self.player_turn = self.player1
        # Win/tie statistics accumulated across games
        self.Xcount = 0
        self.Ocount = 0
        self.Tcount = 0
        self.all_count = 0

    def play_game(self):
        """Play a single game to completion without learning."""
        # exp_factor = 1 makes learned agents always pick the optimal move
        if isinstance(self.player1, QAgent):
            self.player1.exp_factor = 1
        if isinstance(self.player2, QAgent):
            self.player2.exp_factor = 1
        while self.winner is None:
            # NOTE(review): type(...) == Player deliberately matches only the
            # human Player class, not its Agent subclasses
            if type(self.player_turn) == Player:
                print(self.turn)
                self.print_game()
            self.state = self.play_move()
            self.game_winner()
            if self.winner is not None:
                break
        self.print_game()

    def play_to_learn(self, episodes):
        """Run `episodes` self-play games with learning enabled.

        Progress plots are drawn every 500 episodes and the counters are
        reset every 2000 episodes; values are saved at the end.
        """
        for i in range(episodes):
            print('Episode number: ' + str(i))
            while self.winner is None:
                self.state = self.play_move(learn=True)
                self.game_winner()
                if self.winner is not None:
                    break
                self.state = self.play_move(learn=True)
                self.game_winner()

            # Extra moves after the game ended: with a winner set, agents
            # return the state unchanged but still run their learning update,
            # so both sides learn the terminal values.
            # update last state
            self.state = self.play_move(learn=True)
            self.state = self.play_move(learn=True)
            # update winning state
            self.state = self.play_move(learn=True)
            self.state = self.play_move(learn=True)

            if i% 500 == 0:
                self.print_bar()
                print('-------------------')
                self.player1.print_value = True
            else:
                self.player1.print_value = False

            if i % 2000 == 0:
                # Reset the win counters so the bar plot shows recent form
                self.Xcount = 0
                self.Ocount = 0
                self.Tcount = 0
                self.all_count = i

            self.init_game()
        self.print_summary()
        self.player1.save_values()
        self.player2.save_values()

    def play_move(self, learn=False):
        """Let the player whose turn it is move; return the new board string."""
        if self.turn == 'X':
            if learn is True:
                new_state = self.player1.make_move_and_learn(self.state, self.winner)
            else:
                new_state = self.player1.make_move(self.state, self.winner)
            self.turn = 'O'
            self.player_turn = self.player2
        else:
            if learn is True:
                new_state = self.player2.make_move_and_learn(self.state, self.winner)
            else:
                new_state = self.player2.make_move(self.state, self.winner)
            self.turn = 'X'
            self.player_turn = self.player1
        return new_state

    def print_game(self):
        """Print the current 3x3 board to stdout."""
        s = list(self.state)
        print(' {} | {} | {}'.format(s[0], s[1], s[2]))
        print(' --------------')
        print(' {} | {} | {}'.format(s[3], s[4], s[5]))
        print(' --------------')
        print(' {} | {} | {}'.format(s[6], s[7], s[8]))
        print(' --------------')
        print(' --------------')

    def game_winner(self):
        """Set self.winner ('X', 'O', 'No winner', or None) and update counters."""
        # All winning index triples: rows, columns, diagonals
        winner = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]
        for line in winner:
            s = self.state[line[0]] + self.state[line[1]] + self.state[line[2]]
            if s == 'XXX':
                self.winner = 'X'
                break
            elif s == 'OOO':
                self.winner = 'O'
                break
            # No free squares left on the board -> tie (the generator's `s`
            # shadows the line string only inside the comprehension)
            elif not any(s.isnumeric() for s in list(self.state)):
                self.winner = 'No winner'
        self.check_winner()
        return self.winner

    def check_winner(self):
        """Update the win/tie counters from self.winner."""
        if self.winner == 'X':
            self.Xcount += 1
            # print('The winner is X')
            # print('')
            # self.print_game()
        elif self.winner == 'O':
            self.Ocount += 1
            # print('The winner is O')
            # print('')
            # self.print_game()
        elif self.winner == 'No winner':
            self.Tcount += 1
            # print('No winner')
            # print('')
            # self.print_game()

    def init_game(self):
        """Reset the board and turn order for a new game."""
        self.state = '123456789'
        self.winner = None
        self.turn = 'X'
        self.player_turn = self.player1

    def print_bar(self):
        """Plot running win statistics: absolute counts and percentages."""
        plt.close()
        fig = plt.figure()
        ax1 = fig.add_subplot(2, 1, 1)
        ax2 = fig.add_subplot(2, 1, 2)
        x = ['X', 'Tie', 'O', 'Sum']
        a = self.Xcount
        b = self.Tcount
        c = self.Ocount
        d = self.all_count
        # +1 avoids division by zero before any game has finished
        aprec = 100*a / (a + b + c + 1)
        bprec = 100*b / (a + b + c + 1)
        cprec = 100*c / (a + b + c + 1)
        ax1.clear()
        ax2.clear()
        bar1 = ax1.bar(x, [a, b, c, d])
        bar1[0].set_color('r')
        bar1[1].set_color('b')
        ax1.set_ylim((0, d + 100))
        plt.draw()
        bar2 = ax2.bar(x[0:3], [aprec, bprec, cprec])
        bar2[0].set_color('r')
        bar2[1].set_color('b')
        ax2.set_ylim((0, 100))
        for rect in bar2:
            height = rect.get_height()
            ax2.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
                     '%d' % int(height),
                     ha='center', va='bottom')
        plt.draw()
        plt.pause(0.05)

    def print_summary(self):
        """Print a table of wins and win percentage per player."""
        a = ['X', self.Xcount, 100 * self.Xcount / (self.Xcount + self.Ocount + self.Tcount)]
        b = ['O', self.Ocount, 100 * self.Ocount / (self.Xcount + self.Ocount + self.Tcount)]
        c = ['Tie', self.Tcount, 100 * self.Tcount / (self.Xcount + self.Ocount + self.Tcount)]
        tab = tabulate([a, b, c], headers=['Player', 'num of wins', 'prec'])
        print(tab)
class Player():
    """Human player: moves are read interactively from stdin."""

    def __init__(self, tag, exploration_factor=1):
        # tag is the mark this player places on the board ('X' or 'O')
        self.tag = tag
        self.print_value = False
        self.exp_factor = exploration_factor

    def make_move(self, state, winner):
        """Ask the user for a square number (1-9) and return the new board."""
        chosen = int(input('Choose move number: '))
        return state[:chosen - 1] + self.tag + state[chosen:]
class Agent(Player):
    """Base class for learning agents.

    Subclasses implement calc_value/learn_state; this class provides the
    epsilon-greedy move selection and the reward function.
    """

    def __init__(self, tag, exploration_factor=1):
        super().__init__(tag, exploration_factor)
        self.epsilon = 0.1
        self.alpha = 0.5          # learning rate used by subclasses
        self.prev_state = '123456789'
        self.state = None
        self.print_value = False
        # The opponent's mark, needed to evaluate replies in make_optimal_move
        if self.tag == 'X':
            self.op_tag = 'O'
        else:
            self.op_tag = 'X'

    @abstractmethod
    def calc_value(self, state):
        """Return the learned value of `state` (None when unknown)."""
        pass

    @abstractmethod
    def learn_state(self, state, winner):
        """Update the value estimate given the observed state and winner."""
        pass

    def make_move(self, state, winner):
        """Pick the next board: optimal with prob exp_factor, else random.

        When the game is already decided (`winner` set) the state is
        returned unchanged.
        """
        self.state = state
        if winner is not None:
            return state
        p = random.uniform(0, 1)
        if p < self.exp_factor:
            new_state = self.make_optimal_move(state)
        else:
            # Explore: place the tag on a random free square
            moves = [pos for pos, mark in enumerate(state) if mark.isnumeric()]
            idx = random.choice(moves)
            new_state = state[:idx] + self.tag + state[idx + 1:]
        return new_state

    def make_move_and_learn(self, state, winner):
        """Run the learning update, then move."""
        self.learn_state(state, winner)
        return self.make_move(state, winner)

    def make_optimal_move(self, state):
        """Choose the move minimizing the opponent's best reply value.

        Unknown positions score 1 to encourage exploring them; ties are
        broken at random.
        """
        moves = [pos for pos, mark in enumerate(state) if mark.isnumeric()]
        if len(moves) == 1:
            # Only one square left, no choice to make
            return state[:moves[0]] + self.tag + state[moves[0] + 1:]
        temp_state_list = []
        v = -float('Inf')
        for idx in moves:
            v_temp = []
            temp_state = state[:idx] + self.tag + state[idx + 1:]
            moves_op = [pos for pos, mark in enumerate(temp_state) if mark.isnumeric()]
            for idy in moves_op:
                temp_state_op = temp_state[:idy] + self.op_tag + temp_state[idy + 1:]
                v_temp.append(self.calc_value(temp_state_op))
            # Drop unknown states (calc_value returned None).  The original
            # filter(None.__ne__, ...) relied on NotImplemented being truthy.
            v_temp = [value for value in v_temp if value is not None]
            if len(v_temp) != 0:
                v_temp = np.min(v_temp)
            else:
                # encourage exploration of completely unknown positions
                v_temp = 1
            if v_temp > v:
                temp_state_list = [temp_state]
                v = v_temp
            elif v_temp == v:
                temp_state_list.append(temp_state)
        try:
            new_state = random.choice(temp_state_list)
        except IndexError:
            # Bug fix: random.choice raises IndexError (not ValueError) on an
            # empty sequence, so the original handler could never trigger.
            print('temp state:', temp_state_list)
            raise Exception('temp state empty')
        return new_state

    def reward(self, winner):
        """Return the reward: +1 win, 0.5 tie, 0 ongoing, -1 loss.

        Bug fix: tags are compared with == instead of `is`, which relied on
        CPython string interning.
        """
        if winner == self.tag:
            R = 1
        elif winner is None:
            R = 0
        elif winner == 'No winner':
            R = 0.5
        else:
            R = -1
        return R
class QAgent(Agent):
    """Tabular value-learning agent: board state -> learned value."""

    def __init__(self, tag, exploration_factor=1):
        super().__init__(tag, exploration_factor)
        self.tag = tag
        self.values = dict()  # state string -> learned value
        self.load_values()

    def learn_state(self, state, winner):
        """TD update: V(prev) += alpha * (R + V(state) - V(prev))."""
        # Only learn once this agent has actually placed a mark
        if self.tag in state:
            v_s = self.values.get(self.prev_state, 0)
            R = self.reward(winner)
            # Bug fix: the original checked `self.state` (the state seen in
            # the PREVIOUS make_move call) for membership but indexed with
            # `state`, which could raise KeyError.
            if state in self.values.keys() and winner is None:
                v_s_tag = self.values[state]
            else:
                v_s_tag = 0
            self.values[self.prev_state] = v_s + self.alpha*(R + v_s_tag - v_s)
        self.prev_state = state

    def calc_value(self, state):
        """Return the learned value of `state`, or None when never seen."""
        if state in self.values.keys():
            return self.values[state]

    def load_values(self):
        """Best-effort load of the value table from values<tag>.csv."""
        s = 'values' + self.tag + '.csv'
        try:
            value_csv = csv.reader(open(s, 'r'))
            for row in value_csv:
                k, v = row
                self.values[k] = float(v)
        except (OSError, ValueError):
            # Missing or malformed file: start with an empty table
            pass

    def save_values(self):
        """Write the value table to values<tag>.csv, replacing any old file."""
        s = 'values' + self.tag + '.csv'
        try:
            os.remove(s)
        except OSError:
            # File did not exist yet
            pass
        a = csv.writer(open(s, 'a'))
        for v, k in self.values.items():
            a.writerow([v, k])
class DeepAgent(Agent):
    """Value-learning agent backed by a small Keras regression network."""

    def __init__(self, tag, exploration_factor=1):
        super().__init__(tag, exploration_factor)
        self.tag = tag
        self.value_model = self.load_model()

    @staticmethod
    def state2array(state):
        """Encode the board as a 1x9 array: 'X' -> 1, 'O' -> -1, free -> 0."""
        encoding = {'X': 1, 'O': -1}
        return np.array([[encoding.get(mark, 0) for mark in state]])

    def learn_state(self, state, winner):
        """Train the network on prev_state toward the TD target."""
        target = self.calc_target(state, winner)
        self.train_model(target, 10)
        self.prev_state = state

    def load_model(self):
        """Load the saved network for this tag, or build a fresh one."""
        s = 'model_values' + self.tag + '.h5'
        model_file = Path(s)
        if model_file.is_file():
            model = Km.load_model(s)
            print('load model: ' + s)
        else:
            print('new model')
            model = Km.Sequential()
            model.add(Kl.Dense(18, activation='relu', input_dim=9))
            model.add(Kl.Dense(18, activation='relu'))
            model.add(Kl.Dense(1, activation='linear'))
            model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['accuracy'])
        model.summary()
        return model

    def calc_value(self, state):
        """Predict the value of `state` with the network."""
        return self.value_model.predict(self.state2array(state))

    def calc_target(self, state, winner):
        """TD target for prev_state; None before this agent has moved."""
        if self.tag in state:
            v_s = self.calc_value(self.prev_state)
            R = self.reward(winner)
            if winner is None:
                v_s_tag = self.calc_value(state)
            else:
                v_s_tag = 0
            target = np.array(v_s + self.alpha * (R + v_s_tag - v_s))
            return target

    def train_model(self, target, epochs):
        """Fit the network on prev_state -> target (no-op when target is None)."""
        X_train = self.state2array(self.prev_state)
        if target is not None:
            self.value_model.fit(X_train, target, epochs=epochs, verbose=0)

    def save_values(self):
        """Save the network to disk, replacing any previous file."""
        s = 'model_values' + self.tag + '.h5'
        try:
            os.remove(s)
        except OSError:
            # Bug fix: narrowed the bare except; only a missing file is expected
            pass
        self.value_model.save(s)
def check_player():
    """Train two DeepAgents against each other for a short learning run.

    The commented-out lines show alternative pairings (tabular QAgents,
    human vs. agent) used during development.
    """
    #print('QAgent X 1 and QAgent 1 0')
    #game = TicTacToe('QAgent', 'QAgent', 1, 0)
    #game.play_to_learn(1000)

    #print('DeepAgent X 0.8 and DeepAgent 0.8')
    game = TicTacToe('DeepAgent', 'DeepAgent', 1, 1)
    game.play_to_learn(100)

    #print('DeepAgent X 0 and QAgent 1, 0')
    #game = TicTacToe('Player', 'DeepAgent', 0.8, 1)
    #game.play_game()


# Run the training session on import/execution of this script
check_player()
| [
"tabulate.tabulate",
"random.uniform",
"random.choice",
"keras.models.load_model",
"pathlib.Path",
"keras.models.Sequential",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"keras.layers.Dense",
"numpy.min",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"os.re... | [((4756, 4767), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4765, 4767), True, 'import matplotlib.pyplot as plt\n'), ((4782, 4794), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4792, 4794), True, 'import matplotlib.pyplot as plt\n'), ((5317, 5327), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5325, 5327), True, 'import matplotlib.pyplot as plt\n'), ((5711, 5721), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5719, 5721), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5746), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (5740, 5746), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6141), 'tabulate.tabulate', 'tabulate', (['[a, b, c]'], {'headers': "['Player', 'num of wins', 'prec']"}), "([a, b, c], headers=['Player', 'num of wins', 'prec'])\n", (6087, 6141), False, 'from tabulate import tabulate\n'), ((7183, 7203), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (7197, 7203), False, 'import random\n'), ((11124, 11145), 'numpy.array', 'np.array', (['[num_state]'], {}), '([num_state])\n', (11132, 11145), True, 'import numpy as np\n'), ((11429, 11436), 'pathlib.Path', 'Path', (['s'], {}), '(s)\n', (11433, 11436), False, 'from pathlib import Path\n'), ((7392, 7412), 'random.choice', 'random.choice', (['moves'], {}), '(moves)\n', (7405, 7412), False, 'import random\n'), ((8815, 8845), 'random.choice', 'random.choice', (['temp_state_list'], {}), '(temp_state_list)\n', (8828, 8845), False, 'import random\n'), ((10478, 10490), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (10487, 10490), False, 'import os\n'), ((11490, 11506), 'keras.models.load_model', 'Km.load_model', (['s'], {}), '(s)\n', (11503, 11506), True, 'import keras.models as Km\n'), ((11610, 11625), 'keras.models.Sequential', 'Km.Sequential', ([], {}), '()\n', (11623, 11625), True, 'import keras.models as Km\n'), ((12356, 12404), 'numpy.array', 'np.array', (['(v_s + self.alpha * (R + v_s_tag 
- v_s))'], {}), '(v_s + self.alpha * (R + v_s_tag - v_s))\n', (12364, 12404), True, 'import numpy as np\n'), ((12736, 12748), 'os.remove', 'os.remove', (['s'], {}), '(s)\n', (12745, 12748), False, 'import os\n'), ((8494, 8508), 'numpy.min', 'np.min', (['v_temp'], {}), '(v_temp)\n', (8500, 8508), True, 'import numpy as np\n'), ((11648, 11692), 'keras.layers.Dense', 'Kl.Dense', (['(18)'], {'activation': '"""relu"""', 'input_dim': '(9)'}), "(18, activation='relu', input_dim=9)\n", (11656, 11692), True, 'import keras.layers as Kl\n'), ((11716, 11747), 'keras.layers.Dense', 'Kl.Dense', (['(18)'], {'activation': '"""relu"""'}), "(18, activation='relu')\n", (11724, 11747), True, 'import keras.layers as Kl\n'), ((11771, 11803), 'keras.layers.Dense', 'Kl.Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (11779, 11803), True, 'import keras.layers as Kl\n')] |
#!/usr/bin/env python3
# -*- CoDing: utf-8 -*-
"""
Created on May 22 2019
Last Update May 22 2019
@author: simonvanvliet
Department of Zoology
University of Britisch Columbia
<EMAIL>
This recreates the data and figure for figure 3
By default data is loaded unless parameters have changes, to rerun model set override_data to True
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import MLS_static_fast as mlssf
import mls_general_code as mlsg
import numpy.lib.recfunctions as rf
from mpl_toolkits.mplot3d import axes3d
from joblib import Parallel, delayed
import datetime
from pathlib import Path
import itertools
"""
# SET model settings
"""
# set to True to force recalculation of data
override_data = False

# set folder
data_folder = Path("Data_Paper/")
fig_Folder = Path("Figures_Paper/")
figureName = 'figure3.pdf'
dataName = 'data_Figure3.npz'

# set model parameters
tau_H = 100
# log10 ranges scanned for the variance and heritability timescales
tauVRange = (-2, 2)
tauHRange = (-2, 4)
nStep = 30
# sampling variances to compare (one subplot column each)
sigma_vec = [0.02, 0.1]

model_par = {
    # selection strength settings
    "s": 1,
    "K_H": 500.,
    "D_H": 0.,
    # tau_var settings
    "TAU_H": tau_H,
    # tau_mig settings
    "n0": 1E-4,
    # init conditions
    "F0": 0.01,
    "N0init": 1.,
    "NUMGROUP": -1,
    # time settings
    "maxT": 150000,
    "dT": 5E-2,
    "sampleT": 10,
    "rms_err_treshold": 5E-2,
    "mav_window": 1000,
    "rms_window": 10000,
    "minTRun": 25000,
    # fixed model parameters
    "sampling": "fixedvar",
    "mu": 1E-9,
    "K": 1,
    "numTypeBins": 100
}

# calc other parameters: convert desired timescales to cost/migration values
desiredTauV = np.logspace(*tauVRange, nStep) * tau_H
desiredTauH = np.logspace(*tauHRange, nStep)

B_H_vec = [model_par['s'], 0]
cost_vec = 1 / desiredTauV
mig_vec = model_par['n0'] / desiredTauH

"""
# SET figure settings
"""
wFig = 8.7
hFig = 4.5
font = {'family': 'Helvetica',
        'weight': 'light',
        'size': 6}

axes = {'linewidth': 0.5,
        'titlesize': 7,
        'labelsize': 6,
        'labelpad': 2,
        'spines.top': False,
        'spines.right': False,
        }

ticks = {'major.width': 0.5,
         'direction': 'in',
         'major.size': 2,
         'labelsize': 6,
         'major.pad': 2}

legend = {'fontsize': 6,
          'handlelength': 1.5,
          'handletextpad': 0.5,
          'labelspacing': 0.2}

figure = {'dpi': 300}
savefigure = {'dpi': 300,
              'transparent': True}

# apply matplotlib style globally for all panels
mpl.style.use('seaborn-ticks')
mpl.rc('font', **font)
mpl.rc('axes', **axes)
mpl.rc('xtick', **ticks)
mpl.rc('ytick', **ticks)
#mpl.rc('ztick', **ticks)
mpl.rc('legend', **legend)
mpl.rc('figure', **figure)
mpl.rc('savefig', **savefigure)
"""
Main code
"""
# set variable model parameters
def set_cost_mig_BH(cost, mig, B_H, sigma):
    """Return a copy of the global model_par with the scan parameters set."""
    local_par = model_par.copy()
    local_par.update(cost=cost, mig=mig, B_H=B_H, sigmaBirth=sigma)
    # Change the error treshold used to decide if steady state is reached:
    # higher sample variance gives more noise and needs a lower treshold.
    local_par['rms_err_treshold'] = 5E-3 if sigma > 0.05 else 1E-3
    return local_par
# runs model
def run_model():
    """Run the model for every parameter combination in parallel.

    Stores the stacked results to the data file and returns the
    steady-state statistics array.
    """
    # set modelpar list to run: full cartesian product of the scan vectors
    modelParList = [set_cost_mig_BH(*x)
                    for x in itertools.product(*(cost_vec, mig_vec, B_H_vec, sigma_vec))]

    # run model in parallel (up to 4 workers)
    nJobs = min(len(modelParList), 4)
    print('starting with %i jobs' % len(modelParList))
    results = Parallel(n_jobs=nJobs, verbose=9, timeout=1.E9)(
        delayed(mlssf.single_run_finalstate)(par) for par in modelParList)

    # process and store output
    Output, InvPerHost = zip(*results)
    statData = np.vstack(Output)
    distData = np.vstack(InvPerHost)

    saveName = data_folder / dataName
    np.savez(saveName, statData=statData, distData=distData,
             modelParList=modelParList, date=datetime.datetime.now())

    return statData
# checks of model parmaters have changed
def check_model_par(model_par_load, parToIgnore):
    """Return True when any non-ignored loaded parameter differs from model_par.

    Parameters
    ----------
    model_par_load : dict
        Parameter dict stored alongside the cached data.
    parToIgnore : tuple
        Keys that may differ without forcing a rerun.
    """
    rerun = False
    for key in model_par_load:
        if not (key in parToIgnore):
            if model_par_load[key] != model_par[key]:
                # Bug fix: report the key that changed; the original always
                # printed the literal string 'load'.
                print('Parameter "%s" has changed, rerunning model!' % key)
                rerun = True
    return rerun
# Load model is datafile found, run model if not found or if settings have changed
def load_model():
    """Load cached model data, rerunning the model when needed.

    The model is rerun when no data file exists, when the stored parameters
    differ from the current ones, or when override_data is set.
    Returns the 1D steady-state statistics array.
    """
    # need not check these parameters (they vary across the scan by design)
    parToIgnore = ('cost', 'mig', 'B_H', 'sigmaBirth', 'rms_err_treshold')
    loadName = data_folder / dataName

    if loadName.is_file():
        # open file and load data
        data_file = np.load(loadName, allow_pickle=True)
        data1D = data_file['statData']
        rerun = check_model_par(data_file['modelParList'][0], parToIgnore)
        data_file.close()
    else:
        # cannot load, need to rerun model
        rerun = True
        print('Model data not found, running model')

    if rerun or override_data:
        # rerun model
        data1D = run_model()

    return data1D
# Process data, calc timescale and other variables needed for plotting
def process_data(statData):
    """Append the derived timescale and category columns used for plotting.

    Returns the structured array with six extra fields: absolute and
    relative timescales plus categorical sigma/B_H indices.
    """
    tau_her = mlsg.calc_tauHer_numeric(
        statData['n0'], statData['mig'])
    tau_var = mlsg.calc_tauV(statData['cost'])
    new_fields = {
        'tauHer': tau_her,
        'tauVar': tau_var,
        'tauHer_rel': tau_her / statData['TAU_H'],
        'tauVar_rel': tau_var / statData['TAU_H'],
        'sigma_cat': mlsg.make_categorial(statData['sigmaBirth']),
        'BH_cat': mlsg.make_categorial(statData['B_H']),
    }
    return rf.append_fields(statData, tuple(new_fields.keys()),
                            tuple(new_fields.values()), usemask=False)
# get subset of data to plot
def select_data(data1D, BHidx, sigmaidx):
    """Return plot-ready (x, y, z) arrays for one sigma/B_H category.

    x = log10 of the relative variance timescale, y = log10 of n0/mig,
    z = the mean fraction, all restricted to rows matching the requested
    categories with finite values.
    """
    in_sigma = data1D['sigma_cat'] == sigmaidx
    in_BH = data1D['BH_cat'] == BHidx
    # remove nan and inf rows in any of the plotted columns
    finite = np.logical_and.reduce(
        np.isfinite((data1D['tauVar_rel'], data1D['tauHer_rel'],
                     data1D['F_mav'], in_sigma, in_BH)))
    keep = np.logical_and.reduce((in_sigma, in_BH, finite))
    # extract data and log transform x, y
    x = np.log10(data1D['tauVar_rel'][keep])
    y = np.log10((data1D['n0'] / data1D['mig'])[keep])
    z = data1D['F_mav'][keep]
    return (x, y, z)
# make 3D scatter plot
def plot_3D(ax, data1D, sigmaIndex):
    """3D scatter of the mean fraction vs. the two relative timescales.

    Plots only the subset with host selection enabled (BH category 1) for
    the requested sampling-variance category.
    """
    BHidx = 1
    x, y, z = select_data(data1D, BHidx, sigmaIndex)
    ax.scatter(x, y, z,
               c=z,
               s=0.3, alpha=1,
               vmin=0, vmax=1, cmap='plasma')
    steps = (3, 4, 3)
    fRange = (0, 1)
    # axis limits come from the module-level scan ranges
    ax.set_xlim(tauVRange)
    ax.set_ylim(tauHRange)
    ax.set_zlim(fRange)
    ax.set_xticks(np.linspace(*tauVRange, steps[0]))
    ax.set_yticks(np.linspace(*tauHRange, steps[1]))
    ax.set_zticks(np.linspace(*fRange, steps[2]))
    # set labels
    ax.set_xlabel('$log_{10} \\frac{\\tau_{Var}}{\\tau_H}$')
    ax.set_ylabel('$log_{10} \\frac{n_0/k}{\\theta/\\beta}$')
    ax.set_zlabel('$\\langle f \\rangle$')
    # tighten label/tick padding for the small paper-sized panel
    ax.yaxis.labelpad = -10
    ax.xaxis.labelpad = -10
    ax.zaxis.labelpad = -10
    ax.tick_params(axis='z', which='major', pad=0)
    ax.tick_params(axis='both', which='major', pad=-5)
    ax.view_init(30, -115)
    return None
# bin scatter data to show as heatmap
def bin_2Ddata(currXData, currYData, currZData, xbins, ybins):
    """Average scattered z-values onto a 2D grid of (x, y) bins.

    Arguments:
        currXData {np vector} -- x coordinates of the points
        currYData {np vector} -- y coordinates of the points
        currZData {np vector} -- z values to average per bin
        xbins {np vector} -- x bin edges
        ybins {np vector} -- y bin edges

    Returns an (ybins.size, xbins.size) array; cells outside the binned
    region (last row/column) and empty bins stay NaN.
    """
    n_x = xbins.size
    n_y = ybins.size
    grid = np.full((n_y, n_x), np.nan)
    for col in range(n_x - 1):
        # Points falling into this x bin (half-open interval)
        in_col = np.logical_and(currXData >= xbins[col],
                                currXData < xbins[col + 1])
        for row in range(n_y - 1):
            in_row = np.logical_and(currYData >= ybins[row],
                                    currYData < ybins[row + 1])
            selected = currZData[np.logical_and(in_col, in_row)]
            # Mean over the bin, ignoring NaNs (empty bins give NaN)
            grid[row, col] = np.nanmean(selected)
    return grid
# make heatmap
def plot_heatmap(fig, ax, data1D, sigmaIndex):
    """Binned heatmap of the mean fraction over the two timescale axes.

    Uses the same data subset as plot_3D (host selection on), binned with
    bin_2Ddata, plus a colorbar.
    """
    BHidx = 1
    xStep = 0.25
    yStep = 0.5
    # bin edges spanning the scanned (log10) parameter ranges
    xbins = np.linspace(*tauVRange, int(
        np.ceil((tauVRange[1]-tauVRange[0])/xStep))+1)
    ybins = np.linspace(*tauHRange, int(
        np.ceil((tauHRange[1] - tauHRange[0]) / yStep)) + 1)

    # get data with selection
    xS, yS, zS = select_data(data1D, BHidx, sigmaIndex)
    binnedDataS = bin_2Ddata(xS, yS, zS, xbins, ybins)

    im = ax.pcolormesh(xbins, ybins, binnedDataS,
                       cmap='plasma', vmin=0, vmax=1)
    #cb = fig.colorbar(im, ax=ax)
    name = '$\\langle f \\rangle$'
    fig.colorbar(im, ax=ax, orientation='vertical',
                 label=name,
                 ticks=[0, 0.5, 1])

    steps = (3, 4)
    ax.set_xlim(tauVRange)
    ax.set_ylim(tauHRange)
    ax.set_xticks(np.linspace(*tauVRange, steps[0]))
    ax.set_yticks(np.linspace(*tauHRange, steps[1]))
    # set labels
    ax.set_xlabel('$log_{10} \\frac{\\tau_{Var}}{\\tau_H}$')
    ax.set_ylabel('$log_{10} \\frac{n_0/k}{\\theta/\\beta}$')
    return None
# main function to create figure
def create_fig():
    """Build figure 3: one 3D scatter + heatmap column per sampling variance.

    Loads (or recomputes) the model data, lays out the four panels with
    manually computed axes rectangles, and saves the figure as PDF.
    """
    # load data or compute model
    data1D = load_model()
    data1D = process_data(data1D)

    fig = plt.figure()
    mlsg.set_fig_size_cm(fig, wFig, hFig)

    # setup manual axis for subplots (fractions of the figure size)
    bm = 0.15
    tm = 0.06
    cm = 0.13
    hf = 0.65
    h = [hf*(1 - bm - cm - tm), (1-hf)*(1 - bm - cm - tm)]
    lm = 0.05
    rm = 0.05
    cmh = 0.1
    w = (1 - cmh - rm - lm)/2
    wf = 0.85
    wo = (1-wf)*w
    left = [lm, lm+cmh+w]
    bot = [bm+h[1]+cm, bm]

    # plot for different sampling variance (one column each)
    for ss in range(2):
        # plot scatter
        ax = fig.add_axes([left[ss], bot[0], w, h[0]], projection='3d')
        plot_3D(ax, data1D, ss)
        ax.set_title('$\\sigma={}$'.format(sigma_vec[ss]), fontsize=6)
        # plot heatmap
        ax = fig.add_axes([left[ss]+wo, bot[1], wf*w, h[1]])
        plot_heatmap(fig, ax, data1D, ss)

    #plt.tight_layout(pad=1, h_pad=2.5, w_pad=1.5)
    fig.savefig(fig_Folder / figureName,
                format="pdf", transparent=True)
    return None


if __name__ == "__main__":
    create_fig()
| [
"numpy.log10",
"mls_general_code.set_fig_size_cm",
"mls_general_code.calc_tauV",
"numpy.nanmean",
"numpy.logical_and.reduce",
"matplotlib.style.use",
"numpy.isfinite",
"matplotlib.rc",
"joblib.delayed",
"pathlib.Path",
"mls_general_code.make_categorial",
"itertools.product",
"numpy.linspace"... | [((774, 793), 'pathlib.Path', 'Path', (['"""Data_Paper/"""'], {}), "('Data_Paper/')\n", (778, 793), False, 'from pathlib import Path\n'), ((807, 829), 'pathlib.Path', 'Path', (['"""Figures_Paper/"""'], {}), "('Figures_Paper/')\n", (811, 829), False, 'from pathlib import Path\n'), ((1627, 1657), 'numpy.logspace', 'np.logspace', (['*tauHRange', 'nStep'], {}), '(*tauHRange, nStep)\n', (1638, 1657), True, 'import numpy as np\n'), ((2393, 2423), 'matplotlib.style.use', 'mpl.style.use', (['"""seaborn-ticks"""'], {}), "('seaborn-ticks')\n", (2406, 2423), True, 'import matplotlib as mpl\n'), ((2424, 2446), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (2430, 2446), True, 'import matplotlib as mpl\n'), ((2447, 2469), 'matplotlib.rc', 'mpl.rc', (['"""axes"""'], {}), "('axes', **axes)\n", (2453, 2469), True, 'import matplotlib as mpl\n'), ((2470, 2494), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {}), "('xtick', **ticks)\n", (2476, 2494), True, 'import matplotlib as mpl\n'), ((2495, 2519), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {}), "('ytick', **ticks)\n", (2501, 2519), True, 'import matplotlib as mpl\n'), ((2546, 2572), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {}), "('legend', **legend)\n", (2552, 2572), True, 'import matplotlib as mpl\n'), ((2573, 2599), 'matplotlib.rc', 'mpl.rc', (['"""figure"""'], {}), "('figure', **figure)\n", (2579, 2599), True, 'import matplotlib as mpl\n'), ((2600, 2631), 'matplotlib.rc', 'mpl.rc', (['"""savefig"""'], {}), "('savefig', **savefigure)\n", (2606, 2631), True, 'import matplotlib as mpl\n'), ((1574, 1604), 'numpy.logspace', 'np.logspace', (['*tauVRange', 'nStep'], {}), '(*tauVRange, nStep)\n', (1585, 1604), True, 'import numpy as np\n'), ((3742, 3759), 'numpy.vstack', 'np.vstack', (['Output'], {}), '(Output)\n', (3751, 3759), True, 'import numpy as np\n'), ((3775, 3796), 'numpy.vstack', 'np.vstack', (['InvPerHost'], {}), '(InvPerHost)\n', (3784, 3796), True, 'import numpy as 
np\n'), ((5233, 5290), 'mls_general_code.calc_tauHer_numeric', 'mlsg.calc_tauHer_numeric', (["statData['n0']", "statData['mig']"], {}), "(statData['n0'], statData['mig'])\n", (5257, 5290), True, 'import mls_general_code as mlsg\n'), ((5313, 5345), 'mls_general_code.calc_tauV', 'mlsg.calc_tauV', (["statData['cost']"], {}), "(statData['cost'])\n", (5327, 5345), True, 'import mls_general_code as mlsg\n'), ((5445, 5489), 'mls_general_code.make_categorial', 'mlsg.make_categorial', (["statData['sigmaBirth']"], {}), "(statData['sigmaBirth'])\n", (5465, 5489), True, 'import mls_general_code as mlsg\n'), ((5503, 5540), 'mls_general_code.make_categorial', 'mlsg.make_categorial', (["statData['B_H']"], {}), "(statData['B_H'])\n", (5523, 5540), True, 'import mls_general_code as mlsg\n'), ((5743, 5810), 'numpy.lib.recfunctions.append_fields', 'rf.append_fields', (['statData', 'nameToStore', 'dataToStore'], {'usemask': '(False)'}), '(statData, nameToStore, dataToStore, usemask=False)\n', (5759, 5810), True, 'import numpy.lib.recfunctions as rf\n'), ((6201, 6251), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(curSigma, curBH, isFinite)'], {}), '((curSigma, curBH, isFinite))\n', (6222, 6251), True, 'import numpy as np\n'), ((6301, 6343), 'numpy.log10', 'np.log10', (["data1D['tauVar_rel'][currSubset]"], {}), "(data1D['tauVar_rel'][currSubset])\n", (6309, 6343), True, 'import numpy as np\n'), ((6396, 6427), 'numpy.log10', 'np.log10', (['transMode[currSubset]'], {}), '(transMode[currSubset])\n', (6404, 6427), True, 'import numpy as np\n'), ((7909, 7934), 'numpy.full', 'np.full', (['(nY, nX)', 'np.nan'], {}), '((nY, nX), np.nan)\n', (7916, 7934), True, 'import numpy as np\n'), ((9712, 9724), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9722, 9724), True, 'import matplotlib.pyplot as plt\n'), ((9729, 9766), 'mls_general_code.set_fig_size_cm', 'mlsg.set_fig_size_cm', (['fig', 'wFig', 'hFig'], {}), '(fig, wFig, hFig)\n', (9749, 9766), True, 'import 
mls_general_code as mlsg\n'), ((3532, 3587), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'nJobs', 'verbose': '(9)', 'timeout': '(1000000000.0)'}), '(n_jobs=nJobs, verbose=9, timeout=1000000000.0)\n', (3540, 3587), False, 'from joblib import Parallel, delayed\n'), ((4681, 4717), 'numpy.load', 'np.load', (['loadName'], {'allow_pickle': '(True)'}), '(loadName, allow_pickle=True)\n', (4688, 4717), True, 'import numpy as np\n'), ((6070, 6165), 'numpy.isfinite', 'np.isfinite', (["(data1D['tauVar_rel'], data1D['tauHer_rel'], data1D['F_mav'], curSigma, curBH)"], {}), "((data1D['tauVar_rel'], data1D['tauHer_rel'], data1D['F_mav'],\n curSigma, curBH))\n", (6081, 6165), True, 'import numpy as np\n'), ((6874, 6907), 'numpy.linspace', 'np.linspace', (['*tauVRange', 'steps[0]'], {}), '(*tauVRange, steps[0])\n', (6885, 6907), True, 'import numpy as np\n'), ((6927, 6960), 'numpy.linspace', 'np.linspace', (['*tauHRange', 'steps[1]'], {}), '(*tauHRange, steps[1])\n', (6938, 6960), True, 'import numpy as np\n'), ((6980, 7010), 'numpy.linspace', 'np.linspace', (['*fRange', 'steps[2]'], {}), '(*fRange, steps[2])\n', (6991, 7010), True, 'import numpy as np\n'), ((9309, 9342), 'numpy.linspace', 'np.linspace', (['*tauVRange', 'steps[0]'], {}), '(*tauVRange, steps[0])\n', (9320, 9342), True, 'import numpy as np\n'), ((9362, 9395), 'numpy.linspace', 'np.linspace', (['*tauHRange', 'steps[1]'], {}), '(*tauHRange, steps[1])\n', (9373, 9395), True, 'import numpy as np\n'), ((3347, 3406), 'itertools.product', 'itertools.product', (['*(cost_vec, mig_vec, B_H_vec, sigma_vec)'], {}), '(*(cost_vec, mig_vec, B_H_vec, sigma_vec))\n', (3364, 3406), False, 'import itertools\n'), ((3942, 3965), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3963, 3965), False, 'import datetime\n'), ((8084, 8149), 'numpy.logical_and', 'np.logical_and', (['(currXData >= xbins[xx])', '(currXData < xbins[xx + 1])'], {}), '(currXData >= xbins[xx], currXData < xbins[xx + 1])\n', (8098, 8149), True, 
'import numpy as np\n'), ((8190, 8255), 'numpy.logical_and', 'np.logical_and', (['(currYData >= ybins[yy])', '(currYData < ybins[yy + 1])'], {}), '(currYData >= ybins[yy], currYData < ybins[yy + 1])\n', (8204, 8255), True, 'import numpy as np\n'), ((8295, 8325), 'numpy.logical_and', 'np.logical_and', (['inXBin', 'inYBin'], {}), '(inXBin, inYBin)\n', (8309, 8325), True, 'import numpy as np\n'), ((8431, 8449), 'numpy.nanmean', 'np.nanmean', (['zInBin'], {}), '(zInBin)\n', (8441, 8449), True, 'import numpy as np\n'), ((3589, 3625), 'joblib.delayed', 'delayed', (['mlssf.single_run_finalstate'], {}), '(mlssf.single_run_finalstate)\n', (3596, 3625), False, 'from joblib import Parallel, delayed\n'), ((8633, 8679), 'numpy.ceil', 'np.ceil', (['((tauVRange[1] - tauVRange[0]) / xStep)'], {}), '((tauVRange[1] - tauVRange[0]) / xStep)\n', (8640, 8679), True, 'import numpy as np\n'), ((8729, 8775), 'numpy.ceil', 'np.ceil', (['((tauHRange[1] - tauHRange[0]) / yStep)'], {}), '((tauHRange[1] - tauHRange[0]) / yStep)\n', (8736, 8775), True, 'import numpy as np\n')] |
# Copyright 2019, 2020, 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import aif360.algorithms.postprocessing
import aif360.datasets
import aif360.metrics
import numpy as np
import pandas as pd
import sklearn.metrics
import sklearn.model_selection
import lale.datasets.data_schemas
import lale.datasets.openml
import lale.lib.lale
import lale.operators
import lale.type_checking
from lale.datasets.data_schemas import add_schema_adjusting_n_rows
# Module-level logger; default to WARNING so importing this library does not
# spam users -- the fairness scorers below log diagnostics through it.
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def dataset_to_pandas(dataset, return_only="Xy"):
    """
    Return pandas representation of the AIF360 dataset.
    Parameters
    ----------
    dataset : aif360.datasets.BinaryLabelDataset
        AIF360 dataset to convert to a pandas representation.
    return_only : 'Xy', 'X', or 'y'
        Which part of features X or labels y to convert and return.
    Returns
    -------
    result : tuple
        - item 0: pandas Dataframe or None, features X
        - item 1: pandas Series or None, labels y
    """
    result_X = None
    result_y = None
    if "X" in return_only:
        features = pd.DataFrame(dataset.features, columns=dataset.feature_names)
        result_X = lale.datasets.data_schemas.add_schema(features)
        assert isinstance(result_X, pd.DataFrame), type(result_X)
    if "y" in return_only:
        labels = pd.Series(dataset.labels.ravel(), name=dataset.label_names[0])
        result_y = lale.datasets.data_schemas.add_schema(labels)
        assert isinstance(result_y, pd.Series), type(result_y)
    return result_X, result_y
# JSON sub-schemas for "dataset-style" fairness info, i.e. the representation
# used by aif360's BinaryLabelDataset: numeric favorable/unfavorable labels
# and protected attributes already encoded as group-indicating values.
_dataset_fairness_properties: lale.type_checking.JSON_TYPE = {
    "favorable_label": {
        "description": 'Label value which is considered favorable (i.e. "positive").',
        "type": "number",
    },
    "unfavorable_label": {
        "description": 'Label value which is considered unfavorable (i.e. "negative").',
        "type": "number",
    },
    "protected_attribute_names": {
        "description": "Subset of feature names for which fairness is desired.",
        "type": "array",
        "items": {"type": "string"},
    },
    "unprivileged_groups": {
        "description": "Representation for unprivileged group.",
        "type": "array",
        "items": {
            "description": "Map from feature names to group-indicating values.",
            "type": "object",
            "additionalProperties": {"type": "number"},
        },
    },
    "privileged_groups": {
        "description": "Representation for privileged group.",
        "type": "array",
        "items": {
            "description": "Map from feature names to group-indicating values.",
            "type": "object",
            "additionalProperties": {"type": "number"},
        },
    },
}
# JSON sub-schemas for "categorical-style" fairness info: raw label values
# (strings, numbers, or inclusive numeric ranges) and raw protected-attribute
# values, before any encoding to 0/1 group indicators.
_categorical_fairness_properties: lale.type_checking.JSON_TYPE = {
    "favorable_labels": {
        "description": 'Label values which are considered favorable (i.e. "positive").',
        "type": "array",
        "minItems": 1,
        "items": {
            "anyOf": [
                {"description": "Literal value.", "type": "string"},
                {"description": "Numerical value.", "type": "number"},
                {
                    "description": "Numeric range [a,b] from a to b inclusive.",
                    "type": "array",
                    "minItems": 2,
                    "maxItems": 2,
                    "items": {"type": "number"},
                },
            ]
        },
    },
    "protected_attributes": {
        "description": "Features for which fairness is desired.",
        "type": "array",
        "minItems": 1,
        "items": {
            "type": "object",
            "required": ["feature", "privileged_groups"],
            "properties": {
                "feature": {
                    "description": "Column name or column index.",
                    "anyOf": [{"type": "string"}, {"type": "integer"}],
                },
                "privileged_groups": {
                    "description": "Values or ranges that indicate being a member of the privileged group.",
                    "type": "array",
                    "minItems": 1,
                    "items": {
                        "anyOf": [
                            {"description": "Literal value.", "type": "string"},
                            {"description": "Numerical value.", "type": "number"},
                            {
                                "description": "Numeric range [a,b] from a to b inclusive.",
                                "type": "array",
                                "minItems": 2,
                                "maxItems": 2,
                                "items": {"type": "number"},
                            },
                        ]
                    },
                },
            },
        },
    },
}
# Object schemas wrapping the property groups above, for use with
# lale.type_checking.validate_schema.
_categorical_fairness_schema = {
    "type": "object",
    "properties": _categorical_fairness_properties,
}
_dataset_fairness_schema = {
    "type": "object",
    "properties": _dataset_fairness_properties,
}
def dataset_fairness_info(dataset):
    """
    Inspect the AIF360 dataset and return its fairness metadata as JSON.
    Parameters
    ----------
    dataset : aif360.datasets.BinaryLabelDataset
    Returns
    -------
    result : dict
        JSON data structure with fairness information.
        - favorable_label : number
            Label value which is considered favorable (i.e. "positive").
        - unfavorable_label : number
            Label value which is considered unfavorable (i.e. "negative").
        - protected_attribute_names : array **of** items : string
            Subset of feature names for which fairness is desired.
        - unprivileged_groups : array
            Representation for unprivileged group.
            - items : dict
                Map from feature names to group-indicating values.
        - privileged_groups : array
            Representation for privileged group.
            - items : dict
                Map from feature names to group-indicating values.
    """

    def attributes_to_groups(names, value_arrays):
        # Cartesian product: one dict per combination of attribute values.
        groups = [{}]
        for name, values in zip(names, value_arrays):
            groups = [{**group, name: value} for group in groups for value in values]
        return groups

    pa_names = dataset.protected_attribute_names
    result = {
        "favorable_label": dataset.favorable_label,
        "unfavorable_label": dataset.unfavorable_label,
        "protected_attribute_names": pa_names,
        "unprivileged_groups": attributes_to_groups(
            pa_names, dataset.unprivileged_protected_attributes
        ),
        "privileged_groups": attributes_to_groups(
            pa_names, dataset.privileged_protected_attributes
        ),
    }
    lale.type_checking.validate_schema(result, _dataset_fairness_schema)
    return result
class _PandasToDatasetConverter:
    """Convert pandas (X, y) data into an aif360 BinaryLabelDataset,
    validating the dataset-style fairness info once at construction time."""

    def __init__(self, favorable_label, unfavorable_label, protected_attribute_names):
        # Validate each piece of fairness info against its JSON sub-schema
        # before storing it.
        lale.type_checking.validate_schema(
            favorable_label, _dataset_fairness_properties["favorable_label"]
        )
        self.favorable_label = favorable_label
        lale.type_checking.validate_schema(
            unfavorable_label, _dataset_fairness_properties["unfavorable_label"]
        )
        self.unfavorable_label = unfavorable_label
        lale.type_checking.validate_schema(
            protected_attribute_names,
            _dataset_fairness_properties["protected_attribute_names"],
        )
        self.protected_attribute_names = protected_attribute_names

    def convert(self, X, y, probas=None):
        """Build a BinaryLabelDataset from features X (DataFrame) and labels y
        (Series); optionally attach per-class probabilities as scores."""
        assert isinstance(X, pd.DataFrame), type(X)
        assert isinstance(y, pd.Series), type(y)
        assert X.shape[0] == y.shape[0], f"X.shape {X.shape}, y.shape {y.shape}"
        assert not X.isna().any().any(), f"X\n{X}\n"
        assert not y.isna().any().any(), f"y\n{X}\n"
        # Re-index y onto X's index so the horizontal concat aligns row-wise.
        y_reindexed = pd.Series(data=y.values, index=X.index, name=y.name)
        df = pd.concat([X, y_reindexed], axis=1)
        assert df.shape[0] == X.shape[0], f"df.shape {df.shape}, X.shape {X.shape}"
        assert not df.isna().any().any(), f"df\n{df}\nX\n{X}\ny\n{y}"
        label_names = [y.name]
        result = aif360.datasets.BinaryLabelDataset(
            favorable_label=self.favorable_label,
            unfavorable_label=self.unfavorable_label,
            protected_attribute_names=self.protected_attribute_names,
            df=df,
            label_names=label_names,
        )
        if probas is not None:
            pos_ind = 1  # TODO: is this always the case?
            result.scores = probas[:, pos_ind].reshape(-1, 1)
        return result
def _group_flag(value, groups):
for group in groups:
if isinstance(group, list):
if group[0] <= value <= group[1]:
return 1
elif value == group:
return 1
return 0
def _dataframe_replace(dataframe, subst):
new_columns = [
subst.get(i, subst.get(name, dataframe.iloc[:, i]))
for i, name in enumerate(dataframe.columns)
]
result = pd.concat(new_columns, axis=1)
return result
def _ensure_str(str_or_int):
return f"f{str_or_int}" if isinstance(str_or_int, int) else str_or_int
def _ndarray_to_series(data, name, index=None, dtype=None):
    """Wrap a 1-d array as a pandas Series, re-attaching any JSON schema
    found on the input; a Series passed in is returned unchanged."""
    if isinstance(data, pd.Series):
        return data
    series = pd.Series(data=data, index=index, dtype=dtype, name=_ensure_str(name))
    schema = getattr(data, "json_schema", None)
    if schema is None:
        return series
    return lale.datasets.data_schemas.add_schema(series, schema)
def _ndarray_to_dataframe(array):
    """Wrap a 2-d array as a pandas DataFrame, re-attaching any JSON schema.

    Column names are taken from the per-column "description" fields of the
    attached schema when all are present; otherwise synthetic names f0, f1,
    ... are generated.
    """
    assert len(array.shape) == 2
    schema = getattr(array, "json_schema", None)
    names = None
    if schema is not None:
        per_column = schema.get("items", {}).get("items", None)
        if isinstance(per_column, list):
            names = [col.get("description", None) for col in per_column]
    if names is None or None in names:
        names = [_ensure_str(position) for position in range(array.shape[1])]
    result = pd.DataFrame(array, columns=names)
    if schema is None:
        return result
    return lale.datasets.data_schemas.add_schema(result, schema)
class _ScorerFactory:
    """Wrap an aif360 group-fairness metric as a scikit-learn style scorer.

    The metric name must be a method of aif360's BinaryLabelDatasetMetric or
    ClassificationMetric.  Fairness info may be given in dataset style
    (favorable_label, unfavorable_label, protected_attribute_names,
    unprivileged_groups, privileged_groups) or in categorical style
    (favorable_labels, protected_attributes); the categorical style is
    converted to dataset style here, encoding labels to 1/0 and protected
    attributes to privileged=1 / unprivileged=0.
    """

    def __init__(
        self,
        metric,
        favorable_label=None,
        unfavorable_label=None,
        protected_attribute_names=None,
        unprivileged_groups=None,
        privileged_groups=None,
        favorable_labels=None,
        protected_attributes=None,
    ):
        # Dispatch on which aif360 metrics class defines the metric name.
        if hasattr(aif360.metrics.BinaryLabelDatasetMetric, metric):
            self.kind = "BinaryLabelDatasetMetric"
        elif hasattr(aif360.metrics.ClassificationMetric, metric):
            self.kind = "ClassificationMetric"
        else:
            raise ValueError(f"unknown metric {metric}")
        self.metric = metric
        if favorable_labels is None:
            self.prot_attr_enc = None
        else:
            # Categorical style given: the two styles are mutually exclusive,
            # so derive all dataset-style fields from the categorical info.
            self.favorable_labels = favorable_labels
            assert favorable_label is None and unfavorable_label is None
            favorable_label, unfavorable_label = 1, 0
            assert protected_attribute_names is None
            pas = protected_attributes
            protected_attribute_names = [_ensure_str(pa["feature"]) for pa in pas]
            assert unprivileged_groups is None and privileged_groups is None
            unprivileged_groups = [{_ensure_str(pa["feature"]): 0 for pa in pas}]
            privileged_groups = [{_ensure_str(pa["feature"]): 1 for pa in pas}]
            from lale.lib.aif360 import ProtectedAttributesEncoder

            self.prot_attr_enc = ProtectedAttributesEncoder(
                favorable_labels=favorable_labels,
                protected_attributes=protected_attributes,
                remainder="drop",
                return_X_y=True,
            )
        self.fairness_info = {
            "favorable_label": favorable_label,
            "unfavorable_label": unfavorable_label,
            "protected_attribute_names": protected_attribute_names,
            "unprivileged_groups": unprivileged_groups,
            "privileged_groups": privileged_groups,
        }
        lale.type_checking.validate_schema(self.fairness_info, _dataset_fairness_schema)
        self.pandas_to_dataset = _PandasToDatasetConverter(
            favorable_label, unfavorable_label, protected_attribute_names
        )

    def scoring(self, y_true=None, y_pred=None, X=None):
        """Compute the fairness metric from predictions y_pred and features X.

        y_true is required for ClassificationMetric metrics, and also when
        y_pred is a bare ndarray (its name seeds the series name).  Returns
        the metric value, logging a warning when it is ill-defined.
        """
        assert y_pred is not None
        assert X is not None
        y_pred_orig = y_pred
        if not isinstance(y_pred, pd.Series):
            assert y_true is not None
            # Coerce ndarray predictions to a Series aligned with X's index.
            y_pred = _ndarray_to_series(
                y_pred,
                y_true.name
                if isinstance(y_true, pd.Series)
                else _ensure_str(X.shape[1]),
                X.index if isinstance(X, pd.DataFrame) else None,
                y_pred.dtype,
            )
        # Only categorical-style scorers need to encode X and y first.
        if getattr(self, "favorable_labels", None) is None:
            encoded_X = X
        else:
            encoded_X, y_pred = self.prot_attr_enc.transform(X, y_pred)
        dataset_pred = self.pandas_to_dataset.convert(encoded_X, y_pred)
        if self.kind == "BinaryLabelDatasetMetric":
            fairness_metrics = aif360.metrics.BinaryLabelDatasetMetric(
                dataset_pred,
                self.fairness_info["unprivileged_groups"],
                self.fairness_info["privileged_groups"],
            )
        else:
            assert self.kind == "ClassificationMetric"
            assert y_true is not None
            if not isinstance(y_true, pd.Series):
                y_true = _ndarray_to_series(
                    y_true, y_pred.name, y_pred.index, y_pred_orig.dtype
                )
            if getattr(self, "favorable_labels", None) is not None:
                _, y_true = self.prot_attr_enc.transform(X, y_true)
            dataset_true = self.pandas_to_dataset.convert(encoded_X, y_true)
            fairness_metrics = aif360.metrics.ClassificationMetric(
                dataset_true,
                dataset_pred,
                self.fairness_info["unprivileged_groups"],
                self.fairness_info["privileged_groups"],
            )
        method = getattr(fairness_metrics, self.metric)
        result = method()
        if np.isnan(result) or not np.isfinite(result):
            # Diagnose the usual causes of an ill-defined metric value.
            if 0 == fairness_metrics.num_positives(privileged=True):
                logger.warning("there are 0 positives in the privileged group")
            if 0 == fairness_metrics.num_positives(privileged=False):
                logger.warning("there are 0 positives in the unprivileged group")
            if 0 == fairness_metrics.num_instances(privileged=True):
                logger.warning("there are 0 instances in the privileged group")
            if 0 == fairness_metrics.num_instances(privileged=False):
                logger.warning("there are 0 instances in the unprivileged group")
            if self.metric == "disparate_impact":
                result = 0.0
            logger.warning(
                f"The metric {self.metric} is ill-defined and returns {result}. Check your fairness configuration. The set of predicted labels is {set(y_pred_orig)}."
            )
        return result

    def scorer(self, estimator, X, y):
        """Scikit-learn scorer signature: score the estimator's predictions on X."""
        return self.scoring(y_true=y, y_pred=estimator.predict(X), X=X)

    def __call__(self, estimator, X, y):
        return self.scorer(estimator, X, y)
_SCORER_DOCSTRING = """
There are two ways to construct this scorer, either with
(favorable_label, unfavorable_label, protected_attribute_names,
unprivileged_groups, privileged_groups) or with
(favorable_labels, protected_attributes).
Parameters
----------
favorable_label : number
Label value which is considered favorable (i.e. "positive").
unfavorable_label : number
Label value which is considered unfavorable (i.e. "negative").
protected_attribute_names : array **of** items : string
Subset of feature names for which fairness is desired.
unprivileged_groups : array
Representation for unprivileged group.
- items : dict
Map from feature names to group-indicating values.
privileged_groups : array
Representation for privileged group.
- items : dict
Map from feature names to group-indicating values.
favorable_labels : array of union
Label values which are considered favorable (i.e. "positive").
- string
Literal value
- number
Numerical value
- array of number, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive.
protected_attributes : array of dict
Features for which fairness is desired.
- feature : string or integer
Column name or column index.
- privileged_groups : array of union
Values or ranges that indicate being a member of the privileged group.
- string
Literal value
- number
Numerical value
- array of number, >= 2 items, <= 2 items
Numeric range [a,b] from a to b inclusive.
Returns
-------
result : callable
Scorer that takes three arguments (estimator, X, y) and returns score.
"""
class _AccuracyAndDisparateImpact:
    """Callable scorer that blends accuracy with a disparate-impact penalty:
    fair models (symmetric impact >= 0.9) keep their accuracy, unfair ones
    are scaled down toward 0."""

    def __init__(
        self,
        favorable_label=None,
        unfavorable_label=None,
        protected_attribute_names=None,
        unprivileged_groups=None,
        privileged_groups=None,
        favorable_labels=None,
        protected_attributes=None,
    ):
        self.accuracy_scorer = sklearn.metrics.make_scorer(
            sklearn.metrics.accuracy_score
        )
        self.disparate_impact_scorer = disparate_impact(
            favorable_label,
            unfavorable_label,
            protected_attribute_names,
            unprivileged_groups,
            privileged_groups,
            favorable_labels,
            protected_attributes,
        )

    def __call__(self, estimator, X, y):
        impact = self.disparate_impact_scorer(estimator, X, y)
        accuracy = self.accuracy_scorer(estimator, X, y)
        if np.isnan(impact):  # empty privileged or unprivileged groups
            return accuracy
        assert 0.0 <= accuracy <= 1.0 and 0.0 <= impact, (accuracy, impact)
        if impact == 0.0:
            return 0.0
        # Fold impact into [0, 1]: values above 1 are mirrored below 1.
        symmetric_impact = impact if impact <= 1.0 else 1.0 / impact
        fairness_threshold = 0.9  # impact above threshold is considered fair
        scaling_factor = 1.0
        if symmetric_impact < fairness_threshold:
            scaling_factor = symmetric_impact / fairness_threshold
        hardness = 4.0  # higher hardness yields result closer to 0 when unfair
        result = accuracy * scaling_factor ** hardness
        assert 0.0 <= result <= accuracy <= 1.0, (result, accuracy)
        assert symmetric_impact >= 0.9 or result < accuracy
        return result
def accuracy_and_disparate_impact(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible combined scorer for `accuracy`_ and `disparate impact`_ given the fairness info.
.. _`accuracy`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
.. _`disparate impact`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.disparate_impact"""
    # Delegate to the stateful scorer class, forwarding every field by name.
    return _AccuracyAndDisparateImpact(
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


accuracy_and_disparate_impact.__doc__ = str(accuracy_and_disparate_impact.__doc__) + _SCORER_DOCSTRING
def average_odds_difference(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible `average odds difference`_ scorer given the fairness info.
.. _`average odds difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.average_odds_difference"""
    # Forward the fairness info to the generic scorer factory by name.
    return _ScorerFactory(
        "average_odds_difference",
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


average_odds_difference.__doc__ = str(average_odds_difference.__doc__) + _SCORER_DOCSTRING
def disparate_impact(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible `disparate impact`_ scorer given the fairness info.
.. _`disparate impact`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.disparate_impact"""
    # Forward the fairness info to the generic scorer factory by name.
    return _ScorerFactory(
        "disparate_impact",
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


disparate_impact.__doc__ = (
    str(disparate_impact.__doc__) + _SCORER_DOCSTRING
)
def equal_opportunity_difference(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible `equal opportunity difference`_ scorer given the fairness info.
.. _`equal opportunity difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.equal_opportunity_difference"""
    # Forward the fairness info to the generic scorer factory by name.
    return _ScorerFactory(
        "equal_opportunity_difference",
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


equal_opportunity_difference.__doc__ = str(equal_opportunity_difference.__doc__) + _SCORER_DOCSTRING
class _R2AndDisparateImpact:
    """Callable scorer that blends the R^2 score with a disparate-impact
    penalty: fair models (symmetric impact >= 0.9) keep their R^2, unfair
    ones are pushed further below 1."""

    def __init__(
        self,
        favorable_label=None,
        unfavorable_label=None,
        protected_attribute_names=None,
        unprivileged_groups=None,
        privileged_groups=None,
        favorable_labels=None,
        protected_attributes=None,
    ):
        self.r2_scorer = sklearn.metrics.make_scorer(sklearn.metrics.r2_score)
        self.disparate_impact_scorer = disparate_impact(
            favorable_label,
            unfavorable_label,
            protected_attribute_names,
            unprivileged_groups,
            privileged_groups,
            favorable_labels,
            protected_attributes,
        )

    def __call__(self, estimator, X, y):
        impact = self.disparate_impact_scorer(estimator, X, y)
        r2 = self.r2_scorer(estimator, X, y)
        if np.isnan(impact):  # empty privileged or unprivileged groups
            return r2
        assert r2 <= 1.0 and 0.0 <= impact, (r2, impact)
        if impact == 0.0:
            return np.finfo(np.float32).min
        # Fold impact into [0, 1]: values above 1 are mirrored below 1.
        symmetric_impact = impact if impact <= 1.0 else 1.0 / impact
        fairness_threshold = 0.9  # impact above threshold is considered fair
        scaling_factor = 1.0
        if symmetric_impact < fairness_threshold:
            scaling_factor = symmetric_impact / fairness_threshold
        hardness = 4.0  # higher hardness yields result closer to 0 when unfair
        # Inflate the distance of r2 from its ideal value of 1 when unfair.
        shortfall = 1.0 - r2
        result = 1.0 - shortfall / scaling_factor ** hardness
        assert result <= r2 <= 1.0, (result, r2)
        assert symmetric_impact >= 0.9 or result < r2
        return result
def r2_and_disparate_impact(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible combined scorer for `R2 score`_ and `disparate impact`_ given the fairness info.
.. _`R2 score`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
.. _`disparate impact`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.disparate_impact"""
    # Delegate to the stateful scorer class, forwarding every field by name.
    return _R2AndDisparateImpact(
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


r2_and_disparate_impact.__doc__ = str(r2_and_disparate_impact.__doc__) + _SCORER_DOCSTRING
def statistical_parity_difference(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible `statistical parity difference`_ scorer given the fairness info.
.. _`statistical parity difference`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.BinaryLabelDatasetMetric.html#aif360.metrics.BinaryLabelDatasetMetric.statistical_parity_difference"""
    # Forward the fairness info to the generic scorer factory by name.
    return _ScorerFactory(
        "statistical_parity_difference",
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label,
        protected_attribute_names=protected_attribute_names,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
    )


statistical_parity_difference.__doc__ = str(statistical_parity_difference.__doc__) + _SCORER_DOCSTRING
def theil_index(
    favorable_label=None,
    unfavorable_label=None,
    protected_attribute_names=None,
    unprivileged_groups=None,
    privileged_groups=None,
    favorable_labels=None,
    protected_attributes=None,
):
    """Create a scikit-learn compatible `Theil index`_ scorer given the fairness info.
.. _`Theil index`: https://aif360.readthedocs.io/en/latest/modules/generated/aif360.metrics.ClassificationMetric.html#aif360.metrics.ClassificationMetric.theil_index"""
    return _ScorerFactory(
        "theil_index",
        favorable_label,
        unfavorable_label,
        protected_attribute_names,
        unprivileged_groups,
        privileged_groups,
        favorable_labels,
        protected_attributes,
    )


# Bug fix: this previously appended _SCORER_DOCSTRING to
# average_odds_difference.__doc__ (a copy-paste slip), which left theil_index
# without its parameter documentation and appended the parameter docs to
# average_odds_difference a second time.
theil_index.__doc__ = str(theil_index.__doc__) + _SCORER_DOCSTRING
class _BaseInprocessingImpl:
    """Shared scaffolding for in-processing fairness mitigators: redact and
    preprocess the features, encode labels and protected attributes to 0/1,
    and feed the combined data to the wrapped aif360 mitigator as a
    BinaryLabelDataset."""

    def __init__(
        self, favorable_labels, protected_attributes, preprocessing, mitigator
    ):
        self.favorable_labels = favorable_labels
        self.protected_attributes = protected_attributes
        if preprocessing is None:
            preprocessing = lale.lib.lale.NoOp
        self.preprocessing = preprocessing
        self.mitigator = mitigator

    def _prep_and_encode(self, X, y=None):
        # Combine redacted/preprocessed features with the encoded protected
        # attributes; on a name collision the encoded column wins.
        prepared_X = self.redact_and_prep.transform(X, y)
        encoded_X, encoded_y = self.prot_attr_enc.transform(X, y)
        combined_attribute_names = list(prepared_X.columns) + [
            name for name in encoded_X.columns if name not in prepared_X.columns
        ]
        combined_columns = [
            encoded_X[name] if name in encoded_X else prepared_X[name]
            for name in combined_attribute_names
        ]
        combined_X = pd.concat(combined_columns, axis=1)
        result = self.pandas_to_dataset.convert(combined_X, encoded_y)
        return result

    def _decode(self, y):
        # Map the internal 1/0 labels back to the original label values;
        # only supports one favorable and one unfavorable label.
        assert isinstance(y, pd.Series)
        assert len(self.favorable_labels) == 1 and len(self.unfavorable_labels) == 1
        favorable, unfavorable = self.favorable_labels[0], self.unfavorable_labels[0]
        result = y.map(lambda label: favorable if label == 1 else unfavorable)
        return result

    def fit(self, X, y):
        """Fit the redaction/preprocessing pipeline, the protected-attributes
        encoder, and the wrapped mitigator on (X, y); returns self."""
        from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting
        fairness_info = {
            "favorable_labels": self.favorable_labels,
            "protected_attributes": self.protected_attributes,
        }
        redacting = Redacting(**fairness_info)
        trainable_redact_and_prep = redacting >> self.preprocessing
        assert isinstance(trainable_redact_and_prep, lale.operators.TrainablePipeline)
        self.redact_and_prep = trainable_redact_and_prep.fit(X, y)
        self.prot_attr_enc = ProtectedAttributesEncoder(
            **fairness_info, remainder="drop", return_X_y=True,
        )
        prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
        self.pandas_to_dataset = _PandasToDatasetConverter(
            favorable_label=1,
            unfavorable_label=0,
            protected_attribute_names=prot_attr_names,
        )
        encoded_data = self._prep_and_encode(X, y)
        self.mitigator.fit(encoded_data)
        # Everything not listed as favorable counts as unfavorable.
        self.unfavorable_labels = list(set(list(y)) - set(list(self.favorable_labels)))
        return self

    def predict(self, X):
        """Predict with the fitted mitigator and decode back to original labels."""
        encoded_data = self._prep_and_encode(X)
        result_data = self.mitigator.predict(encoded_data)
        _, result_y = dataset_to_pandas(result_data, return_only="y")
        decoded_y = self._decode(result_y)
        return decoded_y
class _BasePostprocessingImpl:
    """Shared scaffolding for post-processing fairness mitigators: train a
    redacted estimator, then fit the wrapped aif360 mitigator on the
    (true, predicted) dataset pair so it can adjust predictions later."""

    def __init__(
        self, favorable_labels, protected_attributes, estimator, mitigator,
    ):
        self.favorable_labels = favorable_labels
        self.protected_attributes = protected_attributes
        self.estimator = estimator
        self.mitigator = mitigator

    def _decode(self, y):
        # Map the internal 1/0 labels back to the original label values;
        # only supports one favorable and one unfavorable label.
        assert isinstance(y, pd.Series)
        assert len(self.favorable_labels) == 1 and len(self.unfavorable_labels) == 1
        favorable, unfavorable = self.favorable_labels[0], self.unfavorable_labels[0]
        result = y.map(lambda label: favorable if label == 1 else unfavorable)
        return result

    def fit(self, X, y):
        """Fit the redacted estimator, then fit the mitigator on the pair of
        true and predicted datasets (with prediction probabilities as scores);
        returns self."""
        from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting
        fairness_info = {
            "favorable_labels": self.favorable_labels,
            "protected_attributes": self.protected_attributes,
        }
        redacting = Redacting(**fairness_info)
        trainable_redact_and_estim = redacting >> self.estimator
        assert isinstance(trainable_redact_and_estim, lale.operators.TrainablePipeline)
        self.redact_and_estim = trainable_redact_and_estim.fit(X, y)
        self.prot_attr_enc = ProtectedAttributesEncoder(
            **fairness_info, remainder="drop", return_X_y=True,
        )
        prot_attr_names = [pa["feature"] for pa in self.protected_attributes]
        self.pandas_to_dataset = _PandasToDatasetConverter(
            favorable_label=1,
            unfavorable_label=0,
            protected_attribute_names=prot_attr_names,
        )
        encoded_X, encoded_y = self.prot_attr_enc.transform(X, y)
        # Remember dtype and name so ndarray predictions can be wrapped later.
        self.y_dtype = encoded_y.dtype
        self.y_name = encoded_y.name
        predicted_y = self.redact_and_estim.predict(X)
        predicted_y = _ndarray_to_series(predicted_y, self.y_name, X.index)
        _, predicted_y = self.prot_attr_enc.transform(X, predicted_y)
        predicted_probas = self.redact_and_estim.predict_proba(X)
        dataset_true = self.pandas_to_dataset.convert(encoded_X, encoded_y)
        dataset_pred = self.pandas_to_dataset.convert(
            encoded_X, predicted_y, predicted_probas
        )
        self.mitigator = self.mitigator.fit(dataset_true, dataset_pred)
        # Everything not listed as favorable counts as unfavorable.
        self.unfavorable_labels = list(set(list(y)) - set(list(self.favorable_labels)))
        return self

    def predict(self, X):
        """Predict with the estimator, post-process via the mitigator, and
        decode back to the original label values."""
        predicted_y = self.redact_and_estim.predict(X)
        predicted_probas = self.redact_and_estim.predict_proba(X)
        predicted_y = _ndarray_to_series(predicted_y, self.y_name, X.index)
        encoded_X, predicted_y = self.prot_attr_enc.transform(X, predicted_y)
        dataset_pred = self.pandas_to_dataset.convert(
            encoded_X, predicted_y, predicted_probas
        )
        dataset_out = self.mitigator.predict(dataset_pred)
        _, result_y = dataset_to_pandas(dataset_out, return_only="y")
        decoded_y = self._decode(result_y)
        return decoded_y
# Fit-input schema for supervised mitigators: features X plus class labels y.
_categorical_supervised_input_fit_schema = {
    "type": "object",
    "required": ["X", "y"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {
                "type": "array",
                "items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
            },
        },
        "y": {
            "description": "Target class labels; the array is over samples.",
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {"type": "array", "items": {"type": "string"}},
            ],
        },
    },
}
# Fit-input schema for unsupervised mitigators: y is accepted but unconstrained.
_categorical_unsupervised_input_fit_schema = {
    "description": "Input data schema for training.",
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {
                "type": "array",
                "items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
            },
        },
        "y": {"description": "Target values; the array is over samples."},
    },
}
# Predict-input schema: features only.
_categorical_input_predict_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {
                "type": "array",
                "items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
            },
        }
    },
}
# Predict-output schema: one class label (numeric or string) per sample.
_categorical_output_predict_schema = {
    "description": "Predicted class label per sample.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "string"}},
    ],
}
# Transform-input schema: features only.
_categorical_input_transform_schema = {
    "description": "Input data schema for transform.",
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {
                "type": "array",
                "items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
            },
        }
    },
}
# Transform-output schema when values may remain categorical.
_categorical_output_transform_schema = {
    "description": "Output data schema for reweighted features.",
    "type": "array",
    "items": {
        "type": "array",
        "items": {"anyOf": [{"type": "number"}, {"type": "string"}]},
    },
}
# Transform-output schema when all output values are numeric.
_numeric_output_transform_schema = {
    "description": "Output data schema for reweighted features.",
    "type": "array",
    "items": {"type": "array", "items": {"type": "number"}},
}
def column_for_stratification(X, y, favorable_labels, protected_attributes):
    """Build one string column combining labels and protected attributes.

    Every row is encoded into a string of "T"/"F" flags (one per encoded
    column), so a stratified splitter can balance each combination of
    outcome and protected group.
    """
    from lale.lib.aif360 import ProtectedAttributesEncoder
    encoder = ProtectedAttributesEncoder(
        favorable_labels=favorable_labels,
        protected_attributes=protected_attributes,
        remainder="drop",
        return_X_y=True,
    )
    enc_X, enc_y = encoder.transform(X, y)
    combined = pd.concat([enc_X, enc_y], axis=1)
    def row_to_flags(row):
        flags = ("T" if value == 1 else "F" for value in row)
        return "".join(flags)
    stratify_col = combined.apply(row_to_flags, axis=1)
    stratify_col.name = "stratify"
    return stratify_col
def fair_stratified_train_test_split(
    X, y, favorable_labels, protected_attributes, test_size=0.25
):
    """
    Split X and y into random train and test subsets, stratified jointly by
    class labels and protected attributes.

    Behaves similar to the `train_test_split`_ function from scikit-learn.

    .. _`train_test_split`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html

    Parameters
    ----------
    X : array
        Features including protected attributes as numpy ndarray or pandas dataframe.
    y : array
        Labels as numpy ndarray or pandas series.
    favorable_labels : array
        Label values which are considered favorable (i.e. "positive").
    protected_attributes : array
        Features for which fairness is desired.
    test_size : float, optional, default 0.25
        Fraction of the data reserved for the test subset.

    Returns
    -------
    result : tuple
        - item 0: train_X
        - item 1: test_X
        - item 2: train_y
        - item 3: test_y
    """
    strat = column_for_stratification(X, y, favorable_labels, protected_attributes)
    train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(
        X, y, test_size=test_size, random_state=42, stratify=strat
    )
    # Re-attach any lale JSON schemas, adjusted to the new row counts.
    if hasattr(X, "json_schema"):
        x_schema = X.json_schema
        train_X = add_schema_adjusting_n_rows(train_X, x_schema)
        test_X = add_schema_adjusting_n_rows(test_X, x_schema)
    if hasattr(y, "json_schema"):
        y_schema = y.json_schema
        train_y = add_schema_adjusting_n_rows(train_y, y_schema)
        test_y = add_schema_adjusting_n_rows(test_y, y_schema)
    return train_X, test_X, train_y, test_y
class FairStratifiedKFold:
    """
    Stratified k-fold cross-validator over labels and protected attributes.

    Behaves similar to the `StratifiedKFold`_ class from scikit-learn,
    except folds are balanced over each combination of class label and
    protected group. Instances can be passed as the `cv` argument of the
    `auto_configure`_ method.

    .. _`StratifiedKFold`: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
    .. _`auto_configure`: https://lale.readthedocs.io/en/latest/modules/lale.operators.html#lale.operators.PlannedOperator.auto_configure
    """
    def __init__(
        self,
        favorable_labels,
        protected_attributes,
        n_splits=5,
        shuffle=False,
        random_state=None,
    ):
        """
        Parameters
        ----------
        favorable_labels : array
            Label values which are considered favorable (i.e. "positive").
        protected_attributes : array
            Features for which fairness is desired.
        n_splits : integer, optional, default 5
            Number of folds. Must be at least 2.
        shuffle : boolean, optional, default False
            Whether to shuffle each class's samples before splitting into batches.
        random_state : union type, not for optimizer, default None
            Seed or numpy RandomState controlling the shuffling order; only
            consulted when `shuffle` is True. None falls back to np.random.
        """
        self._fairness_info = {
            "favorable_labels": favorable_labels,
            "protected_attributes": protected_attributes,
        }
        # Index generation itself is delegated to scikit-learn.
        self._stratified_k_fold = sklearn.model_selection.StratifiedKFold(
            n_splits=n_splits, shuffle=shuffle, random_state=random_state
        )
    def get_n_splits(self, X=None, y=None, groups=None):
        """
        The number of splitting iterations in the cross-validator.

        All three arguments are ignored and exist only for compatibility
        with the scikit-learn cross-validator API.

        Returns
        -------
        integer
            The number of splits.
        """
        return self._stratified_k_fold.get_n_splits(X, y, groups)
    def split(self, X, y, groups=None):
        """
        Generate indices to split data into training and test set.

        Stratification uses a synthetic column that combines the class
        labels with the protected attributes.

        Parameters
        ----------
        X : array
            Training data, including columns with the protected attributes.
        y : array
            Target class labels; the array is over samples.
        groups : Any
            Always ignored, exists for compatibility.

        Yields
        ------
        result : tuple
            - train : the training set indices for that split
            - test : the testing set indices for that split
        """
        strat_column = column_for_stratification(X, y, **self._fairness_info)
        return self._stratified_k_fold.split(X, strat_column, groups)
| [
"logging.getLogger",
"pandas.Series",
"lale.lib.aif360.ProtectedAttributesEncoder",
"lale.lib.aif360.Redacting",
"numpy.isnan",
"lale.datasets.data_schemas.add_schema_adjusting_n_rows",
"numpy.isfinite",
"numpy.finfo",
"pandas.DataFrame",
"pandas.concat"
] | [((991, 1018), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1008, 1018), False, 'import logging\n'), ((9903, 9933), 'pandas.concat', 'pd.concat', (['new_columns'], {'axis': '(1)'}), '(new_columns, axis=1)\n', (9912, 9933), True, 'import pandas as pd\n'), ((10923, 10964), 'pandas.DataFrame', 'pd.DataFrame', (['array'], {'columns': 'column_names'}), '(array, columns=column_names)\n', (10935, 10964), True, 'import pandas as pd\n'), ((36879, 37027), 'lale.lib.aif360.ProtectedAttributesEncoder', 'ProtectedAttributesEncoder', ([], {'favorable_labels': 'favorable_labels', 'protected_attributes': 'protected_attributes', 'remainder': '"""drop"""', 'return_X_y': '(True)'}), "(favorable_labels=favorable_labels,\n protected_attributes=protected_attributes, remainder='drop', return_X_y\n =True)\n", (36905, 37027), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder\n'), ((37124, 37165), 'pandas.concat', 'pd.concat', (['[encoded_X, encoded_y]'], {'axis': '(1)'}), '([encoded_X, encoded_y], axis=1)\n', (37133, 37165), True, 'import pandas as pd\n'), ((1607, 1668), 'pandas.DataFrame', 'pd.DataFrame', (['dataset.features'], {'columns': 'dataset.feature_names'}), '(dataset.features, columns=dataset.feature_names)\n', (1619, 1668), True, 'import pandas as pd\n'), ((8726, 8778), 'pandas.Series', 'pd.Series', ([], {'data': 'y.values', 'index': 'X.index', 'name': 'y.name'}), '(data=y.values, index=X.index, name=y.name)\n', (8735, 8778), True, 'import pandas as pd\n'), ((8792, 8827), 'pandas.concat', 'pd.concat', (['[X, y_reindexed]'], {'axis': '(1)'}), '([X, y_reindexed], axis=1)\n', (8801, 8827), True, 'import pandas as pd\n'), ((18931, 18952), 'numpy.isnan', 'np.isnan', (['disp_impact'], {}), '(disp_impact)\n', (18939, 18952), True, 'import numpy as np\n'), ((24400, 24421), 'numpy.isnan', 'np.isnan', (['disp_impact'], {}), '(disp_impact)\n', (24408, 24421), True, 'import numpy as np\n'), ((29043, 29078), 'pandas.concat', 'pd.concat', 
(['combined_columns'], {'axis': '(1)'}), '(combined_columns, axis=1)\n', (29052, 29078), True, 'import pandas as pd\n'), ((29786, 29812), 'lale.lib.aif360.Redacting', 'Redacting', ([], {}), '(**fairness_info)\n', (29795, 29812), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting\n'), ((30064, 30142), 'lale.lib.aif360.ProtectedAttributesEncoder', 'ProtectedAttributesEncoder', ([], {'remainder': '"""drop"""', 'return_X_y': '(True)'}), "(**fairness_info, remainder='drop', return_X_y=True)\n", (30090, 30142), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder\n'), ((31829, 31855), 'lale.lib.aif360.Redacting', 'Redacting', ([], {}), '(**fairness_info)\n', (31838, 31855), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder, Redacting\n'), ((32107, 32185), 'lale.lib.aif360.ProtectedAttributesEncoder', 'ProtectedAttributesEncoder', ([], {'remainder': '"""drop"""', 'return_X_y': '(True)'}), "(**fairness_info, remainder='drop', return_X_y=True)\n", (32133, 32185), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder\n'), ((38611, 38662), 'lale.datasets.data_schemas.add_schema_adjusting_n_rows', 'add_schema_adjusting_n_rows', (['train_X', 'X.json_schema'], {}), '(train_X, X.json_schema)\n', (38638, 38662), False, 'from lale.datasets.data_schemas import add_schema_adjusting_n_rows\n'), ((38680, 38730), 'lale.datasets.data_schemas.add_schema_adjusting_n_rows', 'add_schema_adjusting_n_rows', (['test_X', 'X.json_schema'], {}), '(test_X, X.json_schema)\n', (38707, 38730), False, 'from lale.datasets.data_schemas import add_schema_adjusting_n_rows\n'), ((38783, 38834), 'lale.datasets.data_schemas.add_schema_adjusting_n_rows', 'add_schema_adjusting_n_rows', (['train_y', 'y.json_schema'], {}), '(train_y, y.json_schema)\n', (38810, 38834), False, 'from lale.datasets.data_schemas import add_schema_adjusting_n_rows\n'), ((38852, 38902), 'lale.datasets.data_schemas.add_schema_adjusting_n_rows', 'add_schema_adjusting_n_rows', 
(['test_y', 'y.json_schema'], {}), '(test_y, y.json_schema)\n', (38879, 38902), False, 'from lale.datasets.data_schemas import add_schema_adjusting_n_rows\n'), ((12513, 12661), 'lale.lib.aif360.ProtectedAttributesEncoder', 'ProtectedAttributesEncoder', ([], {'favorable_labels': 'favorable_labels', 'protected_attributes': 'protected_attributes', 'remainder': '"""drop"""', 'return_X_y': '(True)'}), "(favorable_labels=favorable_labels,\n protected_attributes=protected_attributes, remainder='drop', return_X_y\n =True)\n", (12539, 12661), False, 'from lale.lib.aif360 import ProtectedAttributesEncoder\n'), ((15200, 15216), 'numpy.isnan', 'np.isnan', (['result'], {}), '(result)\n', (15208, 15216), True, 'import numpy as np\n'), ((15224, 15243), 'numpy.isfinite', 'np.isfinite', (['result'], {}), '(result)\n', (15235, 15243), True, 'import numpy as np\n'), ((24605, 24625), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (24613, 24625), True, 'import numpy as np\n')] |
from keras.preprocessing import image
import numpy as np
from glob import glob
import pickle
import random
import cv2
from model.extract_bottleneck_features import extract_InceptionV3
from keras.applications.resnet50 import preprocess_input
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from keras.layers import GlobalAveragePooling2D
from keras.layers import Dense
from keras.models import Sequential
from keras.applications.resnet50 import ResNet50
import urllib.request
import os
random.seed(8675309)
path = __file__.replace('model/dog_app.py', '')
def load_dog_names():
    """
    Load the pickled dog breed names, sorted by their label index.

    Each pickled entry looks like a path; the directory prefix and the
    first four characters of the basename are stripped off.

    Returns:
        list: list of dog breed names
    """
    names_file = os.path.join(path, 'model/names.pkl')
    with open(names_file, 'rb') as handle:
        raw_names = pickle.load(handle)
    return [entry.split('/')[-1][4:] for entry in raw_names]
def load_dataset(path):
    """
    Collect image file paths under *path* and parse a breed name per file.

    The breed name is derived from each file name by dropping the
    directory prefix and the trailing ten characters.

    Args:
        path (str): path to the set of images

    Returns:
        list: file paths
        list: the name of the dog breed for each image
    """
    matched_files = glob(path + '*')
    breed_names = [fp.split('/')[-1][:-10] for fp in matched_files]
    return matched_files, breed_names
def path_to_tensor(img_path):
    """
    Load an image from disk and convert it to a batched 4-D array.

    The image is resized to 224x224 and a leading batch axis of size 1
    is added so the result can be fed directly to a Keras model.

    Args:
        img_path (str): path to the image

    Returns:
        numpy.ndarray: array of shape (1, 224, 224, channels)
    """
    loaded = image.load_img(img_path, target_size=(224, 224))
    pixels = image.img_to_array(loaded)
    return np.expand_dims(pixels, axis=0)
def get_model(p=os.path.join(path, 'model/DogInceptionV3Data.npz')):
    """
    Build the breed classifier head on top of InceptionV3 bottleneck features.

    Loads pre-computed InceptionV3 feature maps, stacks a global-average
    pooling layer and two dense layers on top, and restores fine-tuned
    weights from `weights.best.Inception.hdf5`.

    Args:
        p (str): path to the DogInceptionV3Data.npz file with the transformed
            features. If the file does not exist it is downloaded from AWS.

    Returns:
        Keras Model
    """
    url = 'https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz'
    if not os.path.exists(p):
        urllib.request.urlretrieve(url, p)
    train_features = np.load(p)['train']
    head = Sequential()
    # Pooling collapses the spatial dims of the bottleneck feature maps.
    head.add(GlobalAveragePooling2D(input_shape=train_features.shape[1:]))
    head.add(Dense(256, activation='relu'))
    head.add(Dense(133, activation='softmax'))  # 133 dog breeds
    head.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    head.load_weights(os.path.join(path, 'saved_models/weights.best.Inception.hdf5'))
    return head
class DogDetection:
    """
    Detect human faces and dogs in images and predict dog breeds.

    Bundles a pre-trained ResNet50 (ImageNet) for dog detection, an OpenCV
    Haar cascade for face detection, and the fine-tuned InceptionV3 head
    from ``get_model`` for breed classification.

    Example:
        This example uses :class:`.DogDetection` to instantiate an object that handles all the necessary functions
        to predict the dog breed of a dog image.

        >>> dd = DogDetection()
        >>> dd.which_dog('static/sample_dog_output.png')
        >>> dd.dog_detector('static/sample_dog_output.png')
    """
    def __init__(self):
        # NOTE(review): this URL points at DogInceptionV3Data.npz but the
        # download target is the InceptionV3 weights .h5 path -- looks like
        # a copy/paste mismatch; confirm the intended URL.
        url = 'https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz'
        p = os.path.join(path, 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
        if not os.path.exists(p):
            urllib.request.urlretrieve(url, p)
        self.resnet = ResNet50(weights='imagenet')
        self.model = get_model()
        self.dog_names = load_dog_names()
        self.train_files, self.train_targets = load_dataset(os.path.join(path, 'static/images/'))
        self.face_cascade = cv2.CascadeClassifier(os.path.join(path, 'model/haarcascade_frontalface_alt.xml'))
    def face_detector(self, img_path):
        """
        Detect whether at least one human face appears in the image.

        Args:
            img_path (str): Path to the image

        Returns:
            bool: True if the Haar cascade finds one or more faces
        """
        img = cv2.imread(img_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray)
        return len(faces) > 0
    def Inception_predict_breed(self, img_path):
        """
        Predict the breed name for the image via the InceptionV3 head.

        Args:
            img_path (str): Path to the image

        Returns:
            (str): Breed name with the highest predicted probability
        """
        bottleneck_feature = extract_InceptionV3(path_to_tensor(img_path))
        predicted_vector = self.model.predict(bottleneck_feature)
        return self.dog_names[np.argmax(predicted_vector)]
    def ResNet50_predict_labels(self, img_path):
        """
        Predict the ImageNet class index for the image using ResNet50.

        Args:
            img_path (str): Path to the image

        Returns:
            (int): Index of the most probable ImageNet class
        """
        img = preprocess_input(path_to_tensor(img_path))
        return np.argmax(self.resnet.predict(img))
    def dog_detector(self, img_path):
        """
        Determine if there is a dog in the image or not.

        Args:
            img_path (str): Path to the image

        Returns:
            bool: whether the image is a dog or not
        """
        prediction = self.ResNet50_predict_labels(img_path)
        # ImageNet class indices 151..268 are treated as dog categories
        # here (per the original dog-project notebook) -- confirm.
        return (prediction <= 268) & (prediction >= 151)
    def which_dog(self, img_path):
        """
        Determine the breed of the dog in an input image.

        If the image contains a human rather than a dog, report a dog
        breed that the person resembles. If neither is detected, report
        "no face or dog detected".

        NOTE(review): when both a dog and a face are detected, the face
        branch wins and the "look alike" message is returned -- confirm
        that is intended.

        Args:
            img_path (str): The path to an image

        Returns:
            tuple: (message string, predicted breed name)
        """
        is_dog = self.dog_detector(img_path)
        is_face = self.face_detector(img_path)
        # Breed is predicted unconditionally, even when nothing is detected.
        dog_breed = self.Inception_predict_breed(img_path)
        if is_dog or is_face:
            if is_face:
                message = f"The dog look alike is {dog_breed}"
            else:
                message = dog_breed
        else:
            message = "no face or dog detected"
        return message, dog_breed
    def get_image(self, breed):
        """
        Return the path of a random training image matching *breed*.

        Args:
            breed (str): breed name to look up in the training targets

        Returns:
            str: path to a randomly chosen matching training image
        """
        ids = np.where([breed == x for x in self.train_targets])[0]
        idx = np.random.choice(ids)
        # NOTE(review): local name shadows the module-level keras `image`.
        image = self.train_files[idx]
        return image
def main():
    """Smoke-test DogDetection on ../images, showing each query image next
    to a training image of the predicted breed."""
    import matplotlib.pyplot as plt
    from glob import glob
    import cv2
    files = glob('../images/*')
    breed_detector = DogDetection()
    for f in files:
        message, breed = breed_detector.which_dog(f)
        print(message)
        img = cv2.imread(f)
        # OpenCV loads BGR; convert to RGB for matplotlib display.
        cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Pick a training image of the same predicted breed for comparison.
        file = breed_detector.get_image(breed)
        img_similar = cv2.imread(file)
        cv_rgb_similar = cv2.cvtColor(img_similar, cv2.COLOR_BGR2RGB)
        fig, ax = plt.subplots(1, 2, figsize=(8, 4))
        ax[0].imshow(cv_rgb)
        ax[1].imshow(cv_rgb_similar)
        ax[1].set_title(breed)
        plt.pause(0.0001)
    plt.show()
if __name__ == '__main__':
main() | [
"keras.preprocessing.image.img_to_array",
"keras.layers.Dense",
"os.path.exists",
"numpy.where",
"keras.layers.GlobalAveragePooling2D",
"glob.glob",
"numpy.random.choice",
"pickle.load",
"numpy.argmax",
"keras.models.Sequential",
"keras.applications.resnet50.ResNet50",
"cv2.cvtColor",
"matpl... | [((504, 524), 'random.seed', 'random.seed', (['(8675309)'], {}), '(8675309)\n', (515, 524), False, 'import random\n'), ((1183, 1199), 'glob.glob', 'glob', (["(path + '*')"], {}), "(path + '*')\n", (1187, 1199), False, 'from glob import glob\n'), ((1481, 1529), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (1495, 1529), False, 'from keras.preprocessing import image\n'), ((1538, 1561), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1556, 1561), False, 'from keras.preprocessing import image\n'), ((1573, 1598), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1587, 1598), True, 'import numpy as np\n'), ((1617, 1667), 'os.path.join', 'os.path.join', (['path', '"""model/DogInceptionV3Data.npz"""'], {}), "(path, 'model/DogInceptionV3Data.npz')\n", (1629, 1667), False, 'import os\n'), ((2364, 2374), 'numpy.load', 'np.load', (['p'], {}), '(p)\n', (2371, 2374), True, 'import numpy as np\n'), ((2448, 2460), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2458, 2460), False, 'from keras.models import Sequential\n'), ((7037, 7056), 'glob.glob', 'glob', (['"""../images/*"""'], {}), "('../images/*')\n", (7041, 7056), False, 'from glob import glob\n'), ((7607, 7617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7615, 7617), True, 'import matplotlib.pyplot as plt\n'), ((785, 799), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (796, 799), False, 'import pickle\n'), ((2276, 2293), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2290, 2293), False, 'import os\n'), ((2485, 2546), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'input_shape': 'train_Inception.shape[1:]'}), '(input_shape=train_Inception.shape[1:])\n', (2507, 2546), False, 'from keras.layers import GlobalAveragePooling2D\n'), ((2572, 2601), 'keras.layers.Dense', 'Dense', 
(['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (2577, 2601), False, 'from keras.layers import Dense\n'), ((2627, 2659), 'keras.layers.Dense', 'Dense', (['(133)'], {'activation': '"""softmax"""'}), "(133, activation='softmax')\n", (2632, 2659), False, 'from keras.layers import Dense\n'), ((2798, 2860), 'os.path.join', 'os.path.join', (['path', '"""saved_models/weights.best.Inception.hdf5"""'], {}), "(path, 'saved_models/weights.best.Inception.hdf5')\n", (2810, 2860), False, 'import os\n'), ((3587, 3665), 'os.path.join', 'os.path.join', (['path', '"""inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"""'], {}), "(path, 'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')\n", (3599, 3665), False, 'import os\n'), ((3769, 3797), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (3777, 3797), False, 'from keras.applications.resnet50 import ResNet50\n'), ((4455, 4475), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (4465, 4475), False, 'import cv2\n'), ((4491, 4528), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4503, 4528), False, 'import cv2\n'), ((6853, 6874), 'numpy.random.choice', 'np.random.choice', (['ids'], {}), '(ids)\n', (6869, 6874), True, 'import numpy as np\n'), ((7203, 7216), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (7213, 7216), False, 'import cv2\n'), ((7234, 7270), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7246, 7270), False, 'import cv2\n'), ((7340, 7356), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (7350, 7356), False, 'import cv2\n'), ((7382, 7426), 'cv2.cvtColor', 'cv2.cvtColor', (['img_similar', 'cv2.COLOR_BGR2RGB'], {}), '(img_similar, cv2.COLOR_BGR2RGB)\n', (7394, 7426), False, 'import cv2\n'), ((7445, 7479), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)'}), '(1, 
2, figsize=(8, 4))\n', (7457, 7479), True, 'import matplotlib.pyplot as plt\n'), ((7585, 7602), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (7594, 7602), True, 'import matplotlib.pyplot as plt\n'), ((714, 751), 'os.path.join', 'os.path.join', (['path', '"""model/names.pkl"""'], {}), "(path, 'model/names.pkl')\n", (726, 751), False, 'import os\n'), ((3681, 3698), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (3695, 3698), False, 'import os\n'), ((3933, 3969), 'os.path.join', 'os.path.join', (['path', '"""static/images/"""'], {}), "(path, 'static/images/')\n", (3945, 3969), False, 'import os\n'), ((4021, 4080), 'os.path.join', 'os.path.join', (['path', '"""model/haarcascade_frontalface_alt.xml"""'], {}), "(path, 'model/haarcascade_frontalface_alt.xml')\n", (4033, 4080), False, 'import os\n'), ((5086, 5113), 'numpy.argmax', 'np.argmax', (['predicted_vector'], {}), '(predicted_vector)\n', (5095, 5113), True, 'import numpy as np\n'), ((6785, 6837), 'numpy.where', 'np.where', (['[(breed == x) for x in self.train_targets]'], {}), '([(breed == x) for x in self.train_targets])\n', (6793, 6837), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
from flearn.utils.tf_utils import process_grad, cosine_sim, softmax, norm_grad
from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, project
class Server(BaseFedarated):
    """Non-stochastic agnostic federated learning server.

    Keeps one mixing weight (lambda) per client, updates lambdas in the
    direction of the client losses with a projection step, and maintains
    a running average of the model iterates in ``self.resulting_model``.
    """
    def __init__(self, params, learner, dataset):
        print('Using agnostic flearn (non-stochastic version) to Train')
        self.inner_opt = tf.train.AdagradOptimizer(params['learning_rate'])
        super(Server, self).__init__(params, learner, dataset)
        # Start from uniform mixing weights over clients.
        self.latest_lambdas = np.ones(len(self.clients)) * 1.0 / len(self.clients)
        self.resulting_model = self.client_model.get_params() # this is only for the agnostic flearn paper
    def train(self):
        """Run the agnostic training loop for ``num_rounds`` rounds."""
        print('Training with {} workers ---'.format(self.clients_per_round))
        num_clients = len(self.clients)
        # NOTE(review): pk is computed but never used below.
        pk = np.ones(num_clients) * 1.0 / num_clients
        batches = {}
        for c in self.clients:
            batches[c] = gen_epoch(c.train_data, self.num_rounds+2)
        for i in trange(self.num_rounds+1, desc='Round: ', ncols=120):
            # test model
            if i % self.eval_every == 0:
                self.client_model.set_params(self.resulting_model)
                stats = self.test_resulting_model()
                tqdm.write('At round {} testing accuracy: {}'.format(i, np.sum(stats[3])*1.0/np.sum(stats[2])))
                test_accuracies = np.divide(np.asarray(stats[3]), np.asarray(stats[2]))
                for idx in range(len(self.clients)):
                    tqdm.write('Client {} testing accuracy: {}'.format(self.clients[idx].id, test_accuracies[idx]))
            # One SGD step per client on the current global model.
            solns = []
            losses = []
            for idx, c in enumerate(self.clients):
                c.set_params(self.latest_model)
                batch = next(batches[c])
                _, grads, loss = c.solve_sgd(batch) # this gradient is with respect to w
                losses.append(loss)
                solns.append((self.latest_lambdas[idx],grads[1]))
            # Lambda-weighted aggregate of the client gradients.
            avg_gradient = self.aggregate(solns)
            # In-place descent step on each parameter array.
            for v,g in zip(self.latest_model, avg_gradient):
                v -= self.learning_rate * g
            # Ascent on lambdas toward higher-loss clients, then project
            # back onto the simplex.
            for idx in range(len(self.latest_lambdas)):
                self.latest_lambdas[idx] += self.learning_rate_lambda * losses[idx]
            self.latest_lambdas = project(self.latest_lambdas)
            # Running average of all iterates seen so far.
            for k in range(len(self.resulting_model)):
                self.resulting_model[k] = (self.resulting_model[k] * i + self.latest_model[k]) * 1.0 / (i+1)
| [
"numpy.ones",
"flearn.utils.model_utils.project",
"numpy.asarray",
"flearn.utils.model_utils.gen_epoch",
"tensorflow.train.AdagradOptimizer",
"numpy.sum",
"tqdm.trange"
] | [((446, 496), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (["params['learning_rate']"], {}), "(params['learning_rate'])\n", (471, 496), True, 'import tensorflow as tf\n'), ((1084, 1138), 'tqdm.trange', 'trange', (['(self.num_rounds + 1)'], {'desc': '"""Round: """', 'ncols': '(120)'}), "(self.num_rounds + 1, desc='Round: ', ncols=120)\n", (1090, 1138), False, 'from tqdm import trange, tqdm\n'), ((1023, 1067), 'flearn.utils.model_utils.gen_epoch', 'gen_epoch', (['c.train_data', '(self.num_rounds + 2)'], {}), '(c.train_data, self.num_rounds + 2)\n', (1032, 1067), False, 'from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, project\n'), ((2481, 2509), 'flearn.utils.model_utils.project', 'project', (['self.latest_lambdas'], {}), '(self.latest_lambdas)\n', (2488, 2509), False, 'from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, project\n'), ((904, 924), 'numpy.ones', 'np.ones', (['num_clients'], {}), '(num_clients)\n', (911, 924), True, 'import numpy as np\n'), ((1513, 1533), 'numpy.asarray', 'np.asarray', (['stats[3]'], {}), '(stats[3])\n', (1523, 1533), True, 'import numpy as np\n'), ((1535, 1555), 'numpy.asarray', 'np.asarray', (['stats[2]'], {}), '(stats[2])\n', (1545, 1555), True, 'import numpy as np\n'), ((1450, 1466), 'numpy.sum', 'np.sum', (['stats[2]'], {}), '(stats[2])\n', (1456, 1466), True, 'import numpy as np\n'), ((1429, 1445), 'numpy.sum', 'np.sum', (['stats[3]'], {}), '(stats[3])\n', (1435, 1445), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, random_split
from PIL import Image
import numpy as np
import os
import argparse
import json
import Encoder
import Public_Classifier
# Shared torchvision pipeline: convert PIL images to float tensors.
preprocess = transforms.Compose([
    transforms.ToTensor(),
])
# CelebA dataset
class CelebA(Dataset):
    """CelebA face-attribute dataset.

    Args:
        img_dir (str): directory containing the aligned face images; file
            names are assumed to be 1-based numeric ids (e.g. 000001.jpg)
        label_root (str): path to a .npy file with one row of attribute
            labels per image
    """
    def __init__(self, img_dir, label_root):
        self.img_dir = os.listdir(img_dir)
        self.label_root = np.load(label_root)
        self.root = img_dir
    def __len__(self):
        return len(self.img_dir)
    def __getitem__(self, idx):
        """Return {'images': tensor, 'labels': 0/1 attribute row} for idx."""
        filename = self.img_dir[idx]
        img = Image.open(os.path.join(self.root, filename))
        # Copy the row: indexing the numpy array returns a view, and the
        # original in-place -1 -> 0 loop silently mutated the shared
        # label array loaded in __init__.
        label = self.label_root[int(filename[:-4]) - 1].copy()
        # Map the {-1, 1} attribute encoding to {0, 1} targets.
        label[label < 0] = 0
        img = preprocess(img)
        return {'images': img, 'labels': label}
if __name__ == '__main__':
    # args
    parser = argparse.ArgumentParser()
    parser.add_argument('-gpu', type=bool, default=True, help='use gpu or not')
    parser.add_argument('-img_dir', type=str, default='/home/al380/CelebA/data/img_align_celeba/',
                        help='image dictionary path')
    parser.add_argument('-label_root', type=str, default='/home/al380/CelebA/data/labels.npy',
                        help='label root path')
    parser.add_argument('-labels', type=list, default=[31], help='label index list(default: smiling only)')
    parser.add_argument('-epoch', type=int, default=20, help='epoch number for training')
    parser.add_argument('-w', type=int, default=32, help='number of workers for dataloader')
    parser.add_argument('-b', type=int, default=512, help='batch size for dataloader')
    parser.add_argument('-s', type=bool, default=True, help='whether shuffle the dataset')
    parser.add_argument('-lr', type=float, default=0.0001, help='initial learning rate')
    args = parser.parse_args()
    # if args.gpu:
    #     os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2, 3, 4, 5'
    # data loader: 80/20 random train/test split of CelebA
    print('data loading')
    celeba_dataset = CelebA(args.img_dir, args.label_root)
    train_size = int(0.8 * len(celeba_dataset))
    test_size = len(celeba_dataset) - train_size
    train_dataset, test_dataset = random_split(celeba_dataset, [train_size, test_size])
    celeba_train_loader = DataLoader(train_dataset, batch_size=args.b, shuffle=args.s, num_workers=args.w)
    celeba_test_loader = DataLoader(test_dataset, batch_size=args.b, shuffle=args.s, num_workers=args.w)
    print(len(celeba_dataset))
    print(len(train_dataset))
    print(len(celeba_train_loader.dataset))
    print(len(celeba_test_loader.dataset))
    print('done')
    # Encoder produces features; the public classifier predicts the
    # selected attribute(s) from those features.
    E = Encoder.Encoder()
    PC = Public_Classifier.Public_Classifier(len(args.labels))
    if args.gpu:
        E = E.cuda()
        PC = PC.cuda()
        E = nn.DataParallel(E)#, device_ids=[0, 1, 2, 3, 4, 5])
        PC = nn.DataParallel(PC)#, device_ids=[0, 1, 2, 3, 4, 5])
    # loss & optimizer
    loss_func = nn.BCELoss()
    E_optimizer = optim.Adam(E.parameters(), lr=args.lr)
    PC_optimizer = optim.Adam(PC.parameters(), lr=args.lr)
    # Decay both learning rates by 10x every 10 epochs.
    E_scheduler = optim.lr_scheduler.StepLR(E_optimizer, step_size=10, gamma=0.1)
    PC_scheduler = optim.lr_scheduler.StepLR(PC_optimizer, step_size=10, gamma=0.1)
    for epoch in range(args.epoch):
        # training phase
        E_scheduler.step(epoch)
        PC_scheduler.step(epoch)
        train_loss = []
        train_acc = []
        E.train()
        PC.train()
        for batch_idx, sample in enumerate(celeba_train_loader):
            images = sample['images']
            labels = sample['labels']
            images = images.type(torch.FloatTensor)
            labels = labels[:, args.labels]
            labels = labels.type(torch.FloatTensor)
            if args.gpu:
                images = images.cuda()
                labels = labels.cuda()
            E_optimizer.zero_grad()
            PC_optimizer.zero_grad()
            features, p1_idx, p2_idx = E(images)
            out = PC(features)
            loss = loss_func(out, labels)
            train_loss.append(loss.cpu().data.numpy())
            # Threshold sigmoid outputs at 0.5 to compute accuracy.
            out = out.cpu().data.numpy()
            labels = labels.cpu().data.numpy()
            for i in range(len(out)):
                for j in range(len(args.labels)):
                    if out[i, j] < 0.5:
                        out[i, j] = 0.
                    else:
                        out[i, j] = 1.
            train_acc.append(np.sum(labels == out) / (len(out) * len(args.labels)))
            loss.backward()
            PC_optimizer.step()
            E_optimizer.step()
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.6f}'.format(
                epoch, batch_idx * len(images), len(celeba_train_loader.dataset),
                100. * batch_idx / len(celeba_train_loader), loss.item(), train_acc[-1]))
        # testing phase
        test_loss = []
        test_acc = []
        E.eval()
        PC.eval()
        with torch.no_grad():
            for batch_idx, sample in enumerate(celeba_test_loader):
                images = sample['images']
                labels = sample['labels']
                images = images.type(torch.FloatTensor)
                labels = labels[:, args.labels]
                labels = labels.type(torch.FloatTensor)
                if args.gpu:
                    images = images.cuda()
                    labels = labels.cuda()
                features, p1_idx, p2_idx = E(images)
                out = PC(features)
                loss = loss_func(out, labels)
                test_loss.append(loss.cpu().data.numpy())
                out = out.cpu().data.numpy()
                labels = labels.cpu().data.numpy()
                for i in range(len(out)):
                    for j in range(len(args.labels)):
                        if out[i, j] < 0.5:
                            out[i, j] = 0.
                        else:
                            out[i, j] = 1.
                # NOTE(review): divides by args.b even for a smaller final
                # batch, which slightly deflates the reported test accuracy.
                test_acc.append(np.sum(labels == out) / (args.b * len(args.labels)))
            print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                np.mean(test_loss), np.mean(test_acc), len(celeba_test_loader.dataset),
                100. * np.mean(test_acc) / len(celeba_test_loader.dataset)))
        print('Epoch:', epoch, '| train loss: %.4f' % np.mean(train_loss), '| train accuracy: %.4f' % np.mean(train_acc),
              '| test loss: %.4f' % np.mean(test_loss), '| test accuracy: %.4f' % np.mean(test_acc))
        # Persist per-batch test accuracies and model checkpoints per epoch.
        with open('/home/al380/CelebA/output/initial/C_acc'+str(epoch)+'.json', 'w') as file:
            json.dump(test_acc, file)
            file.close()
        test_acc = []
        torch.save(E, '/home/al380/CelebA/output/initial/Encoder_epoch='+str(epoch)+'.pth')
        torch.save(PC, '/home/al380/CelebA/output/initial/Public_Classifier_epoch='+str(epoch)+'.pth')
| [
"numpy.mean",
"Encoder.Encoder",
"os.listdir",
"argparse.ArgumentParser",
"torch.utils.data.random_split",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.DataParallel",
"numpy.sum",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torchvision.transforms.ToTe... | [((1120, 1145), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1143, 1145), False, 'import argparse\n'), ((2435, 2488), 'torch.utils.data.random_split', 'random_split', (['celeba_dataset', '[train_size, test_size]'], {}), '(celeba_dataset, [train_size, test_size])\n', (2447, 2488), False, 'from torch.utils.data import DataLoader, Dataset, random_split\n'), ((2515, 2600), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.b', 'shuffle': 'args.s', 'num_workers': 'args.w'}), '(train_dataset, batch_size=args.b, shuffle=args.s, num_workers=args.w\n )\n', (2525, 2600), False, 'from torch.utils.data import DataLoader, Dataset, random_split\n'), ((2621, 2700), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.b', 'shuffle': 'args.s', 'num_workers': 'args.w'}), '(test_dataset, batch_size=args.b, shuffle=args.s, num_workers=args.w)\n', (2631, 2700), False, 'from torch.utils.data import DataLoader, Dataset, random_split\n'), ((2876, 2893), 'Encoder.Encoder', 'Encoder.Encoder', ([], {}), '()\n', (2891, 2893), False, 'import Encoder\n'), ((3189, 3201), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (3199, 3201), True, 'import torch.nn as nn\n'), ((3336, 3399), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['E_optimizer'], {'step_size': '(10)', 'gamma': '(0.1)'}), '(E_optimizer, step_size=10, gamma=0.1)\n', (3361, 3399), True, 'import torch.optim as optim\n'), ((3419, 3483), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['PC_optimizer'], {'step_size': '(10)', 'gamma': '(0.1)'}), '(PC_optimizer, step_size=10, gamma=0.1)\n', (3444, 3483), True, 'import torch.optim as optim\n'), ((398, 419), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (417, 419), False, 'from torchvision import datasets, transforms\n'), ((534, 553), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', 
(544, 553), False, 'import os\n'), ((580, 599), 'numpy.load', 'np.load', (['label_root'], {}), '(label_root)\n', (587, 599), True, 'import numpy as np\n'), ((3031, 3049), 'torch.nn.DataParallel', 'nn.DataParallel', (['E'], {}), '(E)\n', (3046, 3049), True, 'import torch.nn as nn\n'), ((3096, 3115), 'torch.nn.DataParallel', 'nn.DataParallel', (['PC'], {}), '(PC)\n', (3111, 3115), True, 'import torch.nn as nn\n'), ((780, 813), 'os.path.join', 'os.path.join', (['self.root', 'filename'], {}), '(self.root, filename)\n', (792, 813), False, 'import os\n'), ((5227, 5242), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5240, 5242), False, 'import torch\n'), ((6881, 6906), 'json.dump', 'json.dump', (['test_acc', 'file'], {}), '(test_acc, file)\n', (6890, 6906), False, 'import json\n'), ((6405, 6423), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (6412, 6423), True, 'import numpy as np\n'), ((6425, 6442), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (6432, 6442), True, 'import numpy as np\n'), ((6605, 6624), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (6612, 6624), True, 'import numpy as np\n'), ((6653, 6671), 'numpy.mean', 'np.mean', (['train_acc'], {}), '(train_acc)\n', (6660, 6671), True, 'import numpy as np\n'), ((6709, 6727), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (6716, 6727), True, 'import numpy as np\n'), ((6755, 6772), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (6762, 6772), True, 'import numpy as np\n'), ((4694, 4715), 'numpy.sum', 'np.sum', (['(labels == out)'], {}), '(labels == out)\n', (4700, 4715), True, 'import numpy as np\n'), ((6253, 6274), 'numpy.sum', 'np.sum', (['(labels == out)'], {}), '(labels == out)\n', (6259, 6274), True, 'import numpy as np\n'), ((6496, 6513), 'numpy.mean', 'np.mean', (['test_acc'], {}), '(test_acc)\n', (6503, 6513), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
def calculate_pdf(train, test):
    """Preprocess train/test and build the per-variable probability frame.

    Sign-flips the agreed set of columns, standardizes both frames with the
    training statistics, then evaluates the smoothed target-rate curve of
    every one of the 200 variables.

    Returns (df_pdf, train, test) where df_pdf has one 'var_prob_i' column
    per variable.
    """
    train, test = scale(*reverse(train, test))
    prob_cols = ['var_prob_%d' % i for i in range(200)]
    df_pdf = pd.DataFrame(get_pdfs(train).T, columns=prob_cols)
    return df_pdf, train, test
def logloss(y, yp):
    """Element-wise binary cross-entropy of predictions *yp* against labels *y*.

    Predictions are clipped into [1e-5, 1 - 1e-5] so the logarithms stay
    finite at 0 and 1.
    """
    clipped = np.clip(yp, 1e-5, 1 - 1e-5)
    return -(y * np.log(clipped) + (1 - y) * np.log(1 - clipped))
def reverse(tr, te):
    """Flip the sign of a fixed subset of 'var_*' columns in both frames.

    The column indices below were identified externally as sign-inverted
    relative to the rest; both frames are modified in place and returned.
    """
    flip_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 15, 16, 18, 19,
                22, 24, 25, 26, 27, 41, 29,
                32, 35, 37, 40, 48, 49, 47,
                55, 51, 52, 53, 60, 61, 62, 65, 66, 67, 69,
                70, 71, 74, 78, 79,
                82, 84, 89, 90, 91, 94, 95, 96, 97, 99, 103,
                105, 106, 110, 111, 112, 118, 119, 125, 128,
                130, 133, 134, 135, 137, 138,
                140, 144, 145, 147, 151, 155, 157, 159,
                161, 162, 163, 164, 167, 168,
                170, 171, 173, 175, 176, 179,
                180, 181, 184, 185, 187, 189,
                190, 191, 195, 196, 199]
    for name in ('var_%d' % i for i in flip_ids):
        tr[name] = -tr[name]
        te[name] = -te[name]
    return tr, te
def scale(tr, te):
    """Standardize every 'var_*' column using the *training* mean and std.

    The test frame is scaled with the training statistics so the two remain
    directly comparable.  Both frames are modified in place and returned;
    non-'var_' columns are left untouched.
    """
    # BUG FIX: the old code built `trte = pd.concat([tr, te], axis=0)` and
    # never used it — pure wasted memory/time on a 200k-row copy; removed.
    for col in tr.columns:
        if col.startswith('var_'):
            mean, std = tr[col].mean(), tr[col].std()
            tr[col] = (tr[col] - mean) / std
            te[col] = (te[col] - mean) / std
    return tr, te
def getp_vec_sum(x, x_sort, y, std, c=0.5):
    """Windowed difference of a cumulative series.

    For every query point in *x*, looks up the insertion positions of
    x - std/c and x + std/c inside the sorted grid *x_sort* and returns
    y[right] - y[left].  Indices falling past the end of *y* are clamped
    to the last entry.
    """
    half_width = std / c
    last = y.shape[0] - 1
    p_left = np.searchsorted(x_sort, x - half_width)
    p_right = np.searchsorted(x_sort, x + half_width)
    # clamp out-of-range positions to the final cumulative value
    p_left = np.minimum(p_left, last)
    p_right = np.minimum(p_right, last)
    return y[p_right] - y[p_left]
def get_pdf(tr, col, x_query=None, smooth=3):
    """Smoothed empirical target rate of `col`, evaluated on a query grid.

    Builds cumulative sums of target hits and of sample counts over the
    sorted unique values of `col`; for each query point it takes the window
    of half-width std/smooth around it and returns
    (hits in window) / (samples in window + 1).  Windows holding fewer than
    500 samples fall back to a flat rate of 0.1.

    Parameters
    ----------
    tr : pd.DataFrame — must contain `col` and a 'target' column
        (presumably binary 0/1; confirm against caller).
    col : str — feature column to profile.
    x_query : array-like, optional — query points; defaults to 501 points
        on [-5, 5] (matches the post-`scale` standardized range).
    smooth : float — window-width divisor (larger => narrower window).

    Returns
    -------
    np.ndarray with the smoothed rate at each query point.
    """
    std = tr[col].std()
    df = tr.groupby(col).agg({'target': ['sum', 'count']})
    # flatten the ('target','sum')/('target','count') MultiIndex columns
    cols = ['sum_y', 'count_y']
    df.columns = cols
    df = df.reset_index()
    df = df.sort_values(col)
    y, c = cols
    # cumulative hits / cumulative sample counts over sorted feature values
    df[y] = df[y].cumsum()
    df[c] = df[c].cumsum()
    if x_query is None:
        rmin, rmax, res = -5.0, 5.0, 501
        x_query = np.linspace(rmin, rmax, res)
    dg = pd.DataFrame()
    tm = getp_vec_sum(x_query, df[col].values, df[y].values, std, c=smooth)
    cm = getp_vec_sum(x_query, df[col].values, df[c].values, std, c=smooth) + 1
    dg['res'] = tm / cm
    # low-support windows (< 500 samples) get a flat prior of 0.1
    dg.loc[cm < 500, 'res'] = 0.1
    return dg['res'].values
def get_pdfs(tr):
    """Stack the smoothed target-rate curve of every 'var_i' column.

    Returns an array of shape (200, n_query) with one row per variable.
    """
    curves = [get_pdf(tr, 'var_%d' % i) for i in range(200)]
    return np.vstack(curves)
def print_corr(corr_mat, col, bar=0.97):
    """Print and return the columns highly correlated with *col*.

    Parameters
    ----------
    corr_mat : pd.DataFrame — square correlation matrix
        (e.g. the output of DataFrame.corr()).
    col : str — column whose highly correlated partners are wanted.
    bar : float — correlation threshold (exclusive).

    Returns
    -------
    np.ndarray of index labels where corr_mat[col] > bar.  Note this
    includes *col* itself, whose self-correlation is 1.
    """
    # BUG FIX (dead code): the old body built a rewritten name list
    # `cols_ = ['var_%s' % i.split('_')[-1] ...]` and never used it; removed.
    print(col)
    cols = corr_mat.loc[corr_mat[col] > bar, col].index.values
    print(cols)
    return cols
if __name__ == '__main__':
pass | [
"numpy.clip",
"numpy.searchsorted",
"numpy.log",
"numpy.linspace",
"numpy.vstack",
"pandas.DataFrame",
"pandas.concat",
"warnings.filterwarnings"
] | [((41, 74), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (64, 74), False, 'import warnings\n'), ((472, 501), 'numpy.clip', 'np.clip', (['yp', '(1e-05)', '(1 - 1e-05)'], {}), '(yp, 1e-05, 1 - 1e-05)\n', (479, 501), True, 'import numpy as np\n'), ((1480, 1507), 'pandas.concat', 'pd.concat', (['[tr, te]'], {'axis': '(0)'}), '([tr, te], axis=0)\n', (1489, 1507), True, 'import pandas as pd\n'), ((1856, 1885), 'numpy.searchsorted', 'np.searchsorted', (['x_sort', 'left'], {}), '(x_sort, left)\n', (1871, 1885), True, 'import numpy as np\n'), ((1900, 1930), 'numpy.searchsorted', 'np.searchsorted', (['x_sort', 'right'], {}), '(x_sort, right)\n', (1915, 1930), True, 'import numpy as np\n'), ((2503, 2517), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2515, 2517), True, 'import pandas as pd\n'), ((2909, 2921), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (2918, 2921), True, 'import numpy as np\n'), ((2464, 2492), 'numpy.linspace', 'np.linspace', (['rmin', 'rmax', 'res'], {}), '(rmin, rmax, res)\n', (2475, 2492), True, 'import numpy as np\n'), ((516, 526), 'numpy.log', 'np.log', (['yp'], {}), '(yp)\n', (522, 526), True, 'import numpy as np\n'), ((539, 553), 'numpy.log', 'np.log', (['(1 - yp)'], {}), '(1 - yp)\n', (545, 553), True, 'import numpy as np\n')] |
import numpy as np
# Root directory where all generated parameter files and sbatch scripts land.
directory = '/mnt/home/landerson/lyalpha/'
# Generate one MP-GenIC and one MP-Gadget parameter file for each of 46
# simulations, numbered 5..50, each with its own random IC seed.
for m in range(46):
    n = m + 5
    # fresh seed per run; recorded only inside the written .genic file
    seed = np.random.randint(100000)
    #make paramfile for genic
    # NOTE(review): inside the template below, 'PrimordialIndex 0.971 ='
    # looks garbled — MP-GenIC parameters are 'Name = value', so it probably
    # should read 'PrimordialIndex = 0.971'.  Confirm against MP-Gadget docs.
    genicParamString = """OutputDir = output # Directory for output
FileBase = IC_varied_{0}              # Base-filename of output files
InvertPhase = 0
UnitaryAmplitude = 0
Ngrid = 256        # Size of cubic grid on which to create particles.
BoxSize = 20000   # Periodic box size of simulation
Omega0 = 0.2814     # Total matter density  (at z=0)
OmegaLambda = 0.7186   # Cosmological constant (at z=0)
OmegaBaryon = 0.0464  # Baryon density        (at z=0)
ProduceGas = 1 # 1 = Produce gas  0 = no gas, just DM.
HubbleParam = 0.697     # Hubble paramater (may be used for power spec parameterization)
Redshift = 99        # Starting redshift
Sigma8 = 0.810       # power spectrum normalization
WhichSpectrum = 2     # "1" selects Eisenstein & Hu spectrum,
                                 # "2" selects a tabulated power spectrum in
                                 # the file 'FileWithInputSpectrum'
                                 # otherwise, Efstathiou parametrization is used
FileWithInputSpectrum = /mnt/xfs1/home/landerson/src/MP-Gadget/examples/powerspectrum-wmap9.txt   # filename of tabulated input
                                                        # spectrum (if used)
InputSpectrum_UnitLength_in_cm  = 3.085678e24 # defines length unit of tabulated
                                                 # input spectrum in cm/h.
                                                 # Note: This can be chosen different from UnitLength_in_cm
PrimordialIndex 0.971 =  # may be used to tilt the primordial index
Seed = {1}    #  seed for IC-generator
UnitLength_in_cm = 3.085678e21   # defines length unit of output (in cm/h)
UnitMass_in_g = 1.989e43      # defines mass unit of output (in g/cm)
UnitVelocity_in_cm_per_s = 1e5 # defines velocity unit of output (in cm/sec)
""".format(n, seed)
    with open(directory + 'paramfileVaried{0}.genic'.format(n), 'w') as f:
        f.write(genicParamString)
    #make paramfile for gadget
    gadgetParamString = """# Relevant files
InitCondFile = output/IC_varied_{0}
OutputDir = /mnt/cephtest/landerson/lyalphaVaried{1}
#OutputDir = /mnt/ceph/users/landerson/lyalphaVaried1
TreeCoolFile = /mnt/xfs1/home/landerson/src/MP-Gadget/examples/TREECOOL_fg_june11
OutputList = "0.02,0.1,0.192307692308,0.2,0.208333333333,0.217391304348,0.227272727273,0.238095238095,0.25,0.263157894737,0.277777777778,0.294117647059,0.3125,0.33333"
#OutputListOn=1
Nmesh = 512
# CPU time -limit
TimeLimitCPU = 43000 #= 8 hours
# Code options
# Characteristics of run
TimeMax = 0.33333
Omega0 = 0.2814      # Total matter density  (at z=0)
OmegaLambda = 0.7186      # Cosmological constant (at z=0)
OmegaBaryon = 0.0464     # Baryon density        (at z=0)
HubbleParam = 0.697      # Hubble paramater (may be used for power spec parameterization)
CoolingOn = 1
StarformationOn = 1
RadiationOn = 1
HydroOn = 1
BlackHoleOn = 0
WindOn = 0
StarformationCriterion = density
MassiveNuLinRespOn = 0
# Further parameters of SPH
# #Only kernel supported by fake_spectra
DensityKernelType = cubic
InitGasTemp = 270.
MinGasTemp = 100
# Memory allocation
PartAllocFactor = 2.0
#----------------------SFR Stuff-------------------------
CritPhysDensity = 0       #  critical physical  density for star formation in
                          #  hydrogen number density in  cm^(-3)
CritOverDensity = 1000   #  overdensity threshold value
QuickLymanAlphaProbability = 1 # Set to 1.0 to turn dense gas directly into stars.
SnapshotWithFOF = 1
WindModel = nowind
""".format(n, n)
    with open(directory + 'paramfileVaried{0}.gadget'.format(n), 'w') as f:
        f.write(gadgetParamString)
#make sbatch file, with n/10 sims per sbatch file
#I think we should shoot for 50 sims so that's 5 per sbatch file
dateLine = "date \n"
# Ten Var*.sbatch scripts; script k runs simulations k, k+10, ..., k+40.
for j in range(10):
    k = j + 5
    sbatchStringStart = """#!/bin/bash
#SBATCH --exclusive
#SBATCH --nodes=1
#SBATCH -o lyaV{0}.out -e lyaV{1}.err
#SBATCH --reservation andras_test
#SBATCH -p cca
#SBATCH --qos cca
module load gcc
module load openmpi
module load lib/gsl/2.3
ROOT=/mnt/xfs1/home/landerson/src/MP-Gadget/build
export OMP_NUM_THREADS=1
date
""".format(k, k)
    with open(directory + 'Var{0}.sbatch'.format(k), 'w') as f:
        f.write(sbatchStringStart)
        for i in range(5):
            num = k + 10*i
            genicLine = "srun -n 28 -c 1 --mpi=pmi2 $ROOT/MP-GenIC paramfileVaried{0}.genic  || exit 1 \n".format(num)
            gadgetLine = "srun -n 28 -c 1 --mpi=pmi2 $ROOT/MP-Gadget paramfileVaried{0}.gadget 2 014 || exit 1 \n".format(num)
            #f.write(genicLine)
            f.write(gadgetLine)
        f.write(dateLine)
# Ten psVar*.sbatch post-processing scripts (spectra extraction).
# NOTE(review): these reuse the same lyaV{k}.out/.err names as the Var*.sbatch
# scripts above, so their logs can clobber each other — confirm intended.
for j in range(10):
    k = j + 5
    sbatchStringStart = """#!/bin/bash
#SBATCH --exclusive
#SBATCH --nodes=1
#SBATCH -o lyaV{0}.out -e lyaV{1}.err
#SBATCH --reservation andras_test
#SBATCH -p cca
#SBATCH --qos cca
module load gcc
module load openmpi
module load lib/gsl/2.3
ROOT=/mnt/xfs1/home/landerson/src/MP-Gadget/build
export OMP_NUM_THREADS=1
date
""".format(k, k)
    with open(directory + 'psVar{0}.sbatch'.format(k), 'w') as f:
        f.write(sbatchStringStart)
        for i in range(5):
            num = k + 10*i
            pspipeLine = "srun python3 makeSpectra.py lyalphaVaried{0} \n".format(num)
            f.write(pspipeLine)
| [
"numpy.random.randint"
] | [((109, 134), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (126, 134), True, 'import numpy as np\n')] |
import time
import wandb
import numpy as np
from functools import reduce
from itertools import chain
import torch
from onpolicy.runner.separated.base_runner_multitype import Runner
def _t2n(x):
return x.detach().cpu().numpy()
class SMACRunner(Runner):
"""Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details."""
def __init__(self, config):
super(SMACRunner, self).__init__(config)
def run(self):
self.warmup()
start = time.time()
episodes = int(
self.num_env_steps) // self.episode_length // self.n_rollout_threads
last_battles_game = np.zeros(self.n_rollout_threads, dtype=np.float32)
last_battles_won = np.zeros(self.n_rollout_threads, dtype=np.float32)
for episode in range(episodes):
for unit_type in range(self.unit_type_bits):
if self.use_linear_lr_decay:
self.trainer[unit_type].policy.lr_decay(episode, episodes)
for step in range(self.episode_length):
# Sample actions
values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(
step)
# Obser reward and next obs
# print(actions.shape)
obs, share_obs, rewards, dones, infos, available_actions = self.envs.step(
actions)
data = obs, share_obs, rewards, dones, infos, available_actions, \
values, actions, action_log_probs, \
rnn_states, rnn_states_critic
# insert data into buffer
self.insert(data)
# compute return and update network
self.compute()
train_infos = self.train()
# post process
total_num_steps = (episode + 1) * \
self.episode_length * self.n_rollout_threads
# save model
if (episode % self.save_interval == 0 or episode == episodes - 1):
self.save()
# log information
if episode % self.log_interval == 0:
end = time.time()
print("\n Map {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
.format(self.all_args.map_name,
self.algorithm_name,
self.experiment_name,
episode,
episodes,
total_num_steps,
self.num_env_steps,
int(total_num_steps / (end - start))))
if self.env_name == "StarCraft2":
battles_won = []
battles_draw = []
battles_game = []
incre_battles_won = []
incre_battles_draw = []
incre_battles_game = []
for i, info in enumerate(infos):
if 'battles_won' in info[0].keys():
battles_won.append(info[0]['battles_won'])
incre_battles_won.append(
info[0]['battles_won']-last_battles_won[i])
if 'battles_draw' in info[0].keys():
battles_draw.append(info[0]['battles_draw'])
incre_battles_draw.append(
info[0]['battles_won']-last_battles_won[i])
if 'battles_game' in info[0].keys():
battles_game.append(info[0]['battles_game'])
incre_battles_game.append(
info[0]['battles_game']-last_battles_game[i])
incre_win_rate = np.sum(
incre_battles_won)/np.sum(incre_battles_game) if np.sum(incre_battles_game) > 0 else 0.0
incre_draw_rate = np.sum(
incre_battles_draw)/np.sum(incre_battles_game) if np.sum(incre_battles_game) > 0 else 0.0
print("incre win rate is {}.".format(incre_win_rate))
if self.use_wandb:
wandb.log({"incre_win_rate": incre_win_rate},
step=total_num_steps)
# wandb.log({"incre_draw_rate": incre_win_rate},
# step=total_num_steps)
wandb.log({"average_step_rewards": np.mean(self.buffer[0].rewards)}, step=total_num_steps)
else:
self.writter.add_scalars(
"incre_win_rate", {"incre_win_rate": incre_win_rate}, total_num_steps)
self.writter.add_scalars(
"incre_draw_rate", {"incre_draw_rate": incre_draw_rate}, total_num_steps)
last_battles_game = battles_game
last_battles_won = battles_won
for unit_type in range(self.unit_type_bits):
train_infos[unit_type].update(
{'dead_ratio': 1 - self.buffer[unit_type].active_masks.sum() / reduce(lambda x, y: x*y, list(self.buffer[unit_type].active_masks.shape))})
# train_infos[unit_type].update({'average_step_rewards': np.mean(self.buffer[unit_type].rewards)})
self.log_train(train_infos, total_num_steps)
# eval
if episode % self.eval_interval == 0 and self.use_eval:
self.eval(total_num_steps)
# print("saving")
# self.envs.envs[0].save_replay()
# print("saved")
def warmup(self):
# reset env
obs, share_obs, available_actions = self.envs.reset()
# _obs = obs.copy()
# _share_obs = share_obs.copy()
# _available_action = available_actions.copy()
if not self.use_centralized_V:
share_obs = obs
bit = 0
for count in self.type_count:
self.buffer[bit].share_obs[0] = share_obs[:, :count]
self.buffer[bit].obs[0] = obs[:, :count]
self.buffer[bit].available_actions[0] = available_actions[:, :count]
bit += 1
obs = obs[:, count:]
share_obs = share_obs[:, count:]
available_actions = available_actions[:, count:]
    @ torch.no_grad()
    def collect(self, step):
        """Query every unit-type policy at buffer index `step` (no gradients).

        Each per-type policy acts on its own agents; per-type outputs are
        concatenated back along the agent axis (axis 1).

        Returns (values, actions, action_log_probs, rnn_states,
        rnn_states_critics), each shaped [n_rollout_threads, n_agents, ...].
        """
        values = []
        actions = []
        action_log_probs = []
        rnn_states = []
        rnn_states_critics = []
        for unit_type in range(self.unit_type_bits):
            self.trainer[unit_type].prep_rollout()
            # the buffers store [threads, agents, ...]; np.concatenate flattens
            # the (thread, agent) axes into one batch for the policy
            value, action, action_log_prob, rnn_state, rnn_states_critic \
                = self.trainer[unit_type].policy.get_actions(np.concatenate(self.buffer[unit_type].share_obs[step]),
                                                             np.concatenate(self.buffer[unit_type].obs[step]),
                                                             np.concatenate(self.buffer[unit_type].rnn_states[step]),
                                                             np.concatenate(self.buffer[unit_type].rnn_states_critic[step]),
                                                             np.concatenate(self.buffer[unit_type].masks[step]),
                                                             np.concatenate(self.buffer[unit_type].available_actions[step]))
            # split the flattened batch back into per-thread rows
            value = np.array(np.split(_t2n(value), self.n_rollout_threads))
            action = np.array(np.split(_t2n(action), self.n_rollout_threads))
            action_log_prob = np.array(np.split(_t2n(action_log_prob), self.n_rollout_threads))
            rnn_state = np.array(np.split(_t2n(rnn_state), self.n_rollout_threads))
            rnn_states_critic = np.array(np.split(_t2n(rnn_states_critic), self.n_rollout_threads))
            values.append(value)
            actions.append(action)
            action_log_probs.append(action_log_prob)
            rnn_states.append(rnn_state)
            rnn_states_critics.append(rnn_states_critic)
        # [self.envs, agents, dim] — stitch the unit types together on axis 1
        values = np.concatenate(values,1)
        actions = np.concatenate(actions,1)
        action_log_probs = np.concatenate(action_log_probs,1)
        rnn_states = np.concatenate(rnn_states,1)
        rnn_states_critics = np.concatenate(rnn_states_critics,1)
        return values, actions, action_log_probs, rnn_states, rnn_states_critics
    def insert(self, data):
        """Distribute one environment transition into the per-type buffers.

        `data` is the tuple assembled in run(): env outputs followed by the
        policy outputs from collect().  Masks are rebuilt here:
        - masks: 0 where the whole episode (all agents of a thread) ended,
        - active_masks: 0 for individually dead agents, reset on episode end,
        - bad_masks: 0 where the env flagged a time-limit truncation.
        """
        obs, share_obs, rewards, dones, infos, available_actions, \
            values, actions, action_log_probs, rnn_states, rnn_states_critic = data
        # an episode ends only when *every* agent in the thread is done
        dones_env = np.all(dones, axis=1)
        # zero the recurrent state of finished threads
        rnn_states[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
        rnn_states_critic[dones_env == True] = np.zeros(((dones_env == True).sum(
        ), self.num_agents, *self.buffer[0].rnn_states_critic.shape[3:]), dtype=np.float32)
        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
        active_masks = np.ones(
            (self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
        active_masks[dones == True] = np.zeros(
            ((dones == True).sum(), 1), dtype=np.float32)
        # episode resets revive everyone, overriding individual deaths above
        active_masks[dones_env == True] = np.ones(
            ((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
        bad_masks = np.array([[[0.0] if info[agent_id]['bad_transition'] else [
            1.0] for agent_id in range(self.num_agents)] for info in infos])
        if not self.use_centralized_V:
            share_obs = obs
        bit = 0
        # same type-by-type agent-axis slicing scheme as warmup()/collect()
        for count in self.type_count:
            self.buffer[bit].insert(share_obs[:, :count],
                                    obs[:, :count],
                                    rnn_states[:, :count],
                                    rnn_states_critic[:, :count],
                                    actions[:, :count],
                                    action_log_probs[:, :count],
                                    values[:, :count],
                                    rewards[:, :count],
                                    masks[:, :count],
                                    bad_masks[:, :count],
                                    active_masks[:, :count],
                                    available_actions[:, :count])
            share_obs = share_obs[:, count:]
            obs = obs[:, count:]
            rnn_states = rnn_states[:, count:]
            rnn_states_critic = rnn_states_critic[:, count:]
            actions = actions[:, count:]
            action_log_probs = action_log_probs[:, count:]
            values = values[:, count:]
            rewards = rewards[:, count:]
            masks = masks[:, count:]
            bad_masks = bad_masks[:, count:]
            active_masks = active_masks[:, count:]
            available_actions = available_actions[:, count:]
            bit += 1
# def log_train(self, train_infos, total_num_steps):
# train_infos["average_step_rewards"] = np.mean(self.buffer.rewards)
# for k, v in train_infos.items():
# if self.use_wandb:
# wandb.log({k: v}, step=total_num_steps)
# else:
# self.writter.add_scalars(k, {k: v}, total_num_steps)
    @ torch.no_grad()
    def eval(self, total_num_steps):
        """Run `eval_episodes` deterministic episodes and log the win rate.

        Unlike run(), actions are greedy (deterministic=True) and nothing is
        written to the training buffers; episode rewards and SC2 wins are
        accumulated until enough finished episodes have been observed.
        """
        eval_battles_won = 0
        eval_episode = 0
        eval_episode_rewards = []
        one_episode_rewards = []
        eval_obs, eval_share_obs, eval_available_actions = self.eval_envs.reset()
        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents,
                                    self.recurrent_N, self.hidden_size), dtype=np.float32)
        eval_masks = np.ones((self.n_eval_rollout_threads,
                             self.num_agents, 1), dtype=np.float32)
        while True:
            eval_actions = []
            _eval_rnn_states = []
            # act type-by-type on each type's leading slice of the agent axis
            for unit_type in range(self.unit_type_bits):
                self.trainer[unit_type].prep_rollout()
                eval_action, eval_rnn_state = self.trainer[unit_type].policy.act(eval_obs[:, :self.type_count[unit_type]],
                                                                                 eval_rnn_states[:, :self.type_count[unit_type]],
                                                                                 eval_masks[:, :self.type_count[unit_type]],
                                                                                 eval_available_actions[:, :self.type_count[unit_type]],
                                                                                 deterministic=True)
                eval_actions.append(_t2n(eval_action))
                _eval_rnn_states.append(_t2n(eval_rnn_state))
                # drop the consumed slice so the next type leads the axis
                eval_obs = eval_obs[:, self.type_count[unit_type]:]
                eval_rnn_states = eval_rnn_states[:, self.type_count[unit_type]:]
                eval_masks = eval_masks[:, self.type_count[unit_type]:]
                eval_available_actions = eval_available_actions[:, self.type_count[unit_type]:]
            eval_actions = np.concatenate(eval_actions,axis=1)
            eval_rnn_states = np.concatenate(_eval_rnn_states,axis=1)
            # Observe reward and next obs
            eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = self.eval_envs.step(
                eval_actions)
            one_episode_rewards.append(eval_rewards)
            eval_dones_env = np.all(eval_dones, axis=1)
            # reset recurrent state / masks for threads whose episode finished
            eval_rnn_states[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(
            ), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
            eval_masks = np.ones(
                (self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
            eval_masks[eval_dones_env == True] = np.zeros(
                ((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)
            for eval_i in range(self.n_eval_rollout_threads):
                if eval_dones_env[eval_i]:
                    eval_episode += 1
                    eval_episode_rewards.append(
                        np.sum(one_episode_rewards, axis=0))
                    one_episode_rewards = []
                    if eval_infos[eval_i][0]['won']:
                        eval_battles_won += 1
            if eval_episode >= self.all_args.eval_episodes:
                eval_episode_rewards = np.array(eval_episode_rewards)
                eval_env_infos = {
                    'eval_average_episode_rewards': eval_episode_rewards}
                self.log_env(eval_env_infos, total_num_steps)
                eval_win_rate = eval_battles_won/eval_episode
                print("eval win rate is {}.".format(eval_win_rate))
                if self.use_wandb:
                    wandb.log({"eval_win_rate": eval_win_rate},
                              step=total_num_steps)
                else:
                    self.writter.add_scalars(
                        "eval_win_rate", {"eval_win_rate": eval_win_rate}, total_num_steps)
                break
        print('\nSaving replay')
        self.eval_envs.envs[0].save_replay()
print('Replay saved') | [
"numpy.mean",
"wandb.log",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.concatenate",
"torch.no_grad",
"numpy.all",
"time.time"
] | [((6504, 6519), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6517, 6519), False, 'import torch\n'), ((12726, 12741), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12739, 12741), False, 'import torch\n'), ((517, 528), 'time.time', 'time.time', ([], {}), '()\n', (526, 528), False, 'import time\n'), ((663, 713), 'numpy.zeros', 'np.zeros', (['self.n_rollout_threads'], {'dtype': 'np.float32'}), '(self.n_rollout_threads, dtype=np.float32)\n', (671, 713), True, 'import numpy as np\n'), ((741, 791), 'numpy.zeros', 'np.zeros', (['self.n_rollout_threads'], {'dtype': 'np.float32'}), '(self.n_rollout_threads, dtype=np.float32)\n', (749, 791), True, 'import numpy as np\n'), ((8286, 8311), 'numpy.concatenate', 'np.concatenate', (['values', '(1)'], {}), '(values, 1)\n', (8300, 8311), True, 'import numpy as np\n'), ((8329, 8355), 'numpy.concatenate', 'np.concatenate', (['actions', '(1)'], {}), '(actions, 1)\n', (8343, 8355), True, 'import numpy as np\n'), ((8382, 8417), 'numpy.concatenate', 'np.concatenate', (['action_log_probs', '(1)'], {}), '(action_log_probs, 1)\n', (8396, 8417), True, 'import numpy as np\n'), ((8438, 8467), 'numpy.concatenate', 'np.concatenate', (['rnn_states', '(1)'], {}), '(rnn_states, 1)\n', (8452, 8467), True, 'import numpy as np\n'), ((8496, 8533), 'numpy.concatenate', 'np.concatenate', (['rnn_states_critics', '(1)'], {}), '(rnn_states_critics, 1)\n', (8510, 8533), True, 'import numpy as np\n'), ((9230, 9251), 'numpy.all', 'np.all', (['dones'], {'axis': '(1)'}), '(dones, axis=1)\n', (9236, 9251), True, 'import numpy as np\n'), ((9727, 9798), 'numpy.ones', 'np.ones', (['(self.n_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (9734, 9798), True, 'import numpy as np\n'), ((9934, 10005), 'numpy.ones', 'np.ones', (['(self.n_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, 1), 
dtype=np.float32)\n', (9941, 10005), True, 'import numpy as np\n'), ((13011, 13125), 'numpy.zeros', 'np.zeros', (['(self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.\n hidden_size)'], {'dtype': 'np.float32'}), '((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N,\n self.hidden_size), dtype=np.float32)\n', (13019, 13125), True, 'import numpy as np\n'), ((13179, 13255), 'numpy.ones', 'np.ones', (['(self.n_eval_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (13186, 13255), True, 'import numpy as np\n'), ((14583, 14619), 'numpy.concatenate', 'np.concatenate', (['eval_actions'], {'axis': '(1)'}), '(eval_actions, axis=1)\n', (14597, 14619), True, 'import numpy as np\n'), ((14649, 14689), 'numpy.concatenate', 'np.concatenate', (['_eval_rnn_states'], {'axis': '(1)'}), '(_eval_rnn_states, axis=1)\n', (14663, 14689), True, 'import numpy as np\n'), ((15229, 15255), 'numpy.all', 'np.all', (['eval_dones'], {'axis': '(1)'}), '(eval_dones, axis=1)\n', (15235, 15255), True, 'import numpy as np\n'), ((15464, 15554), 'numpy.ones', 'np.ones', (['(self.all_args.n_eval_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=\n np.float32)\n', (15471, 15554), True, 'import numpy as np\n'), ((2181, 2192), 'time.time', 'time.time', ([], {}), '()\n', (2190, 2192), False, 'import time\n'), ((6925, 6979), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].share_obs[step]'], {}), '(self.buffer[unit_type].share_obs[step])\n', (6939, 6979), True, 'import numpy as np\n'), ((7041, 7089), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].obs[step]'], {}), '(self.buffer[unit_type].obs[step])\n', (7055, 7089), True, 'import numpy as np\n'), ((7151, 7206), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].rnn_states[step]'], {}), 
'(self.buffer[unit_type].rnn_states[step])\n', (7165, 7206), True, 'import numpy as np\n'), ((7268, 7330), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].rnn_states_critic[step]'], {}), '(self.buffer[unit_type].rnn_states_critic[step])\n', (7282, 7330), True, 'import numpy as np\n'), ((7392, 7442), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].masks[step]'], {}), '(self.buffer[unit_type].masks[step])\n', (7406, 7442), True, 'import numpy as np\n'), ((7504, 7566), 'numpy.concatenate', 'np.concatenate', (['self.buffer[unit_type].available_actions[step]'], {}), '(self.buffer[unit_type].available_actions[step])\n', (7518, 7566), True, 'import numpy as np\n'), ((16212, 16242), 'numpy.array', 'np.array', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (16220, 16242), True, 'import numpy as np\n'), ((16599, 16664), 'wandb.log', 'wandb.log', (["{'eval_win_rate': eval_win_rate}"], {'step': 'total_num_steps'}), "({'eval_win_rate': eval_win_rate}, step=total_num_steps)\n", (16608, 16664), False, 'import wandb\n'), ((4303, 4370), 'wandb.log', 'wandb.log', (["{'incre_win_rate': incre_win_rate}"], {'step': 'total_num_steps'}), "({'incre_win_rate': incre_win_rate}, step=total_num_steps)\n", (4312, 4370), False, 'import wandb\n'), ((15931, 15966), 'numpy.sum', 'np.sum', (['one_episode_rewards'], {'axis': '(0)'}), '(one_episode_rewards, axis=0)\n', (15937, 15966), True, 'import numpy as np\n'), ((3966, 3992), 'numpy.sum', 'np.sum', (['incre_battles_game'], {}), '(incre_battles_game)\n', (3972, 3992), True, 'import numpy as np\n'), ((3885, 3910), 'numpy.sum', 'np.sum', (['incre_battles_won'], {}), '(incre_battles_won)\n', (3891, 3910), True, 'import numpy as np\n'), ((3936, 3962), 'numpy.sum', 'np.sum', (['incre_battles_game'], {}), '(incre_battles_game)\n', (3942, 3962), True, 'import numpy as np\n'), ((4126, 4152), 'numpy.sum', 'np.sum', (['incre_battles_game'], {}), '(incre_battles_game)\n', (4132, 4152), True, 'import numpy as 
np\n'), ((4044, 4070), 'numpy.sum', 'np.sum', (['incre_battles_draw'], {}), '(incre_battles_draw)\n', (4050, 4070), True, 'import numpy as np\n'), ((4096, 4122), 'numpy.sum', 'np.sum', (['incre_battles_game'], {}), '(incre_battles_game)\n', (4102, 4122), True, 'import numpy as np\n'), ((4595, 4626), 'numpy.mean', 'np.mean', (['self.buffer[0].rewards'], {}), '(self.buffer[0].rewards)\n', (4602, 4626), True, 'import numpy as np\n')] |
# @Author : <NAME>
# @Email : <EMAIL>
# @Personal homepage : https://coderskychen.cn
import numpy as np
import pdb
def valid(files_scores=None):
    """Late-fuse per-stream classification scores and report top-1 accuracy.

    Each .npz file must hold an object array ``scores`` of shape (N, 2)
    where scores[i, 0] is the class-score vector of sample i and
    scores[i, 1] its ground-truth label.  The score vectors of all streams
    are summed before the argmax (simple late fusion).

    Parameters
    ----------
    files_scores : list of str, optional
        Paths to the .npz score files; defaults to the original
        flow/rgb TSN outputs.

    Returns
    -------
    float : fused top-1 accuracy (also printed).
    """
    if files_scores is None:
        files_scores = ['/home/mcg/cxk/action-recognition-zoo/results/tsn-flow/output/flow.npz',
                        '/home/mcg/cxk/action-recognition-zoo/results/tsn-rgb/output/rgb.npz']
    allsum = None
    labels = None
    for filename in files_scores:
        print(filename)
        # object arrays require allow_pickle=True on modern numpy
        data = np.load(filename, allow_pickle=True)
        scores = data['scores']
        ss = scores[:, 0]
        if labels is None:
            # BUG FIX: keep a single 1-D label vector.  The old code appended
            # one label array per file to a list, so `preds == labels`
            # broadcast to (n_files, N) and over-counted correct predictions.
            labels = np.asarray(scores[:, 1].tolist())
        # generalization: infer the class count from the vectors instead of
        # hard-coding 174 classes / 11522 samples
        stream = np.stack([np.asarray(x).reshape(-1) for x in ss])
        allsum = stream if allsum is None else allsum + stream
    preds = np.argmax(allsum, axis=1)
    num_correct = np.sum(preds == labels)
    acc = num_correct * 1.0 / preds.shape[0]
    print('acc=%.3f' % (acc))
    return acc
# Run the late-fusion evaluation when executed as a script.
if __name__ == '__main__':
    valid()
| [
"numpy.sum",
"numpy.zeros",
"numpy.load",
"numpy.argmax"
] | [((312, 334), 'numpy.zeros', 'np.zeros', (['[11522, 174]'], {}), '([11522, 174])\n', (320, 334), True, 'import numpy as np\n'), ((659, 684), 'numpy.argmax', 'np.argmax', (['allsum'], {'axis': '(1)'}), '(allsum, axis=1)\n', (668, 684), True, 'import numpy as np\n'), ((703, 726), 'numpy.sum', 'np.sum', (['(preds == labels)'], {}), '(preds == labels)\n', (709, 726), True, 'import numpy as np\n'), ((424, 441), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (431, 441), True, 'import numpy as np\n')] |
import pandapower as pp
from pandapower import runpp
from pandapower.plotting import simple_plotly, pf_res_plotly
import pandapower.networks as networks
from citylearn import CityLearn
from citylearn import RBC_Agent
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import random
class GridLearn(CityLearn):
    def __init__(self, data_path, building_attributes, weather_file, solar_profile, building_ids, hourly_timesteps, buildings_states_actions = None, simulation_period = (0,8759), cost_function = ['ramping','1-load_factor','average_daily_peak', 'peak_demand','net_electricity_consumption'], central_agent = False, verbose = 0, n_buildings_per_bus=4, pv_penetration=0.3, test=False):
        # Build the PandaPower network first so the building count can be
        # derived from its bus count, then initialize CityLearn with that
        # many buildings and attach them to the grid.
        # NOTE(review): the mutable default for cost_function is shared across
        # instances — safe only while it is never mutated in place.
        self.test = test
        if self.test:
            self.net = self.make_test_grid()
        else:
            self.net = self.make_grid()
        # one building slot per n_buildings_per_bus on every non-slack bus
        n_buildings = n_buildings_per_bus * (len(self.net.bus)-1)
        super().__init__(data_path, building_attributes, weather_file, solar_profile, building_ids, hourly_timesteps, buildings_states_actions, simulation_period, cost_function, central_agent, verbose, n_buildings)
        self.house_nodes = self.add_houses(n_buildings_per_bus, pv_penetration)
        # for some reason it seems like the output_writer for panda power only applies to deterministic time series
        # so power-flow results are accumulated manually into these frames
        self.output = {'p_mw_load':{'var':'p_mw', 'parent':'res_load', 'values':pd.DataFrame()},
                       'q_mvar_gen':{'var':'q_mvar', 'parent':'res_gen', 'values':pd.DataFrame()},
                       'vm_pu':{'var':'vm_pu', 'parent':'res_bus', 'values':pd.DataFrame()},
                       'i_ka':{'var':'i_ka', 'parent':'res_line', 'values':pd.DataFrame()},
                       'p_mw_stor':{'var':'p_mw', 'parent':'res_storage', 'values':pd.DataFrame()},
                       'p_mw_gen':{'var':'p_mw', 'parent':'res_gen', 'values':pd.DataFrame()}}
        self.system_losses = []
        self.voltage_dev = []
        self.pv_penetration = pv_penetration
        self.n_buildings_per_bus = n_buildings_per_bus
def make_test_grid(self):
net = pp.create_empty_network(name="single bus network")
ex_grid = pp.create_bus(net, name=0, vn_kv=12.66, geodata=(0,0))
pp.create_ext_grid(net, ex_grid, vm_pu=1.02, va_degree=50)
bus1 = pp.create_bus(net, name=1, vn_kv=12.66, geodata=(0,1))
load1 = pp.create_load(net, 1, p_mw=0)
main_line = pp.create_line(net, ex_grid, bus1, 0.5, std_type="N2XS(FL)2Y 1x300 RM/35 64/110 kV") # arbitrary line
return net
def make_grid(self):
# make a grid that fits the buildings generated for CityLearn
net = networks.case33bw()
return net
def add_houses(self, n, pv_penetration):
houses = []
b = 0
# find nodes in the network with residential voltage levels and load infrastructure
# get the node indexes by their assigned names
load_nodes = self.net.load['bus']
res_voltage_nodes = self.net.bus['name'][self.net.bus['vn_kv'] == 12.66]
res_load_nodes = set(load_nodes) & set(res_voltage_nodes)
# add a residential distribution feeder type to the PandaPower network
# Assume for now ~250A per home on "94-AL1/15-ST1A 0.4" lines rated @ 350A
# for geometric placement of nodes
delta_x = 0.2
delta_y = 0.2
all_buildings = list(self.buildings.keys())
for existing_node in res_load_nodes:
# remove the existing arbitrary load
self.net.load.drop(self.net.load[self.net.load.bus == existing_node].index, inplace=True)
# get geodata of this load
existing_x = self.net.bus_geodata['x'][existing_node]
existing_y = self.net.bus_geodata['y'][existing_node]
# add n houses at each of these nodes
for i in range(n):
bid = all_buildings[b] # get a building in the order they were initialized
b += 1
new_x = existing_x + np.cos(2 * np.pi/n * i) * delta_x
new_y = existing_y + np.sin(2 * np.pi/n * i) * delta_y
new_house = pp.create_bus(self.net, name=bid, vn_kv=12.66, max_vm_pu=1.2, min_vm_pu=0.8, zone=1, geodata=(new_x, new_y))
new_feeder = pp.create_line(self.net, new_house, existing_node, 0.5, "94-AL1/15-ST1A 0.4", max_loading_percent=100)
new_house_load = pp.create_load(self.net, new_house, 0, name=bid)
# if self.buildings_states_actions[bid]['pv_curtail']:
if np.random.uniform() <= pv_penetration:
new_house_pv = pp.create_sgen(self.net, new_house, 0.0, name=bid)
houses += [new_house]
return houses
# Change to citylearn.py: aux_grid_function is called at the end of .step()
    def aux_grid_func(self):
        """Push building demand/generation into the grid and solve a power flow.

        Called at the end of CityLearn's .step(): load and static-generator
        tables are updated from the building models, the AC power flow is
        solved, loss/voltage metrics are appended and per-timestep results
        are accumulated into self.output.
        """
        for i in self.net.load.index:
            # A small base load in test mode -- presumably for numerical
            # stability of the tiny test grid; TODO confirm.
            if self.test:
                current_load = 0.01
            else:
                current_load = 0
            h = self.net.load.name[i]
            # Sum the building's electric demands; the 0.001 factor is
            # presumably a kW -> MW conversion -- TODO confirm units.
            current_load += self.buildings[h].get_dhw_electric_demand() * 0.001
            current_load += self.buildings[h].get_non_shiftable_load() * 0.001
            current_load += self.buildings[h].get_cooling_electric_demand() * 0.001
            # TBD const_i_percent by appliance (check PNNL reports)
            # NOTE(review): the next line is immediately overwritten by the
            # one after it -- one of the two was probably meant to target a
            # different ZIP coefficient (e.g. 'const_z_percent'); confirm.
            self.net.load.at[i, 'const_i_percent'] = 2.0 * self.buildings[h].get_cooling_electric_demand() * 0.001 / current_load
            self.net.load.at[i, 'const_i_percent'] = 3.0 * self.buildings[h].get_non_shiftable_load() * 0.001 / current_load
            # 0.9 looks like a power-factor assumption -- TODO confirm.
            self.net.load.at[i, 'p_mw'] = 0.9 * current_load
            self.net.load.at[i, 'sn_mva'] = current_load
        for j in self.net.sgen.index:
            h = self.net.sgen.name[j]
            # Split PV output into P/Q using the building's lag angle.
            current_gen = self.buildings[h].solar_power * 0.001
            phi = self.buildings[h].v_lag
            self.net.sgen.at[j,'p_mw'] = current_gen*np.cos(phi)
            self.net.sgen.at[j,'q_mvar'] = current_gen*np.sin(phi)
        runpp(self.net, enforce_q_lims=True)
        self.calc_system_losses()
        self.calc_voltage_dev()
        # write these value to the output writer:
        for k, v in self.output.items():
            self.output[k]['values'][str(self.time_step)] = self.net[v['parent']][v['var']]
def calc_system_losses(self):
self.system_losses += list((self.net.res_ext_grid.p_mw + self.net.res_load.p_mw.sum() - self.net.res_gen.p_mw.sum()).values)
def calc_voltage_dev(self):
self.voltage_dev += list(abs((self.net.res_bus['vm_pu']-1)/0.05))
    def get_rbc_cost(self):
        # Running the reference rule-based controller to find the baseline cost
        """Compute (once) the baseline cost of the rule-based controller.

        Rolls out a full episode with the reference RBC agent on a fresh copy
        of this environment and caches the result in self.cost_rbc.
        """
        if self.cost_rbc is None:
            # Fresh environment with the same configuration as this one.
            env_rbc = GridLearn(self.data_path, self.building_attributes, self.weather_file, self.solar_profile, self.building_ids, hourly_timesteps=self.hourly_timesteps, buildings_states_actions = self.buildings_states_actions_filename, simulation_period = self.simulation_period, cost_function = self.cost_function, central_agent = False, n_buildings_per_bus=self.n_buildings_per_bus, pv_penetration=self.pv_penetration)
            _, actions_spaces = env_rbc.get_state_action_spaces()

            #Instantiating the control agent(s)
            agent_rbc = RBC_Agent(env_rbc)

            state = env_rbc.reset()
            done = False
            # The RBC acts purely on the hour of day of the first building.
            while not done:
                action = agent_rbc.select_action([list(env_rbc.buildings.values())[0].sim_results['hour'][env_rbc.time_step]])
                next_state, rewards, done, _ = env_rbc.step(action)
                state = next_state
            self.cost_rbc = env_rbc.get_baseline_cost()
def reset(self):
self.system_losses = []
self.voltage_dev = []
return super().reset()
    def plot_buses(self):
        """Plot recorded per-bus voltages, grouped by bus role.

        Buses are partitioned into substation (transformer/ext-grid),
        PV-generator, building-load and remaining distribution buses, and each
        group is handed to plot_northsouth.
        """
        df = self.output['vm_pu']['values']
        # Substation = transformer HV/LV buses plus the external-grid bus.
        xfmr = set(self.net.bus.iloc[self.net.trafo.hv_bus].index) | set(self.net.bus.iloc[self.net.trafo.lv_bus].index)
        ext_grid = set(self.net.bus.iloc[self.net.ext_grid.bus].index)
        substation = xfmr | ext_grid
        loads = set(self.net.load.bus)
        buses = set(self.net.bus.index) - substation
        gens = set(self.net.gen.bus)
        # substation buses
        # NOTE(review): y is passed as a plain string here while plot_all
        # passes lists of labels; verify plot_northsouth handles both forms.
        self.plot_northsouth([df.loc[substation]], title="Substation Voltages", y="Vm_pu")
        # generator buses
        gen_buses = gens & buses
        # NOTE(review): '^' is the symmetric difference, so generator buses
        # outside `buses` (e.g. at the substation) would be included here;
        # 'buses - gens' may have been intended -- confirm.
        non_gen_buses = gens ^ buses
        if not len(gen_buses) == 0:
            self.plot_northsouth([df.loc[gen_buses]], title="Buses with PV", y="Vm_pu")
        # buses with building loads
        building_ng_buses = non_gen_buses & loads
        if not len(building_ng_buses) == 0:
            self.plot_northsouth([df.loc[building_ng_buses]], title="Building Voltages", y="Vm_pu")
        # other buses (distribution strictly)
        other_ng_buses = non_gen_buses - building_ng_buses
        if not len(other_ng_buses) == 0:
            self.plot_northsouth([df.loc[other_ng_buses]], title="Distribution buses", y="Vm_pu")
def plot_northsouth(self, dfs, title="", y=""):
line = self.net.bus_geodata['x'].median()
temp = self.net.bus_geodata.merge(self.net.bus, left_index=True, right_index=True)
north_buses = set(temp.loc[temp["x"] > line].index)
south_buses = set(temp.loc[temp["x"] <= line].index)
fig, axes = plt.subplots(nrows=len(dfs),ncols=2, figsize=(20,8))
plt.subplots_adjust(hspace = 0.5, wspace=0.25)
for i in range(len(dfs)): # can pass p and q vars
north_list = set(dfs[i].index) & north_buses
south_list = set(dfs[i].index) & south_buses
if len(south_list) > 0:
if len(dfs) > 1:
quad = axes[i][0]
else:
quad = axes[0]
f = dfs[i].loc[south_list].transpose().plot(ax=quad, figsize=(10,6), color=plt.cm.Spectral(np.linspace(0, 1, len(dfs[i]))))
f.set_xlabel(f"Timestep ({60/self.hourly_timesteps} minutes)")
f.set_ylabel(y[i])
f.set_title(f"South, {title}")
quad.legend().set_visible(False)
if len(north_list) > 0:
if len(dfs) > 1:
quad = axes[i][1]
else:
quad = axes[1]
g = dfs[i].loc[north_list].transpose().plot(ax=quad, figsize=(10,6), color=plt.cm.Spectral(np.linspace(1, 0, len(dfs[i]))))
g.set_xlabel(f"Time ({60/self.hourly_timesteps} minutes)")
g.set_ylabel(y[i])
g.set_title(f"North, {title}")
quad.legend().set_visible(False)
def plot_all(self):
self.plot_buses()
self.plot_northsouth([self.output['p_mw_load']['values']], title="Building loads", y=["P (MW)"])
self.plot_northsouth([self.output['p_mw_gen']['values'], self.output['q_mvar_gen']['values']], title="Generation", y=["P (MW)", "Q (MVAR)"])
plt.show()
| [
"pandapower.create_sgen",
"pandapower.networks.case33bw",
"citylearn.RBC_Agent",
"pandapower.create_ext_grid",
"pandapower.create_empty_network",
"numpy.sin",
"pandapower.create_load",
"numpy.cos",
"numpy.random.uniform",
"pandas.DataFrame",
"pandapower.create_line",
"pandapower.runpp",
"pan... | [((2084, 2134), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {'name': '"""single bus network"""'}), "(name='single bus network')\n", (2107, 2134), True, 'import pandapower as pp\n'), ((2154, 2209), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'name': '(0)', 'vn_kv': '(12.66)', 'geodata': '(0, 0)'}), '(net, name=0, vn_kv=12.66, geodata=(0, 0))\n', (2167, 2209), True, 'import pandapower as pp\n'), ((2217, 2275), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', 'ex_grid'], {'vm_pu': '(1.02)', 'va_degree': '(50)'}), '(net, ex_grid, vm_pu=1.02, va_degree=50)\n', (2235, 2275), True, 'import pandapower as pp\n'), ((2292, 2347), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'name': '(1)', 'vn_kv': '(12.66)', 'geodata': '(0, 1)'}), '(net, name=1, vn_kv=12.66, geodata=(0, 1))\n', (2305, 2347), True, 'import pandapower as pp\n'), ((2363, 2393), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_mw': '(0)'}), '(net, 1, p_mw=0)\n', (2377, 2393), True, 'import pandapower as pp\n'), ((2415, 2504), 'pandapower.create_line', 'pp.create_line', (['net', 'ex_grid', 'bus1', '(0.5)'], {'std_type': '"""N2XS(FL)2Y 1x300 RM/35 64/110 kV"""'}), "(net, ex_grid, bus1, 0.5, std_type=\n 'N2XS(FL)2Y 1x300 RM/35 64/110 kV')\n", (2429, 2504), True, 'import pandapower as pp\n'), ((2647, 2666), 'pandapower.networks.case33bw', 'networks.case33bw', ([], {}), '()\n', (2664, 2666), True, 'import pandapower.networks as networks\n'), ((6078, 6114), 'pandapower.runpp', 'runpp', (['self.net'], {'enforce_q_lims': '(True)'}), '(self.net, enforce_q_lims=True)\n', (6083, 6114), False, 'from pandapower import runpp\n'), ((9501, 9545), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)', 'wspace': '(0.25)'}), '(hspace=0.5, wspace=0.25)\n', (9520, 9545), True, 'import matplotlib.pyplot as plt\n'), ((11061, 11071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11069, 11071), True, 'import 
matplotlib.pyplot as plt\n'), ((7348, 7366), 'citylearn.RBC_Agent', 'RBC_Agent', (['env_rbc'], {}), '(env_rbc)\n', (7357, 7366), False, 'from citylearn import RBC_Agent\n'), ((1441, 1455), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1453, 1455), True, 'import pandas as pd\n'), ((1528, 1542), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1540, 1542), True, 'import pandas as pd\n'), ((1609, 1623), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1621, 1623), True, 'import pandas as pd\n'), ((1689, 1703), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1701, 1703), True, 'import pandas as pd\n'), ((1777, 1791), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1789, 1791), True, 'import pandas as pd\n'), ((1860, 1874), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1872, 1874), True, 'import pandas as pd\n'), ((4142, 4254), 'pandapower.create_bus', 'pp.create_bus', (['self.net'], {'name': 'bid', 'vn_kv': '(12.66)', 'max_vm_pu': '(1.2)', 'min_vm_pu': '(0.8)', 'zone': '(1)', 'geodata': '(new_x, new_y)'}), '(self.net, name=bid, vn_kv=12.66, max_vm_pu=1.2, min_vm_pu=0.8,\n zone=1, geodata=(new_x, new_y))\n', (4155, 4254), True, 'import pandapower as pp\n'), ((4280, 4386), 'pandapower.create_line', 'pp.create_line', (['self.net', 'new_house', 'existing_node', '(0.5)', '"""94-AL1/15-ST1A 0.4"""'], {'max_loading_percent': '(100)'}), "(self.net, new_house, existing_node, 0.5,\n '94-AL1/15-ST1A 0.4', max_loading_percent=100)\n", (4294, 4386), True, 'import pandapower as pp\n'), ((4416, 4464), 'pandapower.create_load', 'pp.create_load', (['self.net', 'new_house', '(0)'], {'name': 'bid'}), '(self.net, new_house, 0, name=bid)\n', (4430, 4464), True, 'import pandapower as pp\n'), ((5990, 6001), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5996, 6001), True, 'import numpy as np\n'), ((6057, 6068), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (6063, 6068), True, 'import numpy as np\n'), ((4556, 4575), 'numpy.random.uniform', 
'np.random.uniform', ([], {}), '()\n', (4573, 4575), True, 'import numpy as np\n'), ((4630, 4680), 'pandapower.create_sgen', 'pp.create_sgen', (['self.net', 'new_house', '(0.0)'], {'name': 'bid'}), '(self.net, new_house, 0.0, name=bid)\n', (4644, 4680), True, 'import pandapower as pp\n'), ((4009, 4034), 'numpy.cos', 'np.cos', (['(2 * np.pi / n * i)'], {}), '(2 * np.pi / n * i)\n', (4015, 4034), True, 'import numpy as np\n'), ((4080, 4105), 'numpy.sin', 'np.sin', (['(2 * np.pi / n * i)'], {}), '(2 * np.pi / n * i)\n', (4086, 4105), True, 'import numpy as np\n')] |
import torch
import numpy as np
from functools import partial
class Optimizer():
    """Thin wrapper around a torch optimizer.

    Bundles the optimizer with an optional learning-rate schedule and a
    linearly decaying teacher-forcing rate.
    """
    def __init__(self, parameters, optimizer, lr, eps, lr_scheduler, tf_start=1, tf_end=1, tf_step=1, **kwargs):
        # Teacher forcing: linear decay from tf_start down to tf_end over
        # tf_step steps, then held at tf_end.
        self.tf_type = tf_end != 1
        self.tf_rate = lambda step: max(
            tf_end, tf_start - (tf_start - tf_end) * step / tf_step)
        # Remember the configuration for create_msg().
        self.opt_type = optimizer
        self.init_lr = lr
        self.sch_type = lr_scheduler
        optim_cls = getattr(torch.optim, optimizer)
        if lr_scheduler == 'warmup':
            # Ramp up for the first steps, then decay with the inverse
            # square root of the step count; lr is injected via pre_step().
            n_warmup = 4000.0
            base_lr = lr
            self.lr_scheduler = lambda step: base_lr * n_warmup ** 0.5 * \
                np.minimum((step + 1) * n_warmup ** -1.5, (step + 1) ** -0.5)
            self.opt = optim_cls(parameters, lr=1.0)
        elif lr_scheduler == 'spec-aug-basic':
            # Scheduler from https://arxiv.org/pdf/1904.08779.pdf
            self.lr_scheduler = partial(speech_aug_scheduler, s_r=500,
                                        s_i=20000, s_f=80000, peak_lr=lr)
            self.opt = optim_cls(parameters, lr=lr, eps=eps)
        elif lr_scheduler == 'spec-aug-double':
            # Same shape as spec-aug-basic with every phase twice as long.
            self.lr_scheduler = partial(speech_aug_scheduler, s_r=1000,
                                        s_i=40000, s_f=160000, peak_lr=lr)
            self.opt = optim_cls(parameters, lr=lr, eps=eps)
        else:
            # No schedule: constant learning rate.
            self.lr_scheduler = None
            self.opt = optim_cls(parameters, lr=lr, eps=eps) # ToDo: 1e-8 better?

    def get_opt_state_dict(self):
        """Return the wrapped optimizer's state dict (for checkpointing)."""
        return self.opt.state_dict()

    def load_opt_state_dict(self, state_dict):
        """Restore the wrapped optimizer from a checkpointed state dict."""
        self.opt.load_state_dict(state_dict)

    def pre_step(self, step):
        """Update the lr for this step, zero gradients, return the tf rate."""
        if self.lr_scheduler is not None:
            new_lr = self.lr_scheduler(step)
            for group in self.opt.param_groups:
                group['lr'] = new_lr
        self.opt.zero_grad()
        return self.tf_rate(step)

    def step(self):
        """Apply one optimizer update."""
        self.opt.step()

    def create_msg(self):
        """Return a one-line human-readable summary of the settings."""
        return ['Optim.spec.| Algo. = {}\t| Lr = {}\t (Scheduler = {})| Scheduled sampling = {}'
                .format(self.opt_type, self.init_lr, self.sch_type, self.tf_type)]
def speech_aug_scheduler(step, s_r, s_i, s_f, peak_lr):
    """Piecewise LR schedule: linear ramp-up, hold, exponential decay.

    Starting from 0, the rate ramps up to ``peak_lr`` over ``s_r`` steps,
    holds until ``s_i``, decays exponentially to 0.01 * peak_lr at ``s_f``
    and stays there afterwards.
    """
    final_lr_ratio = 0.01
    # Base-10 decay constant chosen so the rate hits
    # final_lr_ratio * peak_lr exactly at step s_f.
    exp_decay_lambda = -np.log10(final_lr_ratio)/(s_f-s_i)
    t = step + 1
    if t < s_r:
        # Linear ramp-up phase.
        return peak_lr * float(t) / s_r
    if t < s_i:
        # Hold at the peak.
        return peak_lr
    if t <= s_f:
        # Exponential decay phase.
        return peak_lr * np.power(10, -exp_decay_lambda * (t - s_i))
    # After s_f: converged to the final rate.
    return peak_lr * final_lr_ratio
| [
"numpy.log10",
"functools.partial",
"numpy.minimum",
"numpy.power"
] | [((2482, 2506), 'numpy.log10', 'np.log10', (['final_lr_ratio'], {}), '(final_lr_ratio)\n', (2490, 2506), True, 'import numpy as np\n'), ((987, 1059), 'functools.partial', 'partial', (['speech_aug_scheduler'], {'s_r': '(500)', 's_i': '(20000)', 's_f': '(80000)', 'peak_lr': 'lr'}), '(speech_aug_scheduler, s_r=500, s_i=20000, s_f=80000, peak_lr=lr)\n', (994, 1059), False, 'from functools import partial\n'), ((740, 804), 'numpy.minimum', 'np.minimum', (['((step + 1) * warmup_step ** -1.5)', '((step + 1) ** -0.5)'], {}), '((step + 1) * warmup_step ** -1.5, (step + 1) ** -0.5)\n', (750, 804), True, 'import numpy as np\n'), ((1301, 1375), 'functools.partial', 'partial', (['speech_aug_scheduler'], {'s_r': '(1000)', 's_i': '(40000)', 's_f': '(160000)', 'peak_lr': 'lr'}), '(speech_aug_scheduler, s_r=1000, s_i=40000, s_f=160000, peak_lr=lr)\n', (1308, 1375), False, 'from functools import partial\n'), ((2768, 2818), 'numpy.power', 'np.power', (['(10)', '(-exp_decay_lambda * (cur_step - s_i))'], {}), '(10, -exp_decay_lambda * (cur_step - s_i))\n', (2776, 2818), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import rlkit.testing.testing_utils as tu
class TestAreNpArraysEqual(unittest.TestCase):
    """Unit tests for testing_utils.are_np_arrays_equal."""
    def test_equal(self):
        # Values differing by less than the default threshold compare equal.
        first = np.array([1, 5])
        second = np.array([1.0000000001, 5])
        self.assertTrue(tu.are_np_arrays_equal(first, second))

    def test_not_equal(self):
        # A tight threshold makes the same tiny difference count as unequal.
        first = np.array([1, 5])
        second = np.array([1.0000000001, 5])
        self.assertFalse(tu.are_np_arrays_equal(first, second, threshold=1e-20))
class TestAreDictListsEqual(unittest.TestCase):
    """Unit tests for testing_utils.are_dict_lists_equal."""
    def test_order_does_not_matter(self):
        # The same dicts listed in a different order still compare equal.
        first = [{"a": 1, "b": -1}, {"a": 2, "b": -1}]
        second = [{"a": 2, "b": -1}, {"a": 1, "b": -1}]
        self.assertTrue(tu.are_dict_lists_equal(first, second))

    def test_values_matter(self):
        # Lists whose dicts hold different values are not equal.
        first = [{"a": 1, "b": -1}, {"a": 2, "b": -1}]
        second = [{"a": 2, "b": -1}, {"a": 2, "b": -1}]
        self.assertFalse(tu.are_dict_lists_equal(first, second))

    def test_keys_matter(self):
        # Lists whose dicts use different keys are not equal.
        first = [{"a": 1, "b": -1}, {"a": 2, "b": -1}]
        second = [{"a": 1, "b": -1}, {"a": 2, "c": -1}]
        self.assertFalse(tu.are_dict_lists_equal(first, second))
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"unittest.main",
"numpy.array",
"rlkit.testing.testing_utils.are_np_arrays_equal",
"rlkit.testing.testing_utils.are_dict_lists_equal"
] | [((1900, 1915), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1913, 1915), False, 'import unittest\n'), ((165, 181), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (173, 181), True, 'import numpy as np\n'), ((194, 221), 'numpy.array', 'np.array', (['[1.0000000001, 5]'], {}), '([1.0000000001, 5])\n', (202, 221), True, 'import numpy as np\n'), ((319, 335), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (327, 335), True, 'import numpy as np\n'), ((348, 375), 'numpy.array', 'np.array', (['[1.0000000001, 5]'], {}), '([1.0000000001, 5])\n', (356, 375), True, 'import numpy as np\n'), ((246, 274), 'rlkit.testing.testing_utils.are_np_arrays_equal', 'tu.are_np_arrays_equal', (['a', 'b'], {}), '(a, b)\n', (268, 274), True, 'import rlkit.testing.testing_utils as tu\n'), ((401, 446), 'rlkit.testing.testing_utils.are_np_arrays_equal', 'tu.are_np_arrays_equal', (['a', 'b'], {'threshold': '(1e-20)'}), '(a, b, threshold=1e-20)\n', (423, 446), True, 'import rlkit.testing.testing_utils as tu\n'), ((926, 957), 'rlkit.testing.testing_utils.are_dict_lists_equal', 'tu.are_dict_lists_equal', (['d1', 'd2'], {}), '(d1, d2)\n', (949, 957), True, 'import rlkit.testing.testing_utils as tu\n'), ((1381, 1412), 'rlkit.testing.testing_utils.are_dict_lists_equal', 'tu.are_dict_lists_equal', (['d1', 'd2'], {}), '(d1, d2)\n', (1404, 1412), True, 'import rlkit.testing.testing_utils as tu\n'), ((1834, 1865), 'rlkit.testing.testing_utils.are_dict_lists_equal', 'tu.are_dict_lists_equal', (['d1', 'd2'], {}), '(d1, d2)\n', (1857, 1865), True, 'import rlkit.testing.testing_utils as tu\n')] |
import numpy as np
import mixture
class bmm(mixture.mixture):
    """Bernoulli mixture model built on the generic ``mixture`` base class."""

    def __init__(self, n_components, covariance_type='diag',
                 n_iter=100, verbose=False):
        """Initialize a Bernoulli mixture with ``n_components`` components."""
        super().__init__(n_components, covariance_type=covariance_type,
                         n_iter=n_iter, verbose=verbose)

    def _log_support(self, x):
        """Per-component Bernoulli log-likelihoods for every sample.

        Parameters
        ----------
        x : ndarray of shape (n_samples, n_features)
            Binary (0/1) observations.

        Returns
        -------
        ndarray of shape (n_samples, n_components)
            log p(x | component i) for each sample/component pair.
        """
        k = self.n_components
        mu = self.means
        # Complements supply the "feature is 0" terms of the density.
        x_c = 1 - x
        mu_c = 1 - mu

        log_support = np.empty(shape=(x.shape[0], k))
        for i in range(k):
            # Clip probabilities away from 0 so the log never yields -inf.
            log_support[:, i] = (
                np.sum(x * np.log(mu[i, :].clip(min=1e-50)), 1)
                + np.sum(x_c * np.log(mu_c[i, :].clip(min=1e-50)), 1))

        return log_support
| [
"numpy.ndarray"
] | [((466, 499), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(x.shape[0], k)'}), '(shape=(x.shape[0], k))\n', (476, 499), True, 'import numpy as np\n')] |
"""plot_T_out.py
author: <NAME>
date: 24.10.2016
This script plots the output of extract_T_irr.py
"""
import netCDF4 as nc
import numpy as np
import scipy
import os
import matplotlib
matplotlib.rcParams['backend'] = "Qt4Agg"
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import maskoceans
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
# Python 2 only: execfile loads extract_T_irr() into the current namespace.
execfile('extract_T_irr.py')
# Mean temperature statistics for the two periods 1901-1930 and 1981-2010.
# Index meaning of T_out axes 0/1 is defined by extract_T_irr -- presumably
# axis 0 = period, axis 1 = statistic; confirm against that script.
T_out = extract_T_irr('CRU','Tmean','PC/PD',1901,1930,1981,2010)
# Temperature change: late period minus early period.
plot_var = T_out[1,2,:,:] - T_out[0,2,:,:]
# The CRU TS v3.22 file is opened only for its lat/lon grid definition.
Tfile = nc.Dataset('/net/exo/landclim/data/dataset/CRUTS/v3.22/0.5deg_lat-lon_1m/original/cru_ts3.22.1901.2013.tmp.dat.nc','r')
lat = Tfile.variables['lat'][:]
lon = Tfile.variables['lon'][:]
# 2-D coordinate grids matching plot_var's shape, as required by maskoceans.
lats = np.tile(lat,(lon.shape[0],1)).T
lons = np.tile(lon,(lat.shape[0],1))
# Global cylindrical projection, low-resolution coastlines.
m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90,llcrnrlon=-180,urcrnrlon=180,resolution='l')
# Mask ocean (and inland water) cells so only land values are drawn.
pv_oceanmask = maskoceans(lons,lats,plot_var,inlands=True,resolution='l',grid=10)
plt.figure()
m.drawcoastlines()
m.drawmapboundary(fill_color='lightgray')
# Diverging colormap centered on zero change, clipped at +/- 3 degrees.
m.imshow(pv_oceanmask,cmap="RdBu_r",vmin=-3,vmax=3)
c = plt.colorbar()
c.set_label('T change [$^\circ$ C]')
plt.show(block=False) | [
"numpy.tile",
"mpl_toolkits.basemap.maskoceans",
"netCDF4.Dataset",
"matplotlib.pyplot.colorbar",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((531, 661), 'netCDF4.Dataset', 'nc.Dataset', (['"""/net/exo/landclim/data/dataset/CRUTS/v3.22/0.5deg_lat-lon_1m/original/cru_ts3.22.1901.2013.tmp.dat.nc"""', '"""r"""'], {}), "(\n '/net/exo/landclim/data/dataset/CRUTS/v3.22/0.5deg_lat-lon_1m/original/cru_ts3.22.1901.2013.tmp.dat.nc'\n , 'r')\n", (541, 661), True, 'import netCDF4 as nc\n'), ((761, 792), 'numpy.tile', 'np.tile', (['lon', '(lat.shape[0], 1)'], {}), '(lon, (lat.shape[0], 1))\n', (768, 792), True, 'import numpy as np\n'), ((795, 900), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""cyl"""', 'llcrnrlat': '(-90)', 'urcrnrlat': '(90)', 'llcrnrlon': '(-180)', 'urcrnrlon': '(180)', 'resolution': '"""l"""'}), "(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180,\n urcrnrlon=180, resolution='l')\n", (802, 900), False, 'from mpl_toolkits.basemap import Basemap\n'), ((908, 979), 'mpl_toolkits.basemap.maskoceans', 'maskoceans', (['lons', 'lats', 'plot_var'], {'inlands': '(True)', 'resolution': '"""l"""', 'grid': '(10)'}), "(lons, lats, plot_var, inlands=True, resolution='l', grid=10)\n", (918, 979), False, 'from mpl_toolkits.basemap import maskoceans\n'), ((976, 988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (986, 988), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1120), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1118, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1179), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1166, 1179), True, 'import matplotlib.pyplot as plt\n'), ((722, 753), 'numpy.tile', 'np.tile', (['lat', '(lon.shape[0], 1)'], {}), '(lat, (lon.shape[0], 1))\n', (729, 753), True, 'import numpy as np\n')] |
import gym
import tensorflow as tf
import spinup
import numpy as np
# Environment factory: spinup expects a zero-argument callable that builds a
# fresh Gym environment.
env = lambda : gym.make("quadrotor_14d_env:DiffDriveEnv-v0")

# Alternative baseline kept for reference: vanilla policy gradient.
# spinup.vpg(
#     env,
#     ac_kwargs={"hidden_sizes":(64,2)},
#     seed = np.random.randint(100),
#     steps_per_epoch=1250,
#     epochs=2500,
#     pi_lr=3e-4,
#     logger_kwargs = {"output_dir" : "logs/vpgrandomtest"}
# )

# PPO training run; the random seed makes each invocation distinct.
spinup.ppo(
    env,
    ac_kwargs={"hidden_sizes":(64,2)},
    seed = np.random.randint(100),
    steps_per_epoch=1250,
    pi_lr=3e-3,
    epochs=2500,
    logger_kwargs = {"output_dir" : "logs/ppo-diffdrivetest-wtf"}
)

# Alternative baseline kept for reference: polynomial policy parameterization.
# spinup.vpgpolynomial(
#     env,
#     ac_kwargs={"order":3},
#     seed = np.random.randint(100),
#     steps_per_epoch=1250,
#     epochs=2500,
#     pi_lr=2e-5,
#     l1_scaling=0.001,
#     logger_kwargs = {"output_dir" : "logs/polyrandomtest"}
# ) | [
"numpy.random.randint",
"gym.make"
] | [((120, 165), 'gym.make', 'gym.make', (['"""quadrotor_14d_env:DiffDriveEnv-v0"""'], {}), "('quadrotor_14d_env:DiffDriveEnv-v0')\n", (128, 165), False, 'import gym\n'), ((481, 503), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (498, 503), True, 'import numpy as np\n')] |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__author__ = "<NAME>"
import pkgutil
import doctest
from os.path import join
import tempfile
from importlib import import_module
import numpy as np
import pytest
import biotite.structure.io as strucio
from .util import is_not_installed, cannot_import, cannot_connect_to
NCBI_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/"
RCSB_URL = "https://www.rcsb.org/"
@pytest.mark.parametrize("package_name, context_package_names", [
pytest.param("biotite", [] ),
pytest.param("biotite.sequence", [] ),
pytest.param("biotite.sequence.align", ["biotite.sequence"] ),
pytest.param("biotite.sequence.phylo", ["biotite.sequence"] ),
pytest.param("biotite.sequence.graphics", ["biotite.sequence"],
marks=pytest.mark.skipif(
cannot_import("matplotlib"),
reason="Matplotlib is not installed") ),
pytest.param("biotite.sequence.io", ["biotite.sequence"] ),
pytest.param("biotite.sequence.io.fasta", ["biotite.sequence"] ),
pytest.param("biotite.sequence.io.fastq", ["biotite.sequence"] ),
pytest.param("biotite.sequence.io.genbank", ["biotite.sequence",
"biotite.database.entrez"],
marks=pytest.mark.skipif(
cannot_connect_to(NCBI_URL),
reason="NCBI Entrez is not available") ),
pytest.param("biotite.sequence.io.gff", ["biotite.sequence",
"biotite.sequence.io.fasta"],
marks=pytest.mark.filterwarnings("ignore:") ),
pytest.param("biotite.structure", ["biotite.structure.io",
"biotite.structure.info"] ),
pytest.param("biotite.structure.graphics", ["biotite.structure"],
marks=pytest.mark.skipif(
cannot_import("matplotlib"),
reason="Matplotlib is not installed"), ),
pytest.param("biotite.structure.io", ["biotite.structure"] ),
pytest.param("biotite.structure.io.pdb", ["biotite.structure",
"biotite"] ),
pytest.param("biotite.structure.io.pdbx", ["biotite.structure"] ),
pytest.param("biotite.structure.io.pdbqt", ["biotite.structure",
"biotite.structure.info"] ),
pytest.param("biotite.structure.io.npz", ["biotite.structure"] ),
pytest.param("biotite.structure.io.mmtf", ["biotite.structure"] ),
pytest.param("biotite.structure.info", ["biotite.structure"] ),
pytest.param("biotite.database.entrez", [],
marks=pytest.mark.skipif(
cannot_connect_to(NCBI_URL),
reason="NCBI Entrez is not available") ),
pytest.param("biotite.database.rcsb", [],
marks=pytest.mark.skipif(
cannot_connect_to(RCSB_URL),
reason="RCSB PDB is not available") ),
pytest.param("biotite.application", ["biotite.application.clustalo",
"biotite.sequence"],
marks=pytest.mark.skipif(is_not_installed("clustalo"),
reason="Software is not installed")),
pytest.param("biotite.application.blast", [], ),
pytest.param("biotite.application.muscle", ["biotite.sequence"],
marks=pytest.mark.skipif(is_not_installed("muscle"),
reason="Software is not installed")),
pytest.param("biotite.application.clustalo",["biotite.sequence"],
marks=pytest.mark.skipif(is_not_installed("clustalo"),
reason="Software is not installed")),
pytest.param("biotite.application.mafft", ["biotite.sequence"],
marks=pytest.mark.skipif(is_not_installed("mafft"),
reason="Software is not installed")),
pytest.param("biotite.application.dssp", ["biotite.structure"],
marks=pytest.mark.skipif(is_not_installed("mkdssp"),
reason="Software is not installed")),
pytest.param("biotite.application.autodock",["biotite.structure",
"biotite.structure.info"],
marks=pytest.mark.skipif(is_not_installed("vina"),
reason="Software is not installed")),
])
def test_doctest(package_name, context_package_names):
    """
    Run all doctest strings in all Biotite subpackages.

    `package_name` is the subpackage whose doctests are executed;
    `context_package_names` lists packages whose public attributes are
    injected as globals so the doctests can reference them unqualified.
    """
    # Collect all attributes of this package and its subpackages
    # as globals for the doctests
    globs = {}
    #The package itself is also used as context
    for name in context_package_names + [package_name]:
        context_package = import_module(name)
        globs.update(
            {attr : getattr(context_package, attr)
             for attr in dir(context_package)}
        )
    # Add fixed names for certain paths
    globs["path_to_directory"] = tempfile.gettempdir()
    globs["path_to_structures"] = join(".", "tests", "structure", "data")
    globs["path_to_sequences"] = join(".", "tests", "sequence", "data")
    # Add frequently used modules
    globs["np"] = np
    # Add frequently used objects
    globs["atom_array_stack"] = strucio.load_structure(
        join(".", "tests", "structure", "data", "1l2y.mmtf"),
        include_bonds=True
    )
    globs["atom_array"] = globs["atom_array_stack"][0]
    # Adjust NumPy print formatting
    np.set_printoptions(precision=3, floatmode="maxprec_equal")
    # Run doctests
    # This test does not use 'testfile()' or 'testmod()'
    # due to problems with doctest identification for Cython modules
    # More information below
    package = import_module(package_name)
    runner = doctest.DocTestRunner(
        verbose = False,
        optionflags =
            doctest.ELLIPSIS |
            doctest.REPORT_ONLY_FIRST_FAILURE |
            doctest.NORMALIZE_WHITESPACE
    )
    for test in doctest.DocTestFinder(exclude_empty=False).find(
        package, package.__name__,
        # It is necessary to set 'module' to 'False', as otherwise
        # Cython functions and classes would be falsely identified
        # as members of an external module by 'DocTestFinder._find()'
        # and consequently would be ignored
        #
        # Setting 'module=False' omits this check
        # This check is not necessary as the biotite subpackages
        # ('__init__.py' modules) should only contain attributes, that
        # are part of the package itself.
        module=False,
        extraglobs=globs
    ):
        runner.run(test)
    results = doctest.TestResults(runner.failures, runner.tries)
    # Print the failing module before re-raising so pytest output shows it.
    try:
        assert results.failed == 0
    except AssertionError:
        print(f"Failing doctest in module {package}")
raise | [
"doctest.DocTestRunner",
"pytest.mark.filterwarnings",
"importlib.import_module",
"doctest.DocTestFinder",
"os.path.join",
"doctest.TestResults",
"pytest.param",
"tempfile.gettempdir",
"numpy.set_printoptions"
] | [((5695, 5716), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (5714, 5716), False, 'import tempfile\n'), ((5751, 5790), 'os.path.join', 'join', (['"""."""', '"""tests"""', '"""structure"""', '"""data"""'], {}), "('.', 'tests', 'structure', 'data')\n", (5755, 5790), False, 'from os.path import join\n'), ((5825, 5863), 'os.path.join', 'join', (['"""."""', '"""tests"""', '"""sequence"""', '"""data"""'], {}), "('.', 'tests', 'sequence', 'data')\n", (5829, 5863), False, 'from os.path import join\n'), ((6204, 6263), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'floatmode': '"""maxprec_equal"""'}), "(precision=3, floatmode='maxprec_equal')\n", (6223, 6263), True, 'import numpy as np\n'), ((6453, 6480), 'importlib.import_module', 'import_module', (['package_name'], {}), '(package_name)\n', (6466, 6480), False, 'from importlib import import_module\n'), ((6494, 6632), 'doctest.DocTestRunner', 'doctest.DocTestRunner', ([], {'verbose': '(False)', 'optionflags': '(doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE | doctest.\n NORMALIZE_WHITESPACE)'}), '(verbose=False, optionflags=doctest.ELLIPSIS | doctest\n .REPORT_ONLY_FIRST_FAILURE | doctest.NORMALIZE_WHITESPACE)\n', (6515, 6632), False, 'import doctest\n'), ((7370, 7420), 'doctest.TestResults', 'doctest.TestResults', (['runner.failures', 'runner.tries'], {}), '(runner.failures, runner.tries)\n', (7389, 7420), False, 'import doctest\n'), ((5466, 5485), 'importlib.import_module', 'import_module', (['name'], {}), '(name)\n', (5479, 5485), False, 'from importlib import import_module\n'), ((6017, 6069), 'os.path.join', 'join', (['"""."""', '"""tests"""', '"""structure"""', '"""data"""', '"""1l2y.mmtf"""'], {}), "('.', 'tests', 'structure', 'data', '1l2y.mmtf')\n", (6021, 6069), False, 'from os.path import join\n'), ((589, 616), 'pytest.param', 'pytest.param', (['"""biotite"""', '[]'], {}), "('biotite', [])\n", (601, 616), False, 'import pytest\n'), ((669, 705), 
'pytest.param', 'pytest.param', (['"""biotite.sequence"""', '[]'], {}), "('biotite.sequence', [])\n", (681, 705), False, 'import pytest\n'), ((749, 809), 'pytest.param', 'pytest.param', (['"""biotite.sequence.align"""', "['biotite.sequence']"], {}), "('biotite.sequence.align', ['biotite.sequence'])\n", (761, 809), False, 'import pytest\n'), ((829, 889), 'pytest.param', 'pytest.param', (['"""biotite.sequence.phylo"""', "['biotite.sequence']"], {}), "('biotite.sequence.phylo', ['biotite.sequence'])\n", (841, 889), False, 'import pytest\n'), ((1151, 1208), 'pytest.param', 'pytest.param', (['"""biotite.sequence.io"""', "['biotite.sequence']"], {}), "('biotite.sequence.io', ['biotite.sequence'])\n", (1163, 1208), False, 'import pytest\n'), ((1231, 1294), 'pytest.param', 'pytest.param', (['"""biotite.sequence.io.fasta"""', "['biotite.sequence']"], {}), "('biotite.sequence.io.fasta', ['biotite.sequence'])\n", (1243, 1294), False, 'import pytest\n'), ((1311, 1374), 'pytest.param', 'pytest.param', (['"""biotite.sequence.io.fastq"""', "['biotite.sequence']"], {}), "('biotite.sequence.io.fastq', ['biotite.sequence'])\n", (1323, 1374), False, 'import pytest\n'), ((1937, 2026), 'pytest.param', 'pytest.param', (['"""biotite.structure"""', "['biotite.structure.io', 'biotite.structure.info']"], {}), "('biotite.structure', ['biotite.structure.io',\n 'biotite.structure.info'])\n", (1949, 2026), False, 'import pytest\n'), ((2337, 2396), 'pytest.param', 'pytest.param', (['"""biotite.structure.io"""', "['biotite.structure']"], {}), "('biotite.structure.io', ['biotite.structure'])\n", (2349, 2396), False, 'import pytest\n'), ((2417, 2491), 'pytest.param', 'pytest.param', (['"""biotite.structure.io.pdb"""', "['biotite.structure', 'biotite']"], {}), "('biotite.structure.io.pdb', ['biotite.structure', 'biotite'])\n", (2429, 2491), False, 'import pytest\n'), ((2567, 2631), 'pytest.param', 'pytest.param', (['"""biotite.structure.io.pdbx"""', "['biotite.structure']"], {}), 
"('biotite.structure.io.pdbx', ['biotite.structure'])\n", (2579, 2631), False, 'import pytest\n'), ((2647, 2742), 'pytest.param', 'pytest.param', (['"""biotite.structure.io.pdbqt"""', "['biotite.structure', 'biotite.structure.info']"], {}), "('biotite.structure.io.pdbqt', ['biotite.structure',\n 'biotite.structure.info'])\n", (2659, 2742), False, 'import pytest\n'), ((2797, 2860), 'pytest.param', 'pytest.param', (['"""biotite.structure.io.npz"""', "['biotite.structure']"], {}), "('biotite.structure.io.npz', ['biotite.structure'])\n", (2809, 2860), False, 'import pytest\n'), ((2877, 2941), 'pytest.param', 'pytest.param', (['"""biotite.structure.io.mmtf"""', "['biotite.structure']"], {}), "('biotite.structure.io.mmtf', ['biotite.structure'])\n", (2889, 2941), False, 'import pytest\n'), ((2957, 3018), 'pytest.param', 'pytest.param', (['"""biotite.structure.info"""', "['biotite.structure']"], {}), "('biotite.structure.info', ['biotite.structure'])\n", (2969, 3018), False, 'import pytest\n'), ((3821, 3866), 'pytest.param', 'pytest.param', (['"""biotite.application.blast"""', '[]'], {}), "('biotite.application.blast', [])\n", (3833, 3866), False, 'import pytest\n'), ((6707, 6749), 'doctest.DocTestFinder', 'doctest.DocTestFinder', ([], {'exclude_empty': '(False)'}), '(exclude_empty=False)\n', (6728, 6749), False, 'import doctest\n'), ((1876, 1913), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:"""'], {}), "('ignore:')\n", (1902, 1913), False, 'import pytest\n')] |
import matplotlib.pyplot as plt
import random
import numpy as np

# Draw 30 pseudo-random ratings in [1, 100] and plot them as a histogram.
# NOTE: the bin edges run from 10 to 300 even though the sampled values
# never exceed 100, so most bins stay empty (and values below 10 are dropped).
ratings = [random.randint(1, 100) for _ in range(30)]
bin_edges = list(np.arange(10, 301, 10))
plt.hist(
    ratings,
    bin_edges,
    histtype="bar",
    color="orange",
    rwidth=0.8,
    label="Product Ratings",
)
plt.show()
| [
"random.randint",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((219, 229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (227, 229), True, 'import matplotlib.pyplot as plt\n'), ((76, 98), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (90, 98), False, 'import random\n'), ((125, 147), 'numpy.arange', 'np.arange', (['(10)', '(301)', '(10)'], {}), '(10, 301, 10)\n', (134, 147), True, 'import numpy as np\n')] |
import numpy as np
import argparse
from base_module import Posenet, Camnet, discriminator, Encoder
from mmdgan_mh_enc import Pose_mmdgan_enc
import os
import random
import tensorflow as tf
import scipy.io as sio
import logging, logging.config
import sys
from eval_functions import err_3dpe
import ops
# Command-line interface: hyper-parameters for the MMD-GAN pose model and
# options controlling which checkpoint/dataset to evaluate.
parse = argparse.ArgumentParser()
parse.add_argument("--batchsize", help= "the batch size used in training", default=128, type = int)
parse.add_argument("--epochs", help="number of epochs during training", default=50, type = int)
parse.add_argument("--latent_dim", help="dimension of latent space", default=1024, type = int)
parse.add_argument("--latent_dim_pose", help="dimension for pose in the latent space of discriminator", default=128, type=int)
parse.add_argument("--latent_dim_kcs", help="dimension for kcs in the latent space of discriminator", default=1024, type=int)
parse.add_argument("--d_output_dim", help="dimension for output of discriminator", default=8, type=int)
parse.add_argument("--lr", help="learning rate", default=1e-4, type=float)
parse.add_argument("--architecture", help="which architeture to use[mmdgan, mmdgan_enc]", default='mmdgan_enc', type=str)
parse.add_argument("--beta1", help="beta1 for adamoptimizor", default=0.5, type=float)
parse.add_argument("--diter", help="the number of discriminator updates oer generator updates", default=1, type=int)
parse.add_argument("--kernel", help="kernel type used in mmd[dot, mix_rbf, mix_rq]", default='mix_rq', type=str)
# Loss-term weights.
parse.add_argument("--repro_weight", help="weight of reprojection loss", default=10.0, type=float)
parse.add_argument("--cam_weight", help="weight of camera loss", default=10.0, type=float)
# NOTE(review): help text below duplicates --dot_weight's; presumably this is
# the gradient-penalty weight — confirm against the training code.
parse.add_argument("--gp_weight", help="weight of dot kernel in mix kernel", default=0.1, type=float)
parse.add_argument("--reg_weight", help="weight for regularizer", default=7.5, type=float)
parse.add_argument("--dot_weight", help="weight of dot kernel in mix kernel", default=10.0, type=float)
parse.add_argument("--lr_decay", help="learning rate decay rate", default=0.94, type=float)
parse.add_argument("--enc_weight", help="weight of encoder", default=10.0, type=float)
# NOTE(review): type=bool is an argparse pitfall — bool("False") is True, so
# any non-empty string on the command line enables sampling.
parse.add_argument("--sampling", help="set to true if generate samples", default=True, type=bool)
parse.add_argument("--checkpoint", help="which model to load", default=0, type=int)
# 931070 for gt data
# 971070 for shft
parse.add_argument("--num_samples", help="number of hypotheses", default=10, type=int)
parse.add_argument("--datatype", help="datatype used for training [GT, SHFT, GTMJ]", default='GT', type=str)
parse.add_argument("--load_path", help="specify the path to load model", default='./models', type=str)
args = parse.parse_args()
# Action categories of the test split (presumably Human3.6M — confirm).
actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting',
           'SittingDown', 'Smoking', 'Waiting', 'WalkDog', 'WalkTogether', 'Walking']
# 16 joints: 3 coordinates each in 3D, 2 in 2D; 6 camera parameters.
pose3d_dim = 16 * 3
pose2d_dim = 16 * 2
cam_dim = 6
lr = args.lr
# Model directory name encodes the hyper-parameters it was trained with.
model_name = '{}_regweight{}_encweight{}_2D{}'.format(args.architecture, args.reg_weight, args.enc_weight, args.datatype)
log_dir = 'logs_eval'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
# Configure logging from file and additionally mirror all output to log_dir/log.txt.
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/log.txt".format(log_dir))
logger.addHandler(fileHandler)
logger.info("Logs will be written to %s" % log_dir)
def log_arguments():
    """Write the invocation command line and all parsed argument values to the log."""
    logger.info('Command: %s', ' '.join(sys.argv))
    arg_lines = (' {}: {}'.format(name, getattr(args, name)) for name in vars(args))
    logger.info('Arguments:\n' + '\n'.join(arg_lines))


log_arguments()
# Assemble the networks (pose generator, camera net, discriminator, encoder)
# and wrap them into the MMD-GAN graph used for inference below.
posenet = Posenet(args.latent_dim, pose3d_dim)
camnet = Camnet(args.latent_dim, cam_dim)
disc = discriminator(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim)
encoder = Encoder(args.latent_dim, args.latent_dim)
mmd_posenet = Pose_mmdgan_enc(posenet, camnet, disc, encoder, args.latent_dim, args.batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim,
                args.kernel, args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight, args.dot_weight, args.enc_weight)
mmd_posenet.build_model()

# Let TensorFlow grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Evaluation loop: restore the trained model and report per-action errors
# under two settings:
#   * zero-code (zc): one deterministic hypothesis (latent code = 0)
#   * best hypothesis (bh): best of args.num_samples random hypotheses
# Protocol 1 columns use poses in the camera frame, protocol 2 the template
# frame (the exact alignment is inside err_3dpe — see eval_functions).
with tf.Session(config=config) as sess:
    batchsize = args.batchsize
    load_dir = os.path.join(args.load_path, model_name)
    ckpt = tf.train.get_checkpoint_state(load_dir, latest_filename="checkpoint")
    if args.checkpoint > 0:
        # Load the explicitly requested checkpoint number ...
        ckpt_name = os.path.join(load_dir, "checkpoint-{}".format(args.checkpoint))
    else:
        # ... or fall back to the most recent checkpoint on record.
        ckpt_name = ckpt.model_checkpoint_path
    mmd_posenet.saver.restore(sess, ckpt_name)
    print('Loading model {}'.format(os.path.basename(ckpt_name)))

    # Test data lives in two frames: template (TEM) and camera (CAM).
    path = 'new_data/test/2d{}_3dTEM'.format(args.datatype)
    path_cam = 'new_data/test/2d{}_3dCAM'.format(args.datatype)
    logger.info('{0:>15} {1:>30} {2:>30}'.format('Action', 'Protocol1', 'Protocol2'))
    val_best_all = []
    valcam_best_all = []
    val_zc_all = []
    valcam_zc_all = []
    for action in actions:
        data_2d_3d_test = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path, action, args.datatype))
        data_cam = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path_cam, action, args.datatype))
        # Subsample every 64th frame; 3D poses are converted from mm to m.
        poses2d_eval = data_2d_3d_test['poses_2d'][::64, :]
        poses3d_eval = data_2d_3d_test['poses_3d'][::64, :] / 1000
        poses_3d_cam = data_cam['poses_3d'][::64, :] / 1000

        poses_zc = []
        posescam_zc = []
        # generate results under zero code setting
        # (loop variable renamed from `eval`, which shadowed the builtin)
        for batch_idx in range(poses2d_eval.shape[0] // batchsize):
            noise_zc = np.zeros([batchsize, args.latent_dim])
            poses, cam = mmd_posenet.inference(sess, poses2d_eval[batch_idx * batchsize: (batch_idx + 1) * batchsize],
                                                 poses3d_eval[batch_idx * batchsize: (batch_idx + 1) * batchsize], noise_zc,
                                                 lr)
            poses_reshape = np.reshape(poses, [poses.shape[0], 3, 16])
            k = np.reshape(cam, [cam.shape[0], 2, 3])
            R = ops.compute_R(k)  # recover rotation matrix from camera matrix
            poses_cam = np.matmul(R, poses_reshape)  # transfer pose from the template frame to the camera frame
            poses_cam_reshape = np.reshape(poses_cam, [poses_cam.shape[0], -1])
            posescam_zc.append(poses_cam_reshape)
            poses_zc.append(poses)
        poses_zc = np.vstack(poses_zc)
        posescam_zc = np.vstack(posescam_zc)

        # compute the error under zero code setting
        val_zc = 0.0
        valcam_zc = 0.0
        for p in range(poses_zc.shape[0]):
            err_zc = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True)
            errcam_zc = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], False)
            # scale the output according to the ratio between poses in camera frame and poses in template frame in the training set
            val_zc = val_zc + err_zc
            valcam_zc = valcam_zc + errcam_zc

            val_zc_all.append(err_zc)
            valcam_zc_all.append(errcam_zc)
        val_zc = val_zc / poses_zc.shape[0]
        valcam_zc = valcam_zc / posescam_zc.shape[0]

        # generate results for multiple hypotheses
        poses_samples_all = []
        posescam_samples_all = []
        R_all = []
        poses_repro_all = []
        for batch_idx in range(poses2d_eval.shape[0] // batchsize):
            poses_samples_batch = []
            posescam_samples_batch = []
            poses_repro_batch = []
            for i in range(args.num_samples):
                # Fresh Gaussian latent code per hypothesis.
                z_test = np.random.normal(0, 1, (batchsize, args.latent_dim))
                posespred, campred = mmd_posenet.inference(sess, poses2d_eval[batch_idx * batchsize: (batch_idx + 1) * batchsize],
                                                             poses3d_eval[batch_idx * batchsize: (batch_idx + 1) * batchsize], z_test,
                                                             lr)
                posespred_reshape = np.reshape(posespred, [posespred.shape[0], 3, 16])
                poses_samples_batch.append(posespred)
                k = np.reshape(campred, [campred.shape[0], 2, 3])
                R = ops.compute_R(k)
                posespred_cam = np.matmul(R, posespred_reshape)
                posespred_cam_reshape = np.reshape(posespred_cam, [posespred_cam.shape[0], -1])
                posescam_samples_batch.append(posespred_cam_reshape)
                # 2D reprojection of the predicted 3D pose through the predicted camera.
                poses_repro = np.reshape(np.matmul(k, posespred_reshape), [posespred.shape[0], -1])
                poses_repro_batch.append(poses_repro)
            # Stack the hypotheses along axis 1: (batch, num_samples, dims).
            poses_samples_batch = np.stack(poses_samples_batch, axis=1)
            poses_samples_all.append(poses_samples_batch)
            posescam_samples_batch = np.stack(posescam_samples_batch, axis=1)
            posescam_samples_all.append(posescam_samples_batch)
            poses_repro_batch = np.stack(poses_repro_batch, axis=1)
            poses_repro_all.append(poses_repro_batch)
            R_all.append(R)
        poses_samples_all = np.concatenate(poses_samples_all, axis=0)
        posescam_samples_all = np.concatenate(posescam_samples_all, axis=0)
        poses_repro_all = np.concatenate(poses_repro_all, axis=0)
        R_all = np.concatenate(R_all, axis=0)

        # compute error for bh setting
        err = np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]])
        err_cam = np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]])
        for p in range(err.shape[0]):
            for s in range(args.num_samples):
                err[p, s] = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_samples_all[p:p + 1, s, :], True)
                err_cam[p, s] = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_samples_all[p:p + 1, s, :],
                                                False)  # scale the output according to the ratio between poses in camera
                # frame and poses in template frame in the training set
        # Best hypothesis = per-frame minimum over the sampled hypotheses.
        val_best = np.mean(np.min(err, axis=1))
        valcam_best = np.mean(np.min(err_cam, axis=1))
        val_best_all.append(np.min(err, axis=1))
        valcam_best_all.append(np.min(err_cam, axis=1))
        logger.info('{0:<15} {1:>15.2f} {2:>15.2f} {3:>15.2f} {4:>15.2f}'.format(action, valcam_zc, valcam_best, val_zc, val_best))
    valcam_zc_all = np.array(valcam_zc_all)
    val_zc_all = np.array(val_zc_all)
    valcam_best_all = np.concatenate(valcam_best_all)
    val_best_all = np.concatenate(val_best_all)
    logger.info('{0:<15} {1:>15.2f} {2:>15.2f} {3:>15.2f} {4:>15.2f}'.format('Average', np.mean(valcam_zc_all), np.mean(valcam_best_all),
                                                                              np.mean(val_zc_all), np.mean(val_best_all)))
    # the result for each column represents: protocol 1 (zc, bh), protocol 2 (zc, bh)
# the result for each column represents: protocol 1 (zc, bh), protocol 2 (zc, bh)
| [
"logging.getLogger",
"ops.compute_R",
"eval_functions.err_3dpe",
"numpy.array",
"base_module.discriminator",
"os.path.exists",
"numpy.mean",
"numpy.reshape",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.stack",
"numpy.matmul",
"numpy.vstack",
"numpy.concatenate",
"numpy.min",
... | [((310, 335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (333, 335), False, 'import argparse\n'), ((3182, 3225), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""./logging.conf"""'], {}), "('./logging.conf')\n", (3207, 3225), False, 'import logging, logging.config\n'), ((3235, 3254), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3252, 3254), False, 'import logging, logging.config\n'), ((3640, 3676), 'base_module.Posenet', 'Posenet', (['args.latent_dim', 'pose3d_dim'], {}), '(args.latent_dim, pose3d_dim)\n', (3647, 3676), False, 'from base_module import Posenet, Camnet, discriminator, Encoder\n'), ((3686, 3718), 'base_module.Camnet', 'Camnet', (['args.latent_dim', 'cam_dim'], {}), '(args.latent_dim, cam_dim)\n', (3692, 3718), False, 'from base_module import Posenet, Camnet, discriminator, Encoder\n'), ((3726, 3801), 'base_module.discriminator', 'discriminator', (['args.latent_dim_pose', 'args.latent_dim_kcs', 'args.d_output_dim'], {}), '(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim)\n', (3739, 3801), False, 'from base_module import Posenet, Camnet, discriminator, Encoder\n'), ((3812, 3853), 'base_module.Encoder', 'Encoder', (['args.latent_dim', 'args.latent_dim'], {}), '(args.latent_dim, args.latent_dim)\n', (3819, 3853), False, 'from base_module import Posenet, Camnet, discriminator, Encoder\n'), ((3868, 4123), 'mmdgan_mh_enc.Pose_mmdgan_enc', 'Pose_mmdgan_enc', (['posenet', 'camnet', 'disc', 'encoder', 'args.latent_dim', 'args.batchsize', 'log_dir', 'args.epochs', 'pose2d_dim', 'pose3d_dim', 'args.kernel', 'args.repro_weight', 'args.cam_weight', 'args.gp_weight', 'args.reg_weight', 'args.dot_weight', 'args.enc_weight'], {}), '(posenet, camnet, disc, encoder, args.latent_dim, args.\n batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim, args.kernel,\n args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight,\n args.dot_weight, args.enc_weight)\n', (3883, 4123), False, 
'from mmdgan_mh_enc import Pose_mmdgan_enc\n'), ((4164, 4180), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4178, 4180), True, 'import tensorflow as tf\n'), ((3131, 3154), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3145, 3154), False, 'import os\n'), ((3160, 3180), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3171, 3180), False, 'import os\n'), ((4225, 4250), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4235, 4250), True, 'import tensorflow as tf\n'), ((4306, 4346), 'os.path.join', 'os.path.join', (['args.load_path', 'model_name'], {}), '(args.load_path, model_name)\n', (4318, 4346), False, 'import os\n'), ((4358, 4427), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['load_dir'], {'latest_filename': '"""checkpoint"""'}), "(load_dir, latest_filename='checkpoint')\n", (4387, 4427), True, 'import tensorflow as tf\n'), ((10425, 10448), 'numpy.array', 'np.array', (['valcam_zc_all'], {}), '(valcam_zc_all)\n', (10433, 10448), True, 'import numpy as np\n'), ((10466, 10486), 'numpy.array', 'np.array', (['val_zc_all'], {}), '(val_zc_all)\n', (10474, 10486), True, 'import numpy as np\n'), ((10509, 10540), 'numpy.concatenate', 'np.concatenate', (['valcam_best_all'], {}), '(valcam_best_all)\n', (10523, 10540), True, 'import numpy as np\n'), ((10560, 10588), 'numpy.concatenate', 'np.concatenate', (['val_best_all'], {}), '(val_best_all)\n', (10574, 10588), True, 'import numpy as np\n'), ((6442, 6461), 'numpy.vstack', 'np.vstack', (['poses_zc'], {}), '(poses_zc)\n', (6451, 6461), True, 'import numpy as np\n'), ((6484, 6506), 'numpy.vstack', 'np.vstack', (['posescam_zc'], {}), '(posescam_zc)\n', (6493, 6506), True, 'import numpy as np\n'), ((9081, 9122), 'numpy.concatenate', 'np.concatenate', (['poses_samples_all'], {'axis': '(0)'}), '(poses_samples_all, axis=0)\n', (9095, 9122), True, 'import numpy as np\n'), ((9154, 9198), 'numpy.concatenate', 
'np.concatenate', (['posescam_samples_all'], {'axis': '(0)'}), '(posescam_samples_all, axis=0)\n', (9168, 9198), True, 'import numpy as np\n'), ((9225, 9264), 'numpy.concatenate', 'np.concatenate', (['poses_repro_all'], {'axis': '(0)'}), '(poses_repro_all, axis=0)\n', (9239, 9264), True, 'import numpy as np\n'), ((9281, 9310), 'numpy.concatenate', 'np.concatenate', (['R_all'], {'axis': '(0)'}), '(R_all, axis=0)\n', (9295, 9310), True, 'import numpy as np\n'), ((9365, 9431), 'numpy.zeros', 'np.zeros', (['[poses_samples_all.shape[0], poses_samples_all.shape[1]]'], {}), '([poses_samples_all.shape[0], poses_samples_all.shape[1]])\n', (9373, 9431), True, 'import numpy as np\n'), ((9450, 9516), 'numpy.zeros', 'np.zeros', (['[poses_samples_all.shape[0], poses_samples_all.shape[1]]'], {}), '([poses_samples_all.shape[0], poses_samples_all.shape[1]])\n', (9458, 9516), True, 'import numpy as np\n'), ((4694, 4721), 'os.path.basename', 'os.path.basename', (['ckpt_name'], {}), '(ckpt_name)\n', (4710, 4721), False, 'import os\n'), ((5623, 5661), 'numpy.zeros', 'np.zeros', (['[batchsize, args.latent_dim]'], {}), '([batchsize, args.latent_dim])\n', (5631, 5661), True, 'import numpy as np\n'), ((5969, 6011), 'numpy.reshape', 'np.reshape', (['poses', '[poses.shape[0], 3, 16]'], {}), '(poses, [poses.shape[0], 3, 16])\n', (5979, 6011), True, 'import numpy as np\n'), ((6028, 6065), 'numpy.reshape', 'np.reshape', (['cam', '[cam.shape[0], 2, 3]'], {}), '(cam, [cam.shape[0], 2, 3])\n', (6038, 6065), True, 'import numpy as np\n'), ((6082, 6098), 'ops.compute_R', 'ops.compute_R', (['k'], {}), '(k)\n', (6095, 6098), False, 'import ops\n'), ((6169, 6196), 'numpy.matmul', 'np.matmul', (['R', 'poses_reshape'], {}), '(R, poses_reshape)\n', (6178, 6196), True, 'import numpy as np\n'), ((6290, 6337), 'numpy.reshape', 'np.reshape', (['poses_cam', '[poses_cam.shape[0], -1]'], {}), '(poses_cam, [poses_cam.shape[0], -1])\n', (6300, 6337), True, 'import numpy as np\n'), ((8662, 8699), 'numpy.stack', 
'np.stack', (['poses_samples_batch'], {'axis': '(1)'}), '(poses_samples_batch, axis=1)\n', (8670, 8699), True, 'import numpy as np\n'), ((8796, 8836), 'numpy.stack', 'np.stack', (['posescam_samples_batch'], {'axis': '(1)'}), '(posescam_samples_batch, axis=1)\n', (8804, 8836), True, 'import numpy as np\n'), ((8933, 8968), 'numpy.stack', 'np.stack', (['poses_repro_batch'], {'axis': '(1)'}), '(poses_repro_batch, axis=1)\n', (8941, 8968), True, 'import numpy as np\n'), ((10090, 10109), 'numpy.min', 'np.min', (['err'], {'axis': '(1)'}), '(err, axis=1)\n', (10096, 10109), True, 'import numpy as np\n'), ((10141, 10164), 'numpy.min', 'np.min', (['err_cam'], {'axis': '(1)'}), '(err_cam, axis=1)\n', (10147, 10164), True, 'import numpy as np\n'), ((10194, 10213), 'numpy.min', 'np.min', (['err'], {'axis': '(1)'}), '(err, axis=1)\n', (10200, 10213), True, 'import numpy as np\n'), ((10246, 10269), 'numpy.min', 'np.min', (['err_cam'], {'axis': '(1)'}), '(err_cam, axis=1)\n', (10252, 10269), True, 'import numpy as np\n'), ((10677, 10699), 'numpy.mean', 'np.mean', (['valcam_zc_all'], {}), '(valcam_zc_all)\n', (10684, 10699), True, 'import numpy as np\n'), ((10701, 10725), 'numpy.mean', 'np.mean', (['valcam_best_all'], {}), '(valcam_best_all)\n', (10708, 10725), True, 'import numpy as np\n'), ((10800, 10819), 'numpy.mean', 'np.mean', (['val_zc_all'], {}), '(val_zc_all)\n', (10807, 10819), True, 'import numpy as np\n'), ((10821, 10842), 'numpy.mean', 'np.mean', (['val_best_all'], {}), '(val_best_all)\n', (10828, 10842), True, 'import numpy as np\n'), ((6676, 6738), 'eval_functions.err_3dpe', 'err_3dpe', (['poses3d_eval[p:p + 1, :]', 'poses_zc[p:p + 1, :]', '(True)'], {}), '(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True)\n', (6684, 6738), False, 'from eval_functions import err_3dpe\n'), ((6770, 6842), 'eval_functions.err_3dpe', 'err_3dpe', (['poses_3d_cam[p:p + 1, :]', '(1.1 * posescam_zc[p:p + 1, :])', '(False)'], {}), '(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], 
False)\n', (6778, 6842), False, 'from eval_functions import err_3dpe\n'), ((7647, 7699), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batchsize, args.latent_dim)'], {}), '(0, 1, (batchsize, args.latent_dim))\n', (7663, 7699), True, 'import numpy as np\n'), ((8034, 8084), 'numpy.reshape', 'np.reshape', (['posespred', '[posespred.shape[0], 3, 16]'], {}), '(posespred, [posespred.shape[0], 3, 16])\n', (8044, 8084), True, 'import numpy as np\n'), ((8160, 8205), 'numpy.reshape', 'np.reshape', (['campred', '[campred.shape[0], 2, 3]'], {}), '(campred, [campred.shape[0], 2, 3])\n', (8170, 8205), True, 'import numpy as np\n'), ((8226, 8242), 'ops.compute_R', 'ops.compute_R', (['k'], {}), '(k)\n', (8239, 8242), False, 'import ops\n'), ((8275, 8306), 'numpy.matmul', 'np.matmul', (['R', 'posespred_reshape'], {}), '(R, posespred_reshape)\n', (8284, 8306), True, 'import numpy as np\n'), ((8347, 8402), 'numpy.reshape', 'np.reshape', (['posespred_cam', '[posespred_cam.shape[0], -1]'], {}), '(posespred_cam, [posespred_cam.shape[0], -1])\n', (8357, 8402), True, 'import numpy as np\n'), ((8514, 8545), 'numpy.matmul', 'np.matmul', (['k', 'posespred_reshape'], {}), '(k, posespred_reshape)\n', (8523, 8545), True, 'import numpy as np\n'), ((9636, 9710), 'eval_functions.err_3dpe', 'err_3dpe', (['poses3d_eval[p:p + 1, :]', 'poses_samples_all[p:p + 1, s, :]', '(True)'], {}), '(poses3d_eval[p:p + 1, :], poses_samples_all[p:p + 1, s, :], True)\n', (9644, 9710), False, 'from eval_functions import err_3dpe\n'), ((9750, 9839), 'eval_functions.err_3dpe', 'err_3dpe', (['poses_3d_cam[p:p + 1, :]', '(1.1 * posescam_samples_all[p:p + 1, s, :])', '(False)'], {}), '(poses_3d_cam[p:p + 1, :], 1.1 * posescam_samples_all[p:p + 1, s, :\n ], False)\n', (9758, 9839), False, 'from eval_functions import err_3dpe\n')] |
from matplotlib import pyplot as plt
import numpy as np

from model_overview import define, define_from_file

# Load the model definition; per-mode eigendata comes from the .npz archives.
definition = define_from_file()
model = definition["model"]

points = model.point_matrix()
edges = model.edge_matrix()

# Render an arrow plot of the (scaled) eigenvector for each of the first
# three modes and save it as an SVG.
for index in (0, 1, 2):
    plt.clf()
    plt.axis('off')

    archive = np.load(f"data/overview_{index}.npz")
    eigenvalue = archive["eigenvalue"]
    eigenvector = archive["eigenvector"].reshape(-1, 3)
    print(index, eigenvalue)

    xs, ys = points[:, 0], points[:, 1]
    # Exaggerate displacements so the arrows are visible.
    scaled = eigenvector * 30
    dxs, dys = scaled[:, 0], scaled[:, 1]

    if index == 0:
        color = (1, 0, 0)
    else:
        color = (255 / 255, 165 / 255, 0)

    for x, y, dx, dy in zip(xs, ys, dxs, dys):
        # Skip arrows too short to be visible.
        if np.linalg.norm([dx, dy]) >= 1e-2:
            plt.gca().arrow(
                x, y, dx, dy,
                color=color,
                width=0.5,
            )

    plt.savefig(f"eigenvector-{index}.svg", dpi=500, transparent=True)
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.clf",
"numpy.linalg.norm",
"model_overview.define_from_file",
"matplotlib.pyplot.axis",
"numpy.load"
] | [((122, 140), 'model_overview.define_from_file', 'define_from_file', ([], {}), '()\n', (138, 140), False, 'from model_overview import define, define_from_file\n'), ((866, 875), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (873, 875), True, 'from matplotlib import pyplot as plt\n'), ((880, 895), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (888, 895), True, 'from matplotlib import pyplot as plt\n'), ((1055, 1092), 'numpy.load', 'np.load', (['f"""data/overview_{index}.npz"""'], {}), "(f'data/overview_{index}.npz')\n", (1062, 1092), True, 'import numpy as np\n'), ((1676, 1742), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""eigenvector-{index}.svg"""'], {'dpi': '(500)', 'transparent': '(True)'}), "(f'eigenvector-{index}.svg', dpi=500, transparent=True)\n", (1687, 1742), True, 'from matplotlib import pyplot as plt\n'), ((1450, 1474), 'numpy.linalg.norm', 'np.linalg.norm', (['[dx, dy]'], {}), '([dx, dy])\n', (1464, 1474), True, 'import numpy as np\n'), ((1512, 1521), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1519, 1521), True, 'from matplotlib import pyplot as plt\n')] |
import os
import unittest
from unittest.mock import patch
from shutil import rmtree
import numpy as np
from elf.io.extensions import h5py, z5py, pyn5, zarr, zarr_open, FILE_CONSTRUCTORS
class FileTestBase(unittest.TestCase):
    """Base class giving each test a scratch directory on disk.

    Subclasses get ``setUp``/``tearDown`` that create and remove ``tmp_dir``
    and a ``path_to`` helper for building paths inside it.
    """
    # todo: use https://github.com/clbarnes/tempcase/
    tmp_dir = "./tmp"

    def setUp(self):
        """Create the scratch directory (idempotent)."""
        os.makedirs(self.tmp_dir, exist_ok=True)

    def tearDown(self):
        """Remove the scratch directory, ignoring a missing/locked dir."""
        try:
            rmtree(self.tmp_dir)
        except OSError:
            pass

    def path_to(self, *args):
        """Return a path inside the scratch directory.

        Fix: use the ``tmp_dir`` class attribute instead of a duplicated
        ``"./tmp"`` literal, so subclasses overriding ``tmp_dir`` stay
        consistent with setUp/tearDown.
        """
        return os.path.join(self.tmp_dir, *args)
class FileTestMixin:
    """Backend-agnostic tests; subclasses provide ``ext`` and ``constructor``."""

    ext = None
    constructor = None

    def test_open(self):
        from elf.io import open_file
        reference = np.random.rand(128, 128)
        path = self.path_to("data" + self.ext)

        # Write a dataset using the backend under test ...
        with self.constructor(path, 'a') as handle:
            handle.create_dataset('data', data=reference)

        # ... then read it back through open_file, forcing that backend.
        with patch("elf.io.extensions.FILE_CONSTRUCTORS", {self.ext: self.constructor}):
            with open_file(path) as handle:
                loaded = handle['data'][:]

        self.assertEqual(reference.shape, loaded.shape)
        self.assertTrue(np.allclose(reference, loaded))

    def test_is_group(self):
        from elf.io import is_group
        handle = self.constructor(self.path_to("data" + self.ext), mode="a")
        group = handle.create_group('group')
        dataset = handle.create_dataset('dataset', data=np.ones((100, 100)),
                                       chunks=(10, 10))

        # Files and groups count as groups; datasets do not.
        self.assertTrue(is_group(handle))
        self.assertTrue(is_group(group))
        self.assertFalse(is_group(dataset))
@unittest.skipUnless(h5py, "Need h5py")
class TestH5pyFiles(FileTestBase, FileTestMixin):
    # HDF5 backend: files handled by h5py.File.
    ext = ".h5"
    # getattr with a default keeps the class importable when h5py is absent.
    constructor = getattr(h5py, "File", None)
@unittest.skipUnless(z5py, "Need z5py")
class TestZ5pyN5Files(FileTestBase, FileTestMixin):
    # N5 backend via z5py.
    ext = ".n5"
    # getattr with a default keeps the class importable when z5py is absent.
    constructor = getattr(z5py, "N5File", None)
@unittest.skipUnless(z5py, "Need z5py")
class TestZ5pyZarrFiles(FileTestBase, FileTestMixin):
    # Zarr backend via z5py.
    ext = ".zr"
    # getattr with a default keeps the class importable when z5py is absent.
    constructor = getattr(z5py, "ZarrFile", None)
@unittest.skipUnless(pyn5, "Need pyn5")
class TestPyn5Files(FileTestBase, FileTestMixin):
    # N5 backend via pyn5.
    ext = ".n5"
    # getattr with a default keeps the class importable when pyn5 is absent.
    constructor = getattr(pyn5, "File", None)
@unittest.skipUnless(zarr, "Need zarr")
class TestZarrFiles(FileTestBase, FileTestMixin):
    # Zarr backend via the zarr_open wrapper; staticmethod prevents the
    # function from being bound as an instance method on attribute access.
    ext = ".zr"
    constructor = staticmethod(zarr_open)
class TestBackendPreference(unittest.TestCase):
    # Checks that z5py wins when several backends can handle an extension.
    # NOTE(review): the extension keys look swapped relative to the method
    # names — pyn5 only handles ".n5" and zarr's native extension is ".zr";
    # confirm the intended mapping against elf.io.extensions.

    @unittest.skipUnless(z5py and zarr, "Need z5py and zarr")
    def test_z5py_over_zarr(self):
        self.assertTrue(issubclass(FILE_CONSTRUCTORS[".n5"], z5py.File))

    @unittest.skipUnless(z5py and pyn5, "Need z5py and pyn5")
    def test_z5py_over_pyn5(self):
        self.assertTrue(issubclass(FILE_CONSTRUCTORS[".zr"], z5py.File))
# todo: test loading N5 files using zarr-python

# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.allclose",
"elf.io.open_file",
"numpy.random.rand",
"os.makedirs",
"numpy.ones",
"os.path.join",
"unittest.skipUnless",
"shutil.rmtree",
"unittest.main",
"elf.io.is_group",
"unittest.mock.patch"
] | [((1561, 1599), 'unittest.skipUnless', 'unittest.skipUnless', (['h5py', '"""Need h5py"""'], {}), "(h5py, 'Need h5py')\n", (1580, 1599), False, 'import unittest\n'), ((1715, 1753), 'unittest.skipUnless', 'unittest.skipUnless', (['z5py', '"""Need z5py"""'], {}), "(z5py, 'Need z5py')\n", (1734, 1753), False, 'import unittest\n'), ((1873, 1911), 'unittest.skipUnless', 'unittest.skipUnless', (['z5py', '"""Need z5py"""'], {}), "(z5py, 'Need z5py')\n", (1892, 1911), False, 'import unittest\n'), ((2035, 2073), 'unittest.skipUnless', 'unittest.skipUnless', (['pyn5', '"""Need pyn5"""'], {}), "(pyn5, 'Need pyn5')\n", (2054, 2073), False, 'import unittest\n'), ((2189, 2227), 'unittest.skipUnless', 'unittest.skipUnless', (['zarr', '"""Need zarr"""'], {}), "(zarr, 'Need zarr')\n", (2208, 2227), False, 'import unittest\n'), ((2391, 2447), 'unittest.skipUnless', 'unittest.skipUnless', (['(z5py and zarr)', '"""Need z5py and zarr"""'], {}), "(z5py and zarr, 'Need z5py and zarr')\n", (2410, 2447), False, 'import unittest\n'), ((2562, 2618), 'unittest.skipUnless', 'unittest.skipUnless', (['(z5py and pyn5)', '"""Need z5py and pyn5"""'], {}), "(z5py and pyn5, 'Need z5py and pyn5')\n", (2581, 2618), False, 'import unittest\n'), ((2809, 2824), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2822, 2824), False, 'import unittest\n'), ((334, 374), 'os.makedirs', 'os.makedirs', (['self.tmp_dir'], {'exist_ok': '(True)'}), '(self.tmp_dir, exist_ok=True)\n', (345, 374), False, 'import os\n'), ((533, 561), 'os.path.join', 'os.path.join', (['"""./tmp"""', '*args'], {}), "('./tmp', *args)\n", (545, 561), False, 'import os\n'), ((728, 750), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (742, 750), True, 'import numpy as np\n'), ((425, 445), 'shutil.rmtree', 'rmtree', (['self.tmp_dir'], {}), '(self.tmp_dir)\n', (431, 445), False, 'from shutil import rmtree\n'), ((909, 983), 'unittest.mock.patch', 'patch', (['"""elf.io.extensions.FILE_CONSTRUCTORS"""', '{self.ext: 
self.constructor}'], {}), "('elf.io.extensions.FILE_CONSTRUCTORS', {self.ext: self.constructor})\n", (914, 983), False, 'from unittest.mock import patch\n'), ((1133, 1155), 'numpy.allclose', 'np.allclose', (['data', 'out'], {}), '(data, out)\n', (1144, 1155), True, 'import numpy as np\n'), ((1469, 1480), 'elf.io.is_group', 'is_group', (['f'], {}), '(f)\n', (1477, 1480), False, 'from elf.io import is_group\n'), ((1506, 1517), 'elf.io.is_group', 'is_group', (['g'], {}), '(g)\n', (1514, 1517), False, 'from elf.io import is_group\n'), ((1544, 1556), 'elf.io.is_group', 'is_group', (['ds'], {}), '(ds)\n', (1552, 1556), False, 'from elf.io import is_group\n'), ((1002, 1018), 'elf.io.open_file', 'open_file', (['fname'], {}), '(fname)\n', (1011, 1018), False, 'from elf.io import open_file\n'), ((1377, 1396), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (1384, 1396), True, 'import numpy as np\n')] |
'''
Created on 19 Dec 2014
@author: jadarve
'''
import pkg_resources
import numpy as np
import scipy.ndimage.interpolation as interp
import scipy.misc as misc
__all__ = ['flowToColor']

# load the optical flow color wheel texture
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — this module
# presumably targets an older SciPy; confirm before upgrading the dependency.
__colorWheelTexture = misc.imread(pkg_resources.resource_filename('pltutil.rsc', 'colorWheelTexture.png'), flatten=False)

# Contiguous per-channel copies of the wheel, sampled independently below.
__colorWheel_R = np.copy(__colorWheelTexture[:,:,0])
__colorWheel_G = np.copy(__colorWheelTexture[:,:,1])
__colorWheel_B = np.copy(__colorWheelTexture[:,:,2])
def flowToColor(flowField, maxFlow=1.0):
    """Encode a 2D optical-flow field as an RGB image via the color-wheel texture.

    Parameters
    ----------
    flowField : ndarray, shape (H, W, 2)
        Optical flow; channel 0 indexes the texture width, channel 1 its height.
    maxFlow : float, optional
        Flow magnitude mapped to the border of the color wheel (default 1.0).

    Returns
    -------
    ndarray, shape (H, W, 3), dtype uint8
        RGB visualization sampled (nearest neighbor) from the wheel texture.
    """
    # Module-level textures are only read here, so no `global` declarations
    # are needed (they were required only for assignment).
    h, w = __colorWheel_R.shape

    # Normalize flow from [-maxFlow, maxFlow] into [0, 1], then scale to
    # texel coordinates of the color-wheel texture.
    OF_m = (flowField + maxFlow) / float(2 * maxFlow)
    OF_m[:, :, 0] *= (w - 1)
    OF_m[:, :, 1] *= (h - 1)

    # map_coordinates expects coordinates ordered (row, col) = (y, x), so
    # flip the flow channels before sampling.
    coords = OF_m.reshape(-1, 2)[:, ::-1].T

    n_pix = flowField.shape[0] * flowField.shape[1]
    color_R = np.zeros(n_pix, dtype=np.uint8)
    color_G = np.zeros_like(color_R)
    color_B = np.zeros_like(color_R)

    interp.map_coordinates(__colorWheel_R, coords, color_R, order=0, mode='nearest', cval=0)
    interp.map_coordinates(__colorWheel_G, coords, color_G, order=0, mode='nearest', cval=0)
    interp.map_coordinates(__colorWheel_B, coords, color_B, order=0, mode='nearest', cval=0)

    out_shape = flowField.shape[0:2]
    flowColor = np.dstack((color_R.reshape(out_shape),
                           color_G.reshape(out_shape),
                           color_B.reshape(out_shape)))
    return flowColor
def colorWheel():
    """Return the raw color-wheel texture image used by flowToColor."""
    return __colorWheelTexture
| [
"numpy.copy",
"numpy.reshape",
"scipy.ndimage.interpolation.map_coordinates",
"pkg_resources.resource_filename",
"numpy.zeros",
"numpy.zeros_like"
] | [((374, 411), 'numpy.copy', 'np.copy', (['__colorWheelTexture[:, :, 0]'], {}), '(__colorWheelTexture[:, :, 0])\n', (381, 411), True, 'import numpy as np\n'), ((427, 464), 'numpy.copy', 'np.copy', (['__colorWheelTexture[:, :, 1]'], {}), '(__colorWheelTexture[:, :, 1])\n', (434, 464), True, 'import numpy as np\n'), ((480, 517), 'numpy.copy', 'np.copy', (['__colorWheelTexture[:, :, 2]'], {}), '(__colorWheelTexture[:, :, 2])\n', (487, 517), True, 'import numpy as np\n'), ((268, 339), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""pltutil.rsc"""', '"""colorWheelTexture.png"""'], {}), "('pltutil.rsc', 'colorWheelTexture.png')\n", (299, 339), False, 'import pkg_resources\n'), ((728, 753), 'numpy.zeros', 'np.zeros', (['flowField.shape'], {}), '(flowField.shape)\n', (736, 753), True, 'import numpy as np\n'), ((956, 975), 'numpy.zeros_like', 'np.zeros_like', (['OF_m'], {}), '(OF_m)\n', (969, 975), True, 'import numpy as np\n'), ((1053, 1118), 'numpy.zeros', 'np.zeros', (['(flowField.shape[0] * flowField.shape[1])'], {'dtype': 'np.uint8'}), '(flowField.shape[0] * flowField.shape[1], dtype=np.uint8)\n', (1061, 1118), True, 'import numpy as np\n'), ((1133, 1155), 'numpy.zeros_like', 'np.zeros_like', (['color_R'], {}), '(color_R)\n', (1146, 1155), True, 'import numpy as np\n'), ((1170, 1192), 'numpy.zeros_like', 'np.zeros_like', (['color_R'], {}), '(color_R)\n', (1183, 1192), True, 'import numpy as np\n'), ((1202, 1296), 'scipy.ndimage.interpolation.map_coordinates', 'interp.map_coordinates', (['__colorWheel_R', 'OF_flip', 'color_R'], {'order': '(0)', 'mode': '"""nearest"""', 'cval': '(0)'}), "(__colorWheel_R, OF_flip, color_R, order=0, mode=\n 'nearest', cval=0)\n", (1224, 1296), True, 'import scipy.ndimage.interpolation as interp\n'), ((1296, 1390), 'scipy.ndimage.interpolation.map_coordinates', 'interp.map_coordinates', (['__colorWheel_G', 'OF_flip', 'color_G'], {'order': '(0)', 'mode': '"""nearest"""', 'cval': '(0)'}), "(__colorWheel_G, 
OF_flip, color_G, order=0, mode=\n 'nearest', cval=0)\n", (1318, 1390), True, 'import scipy.ndimage.interpolation as interp\n'), ((1390, 1484), 'scipy.ndimage.interpolation.map_coordinates', 'interp.map_coordinates', (['__colorWheel_B', 'OF_flip', 'color_B'], {'order': '(0)', 'mode': '"""nearest"""', 'cval': '(0)'}), "(__colorWheel_B, OF_flip, color_B, order=0, mode=\n 'nearest', cval=0)\n", (1412, 1484), True, 'import scipy.ndimage.interpolation as interp\n'), ((1501, 1570), 'numpy.zeros', 'np.zeros', (['(flowField.shape[0], flowField.shape[1], 3)'], {'dtype': 'np.uint8'}), '((flowField.shape[0], flowField.shape[1], 3), dtype=np.uint8)\n', (1509, 1570), True, 'import numpy as np\n'), ((874, 936), 'numpy.reshape', 'np.reshape', (['OF_m', '(flowField.shape[0] * flowField.shape[1], 2)'], {}), '(OF_m, (flowField.shape[0] * flowField.shape[1], 2))\n', (884, 936), True, 'import numpy as np\n')] |
r"""
Custom PyTorch dataset that reads from the h5 datasets (see src.data module for more infos).
"""
import h5py
import random
import numpy as np
import torch
from src.common.image_util import stretched_image_from_skeleton_sequence
from src.common.joints import Joints
class Dataset(torch.utils.data.Dataset):
    r"""Lazily loads skeleton sequences from the h5 dataset.

    The dataset is not loaded entirely in memory: each access opens the h5
    file and reads a single sequence. This is slightly slower but very
    memory efficient, and the cost is mitigated when the data loader uses
    multiple workers.

    Attributes:
        - **data_path** (str): Directory containing ``skeleton.h5``.
        - **samples_names** (list): Sequence names of this dataset split.
        - **c_min** (float): Minimum coordinate after camera-subject normalization.
        - **c_max** (float): Maximum coordinate after camera-subject normalization.

    Methods:
        - *__getitem__(index)*: Returns the processed skeleton image and its label.
        - *__len__()*: Returns the number of elements in the dataset.
    """

    def __init__(self,
                 data_path,
                 samples_names,
                 c_min = None,
                 c_max = None):
        super(Dataset, self).__init__()
        self.data_path = data_path
        self.samples_names = samples_names
        self.c_min = c_min
        self.c_max = c_max

        # If neither bound is supplied, scan the whole split once to compute
        # the global min/max coordinates after normalization.
        if c_max is None and c_min is None:
            print("Computing c_min and c_max. This takes a while ...")
            c_min = []
            c_max = []
            with h5py.File(self.data_path + "skeleton.h5", 'r') as skeleton_dataset:
                for sample_name in self.samples_names:
                    skeleton = skeleton_dataset[sample_name]["skeleton"][:]
                    skeleton = self._normalize_skeleton(skeleton)
                    c_min.append(np.amin(skeleton))
                    c_max.append(np.amax(skeleton))
            self.c_min = np.amin(c_min)
            self.c_max = np.amax(c_max)
            print(f"Generated c_min {self.c_min} c_max {self.c_max}")

    @staticmethod
    def _normalize_skeleton(skeleton):
        r"""Apply the S-trans camera-subject normalization in place.

        See View Adaptive Network for details: every subject is translated
        into the coordinate system of subject 1's first-frame SPINEMID joint.

        Inputs:
            - **skeleton** (np array): Sequence of shape `(3, max_frame, 25, 2)`.

        Outputs:
            - Normalized skeleton of the same shape.
        """
        # Shape (3, 2): one translation vector per subject.
        trans_vector = skeleton[:, 0, Joints.SPINEMID, :]
        # Subject 2 is transposed into the coordinate system of subject 1.
        trans_vector[:, 1] = trans_vector[:, 0]
        return (skeleton.transpose(1, 2, 0, 3) - trans_vector).transpose(2, 0, 1, 3)

    def __getitem__(self, index):
        r"""Returns a processed sequence and label given an index.

        Inputs:
            - **index** (int): Used as an index for **samples_names**, which
              yields the sequence name addressing the h5 file.

        Outputs:
            - **skeleton_image** (np array): Skeleton sequence mapped to an
              image of shape `(3, 224, 224)`.
            - **y** (int): Class label of the sequence.
        """
        # The class label is encoded in the last 3 characters of the sample
        # name (1-based), hence the -1 to make it 0-based.
        y = int(self.samples_names[index][-3:]) - 1

        # Retrieve skeleton sequence of shape (3, max_frame, num_joint=25, 2).
        with h5py.File(self.data_path + "skeleton.h5", 'r') as skeleton_dataset:
            skeleton = skeleton_dataset[self.samples_names[index]]["skeleton"][:]

        skeleton = self._normalize_skeleton(skeleton)

        # Map the whole sequence to a single image, shape (3, 224, 224).
        skeleton_image = np.float32(
            stretched_image_from_skeleton_sequence(skeleton, self.c_min, self.c_max))
        return [skeleton_image], y

    def __len__(self):
        r"""Returns the number of elements in the dataset."""
        return len(self.samples_names)
"src.common.image_util.stretched_image_from_skeleton_sequence",
"numpy.amax",
"numpy.amin",
"h5py.File"
] | [((2987, 3001), 'numpy.amin', 'np.amin', (['c_min'], {}), '(c_min)\n', (2994, 3001), True, 'import numpy as np\n'), ((3027, 3041), 'numpy.amax', 'np.amax', (['c_max'], {}), '(c_max)\n', (3034, 3041), True, 'import numpy as np\n'), ((3955, 4001), 'h5py.File', 'h5py.File', (["(self.data_path + 'skeleton.h5')", '"""r"""'], {}), "(self.data_path + 'skeleton.h5', 'r')\n", (3964, 4001), False, 'import h5py\n'), ((4665, 4737), 'src.common.image_util.stretched_image_from_skeleton_sequence', 'stretched_image_from_skeleton_sequence', (['skeleton', 'self.c_min', 'self.c_max'], {}), '(skeleton, self.c_min, self.c_max)\n', (4703, 4737), False, 'from src.common.image_util import stretched_image_from_skeleton_sequence\n'), ((2315, 2361), 'h5py.File', 'h5py.File', (["(self.data_path + 'skeleton.h5')", '"""r"""'], {}), "(self.data_path + 'skeleton.h5', 'r')\n", (2324, 2361), False, 'import h5py\n'), ((2890, 2907), 'numpy.amin', 'np.amin', (['skeleton'], {}), '(skeleton)\n', (2897, 2907), True, 'import numpy as np\n'), ((2942, 2959), 'numpy.amax', 'np.amax', (['skeleton'], {}), '(skeleton)\n', (2949, 2959), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def numeric_better(x, h):
    """Central-difference approximation of d/dx cos(x) with step h."""
    ahead = np.cos(x + h)
    behind = np.cos(x - h)
    return (ahead - behind) / (2 * h)
def numeric_worse(x, h):
    """Forward-difference approximation of d/dx cos(x) with step h."""
    ahead = np.cos(x + h)
    here = np.cos(x)
    return (ahead - here) / h
# Compare the truncation error of the forward- and central-difference
# approximations of d/dx cos(x) at x = 1, over step sizes h = 1e-1 .. 1e-15.
x = 1
distances = [10 ** (-i) for i in range(1, 16)]
real_y = -np.sin(x)
numeric_better_y = [abs(numeric_better(x, distance) - real_y) for distance in distances]
numeric_worse_y = [abs(numeric_worse(x, distance) - real_y) for distance in distances]

# BUG FIX: both curves are absolute *errors* of the numeric schemes, but the
# legend previously labelled them "real derivative" / "numeric derivative".
plt.plot(distances, numeric_worse_y, label="forward-difference error")
plt.plot(distances, numeric_better_y, label="central-difference error")
plt.xlabel("h")
# Set the y axis label of the current axis.
plt.ylabel("absolute error of f'(x)")
# Set a title of the current axes.
plt.title("Numerical differentiation error vs. step size h")
# show a legend on the plot
plt.legend()
# Display a figure.
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((470, 531), 'matplotlib.pyplot.plot', 'plt.plot', (['distances', 'numeric_worse_y'], {'label': '"""real derivative"""'}), "(distances, numeric_worse_y, label='real derivative')\n", (478, 531), True, 'import matplotlib.pyplot as plt\n'), ((532, 597), 'matplotlib.pyplot.plot', 'plt.plot', (['distances', 'numeric_better_y'], {'label': '"""numeric derivative"""'}), "(distances, numeric_better_y, label='numeric derivative')\n", (540, 597), True, 'import matplotlib.pyplot as plt\n'), ((725, 740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""h"""'], {}), "('h')\n", (735, 740), True, 'import matplotlib.pyplot as plt\n'), ((785, 804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f`(x)"""'], {}), "('f`(x)')\n", (795, 804), True, 'import matplotlib.pyplot as plt\n'), ((840, 906), 'matplotlib.pyplot.title', 'plt.title', (['"""Two or more lines on same plot with suitable legends """'], {}), "('Two or more lines on same plot with suitable legends ')\n", (849, 906), True, 'import matplotlib.pyplot as plt\n'), ((935, 947), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (945, 947), True, 'import matplotlib.pyplot as plt\n'), ((968, 978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (976, 978), True, 'import matplotlib.pyplot as plt\n'), ((283, 292), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (289, 292), True, 'import numpy as np\n'), ((91, 104), 'numpy.cos', 'np.cos', (['(x + h)'], {}), '(x + h)\n', (97, 104), True, 'import numpy as np\n'), ((107, 120), 'numpy.cos', 'np.cos', (['(x - h)'], {}), '(x - h)\n', (113, 120), True, 'import numpy as np\n'), ((171, 184), 'numpy.cos', 'np.cos', (['(x + h)'], {}), '(x + h)\n', (177, 184), True, 'import numpy as np\n'), ((187, 196), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (193, 196), True, 'import numpy as np\n')] |
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import argparse
import os
import time
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras import layers
from tensorflow.python import ipu
# The IPU pipelining code below uses TF1 graph-mode constructs (placeholders,
# sessions), so switch off eager execution and TF2 behaviour up front.
tf.disable_eager_execution()
tf.disable_v2_behavior()
def parse_args():
    """Build the command line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add("--batch-size", type=int, default=32,
        help="The batch size.")
    add("--repeat-count", type=int, default=10,
        help="The number of times the pipeline will be executed for each step.")
    add("--epochs", type=float, default=50,
        help="Total number of epochs to train for.")
    add("--steps", type=int, default=None,
        help="Total number of steps to train for (overrides epochs).")
    add("--learning-rate", type=float, default=0.01,
        help="The learning rate used with stochastic gradient descent.")
    add("--batches-to-accumulate", type=int, default=16,
        help="How many batches to process before processing gradients and updating weights.")
    return parser.parse_args()
def create_dataset(args):
    """Prepare the MNIST training tf.data pipeline.

    Args:
        args: Parsed command line arguments; only ``args.batch_size`` is read.

    Returns:
        A tuple ``(num_examples, dataset)`` with the number of training
        examples and the batched, shuffled, repeating dataset.
    """
    train_data, _ = mnist.load_data()
    raw_images, raw_labels = train_data
    x_train = raw_images.astype("float32") / 255.0
    y_train = raw_labels.astype("int32")

    def sample_pairs():
        return zip(x_train, y_train)

    dtypes = (x_train.dtype, y_train.dtype)
    out_shapes = (x_train.shape[1:], y_train.shape[1:])
    num_examples = len(x_train)
    dataset = tf.data.Dataset.from_generator(sample_pairs, dtypes, out_shapes)

    # drop_remainder=True because XLA (and the compiled static IPU graph)
    # expects complete, fixed-size batches. Caching and prefetching keep the
    # host data feed from becoming the throughput bottleneck.
    dataset = dataset.batch(args.batch_size, drop_remainder=True)
    dataset = dataset.shuffle(num_examples)
    dataset = dataset.cache()
    dataset = dataset.repeat()
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return num_examples, dataset
def layer1_flatten(learning_rate, images, labels):
    """Flatten the input images; learning rate and labels pass through."""
    with tf.variable_scope("flatten"):
        flat = layers.Flatten()(images)
    return learning_rate, flat, labels
def layer2_dense256(learning_rate, activations, labels):
    """First fully connected layer: 256 ReLU units.

    BUG FIX: the variable scope was "flatten", copy-pasted from
    layer1_flatten; renamed to "dense256" to match the naming convention of
    layer3_dense128 / layer4_dense64 / layer5_dense32. NOTE(review): this
    changes the TensorFlow variable names, so checkpoints saved with the old
    scope will no longer load for this layer.
    """
    with tf.variable_scope("dense256"):
        activations = layers.Dense(256, activation=tf.nn.relu)(activations)
    return learning_rate, activations, labels
def layer3_dense128(learning_rate, activations, labels):
    """Fully connected layer with 128 ReLU units; other inputs pass through."""
    with tf.variable_scope("dense128"):
        dense = layers.Dense(128, activation=tf.nn.relu)
        activations = dense(activations)
    return learning_rate, activations, labels
def layer4_dense64(learning_rate, activations, labels):
    """Fully connected layer with 64 ReLU units; other inputs pass through."""
    with tf.variable_scope("dense64"):
        dense = layers.Dense(64, activation=tf.nn.relu)
        activations = dense(activations)
    return learning_rate, activations, labels
def layer5_dense32(learning_rate, activations, labels):
    """Fully connected layer with 32 ReLU units; other inputs pass through."""
    with tf.variable_scope("dense32"):
        dense = layers.Dense(32, activation=tf.nn.relu)
        activations = dense(activations)
    return learning_rate, activations, labels
def layer6_logits(learning_rate, activations, labels):
    """Linear projection to the 10 class logits (no activation)."""
    with tf.variable_scope("logits"):
        projection = layers.Dense(10)
        logits = projection(activations)
    return learning_rate, logits, labels
def layer7_cel(learning_rate, logits, labels):
    """Sparse softmax cross-entropy loss, reduced to its batch mean."""
    with tf.variable_scope("softmax_ce"):
        per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
    with tf.variable_scope("mean"):
        loss = tf.reduce_mean(per_example_loss)
    return learning_rate, loss
def optimizer_function(learning_rate, loss):
    """Optimizer wrapper used by the pipeline to automatically set up the
    gradient accumulation and weight update steps."""
    sgd = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    return ipu.pipelining_ops.OptimizerFunctionOutput(sgd, loss)
def pipelined_model(learning_rate):
    """Build the two-stage IPU pipeline op for the MNIST classifier.

    NOTE(review): relies on the module-level names ``args``, ``infeed_queue``
    and ``outfeed_queue`` created in the ``__main__`` block -- it must be
    compiled after those exist.
    """
    # Defines a pipelined model which is split across two stages
    def stage1(learning_rate, images, labels):
        # Stage 1: flatten plus the three largest dense layers.
        r = layer1_flatten(learning_rate, images, labels)
        r = layer2_dense256(*r)
        r = layer3_dense128(*r)
        r = layer4_dense64(*r)
        return r
    def stage2(*r):
        # Stage 2: remaining dense layers, logits and the loss.
        r = layer5_dense32(*r)
        r = layer6_logits(*r)
        r = layer7_cel(*r)
        return r
    pipeline_op = ipu.pipelining_ops.pipeline(
        computational_stages = [stage1, stage2],
        gradient_accumulation_count = args.batches_to_accumulate,
        repeat_count = args.repeat_count,
        inputs = [learning_rate],
        infeed_queue = infeed_queue,
        outfeed_queue = outfeed_queue,
        optimizer_function = optimizer_function,
        pipeline_schedule = ipu.pipelining_ops.PipelineSchedule.Grouped,
        outfeed_loss=True,
        name = "Pipeline")
    return pipeline_op
if __name__ == "__main__":
    args = parse_args()
    num_examples, dataset = create_dataset(args)
    num_train_examples = int(args.epochs * num_examples)
    # Create the data queues from/to IPU
    infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
    outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
    # With batch size BS, gradient accumulation count GAC and repeat count RPT,
    # at every step n = (BS * GAC * RPT) examples are used.
    examples_per_step = args.batch_size * args.batches_to_accumulate * args.repeat_count
    # In order to evaluate at least N total examples, do ceil(N / n) steps
    steps = args.steps if args.steps is not None else \
        (num_train_examples + examples_per_step - 1) // examples_per_step
    training_samples = steps * examples_per_step
    print(f'Steps {steps} x examples per step {examples_per_step} '
          f'(== {training_samples} training examples, {training_samples/num_examples} '
          f'epochs of {num_examples} examples)')
    # The learning rate is a host-side placeholder fed at each session run.
    with tf.device('cpu'):
        learning_rate = tf.placeholder(np.float32, [])
    # Compile the pipelined graph for the IPU device.
    with ipu.scopes.ipu_scope("/device:IPU:0"):
        compiled_model = ipu.ipu_compiler.compile(pipelined_model, inputs=[learning_rate])
    outfeed_op = outfeed_queue.dequeue()
    ipu.utils.move_variable_initialization_to_cpu()
    init_op = tf.global_variables_initializer()
    # Configure the IPU.
    ipu_configuration = ipu.config.IPUConfig()
    # Two IPUs: one per pipeline stage.
    ipu_configuration.auto_select_ipus = 2
    ipu_configuration.selection_order = ipu.utils.SelectionOrder.SNAKE
    ipu_configuration.configure_ipu_system()
    with tf.Session() as sess:
        # Initialize
        sess.run(init_op)
        sess.run(infeed_queue.initializer)
        # Run
        begin = time.time()
        for step in range(steps):
            sess.run(compiled_model, {learning_rate: args.learning_rate})
            # Read the outfeed for the training losses
            losses = sess.run(outfeed_op)
            if losses is not None and len(losses):
                epoch = float(examples_per_step * step / num_examples)
                # Log every 10th step plus the final step.
                if (step == (steps-1) or (step % 10) == 0):
                    print("Step {}, Epoch {:.1f}, Mean loss: {:.3f}".format(
                        step, epoch, np.mean(losses)))
        end = time.time()
        elapsed = end - begin
        samples_per_second = training_samples/elapsed
        print("Elapsed {}, {} samples/sec".format(elapsed, samples_per_second))
    print("Program ran successfully")
| [
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.python.ipu.config.IPUConfig",
"tensorflow.keras.layers.Dense",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.nn.sparse_softmax_cross_entropy... | [((264, 292), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (290, 292), True, 'import tensorflow.compat.v1 as tf\n'), ((293, 317), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (315, 317), True, 'import tensorflow.compat.v1 as tf\n'), ((387, 412), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (410, 412), False, 'import argparse\n'), ((1446, 1463), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1461, 1463), False, 'from tensorflow.keras.datasets import mnist\n'), ((1801, 1857), 'tensorflow.compat.v1.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['generator', 'types', 'shapes'], {}), '(generator, types, shapes)\n', (1831, 1857), True, 'import tensorflow.compat.v1 as tf\n'), ((4166, 4228), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4199, 4228), True, 'import tensorflow.compat.v1 as tf\n'), ((4240, 4299), 'tensorflow.python.ipu.pipelining_ops.OptimizerFunctionOutput', 'ipu.pipelining_ops.OptimizerFunctionOutput', (['optimizer', 'loss'], {}), '(optimizer, loss)\n', (4282, 4299), False, 'from tensorflow.python import ipu\n'), ((4766, 5159), 'tensorflow.python.ipu.pipelining_ops.pipeline', 'ipu.pipelining_ops.pipeline', ([], {'computational_stages': '[stage1, stage2]', 'gradient_accumulation_count': 'args.batches_to_accumulate', 'repeat_count': 'args.repeat_count', 'inputs': '[learning_rate]', 'infeed_queue': 'infeed_queue', 'outfeed_queue': 'outfeed_queue', 'optimizer_function': 'optimizer_function', 'pipeline_schedule': 'ipu.pipelining_ops.PipelineSchedule.Grouped', 'outfeed_loss': '(True)', 'name': '"""Pipeline"""'}), "(computational_stages=[stage1, stage2],\n gradient_accumulation_count=args.batches_to_accumulate, 
repeat_count=\n args.repeat_count, inputs=[learning_rate], infeed_queue=infeed_queue,\n outfeed_queue=outfeed_queue, optimizer_function=optimizer_function,\n pipeline_schedule=ipu.pipelining_ops.PipelineSchedule.Grouped,\n outfeed_loss=True, name='Pipeline')\n", (4793, 5159), False, 'from tensorflow.python import ipu\n'), ((5482, 5526), 'tensorflow.python.ipu.ipu_infeed_queue.IPUInfeedQueue', 'ipu.ipu_infeed_queue.IPUInfeedQueue', (['dataset'], {}), '(dataset)\n', (5517, 5526), False, 'from tensorflow.python import ipu\n'), ((5547, 5586), 'tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue', 'ipu.ipu_outfeed_queue.IPUOutfeedQueue', ([], {}), '()\n', (5584, 5586), False, 'from tensorflow.python import ipu\n'), ((6547, 6594), 'tensorflow.python.ipu.utils.move_variable_initialization_to_cpu', 'ipu.utils.move_variable_initialization_to_cpu', ([], {}), '()\n', (6592, 6594), False, 'from tensorflow.python import ipu\n'), ((6609, 6642), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6640, 6642), True, 'import tensorflow.compat.v1 as tf\n'), ((6693, 6715), 'tensorflow.python.ipu.config.IPUConfig', 'ipu.config.IPUConfig', ([], {}), '()\n', (6713, 6715), False, 'from tensorflow.python import ipu\n'), ((2449, 2477), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""flatten"""'], {}), "('flatten')\n", (2466, 2477), True, 'import tensorflow.compat.v1 as tf\n'), ((2644, 2672), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""flatten"""'], {}), "('flatten')\n", (2661, 2672), True, 'import tensorflow.compat.v1 as tf\n'), ((2868, 2897), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""dense128"""'], {}), "('dense128')\n", (2885, 2897), True, 'import tensorflow.compat.v1 as tf\n'), ((3092, 3120), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""dense64"""'], {}), "('dense64')\n", (3109, 3120), True, 'import tensorflow.compat.v1 as tf\n'), ((3314, 3342), 
'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""dense32"""'], {}), "('dense32')\n", (3331, 3342), True, 'import tensorflow.compat.v1 as tf\n'), ((3535, 3562), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""logits"""'], {}), "('logits')\n", (3552, 3562), True, 'import tensorflow.compat.v1 as tf\n'), ((3714, 3745), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""softmax_ce"""'], {}), "('softmax_ce')\n", (3731, 3745), True, 'import tensorflow.compat.v1 as tf\n'), ((3771, 3847), 'tensorflow.compat.v1.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (3817, 3847), True, 'import tensorflow.compat.v1 as tf\n'), ((3870, 3895), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""mean"""'], {}), "('mean')\n", (3887, 3895), True, 'import tensorflow.compat.v1 as tf\n'), ((3912, 3941), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (3926, 3941), True, 'import tensorflow.compat.v1 as tf\n'), ((6287, 6303), 'tensorflow.compat.v1.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (6296, 6303), True, 'import tensorflow.compat.v1 as tf\n'), ((6329, 6359), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['np.float32', '[]'], {}), '(np.float32, [])\n', (6343, 6359), True, 'import tensorflow.compat.v1 as tf\n'), ((6370, 6407), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (6390, 6407), False, 'from tensorflow.python import ipu\n'), ((6434, 6499), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['pipelined_model'], {'inputs': '[learning_rate]'}), '(pipelined_model, inputs=[learning_rate])\n', (6458, 6499), False, 'from tensorflow.python import ipu\n'), ((6885, 6897), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), 
'()\n', (6895, 6897), True, 'import tensorflow.compat.v1 as tf\n'), ((7027, 7038), 'time.time', 'time.time', ([], {}), '()\n', (7036, 7038), False, 'import time\n'), ((7572, 7583), 'time.time', 'time.time', ([], {}), '()\n', (7581, 7583), False, 'import time\n'), ((2501, 2517), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2515, 2517), False, 'from tensorflow.keras import layers\n'), ((2696, 2736), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': 'tf.nn.relu'}), '(256, activation=tf.nn.relu)\n', (2708, 2736), False, 'from tensorflow.keras import layers\n'), ((2921, 2961), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': 'tf.nn.relu'}), '(128, activation=tf.nn.relu)\n', (2933, 2961), False, 'from tensorflow.keras import layers\n'), ((3144, 3183), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': 'tf.nn.relu'}), '(64, activation=tf.nn.relu)\n', (3156, 3183), False, 'from tensorflow.keras import layers\n'), ((3366, 3405), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': 'tf.nn.relu'}), '(32, activation=tf.nn.relu)\n', (3378, 3405), False, 'from tensorflow.keras import layers\n'), ((3581, 3597), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (3593, 3597), False, 'from tensorflow.keras import layers\n'), ((7540, 7555), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (7547, 7555), True, 'import numpy as np\n')] |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Depth and Ego-Motion networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# TF-Slim is used to build the convolution stacks below.
slim = tf.contrib.slim
# Names of the two supported encoder/decoder architectures.
SIMPLE = 'simple'
RESNET = 'resnet'
ARCHITECTURES = [SIMPLE, RESNET]
# Constants scaling the raw network outputs into translation / rotation
# magnitudes (applied in egomotion_net / objectmotion_net).
SCALE_TRANSLATION = 0.001
SCALE_ROTATION = 0.01
# Disparity (inverse depth) values range from 0.01 to 10. Note that effectively,
# this is undone if depth normalization is used, which scales the values to
# have a mean of 1.
DISP_SCALING = 10
MIN_DISP = 0.01
# NOTE(review): not referenced in this chunk -- presumably used elsewhere
# in the file for weight-decay collection bookkeeping.
WEIGHT_DECAY_KEY = 'WEIGHT_DECAY'
# An egomotion vector is 3 translation + 3 rotation components.
EGOMOTION_VEC_SIZE = 6
def egomotion_net(image_stack, disp_bottleneck_stack, joint_encoder, seq_length,
                  weight_reg):
  """Predict ego-motion vectors from a stack of frames or embeddings.

  Args:
    image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.
    disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,
        seq_length * c_hidden] in order.
    joint_encoder: Determines if the same encoder is used for computing the
        bottleneck layer of both the egomotion and the depth prediction
        network. If enabled, disp_bottleneck_stack is used as input, and the
        encoding steps are skipped. If disabled, a separate encoder is defined
        on image_stack.
    seq_length: The sequence length used.
    weight_reg: The amount of weight regularization.

  Returns:
    Egomotion vectors with shape [B, seq_length - 1, 6].
  """
  num_egomotion_vecs = seq_length - 1
  with tf.variable_scope('pose_exp_net') as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        normalizer_fn=None,
                        weights_regularizer=slim.l2_regularizer(weight_reg),
                        normalizer_params=None,
                        activation_fn=tf.nn.relu,
                        outputs_collections=end_points_collection):
      if not joint_encoder:
        # Define separate encoder. If sharing, we can skip the encoding step,
        # as the bottleneck layer will already be passed as input.
        cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')
        cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')
        cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')
        cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')
        cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')
      with tf.variable_scope('pose'):
        inputs = disp_bottleneck_stack if joint_encoder else cnv5
        cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6')
        cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')
        pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs
        # 1x1 conv producing all egomotion components per spatial location;
        # no normalization or activation on the prediction head.
        egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred',
                                     stride=1, normalizer_fn=None,
                                     activation_fn=None)
        # Global average over the spatial dimensions (height, width).
        egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])
        egomotion_res = tf.reshape(
            egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])
        # Tinghui found that scaling by a small constant facilitates training.
        egomotion_scaled = tf.concat([egomotion_res[:, 0:3] * SCALE_TRANSLATION,
                                      egomotion_res[:, 3:6] * SCALE_ROTATION],
                                     axis=1)
  return egomotion_scaled
def objectmotion_net(image_stack, disp_bottleneck_stack, joint_encoder,
                     seq_length, weight_reg):
  """Predict object-motion vectors from a stack of frames or embeddings.

  Args:
    image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.
    disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,
        seq_length * c_hidden] in order.
    joint_encoder: Determines if the same encoder is used for computing the
        bottleneck layer of both the egomotion and the depth prediction
        network. If enabled, disp_bottleneck_stack is used as input, and the
        encoding steps are skipped. If disabled, a separate encoder is defined
        on image_stack.
    seq_length: The sequence length used.
    weight_reg: The amount of weight regularization.

  Returns:
    Object-motion vectors with shape [B, seq_length - 1, 6].
  """
  # The object-motion network is architecturally identical to the egomotion
  # network (same 'pose_exp_net' variable scope, layers and output scaling),
  # so delegate instead of maintaining a byte-for-byte duplicate of the
  # graph-building code.
  return egomotion_net(image_stack, disp_bottleneck_stack, joint_encoder,
                       seq_length, weight_reg)
def disp_net(architecture, image, use_skip, weight_reg, is_training):
  """Defines an encoder-decoder architecture for depth prediction."""
  if architecture not in ARCHITECTURES:
    raise ValueError('Unknown architecture.')
  encode = encoder(architecture)
  decode = decoder(architecture)
  # Encode image into a bottleneck plus intermediate activations.
  bottleneck, skip_connections = encode(image, weight_reg, is_training)
  # Decode the bottleneck to multi-scale disparity predictions.
  multiscale_disps_i = decode(target_image=image,
                              bottleneck=bottleneck,
                              weight_reg=weight_reg,
                              use_skip=use_skip,
                              skip_connections=skip_connections)
  return multiscale_disps_i, bottleneck
def encoder(architecture):
  """Return the encoder function for the given architecture name."""
  if architecture == RESNET:
    return encoder_resnet
  return encoder_simple
def decoder(architecture):
  """Return the decoder function for the given architecture name."""
  if architecture == RESNET:
    return decoder_resnet
  return decoder_simple
def encoder_simple(target_image, weight_reg, is_training):
  """Defines the old encoding architecture."""
  del is_training
  with slim.arg_scope([slim.conv2d],
                      normalizer_fn=None,
                      normalizer_params=None,
                      weights_regularizer=slim.l2_regularizer(weight_reg),
                      activation_fn=tf.nn.relu):
    # Each entry is (output depth, kernel size, scope prefix); every level is
    # a stride-2 conv followed by a stride-1 conv (scope suffixed with 'b').
    level_specs = [(32, 7, 'cnv1'), (64, 5, 'cnv2'), (128, 3, 'cnv3'),
                   (256, 3, 'cnv4'), (512, 3, 'cnv5'), (512, 3, 'cnv6'),
                   (512, 3, 'cnv7')]
    net = target_image
    level_outputs = []
    for depth, kernel, name in level_specs:
      net = slim.conv2d(net, depth, [kernel, kernel], stride=2, scope=name)
      net = slim.conv2d(net, depth, [kernel, kernel], stride=1,
                        scope=name + 'b')
      level_outputs.append(net)
    # Bottleneck is the deepest output; skip connections are the remaining
    # levels ordered from deepest (cnv6b) to shallowest (cnv1b).
    bottleneck = level_outputs[-1]
    skip_connections = tuple(reversed(level_outputs[:-1]))
    return bottleneck, skip_connections
def decoder_simple(target_image, bottleneck, weight_reg, use_skip,
                   skip_connections):
  """Defines the old depth decoder architecture.

  Args:
    target_image: Encoder input tensor with shape [B, h, w, C]; only its
      static height/width are used here.
    bottleneck: Bottleneck layer to be decoded.
    weight_reg: The amount of weight regularization.
    use_skip: Whether the encoder skip connections should be concatenated in.
    skip_connections: Tuple (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) as
      produced by encoder_simple.

  Returns:
    Disparities at 4 different scales, finest first.
  """
  h = target_image.get_shape()[1].value
  w = target_image.get_shape()[2].value
  (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) = skip_connections
  with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                      normalizer_fn=None,
                      normalizer_params=None,
                      weights_regularizer=slim.l2_regularizer(weight_reg),
                      activation_fn=tf.nn.relu):
    up7 = slim.conv2d_transpose(bottleneck, 512, [3, 3], stride=2,
                                scope='upcnv7')
    up7 = _resize_like(up7, cnv6b)
    if use_skip:
      i7_in = tf.concat([up7, cnv6b], axis=3)
    else:
      i7_in = up7
    icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7')
    up6 = slim.conv2d_transpose(icnv7, 512, [3, 3], stride=2, scope='upcnv6')
    up6 = _resize_like(up6, cnv5b)
    if use_skip:
      i6_in = tf.concat([up6, cnv5b], axis=3)
    else:
      i6_in = up6
    icnv6 = slim.conv2d(i6_in, 512, [3, 3], stride=1, scope='icnv6')
    up5 = slim.conv2d_transpose(icnv6, 256, [3, 3], stride=2, scope='upcnv5')
    up5 = _resize_like(up5, cnv4b)
    if use_skip:
      i5_in = tf.concat([up5, cnv4b], axis=3)
    else:
      i5_in = up5
    icnv5 = slim.conv2d(i5_in, 256, [3, 3], stride=1, scope='icnv5')
    up4 = slim.conv2d_transpose(icnv5, 128, [3, 3], stride=2, scope='upcnv4')
    up4 = _resize_like(up4, cnv3b)
    if use_skip:
      i4_in = tf.concat([up4, cnv3b], axis=3)
    else:
      i4_in = up4
    icnv4 = slim.conv2d(i4_in, 128, [3, 3], stride=1, scope='icnv4')
    # Sigmoid-bounded disparity head, rescaled into [MIN_DISP, DISP_SCALING
    # + MIN_DISP].
    disp4 = (slim.conv2d(icnv4, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp4')
             * DISP_SCALING + MIN_DISP)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int() is the documented replacement.
    disp4_up = tf.image.resize_bilinear(disp4, [int(h / 4), int(w / 4)],
                                        align_corners=True)
    up3 = slim.conv2d_transpose(icnv4, 64, [3, 3], stride=2, scope='upcnv3')
    up3 = _resize_like(up3, cnv2b)
    if use_skip:
      i3_in = tf.concat([up3, cnv2b, disp4_up], axis=3)
    else:
      # BUG FIX: tf.concat requires an explicit axis argument; the original
      # call omitted it and would raise at graph-construction time whenever
      # use_skip=False.
      i3_in = tf.concat([up3, disp4_up], axis=3)
    icnv3 = slim.conv2d(i3_in, 64, [3, 3], stride=1, scope='icnv3')
    disp3 = (slim.conv2d(icnv3, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp3')
             * DISP_SCALING + MIN_DISP)
    disp3_up = tf.image.resize_bilinear(disp3, [int(h / 2), int(w / 2)],
                                        align_corners=True)
    up2 = slim.conv2d_transpose(icnv3, 32, [3, 3], stride=2, scope='upcnv2')
    up2 = _resize_like(up2, cnv1b)
    if use_skip:
      i2_in = tf.concat([up2, cnv1b, disp3_up], axis=3)
    else:
      # BUG FIX: explicit axis, as above.
      i2_in = tf.concat([up2, disp3_up], axis=3)
    icnv2 = slim.conv2d(i2_in, 32, [3, 3], stride=1, scope='icnv2')
    disp2 = (slim.conv2d(icnv2, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp2')
             * DISP_SCALING + MIN_DISP)
    disp2_up = tf.image.resize_bilinear(disp2, [h, w], align_corners=True)
    up1 = slim.conv2d_transpose(icnv2, 16, [3, 3], stride=2, scope='upcnv1')
    # Full resolution: no encoder skip exists here, only the upsampled disp.
    i1_in = tf.concat([up1, disp2_up], axis=3)
    icnv1 = slim.conv2d(i1_in, 16, [3, 3], stride=1, scope='icnv1')
    disp1 = (slim.conv2d(icnv1, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp1')
             * DISP_SCALING + MIN_DISP)
  return [disp1, disp2, disp3, disp4]
def encoder_resnet(target_image, weight_reg, is_training):
  """Defines a ResNet18-based encoding architecture.

  This implementation follows <NAME>'s implementation of ResNet18 on GitHub:
  https://github.com/dalgu90/resnet-18-tensorflow

  Args:
    target_image: Input tensor with shape [B, h, w, 3] to encode.
    weight_reg: Parameter ignored.
    is_training: Whether the model is being trained or not.

  Returns:
    Tuple of tensors, with the first being the bottleneck layer as tensor of
    size [B, h_hid, w_hid, c_hid], and others being intermediate layers
    for building skip-connections.
  """
  del weight_reg  # Regularization is not applied in this encoder.
  encoder_filters = [64, 64, 128, 256, 512]
  stride = 2
  # Stem: 7x7 conv + BN + ReLU, then a 3x3/2 max-pool.
  with tf.variable_scope('conv1'):
    net = _conv(target_image, 7, encoder_filters[0], stride)
    net = _bn(net, is_train=is_training)
    econv1 = _relu(net)
  net = tf.nn.max_pool(econv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')
  # conv2_x: two plain residual blocks at the stem's width.
  net = _residual_block(net, is_training, name='conv2_1')
  econv2 = _residual_block(net, is_training, name='conv2_2')
  # conv3_x .. conv5_x: each stage downsamples/widens, then refines.
  stage_outputs = [econv2]
  for stage in (3, 4, 5):
    net = _residual_block_first(stage_outputs[-1], is_training,
                                encoder_filters[stage - 1], stride,
                                name='conv%d_1' % stage)
    stage_outputs.append(
        _residual_block(net, is_training, name='conv%d_2' % stage))
  econv3, econv4, econv5 = stage_outputs[1], stage_outputs[2], stage_outputs[3]
  return econv5, (econv4, econv3, econv2, econv1)
def decoder_resnet(target_image, bottleneck, weight_reg, use_skip,
                   skip_connections):
  """Defines the depth decoder architecture.

  Args:
    target_image: The original encoder input tensor with shape [B, h, w, 3].
      Just the shape information is used here.
    bottleneck: Bottleneck layer to be decoded.
    weight_reg: The amount of weight regularization.
    use_skip: Whether the passed skip connections econv1, econv2, econv3 and
      econv4 should be used.
    skip_connections: Tensors for building skip-connections.

  Returns:
    Disparities at 4 different scales.
  """
  (econv4, econv3, econv2, econv1) = skip_connections
  decoder_filters = [16, 32, 64, 128, 256]
  default_pad = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
  reg = slim.l2_regularizer(weight_reg) if weight_reg > 0.0 else None

  def _reflect_pad(net):
    # One pixel of REFLECT padding so the following 'VALID' 3x3 conv keeps
    # the spatial size without introducing zero-padding artifacts.
    return tf.pad(net, default_pad, mode='REFLECT')

  def _up_block(net, skip, num_filters, level):
    # Upsample x2, resize to the skip feature's size, optionally concatenate
    # the skip, then refine with a padded 3x3 conv.  Scope names match the
    # original layer-by-layer implementation, so checkpoints stay compatible.
    up = slim.conv2d_transpose(net, num_filters, [3, 3], stride=2,
                               scope='upconv%d' % level)
    up = _resize_like(up, skip)
    if use_skip:
      up = tf.concat([up, skip], axis=3)
    return slim.conv2d(_reflect_pad(up), num_filters, [3, 3], stride=1,
                       scope='iconv%d' % level, padding='VALID')

  def _disp_head(net, level):
    # Sigmoid-bounded disparity prediction, rescaled into
    # [MIN_DISP, DISP_SCALING + MIN_DISP].
    return (slim.conv2d(_reflect_pad(net), 1, [3, 3], stride=1,
                        activation_fn=tf.sigmoid, normalizer_fn=None,
                        scope='disp%d' % level, padding='VALID')
            * DISP_SCALING + MIN_DISP)

  with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                      normalizer_fn=None,
                      normalizer_params=None,
                      activation_fn=tf.nn.relu,
                      weights_regularizer=reg):
    iconv5 = _up_block(bottleneck, econv4, decoder_filters[4], level=5)
    iconv4 = _up_block(iconv5, econv3, decoder_filters[3], level=4)
    disp4 = _disp_head(iconv4, level=4)
    iconv3 = _up_block(iconv4, econv2, decoder_filters[2], level=3)
    disp3 = _disp_head(iconv3, level=3)
    iconv2 = _up_block(iconv3, econv1, decoder_filters[1], level=2)
    disp2 = _disp_head(iconv2, level=2)
    # Final level: upsample to the input resolution.  No skip connection
    # exists at full resolution, so no concat is performed here.
    upconv1 = slim.conv2d_transpose(iconv2, decoder_filters[0], [3, 3],
                                    stride=2, scope='upconv1')
    upconv1 = _resize_like(upconv1, target_image)
    iconv1 = slim.conv2d(_reflect_pad(upconv1), decoder_filters[0], [3, 3],
                         stride=1, scope='iconv1', padding='VALID')
    disp1 = _disp_head(iconv1, level=1)
  return [disp1, disp2, disp3, disp4]
def _residual_block_first(x, is_training, out_channel, strides, name='unit'):
  """First residual unit of a ResNet stage; may change width and/or stride."""
  in_channel = x.get_shape().as_list()[-1]
  with tf.variable_scope(name):
    # Shortcut: 1x1 projection when the channel count changes, identity when
    # nothing changes, and a strided max-pool identity otherwise.
    if in_channel != out_channel:
      shortcut = _conv(x, 1, out_channel, strides, name='shortcut')
    elif strides == 1:
      shortcut = tf.identity(x)
    else:
      pool_shape = [1, strides, strides, 1]
      shortcut = tf.nn.max_pool(x, pool_shape, pool_shape, 'VALID')
    # Residual branch: conv-bn-relu-conv-bn.
    residual = _conv(x, 3, out_channel, strides, name='conv_1')
    residual = _bn(residual, is_train=is_training, name='bn_1')
    residual = _relu(residual, name='relu_1')
    residual = _conv(residual, 3, out_channel, 1, name='conv_2')
    residual = _bn(residual, is_train=is_training, name='bn_2')
    # Merge the two paths and apply the final non-linearity.
    return _relu(residual + shortcut, name='relu_2')
def _residual_block(x, is_training, input_q=None, output_q=None, name='unit'):
  """Standard ResNet residual unit preserving width and resolution."""
  num_channel = x.get_shape().as_list()[-1]
  with tf.variable_scope(name):
    # Identity shortcut; residual branch is conv-bn-relu-conv-bn.
    residual = _conv(x, 3, num_channel, 1, input_q=input_q, output_q=output_q,
                     name='conv_1')
    residual = _bn(residual, is_train=is_training, name='bn_1')
    residual = _relu(residual, name='relu_1')
    residual = _conv(residual, 3, num_channel, 1, input_q=output_q,
                     output_q=output_q, name='conv_2')
    residual = _bn(residual, is_train=is_training, name='bn_2')
    # Merge with the untouched input and apply the final non-linearity.
    return _relu(residual + x, name='relu_2')
def _conv(x, filter_size, out_channel, stride, pad='SAME', input_q=None,
          output_q=None, name='conv'):
  """2-D convolution helper for the ResNet architecture."""
  # input_q/output_q must be given together or not at all.
  if (input_q is None) != (output_q is None):
    raise ValueError('Input/Output splits are not correctly given.')
  in_shape = x.get_shape()
  with tf.variable_scope(name):
    with tf.device('/CPU:0'):
      # Random-normal init scaled by fan-out (He-style).
      stddev = np.sqrt(2.0 / filter_size / filter_size / out_channel)
      kernel = tf.get_variable(
          'kernel', [filter_size, filter_size, in_shape[3], out_channel],
          tf.float32, initializer=tf.random_normal_initializer(stddev=stddev))
    # Register the kernel for weight decay exactly once.
    if kernel not in tf.get_collection(WEIGHT_DECAY_KEY):
      tf.add_to_collection(WEIGHT_DECAY_KEY, kernel)
    return tf.nn.conv2d(x, kernel, [1, stride, stride, 1], pad)
def _bn(x, is_train, name='bn'):
  """Batch-normalization helper for the ResNet architecture."""
  return tf.layers.batch_normalization(x, training=is_train, name=name)
def _relu(x, name=None, leakness=0.0):
  """ReLU (or leaky ReLU) helper for the ResNet architecture.

  Args:
    x: Input tensor.
    name: Optional op name; defaults to 'lrelu'/'relu' when not given.
    leakness: Slope for negative inputs; 0.0 yields a standard ReLU.
  """
  # BUG FIX: the original computed `name` but then passed the hard-coded
  # literals 'lrelu'/'relu' to the op, so caller-supplied names (e.g.
  # 'relu_1') were silently ignored.
  if leakness > 0.0:
    name = 'lrelu' if name is None else name
    return tf.maximum(x, x * leakness, name=name)
  else:
    name = 'relu' if name is None else name
    return tf.nn.relu(x, name=name)
def _resize_like(inputs, ref):
  """Bilinearly resizes `inputs` to `ref`'s spatial size (no-op if equal)."""
  in_h, in_w = inputs.get_shape()[1], inputs.get_shape()[2]
  ref_h, ref_w = ref.get_shape()[1], ref.get_shape()[2]
  if in_h == ref_h and in_w == ref_w:
    return inputs
  # TODO(casser): Other interpolation methods could be explored here.
  return tf.image.resize_bilinear(inputs, [ref_h.value, ref_w.value],
                                  align_corners=True)
| [
"tensorflow.nn.conv2d",
"tensorflow.device",
"tensorflow.nn.max_pool",
"numpy.sqrt",
"tensorflow.variable_scope",
"tensorflow.pad",
"tensorflow.nn.relu",
"tensorflow.reshape",
"tensorflow.image.resize_bilinear",
"tensorflow.identity",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.l... | [((15969, 16014), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [1, 1], [1, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [1, 1], [0, 0]])\n', (15980, 16014), True, 'import tensorflow as tf\n'), ((22403, 22465), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x'], {'training': 'is_train', 'name': 'name'}), '(x, training=is_train, name=name)\n', (22432, 22465), True, 'import tensorflow as tf\n'), ((2283, 2316), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pose_exp_net"""'], {}), "('pose_exp_net')\n", (2300, 2316), True, 'import tensorflow as tf\n'), ((5259, 5292), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pose_exp_net"""'], {}), "('pose_exp_net')\n", (5276, 5292), True, 'import tensorflow as tf\n'), ((13005, 13064), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['disp2', '[h, w]'], {'align_corners': '(True)'}), '(disp2, [h, w], align_corners=True)\n', (13029, 13064), True, 'import tensorflow as tf\n'), ((13158, 13192), 'tensorflow.concat', 'tf.concat', (['[up1, disp2_up]'], {'axis': '(3)'}), '([up1, disp2_up], axis=3)\n', (13167, 13192), True, 'import tensorflow as tf\n'), ((14213, 14239), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), "('conv1')\n", (14230, 14239), True, 'import tensorflow as tf\n'), ((14371, 14429), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['econv1', '[1, 3, 3, 1]', '[1, 2, 2, 1]', '"""SAME"""'], {}), "(econv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')\n", (14385, 14429), True, 'import tensorflow as tf\n'), ((16638, 16680), 'tensorflow.pad', 'tf.pad', (['i5_in', 'default_pad'], {'mode': '"""REFLECT"""'}), "(i5_in, default_pad, mode='REFLECT')\n", (16644, 16680), True, 'import tensorflow as tf\n'), ((17112, 17154), 'tensorflow.pad', 'tf.pad', (['i4_in', 'default_pad'], {'mode': '"""REFLECT"""'}), "(i4_in, default_pad, mode='REFLECT')\n", (17118, 17154), True, 'import tensorflow as tf\n'), ((17306, 17349), 'tensorflow.pad', 'tf.pad', 
(['iconv4', 'default_pad'], {'mode': '"""REFLECT"""'}), "(iconv4, default_pad, mode='REFLECT')\n", (17312, 17349), True, 'import tensorflow as tf\n'), ((17882, 17924), 'tensorflow.pad', 'tf.pad', (['i3_in', 'default_pad'], {'mode': '"""REFLECT"""'}), "(i3_in, default_pad, mode='REFLECT')\n", (17888, 17924), True, 'import tensorflow as tf\n'), ((18074, 18117), 'tensorflow.pad', 'tf.pad', (['iconv3', 'default_pad'], {'mode': '"""REFLECT"""'}), "(iconv3, default_pad, mode='REFLECT')\n", (18080, 18117), True, 'import tensorflow as tf\n'), ((18650, 18692), 'tensorflow.pad', 'tf.pad', (['i2_in', 'default_pad'], {'mode': '"""REFLECT"""'}), "(i2_in, default_pad, mode='REFLECT')\n", (18656, 18692), True, 'import tensorflow as tf\n'), ((18842, 18885), 'tensorflow.pad', 'tf.pad', (['iconv2', 'default_pad'], {'mode': '"""REFLECT"""'}), "(iconv2, default_pad, mode='REFLECT')\n", (18848, 18885), True, 'import tensorflow as tf\n'), ((19322, 19366), 'tensorflow.pad', 'tf.pad', (['upconv1', 'default_pad'], {'mode': '"""REFLECT"""'}), "(upconv1, default_pad, mode='REFLECT')\n", (19328, 19366), True, 'import tensorflow as tf\n'), ((19518, 19561), 'tensorflow.pad', 'tf.pad', (['iconv1', 'default_pad'], {'mode': '"""REFLECT"""'}), "(iconv1, default_pad, mode='REFLECT')\n", (19524, 19561), True, 'import tensorflow as tf\n'), ((20028, 20051), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (20045, 20051), True, 'import tensorflow as tf\n'), ((20938, 20961), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (20955, 20961), True, 'import tensorflow as tf\n'), ((21773, 21796), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (21790, 21796), True, 'import tensorflow as tf\n'), ((22230, 22282), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'kernel', '[1, stride, stride, 1]', 'pad'], {}), '(x, kernel, [1, stride, stride, 1], pad)\n', (22242, 22282), True, 'import tensorflow as tf\n'), ((22662, 22703), 
'tensorflow.maximum', 'tf.maximum', (['x', '(x * leakness)'], {'name': '"""lrelu"""'}), "(x, x * leakness, name='lrelu')\n", (22672, 22703), True, 'import tensorflow as tf\n'), ((22768, 22794), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': '"""relu"""'}), "(x, name='relu')\n", (22778, 22794), True, 'import tensorflow as tf\n'), ((23089, 23165), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['inputs', '[r_h.value, r_w.value]'], {'align_corners': '(True)'}), '(inputs, [r_h.value, r_w.value], align_corners=True)\n', (23113, 23165), True, 'import tensorflow as tf\n'), ((10557, 10588), 'tensorflow.concat', 'tf.concat', (['[up7, cnv6b]'], {'axis': '(3)'}), '([up7, cnv6b], axis=3)\n', (10566, 10588), True, 'import tensorflow as tf\n'), ((10839, 10870), 'tensorflow.concat', 'tf.concat', (['[up6, cnv5b]'], {'axis': '(3)'}), '([up6, cnv5b], axis=3)\n', (10848, 10870), True, 'import tensorflow as tf\n'), ((11121, 11152), 'tensorflow.concat', 'tf.concat', (['[up5, cnv4b]'], {'axis': '(3)'}), '([up5, cnv4b], axis=3)\n', (11130, 11152), True, 'import tensorflow as tf\n'), ((11403, 11434), 'tensorflow.concat', 'tf.concat', (['[up4, cnv3b]'], {'axis': '(3)'}), '([up4, cnv3b], axis=3)\n', (11412, 11434), True, 'import tensorflow as tf\n'), ((12007, 12048), 'tensorflow.concat', 'tf.concat', (['[up3, cnv2b, disp4_up]'], {'axis': '(3)'}), '([up3, cnv2b, disp4_up], axis=3)\n', (12016, 12048), True, 'import tensorflow as tf\n'), ((12075, 12101), 'tensorflow.concat', 'tf.concat', (['[up3, disp4_up]'], {}), '([up3, disp4_up])\n', (12084, 12101), True, 'import tensorflow as tf\n'), ((12643, 12684), 'tensorflow.concat', 'tf.concat', (['[up2, cnv1b, disp3_up]'], {'axis': '(3)'}), '([up2, cnv1b, disp3_up], axis=3)\n', (12652, 12684), True, 'import tensorflow as tf\n'), ((12711, 12737), 'tensorflow.concat', 'tf.concat', (['[up2, disp3_up]'], {}), '([up2, disp3_up])\n', (12720, 12737), True, 'import tensorflow as tf\n'), ((16554, 16590), 'tensorflow.concat', 
'tf.concat', (['[upconv5, econv4]'], {'axis': '(3)'}), '([upconv5, econv4], axis=3)\n', (16563, 16590), True, 'import tensorflow as tf\n'), ((17028, 17064), 'tensorflow.concat', 'tf.concat', (['[upconv4, econv3]'], {'axis': '(3)'}), '([upconv4, econv3], axis=3)\n', (17037, 17064), True, 'import tensorflow as tf\n'), ((17798, 17834), 'tensorflow.concat', 'tf.concat', (['[upconv3, econv2]'], {'axis': '(3)'}), '([upconv3, econv2], axis=3)\n', (17807, 17834), True, 'import tensorflow as tf\n'), ((18566, 18602), 'tensorflow.concat', 'tf.concat', (['[upconv2, econv1]'], {'axis': '(3)'}), '([upconv2, econv1], axis=3)\n', (18575, 18602), True, 'import tensorflow as tf\n'), ((21838, 21857), 'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (21847, 21857), True, 'import tensorflow as tf\n'), ((22127, 22162), 'tensorflow.get_collection', 'tf.get_collection', (['WEIGHT_DECAY_KEY'], {}), '(WEIGHT_DECAY_KEY)\n', (22144, 22162), True, 'import tensorflow as tf\n'), ((22171, 22217), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['WEIGHT_DECAY_KEY', 'kernel'], {}), '(WEIGHT_DECAY_KEY, kernel)\n', (22191, 22217), True, 'import tensorflow as tf\n'), ((3296, 3321), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pose"""'], {}), "('pose')\n", (3313, 3321), True, 'import tensorflow as tf\n'), ((3831, 3869), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['egomotion_pred', '[1, 2]'], {}), '(egomotion_pred, [1, 2])\n', (3845, 3869), True, 'import tensorflow as tf\n'), ((3895, 3966), 'tensorflow.reshape', 'tf.reshape', (['egomotion_avg', '[-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE]'], {}), '(egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])\n', (3905, 3966), True, 'import tensorflow as tf\n'), ((4089, 4195), 'tensorflow.concat', 'tf.concat', (['[egomotion_res[:, 0:3] * SCALE_TRANSLATION, egomotion_res[:, 3:6] *\n SCALE_ROTATION]'], {'axis': '(1)'}), '([egomotion_res[:, 0:3] * SCALE_TRANSLATION, egomotion_res[:, 3:6] *\n SCALE_ROTATION], 
axis=1)\n', (4098, 4195), True, 'import tensorflow as tf\n'), ((6272, 6297), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pose"""'], {}), "('pose')\n", (6289, 6297), True, 'import tensorflow as tf\n'), ((6807, 6845), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['egomotion_pred', '[1, 2]'], {}), '(egomotion_pred, [1, 2])\n', (6821, 6845), True, 'import tensorflow as tf\n'), ((6871, 6942), 'tensorflow.reshape', 'tf.reshape', (['egomotion_avg', '[-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE]'], {}), '(egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])\n', (6881, 6942), True, 'import tensorflow as tf\n'), ((7065, 7171), 'tensorflow.concat', 'tf.concat', (['[egomotion_res[:, 0:3] * SCALE_TRANSLATION, egomotion_res[:, 3:6] *\n SCALE_ROTATION]'], {'axis': '(1)'}), '([egomotion_res[:, 0:3] * SCALE_TRANSLATION, egomotion_res[:, 3:6] *\n SCALE_ROTATION], axis=1)\n', (7074, 7171), True, 'import tensorflow as tf\n'), ((11766, 11779), 'numpy.int', 'np.int', (['(h / 4)'], {}), '(h / 4)\n', (11772, 11779), True, 'import numpy as np\n'), ((11781, 11794), 'numpy.int', 'np.int', (['(w / 4)'], {}), '(w / 4)\n', (11787, 11794), True, 'import numpy as np\n'), ((12402, 12415), 'numpy.int', 'np.int', (['(h / 2)'], {}), '(h / 2)\n', (12408, 12415), True, 'import numpy as np\n'), ((12417, 12430), 'numpy.int', 'np.int', (['(w / 2)'], {}), '(w / 2)\n', (12423, 12430), True, 'import numpy as np\n'), ((20159, 20173), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (20170, 20173), True, 'import tensorflow as tf\n'), ((20207, 20285), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', '[1, strides, strides, 1]', '[1, strides, strides, 1]', '"""VALID"""'], {}), "(x, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID')\n", (20221, 20285), True, 'import tensorflow as tf\n'), ((22054, 22108), 'numpy.sqrt', 'np.sqrt', (['(2.0 / filter_size / filter_size / out_channel)'], {}), '(2.0 / filter_size / filter_size / out_channel)\n', (22061, 22108), True, 'import 
numpy as np\n')] |
# Keras
import keras
from keras import regularizers
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Embedding, LSTM
from keras.layers import Input, Flatten, Dropout, Activation, BatchNormalization
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.utils import np_utils, to_categorical
from keras.callbacks import (EarlyStopping, LearningRateScheduler,
ModelCheckpoint, TensorBoard, ReduceLROnPlateau)
from keras import losses, models, optimizers
from keras.activations import relu, softmax
from keras.layers import (Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout,
GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense)
# sklearn
from sklearn.metrics import confusion_matrix, accuracy_score
# Other
from tqdm import tqdm
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import pandas as pd
import seaborn as sns
import sys
import IPython.display as ipd # To play sound in the notebook
import warnings
# ignore warnings
# Silence warning output unless the user explicitly enabled warnings via
# the interpreter's -W command-line option.
if not sys.warnoptions:
    warnings.simplefilter("ignore")
from sklearn.preprocessing import LabelEncoder
# Module-level label encoder shared by the result-reporting helpers below;
# presumably fitted elsewhere before inverse_transform is called -- TODO confirm.
lb = LabelEncoder()
'''
1. Data Augmentation method
'''
def speedNpitch(data):
    """Speed and pitch tuning (in place).

    Resamples the signal by a random factor and writes the result back into
    `data`, zero-filling any tail.  Returns the same (mutated) array.
    """
    # Random stretch in [0.8, 1); the resulting resampling step is 1.2/stretch.
    stretch = np.random.uniform(low=0.8, high=1)
    step = 1.2 / stretch
    resampled = np.interp(np.arange(0, len(data), step),
                          np.arange(0, len(data)), data)
    keep = min(data.shape[0], resampled.shape[0])
    # Overwrite the input buffer: zero everything, then copy the prefix.
    data *= 0
    data[0:keep] = resampled[0:keep]
    return data
'''
2. Extracting the MFCC feature as an image (Matrix format).
'''
def prepare_data(df, n, aug, mfcc):
    """Extract a fixed-size 2D feature image (MFCC or log-melspectrogram) per file.

    Relies on module-level globals `sampling_rate`, `audio_duration`, `n_mfcc`
    and `n_melspec` -- presumably set before this is called (TODO confirm).

    Args:
        df: DataFrame with a `path` column of audio file paths.
        n: Height (number of feature bands) of each output matrix.
        aug: If 1, apply the speed/pitch augmentation to the raw audio.
        mfcc: If 1, extract MFCCs; otherwise extract a log-melspectrogram.

    Returns:
        Array of shape (len(df), n, 216, 1), one feature image per file.
    """
    X = np.empty(shape=(df.shape[0], n, 216, 1))
    input_length = sampling_rate * audio_duration
    cnt = 0
    for fname in tqdm(df.path):
        file_path = fname
        data, _ = librosa.load(file_path, sr=sampling_rate
                                ,res_type="kaiser_fast"
                                ,duration=2.5
                                ,offset=0.5
                               )
        # Random offset / Padding
        if len(data) > input_length:
            # Too long: take a random window of exactly input_length samples.
            max_offset = len(data) - input_length
            offset = np.random.randint(max_offset)
            data = data[offset:(input_length+offset)]
        else:
            if input_length > len(data):
                max_offset = input_length - len(data)
                offset = np.random.randint(max_offset)
            else:
                offset = 0
            # Too short: place the clip at a random offset, zero-pad the rest.
            data = np.pad(data, (offset, int(input_length) - len(data) - offset), "constant")
        # Augmentation?
        if aug == 1:
            data = speedNpitch(data)
        # which feature?
        if mfcc == 1:
            # MFCC extraction
            MFCC = librosa.feature.mfcc(data, sr=sampling_rate, n_mfcc=n_mfcc)
            MFCC = np.expand_dims(MFCC, axis=-1)
            X[cnt,] = MFCC
        else:
            # Log-melspectogram
            melspec = librosa.feature.melspectrogram(data, n_mels = n_melspec)
            logspec = librosa.amplitude_to_db(melspec)
            logspec = np.expand_dims(logspec, axis=-1)
            X[cnt,] = logspec
        cnt += 1
    return X
'''
3. Confusion matrix plot
'''
def print_confusion_matrix(confusion_matrix, class_names, figsize = (10,7), fontsize=14):
    '''Prints a confusion matrix, as returned by sklearn.metrics.confusion_matrix, as a heatmap.

    Arguments
    ---------
    confusion_matrix: numpy.ndarray
        The numpy.ndarray object returned from a call to sklearn.metrics.confusion_matrix.
        Similarly constructed ndarrays can also be used.
    class_names: list
        An ordered list of class names, in the order they index the given confusion matrix.
    figsize: tuple
        A 2-long tuple, the first value determining the horizontal size of the outputted figure,
        the second determining the vertical size. Defaults to (10,7).
    fontsize: int
        Font size for axes labels. Defaults to 14.

    Returns
    -------
    matplotlib.figure.Figure
        The resulting confusion matrix figure
    '''
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    fig = plt.figure(figsize=figsize)
    try:
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
    except ValueError:
        # Seaborn raises when asked to render non-integer cells with "%d".
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # BUG FIX: the docstring promises the resulting figure, but the original
    # function never returned it.
    return fig
'''
# 4. Create the 2D CNN model
'''
def get_2d_conv_model(n):
    '''Create a standard deep 2D convolutional neural network.

    Args:
        n: Number of feature bands (e.g. 30 MFCC bands); the input is a 2D
           matrix of n bands by 216 audio frames with one channel.

    Returns:
        A compiled Keras model producing 14 softmax class probabilities.
    '''
    nclass = 14
    inp = Input(shape=(n,216,1))  # 2D matrix of n feature bands by 216 audio length.
    # Four identical conv blocks (the original repeated this stack verbatim
    # four times): conv -> batch-norm -> ReLU -> max-pool -> dropout.
    x = inp
    for _ in range(4):
        x = Convolution2D(32, (4,10), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = MaxPool2D()(x)
        x = Dropout(rate=0.2)(x)
    # Classification head.
    x = Flatten()(x)
    x = Dense(64)(x)
    x = Dropout(rate=0.2)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dropout(rate=0.2)(x)
    out = Dense(nclass, activation=softmax)(x)
    model = models.Model(inputs=inp, outputs=out)
    opt = optimizers.Adam(0.001)
    model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['acc'])
    return model
'''
# 5. Other functions
'''
class get_results:
'''
We're going to create a class (blueprint template) for generating the results based on the various model approaches.
So instead of repeating the functions each time, we assign the results into on object with its associated variables
depending on each combination:
1) MFCC with no augmentation
2) MFCC with augmentation
3) Logmelspec with no augmentation
4) Logmelspec with augmentation
'''
def __init__(self, model_history, model ,X_test, y_test, labels):
self.model_history = model_history
self.model = model
self.X_test = X_test
self.y_test = y_test
self.labels = labels
def create_plot(self, model_history):
'''Check the logloss of both train and validation, make sure they are close and have plateau'''
plt.plot(model_history.history['loss'])
plt.plot(model_history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
def create_results(self, model):
'''predict on test set and get accuracy results'''
opt = optimizers.Adam(0.001)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
score = model.evaluate(X_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1]*100))
def confusion_results(self, X_test, y_test, labels, model):
'''plot confusion matrix results'''
preds = model.predict(X_test,
batch_size=16,
verbose=2)
preds=preds.argmax(axis=1)
preds = preds.astype(int).flatten()
preds = (lb.inverse_transform((preds)))
actual = y_test.argmax(axis=1)
actual = actual.astype(int).flatten()
actual = (lb.inverse_transform((actual)))
classes = labels
classes.sort()
c = confusion_matrix(actual, preds)
print_confusion_matrix(c, class_names = classes)
    def accuracy_results_gender(self, X_test, y_test, labels, model):
        '''Print out the accuracy score and confusion matrix heat map of the Gender classification results'''
        # Predicted labels: argmax over class probabilities, decoded via the
        # module-level LabelEncoder.
        preds = model.predict(X_test,
                                 batch_size=16,
                                 verbose=2)
        preds=preds.argmax(axis=1)
        preds = preds.astype(int).flatten()
        preds = (lb.inverse_transform((preds)))
        # Ground-truth labels, decoded the same way.
        actual = y_test.argmax(axis=1)
        actual = actual.astype(int).flatten()
        actual = (lb.inverse_transform((actual)))
        # print(accuracy_score(actual, preds))
        # Collapse the emotion-specific labels down to 'female'/'male' so the
        # score reflects gender classification only.
        actual = pd.DataFrame(actual).replace({'female_angry':'female'
                                       , 'female_disgust':'female'
                                       , 'female_fear':'female'
                                       , 'female_happy':'female'
                                       , 'female_sad':'female'
                                       , 'female_surprise':'female'
                                       , 'female_neutral':'female'
                                       , 'male_angry':'male'
                                       , 'male_fear':'male'
                                       , 'male_happy':'male'
                                       , 'male_sad':'male'
                                       , 'male_surprise':'male'
                                       , 'male_neutral':'male'
                                       , 'male_disgust':'male'
                                      })
        preds = pd.DataFrame(preds).replace({'female_angry':'female'
                                       , 'female_disgust':'female'
                                       , 'female_fear':'female'
                                       , 'female_happy':'female'
                                       , 'female_sad':'female'
                                       , 'female_surprise':'female'
                                       , 'female_neutral':'female'
                                       , 'male_angry':'male'
                                       , 'male_fear':'male'
                                       , 'male_happy':'male'
                                       , 'male_sad':'male'
                                       , 'male_surprise':'male'
                                       , 'male_neutral':'male'
                                       , 'male_disgust':'male'
                                      })
        # Class names taken from the collapsed ground truth (sorted in place).
        classes = actual.loc[:,0].unique()
        classes.sort()
        c = confusion_matrix(actual, preds)
        print(accuracy_score(actual, preds))
print_confusion_matrix(c, class_names = classes) | [
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.ylabel",
"librosa.feature.mfcc",
"keras.layers.Activation",
"keras.layers.Dense",
"librosa.load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.empty",
"keras.models.Model",
"pandas.DataFrame",
"warnings.simplefilter",
"... | [((1448, 1462), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1460, 1462), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1363, 1394), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1384, 1394), False, 'import warnings\n'), ((1630, 1664), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.8)', 'high': '(1)'}), '(low=0.8, high=1)\n', (1647, 1664), True, 'import numpy as np\n'), ((2045, 2085), 'numpy.empty', 'np.empty', ([], {'shape': '(df.shape[0], n, 216, 1)'}), '(shape=(df.shape[0], n, 216, 1))\n', (2053, 2085), True, 'import numpy as np\n'), ((2170, 2183), 'tqdm.tqdm', 'tqdm', (['df.path'], {}), '(df.path)\n', (2174, 2183), False, 'from tqdm import tqdm\n'), ((4563, 4633), 'pandas.DataFrame', 'pd.DataFrame', (['confusion_matrix'], {'index': 'class_names', 'columns': 'class_names'}), '(confusion_matrix, index=class_names, columns=class_names)\n', (4575, 4633), True, 'import pandas as pd\n'), ((4660, 4687), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4670, 4687), True, 'import matplotlib.pyplot as plt\n'), ((5070, 5094), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (5080, 5094), True, 'import matplotlib.pyplot as plt\n'), ((5099, 5128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (5109, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5296, 5320), 'keras.layers.Input', 'Input', ([], {'shape': '(n, 216, 1)'}), '(shape=(n, 216, 1))\n', (5301, 5320), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6285, 6322), 'keras.models.Model', 'models.Model', ([], {'inputs': 'inp', 'outputs': 'out'}), '(inputs=inp, outputs=out)\n', (6297, 6322), False, 'from keras import losses, models, 
optimizers\n'), ((6338, 6360), 'keras.optimizers.Adam', 'optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (6353, 6360), False, 'from keras import losses, models, optimizers\n'), ((2229, 2325), 'librosa.load', 'librosa.load', (['file_path'], {'sr': 'sampling_rate', 'res_type': '"""kaiser_fast"""', 'duration': '(2.5)', 'offset': '(0.5)'}), "(file_path, sr=sampling_rate, res_type='kaiser_fast', duration=\n 2.5, offset=0.5)\n", (2241, 2325), False, 'import librosa\n'), ((4715, 4754), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'annot': '(True)', 'fmt': '"""d"""'}), "(df_cm, annot=True, fmt='d')\n", (4726, 4754), True, 'import seaborn as sns\n'), ((5377, 5419), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(4, 10)'], {'padding': '"""same"""'}), "(32, (4, 10), padding='same')\n", (5390, 5419), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5432, 5452), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5450, 5452), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5464, 5482), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5474, 5482), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5494, 5505), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (5503, 5505), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5517, 5534), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (5524, 5534), False, 'from keras.layers import Convolution2D, 
GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5551, 5593), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(4, 10)'], {'padding': '"""same"""'}), "(32, (4, 10), padding='same')\n", (5564, 5593), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5604, 5624), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5622, 5624), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5636, 5654), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5646, 5654), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5666, 5677), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (5675, 5677), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5689, 5706), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (5696, 5706), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5723, 5765), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(4, 10)'], {'padding': '"""same"""'}), "(32, (4, 10), padding='same')\n", (5736, 5765), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5776, 5796), 'keras.layers.BatchNormalization', 
'BatchNormalization', ([], {}), '()\n', (5794, 5796), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5808, 5826), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5818, 5826), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5838, 5849), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (5847, 5849), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5861, 5878), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (5868, 5878), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5895, 5937), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(4, 10)'], {'padding': '"""same"""'}), "(32, (4, 10), padding='same')\n", (5908, 5937), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5948, 5968), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5966, 5968), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((5980, 5998), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5990, 5998), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6010, 
6021), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (6019, 6021), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6033, 6050), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (6040, 6050), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6067, 6076), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6074, 6076), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6088, 6097), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (6093, 6097), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6109, 6126), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.2)'}), '(rate=0.2)\n', (6116, 6126), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6138, 6158), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6156, 6158), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6170, 6188), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6180, 6188), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6200, 6217), 'keras.layers.Dropout', 'Dropout', ([], {'rate': 
'(0.2)'}), '(rate=0.2)\n', (6207, 6217), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((6236, 6269), 'keras.layers.Dense', 'Dense', (['nclass'], {'activation': 'softmax'}), '(nclass, activation=softmax)\n', (6241, 6269), False, 'from keras.layers import Convolution2D, GlobalAveragePooling2D, BatchNormalization, Flatten, Dropout, GlobalMaxPool2D, MaxPool2D, concatenate, Activation, Input, Dense\n'), ((7368, 7407), 'matplotlib.pyplot.plot', 'plt.plot', (["model_history.history['loss']"], {}), "(model_history.history['loss'])\n", (7376, 7407), True, 'import matplotlib.pyplot as plt\n'), ((7416, 7459), 'matplotlib.pyplot.plot', 'plt.plot', (["model_history.history['val_loss']"], {}), "(model_history.history['val_loss'])\n", (7424, 7459), True, 'import matplotlib.pyplot as plt\n'), ((7468, 7491), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (7477, 7491), True, 'import matplotlib.pyplot as plt\n'), ((7500, 7518), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7510, 7518), True, 'import matplotlib.pyplot as plt\n'), ((7527, 7546), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (7537, 7546), True, 'import matplotlib.pyplot as plt\n'), ((7555, 7602), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (7565, 7602), True, 'import matplotlib.pyplot as plt\n'), ((7611, 7621), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7619, 7621), True, 'import matplotlib.pyplot as plt\n'), ((7733, 7755), 'keras.optimizers.Adam', 'optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (7748, 7755), False, 'from keras import losses, models, optimizers\n'), ((8545, 8576), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual', 'preds'], {}), 
'(actual, preds)\n', (8561, 8576), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((10609, 10640), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual', 'preds'], {}), '(actual, preds)\n', (10625, 10640), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((2588, 2617), 'numpy.random.randint', 'np.random.randint', (['max_offset'], {}), '(max_offset)\n', (2605, 2617), True, 'import numpy as np\n'), ((3165, 3224), 'librosa.feature.mfcc', 'librosa.feature.mfcc', (['data'], {'sr': 'sampling_rate', 'n_mfcc': 'n_mfcc'}), '(data, sr=sampling_rate, n_mfcc=n_mfcc)\n', (3185, 3224), False, 'import librosa\n'), ((3244, 3273), 'numpy.expand_dims', 'np.expand_dims', (['MFCC'], {'axis': '(-1)'}), '(MFCC, axis=-1)\n', (3258, 3273), True, 'import numpy as np\n'), ((3382, 3436), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['data'], {'n_mels': 'n_melspec'}), '(data, n_mels=n_melspec)\n', (3412, 3436), False, 'import librosa\n'), ((3464, 3496), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['melspec'], {}), '(melspec)\n', (3487, 3496), False, 'import librosa\n'), ((3519, 3551), 'numpy.expand_dims', 'np.expand_dims', (['logspec'], {'axis': '(-1)'}), '(logspec, axis=-1)\n', (3533, 3551), True, 'import numpy as np\n'), ((10655, 10684), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['actual', 'preds'], {}), '(actual, preds)\n', (10669, 10684), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((2806, 2835), 'numpy.random.randint', 'np.random.randint', (['max_offset'], {}), '(max_offset)\n', (2823, 2835), True, 'import numpy as np\n'), ((9285, 9305), 'pandas.DataFrame', 'pd.DataFrame', (['actual'], {}), '(actual)\n', (9297, 9305), True, 'import pandas as pd\n'), ((9941, 9960), 'pandas.DataFrame', 'pd.DataFrame', (['preds'], {}), '(preds)\n', (9953, 9960), True, 'import pandas as pd\n')] |
# In many physics studies, you will need random numbers to be generated from a gaussian (normal) or uniform distribution
# this tutorial shows you how to do it using numpy
# for this you need to install numpy module (in case you have not installed it)
# to know how to install it using anaconda navigator, go to https://github.com/deepaksamuel/python-tutorials and see the README.md
# In c/c++, we use the #include statement. In python we use import statement to include modules
# the numpy module contains important mathematical functions that you will learn soon.
# First step, as always is to import numpy as np (np will now be a shortcut for numpy)
import numpy as np
# generate a series of 1000 random numbers from a gaussian distribution with mean 0, sigma 1
ran = np.random.normal(0,1,1000) #(mean, sigma, number of samples)
print(ran)
# generate a series of 1000 random numbers from a uniform distribution between 0 and 1
ran = np.random.uniform(0,1,1000) #(low, high, number of samples)
print(ran)
# assignment: generate a series of random numbers from an exponential distribution
# You can refer to https://docs.scipy.org/doc/numpy-1.14.1/reference/routines.random.html | [
"numpy.random.normal",
"numpy.random.uniform"
] | [((776, 804), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (792, 804), True, 'import numpy as np\n'), ((943, 972), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (960, 972), True, 'import numpy as np\n')] |
import sys
import getopt
import numpy as np
from cplate.libio import *
def simulate_permutation_null(cfg):
# Extract paths
y_path = cfg['data']['chrom_path'].strip().format(**cfg)
regions_path = cfg['data']['regions_path'].strip().format(**cfg)
null_path = cfg['data']['null_path'].strip().format(**cfg)
# Load reads data
Y = []
with open(y_path, "rb") as f:
for line in f:
Y.append(np.fromstring(line.strip(), sep=','))
# Load region type information
regionTypes = []
with open(regions_path, 'rb') as f:
for line in f:
regionTypes.append(np.fromstring(line.strip(), sep=' ', dtype=int))
for chrom in xrange(len(regionTypes)):
# Normalize region types
regionTypes[chrom] -= regionTypes[chrom].min()
# Iterate over unique regions
regionIDs = np.unique(regionTypes[chrom])
for ID in regionIDs:
region = np.where(regionTypes[chrom]==ID)[0]
region = slice(np.min(region), np.max(region))
n = np.ceil(np.sum(Y[chrom][region]))
nullRegion = np.random.multinomial(n, np.ones(region.stop - region.start)/
(region.stop - region.start + 0.0))
Y[chrom][region] = nullRegion
# Write simulated reads to null file
with open(null_path, 'wb') as f:
for y_null in Y:
np.savetxt(f, y_null[np.newaxis,:], fmt="%d", delimiter=',')
return 0
| [
"numpy.unique",
"numpy.ones",
"numpy.where",
"numpy.max",
"numpy.sum",
"numpy.savetxt",
"numpy.min"
] | [((814, 843), 'numpy.unique', 'np.unique', (['regionTypes[chrom]'], {}), '(regionTypes[chrom])\n', (823, 843), True, 'import numpy as np\n'), ((1321, 1382), 'numpy.savetxt', 'np.savetxt', (['f', 'y_null[np.newaxis, :]'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(f, y_null[np.newaxis, :], fmt='%d', delimiter=',')\n", (1331, 1382), True, 'import numpy as np\n'), ((885, 919), 'numpy.where', 'np.where', (['(regionTypes[chrom] == ID)'], {}), '(regionTypes[chrom] == ID)\n', (893, 919), True, 'import numpy as np\n'), ((942, 956), 'numpy.min', 'np.min', (['region'], {}), '(region)\n', (948, 956), True, 'import numpy as np\n'), ((958, 972), 'numpy.max', 'np.max', (['region'], {}), '(region)\n', (964, 972), True, 'import numpy as np\n'), ((995, 1019), 'numpy.sum', 'np.sum', (['Y[chrom][region]'], {}), '(Y[chrom][region])\n', (1001, 1019), True, 'import numpy as np\n'), ((1065, 1100), 'numpy.ones', 'np.ones', (['(region.stop - region.start)'], {}), '(region.stop - region.start)\n', (1072, 1100), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
def set_random_seed(seed):
if seed < 0:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
def worker_init_fn(worker_id):
"""The function is designed for pytorch multi-process dataloader.
Note that we use the pytorch random generator to generate a base_seed.
Please try to be consistent.
References:
https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
"""
base_seed = torch.IntTensor(1).random_().item()
# print(worker_id, base_seed)
np.random.seed(base_seed + worker_id)
| [
"torch.manual_seed",
"numpy.random.seed",
"random.seed",
"torch.IntTensor"
] | [((111, 128), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (122, 128), False, 'import random\n'), ((133, 153), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (147, 153), True, 'import numpy as np\n'), ((158, 181), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (175, 181), False, 'import torch\n'), ((634, 671), 'numpy.random.seed', 'np.random.seed', (['(base_seed + worker_id)'], {}), '(base_seed + worker_id)\n', (648, 671), True, 'import numpy as np\n'), ((560, 578), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (575, 578), False, 'import torch\n')] |
#!/usr/bin/env python
#
# Program to plot a list of extinction curves
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy as np
import matplotlib.pyplot as pyplot
import matplotlib
import astropy.units as u
from measure_extinction.extdata import ExtData
def get_elkejk_from_elv(x_band, elv_band, x, elv):
"""
Covert from E(l-V) to E(l-K)/E(J-K)
"""
kindx = np.argsort(np.absolute(x_band - 2.22))[0]
jindx = np.argsort(np.absolute(x_band - 1.25))[0]
elk = elv - elv_band[kindx]
elkejk = elk / (elv_band[jindx] - elv_band[kindx])
return elkejk
if __name__ == "__main__":
# commandline parser
parser = argparse.ArgumentParser()
parser.add_argument("filelist", help="file with list of curves to plot")
parser.add_argument(
"--rebin_fac", type=int, default=None, help="rebin factor for spectra"
)
parser.add_argument(
"--prevobs", help="plot previous observations", action="store_true"
)
parser.add_argument("--png", help="save figure as a png file", action="store_true")
parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true")
args = parser.parse_args()
filename = args.filelist
f = open(filename, "r")
file_lines = list(f)
extnames = []
extdatas = []
avs = []
for line in file_lines:
if (line.find("#") != 0) & (len(line) > 0):
name = line.rstrip()
extnames.append(name)
text = ExtData(filename="fits/%s" % name)
extdatas.append(text)
avs.append(text.columns["AV"][0])
fontsize = 18
font = {"size": fontsize}
matplotlib.rc("font", **font)
matplotlib.rc("lines", linewidth=1)
matplotlib.rc("axes", linewidth=2)
matplotlib.rc("xtick.major", width=2)
matplotlib.rc("xtick.minor", width=2)
matplotlib.rc("ytick.major", width=2)
matplotlib.rc("ytick.minor", width=2)
figsize = (10, 12)
fig, ax = pyplot.subplots(nrows=1, ncols=1, figsize=figsize)
sindxs = np.argsort(avs)
col_vals = ["b", "g", "r", "m", "c", "y"]
lin_vals = ["--", ":", "-."]
mod_x = 1.0 / np.arange(1.0, 40.0, 0.1)
for i in range(len(extnames)):
k = sindxs[i]
color = col_vals[i % 6]
curtype = "BAND"
gindxs, = np.where(extdatas[k].npts[curtype] > 0)
x_band = extdatas[k].waves[curtype][gindxs].to(u.micron).value
elv_band = extdatas[k].exts[curtype][gindxs]
for curtype in extdatas[k].waves.keys():
gindxs, = np.where(extdatas[k].npts[curtype] > 0)
x = extdatas[k].waves[curtype][gindxs].to(u.micron).value
y = extdatas[k].exts[curtype][gindxs]
yu = extdatas[k].uncs[curtype][gindxs]
y_new = get_elkejk_from_elv(x_band, elv_band, x, y)
if len(gindxs) < 20:
# plot small number of points (usually BANDS data) as
# points
ax.plot(x, y_new, "o", color=color, mfc="white")
else:
ax.plot(x, y_new, "-", color=color)
ax.set_yscale("linear")
ax.set_xscale("log")
# ax.set_xlim(1.0, 100.0)
# ax.set_ylim(-6, -1.0)
ax.set_ylabel(r"$E(\lambda - K)/E(J-K)$", fontsize=1.3 * fontsize)
ax.set_xlabel(r"$\lambda$ [$\mu m$]")
ax.tick_params("both", length=10, width=2, which="major")
ax.tick_params("both", length=5, width=1, which="minor")
fig.tight_layout()
save_str = "_mext_elkejk"
if args.png:
fig.savefig(args.filelist.replace(".dat", save_str + ".png"))
elif args.pdf:
fig.savefig(args.filelist.replace(".dat", save_str + ".pdf"))
else:
pyplot.show()
| [
"argparse.ArgumentParser",
"numpy.where",
"numpy.absolute",
"numpy.argsort",
"matplotlib.rc",
"measure_extinction.extdata.ExtData",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((710, 735), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (733, 735), False, 'import argparse\n'), ((1706, 1735), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (1719, 1735), False, 'import matplotlib\n'), ((1741, 1776), 'matplotlib.rc', 'matplotlib.rc', (['"""lines"""'], {'linewidth': '(1)'}), "('lines', linewidth=1)\n", (1754, 1776), False, 'import matplotlib\n'), ((1781, 1815), 'matplotlib.rc', 'matplotlib.rc', (['"""axes"""'], {'linewidth': '(2)'}), "('axes', linewidth=2)\n", (1794, 1815), False, 'import matplotlib\n'), ((1820, 1857), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick.major"""'], {'width': '(2)'}), "('xtick.major', width=2)\n", (1833, 1857), False, 'import matplotlib\n'), ((1862, 1899), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick.minor"""'], {'width': '(2)'}), "('xtick.minor', width=2)\n", (1875, 1899), False, 'import matplotlib\n'), ((1904, 1941), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick.major"""'], {'width': '(2)'}), "('ytick.major', width=2)\n", (1917, 1941), False, 'import matplotlib\n'), ((1946, 1983), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick.minor"""'], {'width': '(2)'}), "('ytick.minor', width=2)\n", (1959, 1983), False, 'import matplotlib\n'), ((2022, 2072), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': 'figsize'}), '(nrows=1, ncols=1, figsize=figsize)\n', (2037, 2072), True, 'import matplotlib.pyplot as pyplot\n'), ((2087, 2102), 'numpy.argsort', 'np.argsort', (['avs'], {}), '(avs)\n', (2097, 2102), True, 'import numpy as np\n'), ((2202, 2227), 'numpy.arange', 'np.arange', (['(1.0)', '(40.0)', '(0.1)'], {}), '(1.0, 40.0, 0.1)\n', (2211, 2227), True, 'import numpy as np\n'), ((2362, 2401), 'numpy.where', 'np.where', (['(extdatas[k].npts[curtype] > 0)'], {}), '(extdatas[k].npts[curtype] > 0)\n', (2370, 2401), True, 'import numpy as np\n'), ((451, 477), 'numpy.absolute', 'np.absolute', (['(x_band - 2.22)'], {}), 
'(x_band - 2.22)\n', (462, 477), True, 'import numpy as np\n'), ((505, 531), 'numpy.absolute', 'np.absolute', (['(x_band - 1.25)'], {}), '(x_band - 1.25)\n', (516, 531), True, 'import numpy as np\n'), ((1536, 1570), 'measure_extinction.extdata.ExtData', 'ExtData', ([], {'filename': "('fits/%s' % name)"}), "(filename='fits/%s' % name)\n", (1543, 1570), False, 'from measure_extinction.extdata import ExtData\n'), ((2598, 2637), 'numpy.where', 'np.where', (['(extdatas[k].npts[curtype] > 0)'], {}), '(extdatas[k].npts[curtype] > 0)\n', (2606, 2637), True, 'import numpy as np\n'), ((3736, 3749), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3747, 3749), True, 'import matplotlib.pyplot as pyplot\n')] |
from __future__ import print_function, division
from glob import glob
from random import random
from numpy import array
from geojson import load
from shapely.geometry import Polygon
import numpy as np
import os
def average_precision(truth_fp, test_fp):
IoU = lambda p1, p2: p1.intersection(p2).area / p1.union(p2).area
f = open(truth_fp)
truth_features = load(f, encoding='latin-1')
f = open(test_fp)
test_features = load(f, encoding='latin-1')
pos_det = 0
for truth_feature in truth_features['features']:
truth_poly = Polygon(truth_feature['geometry']['coordinates'][0])
for test_feature in test_features['features']:
test_poly = Polygon(test_feature['geometry']['coordinates'][0])
if test_poly.intersects(truth_poly):
pos_det += 1
return pos_det/len(test_features['features'])
def average_localization_error(truth_fp, test_fp):
IoU = lambda p1, p2: p1.intersection(p2).area / p1.union(p2).area
f = open(truth_fp)
truth_features = load(f, encoding='latin-1')
f = open(test_fp)
test_features = load(f, encoding='latin-1')
pos_det = 0
for truth_feature in truth_features['features']:
truth_poly = Polygon(truth_feature['geometry']['coordinates'][0])
for test_feature in test_features['features']:
test_poly = Polygon(test_feature['geometry']['coordinates'][0])
if 0 < IoU(test_poly, truth_poly) < 0.5:
pos_det += 1
return pos_det/len(test_features['features'])
def getPolys(geojson_path):
polyList = []
features = load(open(geojson_path), encoding='latin-1')
for f in features['features']:
geometry = f['geometry']['coordinates'][0]
polyType = f['geometry']['type']
if geometry:
if polyType == 'Polygon':
poly=Polygon(geometry)
polyList.append(poly)
return polyList
def score(test_geojson_path, truth_geojson_path):
# Define internal functions
IoU = lambda p1, p2: p1.intersection(p2).area/p1.union(p2).area
argmax = lambda iterable, func: max(iterable, key=func)
polygonize = lambda feature: Polygon(feature['geometry']['coordinates'][0])
# Convert geojson files of features/geometries to arrays of polygons
#test_features = load(open(test_geojson_path), encoding='latin-1')
#truth_features = load(open(truth_geojson_path), encoding='latin-1')
#test_polys = [polygonize(f) for f in test_features['features']]
#truth_polys = [polygonize(f) for f in truth_features['features']]
test_polys = getPolys(test_geojson_path)
truth_polys = getPolys(truth_geojson_path)
# Generate artifical confidences and sort
test_polys = [[random(), test_poly] for test_poly in test_polys]
test_polys = sorted(test_polys, key=lambda l:l[0], reverse=True)
# Find detections using threshold/argmax/IoU for test polygons
true_pos_count = 0
false_pos_count = 0
B = len(truth_polys)
M = len(test_polys)
for test_poly in test_polys:
IoUs = map(lambda x:IoU(test_poly[1],x),truth_polys)
maxIoU = max(IoUs)
threshold = 0.5
if maxIoU >= threshold:
true_pos_count += 1
# U=U\Bk? how do we insert argmax?
del truth_polys[np.argmax(IoUs)]
else:
false_pos_count += 1
false_neg_count = B - true_pos_count
print('True pos count: ', true_pos_count)
print('False pos count: ', false_pos_count)
print('False neg count: ', false_neg_count)
if (true_pos_count+false_pos_count) != 0:
precision = true_pos_count/(true_pos_count+false_pos_count)
else:
precision = 0
if (true_pos_count+false_neg_count) !=0:
recall = true_pos_count/(true_pos_count+false_neg_count)
return (precision, recall)
def createMarkupFile(fileSavePath, precisionList, recallList, f1ScoreList, pathList, truthFile, gitHubPath):
target = open(fileSavePath, 'w')
target.write('# Testing of {} ->\n'.format(truthFile))
target.write('## [Truth Polygon Map]({})\n'.format(os.path.join(gitHubPath,truthFile)))
target.write('Tests below sorted in order of F1Score \n')
target.write('\n')
sort_index = np.array(f1ScoreList).argsort()[::-1]
testCount = 1
for idx in sort_index:
target.write('# Test {} ->\n'.format(testCount))
target.write('## [Test Polygon Map]({})\n'.format(os.path.join(gitHubPath, pathList[idx])))
target.write('F1Score = {}\n'.format(f1ScoreList[idx]))
target.write('Precision = {}\n'.format(precisionList[idx]))
target.write('Recall = {}\n'.format(recallList[idx]))
testCount=testCount+1
target.write('\n')
target.close()
if __name__ == "__main__":
# DG sample submissions
#baseDirectory = '/Users/dlindenbaum/Documents/CosmiQCode_09282015/BEE-CSharp/Data/'
gitHubDirectory = 'https://github.com/toddstavish/BEE-CSharp/blob/master/data/'
evalFileName = 'Rio_Submission_Testing_CQW/rio_test_aoiResults.md'
for image_id in range(1,6):
truth_fp = ''.join(['Rio/rio_test_aoi',str(image_id),'.geojson'])
test_fp = ''.join(['Rio_Submission_Testing/Rio_sample_challenge_submission',str(image_id),'.geojson'])
print('truth_fp=%s' % truth_fp)
print('test_fp=%s' % test_fp)
precision, recall = score(test_fp, truth_fp)
print('Precision = ', precision)
print('Recall = ', recall)
# CosmiQ sample submissions
path = 'Rio_Hand_Truth_AOI1/*.geojson'
for test_fp in glob(path):
truth_fp = 'Rio/rio_test_aoi1.geojson'
print('truth_fp=%s' % truth_fp)
print('test_fp=%s' % test_fp)
precision, recall = score(test_fp, truth_fp)
print('Precision = ', precision)
print('Recall = ', recall)
# CosmiQ sample submissions
path = 'Rio_Submission_Testing_CQW/rio_test_aoi2*'
precisionList = []
recallList = []
f1ScoreList = []
pathList = glob(path)
for test_fp in pathList:
truth_fp = 'Rio/rio_test_aoi2.geojson'
print('truth_fp=%s' % truth_fp)
print('test_fp=%s' % test_fp)
precision, recall = score(test_fp, truth_fp)
if precision+recall != 0:
F1score = precision*recall/(precision+recall)
else:
F1score = 0
precisionList.append(precision)
recallList.append(recall)
f1ScoreList.append(F1score)
print('Precision = ', precision)
print('Recall = ', recall)
createMarkupFile(evalFileName, precisionList, recallList, f1ScoreList, pathList, truth_fp, gitHubDirectory)
| [
"geojson.load",
"os.path.join",
"numpy.argmax",
"numpy.array",
"shapely.geometry.Polygon",
"random.random",
"glob.glob"
] | [((368, 395), 'geojson.load', 'load', (['f'], {'encoding': '"""latin-1"""'}), "(f, encoding='latin-1')\n", (372, 395), False, 'from geojson import load\n'), ((438, 465), 'geojson.load', 'load', (['f'], {'encoding': '"""latin-1"""'}), "(f, encoding='latin-1')\n", (442, 465), False, 'from geojson import load\n'), ((1035, 1062), 'geojson.load', 'load', (['f'], {'encoding': '"""latin-1"""'}), "(f, encoding='latin-1')\n", (1039, 1062), False, 'from geojson import load\n'), ((1105, 1132), 'geojson.load', 'load', (['f'], {'encoding': '"""latin-1"""'}), "(f, encoding='latin-1')\n", (1109, 1132), False, 'from geojson import load\n'), ((5575, 5585), 'glob.glob', 'glob', (['path'], {}), '(path)\n', (5579, 5585), False, 'from glob import glob\n'), ((6019, 6029), 'glob.glob', 'glob', (['path'], {}), '(path)\n', (6023, 6029), False, 'from glob import glob\n'), ((556, 608), 'shapely.geometry.Polygon', 'Polygon', (["truth_feature['geometry']['coordinates'][0]"], {}), "(truth_feature['geometry']['coordinates'][0])\n", (563, 608), False, 'from shapely.geometry import Polygon\n'), ((1223, 1275), 'shapely.geometry.Polygon', 'Polygon', (["truth_feature['geometry']['coordinates'][0]"], {}), "(truth_feature['geometry']['coordinates'][0])\n", (1230, 1275), False, 'from shapely.geometry import Polygon\n'), ((2176, 2222), 'shapely.geometry.Polygon', 'Polygon', (["feature['geometry']['coordinates'][0]"], {}), "(feature['geometry']['coordinates'][0])\n", (2183, 2222), False, 'from shapely.geometry import Polygon\n'), ((688, 739), 'shapely.geometry.Polygon', 'Polygon', (["test_feature['geometry']['coordinates'][0]"], {}), "(test_feature['geometry']['coordinates'][0])\n", (695, 739), False, 'from shapely.geometry import Polygon\n'), ((1355, 1406), 'shapely.geometry.Polygon', 'Polygon', (["test_feature['geometry']['coordinates'][0]"], {}), "(test_feature['geometry']['coordinates'][0])\n", (1362, 1406), False, 'from shapely.geometry import Polygon\n'), ((2739, 2747), 'random.random', 
'random', ([], {}), '()\n', (2745, 2747), False, 'from random import random\n'), ((4103, 4138), 'os.path.join', 'os.path.join', (['gitHubPath', 'truthFile'], {}), '(gitHubPath, truthFile)\n', (4115, 4138), False, 'import os\n'), ((1854, 1871), 'shapely.geometry.Polygon', 'Polygon', (['geometry'], {}), '(geometry)\n', (1861, 1871), False, 'from shapely.geometry import Polygon\n'), ((3306, 3321), 'numpy.argmax', 'np.argmax', (['IoUs'], {}), '(IoUs)\n', (3315, 3321), True, 'import numpy as np\n'), ((4242, 4263), 'numpy.array', 'np.array', (['f1ScoreList'], {}), '(f1ScoreList)\n', (4250, 4263), True, 'import numpy as np\n'), ((4440, 4479), 'os.path.join', 'os.path.join', (['gitHubPath', 'pathList[idx]'], {}), '(gitHubPath, pathList[idx])\n', (4452, 4479), False, 'import os\n')] |
#!/usr/bin/python
###
#
# Transformation functions for the pinhole camera model.
# Author: <NAME> <<EMAIL>>
#
###
import math
import numpy as np
from mmath import *
### Transform from world coordinates to pixel coordinates
def worldToCamera(points, params, yOffset):
    """3d world coordinates (in mm) -> 3d camera coordinates (in mm).

    Builds the homogeneous extrinsic transform (translation composed with
    rotation, with yOffset added to ty) once, then applies it to every
    point's .world coordinates, storing the result in .camera.
    """
    extrinsic = np.dot(translationToHomogeneous([ params['tx'], params['ty']+yOffset, params['tz'] ]),
                       rotationToHomogeneous(params['rotationMatrix']))
    def apply(point):
        homogeneous = [ point.world[0], point.world[1], point.world[2], 1 ]
        return point._replace(camera = np.dot(extrinsic, homogeneous))
    return map(apply, points)
def projectPoints(points, f):
    """3d camera coordinates (in mm) -> sensor coordinates (2d, in mm).

    Perspective-projects each point's .camera coordinates onto the plane
    z = f and stores the result in .projectedSensor.
    """
    projection = np.array([ [ f, 0.0, 0.0, 0.0 ], [ 0.0, f, 0.0, 0.0 ], [ 0.0, 0.0, 1.0, 0.0 ] ], np.float64)
    def project(point):
        projected = np.dot(projection, point.camera)
        # Perspective division: scale so the third component becomes 1;
        # the result is a 2d vector from the center of the image sensor, in mm.
        projected = projected / projected[2]
        return point._replace(projectedSensor = projected)
    return map(project, points)
def sensorToNormal(points, pixelSize, resolution):
    """Sensor coordinates (2d, in mm) -> normalised image coordinates.

    Scales each point's .projectedSensor by 2/(sensor extent) so the sensor
    spans [-1, 1] in both axes; the result lands in .projectedNormal.
    """
    scaleX = 2.0 / (pixelSize[0] * resolution[0])
    scaleY = 2.0 / (pixelSize[1] * resolution[1])
    s2n = np.array([[scaleX, 0.0, 0.0], [0.0, scaleY, 0.0], [0.0, 0.0, 1.0]], np.float64)
    def rescale(point):
        homogeneous = [ point.projectedSensor[0], point.projectedSensor[1], 1.0 ]
        return point._replace(projectedNormal = np.dot(s2n, homogeneous))
    return map(rescale, points)
def distortPoints(points, kappa):
    """Normalised image coordinates -> distorted normalised image coordinates.

    Inverts the radial undistortion model by fixed-point iteration:
    starting from the undistorted point, the distorted estimate is refined
    nine times using correction = 1 / (1 - kappa * r^2).
    """
    def distort(u):
        d = u
        for _ in range(9):
            rd2 = (d[0]*d[0]) + (d[1]*d[1])
            correction = 1.0 / ( 1.0 - (kappa * rd2) )
            d = np.multiply(u, [ correction, correction, 1.0 ])
        return d
    def apply(point):
        return point._replace(distortedNormal = distort(point.projectedNormal))
    return map(apply, points)
def normalToPixel(points, resolution):
    """(Distorted) normalised image coordinates -> pixel coordinates.

    Maps the [-1, 1] normalised range onto [0, resolution], flipping the
    axes, for both the undistorted (.projectedNormal -> .projectedPixel)
    and distorted (.distortedNormal -> .distortedPixel) coordinates.
    """
    halfW = resolution[0] / 2.0
    halfH = resolution[1] / 2.0
    n2p = np.array([[-halfW, 0.0, halfW], [0.0, -halfH, halfH], [0.0, 0.0, 1.0]], np.float64)
    def toPixels(point):
        undistorted = [ point.projectedNormal[0], point.projectedNormal[1], 1.0 ]
        distorted = [ point.distortedNormal[0], point.distortedNormal[1], 1.0 ]
        return point._replace(projectedPixel = np.dot(n2p, undistorted),
                              distortedPixel = np.dot(n2p, distorted))
    return map(toPixels, points)
### Transform from pixel to world coordinates
def pixelToNormal(points, resolution):
    """Pixel coordinates -> normalised image coordinates.

    Inverts the normal->pixel mapping used by normalToPixel; the result
    [xd, yd, 1.0] is stored in .normal and may still carry lens distortion.
    """
    halfW = resolution[0] / 2.0
    halfH = resolution[1] / 2.0
    n2p = np.array([[-halfW, 0.0, halfW], [0.0, -halfH, halfH], [0.0, 0.0, 1.0]], np.float64)
    p2n = np.linalg.inv(n2p)
    def toNormal(point):
        homogeneous = np.array([ point.pixel[0], point.pixel[1], 1.0 ], np.float64)
        return point._replace(normal = np.dot(p2n, homogeneous))
    return map(toNormal, points)
def undistortPoints(points, kappa):
    """Normalised image coordinates -> undistorted normalised image coordinates.

    Applies the single-parameter radial correction factor 1 - kappa * r^2
    to the x and y components of each point's .normal.
    """
    def undistort(point):
        xd, yd = point.normal[0], point.normal[1]
        factor = 1.0 - ( kappa * ( (xd*xd) + (yd*yd) ) )
        return point._replace(normal = np.multiply(point.normal, [ factor, factor, 1.0 ]))
    return map(undistort, points)
def normalToSensor(points, resolution, pixelSize):
    """Normalised image coordinates -> sensor coordinates (2d, in mm).

    Inverts the sensor->normal scaling used by sensorToNormal; the result
    [xd, yd, 1.0] (mm, origin at the sensor center) goes into .sensor.
    """
    scaleX = 2.0 / (pixelSize[0] * resolution[0])
    scaleY = 2.0 / (pixelSize[1] * resolution[1])
    s2n = np.array([[scaleX, 0.0, 0.0], [0.0, scaleY, 0.0], [0.0, 0.0, 1.0]], np.float64)
    n2s = np.linalg.inv(s2n)
    def toSensor(point):
        homogeneous = np.array([ point.normal[0], point.normal[1], 1.0 ], np.float64)
        return point._replace(sensor = np.dot(n2s, homogeneous))
    return map(toSensor, points)
### Compose a few of the above together to make things easier to read
def pixelToSensor(points, resolution, pixelSize, kappa=0.0):
    """Pixel coordinates -> sensor coordinates: pixel -> normal -> undistort -> sensor."""
    asNormal = pixelToNormal(points, resolution)
    undistorted = undistortPoints(asNormal, kappa)
    return normalToSensor(undistorted, resolution, pixelSize)
def sensorToPixel(points, pixelSize, resolution, kappa=0.0):
    """Sensor coordinates (mm) -> pixel coordinates: sensor -> normal -> distort -> pixel."""
    asNormal = sensorToNormal(points, pixelSize, resolution)
    distorted = distortPoints(asNormal, kappa)
    return normalToPixel(distorted, resolution)
def worldToSensor(points, params, pixelSize, resolution, yOffset, kappa=0.0):
    # NOTE(review): despite the name, this produces *pixel* coordinates — the
    # body is identical to worldToPixel below, likely a copy-paste leftover.
    # Callers may rely on the current behavior, so it is left unchanged;
    # confirm intent before renaming or altering it.
    return sensorToPixel(projectPoints(worldToCamera(points, params, yOffset), params['f']), pixelSize, resolution, kappa)
def worldToPixel(points, params, pixelSize, resolution, yOffset, kappa=0.0):
    """World coordinates (mm) -> pixel coordinates via the full camera pipeline."""
    inCamera = worldToCamera(points, params, yOffset)
    onSensor = projectPoints(inCamera, params['f'])
    return sensorToPixel(onSensor, pixelSize, resolution, kappa)
# Distance from the origin in camera coordinates to the origin in world coordinates (in mm)
# NOTE(review): worldToCamera above uses params['ty'] + yOffset while the offset
# is *subtracted* here — verify whether the sign difference is intentional.
def cameraToWorldDistance(params, yOffset):
    # Euclidean norm of the translation component of the extrinsics.
    return np.linalg.norm([params['tx'], params['ty']-yOffset, params['tz']])
| [
"numpy.multiply",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.linalg.norm"
] | [((819, 908), 'numpy.array', 'np.array', (['[[f, 0.0, 0.0, 0.0], [0.0, f, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]', 'np.float64'], {}), '([[f, 0.0, 0.0, 0.0], [0.0, f, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], np\n .float64)\n', (827, 908), True, 'import numpy as np\n'), ((1413, 1557), 'numpy.array', 'np.array', (['[[2.0 / (pixelSize[0] * resolution[0]), 0.0, 0.0], [0.0, 2.0 / (pixelSize[1\n ] * resolution[1]), 0.0], [0.0, 0.0, 1.0]]', 'np.float64'], {}), '([[2.0 / (pixelSize[0] * resolution[0]), 0.0, 0.0], [0.0, 2.0 / (\n pixelSize[1] * resolution[1]), 0.0], [0.0, 0.0, 1.0]], np.float64)\n', (1421, 1557), True, 'import numpy as np\n'), ((2380, 2524), 'numpy.array', 'np.array', (['[[-resolution[0] / 2.0, 0.0, resolution[0] / 2.0], [0.0, -resolution[1] / \n 2.0, resolution[1] / 2.0], [0.0, 0.0, 1.0]]', 'np.float64'], {}), '([[-resolution[0] / 2.0, 0.0, resolution[0] / 2.0], [0.0, -\n resolution[1] / 2.0, resolution[1] / 2.0], [0.0, 0.0, 1.0]], np.float64)\n', (2388, 2524), True, 'import numpy as np\n'), ((2972, 3116), 'numpy.array', 'np.array', (['[[-resolution[0] / 2.0, 0.0, resolution[0] / 2.0], [0.0, -resolution[1] / \n 2.0, resolution[1] / 2.0], [0.0, 0.0, 1.0]]', 'np.float64'], {}), '([[-resolution[0] / 2.0, 0.0, resolution[0] / 2.0], [0.0, -\n resolution[1] / 2.0, resolution[1] / 2.0], [0.0, 0.0, 1.0]], np.float64)\n', (2980, 3116), True, 'import numpy as np\n'), ((3107, 3125), 'numpy.linalg.inv', 'np.linalg.inv', (['n2p'], {}), '(n2p)\n', (3120, 3125), True, 'import numpy as np\n'), ((3983, 4127), 'numpy.array', 'np.array', (['[[2.0 / (pixelSize[0] * resolution[0]), 0.0, 0.0], [0.0, 2.0 / (pixelSize[1\n ] * resolution[1]), 0.0], [0.0, 0.0, 1.0]]', 'np.float64'], {}), '([[2.0 / (pixelSize[0] * resolution[0]), 0.0, 0.0], [0.0, 2.0 / (\n pixelSize[1] * resolution[1]), 0.0], [0.0, 0.0, 1.0]], np.float64)\n', (3991, 4127), True, 'import numpy as np\n'), ((4118, 4136), 'numpy.linalg.inv', 'np.linalg.inv', (['s2n'], {}), '(s2n)\n', (4131, 4136), True, 'import numpy as np\n'), 
((5465, 5533), 'numpy.linalg.norm', 'np.linalg.norm', (["[params['tx'], params['ty'] - yOffset, params['tz']]"], {}), "([params['tx'], params['ty'] - yOffset, params['tz']])\n", (5479, 5533), True, 'import numpy as np\n'), ((1031, 1066), 'numpy.dot', 'np.dot', (['cameraToPlane', 'point.camera'], {}), '(cameraToPlane, point.camera)\n', (1037, 1066), True, 'import numpy as np\n'), ((2081, 2126), 'numpy.multiply', 'np.multiply', (['u', '[correction, correction, 1.0]'], {}), '(u, [correction, correction, 1.0])\n', (2092, 2126), True, 'import numpy as np\n'), ((3168, 3227), 'numpy.array', 'np.array', (['[point.pixel[0], point.pixel[1], 1.0]', 'np.float64'], {}), '([point.pixel[0], point.pixel[1], 1.0], np.float64)\n', (3176, 3227), True, 'import numpy as np\n'), ((4179, 4240), 'numpy.array', 'np.array', (['[point.normal[0], point.normal[1], 1.0]', 'np.float64'], {}), '([point.normal[0], point.normal[1], 1.0], np.float64)\n', (4187, 4240), True, 'import numpy as np\n'), ((583, 657), 'numpy.dot', 'np.dot', (['worldToCamera', '[point.world[0], point.world[1], point.world[2], 1]'], {}), '(worldToCamera, [point.world[0], point.world[1], point.world[2], 1])\n', (589, 657), True, 'import numpy as np\n'), ((1688, 1702), 'numpy.dot', 'np.dot', (['s2n', 'p'], {}), '(s2n, p)\n', (1694, 1702), True, 'import numpy as np\n'), ((2727, 2741), 'numpy.dot', 'np.dot', (['n2p', 'p'], {}), '(n2p, p)\n', (2733, 2741), True, 'import numpy as np\n'), ((2760, 2774), 'numpy.dot', 'np.dot', (['n2p', 'd'], {}), '(n2p, d)\n', (2766, 2774), True, 'import numpy as np\n'), ((3305, 3319), 'numpy.dot', 'np.dot', (['p2n', 'p'], {}), '(p2n, p)\n', (3311, 3319), True, 'import numpy as np\n'), ((3755, 3811), 'numpy.multiply', 'np.multiply', (['point.normal', '[correction, correction, 1.0]'], {}), '(point.normal, [correction, correction, 1.0])\n', (3766, 3811), True, 'import numpy as np\n'), ((4318, 4332), 'numpy.dot', 'np.dot', (['n2s', 'p'], {}), '(n2s, p)\n', (4324, 4332), True, 'import numpy as np\n')] |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import urllib
from typing import List, Dict, Tuple
import pandas as pd
import base64
import requests
import dill
import copy
import numpy as np
from tldextract import TLDExtract
from bs4 import BeautifulSoup
# --- Runtime setup -----------------------------------------------------------
requests.packages.urllib3.disable_warnings()
dill.settings['recurse'] = True
# TLD extractor that never fetches the public-suffix list over the network.
no_fetch_extract = TLDExtract(suffix_list_urls=None, cache_dir=False)
VERSION = get_demisto_version_as_str()
# NOTE(review): lexicographic string comparison of version parts — this breaks
# for multi-digit components (e.g. '10' < '6'); confirm against the platform's
# actual version format.
if VERSION[0] + '.' + VERSION[2] >= '6.5':
    NEW_DEMISTO_VERSION = True
else:
    NEW_DEMISTO_VERSION = False
# --- Model versioning --------------------------------------------------------
OOB_MAJOR_VERSION_INFO_KEY = 'major'
OOB_MINOR_VERSION_INFO_KEY = 'minor'
MAJOR_VERSION = 1
MINOR_DEFAULT_VERSION = 0
# --- User-facing messages ----------------------------------------------------
MSG_SOMETHING_WRONG_IN_RASTERIZE = "Something went wrong with rasterize"
MSG_ENABLE_WHOIS = "Please enable whois integration for more accurate prediction"
MSG_MODEL_VERSION_IN_DEMISTO = "Model version in demisto: %s.%s"
MSG_NO_MODEL_IN_DEMISTO = "There is no existing model version in demisto"
MSG_NO_URL_GIVEN = "Please input at least one URL"
MSG_FAILED_RASTERIZE = "Rasterize error: ERR_NAME_NOT_RESOLVED"
MSG_FAILED_RASTERIZE_TIMEOUT = "Timeout rasterize"
MSG_IMPOSSIBLE_CONNECTION = "Failed to establish a new connection - Name or service not known"
MSG_UPDATE_MODEL = "Update demisto model from docker model version %s.%s"
MSG_UPDATE_LOGO = "Update demisto model from docker model version %s.%s and transfering logos from demisto version %s.%s"
MSG_WRONG_CONFIG_MODEL = 'Wrong configuration of the model'
MSG_NO_ACTION_ON_MODEL = "Use current model"
MSG_WHITE_LIST = "White List"
MSG_REDIRECT = 'Prediction will be made on the last URL'
MSG_NEED_TO_UPDATE_RASTERIZE = "Please install and/or update rasterize pack"
EMPTY_STRING = ""
# --- Model identity and verdicts ---------------------------------------------
URL_PHISHING_MODEL_NAME = "url_phishing_model"
OUT_OF_THE_BOX_MODEL_PATH = '/model/model_docker.pkl'
UNKNOWN_MODEL_TYPE = 'UNKNOWN_MODEL_TYPE'
THRESHOLD_NEW_DOMAIN_MONTHS = 6
DOMAIN_AGE_KEY = 'New domain (less than %s months)' % str(THRESHOLD_NEW_DOMAIN_MONTHS)
MALICIOUS_VERDICT = "Malicious"
BENIGN_VERDICT = "Benign"
SUSPICIOUS_VERDICT = "Suspicious"
BENIGN_VERDICT_WHITELIST = "Benign - Top domains from Majestic"
UNKNOWN = 'Unknown'
# Score bands: < BENIGN_THRESHOLD -> benign, < SUSPICIOUS_THRESHOLD -> suspicious.
BENIGN_THRESHOLD = 0.5
SUSPICIOUS_THRESHOLD = 0.7
SCORE_INVALID_URL = -1.0
SCORE_BENIGN = 0.0 # type: float
# --- Markdown color templates (only on newer server versions) ----------------
GREEN_COLOR = "{{color:#1DB846}}(%s)" if NEW_DEMISTO_VERSION else "**%s**"
RED_COLOR = "{{color:#D13C3C}}(%s)" if NEW_DEMISTO_VERSION else "**%s**"
VERDICT_MALICIOUS_COLOR = "{{color:#D13C3C}}(**%s**)" if NEW_DEMISTO_VERSION else "**%s**"
VERDICT_SUSPICIOUS_COLOR = "{{color:#EF9700}}(**%s**)" if NEW_DEMISTO_VERSION else "**%s**"
VERDICT_BENIGN_COLOR = "{{color:#1DB846}}(**%s**)" if NEW_DEMISTO_VERSION else "**%s**"
VERDICT_ERROR_COLOR = "{{color:#D13C3C}}(**%s**)" if NEW_DEMISTO_VERSION else "**%s**"
MAPPING_VERDICT_COLOR = {MALICIOUS_VERDICT: VERDICT_MALICIOUS_COLOR, BENIGN_VERDICT: VERDICT_BENIGN_COLOR,
                         SUSPICIOUS_VERDICT: VERDICT_SUSPICIOUS_COLOR, BENIGN_VERDICT_WHITELIST: VERDICT_BENIGN_COLOR}
SCORE_THRESHOLD = 0.6 # type: float
STATUS_CODE_VALID = 200
# --- Keys of the model prediction dict ---------------------------------------
MODEL_KEY_URL_SCORE = 'url_score'
MODEL_KEY_LOGO_FOUND = 'logo_found'
MODEL_KEY_SEO = 'seo'
MODEL_KEY_LOGO_IMAGE_BYTES = 'image_bytes'
MODEL_KEY_LOGIN_FORM = 'login_form'
# --- Context output keys ------------------------------------------------------
KEY_CONTENT_DOMAIN = "Domain"
KEY_CONTENT_URL = "URL"
KEY_CONTENT_LOGO = "UseOfSuspiciousLogo"
KEY_CONTENT_LOGIN = "HasLoginForm"
KEY_CONTENT_URL_SCORE = "URLStaticScore"
KEY_CONTENT_SEO = "BadSEOQuality"
KEY_CONTENT_AGE = "NewDomain"
KEY_CONTENT_VERDICT = "FinalVerdict"
KEY_CONTENT_IS_WHITELISTED = "TopMajesticDomain"
KEY_CONTENT_DBOT_SCORE = 'DBotScore'
# --- Human-readable table keys ------------------------------------------------
KEY_HR_DOMAIN = "Domain"
KEY_HR_URL = 'Url'
KEY_HR_SEO = "Search engine optimisation"
KEY_HR_LOGIN = "Is there a Login form ?"
KEY_HR_LOGO = "Suspiscious use of company logo"
KEY_HR_URL_SCORE = "URL severity score (from 0 to 1)"
KEY_CONTENT_SUMMARY_URL = 'URL'
KEY_CONTENT_SUMMARY_FINAL_VERDICT = 'FinalVerdict'
# --- Rasterize output keys ----------------------------------------------------
KEY_IMAGE_RASTERIZE = "image_b64"
KEY_IMAGE_HTML = "html"
KEY_CURRENT_URL_RASTERIZE = 'current_url'
KEY_FINAL_VERDICT = "Final Verdict"
# Relative weights of each heuristic in the final weighted score (see get_score).
WEIGHT_HEURISTIC = {DOMAIN_AGE_KEY: 3, MODEL_KEY_LOGIN_FORM: 1, MODEL_KEY_SEO: 1,
                    MODEL_KEY_URL_SCORE: 2, MODEL_KEY_LOGO_FOUND: 1}
MAPPING_VERDICT_TO_DISPLAY_VERDICT = {
    MODEL_KEY_SEO: {True: RED_COLOR % 'Bad', False: GREEN_COLOR % 'Good'},
    MODEL_KEY_LOGO_FOUND: {True: RED_COLOR % 'Suspicious', False: GREEN_COLOR % 'Not Suspicious'},
    MODEL_KEY_LOGIN_FORM: {True: RED_COLOR % 'Yes', False: GREEN_COLOR % 'No'},
    DOMAIN_AGE_KEY: {True: RED_COLOR % 'Less than 6 months ago', False: GREEN_COLOR % 'More than 6 months ago',
                     None: None}
} # type: Dict
# --- Timeouts (seconds) -------------------------------------------------------
TIMEOUT_REQUESTS = 5
WAIT_TIME_RASTERIZE = 5
TIMEOUT_RASTERIZE = 20
DOMAIN_CHECK_RASTERIZE = 'google.com'
def get_model_data(model_name: str):
    """
    Fetch a model stored in demisto by name.
    :param model_name: name of the model to load from demisto
    :return: (base64-encoded model data, model type string)
    """
    res_model = demisto.executeCommand("getMLModel", {"modelName": model_name})[0]
    if is_error(res_model):
        raise DemistoException("Error reading model %s from Demisto" % model_name)
    model_data = res_model['Contents']['modelData']
    try:
        model_type = res_model['Contents']['model']["type"]["type"]
    except Exception:
        # Older model entries may not carry type information.
        model_type = UNKNOWN_MODEL_TYPE
    return model_data, model_type
def decode_model_data(model_data: str):
    """
    Deserialize a model from its base64 string form.
    :param model_data: base64-encoded serialized model
    :return: Model
    """
    raw_bytes = base64.b64decode(model_data.encode('utf-8'))
    return dill.loads(raw_bytes)  # guardrails-disable-line
def load_oob(path=OUT_OF_THE_BOX_MODEL_PATH):
    """
    Read the out-of-the-box model file shipped in the docker image.
    :param path: path of the model saved in the docker
    :return: base64-encoded bytes of the model
    """
    with open(path, 'rb') as model_file:
        raw = model_file.read()
    return base64.b64encode(raw)
def load_model_from_docker(path=OUT_OF_THE_BOX_MODEL_PATH):
    """
    Deserialize the model shipped in the docker image.
    :param path: path of the pickled model inside the docker
    :return: Model
    """
    # Use a context manager so the file handle is always closed — the original
    # passed an anonymous open() to dill.load and leaked the handle.
    with open(path, 'rb') as model_file:
        model = dill.load(model_file)  # guardrails-disable-line
    return model
def load_oob_model(path: str):
    """
    Load the out-of-the-box model from the docker image and store it in
    demisto (overwriting any existing model) with the default version.
    :param path: path of the pickled model inside the docker
    :return: status message describing the update
    :raises DemistoException: when reading the file or creating the model fails
    """
    try:
        encoded_model = load_oob(path)
    except Exception:
        raise DemistoException(traceback.format_exc())
    # Register the model under URL_PHISHING_MODEL_NAME, hidden from the UI,
    # stamped with the docker's default major/minor version.
    res = demisto.executeCommand('createMLModel', {'modelData': encoded_model.decode('utf-8'),
                                                  'modelName': URL_PHISHING_MODEL_NAME,
                                                  'modelLabels': [MALICIOUS_VERDICT, BENIGN_VERDICT,
                                                                  SUSPICIOUS_VERDICT],
                                                  'modelOverride': 'true',
                                                  'modelHidden': True,
                                                  'modelType': 'url_phishing',
                                                  'modelExtraInfo': {
                                                      OOB_MAJOR_VERSION_INFO_KEY: MAJOR_VERSION,
                                                      OOB_MINOR_VERSION_INFO_KEY: MINOR_DEFAULT_VERSION}})
    if is_error(res):
        raise DemistoException(get_error(res))
    return MSG_UPDATE_MODEL % (MAJOR_VERSION, MINOR_DEFAULT_VERSION)
def oob_model_exists_and_updated() -> Tuple[bool, int, int, str]:
    """
    Look up the phishing model in demisto and report its stored version.
    :return: (exists, major version, minor version, base64 model data);
             (False, -1, -1, '') when the model is absent
    """
    res_model = demisto.executeCommand("getMLModel", {"modelName": URL_PHISHING_MODEL_NAME})[0]
    if is_error(res_model):
        return False, -1, -1, ''
    contents = res_model['Contents']
    model_data = contents['modelData']
    extra = contents['model']['extra']
    major = extra.get(OOB_MAJOR_VERSION_INFO_KEY, -1)
    minor = extra.get(OOB_MINOR_VERSION_INFO_KEY, -1)
    return True, major, minor, model_data
def image_from_base64_to_bytes(base64_message: str):
    """
    Decode a base64 string into raw image bytes.
    :param base64_message: base64-encoded payload
    :return: decoded bytes
    """
    return base64.b64decode(base64_message.encode('utf-8'))
def extract_domainv2(url):
    """Return the registered domain (domain + public suffix) of a url."""
    parts = no_fetch_extract(url)
    return "%s.%s" % (parts.domain, parts.suffix)
def in_white_list(model, url: str) -> bool:
    """
    Tell whether the url's registered domain is in the model whitelist.
    :param model: model exposing a top_domains collection
    :param url: url to check
    :return: bool
    """
    domain = extract_domainv2(url)
    return domain in model.top_domains
def get_colored_pred_json(pred_json: Dict) -> Dict:
    """
    Build a deep copy of pred_json with its verdict fields replaced by the
    display strings (colored on newer server versions).
    :param pred_json: json to color
    :return: json
    """
    colored = copy.deepcopy(pred_json)
    for key in (MODEL_KEY_SEO, MODEL_KEY_LOGO_FOUND, MODEL_KEY_LOGIN_FORM, DOMAIN_AGE_KEY):
        colored[key] = MAPPING_VERDICT_TO_DISPLAY_VERDICT[key][pred_json[key]]
    return colored
def create_X_pred(output_rasterize: Dict, url: str) -> pd.DataFrame:
    """
    Build the single-row prediction dataframe from rasterize output.
    :param output_rasterize: dict returned by the rasterize command
    :param url: url to examine
    :return: pd.DataFrame with columns name/image/html
    """
    screenshot_b64 = output_rasterize.get(KEY_IMAGE_RASTERIZE, None)
    page_html = output_rasterize.get(KEY_IMAGE_HTML, None)
    X_pred = pd.DataFrame(columns=['name', 'image', 'html'])
    X_pred.loc[0] = [url, screenshot_b64, page_html]
    return X_pred
def prepend_protocol(url: str, protocol: str, www: bool = True) -> str:
    """
    Ensure a url carries a protocol and, optionally, a www. host prefix.
    :param url: url, possibly scheme-less
    :param protocol: scheme to apply when missing (usually http or https)
    :param www: when True, prefix the host with 'www.' if absent
    :return: str
    """
    parsed = urllib.parse.urlparse(url, protocol)  # type: ignore
    # A scheme-less url lands entirely in .path, so fall back to it as the host.
    host = parsed.netloc or parsed.path
    remainder = parsed.path if parsed.netloc else ''
    if www and not host.startswith('www.'):
        host = 'www.' + host
    rebuilt = urllib.parse.ParseResult(protocol, host, remainder, *parsed[3:])  # type: ignore
    return rebuilt.geturl()
def verdict_to_int(verdict):
    """Map a verdict string to its DBot score (1/2/3; None when unrecognised)."""
    if verdict in (BENIGN_VERDICT, BENIGN_VERDICT_WHITELIST):
        return 1
    if verdict == SUSPICIOUS_VERDICT:
        return 2
    if verdict == MALICIOUS_VERDICT:
        return 3
def return_entry_summary(pred_json: Dict, url: str, whitelist: bool, output_rasterize: Dict, verdict: str):
    """
    Write the per-url evidence entry (context + human-readable table) to the
    war room, plus the rasterized screenshot / logo-detection image.
    :param pred_json: json with output of the model
    :param url: url
    :param whitelist: if url belongs to whitelist of the model
    :param output_rasterize: rasterize command output (screenshot/html)
    :param verdict: final verdict string for this url
    :return: the evidence dict written to context, or None for whitelisted urls
    """
    # Whitelisted urls produce no detailed evidence entry.
    if whitelist:
        return
    if verdict == BENIGN_VERDICT_WHITELIST:
        verdict = BENIGN_VERDICT
    if whitelist or not pred_json:
        url_score = SCORE_BENIGN
        url_score_colored = GREEN_COLOR % str(url_score) if url_score < SCORE_THRESHOLD else RED_COLOR % str(
            url_score)
    else:
        url_score = round(pred_json[MODEL_KEY_URL_SCORE], 2)
        url_score_colored = GREEN_COLOR % str(url_score) if url_score < SCORE_THRESHOLD else RED_COLOR % str(
            url_score)  # type: ignore
    if pred_json:
        pred_json_colored = get_colored_pred_json(pred_json)
    else:
        pred_json_colored = {}
    domain = extract_domainv2(url)
    # Machine-readable evidence written to the incident context.
    explain = {
        KEY_CONTENT_DOMAIN: domain,
        KEY_CONTENT_URL: url,
        KEY_CONTENT_LOGO: str(pred_json.get(MODEL_KEY_LOGO_FOUND, UNKNOWN)),
        KEY_CONTENT_LOGIN: str(pred_json.get(MODEL_KEY_LOGIN_FORM, UNKNOWN)),
        KEY_CONTENT_URL_SCORE: url_score,
        KEY_CONTENT_SEO: str(pred_json.get(MODEL_KEY_SEO, UNKNOWN)),
        KEY_CONTENT_VERDICT: verdict,
        KEY_CONTENT_IS_WHITELISTED: str(whitelist)
    }
    context_DBot_score = {
        'Indicator': url,
        'Type': 'URL',
        'Vendor': 'DBotPhishingURL',
        'Score': verdict_to_int(verdict)
    }
    if pred_json and pred_json[DOMAIN_AGE_KEY] is not None:
        explain[KEY_CONTENT_AGE] = str(pred_json[DOMAIN_AGE_KEY])
    # Human-readable (colored) version of the same evidence.
    explain_hr = {
        KEY_HR_URL: url,
        KEY_HR_SEO: str(pred_json_colored.get(MODEL_KEY_SEO, UNKNOWN)),
        KEY_HR_LOGIN: str(pred_json_colored.get(MODEL_KEY_LOGIN_FORM, UNKNOWN)),
        KEY_HR_LOGO: str(pred_json_colored.get(MODEL_KEY_LOGO_FOUND, UNKNOWN)),
        KEY_HR_URL_SCORE: url_score_colored
    }
    if pred_json and pred_json[DOMAIN_AGE_KEY] is not None:
        explain_hr[DOMAIN_AGE_KEY] = str(pred_json_colored[DOMAIN_AGE_KEY])
    # Only non-benign verdicts carry a DBotScore and the malicious tag.
    if verdict == BENIGN_VERDICT:
        return_entry = {
            "Type": entryTypes["note"],
            "ContentsFormat": formats['json'],
            "HumanReadable": tableToMarkdown("Phishing prediction evidence | %s" % domain, explain_hr),
            "Contents": explain,
            "EntryContext": {'DBotPredictURLPhishing': explain}
        }
    else:
        return_entry = {
            "Type": entryTypes["note"],
            "ContentsFormat": formats['json'],
            "HumanReadable": tableToMarkdown("Phishing prediction evidence | %s" % domain, explain_hr),
            "Contents": explain,
            "EntryContext": {'DBotPredictURLPhishing': explain, KEY_CONTENT_DBOT_SCORE: context_DBot_score},
            "Tags": ['DBOT_URL_PHISHING_MALICIOUS']
        }
    return_results(return_entry)
    # Get rasterize image or logo detection if logo was found
    if pred_json:
        image = pred_json[MODEL_KEY_LOGO_IMAGE_BYTES]
        if not image:
            # Fall back to the raw rasterized screenshot.
            image = image_from_base64_to_bytes(output_rasterize.get(KEY_IMAGE_RASTERIZE, None))
        res = fileResult(filename='Logo detection engine', data=image)
        res['Type'] = entryTypes['image']
        if pred_json[MODEL_KEY_LOGO_FOUND]:
            res["Tags"] = ['DBOT_URL_PHISHING_MALICIOUS']
        return_results(res)
    return explain
def return_entry_white_list(url):
    """
    Create a synthetic benign entry for a url that belongs to the whitelist
    (all evidence fields read "White List").
    :param url: url
    :return: None (writes the entry to the war room)
    """
    explain = {
        KEY_CONTENT_DOMAIN: extract_domainv2(url),
        KEY_CONTENT_URL: url,
        KEY_CONTENT_AGE: MSG_WHITE_LIST,
        KEY_CONTENT_LOGO: MSG_WHITE_LIST,
        KEY_CONTENT_LOGIN: MSG_WHITE_LIST,
        KEY_CONTENT_URL_SCORE: MSG_WHITE_LIST,
        KEY_CONTENT_SEO: MSG_WHITE_LIST
    }
    explain_hr = {
        KEY_HR_URL: url,
        KEY_HR_SEO: MSG_WHITE_LIST,
        DOMAIN_AGE_KEY: MSG_WHITE_LIST,
        KEY_HR_LOGIN: MSG_WHITE_LIST,
        KEY_HR_LOGO: MSG_WHITE_LIST,
        KEY_HR_URL_SCORE: MSG_WHITE_LIST
    }
    verdict_hr = {
        "Verdict": BENIGN_VERDICT,
        "URL": url
    }
    return_entry = {
        "Type": entryTypes["note"],
        "ContentsFormat": formats['json'],
        "HumanReadable": tableToMarkdown("Verdict", verdict_hr) + tableToMarkdown("Report", explain_hr),
        "Contents": explain,
        "EntryContext": {'DBotPredictURLPhishing': explain}
    }
    return_results(return_entry)
def get_score(pred_json):
    """
    Weighted average of the heuristic indicators in pred_json.
    The domain-age and logo weights only participate when their signal is
    present (age known / logo detected).
    :param pred_json: model output dict
    :return: float score
    """
    use_age = bool(pred_json[DOMAIN_AGE_KEY])
    use_logo = bool(pred_json[MODEL_KEY_LOGO_FOUND])
    # An unknown creation date contributes nothing to the numerator.
    domain_age = pred_json[DOMAIN_AGE_KEY] if pred_json[DOMAIN_AGE_KEY] is not None else 0
    total_weight = (WEIGHT_HEURISTIC[DOMAIN_AGE_KEY] * use_age
                    + WEIGHT_HEURISTIC[MODEL_KEY_LOGIN_FORM]
                    + WEIGHT_HEURISTIC[MODEL_KEY_SEO]
                    + WEIGHT_HEURISTIC[MODEL_KEY_URL_SCORE]
                    + WEIGHT_HEURISTIC[MODEL_KEY_LOGO_FOUND] * use_logo)
    weighted_sum = (use_age * WEIGHT_HEURISTIC[DOMAIN_AGE_KEY] * domain_age
                    + WEIGHT_HEURISTIC[MODEL_KEY_LOGIN_FORM] * pred_json[MODEL_KEY_LOGIN_FORM]
                    + WEIGHT_HEURISTIC[MODEL_KEY_SEO] * pred_json[MODEL_KEY_SEO]
                    + WEIGHT_HEURISTIC[MODEL_KEY_URL_SCORE] * pred_json[MODEL_KEY_URL_SCORE]
                    + use_logo * WEIGHT_HEURISTIC[MODEL_KEY_LOGO_FOUND] * pred_json[MODEL_KEY_LOGO_FOUND])
    return weighted_sum / total_weight
def get_verdict(pred_json: Dict, is_white_listed: bool) -> Tuple[float, str]:
    """
    Derive (score, verdict) for a url from the model output.
    :param pred_json: output from the model
    :param is_white_listed: whether the url's domain is whitelisted
    :return: (score, verdict string)
    """
    if is_white_listed:
        return SCORE_BENIGN, BENIGN_VERDICT
    score = get_score(pred_json)
    # A detected logo is treated as conclusive regardless of the score.
    if pred_json[MODEL_KEY_LOGO_FOUND]:
        return score, MALICIOUS_VERDICT
    if score < BENIGN_THRESHOLD:
        return score, BENIGN_VERDICT
    if score < SUSPICIOUS_THRESHOLD:
        return score, SUSPICIOUS_VERDICT
    return score, MALICIOUS_VERDICT
def create_dict_context(url, last_url, verdict, pred_json, score, is_white_listed, output_rasterize):
    """Bundle the per-url prediction results into a single context dict."""
    return {
        'url_redirect': url,
        'url': last_url,
        'verdict': verdict,
        'pred_json': pred_json,
        'score': score,
        'is_white_listed': is_white_listed,
        'output_rasterize': output_rasterize,
    }
def extract_created_date(entry_list: List):
    """
    Check whether the domain was created less than THRESHOLD_NEW_DOMAIN_MONTHS
    months ago, based on the whois command output.
    :param entry_list: output of the whois command
    :return: True if newer than the threshold, False if older, None when no
             creation date could be extracted from any entry
    """
    for entry in entry_list:
        if is_error(entry):
            continue
        else:
            date_str = entry['EntryContext'].get('Domain(val.Name && val.Name == obj.Name)', {}).get('WHOIS', {}).get(
                'CreationDate', None)
            if date_str:
                # NOTE(review): assumes whois reports dates as '%d-%m-%Y';
                # other formats would raise ValueError — confirm upstream.
                date = datetime.strptime(date_str, '%d-%m-%Y')
                # A month is approximated as 30 days here.
                threshold_date = datetime.now() - timedelta(days=THRESHOLD_NEW_DOMAIN_MONTHS * 30)
                return date > threshold_date
    return None
def get_prediction_single_url(model, url, force_model, who_is_enabled, debug):
    """
    Run the full phishing prediction pipeline for a single url: rasterize the
    page, resolve redirects, check the whitelist, optionally query whois for
    domain age, run the model and aggregate everything into a context dict.
    :param model: loaded phishing model
    :param url: url to evaluate
    :param force_model: run the model even for whitelisted domains
    :param who_is_enabled: whether the whois integration is available
    :param debug: emit intermediate model outputs as war-room entries
    :return: context dict built by create_dict_context
    :raises DemistoException: when the rasterize pack is missing or outdated
    """
    is_white_listed = False
    # Rasterize html and image
    res_rasterize = demisto.executeCommand('rasterize', {'type': 'json',
                                                         'url': url,
                                                         'wait_time': WAIT_TIME_RASTERIZE,
                                                         'execution-timeout': TIMEOUT_RASTERIZE
                                                         })
    if is_error(res_rasterize):
        error = get_error(res_rasterize)
        if 'disabled' in error or 'enabled' in error:
            raise DemistoException(MSG_NEED_TO_UPDATE_RASTERIZE)
        elif 'timeout' in error:
            return create_dict_context(url, url, MSG_FAILED_RASTERIZE_TIMEOUT, {}, SCORE_INVALID_URL, is_white_listed,
                                       {})
        elif 'ERR_NAME_NOT_RESOLVED' in error:
            return create_dict_context(url, url, MSG_FAILED_RASTERIZE, {}, SCORE_INVALID_URL, is_white_listed, {})
        else:
            return create_dict_context(url, url, error, {}, SCORE_INVALID_URL, is_white_listed, {})
    if len(res_rasterize) > 0 and isinstance(res_rasterize[0]['Contents'], str):
        return create_dict_context(url, url, MSG_FAILED_RASTERIZE, {}, SCORE_INVALID_URL, is_white_listed, {})
    if KEY_IMAGE_RASTERIZE not in res_rasterize[0]['Contents'].keys() or KEY_IMAGE_HTML \
            not in res_rasterize[0]['Contents'].keys():
        return create_dict_context(url, url, MSG_SOMETHING_WRONG_IN_RASTERIZE, {}, SCORE_INVALID_URL, is_white_listed,
                                   {})
    if len(res_rasterize) > 0:
        output_rasterize = res_rasterize[0]['Contents']
    else:
        # BUGFIX: the original fell through here without returning, which would
        # discard the error context and then raise NameError on
        # output_rasterize below.
        return create_dict_context(url, url, MSG_SOMETHING_WRONG_IN_RASTERIZE, {}, SCORE_INVALID_URL, is_white_listed,
                                   {})
    if KEY_CURRENT_URL_RASTERIZE not in output_rasterize.keys():
        raise DemistoException(MSG_NEED_TO_UPDATE_RASTERIZE)
    # Get final url and redirection
    final_url = output_rasterize.get(KEY_CURRENT_URL_RASTERIZE, url)
    if final_url != url:
        url_redirect = '%s -> %s (%s)' % (url, final_url, MSG_REDIRECT)
    else:
        url_redirect = final_url
    domain = extract_domainv2(final_url)
    # Check is domain in white list - If yes we don't run the model
    if in_white_list(model, final_url):
        if not force_model:
            is_white_listed = True
            return create_dict_context(url_redirect, final_url, BENIGN_VERDICT_WHITELIST, {}, SCORE_BENIGN,
                                       is_white_listed, {})
        else:
            is_white_listed = True
    # Check domain age from WHOIS command (best effort: failures are ignored)
    res_whois = []
    if who_is_enabled:
        try:
            res_whois = demisto.executeCommand('whois', {'query': domain, 'execution-timeout': 5
                                                         })
        except Exception:
            res_whois = []
    is_new_domain = extract_created_date(res_whois)
    X_pred = create_X_pred(output_rasterize, final_url)
    pred_json = model.predict(X_pred)
    if debug:
        return_results(pred_json['debug_top_words'])
        return_results(pred_json['debug_found_domains_list'])
        return_results(pred_json['seo'])
        return_results(pred_json['debug_image'])
    pred_json[DOMAIN_AGE_KEY] = is_new_domain
    score, verdict = get_verdict(pred_json, is_white_listed)
    return create_dict_context(url_redirect, final_url, verdict, pred_json, score, is_white_listed, output_rasterize)
def return_general_summary(results, tag="Summary"):
    """
    Write the one-line-per-url verdict table to the war room.
    :param results: list of per-url context dicts (see create_dict_context)
    :param tag: suffix for the entry tag (DBOT_URL_PHISHING_<tag>); None omits tags
    :return: list of dicts backing the human-readable table
    """
    df_summary = pd.DataFrame()
    df_summary['URL'] = [x.get('url_redirect') for x in results]
    # Verdicts outside the known set are rendered with the error color.
    df_summary[KEY_FINAL_VERDICT] = [MAPPING_VERDICT_COLOR[x.get('verdict')] % x.get('verdict')
                                     if x.get('verdict') in MAPPING_VERDICT_COLOR.keys()
                                     else VERDICT_ERROR_COLOR % x.get('verdict') for x in results]
    # Context output only lists whitelisted urls (always benign).
    summary_context = [
        {KEY_CONTENT_SUMMARY_URL: x.get('url_redirect'), KEY_CONTENT_SUMMARY_FINAL_VERDICT: BENIGN_VERDICT,
         KEY_CONTENT_IS_WHITELISTED: 'True'} for x in results if x.get('is_white_listed')]
    df_summary_json = df_summary.to_dict(orient='records')
    return_entry = {
        "Type": entryTypes["note"],
        "ContentsFormat": formats['json'],
        "HumanReadable": tableToMarkdown("Phishing prediction summary for URLs", df_summary_json,
                                         headers=['URL', KEY_FINAL_VERDICT]),
        "Contents": summary_context,
        "EntryContext": {'DBotPredictURLPhishing': summary_context}
    }
    if tag is not None:
        return_entry["Tags"] = ['DBOT_URL_PHISHING_{}'.format(tag)]
    return_results(return_entry)
    return df_summary_json
def return_detailed_summary(results):
    """
    Emit one detailed evidence entry per url, ordered by descending severity.
    Urls with an invalid score are skipped.
    :param results: list of per-url context dicts (see create_dict_context)
    :return: list of the non-empty evidence dicts
    """
    scores = np.array([entry.get('score') for entry in results])
    outputs = []
    # Stable sort so equal scores keep their original relative order.
    for index in np.argsort(-scores, kind='mergesort'):
        entry = results[index]
        if entry.get('score') == SCORE_INVALID_URL:
            continue
        summary_json = return_entry_summary(entry.get('pred_json'), entry.get('url'),
                                            entry.get('is_white_listed'),
                                            entry.get('output_rasterize'), entry.get('verdict'))
        if summary_json:
            outputs.append(summary_json)
    return outputs
def save_model_in_demisto(model):
    """
    Serialize the model with dill and store it in demisto under
    URL_PHISHING_MODEL_NAME, stamped with the model's own major/minor version.
    :param model: model object exposing .major and .minor
    :raises DemistoException: when createMLModel fails
    """
    encoded_model = base64.b64encode(dill.dumps(model))  # guardrails-disable-line
    res = demisto.executeCommand('createMLModel', {'modelData': encoded_model.decode('utf-8'),
                                                   'modelName': URL_PHISHING_MODEL_NAME,
                                                   'modelLabels': [MALICIOUS_VERDICT, BENIGN_VERDICT],
                                                   'modelOverride': 'true',
                                                   'modelHidden': True,
                                                   'modelType': 'url_phishing',
                                                   'modelExtraInfo': {
                                                       OOB_MAJOR_VERSION_INFO_KEY: model.major,
                                                       OOB_MINOR_VERSION_INFO_KEY: model.minor}})
    if is_error(res):
        raise DemistoException(get_error(res))
def extract_urls(text):
    """
    Run the extractIndicators automation on free text and return its unique URLs.
    :param text: text possibly containing urls
    :return: list of unique urls
    :raises DemistoException: when extractIndicators fails
    """
    res = demisto.executeCommand("extractIndicators", {"text": text})
    if is_error(res):
        raise DemistoException(get_error(res))
    indicators = json.loads(res[0]["Contents"])
    return list(set(indicators.get("URL", [])))
def load_demisto_model():
    """Fetch and deserialize the phishing model stored in demisto."""
    encoded, _ = get_model_data(URL_PHISHING_MODEL_NAME)
    return decode_model_data(encoded)
def get_final_urls(urls, max_urls, model):
    """
    Select up to max_urls urls, preferring unseen, non-top domains.
    Urls whose domain was already selected or sits in the model's top-domain
    list are kept aside and used only as fillers if room remains.
    :param urls: candidate urls
    :param max_urls: maximum number of urls to return
    :param model: model exposing top_domains
    :return: list of urls
    """
    selected = []
    deprioritized = []
    seen_domains = []
    examined = 0
    for url in urls:
        # Stop considering new urls once max_urls candidates were examined.
        if examined < max_urls:
            domain = extract_domainv2(url)
            if domain in seen_domains or domain in model.top_domains:
                deprioritized.append(url)
            else:
                selected.append(url)
                seen_domains.append(domain)
            examined += 1
    if len(selected) < max_urls:
        room = max_urls - len(selected)
        selected = selected + deprioritized[:min(len(deprioritized), room)]
    return selected
def extract_embedded_urls_from_html(html):
    """
    Collect href targets of anchor tags whose visible text differs from the href
    (a mismatch typical of phishing links).
    :param html: raw html string
    :return: list of embedded urls
    """
    soup = BeautifulSoup(html)
    return [anchor['href'] for anchor in soup.findAll('a')
            if anchor.has_attr('href') and anchor['href'] not in anchor.get_text()]
def get_urls_to_run(email_body, email_html, urls_argument, max_urls, model, msg_list, debug):
    """
    Gather the candidate urls from the email body, the email html and the
    explicit urls argument, deduplicate them, and keep at most max_urls
    after domain-based prioritisation (see get_final_urls).
    :param email_body: plain-text email body (may be empty)
    :param email_html: html email body (may be empty)
    :param urls_argument: urls given explicitly (list or space-separated string)
    :param max_urls: maximum number of urls to run the model on
    :param model: model exposing top_domains
    :param msg_list: accumulator for status messages (mutated in place)
    :param debug: when True, echo the selected urls to the war room
    :return: (list of unescaped urls to run, msg_list)
    """
    if email_body:
        urls_email_body = extract_urls(email_body)
    else:
        # Fall back to the stripped html text when only the html body exists.
        if (not email_body and email_html):
            urls_email_body = extract_urls(BeautifulSoup(email_html).get_text())
        else:
            urls_email_body = []
    if email_html:
        urls_email_html = extract_embedded_urls_from_html(email_html)
    else:
        urls_email_html = []
    if isinstance(urls_argument, list):
        urls_only = urls_argument
    else:
        urls_only = [x.strip() for x in urls_argument.split(' ') if x]
    urls = urls_email_body + urls_only + urls_email_html
    urls = list(set(urls))
    if not urls:
        msg_list.append(MSG_NO_URL_GIVEN)
        return_results(MSG_NO_URL_GIVEN)
        return [], msg_list
    urls = get_final_urls(urls, max_urls, model)
    # Undo any escaping/defanging applied by upstream integrations.
    urls = [demisto.executeCommand("UnEscapeURLs", {"input": x})[0]['Contents'] for x in urls]
    if debug:
        return_results(urls)
    return urls, msg_list
def update_model_docker_from_model(model_docker, model):
    """
    Copy the logo dictionary and top-domain list from the Demisto-stored
    `model` onto the docker-shipped `model_docker`, including the copies
    kept inside the nested sklearn pipeline transformers, then return
    `model_docker`.
    """
    model_docker.logos_dict = model.logos_dict
    model_docker.top_domains = model.top_domains
    model_docker.clf.named_steps.preprocessor.named_transformers_[
        'image'].named_steps.trans.logo_dict = model.logos_dict
    model_docker.clf.named_steps.preprocessor.named_transformers_[
        'url'].named_steps.trans.d_top_domains = model.top_domains
    # NOTE(review): this assigns logos_dict to the image transformer's
    # top_domains attribute — looks like a copy/paste slip; confirm whether
    # model.top_domains was intended here.
    model_docker.clf.named_steps.preprocessor.named_transformers_[
        'image'].named_steps.trans.top_domains = model.logos_dict
    return model_docker
def update_and_load_model(debug, exist, reset_model, msg_list, demisto_major_version, demisto_minor_version,
                          model_data):
    """
    Ensure the URL-phishing model stored in Demisto is up to date and return it.

    Depending on the stored (major, minor) version relative to the docker
    model, this either reloads the out-of-the-box model, reuses the stored
    model as-is, or merges customer logo/domain data into the docker model
    and saves it back. Returns (model, msg_list).

    Raises
    ------
    DemistoException
        If the stored version combination is not a recognized configuration.
    """
    if debug:
        if exist:
            msg_list.append(MSG_MODEL_VERSION_IN_DEMISTO % (demisto_major_version, demisto_minor_version))
        else:
            msg_list.append(MSG_NO_MODEL_IN_DEMISTO)
    # Fresh install, explicit reset, or an outdated but never-customized
    # model: overwrite with the out-of-the-box model and reload it.
    if reset_model or not exist or (
            demisto_major_version < MAJOR_VERSION and demisto_minor_version == MINOR_DEFAULT_VERSION):
        msg_list.append(load_oob_model(OUT_OF_THE_BOX_MODEL_PATH))
        model_64_str = get_model_data(URL_PHISHING_MODEL_NAME)[0]
        model = decode_model_data(model_64_str)
    # Stored model matches the docker major version: use it unchanged.
    elif demisto_major_version == MAJOR_VERSION:
        model = decode_model_data(model_data)
        msg_list.append(MSG_NO_ACTION_ON_MODEL)
    # Outdated major but customized minor: migrate customer logo/domain data
    # from the stored model into the docker model, bump minor, save, reload.
    elif (demisto_major_version < MAJOR_VERSION) and (demisto_minor_version > MINOR_DEFAULT_VERSION):
        model_docker = load_model_from_docker()
        model_docker_minor = model_docker.minor
        model = load_demisto_model()
        model_docker = update_model_docker_from_model(model_docker, model)
        model_docker.minor += 1
        save_model_in_demisto(model_docker)
        msg_list.append(MSG_UPDATE_LOGO % (MAJOR_VERSION, model_docker_minor, model.major, model.minor))
        model_64_str = get_model_data(URL_PHISHING_MODEL_NAME)[0]
        model = decode_model_data(model_64_str)
    else:
        # NOTE(review): a stored major version *newer* than the docker's
        # (and the minor < default case) also lands here — confirm that
        # raising is the intended handling for those combinations.
        msg_list.append(MSG_WRONG_CONFIG_MODEL)
        raise DemistoException(MSG_WRONG_CONFIG_MODEL)
    return model, msg_list
def check_if_whois_installed():
    """Return True when the 'whois' command can be executed; on ValueError
    (command unavailable) surface MSG_ENABLE_WHOIS and return False."""
    try:
        demisto.executeCommand('whois', {'query': DOMAIN_CHECK_RASTERIZE,
                                         'execution-timeout': 5})
    except ValueError:
        return_results(MSG_ENABLE_WHOIS)
        return False
    return True
def main():
    """
    Entry point: load/refresh the URL-phishing model, gather candidate URLs
    from the script arguments, score each URL, and return the summaries.

    Returns (general_summary, detailed_summary, msg_list), or None when no
    URL was found; on any failure reports via return_error.
    """
    who_is_enabled = check_if_whois_installed()
    try:
        msg_list = []  # type: List
        # Check existing version of the model in demisto
        exist, demisto_major_version, demisto_minor_version, model_data = oob_model_exists_and_updated()
        # Load arguments
        reset_model = demisto.args().get('resetModel', 'False') == 'True'
        debug = demisto.args().get('debug', 'False') == 'True'
        force_model = demisto.args().get('forceModel', 'False') == 'True'
        email_body = demisto.args().get('emailBody', "")
        email_html = demisto.args().get('emailHTML', "")
        max_urls = int(demisto.args().get('maxNumberOfURL', 5))
        urls_argument = demisto.args().get('urls', '')
        # Update model if necessary and load the model
        model, msg_list = update_and_load_model(debug, exist, reset_model, msg_list, demisto_major_version,
                                                demisto_minor_version, model_data)
        # Get all the URLs on which we will run the model
        urls, msg_list = get_urls_to_run(email_body, email_html, urls_argument, max_urls, model, msg_list, debug)
        if not urls:
            return
        # Run the model and get predictions
        results = [get_prediction_single_url(model, x, force_model, who_is_enabled, debug) for x in urls]
        # Return outputs
        general_summary = return_general_summary(results)
        detailed_summary = return_detailed_summary(results)
        if debug:
            return_results(msg_list)
        return general_summary, detailed_summary, msg_list
    except Exception as ex:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute URL Phishing script. Error: {str(ex)}')
# Demisto runs scripts under '__builtin__'/'builtins' as well as '__main__'.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| [
"requests.packages.urllib3.disable_warnings",
"urllib.parse.urlparse",
"demistomock.args",
"base64.b64encode",
"base64.b64decode",
"urllib.parse.ParseResult",
"bs4.BeautifulSoup",
"numpy.array",
"demistomock.executeCommand",
"tldextract.TLDExtract",
"copy.deepcopy",
"pandas.DataFrame",
"dill... | [((309, 353), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', ([], {}), '()\n', (351, 353), False, 'import requests\n'), ((407, 457), 'tldextract.TLDExtract', 'TLDExtract', ([], {'suffix_list_urls': 'None', 'cache_dir': '(False)'}), '(suffix_list_urls=None, cache_dir=False)\n', (417, 457), False, 'from tldextract import TLDExtract\n'), ((8373, 8403), 'base64.b64decode', 'base64.b64decode', (['base64_bytes'], {}), '(base64_bytes)\n', (8389, 8403), False, 'import base64\n'), ((9011, 9035), 'copy.deepcopy', 'copy.deepcopy', (['pred_json'], {}), '(pred_json)\n', (9024, 9035), False, 'import copy\n'), ((9992, 10039), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'image', 'html']"}), "(columns=['name', 'image', 'html'])\n", (10004, 10039), True, 'import pandas as pd\n'), ((10383, 10419), 'urllib.parse.urlparse', 'urllib.parse.urlparse', (['url', 'protocol'], {}), '(url, protocol)\n', (10404, 10419), False, 'import urllib\n'), ((10593, 10649), 'urllib.parse.ParseResult', 'urllib.parse.ParseResult', (['protocol', 'netloc', 'path', '*p[3:]'], {}), '(protocol, netloc, path, *p[3:])\n', (10617, 10649), False, 'import urllib\n'), ((18396, 18539), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""rasterize"""', "{'type': 'json', 'url': url, 'wait_time': WAIT_TIME_RASTERIZE,\n 'execution-timeout': TIMEOUT_RASTERIZE}"], {}), "('rasterize', {'type': 'json', 'url': url,\n 'wait_time': WAIT_TIME_RASTERIZE, 'execution-timeout': TIMEOUT_RASTERIZE})\n", (18418, 18539), True, 'import demistomock as demisto\n'), ((21919, 21933), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21931, 21933), True, 'import pandas as pd\n'), ((24943, 25002), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""extractIndicators"""', "{'text': text}"], {}), "('extractIndicators', {'text': text})\n", (24965, 25002), True, 'import demistomock as demisto\n'), ((25961, 25980), 'bs4.BeautifulSoup', 'BeautifulSoup', 
(['html'], {}), '(html)\n', (25974, 25980), False, 'from bs4 import BeautifulSoup\n'), ((5035, 5098), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""getMLModel"""', "{'modelName': model_name}"], {}), "('getMLModel', {'modelName': model_name})\n", (5057, 5098), True, 'import demistomock as demisto\n'), ((6012, 6037), 'base64.b64encode', 'base64.b64encode', (['model_b'], {}), '(model_b)\n', (6028, 6037), False, 'import base64\n'), ((7639, 7715), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""getMLModel"""', "{'modelName': URL_PHISHING_MODEL_NAME}"], {}), "('getMLModel', {'modelName': URL_PHISHING_MODEL_NAME})\n", (7661, 7715), True, 'import demistomock as demisto\n'), ((24012, 24029), 'dill.dumps', 'dill.dumps', (['model'], {}), '(model)\n', (24022, 24029), False, 'import dill\n'), ((29419, 29513), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""whois"""', "{'query': DOMAIN_CHECK_RASTERIZE, 'execution-timeout': 5}"], {}), "('whois', {'query': DOMAIN_CHECK_RASTERIZE,\n 'execution-timeout': 5})\n", (29441, 29513), True, 'import demistomock as demisto\n'), ((21068, 21142), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""whois"""', "{'query': domain, 'execution-timeout': 5}"], {}), "('whois', {'query': domain, 'execution-timeout': 5})\n", (21090, 21142), True, 'import demistomock as demisto\n'), ((23260, 23283), 'numpy.array', 'np.array', (['severity_list'], {}), '(severity_list)\n', (23268, 23283), True, 'import numpy as np\n'), ((27067, 27119), 'demistomock.executeCommand', 'demisto.executeCommand', (['"""UnEscapeURLs"""', "{'input': x}"], {}), "('UnEscapeURLs', {'input': x})\n", (27089, 27119), True, 'import demistomock as demisto\n'), ((30185, 30199), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (30197, 30199), True, 'import demistomock as demisto\n'), ((30242, 30256), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (30254, 30256), True, 'import demistomock as demisto\n'), ((30366, 30380), 
'demistomock.args', 'demisto.args', ([], {}), '()\n', (30378, 30380), True, 'import demistomock as demisto\n'), ((29975, 29989), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (29987, 29989), True, 'import demistomock as demisto\n'), ((30043, 30057), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (30055, 30057), True, 'import demistomock as demisto\n'), ((30112, 30126), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (30124, 30126), True, 'import demistomock as demisto\n'), ((30301, 30315), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (30313, 30315), True, 'import demistomock as demisto\n'), ((26426, 26451), 'bs4.BeautifulSoup', 'BeautifulSoup', (['email_html'], {}), '(email_html)\n', (26439, 26451), False, 'from bs4 import BeautifulSoup\n')] |
# Identify cancer-type-specific dependencies of gene modules:
# per-gene GLS regressions on one-hot cancer types, ACAT combination of
# module p-values, then FDR correction within each cancer type.
import numpy as np, pandas as pd, statsmodels.api as sm
from collections import defaultdict
from .load_screens import load_screens
from scipy.stats import cauchy
from statsmodels.stats.multitest import fdrcorrection
# Load screens and modules
screens = load_screens()
genes = screens.index
# NOTE(review): read_csv(..., squeeze=True) is removed in pandas >= 2.0;
# confirm the pinned pandas version before upgrading.
modules = np.array([
    module.split() for d in (0.2, 0.5, 0.9) for module in pd.read_csv(
        f'modules_d_{d}.csv', usecols=['Members'], squeeze=True)])
# Get cancer types; filter out cancer types that only appear once
cell_line_cancer_types = screens.columns.str.split('_').str[1:].str.join(
    ' ').str.capitalize().to_series()
cancer_type_frequencies = cell_line_cancer_types.value_counts()
singletons = cancer_type_frequencies.index[cancer_type_frequencies == 1]
non_singletons_mask = ~cell_line_cancer_types.isin(singletons).values
screens = screens.iloc[:, non_singletons_mask]
cell_line_cancer_types = cell_line_cancer_types[non_singletons_mask]
# One-hot encode
one_hot_cancer_types = pd.get_dummies(cell_line_cancer_types)
cancer_types = one_hot_cancer_types.columns
# Run multivariate GLS on each gene
# Whitening transform: transpose of the Cholesky factor of the
# pseudo-inverse covariance across cell lines.
cholsigmainv = np.linalg.cholesky(np.linalg.pinv(screens.cov())).T
inputs = cholsigmainv.dot(sm.add_constant(one_hot_cancer_types))
gene_ps = pd.DataFrame(index=genes, columns=cancer_types, dtype=float)
for gene in genes:
    output = cholsigmainv.dot(screens.loc[gene])
    model = sm.OLS(output, inputs).fit()
    # drop the intercept's p-value; keep one p per cancer type
    gene_ps.loc[gene] = model.pvalues[1:]
# Combine GLS p-values with ACAT
# (Cauchy combination test; float128 below guards against underflow.)
ACAT = lambda ps: cauchy.sf(np.tan((0.5 - ps) * np.pi).mean())
results = defaultdict(list)
for gene_set in modules:
    gene_set_ps = gene_ps[genes.isin(gene_set)]
    for cancer_type in cancer_types:
        p_meta = ACAT(gene_set_ps[cancer_type].astype(np.float128))
        results['Module genes'].append(', '.join(gene_set))
        results['Cancer type'].append(cancer_type)
        results['p'].append(p_meta)
results = pd.DataFrame(results)
# FDR-correct by cancer type
results['FDR'] = results.p.groupby(results['Cancer type']).apply(
    lambda p: pd.Series(fdrcorrection(p)[1], index=p.index))
# Save significant module-cancer type dependencies
results[results.FDR < 0.1].sort_values('p').to_csv(
    'cancer_type_dependencies.tsv', sep='\t', index=False) | [
"numpy.tan",
"pandas.DataFrame",
"pandas.read_csv",
"statsmodels.api.add_constant",
"collections.defaultdict",
"pandas.get_dummies",
"statsmodels.api.OLS",
"statsmodels.stats.multitest.fdrcorrection"
] | [((996, 1034), 'pandas.get_dummies', 'pd.get_dummies', (['cell_line_cancer_types'], {}), '(cell_line_cancer_types)\n', (1010, 1034), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((1259, 1319), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'genes', 'columns': 'cancer_types', 'dtype': 'float'}), '(index=genes, columns=cancer_types, dtype=float)\n', (1271, 1319), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((1579, 1596), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1590, 1596), False, 'from collections import defaultdict\n'), ((1932, 1953), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (1944, 1953), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((1210, 1247), 'statsmodels.api.add_constant', 'sm.add_constant', (['one_hot_cancer_types'], {}), '(one_hot_cancer_types)\n', (1225, 1247), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((371, 439), 'pandas.read_csv', 'pd.read_csv', (['f"""modules_d_{d}.csv"""'], {'usecols': "['Members']", 'squeeze': '(True)'}), "(f'modules_d_{d}.csv', usecols=['Members'], squeeze=True)\n", (382, 439), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((1400, 1422), 'statsmodels.api.OLS', 'sm.OLS', (['output', 'inputs'], {}), '(output, inputs)\n', (1406, 1422), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((1534, 1560), 'numpy.tan', 'np.tan', (['((0.5 - ps) * np.pi)'], {}), '((0.5 - ps) * np.pi)\n', (1540, 1560), True, 'import numpy as np, pandas as pd, statsmodels.api as sm\n'), ((2075, 2091), 'statsmodels.stats.multitest.fdrcorrection', 'fdrcorrection', (['p'], {}), '(p)\n', (2088, 2091), False, 'from statsmodels.stats.multitest import fdrcorrection\n')] |
import ase.db
import warnings
import numpy
import matplotlib.pyplot as plt
from ase.data import covalent_radii
from scipy.stats import linregress
import os, os.path
from scipy.constants import pi, epsilon_0
# Location of the C2DB database of 2D materials (must be downloaded manually).
db_file = "../../data/gpaw_data/c2db.db"
if not os.path.exists(db_file):
    # NOTE(review): FileExistsError is raised when the file is *missing*;
    # FileNotFoundError would be the conventional choice.
    raise FileExistsError(("Please download the c2db data into ../../data/gpaw_data/ folder,"
                           "from https://cmr.fysik.dtu.dk/_downloads/c2db.db"))
db = ase.db.connect(db_file)
# Per-element lookup tables indexed by atomic number.
valence = numpy.load("../post_processing/valence.npy")
# NOTE(review): `pol` loads the same valence.npy file as `valence` — given the
# variable name this looks like it should load a polarizability table; confirm.
pol = numpy.load("../post_processing/valence.npy")
def get_thick(atom_row):
    """
    Return (thickness, valence electrons per area, atomic polarizability per
    area) for one c2db row.

    Thickness is the z-extent of the layer padded by covalent radii at both
    surfaces; the per-area quantities are normalized by the row's cell_area.
    """
    z_coords = atom_row.positions[:, -1]
    radii = covalent_radii[atom_row.numbers]
    thickness = numpy.max(z_coords + radii) - numpy.min(z_coords - radii)
    electron_counts = valence[atom_row.numbers]  # valence electrons
    atomic_pols = pol[atom_row.numbers]
    area = atom_row.cell_area
    return thickness, sum(electron_counts) / area, sum(atomic_pols) / area
def get_data():
    """
    Collect polarizabilities, band gaps and thicknesses of 2D materials
    from the c2db database.

    Candidates must have a GW gap above 0.05 eV and all required attributes
    (PBE/HSE/GW gaps, in- and out-of-plane polarizabilities); Cr-containing
    prototypes are excluded as unreliable.

    Returns
    -------
    tuple of ndarray
        (alpha_x, alpha_z, Eg_HSE, thick). Eg_GW, Eg_PBE, n_2D and polar
        are computed as well but intentionally not returned (kept for
        debugging/extension).
    """
    # The previous stricter selection (gap_gw > 0.5) was dead code —
    # immediately overwritten — and has been removed.
    candidates = db.select(selection="gap_gw>0.05")
    materials = []
    alpha_x = []
    alpha_z = []
    Eg_HSE = []
    Eg_GW = []
    Eg_PBE = []
    thick = []
    n_2D = []
    polar = []
    for mol in candidates:
        if "Cr" in mol.formula:  # CrS2 stuffs are not correct?
            continue
        print("{0}-{1}".format(mol.formula, mol.prototype))
        # Skip rows that are missing any of the required calculated fields.
        togo = True
        for attrib in ("gap", "gap_hse",
                       "gap_gw", "alphax", "alphaz"):
            if not hasattr(mol, attrib):
                warnings.warn("{0} doesn't have attribute {1}!".format(mol.formula,
                                                                       attrib))
                togo = False
        if togo is not True:
            warnings.warn("{0} not calculated!".format(mol.formula))
            continue
        materials.append("{0}-{1}".format(mol.formula, mol.prototype))
        alpha_x.append(mol.alphax)
        alpha_z.append(mol.alphaz)
        Eg_HSE.append(mol.gap_hse)
        Eg_GW.append(mol.gap_gw)
        Eg_PBE.append(mol.gap)
        delta, n, apol = get_thick(mol)
        thick.append(delta)
        n_2D.append(n)
        polar.append(apol)
    print(len(alpha_x))
    alpha_x = numpy.array(alpha_x)
    alpha_z = numpy.array(alpha_z)
    Eg_HSE = numpy.array(Eg_HSE)
    Eg_GW = numpy.array(Eg_GW)
    Eg_PBE = numpy.array(Eg_PBE)
    thick = numpy.array(thick)
    n_2D = numpy.array(n_2D)
    polar = numpy.array(polar)
    return alpha_x, alpha_z, Eg_HSE, thick
'''
img_path = "../../tmp_img/"
plt.style.use("science")
# plt.figure(figsize=(7, 3.5)) #
# plt.subplot(121) #
# plt.plot(Eg_HSE, alpha_x * 4 * pi, "o", alpha=0.5) #
# plt.xlabel("$E_{\\rm{g}}$ (eV)") #
# plt.ylabel("$\\alpha_{xx}/\\varepsilon_0$ ($\\AA$)") #
# #
# plt.subplot(122) #
# plt.plot(Eg_HSE, alpha_z * 4 * pi, "o", alpha=0.5) #
# plt.xlabel("$E_{\\rm{g}}$ (eV)") #
# plt.ylabel("$\\alpha_{zz} / \\varepsilon_0$ ($\\AA$)") #
# #
# plt.tight_layout() #
# plt.savefig(os.path.join(img #
# _path, "alpha_Eg_original.svg"))
# x-direction
plt.figure(figsize=(3.5, 3.5))
plt.plot(Eg_HSE, 1 / (alpha_x), "o", alpha=0.5)
k, b, r, *_ = linregress(x=Eg_HSE, y=1/alpha_x)
print(k, b, r)
xx = numpy.linspace(0.3, 8)
yy = k * xx + b
plt.plot(xx, yy, "--")
plt.xlabel("$E_{\\rm{g}}$ (eV)")
plt.ylabel("$(4 \\pi \\varepsilon_0)/\\alpha_{\parallel}$ ($\\AA^{-1}$)")
plt.savefig(os.path.join(img_path, "alpha_xx_1_Eg.svg"))
# z-direction
plt.figure(figsize=(3.5, 3.5))
plt.plot(thick, alpha_z, "o", alpha=0.5)
# plt.plot(polar, alpha_z, "o", alpha=0.5)
k, b, r, *_ = linregress(x=thick, y=alpha_z)
print(k, b, r)
xx = numpy.linspace(2, 10)
yy = k * xx + b
# yyy = 1 / (4 * pi) * xx - 0.05
plt.plot(xx, yy, "--")
# plt.plot(xx, yyy, "--")
plt.xlabel("Thickness ($\\AA$)")
plt.ylabel("$\\alpha_{\\perp} / (4 \pi \\varepsilon_0)$ ($\\AA$)")
plt.savefig(os.path.join(img_path, "alpha_zz_thick.svg"))
#x-direction
plt.figure(figsize=(3.5, 3.5))
# plt.plot(thick, alpha_z, "o", alpha=0.5)
plt.plot(polar, alpha_z, "o", alpha=0.5)
# k, b, r, *_ = linregress(x=thick, y=alpha_z)
# print(k, b, r)
# xx = numpy.linspace(2, 10)
# yy = k * xx + b
# yyy = 1 / (4 * pi) * xx - 0.05
# plt.plot(xx, yy, "--")
# plt.plot(xx, yyy, "--")
plt.text(x=2, y=10, s="$\\alpha^{\\perp} = \\frac{\\hbar^2 e^2 \\rho_e}{m_e E_{\mathrm{g}}^2}$")
plt.xlabel("Total Atomic Polarizability per Area (Bohr$^3$)")
plt.ylabel("$\\alpha^{\\perp} / (4 \pi \\varepsilon_0)$ ($\\AA$)")
plt.savefig(os.path.join(img_path, "alpha_zz_polar.svg"))
# z-direction with atomic polarizability
plt.figure(figsize=(3.5, 3.5))
# plt.plot(thick, alpha_z, "o", alpha=0.5)
plt.plot(polar, alpha_x, "o", alpha=0.5)
k, b, r, *_ = linregress(x=thick, y=alpha_z)
print(k, b, r)
# xx = numpy.linspace(2, 10)
# yy = k * xx + b
# yyy = 1 / (4 * pi) * xx - 0.05
# plt.plot(xx, yy, "--")
# plt.plot(xx, yyy, "--")
plt.xlabel("Total Atomic Polarizability per Area (Bohr$^3$)")
plt.ylabel("$\\alpha_{\\parallel} / (4 \pi \\varepsilon_0)$ ($\\AA$)")
plt.savefig(os.path.join(img_path, "alpha_xx_polar.svg"))
'''
| [
"os.path.exists",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.load"
] | [((488, 532), 'numpy.load', 'numpy.load', (['"""../post_processing/valence.npy"""'], {}), "('../post_processing/valence.npy')\n", (498, 532), False, 'import numpy\n'), ((539, 583), 'numpy.load', 'numpy.load', (['"""../post_processing/valence.npy"""'], {}), "('../post_processing/valence.npy')\n", (549, 583), False, 'import numpy\n'), ((256, 279), 'os.path.exists', 'os.path.exists', (['db_file'], {}), '(db_file)\n', (270, 279), False, 'import os, os.path\n'), ((2226, 2246), 'numpy.array', 'numpy.array', (['alpha_x'], {}), '(alpha_x)\n', (2237, 2246), False, 'import numpy\n'), ((2261, 2281), 'numpy.array', 'numpy.array', (['alpha_z'], {}), '(alpha_z)\n', (2272, 2281), False, 'import numpy\n'), ((2295, 2314), 'numpy.array', 'numpy.array', (['Eg_HSE'], {}), '(Eg_HSE)\n', (2306, 2314), False, 'import numpy\n'), ((2327, 2345), 'numpy.array', 'numpy.array', (['Eg_GW'], {}), '(Eg_GW)\n', (2338, 2345), False, 'import numpy\n'), ((2359, 2378), 'numpy.array', 'numpy.array', (['Eg_PBE'], {}), '(Eg_PBE)\n', (2370, 2378), False, 'import numpy\n'), ((2391, 2409), 'numpy.array', 'numpy.array', (['thick'], {}), '(thick)\n', (2402, 2409), False, 'import numpy\n'), ((2421, 2438), 'numpy.array', 'numpy.array', (['n_2D'], {}), '(n_2D)\n', (2432, 2438), False, 'import numpy\n'), ((2451, 2469), 'numpy.array', 'numpy.array', (['polar'], {}), '(polar)\n', (2462, 2469), False, 'import numpy\n'), ((702, 723), 'numpy.max', 'numpy.max', (['(pos + diff)'], {}), '(pos + diff)\n', (711, 723), False, 'import numpy\n'), ((726, 747), 'numpy.min', 'numpy.min', (['(pos - diff)'], {}), '(pos - diff)\n', (735, 747), False, 'import numpy\n')] |
import datetime as dt
import numba as nb
import numpy as np
from randomgen import Xoroshiro128
# Shared bit generator; `f` and `s` expose its ctypes next_uint32 function
# and raw state pointer so numba-jitted code can call it in nopython mode.
x = Xoroshiro128()
f = x.ctypes.next_uint32
s = x.ctypes.state
@nb.jit(nopython=True)
def bounded_uint(lb: int, ub: int, state: int) -> int:
    """Draw a uniform integer in [lb, ub] via masked rejection sampling
    using the raw 32-bit generator `f` on the given raw state pointer."""
    span = ub - lb
    # smallest all-ones bitmask covering the span (smear the top bit down)
    mask = span
    mask |= mask >> 1
    mask |= mask >> 2
    mask |= mask >> 4
    mask |= mask >> 8
    mask |= mask >> 16
    # rejection-sample masked draws until one falls inside the range
    while True:
        draw = f(state) & mask
        if draw <= span:
            return lb + draw
print(bounded_uint(323, 2394691, s.value))
@nb.jit(nopython=True)
def bounded_uints(lb: int, ub: int, n: int, state: int) -> np.ndarray:
    """
    Draw `n` uniform integers in [lb, ub] using bounded_uint and return them.

    The original filled the buffer and discarded it (annotated -> None);
    returning the array makes the function usable beyond benchmarking while
    remaining backward-compatible for callers that ignore the result.
    """
    out = np.empty(n, dtype=np.uint32)
    for i in range(n):
        out[i] = bounded_uint(lb, ub, state)
    return out
g = x.cffi.next_double
cffi_state = x.cffi.state
state_addr = x.cffi.state_address
def normals(n: int, state: int) -> np.ndarray:
    """
    Generate `n` standard normals with the polar (Marsaglia) method, drawing
    uniforms from the raw cffi double generator `g` on the given state.
    """
    out = np.empty(n)
    for pair in range((n + 1) // 2):
        # draw points until one lands strictly inside the unit circle
        while True:
            u = 2.0 * g(state) - 1.0
            v = 2.0 * g(state) - 1.0
            r2 = u * u + v * v
            if 0.0 < r2 < 1.0:
                break
        scale = np.sqrt(-2.0 * np.log(r2) / r2)
        out[2 * pair] = scale * u
        if 2 * pair + 1 < n:  # odd n: drop the second member of the last pair
            out[2 * pair + 1] = scale * v
    return out
# Quick sanity check: variance of 10 draws (pure-Python path).
print(normals(10, cffi_state).var())
# Warm up
normalsj = nb.jit(normals, nopython=True)
normalsj(1, state_addr)
# Time 1M draws through the jitted polar method...
start = dt.datetime.now()
normalsj(1000000, state_addr)
ms = 1000 * (dt.datetime.now() - start).total_seconds()
print(
    "1,000,000 Polar-transform (numba/Xoroshiro128) randoms in "
    "{ms:0.1f}ms".format(ms=ms)
)
# ...and compare against NumPy's generator.
# NOTE(review): the message says "Polar-transform (NumPy)" but NumPy's
# standard_normal does not use the polar method — wording is misleading.
start = dt.datetime.now()
np.random.standard_normal(1000000)
ms = 1000 * (dt.datetime.now() - start).total_seconds()
print("1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms".format(ms=ms))
| [
"numpy.random.standard_normal",
"numpy.log",
"randomgen.Xoroshiro128",
"datetime.datetime.now",
"numba.jit",
"numpy.empty"
] | [((102, 116), 'randomgen.Xoroshiro128', 'Xoroshiro128', ([], {}), '()\n', (114, 116), False, 'from randomgen import Xoroshiro128\n'), ((164, 185), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (170, 185), True, 'import numba as nb\n'), ((528, 549), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (534, 549), True, 'import numba as nb\n'), ((1417, 1447), 'numba.jit', 'nb.jit', (['normals'], {'nopython': '(True)'}), '(normals, nopython=True)\n', (1423, 1447), True, 'import numba as nb\n'), ((1481, 1498), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1496, 1498), True, 'import datetime as dt\n'), ((1700, 1717), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1715, 1717), True, 'import datetime as dt\n'), ((1718, 1752), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(1000000)'], {}), '(1000000)\n', (1743, 1752), True, 'import numpy as np\n'), ((625, 653), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.uint32'}), '(n, dtype=np.uint32)\n', (633, 653), True, 'import numpy as np\n'), ((914, 925), 'numpy.empty', 'np.empty', (['n'], {}), '(n)\n', (922, 925), True, 'import numpy as np\n'), ((1542, 1559), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1557, 1559), True, 'import datetime as dt\n'), ((1766, 1783), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1781, 1783), True, 'import datetime as dt\n'), ((1235, 1245), 'numpy.log', 'np.log', (['r2'], {}), '(r2)\n', (1241, 1245), True, 'import numpy as np\n')] |
"""LinearSolver that uses PetSC KSP to solve for a system's derivatives."""
from __future__ import division, print_function
import numpy as np
try:
import petsc4py
from petsc4py import PETSc
except ImportError:
PETSc = None
from openmdao.solvers.solver import LinearSolver
from openmdao.utils.general_utils import warn_deprecation
# Valid values for the 'ksp_type' option; these mirror the KSPType
# algorithm names exposed by PETSc/petsc4py.
KSP_TYPES = [
    "richardson",
    "chebyshev",
    "cg",
    "groppcg",
    "pipecg",
    "pipecgrr",
    "cgne",
    "nash",
    "stcg",
    "gltr",
    "fcg",
    "pipefcg",
    "gmres",
    "pipefgmres",
    "fgmres",
    "lgmres",
    "dgmres",
    "pgmres",
    "tcqmr",
    "bcgs",
    "ibcgs",
    "fbcgs",
    "fbcgsr",
    "bcgsl",
    "cgs",
    "tfqmr",
    "cr",
    "pipecr",
    "lsqr",
    "preonly",
    "qcg",
    "bicg",
    "minres",
    "symmlq",
    "lcd",
    "python",
    "gcr",
    "pipegcr",
    "tsirm",
    "cgls"
]
def _get_petsc_vec_array_new(vec):
"""
Get the array of values for the given PETSc vector.
Helper function to handle a petsc backwards incompatibility.
Parameters
----------
vec : petsc vector
Vector whose data is being requested.
Returns
-------
ndarray
A readonly copy of the array of values from vec.
"""
return vec.getArray(readonly=True)
def _get_petsc_vec_array_old(vec):
"""
Get the array of values for the given PETSc vector.
Helper function to handle a petsc backwards incompatibility.
Parameters
----------
vec : petsc vector
Vector whose data is being requested.
Returns
-------
ndarray
An array of values from vec.
"""
return vec.getArray()
# Pick the getArray helper matching the installed petsc4py version.
if PETSc:
    try:
        petsc_version = petsc4py.__version__
    except AttributeError:  # hack to fix doc-tests
        petsc_version = "3.5"
# NOTE(review): comparing only the minor version (int(split('.')[1]) >= 6)
# would mis-select for a hypothetical major version bump (e.g. 4.0);
# fine for the 3.x series this targets.
if PETSc and int((petsc_version).split('.')[1]) >= 6:
    _get_petsc_vec_array = _get_petsc_vec_array_new
else:
    _get_petsc_vec_array = _get_petsc_vec_array_old
class Monitor(object):
    """
    Residual printer used as a PETSc KSP monitor callback.

    Attributes
    ----------
    _solver : LinearSolver
        The owning openmdao solver whose print hook is invoked.
    _norm : float
        Most recently reported residual norm.
    _norm0 : float
        Reference norm captured on the first iteration.
    """

    def __init__(self, solver):
        """
        Record the owning solver and seed the norm bookkeeping.

        Parameters
        ----------
        solver : object
            The openmdao solver that owns this monitor.
        """
        self._solver = solver
        self._norm = 1.0
        self._norm0 = 1.0

    def __call__(self, ksp, counter, norm):
        """
        Report one KSP iteration through the owning solver's print hook.

        Parameters
        ----------
        ksp : object
            The PETSc KSP solver instance (unused here).
        counter : int
            Iteration counter supplied by PETSc.
        norm : float
            Residual norm for this iteration.
        """
        if counter == 0 and norm != 0.0:
            # remember the initial norm so later iterations can be
            # reported relative to it
            self._norm0 = norm
        self._norm = norm

        owner = self._solver
        owner._mpi_print(counter, norm, norm / self._norm0)
        owner._iter_count += 1
class PETScKrylov(LinearSolver):
    """
    LinearSolver that uses PetSC KSP to solve for a system's derivatives.

    Attributes
    ----------
    precon : Solver
        Preconditioner for linear solve. Default is None for no preconditioner.
    _ksp : dict
        dictionary of KSP instances (keyed on vector name).
    """

    SOLVER = 'LN: PETScKrylov'

    def __init__(self, **kwargs):
        """
        Declare the solver options.

        Parameters
        ----------
        **kwargs : dict
            dictionary of options set by the instantiating class/script.
        """
        if PETSc is None:
            raise RuntimeError("PETSc is not available.")

        super(PETScKrylov, self).__init__(**kwargs)

        # initialize dictionary of KSP instances (keyed on vector name)
        self._ksp = {}

        # initialize preconditioner to None
        self.precon = None

    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.
        """
        super(PETScKrylov, self)._declare_options()

        self.options.declare('ksp_type', default='fgmres', values=KSP_TYPES,
                             desc="KSP algorithm to use. Default is 'fgmres'.")

        self.options.declare('restart', default=1000, types=int,
                             desc='Number of iterations between restarts. Larger values increase '
                                  'iteration cost, but may be necessary for convergence')

        self.options.declare('precon_side', default='right', values=['left', 'right'],
                             desc='Preconditioner side, default is right.')

        # changing the default maxiter from the base class
        self.options['maxiter'] = 100

    def _assembled_jac_solver_iter(self):
        """
        Return a generator of linear solvers using assembled jacs.
        """
        if self.options['assemble_jac']:
            yield self
        if self.precon is not None:
            for s in self.precon._assembled_jac_solver_iter():
                yield s

    def _setup_solvers(self, system, depth):
        """
        Assign system instance, set depth, and optionally perform setup.

        Parameters
        ----------
        system : <System>
            pointer to the owning system.
        depth : int
            depth of the current system (already incremented).
        """
        super(PETScKrylov, self)._setup_solvers(system, depth)

        if self.precon is not None:
            self.precon._setup_solvers(self._system, self._depth + 1)

    def _set_solver_print(self, level=2, type_='all'):
        """
        Control printing for solvers and subsolvers in the model.

        Parameters
        ----------
        level : int
            iprint level. Set to 2 to print residuals each iteration; set to 1
            to print just the iteration totals; set to 0 to disable all printing
            except for failures, and set to -1 to disable all printing including failures.
        type_ : str
            Type of solver to set: 'LN' for linear, 'NL' for nonlinear, or 'all' for all.
        """
        super(PETScKrylov, self)._set_solver_print(level=level, type_=type_)

        if self.precon is not None and type_ != 'NL':
            self.precon._set_solver_print(level=level, type_=type_)

    def mult(self, mat, in_vec, result):
        """
        Apply Jacobian matrix (KSP Callback).

        The following attributes must be defined when solve is called to
        provide information used in this callback:

        _system : System
            pointer to the owning system.
        _vec_name : str
            the right-hand-side (RHS) vector name.
        _mode : str
            'fwd' or 'rev'.

        Parameters
        ----------
        mat : PETSc.Mat
            PETSc matrix object.
        in_vec : PetSC Vector
            Incoming vector.
        result : PetSC Vector
            Empty array into which we place the matrix-vector product.
        """
        # assign x and b vectors based on mode
        system = self._system
        vec_name = self._vec_name

        if self._mode == 'fwd':
            x_vec = system._vectors['output'][vec_name]
            b_vec = system._vectors['residual'][vec_name]
        else:  # rev
            x_vec = system._vectors['residual'][vec_name]
            b_vec = system._vectors['output'][vec_name]

        # set value of x vector to KSP provided value
        x_vec._data[:] = _get_petsc_vec_array(in_vec)

        # apply linear
        scope_out, scope_in = system._get_scope()
        system._apply_linear(self._assembled_jac, [vec_name], self._rel_systems, self._mode,
                             scope_out, scope_in)

        # stuff resulting value of b vector into result for KSP
        result.array[:] = b_vec._data

    def _linearize_children(self):
        """
        Return a flag that is True when we need to call linearize on our subsystems' solvers.

        Returns
        -------
        boolean
            Flag for indicating child linearization.
        """
        precon = self.precon
        return (precon is not None) and (precon._linearize_children())

    def _linearize(self):
        """
        Perform any required linearization operations such as matrix factorization.
        """
        if self.precon is not None:
            self.precon._linearize()

    def solve(self, vec_names, mode, rel_systems=None):
        """
        Solve the linear system for the problem in self._system.

        The solution is written in place into the system's solution vectors;
        nothing is returned.

        Parameters
        ----------
        vec_names : list
            list of vector names.
        mode : string
            Derivative mode, can be 'fwd' or 'rev'.
        rel_systems : set of str
            Names of systems relevant to the current solve.
        """
        self._vec_names = vec_names
        self._rel_systems = rel_systems
        self._mode = mode

        system = self._system
        options = self.options

        if self._system.under_complex_step:
            msg = 'PETScKrylov solver is not supported under complex step.'
            raise RuntimeError(msg)

        maxiter = options['maxiter']
        atol = options['atol']
        rtol = options['rtol']

        for vec_name in vec_names:
            self._vec_name = vec_name

            # assign x and b vectors based on mode
            if self._mode == 'fwd':
                x_vec = system._vectors['output'][vec_name]
                b_vec = system._vectors['residual'][vec_name]
            else:  # rev
                x_vec = system._vectors['residual'][vec_name]
                b_vec = system._vectors['output'][vec_name]

            # create numpy arrays to interface with PETSc
            sol_array = x_vec._data.copy()
            rhs_array = b_vec._data.copy()

            # create PETSc vectors from numpy arrays
            sol_petsc_vec = PETSc.Vec().createWithArray(sol_array, comm=system.comm)
            rhs_petsc_vec = PETSc.Vec().createWithArray(rhs_array, comm=system.comm)

            # run PETSc solver
            self._iter_count = 0
            ksp = self._get_ksp_solver(system, vec_name)
            ksp.setTolerances(max_it=maxiter, atol=atol, rtol=rtol)
            ksp.solve(rhs_petsc_vec, sol_petsc_vec)

            # stuff the result into the x vector
            x_vec._data[:] = sol_array

            # drop references so the PETSc vector objects can be collected
            sol_petsc_vec = rhs_petsc_vec = None

    def apply(self, mat, in_vec, result):
        """
        Apply preconditioner.

        Parameters
        ----------
        mat : PETSc.Mat
            PETSc matrix object.
        in_vec : PETSc.Vector
            Incoming vector
        result : PETSc.Vector
            Empty vector in which the preconditioned in_vec is stored.
        """
        if self.precon:
            system = self._system
            vec_name = self._vec_name
            mode = self._mode

            # Need to clear out any junk from the inputs.
            system._vectors['input'][vec_name].set_const(0.0)

            # assign x and b vectors based on mode
            if mode == 'fwd':
                x_vec = system._vectors['output'][vec_name]
                b_vec = system._vectors['residual'][vec_name]
            else:  # rev
                x_vec = system._vectors['residual'][vec_name]
                b_vec = system._vectors['output'][vec_name]

            # set value of b vector to KSP provided value
            b_vec._data[:] = _get_petsc_vec_array(in_vec)

            # call the preconditioner
            self._solver_info.append_precon()
            self.precon.solve([vec_name], mode)
            self._solver_info.pop()

            # stuff resulting value of x vector into result for KSP
            result.array[:] = x_vec._data
        else:
            # no preconditioner, just pass back the incoming vector
            result.array[:] = _get_petsc_vec_array(in_vec)

    def _get_ksp_solver(self, system, vec_name):
        """
        Get an instance of the KSP solver for `vec_name` in `system`.

        Instances will be created on first request and cached for future use.

        Parameters
        ----------
        system : `System`
            Parent `System` object.
        vec_name : string
            name of vector.

        Returns
        -------
        KSP
            the KSP solver instance.
        """
        # use cached instance if available
        if vec_name in self._ksp:
            return self._ksp[vec_name]

        iproc = system.comm.rank

        # local and global output sizes define the shell matrix dimensions
        lsize = np.sum(system._var_sizes[vec_name]['output'][iproc, :])
        size = np.sum(system._var_sizes[vec_name]['output'])

        # matrix-free "Python" matrix whose mult() callback is this solver
        jac_mat = PETSc.Mat().createPython([(lsize, size), (lsize, size)],
                                            comm=system.comm)
        jac_mat.setPythonContext(self)
        jac_mat.setUp()

        ksp = self._ksp[vec_name] = PETSc.KSP().create(comm=system.comm)

        ksp.setOperators(jac_mat)
        ksp.setType(self.options['ksp_type'])
        ksp.setGMRESRestart(self.options['restart'])

        if self.options['precon_side'] == 'left':
            ksp.setPCSide(PETSc.PC.Side.LEFT)
        else:
            ksp.setPCSide(PETSc.PC.Side.RIGHT)

        ksp.setMonitor(Monitor(self))
        ksp.setInitialGuessNonzero(True)

        # shell preconditioner whose apply() callback is this solver
        pc_mat = ksp.getPC()
        pc_mat.setType('python')
        pc_mat.setPythonContext(self)

        return ksp

    @property
    def preconditioner(self):
        """
        Provide 'preconditioner' property for backwards compatibility.

        Returns
        -------
        <LinearSolver>
            reference to the 'precon' property.
        """
        warn_deprecation("The 'preconditioner' property provides backwards compatibility "
                         "with OpenMDAO <= 1.x ; use 'precon' instead.")
        return self.precon

    @preconditioner.setter
    def preconditioner(self, precon):
        """
        Provide for setting the 'preconditioner' property for backwards compatibility.

        Parameters
        ----------
        precon : <LinearSolver>
            reference to a <LinearSolver> to be assigned to the 'precon' property.
        """
        warn_deprecation("The 'preconditioner' property provides backwards compatibility "
                         "with OpenMDAO <= 1.x ; use 'precon' instead.")
        self.precon = precon
class PetscKSP(PETScKrylov):
    """
    Deprecated alias for PETScKrylov, retained for backwards compatibility.
    """

    def __init__(self, **kwargs):
        """
        Initialize attributes and emit a deprecation warning.

        Parameters
        ----------
        **kwargs : dict
            Named args.
        """
        super(PetscKSP, self).__init__(**kwargs)
        warn_deprecation('PetscKSP is deprecated. Use PETScKrylov instead.')
| [
"petsc4py.PETSc.KSP",
"petsc4py.PETSc.Vec",
"petsc4py.PETSc.Mat",
"numpy.sum",
"openmdao.utils.general_utils.warn_deprecation"
] | [((12728, 12783), 'numpy.sum', 'np.sum', (["system._var_sizes[vec_name]['output'][iproc, :]"], {}), "(system._var_sizes[vec_name]['output'][iproc, :])\n", (12734, 12783), True, 'import numpy as np\n'), ((12799, 12844), 'numpy.sum', 'np.sum', (["system._var_sizes[vec_name]['output']"], {}), "(system._var_sizes[vec_name]['output'])\n", (12805, 12844), True, 'import numpy as np\n'), ((13862, 13999), 'openmdao.utils.general_utils.warn_deprecation', 'warn_deprecation', (['"""The \'preconditioner\' property provides backwards compatibility with OpenMDAO <= 1.x ; use \'precon\' instead."""'], {}), '(\n "The \'preconditioner\' property provides backwards compatibility with OpenMDAO <= 1.x ; use \'precon\' instead."\n )\n', (13878, 13999), False, 'from openmdao.utils.general_utils import warn_deprecation\n'), ((14384, 14521), 'openmdao.utils.general_utils.warn_deprecation', 'warn_deprecation', (['"""The \'preconditioner\' property provides backwards compatibility with OpenMDAO <= 1.x ; use \'precon\' instead."""'], {}), '(\n "The \'preconditioner\' property provides backwards compatibility with OpenMDAO <= 1.x ; use \'precon\' instead."\n )\n', (14400, 14521), False, 'from openmdao.utils.general_utils import warn_deprecation\n'), ((14884, 14953), 'openmdao.utils.general_utils.warn_deprecation', 'warn_deprecation', (['"""PetscKSP is deprecated. Use PETScKrylov instead."""'], {}), "('PetscKSP is deprecated. 
Use PETScKrylov instead.')\n", (14900, 14953), False, 'from openmdao.utils.general_utils import warn_deprecation\n'), ((12864, 12875), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (12873, 12875), False, 'from petsc4py import PETSc\n'), ((13082, 13093), 'petsc4py.PETSc.KSP', 'PETSc.KSP', ([], {}), '()\n', (13091, 13093), False, 'from petsc4py import PETSc\n'), ((10091, 10102), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ([], {}), '()\n', (10100, 10102), False, 'from petsc4py import PETSc\n'), ((10176, 10187), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ([], {}), '()\n', (10185, 10187), False, 'from petsc4py import PETSc\n')] |
# -*- coding: utf-8 -*-
# 版权所有 2020 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import codecs
import json
import locale
import os
import sys
from copy import copy
from itertools import chain
from typing import Dict, Iterable, Optional
import h5py
import numpy as np
import pandas
from rqalpha.const import COMMISSION_TYPE, INSTRUMENT_TYPE
from rqalpha.model.instrument import Instrument
from rqalpha.utils.datetime_func import convert_date_to_date_int
from rqalpha.utils.functools import lru_cache
from rqalpha.utils.i18n import gettext as _
from .storage_interface import (AbstractCalendarStore, AbstractDateSet,
AbstractDayBarStore, AbstractDividendStore,
AbstractInstrumentStore,
AbstractSimpleFactorStore)
class ExchangeTradingCalendarStore(AbstractCalendarStore):
    """Trading calendar backed by a numpy file of date integers."""

    def __init__(self, f):
        self._f = f

    def get_trading_calendar(self):
        # type: () -> pandas.DatetimeIndex
        raw_dates = np.load(self._f, allow_pickle=False)
        return pandas.to_datetime([str(d) for d in raw_dates])
class FutureInfoStore(object):
    """Per-future trading info (commission rules etc.), with custom overrides."""

    COMMISSION_TYPE_MAP = {
        "by_volume": COMMISSION_TYPE.BY_VOLUME,
        "by_money": COMMISSION_TYPE.BY_MONEY
    }

    def __init__(self, f, custom_future_info):
        with open(f, "r") as json_file:
            raw_items = json.load(json_file)
        # keyed by order_book_id when present, otherwise by underlying symbol
        self._default_data = {}
        for item in raw_items:
            key = item.get("order_book_id") or item.get("underlying_symbol")
            self._default_data[key] = self._process_future_info_item(item)
        self._custom_data = custom_future_info
        self._future_info = {}

    @classmethod
    def _process_future_info_item(cls, item):
        # translate the serialized commission type into its enum value
        item["commission_type"] = cls.COMMISSION_TYPE_MAP[item["commission_type"]]
        return item

    def get_future_info(self, instrument):
        # type: (Instrument) -> Dict[str, float]
        order_book_id = instrument.order_book_id
        if order_book_id in self._future_info:
            return self._future_info[order_book_id]
        custom_info = (self._custom_data.get(order_book_id) or
                       self._custom_data.get(instrument.underlying_symbol))
        info = (self._default_data.get(order_book_id) or
                self._default_data.get(instrument.underlying_symbol))
        if custom_info:
            # copy before merging so the shared defaults are not mutated
            info = copy(info) or {}
            info.update(custom_info)
        elif not info:
            raise NotImplementedError(_("unsupported future instrument {}").format(order_book_id))
        return self._future_info.setdefault(order_book_id, info)
class InstrumentStore(AbstractInstrumentStore):
    """In-memory instrument store filtered to a single instrument type."""

    def __init__(self, instruments, instrument_type):
        # type: (Iterable[Instrument], INSTRUMENT_TYPE) -> None
        self._instrument_type = instrument_type
        self._instruments = {}
        self._sym_id_map = {}
        for ins in instruments:
            if ins.type == instrument_type:
                self._instruments[ins.order_book_id] = ins
                self._sym_id_map[ins.symbol] = ins.order_book_id

    @property
    def instrument_type(self):
        # type: () -> INSTRUMENT_TYPE
        return self._instrument_type

    @property
    def all_id_and_syms(self):
        # type: () -> Iterable[str]
        # every key usable for lookup: order_book_ids plus symbols
        return chain(self._instruments.keys(), self._sym_id_map.keys())

    def get_instruments(self, id_or_syms):
        # type: (Optional[Iterable[str]]) -> Iterable[Instrument]
        if id_or_syms is None:
            return self._instruments.values()
        order_book_ids = set()
        for key in id_or_syms:
            if key in self._instruments:
                order_book_ids.add(key)
            elif key in self._sym_id_map:
                order_book_ids.add(self._sym_id_map[key])
        return (self._instruments[obid] for obid in order_book_ids)
class ShareTransformationStore(object):
    """Lookup of share-transformation records loaded from a UTF-8 JSON file."""

    def __init__(self, f):
        with codecs.open(f, 'r', encoding="utf-8") as store:
            self._share_transformation = json.load(store)

    def get_share_transformation(self, order_book_id):
        """Return (successor, share_conversion_ratio), or None when unknown."""
        if order_book_id not in self._share_transformation:
            return
        record = self._share_transformation[order_book_id]
        return record["successor"], record["share_conversion_ratio"]
def open_h5(path, *args, **kwargs):
    """Open an HDF5 file, with a workaround for non-ascii paths on Windows."""
    # why do this? non-ascii path in windows!!
    if sys.platform == "win32":
        try:
            enc = locale.getlocale(locale.LC_ALL)[1]
        except TypeError:
            enc = None
        if enc and enc.lower() == "utf-8":
            path = path.encode("utf-8")
    try:
        return h5py.File(path, *args, **kwargs)
    except OSError as e:
        raise RuntimeError(_(
            "open data bundle failed, you can remove {} and try to regenerate bundle: {}"
        ).format(path, e))
class DayBarStore(AbstractDayBarStore):
    """HDF5-backed store of daily bars, one dataset per order_book_id."""

    # NOTE: ``np.float`` (an alias of the builtin ``float``) was deprecated in
    # NumPy 1.20 and removed in 1.24; ``np.float64`` is the identical dtype.
    DEFAULT_DTYPE = np.dtype([
        ('datetime', np.uint64),
        ('open', np.float64),
        ('close', np.float64),
        ('high', np.float64),
        ('low', np.float64),
        ('volume', np.float64),
    ])

    def __init__(self, path):
        if not os.path.exists(path):
            # NOTE(review): FileExistsError looks inverted here (the file is
            # *missing*); kept so that existing callers catching it still
            # work, but FileNotFoundError would be the natural type.
            raise FileExistsError("File {} not exist,please update bundle.".format(path))
        self._h5 = open_h5(path, mode="r")

    def get_bars(self, order_book_id):
        """Return all bars for ``order_book_id``; empty array when absent."""
        try:
            return self._h5[order_book_id][:]
        except KeyError:
            return np.empty(0, dtype=self.DEFAULT_DTYPE)

    def get_date_range(self, order_book_id):
        """Return (first, last) datetime ints, or a fixed default when absent."""
        try:
            data = self._h5[order_book_id]
            return data[0]['datetime'], data[-1]['datetime']
        except KeyError:
            return 20050104, 20050104
class FutureDayBarStore(DayBarStore):
    """Day bar store for futures; extends the base dtype with open interest."""

    DEFAULT_DTYPE = np.dtype([*DayBarStore.DEFAULT_DTYPE.descr, ("open_interest", '<f8')])
class DividendStore(AbstractDividendStore):
    """HDF5-backed dividend store keyed by order_book_id."""

    def __init__(self, path):
        self._h5 = open_h5(path, mode="r")

    def get_dividend(self, order_book_id):
        """Return the dividend records array, or None when the id is unknown."""
        if order_book_id in self._h5:
            return self._h5[order_book_id][:]
        return None
class YieldCurveStore:
    """Yield-curve lookups over a structured array loaded from an HDF5 bundle."""

    def __init__(self, path):
        # load the whole 'data' dataset into memory; rows carry a 'date'
        # field (int, sorted ascending — assumed by searchsorted below)
        # plus one column per tenor
        self._data = open_h5(path, mode="r")["data"][:]

    def get_yield_curve(self, start_date, end_date, tenor):
        """Return yield-curve rows in [start_date, end_date] as a DataFrame
        indexed by date; a single Series when ``tenor`` is given, or None
        when the range selects no rows.
        """
        d1 = convert_date_to_date_int(start_date)
        d2 = convert_date_to_date_int(end_date)
        # half-open slice [s, e) over the date-sorted rows
        s = self._data['date'].searchsorted(d1)
        e = self._data['date'].searchsorted(d2, side='right')
        # clamp to the last row, then extend by one if that row matches d2
        # exactly so it is included (NOTE(review): with side='right' the
        # equal-match branch looks unreachable except at the clamped last
        # row — kept as-is, confirm intent before changing)
        if e == len(self._data):
            e -= 1
        if self._data[e]['date'] == d2:
            e += 1
        if e < s:
            return None
        df = pandas.DataFrame(self._data[s:e])
        df.index = pandas.to_datetime([str(d) for d in df['date']])
        del df['date']
        if tenor is not None:
            return df[tenor]
        return df
class SimpleFactorStore(AbstractSimpleFactorStore):
    """HDF5-backed store of per-instrument factor arrays."""

    def __init__(self, path):
        self._h5 = open_h5(path, mode="r")

    def get_factors(self, order_book_id):
        """Return the factor array for ``order_book_id``, or None when absent."""
        if order_book_id in self._h5:
            return self._h5[order_book_id][:]
        return None
class DateSet(AbstractDateSet):
    """HDF5-backed set of date ints per order_book_id."""

    def __init__(self, f):
        self._h5 = open_h5(f, mode="r")
        # Per-instance memo. This replaces ``@lru_cache(None)`` on the
        # method, which keys on ``self`` and therefore keeps every instance
        # (and its open HDF5 handle) alive for the process lifetime — the
        # classic lru_cache-on-method leak (ruff B019).
        self._days_cache = {}  # type: Dict[str, set]

    def get_days(self, order_book_id):
        """Return the (cached) set of date ints stored for ``order_book_id``."""
        try:
            return self._days_cache[order_book_id]
        except KeyError:
            pass
        try:
            days = self._h5[order_book_id][:]
            result = set(days.tolist())
        except KeyError:
            result = set()
        self._days_cache[order_book_id] = result
        return result

    def contains(self, order_book_id, dates):
        """Return one bool per date, or None when the id has no stored days."""
        date_set = self.get_days(order_book_id)
        if not date_set:
            return None

        def _to_dt_int(d):
            # accept YYYYMMDD or YYYYMMDDHHMMSS ints, or date-like objects
            if isinstance(d, (int, np.int64, np.uint64)):
                return int(d // 1000000) if d > 100000000 else int(d)
            else:
                return d.year * 10000 + d.month * 100 + d.day

        return [(_to_dt_int(d) in date_set) for d in dates]
| [
"os.path.exists",
"rqalpha.utils.i18n.gettext",
"rqalpha.utils.functools.lru_cache",
"h5py.File",
"json.load",
"copy.copy",
"rqalpha.utils.datetime_func.convert_date_to_date_int",
"locale.getlocale",
"codecs.open",
"numpy.empty",
"pandas.DataFrame",
"numpy.dtype",
"numpy.load"
] | [((5505, 5646), 'numpy.dtype', 'np.dtype', (["[('datetime', np.uint64), ('open', np.float), ('close', np.float), ('high',\n np.float), ('low', np.float), ('volume', np.float)]"], {}), "([('datetime', np.uint64), ('open', np.float), ('close', np.float),\n ('high', np.float), ('low', np.float), ('volume', np.float)])\n", (5513, 5646), True, 'import numpy as np\n'), ((6366, 6436), 'numpy.dtype', 'np.dtype', (["(DayBarStore.DEFAULT_DTYPE.descr + [('open_interest', '<f8')])"], {}), "(DayBarStore.DEFAULT_DTYPE.descr + [('open_interest', '<f8')])\n", (6374, 6436), True, 'import numpy as np\n'), ((7846, 7861), 'rqalpha.utils.functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (7855, 7861), False, 'from rqalpha.utils.functools import lru_cache\n'), ((5238, 5270), 'h5py.File', 'h5py.File', (['path', '*args'], {}), '(path, *args, **kwargs)\n', (5247, 5270), False, 'import h5py\n'), ((6893, 6929), 'rqalpha.utils.datetime_func.convert_date_to_date_int', 'convert_date_to_date_int', (['start_date'], {}), '(start_date)\n', (6917, 6929), False, 'from rqalpha.utils.datetime_func import convert_date_to_date_int\n'), ((6943, 6977), 'rqalpha.utils.datetime_func.convert_date_to_date_int', 'convert_date_to_date_int', (['end_date'], {}), '(end_date)\n', (6967, 6977), False, 'from rqalpha.utils.datetime_func import convert_date_to_date_int\n'), ((7258, 7291), 'pandas.DataFrame', 'pandas.DataFrame', (['self._data[s:e]'], {}), '(self._data[s:e])\n', (7274, 7291), False, 'import pandas\n'), ((4517, 4554), 'codecs.open', 'codecs.open', (['f', '"""r"""'], {'encoding': '"""utf-8"""'}), "(f, 'r', encoding='utf-8')\n", (4528, 4554), False, 'import codecs\n'), ((4606, 4622), 'json.load', 'json.load', (['store'], {}), '(store)\n', (4615, 4622), False, 'import json\n'), ((5744, 5764), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5758, 5764), False, 'import os\n'), ((5053, 5084), 'locale.getlocale', 'locale.getlocale', (['locale.LC_ALL'], {}), '(locale.LC_ALL)\n', 
(5069, 5084), False, 'import locale\n'), ((6042, 6079), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'self.DEFAULT_DTYPE'}), '(0, dtype=self.DEFAULT_DTYPE)\n', (6050, 6079), True, 'import numpy as np\n'), ((1647, 1683), 'numpy.load', 'np.load', (['self._f'], {'allow_pickle': '(False)'}), '(self._f, allow_pickle=False)\n', (1654, 1683), True, 'import numpy as np\n'), ((2132, 2152), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2141, 2152), False, 'import json\n'), ((2926, 2936), 'copy.copy', 'copy', (['info'], {}), '(info)\n', (2930, 2936), False, 'from copy import copy\n'), ((5323, 5408), 'rqalpha.utils.i18n.gettext', '_', (['"""open data bundle failed, you can remove {} and try to regenerate bundle: {}"""'], {}), "('open data bundle failed, you can remove {} and try to regenerate bundle: {}'\n )\n", (5324, 5408), True, 'from rqalpha.utils.i18n import gettext as _\n'), ((3053, 3090), 'rqalpha.utils.i18n.gettext', '_', (['"""unsupported future instrument {}"""'], {}), "('unsupported future instrument {}')\n", (3054, 3090), True, 'from rqalpha.utils.i18n import gettext as _\n')] |
"""
hAcc (Hail Accumulation) implementation
Algorthim published by:
<NAME>., <NAME>., <NAME>., & <NAME>. (2019). Using Operational Radar to Identify Deep Hail Accumulations from Thunderstorms, Weather and Forecasting, 34(1), 133-150. from https://journals.ametsoc.org/view/journals/wefo/34/1/waf-d-18-0053_1.xml
and
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Colorado Plowable Hailstorms: Synoptic Weather, Radar, and Lightning Characteristics, Weather and Forecasting, 31(2), 663-693. from https://journals.ametsoc.org/view/journals/wefo/31/2/waf-d-15-0037_1.xml
Contains the LASH retrieval for gridded radar data.
<NAME> - 12 August 2021
"""
import numpy as np
from pyhail import common
def main(radar, fz_level, pressure, z_fname, hsda_fname, mesh_fname, sp_reflectivity_threshold=55, heights_fieldname='gate_z'):
    """
    Hail Accumulation defined by Robinson et al. 2018 and Kalina et al. 2016.
    If the heights field exists, this will be used and save a small amount of computation time.

    Parameters:
    ===========
    radar : object
        Py-ART radar object.
    fz_level: int
        wet bulb freezing level (m)
    pressure: float (1,)
        mean pressure between the surface and the height of the 0C wet-bulb temperature
    z_fname: str
        reflectivity field name
    hsda_fname: str
        field name for HSDR; when None a fixed reflectivity threshold is used
    mesh_fname: str
        field name for MESH
    sp_reflectivity_threshold: float
        value used to threshold reflectivity for single pol analysis
    heights_fieldname: str
        field name holding per-gate heights (recomputed when missing)

    Returns:
    hAcc_meta: dict
        pyart field dictionary containing hAcc dataset
    """
    Z = radar.fields[z_fname]["data"]
    if np.ma.is_masked(Z):
        Z = Z.filled(0)

    if hsda_fname is None:
        # use a simple single pol HCA for hail (fixed threshold)
        hail_hca = Z >= sp_reflectivity_threshold
    else:
        # use hsda to determine hail
        hail_hca = radar.fields[hsda_fname]["data"]
        if np.ma.is_masked(hail_hca):
            hail_hca = hail_hca.filled(0)

    # load mesh
    mesh = radar.get_field(0, mesh_fname)
    if np.ma.is_masked(mesh):
        mesh = mesh.filled(0)

    # calculate height
    try:
        heights = radar.fields[heights_fieldname]['data']
    except KeyError:
        # Heights field absent: rebuild gate heights from the radar geometry.
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit; radar.fields is a dict, so KeyError is the
        # failure mode being handled.)
        rg, azg = np.meshgrid(radar.range["data"], radar.azimuth["data"])
        rg, eleg = np.meshgrid(radar.range["data"], radar.elevation["data"])
        _, _, heights = common.antenna_to_cartesian(rg / 1000, azg, eleg)

    n = 0.64  # packing density of monodisperse spheres (Kalina et al. 2016)
    ph = 900  # density of ice (kg m-3)
    epsilon = 0.814
    Ze = 10.0 ** (Z / 10.0)  # convert Z to Ze
    IWC = (
        (4.4 * 10 ** -5) * Ze ** (0.71) / 1000
    )  # Ice Water Content (kg m-3) derived from Ze follow Heysfield and Miller 1998

    # remove IWC values where hail_hca is not hail (less than 1)
    IWC[hail_hca < 1] = 0
    # remove IWC values where temperature is at or below 0
    IWC[heights > fz_level] = 0

    # get lowest valid IWC
    # insert sweeps into 3D array (el, az, rg), ordered by elevation angle
    el_sort_idx = np.argsort(radar.fixed_angle["data"])
    az = radar.get_azimuth(0)
    rg = radar.range["data"]
    IWC_3d = np.ma.zeros((len(el_sort_idx), len(az), len(rg)))
    for i, el_idx in enumerate(el_sort_idx):
        IWC_3d[i, :, :] = IWC[radar.get_slice(el_idx)]

    # mask zero values
    IWC_3d_masked = np.ma.masked_array(IWC_3d, IWC_3d == 0)
    # find the lowest unmasked value by first finding edges
    edges = np.ma.notmasked_edges(IWC_3d_masked, axis=0)
    # use first edge on axis 0 (lowest in height)
    IWC_lowest_valid = np.zeros_like(mesh)
    IWC_lowest_valid[edges[0][1], edges[0][2]] = IWC_3d_masked[edges[0]]

    # pressure correction from Heysmfield and Write (2014)
    PC = (1000 / pressure) ** 0.545
    # diameter-fall speed relation from Heysmfield and Wright (2014), units of cm/s
    Vt = 488 * (mesh / 10) ** 0.84 * PC
    # calculate LASH (units of cm/s)
    hAcc = (1 / epsilon) * (1 / (n * ph)) * IWC_lowest_valid * Vt
    hAcc = hAcc * 60  # convert cm/s to cm/min

    # hAcc is only valid at the surface; to represent it in pyart radar
    # objects, insert it into the lowest sweep
    hAcc_field = np.zeros_like(radar.fields[z_fname]["data"])
    hAcc_field[radar.get_slice(el_sort_idx[0])] = hAcc

    hAcc_meta = {
        "data": hAcc_field,
        "units": "cm/min",
        "long_name": "hail accumulation",
        "description": "Hail Accumulation Retrieval developed by Wallace et al. (2019) doi:10.1175/WAF-D-18-0053.1",
        "comments": "only valid in the lowest sweep",
    }
    return hAcc_meta
| [
"numpy.ma.is_masked",
"pyhail.common.antenna_to_cartesian",
"numpy.argsort",
"numpy.ma.notmasked_edges",
"numpy.meshgrid",
"numpy.ma.masked_array",
"numpy.zeros_like"
] | [((1670, 1688), 'numpy.ma.is_masked', 'np.ma.is_masked', (['Z'], {}), '(Z)\n', (1685, 1688), True, 'import numpy as np\n'), ((2098, 2119), 'numpy.ma.is_masked', 'np.ma.is_masked', (['mesh'], {}), '(mesh)\n', (2113, 2119), True, 'import numpy as np\n'), ((3084, 3121), 'numpy.argsort', 'np.argsort', (["radar.fixed_angle['data']"], {}), "(radar.fixed_angle['data'])\n", (3094, 3121), True, 'import numpy as np\n'), ((3388, 3427), 'numpy.ma.masked_array', 'np.ma.masked_array', (['IWC_3d', '(IWC_3d == 0)'], {}), '(IWC_3d, IWC_3d == 0)\n', (3406, 3427), True, 'import numpy as np\n'), ((3537, 3581), 'numpy.ma.notmasked_edges', 'np.ma.notmasked_edges', (['IWC_3d_masked'], {'axis': '(0)'}), '(IWC_3d_masked, axis=0)\n', (3558, 3581), True, 'import numpy as np\n'), ((3655, 3674), 'numpy.zeros_like', 'np.zeros_like', (['mesh'], {}), '(mesh)\n', (3668, 3674), True, 'import numpy as np\n'), ((4250, 4294), 'numpy.zeros_like', 'np.zeros_like', (["radar.fields[z_fname]['data']"], {}), "(radar.fields[z_fname]['data'])\n", (4263, 4294), True, 'import numpy as np\n'), ((1965, 1990), 'numpy.ma.is_masked', 'np.ma.is_masked', (['hail_hca'], {}), '(hail_hca)\n', (1980, 1990), True, 'import numpy as np\n'), ((2272, 2327), 'numpy.meshgrid', 'np.meshgrid', (["radar.range['data']", "radar.azimuth['data']"], {}), "(radar.range['data'], radar.azimuth['data'])\n", (2283, 2327), True, 'import numpy as np\n'), ((2347, 2404), 'numpy.meshgrid', 'np.meshgrid', (["radar.range['data']", "radar.elevation['data']"], {}), "(radar.range['data'], radar.elevation['data'])\n", (2358, 2404), True, 'import numpy as np\n'), ((2429, 2478), 'pyhail.common.antenna_to_cartesian', 'common.antenna_to_cartesian', (['(rg / 1000)', 'azg', 'eleg'], {}), '(rg / 1000, azg, eleg)\n', (2456, 2478), False, 'from pyhail import common\n')] |
# -*- coding: utf-8 -*-
"""Script for removig double picked partiles in STAR file."""
import os
import math
import logging
import time
import csv
import itertools
import operator
import copy as cp
import scipy as sp
import numpy as np
from datetime import timedelta
# import pyseg as ps
from pyorg import sub, disperse_io
# filepaths
# ROOT_PATH = '/fs/pool/pool-lucic2/antonio/ribo_johannes/lum_ext_repick/FTR/stat'
ROOT_PATH = '/fs/pool/pool-lucic2/antonio/ribo_johannes/lum_ext_repick/ribo'
# Input STAR file
in_star = ROOT_PATH + '/ref_rln2/run1_ribo_data.star' # '/../FTR/stat/in/2_class_T.star' # '/class_ABC/run1_C_it050_data.star'
# To select a specific microsome (None keeps all micrographs)
in_mic = None # '/fs/pool/pool-lucic2/johannes/tomograms/stefan/160614/tomo22_z120_ves_1.mrc' # '/fs/pool/pool-lucic2/johannes/tomograms/stefan/160614/oriented_ribo_bin2/tomo22_z120_ves_1.mrc'
# Output STAR file
out_star = ROOT_PATH + '/ref_rln2/0_run1_ribo_dpick.star' # '/ref_rln2/2_class_T_dpick.star' # '/class_ABC/run1_C_it050_data_dp8_v2.star'
# Parameters
# pre-binning factor; not referenced by main() in this script — TODO confirm
pre_bin = None # .5
# voxel size of the tomograms
res = 1.048 # nm/vx
# scale-suppression radius: no two kept particles may be closer than this
ssup = 15 # 8 # 20 # 8 # nm
# presumably the binning ratio between refinement (origin) and picking
# coordinate spaces — main() rescales origins by 1/s_ubin; verify
s_ubin = 2. # 4.
# scale factor applied to picked particle coordinates
s_pbin = 0.5 # 1.0
# label value marking the segmented region in the *_seg.mrc tomograms
seg_lbl = 1
# max allowed particle distance to the segmentation before flagging as bad
err_dst = 15 # 5 # 15 # nm
# percentage of bad particles above which a whole microsome is discarded
err_ptg = 60 # %
# particles whose refinement origin shift exceeds this are discarded
max_shift = 30 # 10 # nm
# True -> keep the highest-likelihood particle within each ssup radius;
# False -> first-found criterium
log_crit = True
def main():
    """Remove double-picked particles from a RELION STAR file.

    Driven by the module-level parameters above, the pipeline:
      1. loads ``in_star`` and drops the _rlnRandomSubset column,
      2. optionally keeps only the micrograph ``in_mic``,
      3. rescales coordinates by ``s_pbin`` (a x2 copy is kept in star_ves),
      4. drops particles whose origin shift exceeds ``max_shift`` nm,
      5. drops particles further than ``err_dst`` nm from the membrane
         segmentation (whole microsomes when > ``err_ptg`` % are bad),
      6. suppresses neighbours closer than ``ssup`` nm (max-likelihood or
         first-found criterium, per ``log_crit``),
      7. verifies the result and stores the output STAR files plus the
         vesicle ('_ves') and pre-shifted ('_shift') variants.

    Fixes applied relative to the original script:
      * ``np.float`` / ``np.int`` / ``np.bool`` replaced by their supported
        equivalents (the aliases were removed in NumPy 1.24).
      * ``mic_logs`` is now float32 — it stores _rlnLogLikeliContribution
        values that are argsorted; a bool dtype collapsed them to 0/1 and
        made the "maximum likelihood" ordering meaningless.
      * duplicated-image warning concatenated a list (``imgs``) into a
        string, raising TypeError whenever it triggered; now uses ``img``.
      * ``sp.ndimage.morphology`` namespace (removed in modern SciPy)
        replaced by ``sp.ndimage``.
    """
    start_time = time.time()
    print('Loading star file: {}.'.format(in_star))
    star, star_ves = sub.Star(), sub.Star()
    star.load(in_star)
    star_ves = cp.deepcopy(star)

    print('Pre-processing input star file...')
    print('\tRemoving rlnRandomSubset column...')
    if star.has_column('_rlnRandomSubset'):
        star.del_column('_rlnRandomSubset')
        star_ves.del_column('_rlnRandomSubset')
    if in_mic is not None:
        print('\tChoosing micrograph: ' + in_mic)
        del_l = list()
        for row in range(star.get_nrows()):
            if star.get_element('_rlnMicrographName', row) != in_mic:
                del_l.append(row)
        print('\t\t-Deleting ' + str(len(del_l)) + ' of ' + str(star.get_nrows()) + ' particles.')
        star.del_rows(del_l)
        star_ves.del_rows(del_l)

    # Rescale coordinates; the vesicle copy stays at twice the particle scale
    print('\tParticles pre-processing...')
    for row in range(star.get_nrows()):
        x = star.get_element('_rlnCoordinateX', row) * s_pbin
        y = star.get_element('_rlnCoordinateY', row) * s_pbin
        z = star.get_element('_rlnCoordinateZ', row) * s_pbin
        star.set_element('_rlnCoordinateX', row, x)
        star.set_element('_rlnCoordinateY', row, y)
        star.set_element('_rlnCoordinateZ', row, z)
        star_ves.set_element('_rlnCoordinateX', row, x * 2.)
        star_ves.set_element('_rlnCoordinateY', row, y * 2.)
        star_ves.set_element('_rlnCoordinateZ', row, z * 2.)

    print('\tRemoving highly shifted particles (' + str(max_shift) + ' nm)')
    del_l = list()
    max_shift_v = max_shift / (res / float(s_ubin))
    for row in range(star.get_nrows()):
        # Getting shifting coordinates
        sx = star.get_element('_rlnOriginX', row)
        sy = star.get_element('_rlnOriginY', row)
        sz = star.get_element('_rlnOriginZ', row)
        shifts = np.asarray((sx, sy, sz), dtype=np.float64)
        dst = math.sqrt((shifts * shifts).sum())
        if dst > max_shift_v:
            del_l.append(row)
    print('\t\t-Deleting ' + str(len(del_l)) + ' of ' + str(star.get_nrows()) + ' particles.')
    star.del_rows(del_l)
    star_ves.del_rows(del_l)

    print('\tRemoving badly segmented microsomes...')
    err_dst_v = err_dst / res
    pt_ssup_v = ssup / res
    s_bin = 1. / s_ubin
    parts_mic, del_l = dict(), list()
    part_coords = np.zeros(shape=(star.get_nrows(), 3), dtype=np.float32)
    for row in range(star.get_nrows()):
        # Getting particle coordinates
        x = star.get_element('_rlnCoordinateX', row)
        y = star.get_element('_rlnCoordinateY', row)
        z = star.get_element('_rlnCoordinateZ', row)
        sx = star.get_element('_rlnOriginX', row)
        sy = star.get_element('_rlnOriginY', row)
        sz = star.get_element('_rlnOriginZ', row)
        # NOTE: X pairs with OriginY and Y with OriginX — presumably an axis
        # convention swap between RELION and the tomogram arrays; kept as-is
        # (the straight pairing is the commented alternative below).
        part_coords[row, :] = x - sy * s_bin, y - sx * s_bin, z - sz * s_bin
        # part_coords[row, :] = x - sx * s_bin, y - sy * s_bin, z - sz * s_bin
        # Adding particle to micrographs dictionary
        mic = star.get_element('_rlnMicrographName', row)
        mic_seg = os.path.split(mic)[0] + '/' + os.path.splitext(os.path.split(mic)[1])[0] + '_seg.mrc'
        path_mic, stem_mic = os.path.split(mic_seg)
        path_mic = path_mic.replace('/oriented_ribo_bin2', '')
        name = stem_mic.split('_')
        seg_star = sub.Star()
        seg_star.load(path_mic + '/graph_ribo/' + name[0] + '_' + name[1] + '_mb_graph.star')
        try:
            row_seg = seg_star.find_element('_psSegImage', mic_seg)
        except ValueError:
            # fall back to the alternative segmentation folder layout
            hold_path, hold_stem = os.path.split(mic_seg)
            mic_seg = hold_path + '/oriented_ribo_bin2/' + hold_stem
            row_seg = seg_star.find_element('_psSegImage', mic_seg)
        # Subtracting the segmentation offset (note the X/Y swap again)
        part_coords[row, 0] -= seg_star.get_element('_psSegOffY', row_seg)
        part_coords[row, 1] -= seg_star.get_element('_psSegOffX', row_seg)
        part_coords[row, 2] -= seg_star.get_element('_psSegOffZ', row_seg)
        try:
            parts_mic[mic_seg].append(row)
        except KeyError:
            parts_mic[mic_seg] = list()
            parts_mic[mic_seg].append(row)

    mics_del, tot_dst = 0, 0
    for mic_seg, rows in parts_mic.items():
        try:
            mic = disperse_io.load_tomo(mic_seg)
        except IOError:
            hold_path, hold_stem = os.path.split(mic_seg)
            mic_seg = hold_path + '/oriented_ribo_bin2/' + hold_stem
            mic = disperse_io.load_tomo(mic_seg)
        # distance (in vx) of every voxel to the segmented region
        mic_dst = sp.ndimage.distance_transform_edt(mic != seg_lbl)
        num_err = 0
        for row in rows:
            coord = np.round(part_coords[row, :]).astype(int)
            hold_dst = mic_dst[coord[1], coord[0], coord[2]]
            tot_dst += hold_dst
            if hold_dst > err_dst_v:
                del_l.append(row)
                num_err += 1
        ptg = 100. * (num_err / float(len(rows)))
        print('\t\tProcessing microsome: ' + mic_seg)
        print('\t\t\t-Number of particles: ' + str(len(rows)))
        print('\t\t\t-Number of bad particles: ' + str(num_err) + ' (' + str(ptg) + '%)')
        if ptg > err_ptg:
            print('\t\t\t-BAD MICROSOME DELETING ALL PARTICLES!')
            for row in rows:
                try:
                    del_l.index(row)
                except ValueError:
                    del_l.append(row)
            mics_del += 1
    print('\tTotal distance measured + ' + str(tot_dst) + ' vx')
    print('\tDeleted microsomes + ' + str(mics_del) + ' of ' + str(len(list(parts_mic.keys()))))
    print('\t\t-Deleted ' + str(len(del_l)) + ' of ' + str(star.get_nrows()) + ' particles.')
    star.del_rows(del_l)
    star_ves.del_rows(del_l)

    print('\tFrom microsomes path indexing to tomograms path indexing...')
    for row in range(star.get_nrows()):
        hold_mic = star.get_element('_rlnMicrographName', row)
        new_mic = hold_mic.replace('/oriented_ribo_bin2', '')
        tomo_path, fname = os.path.split(new_mic)
        hold_stem = fname.split('_ves_')[0]
        star.set_element(key='_rlnMicrographName', val=tomo_path + '/' + hold_stem + '_bin_4.em', row=row)

    print('\t\tApplying scale suppression (' + str(pt_ssup_v) + ' vx)...')
    # Computing tomograms dictionary
    parts_mic, del_l = dict(), list()
    part_coords = np.zeros(shape=(star.get_nrows(), 3), dtype=np.float32)
    for row in range(star.get_nrows()):
        # Getting particle coordinates
        x = star.get_element('_rlnCoordinateX', row)
        y = star.get_element('_rlnCoordinateY', row)
        z = star.get_element('_rlnCoordinateZ', row)
        sx = star.get_element('_rlnOriginX', row)
        sy = star.get_element('_rlnOriginY', row)
        sz = star.get_element('_rlnOriginZ', row)
        part_coords[row, :] = x - sy*s_bin, y - sx*s_bin, z - sz*s_bin
        # part_coords[row, :] = x - sx * s_bin, y - sy * s_bin, z - sz * s_bin
        # Adding particle to micrographs dictionary
        mic = star.get_element('_rlnMicrographName', row)
        try:
            parts_mic[mic].append(row)
        except KeyError:
            parts_mic[mic] = list()
            parts_mic[mic].append(row)

    # Particle suppression on output STAR file (maximum likelihood criterium)
    if log_crit:
        for mic, rows in parts_mic.items():
            mic_coords = np.zeros(shape=(len(rows), 3), dtype=np.float32)
            mic_lut = np.ones(shape=len(rows), dtype=bool)
            # float32, NOT bool: these are _rlnLogLikeliContribution values
            # whose ordering drives the suppression below.
            mic_logs = np.zeros(shape=len(rows), dtype=np.float32)
            for i, row in enumerate(rows):
                mic_coords[i, :] = part_coords[row, :]
                mic_logs[i] = star.get_element('_rlnLogLikeliContribution', row)
            log_ids = np.argsort(mic_logs)[::-1]
            for i in log_ids:
                if mic_lut[i]:
                    hold = mic_coords - mic_coords[i, :]
                    dsts = np.sqrt((hold * hold).sum(axis=1))
                    ids = np.where((dsts < pt_ssup_v) & mic_lut)[0]
                    # Only clean neighbours when we are placed at a maximum
                    for idx in ids:
                        if mic_lut[idx] and (idx != i):
                            mic_lut[idx] = False
                            del_l.append(rows[idx])
    else:
        # Particle suppression on output STAR file (first found criterium)
        for mic, rows in parts_mic.items():
            mic_coords = np.zeros(shape=(len(rows), 3), dtype=np.float32)
            mic_lut = np.ones(shape=len(rows), dtype=bool)
            for i, row in enumerate(rows):
                mic_coords[i, :] = part_coords[row, :]
            for i, coord in enumerate(mic_coords):
                if mic_lut[i]:
                    hold = mic_coords - coord
                    dsts = np.sqrt((hold * hold).sum(axis=1))
                    ids = np.where(dsts < pt_ssup_v)[0]
                    for idx in ids:
                        if mic_lut[idx] and (idx != i):
                            mic_lut[idx] = False
                            del_l.append(rows[idx])
    print('\t\t-Deleted ' + str(len(del_l)) + ' of ' + str(star.get_nrows()) + ' particles.')
    star.del_rows(del_l)
    star_ves.del_rows(del_l)

    # Sanity check: warn about any surviving pair closer than the radius
    print('\tChecking removing procedure...')
    parts_mic = dict()
    part_coords = np.zeros(shape=(star.get_nrows(), 3), dtype=np.float32)
    for row in range(star.get_nrows()):
        # Getting particle coordinates
        x = star.get_element('_rlnCoordinateX', row)
        y = star.get_element('_rlnCoordinateY', row)
        z = star.get_element('_rlnCoordinateZ', row)
        sx = star.get_element('_rlnOriginX', row)
        sy = star.get_element('_rlnOriginY', row)
        sz = star.get_element('_rlnOriginZ', row)
        part_coords[row, :] = x - sy * s_bin, y - sx * s_bin, z - sz * s_bin
        # part_coords[row, :] = x - sx * s_bin, y - sy * s_bin, z - sz * s_bin
        # Adding particle to micrographs dictionary
        mic = star.get_element('_rlnMicrographName', row)
        try:
            parts_mic[mic].append(row)
        except KeyError:
            parts_mic[mic] = list()
            parts_mic[mic].append(row)
    for mic, rows in parts_mic.items():
        if len(rows) <= 1:
            continue
        mic_coords = np.zeros(shape=(len(rows), 3), dtype=np.float32)
        for i, row in enumerate(rows):
            mic_coords[i, :] = part_coords[row, :]
        for i, coord in enumerate(mic_coords):
            hold = mic_coords - coord
            dsts = np.sqrt((hold * hold).sum(axis=1))
            dsts_min = np.sort(dsts)[1]
            if dsts_min <= pt_ssup_v:
                print('\t-WARNING: particle in row ' + str(rows[i]) + ' with minimum distance ' + str(dsts_min*res) + 'nm not suppressed!')
    imgs = star.get_column_data('_rlnImageName')
    for row, img in enumerate(imgs):
        if imgs.count(img) != 1:
            # was '+ imgs' (a list), which raised TypeError when triggered
            print('\t-WARNING: not a single entry for particle in row ' + str(row) + ' with image name ' + img)

    # Store the Star file
    print('Storing output Star file in: ' + out_star)
    star.store(out_star)
    out_star_stem = os.path.splitext(out_star)[0]
    out_star_ves = out_star_stem + '_ves.star'
    print('Storing output Star with vesicles file in: ' + out_star_ves)
    star_ves.store(out_star_ves)

    # Shifted variant: bake the origins into the coordinates and zero them
    out_path, out_stem = os.path.split(out_star)
    out_star_shift = out_path + '/' + os.path.splitext(out_stem)[0] + '_shift.star'
    print('\tGenerating the shifted version: ' + out_star_shift)
    for row in range(star.get_nrows()):
        # Getting particle coordinates
        x = star.get_element('_rlnCoordinateX', row)
        y = star.get_element('_rlnCoordinateY', row)
        z = star.get_element('_rlnCoordinateZ', row)
        sx = star.get_element('_rlnOriginX', row)
        sy = star.get_element('_rlnOriginY', row)
        sz = star.get_element('_rlnOriginZ', row)
        star.set_element('_rlnCoordinateX', row, x - sy*s_bin)
        star.set_element('_rlnCoordinateY', row, y - sx*s_bin)
        star.set_element('_rlnCoordinateZ', row, z - sz*s_bin)
        star.set_element('_rlnOriginX', row, 0)
        star.set_element('_rlnOriginY', row, 0)
        star.set_element('_rlnOriginZ', row, 0)
    star.store(out_star_shift)
    print('Finished. Runtime {}.'.format(str(timedelta(seconds=time.time()-start_time))))
if __name__ == "__main__":
main() | [
"numpy.where",
"pyorg.disperse_io.load_tomo",
"numpy.sort",
"numpy.asarray",
"os.path.splitext",
"os.path.split",
"pyorg.sub.Star",
"scipy.ndimage.morphology.distance_transform_edt",
"numpy.argsort",
"copy.deepcopy",
"time.time",
"numpy.round"
] | [((1276, 1287), 'time.time', 'time.time', ([], {}), '()\n', (1285, 1287), False, 'import time\n'), ((1423, 1440), 'copy.deepcopy', 'cp.deepcopy', (['star'], {}), '(star)\n', (1434, 1440), True, 'import copy as cp\n'), ((12721, 12744), 'os.path.split', 'os.path.split', (['out_star'], {}), '(out_star)\n', (12734, 12744), False, 'import os\n'), ((1362, 1372), 'pyorg.sub.Star', 'sub.Star', ([], {}), '()\n', (1370, 1372), False, 'from pyorg import sub, disperse_io\n'), ((1374, 1384), 'pyorg.sub.Star', 'sub.Star', ([], {}), '()\n', (1382, 1384), False, 'from pyorg import sub, disperse_io\n'), ((3090, 3130), 'numpy.asarray', 'np.asarray', (['(sx, sy, sz)'], {'dtype': 'np.float'}), '((sx, sy, sz), dtype=np.float)\n', (3100, 3130), True, 'import numpy as np\n'), ((4423, 4445), 'os.path.split', 'os.path.split', (['mic_seg'], {}), '(mic_seg)\n', (4436, 4445), False, 'import os\n'), ((4563, 4573), 'pyorg.sub.Star', 'sub.Star', ([], {}), '()\n', (4571, 4573), False, 'from pyorg import sub, disperse_io\n'), ((5787, 5847), 'scipy.ndimage.morphology.distance_transform_edt', 'sp.ndimage.morphology.distance_transform_edt', (['(mic != seg_lbl)'], {}), '(mic != seg_lbl)\n', (5831, 5847), True, 'import scipy as sp\n'), ((7262, 7284), 'os.path.split', 'os.path.split', (['new_mic'], {}), '(new_mic)\n', (7275, 7284), False, 'import os\n'), ((12513, 12539), 'os.path.splitext', 'os.path.splitext', (['out_star'], {}), '(out_star)\n', (12529, 12539), False, 'import os\n'), ((5538, 5568), 'pyorg.disperse_io.load_tomo', 'disperse_io.load_tomo', (['mic_seg'], {}), '(mic_seg)\n', (5559, 5568), False, 'from pyorg import sub, disperse_io\n'), ((4811, 4833), 'os.path.split', 'os.path.split', (['mic_seg'], {}), '(mic_seg)\n', (4824, 4833), False, 'import os\n'), ((5628, 5650), 'os.path.split', 'os.path.split', (['mic_seg'], {}), '(mic_seg)\n', (5641, 5650), False, 'import os\n'), ((5738, 5768), 'pyorg.disperse_io.load_tomo', 'disperse_io.load_tomo', (['mic_seg'], {}), '(mic_seg)\n', (5759, 5768), 
False, 'from pyorg import sub, disperse_io\n'), ((9037, 9057), 'numpy.argsort', 'np.argsort', (['mic_logs'], {}), '(mic_logs)\n', (9047, 9057), True, 'import numpy as np\n'), ((11959, 11972), 'numpy.sort', 'np.sort', (['dsts'], {}), '(dsts)\n', (11966, 11972), True, 'import numpy as np\n'), ((12783, 12809), 'os.path.splitext', 'os.path.splitext', (['out_stem'], {}), '(out_stem)\n', (12799, 12809), False, 'import os\n'), ((5911, 5940), 'numpy.round', 'np.round', (['part_coords[row, :]'], {}), '(part_coords[row, :])\n', (5919, 5940), True, 'import numpy as np\n'), ((4308, 4326), 'os.path.split', 'os.path.split', (['mic'], {}), '(mic)\n', (4321, 4326), False, 'import os\n'), ((9270, 9308), 'numpy.where', 'np.where', (['((dsts < pt_ssup_v) & mic_lut)'], {}), '((dsts < pt_ssup_v) & mic_lut)\n', (9278, 9308), True, 'import numpy as np\n'), ((10193, 10219), 'numpy.where', 'np.where', (['(dsts < pt_ssup_v)'], {}), '(dsts < pt_ssup_v)\n', (10201, 10219), True, 'import numpy as np\n'), ((4355, 4373), 'os.path.split', 'os.path.split', (['mic'], {}), '(mic)\n', (4368, 4373), False, 'import os\n'), ((13710, 13721), 'time.time', 'time.time', ([], {}), '()\n', (13719, 13721), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""Unit-tests for pyfun/utilities.py"""
from __future__ import division
import unittest
import numpy as np
from chebpy.core.settings import DefaultPrefs
from chebpy.core.chebtech import Chebtech2
from chebpy.core.algorithms import bary, clenshaw, coeffmult
from tests.utilities import (testfunctions, scaled_tol, infNormLessThanTol,
infnorm)
# aliases
pi = np.pi
sin = np.sin
cos = np.cos
exp = np.exp
eps = DefaultPrefs.eps
np.random.seed(0)
# turn off 'divide' and 'invalid' Runtimewarnings: these are invoked in the
# barycentric formula and the warned-of behaviour is actually required
np.seterr(divide='ignore', invalid='ignore')
class Evaluation(unittest.TestCase):
"""Tests for the Barycentric formula and Clenshaw algorithm"""
def setUp(self):
npts = 15
self.xk = Chebtech2._chebpts(npts)
self.vk = Chebtech2._barywts(npts)
self.fk = np.random.rand(npts)
self.ak = np.random.rand(11)
self.xx = -1 + 2*np.random.rand(9)
self.pts = -1 + 2*np.random.rand(1001)
# check an empty array is returned whenever either or both of the first
# two arguments are themselves empty arrays
def test_bary__empty(self):
null = (None, None)
self.assertEquals(bary(np.array([]), np.array([]), *null).size, 0)
self.assertEquals(bary(np.array([.1]), np.array([]), *null).size, 0)
self.assertEquals(bary(np.array([]), np.array([.1]), *null).size, 0)
self.assertEquals(bary(self.pts, np.array([]), *null).size, 0)
self.assertEquals(bary(np.array([]), self.pts, *null).size, 0)
self.assertNotEquals(bary(np.array([.1]), np.array([.1]), *null).size, 0)
def test_clenshaw__empty(self):
self.assertEquals(clenshaw(np.array([]), np.array([])).size, 0)
self.assertEquals(clenshaw(np.array([]), np.array([1.])).size, 0)
self.assertEquals(clenshaw(np.array([1.]), np.array([])).size, 0)
self.assertEquals(clenshaw(self.pts, np.array([])).size, 0)
self.assertEquals(clenshaw(np.array([]), self.pts).size, 0)
self.assertNotEquals(clenshaw(np.array([.1]), np.array([.1])).size, 0)
# check that scalars get evaluated to scalars (not arrays)
def test_clenshaw__scalar_input(self):
for x in self.xx:
self.assertTrue(np.isscalar(clenshaw(x,self.ak)))
self.assertFalse(np.isscalar(clenshaw(xx,self.ak)))
def test_bary__scalar_input(self):
for x in self.xx:
self.assertTrue(np.isscalar(bary(x,self.fk,self.xk,self.vk)))
self.assertFalse(np.isscalar(bary(xx,self.fk,self.xk,self.vk)))
# Check that we always get float output for constant Chebtechs, even
# when passing in an integer input.
# TODO: Move these tests elsewhere?
def test_bary__float_output(self):
ff = Chebtech2.initconst(1)
gg = Chebtech2.initconst(1.)
self.assertTrue(isinstance(ff(0, "bary"), float))
self.assertTrue(isinstance(gg(0, "bary"), float))
def test_clenshaw__float_output(self):
ff = Chebtech2.initconst(1)
gg = Chebtech2.initconst(1.)
self.assertTrue(isinstance(ff(0, "clenshaw"), float))
self.assertTrue(isinstance(gg(0, "clenshaw"), float))
# Check that we get consistent output from bary and clenshaw
# TODO: Move these tests elsewhere?
def test_bary_clenshaw_consistency(self):
coeffs = np.random.rand(3)
evalpts = (0.5, np.array([]), np.array([.5]), np.array([.5, .6]))
for n in range(len(coeffs)):
ff = Chebtech2(coeffs[:n])
for xx in evalpts:
fb = ff(xx, "bary")
fc = ff(xx, "clenshaw")
self.assertEquals(type(fb), type(fc))
evalpts = [np.linspace(-1,1,n) for n in np.array([1e2, 1e3, 1e4, 1e5])]
ptsarry = [Chebtech2._chebpts(n) for n in np.array([100, 200])]
methods = [bary, clenshaw]
def evalTester(method, fun, evalpts, chebpts):
x = evalpts
xk = chebpts
fvals = fun(xk)
if method is bary:
vk = Chebtech2._barywts(fvals.size)
a = bary(x, fvals, xk, vk)
tol_multiplier = 1e0
elif method is clenshaw:
ak = Chebtech2._vals2coeffs(fvals)
a = clenshaw(x, ak)
tol_multiplier = 2e1
b = fun(evalpts)
n = evalpts.size
tol = tol_multiplier * scaled_tol(n)
return infNormLessThanTol(a, b, tol)
for method in methods:
for (fun, _, _) in testfunctions:
for j, chebpts in enumerate(ptsarry):
for k, xx in enumerate(evalpts):
testfun = evalTester(method, fun, xx, chebpts)
testfun.__name__ = "test_{}_{}_{:02}_{:02}".format(
method.__name__, fun.__name__, j, k)
setattr(Evaluation, testfun.__name__, testfun)
class CoeffMult(unittest.TestCase):
def setUp(self):
self.f = lambda x: exp(x)
self.g = lambda x: cos(x)
self.fn = 15
self.gn = 15
def test_coeffmult(self):
f, g = self.f, self.g
fn, gn = self.fn, self.gn
hn = fn + gn - 1
h = lambda x: self.f(x) * self.g(x)
fc = Chebtech2.initfun(f, fn).prolong(hn).coeffs
gc = Chebtech2.initfun(g, gn).prolong(hn).coeffs
hc = coeffmult(fc, gc)
HC = Chebtech2.initfun(h, hn).coeffs
self.assertLessEqual( infnorm(hc-HC), 2e1*eps)
# reset the testsfun variable so it doesn't get picked up by nose
testfun = None
| [
"numpy.random.rand",
"chebpy.core.chebtech.Chebtech2._vals2coeffs",
"tests.utilities.infNormLessThanTol",
"chebpy.core.chebtech.Chebtech2._barywts",
"tests.utilities.scaled_tol",
"numpy.array",
"numpy.linspace",
"chebpy.core.algorithms.clenshaw",
"tests.utilities.infnorm",
"numpy.random.seed",
"... | [((485, 502), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (499, 502), True, 'import numpy as np\n'), ((651, 695), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (660, 695), True, 'import numpy as np\n'), ((3805, 3826), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (3816, 3826), True, 'import numpy as np\n'), ((3877, 3898), 'chebpy.core.chebtech.Chebtech2._chebpts', 'Chebtech2._chebpts', (['n'], {}), '(n)\n', (3895, 3898), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((4417, 4446), 'tests.utilities.infNormLessThanTol', 'infNormLessThanTol', (['a', 'b', 'tol'], {}), '(a, b, tol)\n', (4435, 4446), False, 'from tests.utilities import testfunctions, scaled_tol, infNormLessThanTol, infnorm\n'), ((860, 884), 'chebpy.core.chebtech.Chebtech2._chebpts', 'Chebtech2._chebpts', (['npts'], {}), '(npts)\n', (878, 884), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((903, 927), 'chebpy.core.chebtech.Chebtech2._barywts', 'Chebtech2._barywts', (['npts'], {}), '(npts)\n', (921, 927), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((946, 966), 'numpy.random.rand', 'np.random.rand', (['npts'], {}), '(npts)\n', (960, 966), True, 'import numpy as np\n'), ((985, 1003), 'numpy.random.rand', 'np.random.rand', (['(11)'], {}), '(11)\n', (999, 1003), True, 'import numpy as np\n'), ((2878, 2900), 'chebpy.core.chebtech.Chebtech2.initconst', 'Chebtech2.initconst', (['(1)'], {}), '(1)\n', (2897, 2900), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((2914, 2938), 'chebpy.core.chebtech.Chebtech2.initconst', 'Chebtech2.initconst', (['(1.0)'], {}), '(1.0)\n', (2933, 2938), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((3111, 3133), 'chebpy.core.chebtech.Chebtech2.initconst', 'Chebtech2.initconst', (['(1)'], {}), '(1)\n', (3130, 3133), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((3147, 3171), 
'chebpy.core.chebtech.Chebtech2.initconst', 'Chebtech2.initconst', (['(1.0)'], {}), '(1.0)\n', (3166, 3171), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((3464, 3481), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (3478, 3481), True, 'import numpy as np\n'), ((3834, 3878), 'numpy.array', 'np.array', (['[100.0, 1000.0, 10000.0, 100000.0]'], {}), '([100.0, 1000.0, 10000.0, 100000.0])\n', (3842, 3878), True, 'import numpy as np\n'), ((3908, 3928), 'numpy.array', 'np.array', (['[100, 200]'], {}), '([100, 200])\n', (3916, 3928), True, 'import numpy as np\n'), ((4096, 4126), 'chebpy.core.chebtech.Chebtech2._barywts', 'Chebtech2._barywts', (['fvals.size'], {}), '(fvals.size)\n', (4114, 4126), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((4139, 4161), 'chebpy.core.algorithms.bary', 'bary', (['x', 'fvals', 'xk', 'vk'], {}), '(x, fvals, xk, vk)\n', (4143, 4161), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((4391, 4404), 'tests.utilities.scaled_tol', 'scaled_tol', (['n'], {}), '(n)\n', (4401, 4404), False, 'from tests.utilities import testfunctions, scaled_tol, infNormLessThanTol, infnorm\n'), ((5313, 5330), 'chebpy.core.algorithms.coeffmult', 'coeffmult', (['fc', 'gc'], {}), '(fc, gc)\n', (5322, 5330), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((3506, 3518), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3514, 3518), True, 'import numpy as np\n'), ((3520, 3535), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (3528, 3535), True, 'import numpy as np\n'), ((3536, 3556), 'numpy.array', 'np.array', (['[0.5, 0.6]'], {}), '([0.5, 0.6])\n', (3544, 3556), True, 'import numpy as np\n'), ((3610, 3631), 'chebpy.core.chebtech.Chebtech2', 'Chebtech2', (['coeffs[:n]'], {}), '(coeffs[:n])\n', (3619, 3631), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((4234, 4263), 'chebpy.core.chebtech.Chebtech2._vals2coeffs', 'Chebtech2._vals2coeffs', (['fvals'], {}), 
'(fvals)\n', (4256, 4263), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((4276, 4291), 'chebpy.core.algorithms.clenshaw', 'clenshaw', (['x', 'ak'], {}), '(x, ak)\n', (4284, 4291), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((5344, 5368), 'chebpy.core.chebtech.Chebtech2.initfun', 'Chebtech2.initfun', (['h', 'hn'], {}), '(h, hn)\n', (5361, 5368), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((5406, 5422), 'tests.utilities.infnorm', 'infnorm', (['(hc - HC)'], {}), '(hc - HC)\n', (5413, 5422), False, 'from tests.utilities import testfunctions, scaled_tol, infNormLessThanTol, infnorm\n'), ((1029, 1046), 'numpy.random.rand', 'np.random.rand', (['(9)'], {}), '(9)\n', (1043, 1046), True, 'import numpy as np\n'), ((1073, 1093), 'numpy.random.rand', 'np.random.rand', (['(1001)'], {}), '(1001)\n', (1087, 1093), True, 'import numpy as np\n'), ((2436, 2457), 'chebpy.core.algorithms.clenshaw', 'clenshaw', (['xx', 'self.ak'], {}), '(xx, self.ak)\n', (2444, 2457), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((2636, 2671), 'chebpy.core.algorithms.bary', 'bary', (['xx', 'self.fk', 'self.xk', 'self.vk'], {}), '(xx, self.fk, self.xk, self.vk)\n', (2640, 2671), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((1310, 1322), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1318, 1322), True, 'import numpy as np\n'), ((1324, 1336), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1332, 1336), True, 'import numpy as np\n'), ((1385, 1400), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1393, 1400), True, 'import numpy as np\n'), ((1401, 1413), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1409, 1413), True, 'import numpy as np\n'), ((1462, 1474), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1470, 1474), True, 'import numpy as np\n'), ((1476, 1491), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1484, 1491), True, 'import numpy as np\n'), 
((1549, 1561), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1557, 1561), True, 'import numpy as np\n'), ((1610, 1622), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1618, 1622), True, 'import numpy as np\n'), ((1684, 1699), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1692, 1699), True, 'import numpy as np\n'), ((1700, 1715), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (1708, 1715), True, 'import numpy as np\n'), ((1804, 1816), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1812, 1816), True, 'import numpy as np\n'), ((1818, 1830), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1826, 1830), True, 'import numpy as np\n'), ((1876, 1888), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1884, 1888), True, 'import numpy as np\n'), ((1890, 1905), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1898, 1905), True, 'import numpy as np\n'), ((1950, 1965), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1958, 1965), True, 'import numpy as np\n'), ((1966, 1978), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1974, 1978), True, 'import numpy as np\n'), ((2034, 2046), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2042, 2046), True, 'import numpy as np\n'), ((2092, 2104), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2100, 2104), True, 'import numpy as np\n'), ((2163, 2178), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (2171, 2178), True, 'import numpy as np\n'), ((2179, 2194), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (2187, 2194), True, 'import numpy as np\n'), ((2377, 2397), 'chebpy.core.algorithms.clenshaw', 'clenshaw', (['x', 'self.ak'], {}), '(x, self.ak)\n', (2385, 2397), False, 'from chebpy.core.algorithms import bary, clenshaw, coeffmult\n'), ((2565, 2599), 'chebpy.core.algorithms.bary', 'bary', (['x', 'self.fk', 'self.xk', 'self.vk'], {}), '(x, self.fk, self.xk, self.vk)\n', (2569, 2599), False, 'from chebpy.core.algorithms import bary, clenshaw, 
coeffmult\n'), ((5199, 5223), 'chebpy.core.chebtech.Chebtech2.initfun', 'Chebtech2.initfun', (['f', 'fn'], {}), '(f, fn)\n', (5216, 5223), False, 'from chebpy.core.chebtech import Chebtech2\n'), ((5256, 5280), 'chebpy.core.chebtech.Chebtech2.initfun', 'Chebtech2.initfun', (['g', 'gn'], {}), '(g, gn)\n', (5273, 5280), False, 'from chebpy.core.chebtech import Chebtech2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""OpenEO Python UDF interface"""
from typing import Tuple
import numpy
import xarray
from openeo_udf.api.datacube import DataCube
__license__ = "Apache License, Version 2.0"
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
def create_datacube(name: str, value: float, shape: Tuple=(3, 2, 2), dims: Tuple=("t", "x", "y")) -> DataCube:
    """Create a constant-valued datacube.

    Parameters
    ----------
    name: Name assigned to the underlying ``xarray.DataArray``.
    value: Constant value every cell of the cube is filled with.
    shape: Size of each dimension; must have the same length as ``dims``.
    dims: Dimension names, one per entry of ``shape``.

    Returns
    -------
    DataCube wrapping a DataArray of the requested shape, with integer
    coordinates ``0..size-1`` along every dimension.

    Raises
    ------
    ValueError: If ``shape`` and ``dims`` differ in length.  (Previously a
        mismatch was silently truncated by ``zip`` despite the documented
        requirement that they be equal.)
    """
    if len(shape) != len(dims):
        raise ValueError(
            "shape and dims must have equal length, got "
            f"{len(shape)} != {len(dims)}"
        )
    coords = {dim: list(range(size)) for dim, size in zip(dims, shape)}
    # numpy.full with an explicit float dtype reproduces the original
    # zeros-then-add-value behaviour even when `value` is an int.
    array = xarray.DataArray(numpy.full(shape, value, dtype=float), coords=coords, dims=dims)
    array.name = name
    hc = DataCube(array=array)
    return hc
| [
"numpy.zeros",
"openeo_udf.api.datacube.DataCube"
] | [((808, 829), 'openeo_udf.api.datacube.DataCube', 'DataCube', ([], {'array': 'array'}), '(array=array)\n', (816, 829), False, 'from openeo_udf.api.datacube import DataCube\n'), ((701, 725), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (712, 725), False, 'import numpy\n')] |
import igl
import numpy as np
import mpmath as mp
import os
import argparse
import matplotlib.pyplot as plt
from conformal_py import *
from overload_math import *
from render import *
from collections import namedtuple
from copy import deepcopy
import meshplot as meshp
import pickle
RenderInfo = namedtuple('RenderInfo', 'pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W')
def render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double):
    """Lay out the conformally flattened mesh and render a grid texture of it.

    Writes two images to ``out``: ``<name>_<N_bw>_gd_plain.png`` (flat grid)
    and ``<name>_<N_bw>_gd.png`` (grid with shading).

    Parameters
    ----------
    out : str
        Output directory for the rendered images.
    name : str
        Base name used for the output files.
    v3d, f :
        Vertex positions and faces of the input mesh.
    m :
        Mesh connectivity object from the solver (uses halfedge arrays
        ``n``, ``opp``, ``to``, ``h``, ``type``).
    u :
        Per-vertex conformal scale factors.
    cones :
        Cone vertex ids in the ORIGINAL vertex indexing; remapped below.
    reindex :
        Mapping from solver vertex ids back to original vertex ids.
    render_info : RenderInfo
        Camera, per-pixel face/barycentric lookup tables and image size.
    build_double : bool
        True when the mesh had boundary and was doubled by the solver.

    NOTE(review): reads the module-level ``use_mpf`` flag that is assigned
    in ``__main__`` — confirm before reusing this function as a library call.
    """
    fid_mat = render_info.fid_mat
    pt_fids = render_info.pt_fids
    pt_bcs = render_info.pt_bcs
    bc_mat = render_info.bc_mat
    cam = render_info.cam
    bd_thick = render_info.bd_thick
    view = render_info.view
    proj = render_info.proj
    H = render_info.H
    W = render_info.W
    reindex = np.array(reindex)
    # remap original cone ids into the solver mesh's indexing
    cones = [idx for idx in range(len(reindex)) if reindex[idx] in cones]
    # keep untouched copies: the shading pass below needs the per-pixel
    # lookup tables of the INPUT mesh, while fid_mat/bc_mat are overwritten
    fid_mat_input = deepcopy(fid_mat)
    bc_mat_input = deepcopy(bc_mat)
    cnt = 0
    # scatter the traced sample points back into the per-pixel tables
    # (pt_fids/pt_bcs were collected in the same row-major pixel order)
    for i in trange(H):
        for j in range(W):
            if fid_mat[i][j] > -1:
                fid_mat[i][j] = pt_fids[cnt]
                bc_mat[i][j] = pt_bcs[cnt]
                cnt += 1
    is_cut_h = []
    # compute the planar layout (per-halfedge u/v) and the cut graph
    if use_mpf:
        u_cpp, v_cpp, is_cut_h = layout_mpf(m, list(u), is_cut_h, -1)
        # round-trip through repr to convert the C++ mpf values into mpmath
        u_cpp = [mp.mpf(repr(u_cppi)) for u_cppi in u_cpp]
        v_cpp = [mp.mpf(repr(v_cppi)) for v_cppi in v_cpp]
    else:
        u_cpp, v_cpp, is_cut_h = layout_float(m, list(u), is_cut_h, -1)
    # draw the cut curves and cone markers into the face-id image
    fid_mat = add_cut_to_sin(m.n, m.opp, m.to, cones, m.type, is_cut_h, reindex, v3d, f, bd_thick, fid_mat, cam, H, W, build_double)
    N_bw = 10  # number of grid periods used in the texture
    def cprs(x):
        # smoothstep compression: clamp to [0, 1], then 3x^2 - 2x^3
        x = max(0,min(1,x))
        return max(0, min(1, 3 * x * x - 2 * x * x * x))
    print("draw grid...")
    if use_mpf:
        u = np.array([mp.mpf(repr(ui)) for ui in u])
        color_rgb_gd = draw_grid_mpf(fid_mat, bc_mat, m.h, m.n, m.to, u_cpp, v_cpp, u, cprs, H, W, N_bw)
    else:
        u = np.array(u)
        color_rgb_gd = draw_grid(fid_mat, bc_mat, m.h, m.n, m.to, u_cpp, v_cpp, u, cprs, H, W, N_bw) # faster but less accurate float alternative: draw_grid
    plt.imsave(out + "/" + name + "_" + str(N_bw) + "_gd_plain.png", color_rgb_gd)
    print("add shading...")
    add_shading(color_rgb_gd, v3d, f, fid_mat_input, bc_mat_input, view, proj)
    plt.imsave(out + "/" + name + "_" + str(N_bw) + "_gd.png", color_rgb_gd)
def do_conformal(m, dir, out, output_type="param", output_format="obj", use_mpf=False, error_log=False, energy_cond=False, energy_samples=False, suffix=None, flip_count=False, prec=None, no_round_Th_hat=False, print_summary=False, eps=None, no_plot_result=False, bypass_overlay=False, max_itr=500, no_lm_reset=False, do_reduction=False,lambda0=1,bound_norm_thres=1, log_level=2):
    """Run the conformal-equivalence solver on one mesh and emit results.

    Parameters
    ----------
    m : str
        Mesh file name inside ``dir``; its stem (plus optional ``suffix``)
        names the ``<name>_Th_hat`` target-angle file and all outputs.
    dir : str
        Input directory holding the mesh and the ``_Th_hat`` angle file.
    out : str
        Output directory for pickles/objs/renders and stats.
    output_type : {'render', 'he_metric', 'vf_metric', 'param'}
        What to compute/export.
    output_format : {'png', 'pickle', 'obj'}
        File format; must be compatible with ``output_type`` (see the
        fallthrough branch below for the supported combinations).
    use_mpf : bool
        Use multiprecision (mpmath/MPFR) arithmetic instead of double.
    prec : int or None
        MPFR mantissa bits (defaults to 100 when ``use_mpf``).
    eps : float or None
        Target angle-error threshold (0 when unset).
    Remaining flags are forwarded into the algorithm / line-search / stats
    parameter structs below.
    """
    # choose the scalar type and precision for the whole pipeline
    if use_mpf:
        if prec == None:
            mp.prec = 100
        else:
            mp.prec = prec
        if eps == None:
            eps = 0
        float_type = mp.mpf
    else:
        float_type = float
        if eps == None:
            eps = 0
    v3d, f = igl.read_triangle_mesh(dir+'/'+m)
    # strip the file extension to get the base name for inputs/outputs
    dot_index = m.rfind(".")
    name = m[:dot_index]
    if suffix != None:
        name = name+"_"+str(suffix)
    else:
        name = name
    # target cone angles, read as strings so multiprecision is not lost
    Th_hat = np.loadtxt(dir+"/"+name+"_Th_hat", dtype=str)
    Th_hat = nparray_from_float64(Th_hat,float_type)
    if use_mpf and not no_round_Th_hat:
        # Round rational multiples of pi to multiprecision accuracy
        for i,angle in enumerate(Th_hat):
            n=round(60*angle/mp.pi)
            Th_hat[i] = n*mp.pi/60
    # identify the cones - used for visualization
    is_bd = igl.is_border_vertex(v3d, f)
    # need to build double mesh when it has boundary
    build_double = (np.sum(is_bd) != 0)
    cones = np.array([id for id in range(len(Th_hat)) if np.abs(Th_hat[id]-2*mpi(float_type)) > 1e-15 and not is_bd[id]], dtype=int)
    W = 500; H = 300 # figure size
    bd_thick = 2; sin_size = 3
    pt_fids = []; pt_bcs=[]
    # for rendering: load the precomputed camera and trace one sample point
    # per covered pixel (fid_mat holds face ids, bc_mat barycentric coords)
    if output_type == "render" and output_format == "png" and not no_plot_result:
        with open("data/cameras/" + name + "_camera.pickle", 'rb') as fp:
            cam = pickle.load(fp)
            vc = pickle.load(fp)
            fc = pickle.load(fp)
            red_size = pickle.load(fp)
            blue_size = pickle.load(fp)
        (view, proj, vp) = cam
        if not build_double:
            fc = fc[:red_size+blue_size,:]
        fid_mat, bc_mat = get_pt_mat(cam, v3d, f, vc, fc, red_size, blue_size, W, H)
        for i in range(H):
            for j in range(W):
                if fid_mat[i][j] > -1:
                    pt_fids.append(fid_mat[i][j])
                    pt_bcs.append(bc_mat[i][j])
    # Create algorithm parameter struct
    alg_params = AlgorithmParameters()
    alg_params.MPFR_PREC = mp.prec
    alg_params.initial_ptolemy = False
    alg_params.error_eps = eps
    if use_mpf:
        alg_params.min_lambda = pow(2, -100)
    else:
        alg_params.min_lambda = 1e-16
    alg_params.newton_decr_thres = -0.01 * eps * eps;
    alg_params.max_itr = max_itr
    alg_params.bypass_overlay = bypass_overlay;
    # Create stats parameter struct (controls logging/output naming)
    stats_params = StatsParameters()
    stats_params.flip_count = flip_count
    stats_params.output_dir = out
    if use_mpf:
        stats_params.name = name + "_mpf"
    else:
        stats_params.name = name + "_float"
    stats_params.print_summary = print_summary
    stats_params.error_log = error_log
    stats_params.log_level = log_level
    # Create line search parameter struct
    ls_params = LineSearchParameters()
    ls_params.energy_cond = energy_cond
    ls_params.energy_samples = energy_samples
    ls_params.do_reduction = do_reduction
    ls_params.do_grad_norm_decrease = True
    ls_params.bound_norm_thres = bound_norm_thres
    ls_params.lambda0 = lambda0
    ls_params.reset_lambda = not no_lm_reset
    # dispatch on scalar type first, then on (output_type, output_format)
    if float_type == float:
        if output_type == "he_metric" and output_format == "pickle":
            n, opp, l = conformal_metric_cl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, l), pf)
        elif output_type == "vf_metric" and output_format == "pickle":
            vo, fo, l = conformal_metric_vl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((vo, fo, l), pf)
        elif output_type == "param" and output_format == "pickle":
            n, opp, u, v = conformal_parametrization_cl_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, u, v), pf)
        elif output_type == "param" and output_format == "obj":
            vo, fo, u, v, ft = conformal_parametrization_vf_double(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            write_texture_obj_double(out + "/" + name + "_out.obj", vo, fo, u, v, ft)
        elif output_type == "render" and output_format == "png": # for texture rendering
            m_o, u, pt_fids, pt_bcs, reindex, _ = conformal_metric_double(v3d, f, Th_hat, pt_fids, pt_bcs, alg_params, ls_params, stats_params);
            m = m_o._m
            if not no_plot_result:
                render_info = RenderInfo(pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W)
                render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double)
        else:
            print("non-supported output-type/output-format")
            print("output_type options:")
            print(" 'render'")
            print(" 'vf_metric'")
            print(" 'he_metric'")
            print(" 'param'")
            print("output format options:")
            print(" 'png' (compatible with 'render' only)")
            print(" 'pickle' (compatible with 'he_metric', 'vf_metric' and 'param')")
            print(" 'obj' (compatible with 'param')")
    else:
        set_mpf_prec(alg_params.MPFR_PREC)
        # serialize mpf values as plain digit strings for the C++ layer
        vnstr = np.vectorize(lambda a:str(repr(a))[5:-2])
        Th_hat = vnstr(Th_hat)
        if output_type == "he_metric" and output_format == "pickle":
            n, opp, l = conformal_metric_cl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            l_str = np.array([str(l[idx]) for idx in range(len(l))])
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, l_str), pf)
        elif output_type == "vf_metric" and output_format == "pickle":
            vo, fo, l = conformal_metric_vl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            vo_str = [[str(vo[i][k]) for k in range(3)] for i in range(len(vo))]
            l_str = np.array([str(l[idx]) for idx in range(len(l))])
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((vo_str, fo, l_str), pf)
        elif output_type == "param" and output_format == "pickle":
            n, opp, u, v = conformal_parametrization_cl_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            u_str = [str(u[i]) for i in range(len(u))]
            v_str = [str(v[i]) for i in range(len(v))]
            with open(out + "/" + name + "_out.pickle", 'wb') as pf:
                pickle.dump((n, opp, u_str, v_str), pf)
        elif output_type == "param" and output_format == "obj":
            vo, fo, u, v, ft = conformal_parametrization_vf_mpf(v3d, f, Th_hat, alg_params, ls_params, stats_params)
            # downcast multiprecision results to double for the obj writer
            vo_fl = [[float(str(vo[i][k])) for k in range(3)] for i in range(len(vo))]
            u_fl = [float(str(u[i])) for i in range(len(u))]
            v_fl = [float(str(v[i])) for i in range(len(v))]
            write_texture_obj_double(out + "/" + name + "_out.obj", vo_fl, fo, u_fl, v_fl, ft)
        elif output_type == "render" and output_format == "png": # default interface - for texture rendering
            m_o, u, pt_fids, pt_bcs, reindex, _ = conformal_metric_mpf(v3d, f, Th_hat, pt_fids, pt_bcs, alg_params, ls_params, stats_params);
            m = m_o._m
            if not no_plot_result:
                render_info = RenderInfo(pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W)
                render_texture(out, name, v3d, f, m, u, cones, reindex, render_info, build_double)
        else:
            print("non-supported output-type/output-format")
            print("output_type options:")
            print(" 'render'")
            print(" 'vf_metric'")
            print(" 'he_metric'")
            print(" 'param'")
            print("output format options:")
            print(" 'png' (compatible with 'render' only)")
            print(" 'pickle' (compatible with 'he_metric', 'vf_metric' and 'param')")
            print(" 'obj' (compatible with 'param')")
if __name__ == "__main__":
    # Parse arguments for the script
    parser = argparse.ArgumentParser(description='Run the conformal map with options.')
    parser.add_argument("-i", "--input", help="input folder that stores obj files and Th_hat")
    parser.add_argument("-o", "--output", help="output folder for stats", default="out")
    parser.add_argument("-f", "--fname", help="filename of the obj file")
    parser.add_argument("--use_mpf", action="store_true", help="True for enable multiprecision", default=False)
    parser.add_argument("--do_reduction", action="store_true", help="do reduction for search direction", default=False)
    parser.add_argument("-p", "--prec", help="choose the mantissa value of mpf", type=int)
    parser.add_argument("-m", "--max_itr", help="choose the maximum number of iterations", type=int, default=50)
    parser.add_argument("--energy_cond", action="store_true", help="True for enable energy computation for line-search")
    parser.add_argument("--energy_samples", action="store_true", help="True for write out energy sample and newton decrement before linesearch")
    parser.add_argument("--error_log", action="store_true", help="True for enable writing out the max/ave angle errors per newton iteration")
    parser.add_argument("--flip_count", action="store_true", help="True for enable collecting flip type stats")
    parser.add_argument("--no_round_Th_hat", action="store_true", help="True for NOT rounding Th_hat values to multiples of pi/60")
    parser.add_argument("--print_summary", action="store_true", help="print a summary table contains target angle range and final max curvature error")
    parser.add_argument("--no_plot_result", action="store_true", help="True for NOT rendering the results, used only for reproducing figures to speedup.")
    parser.add_argument("--bypass_overlay", action="store_true", help="True for NOT compute overlay, used only for reproducing figures to speedup.")
    parser.add_argument("--no_lm_reset", action="store_true", help="True for using double the previous lambda for line search.")
    parser.add_argument("--suffix", help="id assigned to each model for the random test")
    parser.add_argument("--eps", help="target error threshold")
    parser.add_argument("--lambda0", help="initial lambda value", type=float, default=1)
    parser.add_argument("--bound_norm_thres", help="threshold to drop the norm bound", type=float, default=1e-10)
    parser.add_argument("--output_type", action='store', help="output type selection: 'render', 'he_metric', 'vf_metric', 'param'", type=str, default="render")
    parser.add_argument("--output_format", action='store', help="output file format selection: 'png', 'pickle', 'obj'", type=str, default="png")
    parser.add_argument("--log_level", help="console logger info level [verbose 0-6]", type=int, default=2)
    args = parser.parse_args()
    # unpack parsed flags into module-level names (render_texture reads
    # use_mpf from this scope)
    output = args.output
    input = args.input  # NOTE(review): shadows the builtin `input`
    fname = args.fname
    use_mpf = args.use_mpf
    do_reduction = args.do_reduction
    max_itr = args.max_itr
    energy_cond = args.energy_cond
    error_log = args.error_log
    flip_count = args.flip_count
    no_round_Th_hat = args.no_round_Th_hat
    prec = args.prec
    no_lm_reset = args.no_lm_reset
    suffix = args.suffix
    print_summary = args.print_summary
    no_plot_result = args.no_plot_result
    bypass_overlay = args.bypass_overlay
    eps = args.eps
    lambda0 = args.lambda0
    bound_norm_thres = args.bound_norm_thres
    log_level = args.log_level
    energy_samples = args.energy_samples
    output_type = args.output_type
    output_format = args.output_format
    # make sure the output directory exists before any file is written
    if not os.path.isdir(output):
        os.makedirs(output, exist_ok=True)
    # argparse delivers eps as a string; convert once here
    if eps != None:
        eps = float(eps)
do_conformal(fname, input, output, output_type, output_format, use_mpf, error_log, energy_cond, energy_samples, suffix, flip_count, prec, no_round_Th_hat, print_summary, eps, no_plot_result, bypass_overlay, max_itr, no_lm_reset, do_reduction, lambda0, bound_norm_thres, log_level) | [
"collections.namedtuple",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"igl.read_triangle_mesh",
"pickle.load",
"igl.is_border_vertex",
"numpy.array",
"numpy.sum",
"os.path.isdir",
"copy.deepcopy",
"numpy.loadtxt"
] | [((298, 395), 'collections.namedtuple', 'namedtuple', (['"""RenderInfo"""', '"""pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W"""'], {}), "('RenderInfo',\n 'pt_fids, pt_bcs, fid_mat, bc_mat, cam, bd_thick, view, proj, H, W')\n", (308, 395), False, 'from collections import namedtuple\n'), ((800, 817), 'numpy.array', 'np.array', (['reindex'], {}), '(reindex)\n', (808, 817), True, 'import numpy as np\n'), ((943, 960), 'copy.deepcopy', 'deepcopy', (['fid_mat'], {}), '(fid_mat)\n', (951, 960), False, 'from copy import deepcopy\n'), ((978, 994), 'copy.deepcopy', 'deepcopy', (['bc_mat'], {}), '(bc_mat)\n', (986, 994), False, 'from copy import deepcopy\n'), ((2991, 3028), 'igl.read_triangle_mesh', 'igl.read_triangle_mesh', (["(dir + '/' + m)"], {}), "(dir + '/' + m)\n", (3013, 3028), False, 'import igl\n'), ((3183, 3234), 'numpy.loadtxt', 'np.loadtxt', (["(dir + '/' + name + '_Th_hat')"], {'dtype': 'str'}), "(dir + '/' + name + '_Th_hat', dtype=str)\n", (3193, 3234), True, 'import numpy as np\n'), ((3563, 3591), 'igl.is_border_vertex', 'igl.is_border_vertex', (['v3d', 'f'], {}), '(v3d, f)\n', (3583, 3591), False, 'import igl\n'), ((10558, 10632), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the conformal map with options."""'}), "(description='Run the conformal map with options.')\n", (10581, 10632), False, 'import argparse\n'), ((1935, 1946), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (1943, 1946), True, 'import numpy as np\n'), ((3670, 3683), 'numpy.sum', 'np.sum', (['is_bd'], {}), '(is_bd)\n', (3676, 3683), True, 'import numpy as np\n'), ((14294, 14315), 'os.path.isdir', 'os.path.isdir', (['output'], {}), '(output)\n', (14307, 14315), False, 'import os\n'), ((14321, 14355), 'os.makedirs', 'os.makedirs', (['output'], {'exist_ok': '(True)'}), '(output, exist_ok=True)\n', (14332, 14355), False, 'import os\n'), ((4091, 4106), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4102, 4106), False, 'import 
pickle\n'), ((4120, 4135), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4131, 4135), False, 'import pickle\n'), ((4149, 4164), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4160, 4164), False, 'import pickle\n'), ((4184, 4199), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4195, 4199), False, 'import pickle\n'), ((4220, 4235), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4231, 4235), False, 'import pickle\n'), ((6029, 6057), 'pickle.dump', 'pickle.dump', (['(n, opp, l)', 'pf'], {}), '((n, opp, l), pf)\n', (6040, 6057), False, 'import pickle\n'), ((8237, 8269), 'pickle.dump', 'pickle.dump', (['(n, opp, l_str)', 'pf'], {}), '((n, opp, l_str), pf)\n', (8248, 8269), False, 'import pickle\n'), ((6302, 6330), 'pickle.dump', 'pickle.dump', (['(vo, fo, l)', 'pf'], {}), '((vo, fo, l), pf)\n', (6313, 6330), False, 'import pickle\n'), ((8653, 8689), 'pickle.dump', 'pickle.dump', (['(vo_str, fo, l_str)', 'pf'], {}), '((vo_str, fo, l_str), pf)\n', (8664, 8689), False, 'import pickle\n'), ((6583, 6614), 'pickle.dump', 'pickle.dump', (['(n, opp, u, v)', 'pf'], {}), '((n, opp, u, v), pf)\n', (6594, 6614), False, 'import pickle\n'), ((9042, 9081), 'pickle.dump', 'pickle.dump', (['(n, opp, u_str, v_str)', 'pf'], {}), '((n, opp, u_str, v_str), pf)\n', (9053, 9081), False, 'import pickle\n')] |
import matplotlib.pyplot as plt
import numpy as np
def loadData(filename1, filename2):
    """Load two 1-D datasets from text files and show them as colored box plots.

    Parameters
    ----------
    filename1, filename2 : str
        Paths to whitespace-delimited text files holding the samples for
        class 0 and class 1 respectively.
    """
    class0 = np.loadtxt(filename1)
    class1 = np.loadtxt(filename2)
    # Pass a plain list of arrays: np.array([a, b]) breaks when the two
    # classes have different sample counts, while boxplot accepts a list
    # of unequal-length sequences directly.
    all_data = [class0, class1]
    bplot = plt.boxplot(all_data,
                notch=False,        # box instead of notch shape
                sym='bs',           # blue squares for outliers ('b'=blue, 's'=square)
                vert=True,          # vertical boxes
                patch_artist=True)  # fill boxes so they can be colored below
    colors = ['red', 'green']
    for patch, color in zip(bplot['boxes'], colors):
        patch.set_facecolor(color)
    # x-tick labels, one per class (boxplot positions start at 1)
    plt.xticks(range(1, len(all_data) + 1), ['Class_0', 'Class_1'])
    plt.xlabel('Skewness')
    plt.show()


if __name__ == "__main__":
    # guard the demo call so importing this module does not trigger plotting
    loadData('Skewness_0', 'Skewness_1')
| [
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.show"
] | [((97, 118), 'numpy.loadtxt', 'np.loadtxt', (['filename1'], {}), '(filename1)\n', (107, 118), True, 'import numpy as np\n'), ((127, 148), 'numpy.loadtxt', 'np.loadtxt', (['filename2'], {}), '(filename2)\n', (137, 148), True, 'import numpy as np\n'), ((165, 181), 'numpy.array', 'np.array', (['[a, b]'], {}), '([a, b])\n', (173, 181), True, 'import numpy as np\n'), ((195, 269), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['all_data'], {'notch': '(False)', 'sym': '"""bs"""', 'vert': '(True)', 'patch_artist': '(True)'}), "(all_data, notch=False, sym='bs', vert=True, patch_artist=True)\n", (206, 269), True, 'import matplotlib.pyplot as plt\n'), ((660, 682), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Skewness"""'], {}), "('Skewness')\n", (670, 682), True, 'import matplotlib.pyplot as plt\n'), ((745, 755), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (753, 755), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
"""
A command line script for processing CoMPASS data
"""
import os
import sys
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import click
from compy import compassrun, utilities
def main():
"""Process user-selected runs and plot filtered TOF spectra."""
args = sys.argv[1:]
argc = len(args)
if argc > 0:
folders = [str(Path(arg).resolve()) for arg in args]
print(f'Folders specified: {folders}')
else:
folders = None
# process data
pkl_flag = click.confirm('\nWould you like to load data from pickle?',
default=False)
if pkl_flag:
runs = utilities.load_pickle()
else:
key_tuples, VERBOSE = compassrun.initialize(folders=folders)
runs = compassrun.process_runs(key_tuples)
merge_flag = click.confirm('\nWould you like to merge runs?',
default=True)
if merge_flag:
utilities.merge_related_runs(runs, quiet=True)
# plot filtered TOF spectra for all keys
print_flag = click.confirm('\nWould you like to plot the spectra?',
default=False)
if print_flag:
plt.figure(figsize=(16, 9))
for key in runs.keys():
print(key)
if ('TOF' in runs[key].spectra['filtered']) and (
'vals' in runs[key].spectra['filtered']['TOF']):
vals_raw = np.array(runs[key].spectra['filtered']['TOF']['vals'])
bins = np.array(runs[key].spectra['filtered']['TOF']['bins'])
t = runs[key].t_meas
print('plotting key: ', key, t, sum([i for i in vals_raw]))
vals_err = np.sqrt(vals_raw) / t
vals = vals_raw / t
plt.errorbar(x=bins, y=vals, yerr=vals_err,
marker='s', linestyle='None',
drawstyle='steps-mid',
label=key.replace('_', '-'))
if len(runs.keys()) > 0:
plt.xlim(25, 185)
plt.xlabel(r'TIME [$\mu$s]')
plt.ylabel('COUNTS/MINUTE')
# plt.ylim(0, 3.5)
plt.legend()
plt.tight_layout()
plt.show()
else:
print('No spectra found to plot!')
# save data to pickle
save_flag = click.confirm('\nWould you like to save the runs as a pickle?',
default=False)
if save_flag:
utilities.save_pickle(runs)
print('\nThank you for using compy, the CoMPASS Python Companion!')
trans_flag = click.confirm('\nWould you like to calculate transmission?',
default=False)
if trans_flag:
keys = list(runs.keys())
print('\nProcessed keys are', f'{keys}')
key_ob = input('Which key would you like to use for open beam?\n')
# add transmission
[runs[key].add_trans(runs, key_ob, t_offset=0)
for key in keys if key != key_ob]
# plot transmission for target runs
trans_plot_flag = click.confirm('\nWould you like to plot transmission?',
default=False)
if trans_plot_flag:
[runs[key].plot_trans(runs, key, key_ob, n_bins=600, t_offset=5.56)
for key in keys if key != key_ob]
return runs
if __name__ == '__main__':
os.chdir('/mnt/c/Users/Avram/Dropbox (MIT)/MIT/research/NRTA/experiments/')
runs = main()
| [
"click.confirm",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"compy.utilities.merge_related_runs",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"compy.utilities.save_pickle",
"os.chdir",
"compy.compassrun.initialize",
"compy.utilities.load_pickle",
"matplotlib.pyplot.figure",
"numpy.array",
"ma... | [((579, 656), 'click.confirm', 'click.confirm', (['"""\nWould you like to load data from pickle?"""'], {'default': '(False)'}), '("""\nWould you like to load data from pickle?""", default=False)\n', (592, 656), False, 'import click\n'), ((1146, 1218), 'click.confirm', 'click.confirm', (['"""\nWould you like to plot the spectra?"""'], {'default': '(False)'}), '("""\nWould you like to plot the spectra?""", default=False)\n', (1159, 1218), False, 'import click\n'), ((2463, 2549), 'click.confirm', 'click.confirm', (['"""\nWould you like to save the runs as a pickle?"""'], {'default': '(False)'}), '("""\nWould you like to save the runs as a pickle?""", default=\n False)\n', (2476, 2549), False, 'import click\n'), ((2722, 2800), 'click.confirm', 'click.confirm', (['"""\nWould you like to calculate transmission?"""'], {'default': '(False)'}), '("""\nWould you like to calculate transmission?""", default=False)\n', (2735, 2800), False, 'import click\n'), ((3540, 3615), 'os.chdir', 'os.chdir', (['"""/mnt/c/Users/Avram/Dropbox (MIT)/MIT/research/NRTA/experiments/"""'], {}), "('/mnt/c/Users/Avram/Dropbox (MIT)/MIT/research/NRTA/experiments/')\n", (3548, 3615), False, 'import os\n'), ((718, 741), 'compy.utilities.load_pickle', 'utilities.load_pickle', ([], {}), '()\n', (739, 741), False, 'from compy import compassrun, utilities\n'), ((784, 822), 'compy.compassrun.initialize', 'compassrun.initialize', ([], {'folders': 'folders'}), '(folders=folders)\n', (805, 822), False, 'from compy import compassrun, utilities\n'), ((839, 874), 'compy.compassrun.process_runs', 'compassrun.process_runs', (['key_tuples'], {}), '(key_tuples)\n', (862, 874), False, 'from compy import compassrun, utilities\n'), ((897, 962), 'click.confirm', 'click.confirm', (['"""\nWould you like to merge runs?"""'], {'default': '(True)'}), '("""\nWould you like to merge runs?""", default=True)\n', (910, 962), False, 'import click\n'), ((1277, 1304), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1287, 1304), True, 'import matplotlib.pyplot as plt\n'), ((2601, 2628), 'compy.utilities.save_pickle', 'utilities.save_pickle', (['runs'], {}), '(runs)\n', (2622, 2628), False, 'from compy import compassrun, utilities\n'), ((3214, 3287), 'click.confirm', 'click.confirm', (['"""\nWould you like to plot transmission?"""'], {'default': '(False)'}), '("""\nWould you like to plot transmission?""", default=False)\n', (3227, 3287), False, 'import click\n'), ((1033, 1079), 'compy.utilities.merge_related_runs', 'utilities.merge_related_runs', (['runs'], {'quiet': '(True)'}), '(runs, quiet=True)\n', (1061, 1079), False, 'from compy import compassrun, utilities\n'), ((2139, 2156), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(25)', '(185)'], {}), '(25, 185)\n', (2147, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2198), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TIME [$\\\\mu$s]"""'], {}), "('TIME [$\\\\mu$s]')\n", (2180, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2239), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""COUNTS/MINUTE"""'], {}), "('COUNTS/MINUTE')\n", (2222, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2297), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2295, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2311, 2329), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2327, 2329), True, 'import matplotlib.pyplot as plt\n'), ((2343, 2353), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2351, 2353), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1577), 'numpy.array', 'np.array', (["runs[key].spectra['filtered']['TOF']['vals']"], {}), "(runs[key].spectra['filtered']['TOF']['vals'])\n", (1531, 1577), True, 'import numpy as np\n'), ((1602, 1656), 'numpy.array', 'np.array', (["runs[key].spectra['filtered']['TOF']['bins']"], {}), "(runs[key].spectra['filtered']['TOF']['bins'])\n", (1610, 1656), True, 'import numpy as 
np\n'), ((1800, 1817), 'numpy.sqrt', 'np.sqrt', (['vals_raw'], {}), '(vals_raw)\n', (1807, 1817), True, 'import numpy as np\n'), ((420, 429), 'pathlib.Path', 'Path', (['arg'], {}), '(arg)\n', (424, 429), False, 'from pathlib import Path\n')] |
# (C) 2019 <NAME> <<EMAIL>>
import numpy as np
from imgaug import augmenters as iaa
def normalize(X):
return (X / 255.0).copy()
def denormalize(X):
X_dn = (X * 255).copy()
return X_dn
def transform(aug_type, magnitude, X):
if aug_type == "crop":
X_aug = iaa.Crop(px=(0, int(magnitude * 32))).augment_images(X)
elif aug_type == "gaussian-blur":
X_aug = iaa.GaussianBlur(sigma=(0, magnitude * 25.0)).augment_images(X)
elif aug_type == "rotate":
X_aug = iaa.Affine(rotate=(-180 * magnitude, 180 * magnitude)).augment_images(X)
elif aug_type == "shear":
X_aug = iaa.Affine(shear=(-90 * magnitude, 90 * magnitude)).augment_images(X)
elif aug_type == "translate-x":
X_aug = iaa.Affine(
translate_percent={"x": (-magnitude, magnitude), "y": (0, 0)}
).augment_images(X)
elif aug_type == "translate-y":
X_aug = iaa.Affine(
translate_percent={"x": (0, 0), "y": (-magnitude, magnitude)}
).augment_images(X)
elif aug_type == "horizontal-flip":
X_aug = iaa.Fliplr(magnitude).augment_images(X)
elif aug_type == "vertical-flip":
X_aug = iaa.Flipud(magnitude).augment_images(X)
elif aug_type == "sharpen":
X_aug = iaa.Sharpen(
alpha=(0, 1.0), lightness=(0.50, 5 * magnitude)
).augment_images(X)
elif aug_type == "emboss":
X_aug = iaa.Emboss(
alpha=(0, 1.0), strength=(0.0, 20.0 * magnitude)
).augment_images(X)
elif aug_type == "additive-gaussian-noise":
X_aug = iaa.AdditiveGaussianNoise(
loc=0, scale=(0.0, magnitude * 255), per_channel=0.5
).augment_images(X)
elif aug_type == "dropout":
X_aug = iaa.Dropout(
(0.01, max(0.011, magnitude)), per_channel=0.5
).augment_images(
X
) # Dropout first argument should be smaller than second one
elif aug_type == "coarse-dropout":
X_aug = iaa.CoarseDropout(
(0.03, 0.15), size_percent=(0.30, np.log10(magnitude * 3)), per_channel=0.2
).augment_images(X)
elif aug_type == "gamma-contrast":
X_norm = normalize(X)
X_aug_norm = iaa.GammaContrast(magnitude * 1.75).augment_images(
X_norm
) # needs 0-1 values
X_aug = denormalize(X_aug_norm)
elif aug_type == "brighten":
X_aug = iaa.Add(
(int(-40 * magnitude), int(40 * magnitude)), per_channel=0.5
).augment_images(
X
) # brighten
elif aug_type == "invert":
X_aug = iaa.Invert(1.0).augment_images(X) # magnitude not used
elif aug_type == "fog":
X_aug = iaa.Fog().augment_images(X) # magnitude not used
elif aug_type == "clouds":
X_aug = iaa.Clouds().augment_images(X) # magnitude not used
elif aug_type == "histogram-equalize":
X_aug = iaa.AllChannelsHistogramEqualization().augment_images(
X
) # magnitude not used
elif aug_type == "super-pixels": # deprecated
X_norm = normalize(X)
X_norm2 = (X_norm * 2) - 1
X_aug_norm2 = iaa.Superpixels(
p_replace=(0, magnitude), n_segments=(100, 100)
).augment_images(X_norm2)
X_aug_norm = (X_aug_norm2 + 1) / 2
X_aug = denormalize(X_aug_norm)
elif aug_type == "perspective-transform":
X_norm = normalize(X)
X_aug_norm = iaa.PerspectiveTransform(
scale=(0.01, max(0.02, magnitude))
).augment_images(
X_norm
) # first scale param must be larger
np.clip(X_aug_norm, 0.0, 1.0, out=X_aug_norm)
X_aug = denormalize(X_aug_norm)
elif aug_type == "elastic-transform": # deprecated
X_norm = normalize(X)
X_norm2 = (X_norm * 2) - 1
X_aug_norm2 = iaa.ElasticTransformation(
alpha=(0.0, max(0.5, magnitude * 300)), sigma=5.0
).augment_images(X_norm2)
X_aug_norm = (X_aug_norm2 + 1) / 2
X_aug = denormalize(X_aug_norm)
elif aug_type == "add-to-hue-and-saturation":
X_aug = iaa.AddToHueAndSaturation(
(int(-45 * magnitude), int(45 * magnitude))
).augment_images(X)
elif aug_type == "coarse-salt-pepper":
X_aug = iaa.CoarseSaltAndPepper(p=0.2, size_percent=magnitude).augment_images(X)
elif aug_type == "grayscale":
X_aug = iaa.Grayscale(alpha=(0.0, magnitude)).augment_images(X)
else:
raise ValueError
return X_aug
def augment_by_policy(
X, y, *hyperparams
):
"""
"""
portion = 1
assert (
portion >= 0.0 and portion <= 1.0
), "portion argument value is out of accepted interval"
# convert data to 255 from normalized
_X = denormalize(X)
if portion == 1.0:
X_portion = _X
y_portion = y
else:
# get a portion of data
ix = np.random.choice(len(_X), int(len(_X) * portion), False)
X_portion = _X[ix].copy()
y_portion = y[ix].copy()
if X_portion.shape[0] == 0:
print("X_portion has zero size !!!")
nix = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
X_portion = _X[nix].copy()
y_portion = y[nix].copy()
all_X_portion_aug=None
all_y_portion = None
for i in range(0,len(hyperparams)-1,4):
# transform that portion
X_portion_aug = transform(hyperparams[i], hyperparams[i+1], X_portion) # first transform
assert (
X_portion_aug.min() >= -0.1 and X_portion_aug.max() <= 255.1
), "first transform is unvalid"
np.clip(X_portion_aug, 0, 255, out=X_portion_aug)
X_portion_aug = transform(
hyperparams[i+2], hyperparams[i+3], X_portion_aug
) # second transform
assert (
X_portion_aug.min() >= -0.1 and X_portion_aug.max() <= 255.1
), "second transform is unvalid"
np.clip(X_portion_aug, 0, 255, out=X_portion_aug)
if all_X_portion_aug is None:
all_X_portion_aug = X_portion_aug
all_y_portion = y_portion
else:
all_X_portion_aug = np.concatenate([all_X_portion_aug, X_portion_aug])
all_y_portion = np.concatenate([all_y_portion, y_portion])
augmented_data = {
"X_train": all_X_portion_aug / 255.0,
"y_train": all_y_portion,
} # back to normalization
return augmented_data # augmenteed data is mostly smaller than whole data
| [
"numpy.clip",
"imgaug.augmenters.Grayscale",
"imgaug.augmenters.Fog",
"imgaug.augmenters.AdditiveGaussianNoise",
"imgaug.augmenters.GammaContrast",
"numpy.log10",
"imgaug.augmenters.Flipud",
"imgaug.augmenters.AllChannelsHistogramEqualization",
"imgaug.augmenters.GaussianBlur",
"imgaug.augmenters.... | [((5580, 5629), 'numpy.clip', 'np.clip', (['X_portion_aug', '(0)', '(255)'], {'out': 'X_portion_aug'}), '(X_portion_aug, 0, 255, out=X_portion_aug)\n', (5587, 5629), True, 'import numpy as np\n'), ((5897, 5946), 'numpy.clip', 'np.clip', (['X_portion_aug', '(0)', '(255)'], {'out': 'X_portion_aug'}), '(X_portion_aug, 0, 255, out=X_portion_aug)\n', (5904, 5946), True, 'import numpy as np\n'), ((6116, 6166), 'numpy.concatenate', 'np.concatenate', (['[all_X_portion_aug, X_portion_aug]'], {}), '([all_X_portion_aug, X_portion_aug])\n', (6130, 6166), True, 'import numpy as np\n'), ((6195, 6237), 'numpy.concatenate', 'np.concatenate', (['[all_y_portion, y_portion]'], {}), '([all_y_portion, y_portion])\n', (6209, 6237), True, 'import numpy as np\n'), ((395, 440), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0, magnitude * 25.0)'}), '(sigma=(0, magnitude * 25.0))\n', (411, 440), True, 'from imgaug import augmenters as iaa\n'), ((506, 560), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'rotate': '(-180 * magnitude, 180 * magnitude)'}), '(rotate=(-180 * magnitude, 180 * magnitude))\n', (516, 560), True, 'from imgaug import augmenters as iaa\n'), ((625, 676), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'shear': '(-90 * magnitude, 90 * magnitude)'}), '(shear=(-90 * magnitude, 90 * magnitude))\n', (635, 676), True, 'from imgaug import augmenters as iaa\n'), ((747, 820), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'translate_percent': "{'x': (-magnitude, magnitude), 'y': (0, 0)}"}), "(translate_percent={'x': (-magnitude, magnitude), 'y': (0, 0)})\n", (757, 820), True, 'from imgaug import augmenters as iaa\n'), ((913, 986), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'translate_percent': "{'x': (0, 0), 'y': (-magnitude, magnitude)}"}), "(translate_percent={'x': (0, 0), 'y': (-magnitude, magnitude)})\n", (923, 986), True, 'from imgaug import augmenters as iaa\n'), ((1083, 1104), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', 
(['magnitude'], {}), '(magnitude)\n', (1093, 1104), True, 'from imgaug import augmenters as iaa\n'), ((1177, 1198), 'imgaug.augmenters.Flipud', 'iaa.Flipud', (['magnitude'], {}), '(magnitude)\n', (1187, 1198), True, 'from imgaug import augmenters as iaa\n'), ((1265, 1324), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0, 1.0)', 'lightness': '(0.5, 5 * magnitude)'}), '(alpha=(0, 1.0), lightness=(0.5, 5 * magnitude))\n', (1276, 1324), True, 'from imgaug import augmenters as iaa\n'), ((1413, 1473), 'imgaug.augmenters.Emboss', 'iaa.Emboss', ([], {'alpha': '(0, 1.0)', 'strength': '(0.0, 20.0 * magnitude)'}), '(alpha=(0, 1.0), strength=(0.0, 20.0 * magnitude))\n', (1423, 1473), True, 'from imgaug import augmenters as iaa\n'), ((1578, 1657), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'loc': '(0)', 'scale': '(0.0, magnitude * 255)', 'per_channel': '(0.5)'}), '(loc=0, scale=(0.0, magnitude * 255), per_channel=0.5)\n', (1603, 1657), True, 'from imgaug import augmenters as iaa\n'), ((2208, 2243), 'imgaug.augmenters.GammaContrast', 'iaa.GammaContrast', (['(magnitude * 1.75)'], {}), '(magnitude * 1.75)\n', (2225, 2243), True, 'from imgaug import augmenters as iaa\n'), ((2048, 2071), 'numpy.log10', 'np.log10', (['(magnitude * 3)'], {}), '(magnitude * 3)\n', (2056, 2071), True, 'import numpy as np\n'), ((2589, 2604), 'imgaug.augmenters.Invert', 'iaa.Invert', (['(1.0)'], {}), '(1.0)\n', (2599, 2604), True, 'from imgaug import augmenters as iaa\n'), ((2689, 2698), 'imgaug.augmenters.Fog', 'iaa.Fog', ([], {}), '()\n', (2696, 2698), True, 'from imgaug import augmenters as iaa\n'), ((2786, 2798), 'imgaug.augmenters.Clouds', 'iaa.Clouds', ([], {}), '()\n', (2796, 2798), True, 'from imgaug import augmenters as iaa\n'), ((2898, 2936), 'imgaug.augmenters.AllChannelsHistogramEqualization', 'iaa.AllChannelsHistogramEqualization', ([], {}), '()\n', (2934, 2936), True, 'from imgaug import augmenters as iaa\n'), ((3600, 3645), 'numpy.clip', 
'np.clip', (['X_aug_norm', '(0.0)', '(1.0)'], {'out': 'X_aug_norm'}), '(X_aug_norm, 0.0, 1.0, out=X_aug_norm)\n', (3607, 3645), True, 'import numpy as np\n'), ((3137, 3201), 'imgaug.augmenters.Superpixels', 'iaa.Superpixels', ([], {'p_replace': '(0, magnitude)', 'n_segments': '(100, 100)'}), '(p_replace=(0, magnitude), n_segments=(100, 100))\n', (3152, 3201), True, 'from imgaug import augmenters as iaa\n'), ((4271, 4325), 'imgaug.augmenters.CoarseSaltAndPepper', 'iaa.CoarseSaltAndPepper', ([], {'p': '(0.2)', 'size_percent': 'magnitude'}), '(p=0.2, size_percent=magnitude)\n', (4294, 4325), True, 'from imgaug import augmenters as iaa\n'), ((4394, 4431), 'imgaug.augmenters.Grayscale', 'iaa.Grayscale', ([], {'alpha': '(0.0, magnitude)'}), '(alpha=(0.0, magnitude))\n', (4407, 4431), True, 'from imgaug import augmenters as iaa\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose
import pytest
from astropy.coordinates import SkyCoord
from astropy.io import fits
import astropy.units as u
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from ...core import PixCoord
from ...tests.helpers import make_simple_wcs
from ..point import PointPixelRegion, PointSkyRegion
from .test_common import BaseTestPixelRegion, BaseTestSkyRegion
from .utils import HAS_MATPLOTLIB # noqa
@pytest.fixture(scope='session', name='wcs')
def wcs_fixture():
filename = get_pkg_data_filename('data/example_header.fits')
header = fits.getheader(filename)
return WCS(header)
class TestPointPixelRegion(BaseTestPixelRegion):
reg = PointPixelRegion(PixCoord(3, 4))
sample_box = [-2, 8, -1, 9]
inside = []
outside = [(3.1, 4.2), (5, 4)]
expected_area = 0
expected_repr = '<PointPixelRegion(center=PixCoord(x=3, y=4))>'
expected_str = 'Region: PointPixelRegion\ncenter: PixCoord(x=3, y=4)'
def test_copy(self):
reg = self.reg.copy()
assert reg.center.xy == (3, 4)
assert reg.visual == {}
assert reg.meta == {}
def test_pix_sky_roundtrip(self):
wcs = make_simple_wcs(SkyCoord(2 * u.deg, 3 * u.deg), 0.1 * u.deg, 20)
reg_new = self.reg.to_sky(wcs).to_pixel(wcs)
assert_allclose(reg_new.center.x, self.reg.center.x)
assert_allclose(reg_new.center.y, self.reg.center.y)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_as_artist(self):
artist = self.reg.as_artist()
assert artist.get_data() == ([3], [4])
artist = self.reg.as_artist(origin=(1, 1))
assert artist.get_data() == ([2], [3])
def test_rotate(self):
reg = self.reg.rotate(PixCoord(2, 3), 90 * u.deg)
assert_allclose(reg.center.xy, (1, 4))
class TestPointSkyRegion(BaseTestSkyRegion):
reg = PointSkyRegion(SkyCoord(3, 4, unit='deg'))
expected_repr = ('<PointSkyRegion(center=<SkyCoord (ICRS): (ra, dec) '
'in deg\n (3., 4.)>)>')
expected_str = ('Region: PointSkyRegion\ncenter: <SkyCoord (ICRS): '
'(ra, dec) in deg\n (3., 4.)>')
def test_copy(self):
reg = self.reg.copy()
assert_allclose(reg.center.ra.deg, 3)
assert reg.visual == {}
assert reg.meta == {}
def test_contains(self, wcs):
position = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
# points do not contain things
assert all(self.reg.contains(position, wcs)
== np.array([False, False], dtype='bool'))
| [
"astropy.io.fits.getheader",
"numpy.testing.assert_allclose",
"astropy.coordinates.SkyCoord",
"numpy.array",
"pytest.mark.skipif",
"pytest.fixture",
"astropy.wcs.WCS",
"astropy.utils.data.get_pkg_data_filename"
] | [((554, 597), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'name': '"""wcs"""'}), "(scope='session', name='wcs')\n", (568, 597), False, 'import pytest\n'), ((632, 681), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/example_header.fits"""'], {}), "('data/example_header.fits')\n", (653, 681), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((695, 719), 'astropy.io.fits.getheader', 'fits.getheader', (['filename'], {}), '(filename)\n', (709, 719), False, 'from astropy.io import fits\n'), ((731, 742), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (734, 742), False, 'from astropy.wcs import WCS\n'), ((1541, 1581), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_MATPLOTLIB"""'], {}), "('not HAS_MATPLOTLIB')\n", (1559, 1581), False, 'import pytest\n'), ((1421, 1473), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.center.x', 'self.reg.center.x'], {}), '(reg_new.center.x, self.reg.center.x)\n', (1436, 1473), False, 'from numpy.testing import assert_allclose\n'), ((1482, 1534), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.center.y', 'self.reg.center.y'], {}), '(reg_new.center.y, self.reg.center.y)\n', (1497, 1534), False, 'from numpy.testing import assert_allclose\n'), ((1892, 1930), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg.center.xy', '(1, 4)'], {}), '(reg.center.xy, (1, 4))\n', (1907, 1930), False, 'from numpy.testing import assert_allclose\n'), ((2004, 2030), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(3)', '(4)'], {'unit': '"""deg"""'}), "(3, 4, unit='deg')\n", (2012, 2030), False, 'from astropy.coordinates import SkyCoord\n'), ((2348, 2385), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg.center.ra.deg', '(3)'], {}), '(reg.center.ra.deg, 3)\n', (2363, 2385), False, 'from numpy.testing import assert_allclose\n'), ((2502, 2542), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['([1, 2] * u.deg)', '([3, 4] * 
u.deg)'], {}), '([1, 2] * u.deg, [3, 4] * u.deg)\n', (2510, 2542), False, 'from astropy.coordinates import SkyCoord\n'), ((1311, 1341), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(2 * u.deg)', '(3 * u.deg)'], {}), '(2 * u.deg, 3 * u.deg)\n', (1319, 1341), False, 'from astropy.coordinates import SkyCoord\n'), ((2656, 2694), 'numpy.array', 'np.array', (['[False, False]'], {'dtype': '"""bool"""'}), "([False, False], dtype='bool')\n", (2664, 2694), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: lapis-hong
# @Date : 2018/2/2
"""Wide and Deep Model Prediction
Not support for custom classifier, cause use different variable name scope, key not found in checkpoint"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
import time
import numpy as np
import tensorflow as tf
from lib.read_conf import Config
from lib.dataset import input_fn
from lib.build_estimator import build_estimator, build_custom_estimator
from lib.utils.util import elapse_time
CONFIG = Config().train
parser = argparse.ArgumentParser(description='Wide and Deep Model Prediction')
parser.add_argument(
'--model_dir', type=str, default=CONFIG["model_dir"],
help='Model checkpoint dir for evaluating.')
parser.add_argument(
'--model_type', type=str, default=CONFIG["model_type"],
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--data_dir', type=str, default=CONFIG["pred_data"],
help='Evaluating data dir.')
parser.add_argument(
'--image_data_dir', type=str, default=None,
help='Evaluating image data dir.')
parser.add_argument(
'--batch_size', type=int, default=CONFIG["batch_size"],
help='Number of examples per batch.')
parser.add_argument(
'--checkpoint_path', type=str, default=CONFIG["checkpoint_path"],
help="Path of a specific checkpoint to predict. If None, the latest checkpoint in model_dir is used.")
def main(unused_argv):
print("Using TensorFlow version %s" % tf.__version__)
# assert "1.4" <= tf.__version__, "TensorFlow r1.4 or later is needed"
if FLAGS.data_dir is None:
raise ValueError("Must specify prediction data_file by --data_dir")
print('Model type: {}'.format(FLAGS.model_type))
model_dir = os.path.join(FLAGS.model_dir, FLAGS.model_type)
print('Model directory: {}'.format(model_dir))
# model = build_estimator(model_dir, FLAGS.model_type)
model = build_custom_estimator(model_dir, FLAGS.model_type)
tf.logging.info('Build estimator: {}'.format(model))
# weights and other parameters (e.g. Adagrad) of the model
name_ls = model.get_variable_names()
print_shape = True
total_linear_weights = 0
for name in name_ls:
if print_shape:
shape = model.get_variable_value(name).shape
print(name, "\t", shape)
if name[:6] == "linear" and \
(name[-7:] == "weights"or name[-4:] == "bias"):
total_linear_weights += np.prod(shape)
else:
print(name)
if print_shape:
print("Total parameters in linear model: {}".format(total_linear_weights))
# embedding layer look up
sample_embedding = model.get_variable_value(
'dnn/input_from_feature_columns/input_layer/ad_cates_embedding/embedding_weights')
ids = [10, 20, 30]
with tf.Session() as sess:
lookup = tf.nn.embedding_lookup(sample_embedding,ids=ids).eval()
print(lookup)
# predictions
tf.logging.info('='*30+'START PREDICTION'+'='*30)
probability = []
start = time.time()
# FLAGS.batch_size, FLAGS.data_dir
predictions = model.predict(input_fn=lambda: input_fn("../data/pred_large/", FLAGS.image_data_dir, 'pred', batch_size=32),
predict_keys=None,
hooks=None,
checkpoint_path=FLAGS.checkpoint_path) # defaults None to use latest_checkpoint
for pred_dict in predictions: # dict{probabilities, classes, class_ids}
class_id = pred_dict['class_ids'][0]
probability.append((class_id, pred_dict['probabilities'][class_id]))
# print('\nPrediction is "{}" ({:.1f}%)'.format(class_id, 100 * probability))
end = time.time()
tf.logging.info('=' * 30 + 'FINISH PREDICTION, TAKE {} ns'.format(end - start) + '=' * 30)
for prob in probability:
class_id, probab = prob
print('\nPrediction is "{}" ({:.1f}%)'.format(class_id, 100 * probab))
print("length:", len(probability))
print("\nduration:", end - start)
if __name__ == '__main__':
# Set to INFO for tracking training, default is WARN. ERROR for least messages
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"numpy.prod",
"tensorflow.nn.embedding_lookup",
"lib.dataset.input_fn",
"argparse.ArgumentParser",
"lib.read_conf.Config",
"tensorflow.logging.info",
"tensorflow.Session",
"os.path.join",
"tensorflow.logging.set_verbosity",
"lib.build_estimator.build_custom_estimator",
"time.time",
"tensorflow... | [((684, 753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Wide and Deep Model Prediction"""'}), "(description='Wide and Deep Model Prediction')\n", (707, 753), False, 'import argparse\n'), ((660, 668), 'lib.read_conf.Config', 'Config', ([], {}), '()\n', (666, 668), False, 'from lib.read_conf import Config\n'), ((1905, 1952), 'os.path.join', 'os.path.join', (['FLAGS.model_dir', 'FLAGS.model_type'], {}), '(FLAGS.model_dir, FLAGS.model_type)\n', (1917, 1952), False, 'import os\n'), ((2075, 2126), 'lib.build_estimator.build_custom_estimator', 'build_custom_estimator', (['model_dir', 'FLAGS.model_type'], {}), '(model_dir, FLAGS.model_type)\n', (2097, 2126), False, 'from lib.build_estimator import build_estimator, build_custom_estimator\n'), ((3133, 3190), 'tensorflow.logging.info', 'tf.logging.info', (["('=' * 30 + 'START PREDICTION' + '=' * 30)"], {}), "('=' * 30 + 'START PREDICTION' + '=' * 30)\n", (3148, 3190), True, 'import tensorflow as tf\n'), ((3216, 3227), 'time.time', 'time.time', ([], {}), '()\n', (3225, 3227), False, 'import time\n'), ((3900, 3911), 'time.time', 'time.time', ([], {}), '()\n', (3909, 3911), False, 'import time\n'), ((4341, 4382), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4365, 4382), True, 'import tensorflow as tf\n'), ((4435, 4487), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '([sys.argv[0]] + unparsed)'}), '(main=main, argv=[sys.argv[0]] + unparsed)\n', (4445, 4487), True, 'import tensorflow as tf\n'), ((2993, 3005), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3003, 3005), True, 'import tensorflow as tf\n'), ((2634, 2648), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2641, 2648), True, 'import numpy as np\n'), ((3032, 3081), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['sample_embedding'], {'ids': 'ids'}), '(sample_embedding, ids=ids)\n', (3054, 3081), True, 
'import tensorflow as tf\n'), ((3317, 3393), 'lib.dataset.input_fn', 'input_fn', (['"""../data/pred_large/"""', 'FLAGS.image_data_dir', '"""pred"""'], {'batch_size': '(32)'}), "('../data/pred_large/', FLAGS.image_data_dir, 'pred', batch_size=32)\n", (3325, 3393), False, 'from lib.dataset import input_fn\n')] |
#!/usr/bin/python3
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", type=float, default=None, help="radius")
parser.add_argument("-g", type=float, default=None, help="gamma")
parser.add_argument("-m", type=int, default=None, help="mobile particles")
parser.add_argument("--sigma", type=float, default=1.0, help="lennard jones sigma")
args = parser.parse_args()
r_opt = np.power(2.0, 1.0/6.0) * args.sigma
if args.r != None and args.g != None and args.m != None:
raise ValueError("please choose 1 input parameters, not all 3")
if args.r != None and args.g != None and args.m == None:
raise ValueError("please choose 1 input parameters, not 2")
elif args.r != None and args.g == None and args.m != None:
raise ValueError("please choose 1 input parameters, not 2")
elif args.r == None and args.g != None and args.m != None:
raise ValueError("please choose 1 input parameters, not 2")
if args.r != None:
args.g = np.arcsin(r_opt/(2.0*args.r))
args.m = 16.0*args.r*args.r / (1.1027*r_opt*r_opt)
elif args.g != None:
args.g *= np.pi/180
args.r = r_opt / (2.0*np.sin(args.g))
args.m = 4.0 / (1.1027*np.sin(args.g)*np.sin(args.g))
elif args.m != None:
args.r = r_opt*np.sqrt(1.1027*args.m) / 4.0
args.g = np.arcsin(2.0 / (np.sqrt(1.1027*args.m)))
print(f"radius: {args.r:>.4f} gamma: {args.g*180/np.pi:>.4f} particles: {args.m:>.1f}") | [
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.power",
"numpy.arcsin",
"numpy.sin"
] | [((85, 110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (108, 110), False, 'import argparse\n'), ((441, 465), 'numpy.power', 'np.power', (['(2.0)', '(1.0 / 6.0)'], {}), '(2.0, 1.0 / 6.0)\n', (449, 465), True, 'import numpy as np\n'), ((1004, 1037), 'numpy.arcsin', 'np.arcsin', (['(r_opt / (2.0 * args.r))'], {}), '(r_opt / (2.0 * args.r))\n', (1013, 1037), True, 'import numpy as np\n'), ((1160, 1174), 'numpy.sin', 'np.sin', (['args.g'], {}), '(args.g)\n', (1166, 1174), True, 'import numpy as np\n'), ((1218, 1232), 'numpy.sin', 'np.sin', (['args.g'], {}), '(args.g)\n', (1224, 1232), True, 'import numpy as np\n'), ((1203, 1217), 'numpy.sin', 'np.sin', (['args.g'], {}), '(args.g)\n', (1209, 1217), True, 'import numpy as np\n'), ((1274, 1298), 'numpy.sqrt', 'np.sqrt', (['(1.1027 * args.m)'], {}), '(1.1027 * args.m)\n', (1281, 1298), True, 'import numpy as np\n'), ((1333, 1357), 'numpy.sqrt', 'np.sqrt', (['(1.1027 * args.m)'], {}), '(1.1027 * args.m)\n', (1340, 1357), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
DSET_MEAN = [0.41189489566336, 0.4251328133025, 0.4326707089857]
DSET_STD = [0.27413549931506, 0.28506257482912, 0.28284674400252]
label_colours = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def view_annotated(tensor, plot=True):
temp = tensor.numpy()
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0,11):
r[temp==l]=label_colours[l,0]
g[temp==l]=label_colours[l,1]
b[temp==l]=label_colours[l,2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:,:,0] = (r/255.0)#[:,:,0]
rgb[:,:,1] = (g/255.0)#[:,:,1]
rgb[:,:,2] = (b/255.0)#[:,:,2]
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def decode_image(tensor):
inp = tensor.numpy().transpose((1, 2, 0))
mean = np.array(DSET_MEAN)
std = np.array(DSET_STD)
inp = std * inp + mean
return inp
def view_image(tensor):
inp = decode_image(tensor)
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.show()
| [
"numpy.clip",
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
] | [((457, 573), 'numpy.array', 'np.array', (['[Sky, Building, Pole, Road, Pavement, Tree, SignSymbol, Fence, Car,\n Pedestrian, Bicyclist, Unlabelled]'], {}), '([Sky, Building, Pole, Road, Pavement, Tree, SignSymbol, Fence, Car,\n Pedestrian, Bicyclist, Unlabelled])\n', (465, 573), True, 'import numpy as np\n'), ((854, 897), 'numpy.zeros', 'np.zeros', (['(temp.shape[0], temp.shape[1], 3)'], {}), '((temp.shape[0], temp.shape[1], 3))\n', (862, 897), True, 'import numpy as np\n'), ((1172, 1191), 'numpy.array', 'np.array', (['DSET_MEAN'], {}), '(DSET_MEAN)\n', (1180, 1191), True, 'import numpy as np\n'), ((1202, 1220), 'numpy.array', 'np.array', (['DSET_STD'], {}), '(DSET_STD)\n', (1210, 1220), True, 'import numpy as np\n'), ((1329, 1347), 'numpy.clip', 'np.clip', (['inp', '(0)', '(1)'], {}), '(inp, 0, 1)\n', (1336, 1347), True, 'import numpy as np\n'), ((1352, 1367), 'matplotlib.pyplot.imshow', 'plt.imshow', (['inp'], {}), '(inp)\n', (1362, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1380, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1024, 1039), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (1034, 1039), True, 'import matplotlib.pyplot as plt\n'), ((1048, 1058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1056, 1058), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import zeros_like
from paddle.fluid import core, Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestZerosLikeAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x = paddle.fluid.data('x', [3, 4])
self.assertRaises(TypeError, zeros_like, x, 'int8')
def test_eager(self):
with _test_eager_guard():
self.test_errors()
class TestZerosLikeAPI(unittest.TestCase):
def test_api(self):
shape = [3, 4]
startup_program = Program()
train_program = Program()
with program_guard(train_program, startup_program):
x = paddle.fluid.data('X', shape)
out1 = zeros_like(x)
out2 = zeros_like(x, np.bool_)
out3 = zeros_like(x, 'float64')
out4 = zeros_like(x, 'int32')
out5 = zeros_like(x, 'int64')
place = (fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() else fluid.CPUPlace())
exe = fluid.Executor(place)
outs = exe.run(train_program,
feed={'X': np.ones(shape).astype('float32')},
fetch_list=[out1, out2, out3, out4, out5])
for (i, dtype) in enumerate(
[np.float32, np.bool_, np.float64, np.int32, np.int64]):
self.assertEqual(outs[i].dtype, dtype)
self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)
def test_eager(self):
with _test_eager_guard():
self.test_api()
class TestZerosLikeImpeartive(unittest.TestCase):
def test_out(self):
shape = [3, 4]
place = (fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() else fluid.CPUPlace())
paddle.disable_static(place)
x = paddle.to_tensor(np.ones(shape))
for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
out = zeros_like(x, dtype)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
True)
out = paddle.tensor.zeros_like(x)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
out = paddle.tensor.creation.zeros_like(x)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
paddle.enable_static()
def test_eager(self):
with _test_eager_guard():
self.test_out()
if (__name__ == '__main__'):
unittest.main()
| [
"paddle.fluid.Program",
"paddle.fluid.data",
"paddle.fluid.framework._test_eager_guard",
"numpy.ones",
"paddle.fluid.CPUPlace",
"paddle.tensor.zeros_like",
"paddle.enable_static",
"numpy.zeros",
"paddle.disable_static",
"paddle.fluid.Executor",
"paddle.tensor.creation.zeros_like",
"paddle.flui... | [((3230, 3245), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3243, 3245), False, 'import unittest\n'), ((1314, 1323), 'paddle.fluid.Program', 'Program', ([], {}), '()\n', (1321, 1323), False, 'from paddle.fluid import core, Program, program_guard\n'), ((1348, 1357), 'paddle.fluid.Program', 'Program', ([], {}), '()\n', (1355, 1357), False, 'from paddle.fluid import core, Program, program_guard\n'), ((1790, 1811), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (1804, 1811), True, 'import paddle.fluid as fluid\n'), ((2525, 2553), 'paddle.disable_static', 'paddle.disable_static', (['place'], {}), '(place)\n', (2546, 2553), False, 'import paddle\n'), ((2840, 2867), 'paddle.tensor.zeros_like', 'paddle.tensor.zeros_like', (['x'], {}), '(x)\n', (2864, 2867), False, 'import paddle\n'), ((2960, 2996), 'paddle.tensor.creation.zeros_like', 'paddle.tensor.creation.zeros_like', (['x'], {}), '(x)\n', (2993, 2996), False, 'import paddle\n'), ((3083, 3105), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (3103, 3105), False, 'import paddle\n'), ((1008, 1038), 'paddle.fluid.data', 'paddle.fluid.data', (['"""x"""', '[3, 4]'], {}), "('x', [3, 4])\n", (1025, 1038), False, 'import paddle\n'), ((1143, 1162), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (1160, 1162), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((1371, 1416), 'paddle.fluid.program_guard', 'program_guard', (['train_program', 'startup_program'], {}), '(train_program, startup_program)\n', (1384, 1416), False, 'from paddle.fluid import core, Program, program_guard\n'), ((1434, 1463), 'paddle.fluid.data', 'paddle.fluid.data', (['"""X"""', 'shape'], {}), "('X', shape)\n", (1451, 1463), False, 'import paddle\n'), ((1483, 1496), 'paddle.zeros_like', 'zeros_like', (['x'], {}), '(x)\n', (1493, 1496), False, 'from paddle import zeros_like\n'), ((1516, 1539), 'paddle.zeros_like', 'zeros_like', (['x', 
'np.bool_'], {}), '(x, np.bool_)\n', (1526, 1539), False, 'from paddle import zeros_like\n'), ((1559, 1583), 'paddle.zeros_like', 'zeros_like', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (1569, 1583), False, 'from paddle import zeros_like\n'), ((1603, 1625), 'paddle.zeros_like', 'zeros_like', (['x', '"""int32"""'], {}), "(x, 'int32')\n", (1613, 1625), False, 'from paddle import zeros_like\n'), ((1645, 1667), 'paddle.zeros_like', 'zeros_like', (['x', '"""int64"""'], {}), "(x, 'int64')\n", (1655, 1667), False, 'from paddle import zeros_like\n'), ((1724, 1752), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1750, 1752), False, 'from paddle.fluid import core, Program, program_guard\n'), ((1685, 1703), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (1700, 1703), True, 'import paddle.fluid as fluid\n'), ((1758, 1774), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (1772, 1774), True, 'import paddle.fluid as fluid\n'), ((2260, 2279), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (2277, 2279), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((2465, 2493), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (2491, 2493), False, 'from paddle.fluid import core, Program, program_guard\n'), ((2426, 2444), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (2441, 2444), True, 'import paddle.fluid as fluid\n'), ((2499, 2515), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2513, 2515), True, 'import paddle.fluid as fluid\n'), ((2583, 2597), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2590, 2597), True, 'import numpy as np\n'), ((2694, 2714), 'paddle.zeros_like', 'zeros_like', (['x', 'dtype'], {}), '(x, dtype)\n', (2704, 2714), False, 'from paddle import zeros_like\n'), ((3146, 3165), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), 
'()\n', (3163, 3165), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((969, 978), 'paddle.fluid.Program', 'Program', ([], {}), '()\n', (976, 978), False, 'from paddle.fluid import core, Program, program_guard\n'), ((980, 989), 'paddle.fluid.Program', 'Program', ([], {}), '()\n', (987, 989), False, 'from paddle.fluid import core, Program, program_guard\n'), ((2909, 2931), 'numpy.zeros', 'np.zeros', (['shape', 'dtype'], {}), '(shape, dtype)\n', (2917, 2931), True, 'import numpy as np\n'), ((3038, 3060), 'numpy.zeros', 'np.zeros', (['shape', 'dtype'], {}), '(shape, dtype)\n', (3046, 3060), True, 'import numpy as np\n'), ((1884, 1898), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1891, 1898), True, 'import numpy as np\n'), ((2183, 2205), 'numpy.zeros', 'np.zeros', (['shape', 'dtype'], {}), '(shape, dtype)\n', (2191, 2205), True, 'import numpy as np\n'), ((2760, 2782), 'numpy.zeros', 'np.zeros', (['shape', 'dtype'], {}), '(shape, dtype)\n', (2768, 2782), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_allclose
import pytest
from linearmodels.asset_pricing.model import LinearFactorModelGMM
from linearmodels.tests.asset_pricing._utility import generate_data, get_all
@pytest.fixture(params=["numpy", "pandas"])
def data(request):
return generate_data(nportfolio=10, output=request.param)
def test_linear_model_gmm_moments_jacobian(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors)
res = mod.fit(cov_type="robust", disp=0, debiased=False)
params = np.r_[
res.betas.values.ravel(),
res.risk_premia.values.ravel(),
mod.factors.ndarray.mean(0),
]
mod_mom = mod._moments(params[:, None], True)
mom = []
p = mod.portfolios.ndarray
f = mod.factors.ndarray
n = f.shape[0]
fc = np.c_[np.ones((n, 1)), f]
mu = f.mean(0)[None, :]
lam = res.risk_premia.values[None, :]
x = f - mu + lam
b = res.betas.values
for i in range(p.shape[1]):
eps = p[:, i : (i + 1)] - x @ b[[i]].T
for j in range(fc.shape[1]):
mom.append(eps * fc[:, [j]])
mom.append(f - mu)
mom_arr = np.hstack(tuple(mom))
mod_jac = mod._jacobian(params, True)
jac = np.zeros((mom_arr.shape[1], params.shape[0]))
nport, nf = p.shape[1], f.shape[1]
# 1,1
jac[: (nport * (nf + 1)), : nport * nf] = np.kron(np.eye(nport), fc.T @ x / n)
# 1, 2
col = []
for i in range(nport):
col.append(fc.T @ np.ones((n, 1)) @ b[[i]] / n)
col = np.vstack(tuple(col))
jac[: (nport * (nf + 1)), nport * nf : nport * nf + nf] = col
# 1, 3
col = []
for i in range(nport):
col.append(-fc.T @ np.ones((n, 1)) @ b[[i]] / n)
col = np.vstack(tuple(col))
jac[: (nport * (nf + 1)), -nf:] = col
# 2,2
jac[-nf:, -nf:] = np.eye(nf)
assert_allclose(mom_arr, mod_mom)
assert_allclose(jac, mod_jac)
me = mom_arr - mom_arr.mean(0)[None, :]
s = me.T @ me / n
s = (s + s.T) / 2
cov = np.linalg.inv(jac.T @ np.linalg.inv(s) @ jac) / n
cov = (cov + cov.T) / 2
assert_allclose(np.diag(cov), np.diag(res.cov), rtol=5e-3)
get_all(res)
@pytest.mark.smoke
def test_linear_model_gmm_smoke_iterate(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors)
res = mod.fit(cov_type="robust", disp=5, steps=20)
get_all(res)
@pytest.mark.smoke
def test_linear_model_gmm_smoke_risk_free(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors, risk_free=True)
res = mod.fit(cov_type="robust", disp=10)
get_all(res)
str(res._cov_est)
res._cov_est.__repr__()
str(res._cov_est.config)
@pytest.mark.smoke
def test_linear_model_gmm_kernel_smoke(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors)
res = mod.fit(cov_type="kernel", disp=10)
get_all(res)
str(res._cov_est)
res._cov_est.__repr__()
str(res._cov_est.config)
@pytest.mark.smoke
def test_linear_model_gmm_kernel_bandwidth_smoke(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors)
res = mod.fit(cov_type="kernel", bandwidth=10, disp=10)
get_all(res)
@pytest.mark.smoke
def test_linear_model_gmm_cue_smoke(data):
mod = LinearFactorModelGMM(data.portfolios, data.factors, risk_free=True)
res = mod.fit(cov_type="robust", disp=10, use_cue=True)
get_all(res)
| [
"numpy.eye",
"numpy.ones",
"numpy.testing.assert_allclose",
"linearmodels.tests.asset_pricing._utility.generate_data",
"numpy.diag",
"linearmodels.asset_pricing.model.LinearFactorModelGMM",
"numpy.zeros",
"numpy.linalg.inv",
"pytest.fixture",
"linearmodels.tests.asset_pricing._utility.get_all"
] | [((222, 264), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['numpy', 'pandas']"}), "(params=['numpy', 'pandas'])\n", (236, 264), False, 'import pytest\n'), ((295, 345), 'linearmodels.tests.asset_pricing._utility.generate_data', 'generate_data', ([], {'nportfolio': '(10)', 'output': 'request.param'}), '(nportfolio=10, output=request.param)\n', (308, 345), False, 'from linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((408, 459), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {}), '(data.portfolios, data.factors)\n', (428, 459), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((1220, 1265), 'numpy.zeros', 'np.zeros', (['(mom_arr.shape[1], params.shape[0])'], {}), '((mom_arr.shape[1], params.shape[0]))\n', (1228, 1265), True, 'import numpy as np\n'), ((1817, 1827), 'numpy.eye', 'np.eye', (['nf'], {}), '(nf)\n', (1823, 1827), True, 'import numpy as np\n'), ((1833, 1866), 'numpy.testing.assert_allclose', 'assert_allclose', (['mom_arr', 'mod_mom'], {}), '(mom_arr, mod_mom)\n', (1848, 1866), False, 'from numpy.testing import assert_allclose\n'), ((1871, 1900), 'numpy.testing.assert_allclose', 'assert_allclose', (['jac', 'mod_jac'], {}), '(jac, mod_jac)\n', (1886, 1900), False, 'from numpy.testing import assert_allclose\n'), ((2145, 2157), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (2152, 2157), False, 'from linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((2236, 2287), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {}), '(data.portfolios, data.factors)\n', (2256, 2287), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((2347, 2359), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (2354, 2359), False, 'from 
linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((2440, 2507), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {'risk_free': '(True)'}), '(data.portfolios, data.factors, risk_free=True)\n', (2460, 2507), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((2558, 2570), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (2565, 2570), False, 'from linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((2727, 2778), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {}), '(data.portfolios, data.factors)\n', (2747, 2778), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((2829, 2841), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (2836, 2841), False, 'from linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((3008, 3059), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {}), '(data.portfolios, data.factors)\n', (3028, 3059), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((3124, 3136), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (3131, 3136), False, 'from linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((3211, 3278), 'linearmodels.asset_pricing.model.LinearFactorModelGMM', 'LinearFactorModelGMM', (['data.portfolios', 'data.factors'], {'risk_free': '(True)'}), '(data.portfolios, data.factors, risk_free=True)\n', (3231, 3278), False, 'from linearmodels.asset_pricing.model import LinearFactorModelGMM\n'), ((3343, 3355), 'linearmodels.tests.asset_pricing._utility.get_all', 'get_all', (['res'], {}), '(res)\n', (3350, 3355), False, 'from 
linearmodels.tests.asset_pricing._utility import generate_data, get_all\n'), ((1369, 1382), 'numpy.eye', 'np.eye', (['nport'], {}), '(nport)\n', (1375, 1382), True, 'import numpy as np\n'), ((2098, 2110), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (2105, 2110), True, 'import numpy as np\n'), ((2112, 2128), 'numpy.diag', 'np.diag', (['res.cov'], {}), '(res.cov)\n', (2119, 2128), True, 'import numpy as np\n'), ((815, 830), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (822, 830), True, 'import numpy as np\n'), ((2022, 2038), 'numpy.linalg.inv', 'np.linalg.inv', (['s'], {}), '(s)\n', (2035, 2038), True, 'import numpy as np\n'), ((1475, 1490), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1482, 1490), True, 'import numpy as np\n'), ((1681, 1696), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1688, 1696), True, 'import numpy as np\n')] |
### IMPORT LIBRARIES ###
from matplotlib import style
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import table
from datetime import datetime, timedelta
from datetime import date
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import desc
from flask import Flask, jsonify
### DATABASE SET UP ###
# Create an engine
engine = create_engine("sqlite:///data/hawaii.sqlite")
# Use automap_base() to reflect the existing table and using .prepare() to reflect the schema and produce mapping
Base = automap_base()
# Save references to each table
Base.prepare(engine, reflect = True)
# Create a session
session = Session(engine)
# Create mapped classes with new variable names
HawaiiMeasurement = Base.classes.measurement
HawaiiStation = Base.classes.station
### FLASK SET UP ###
app = Flask(__name__)
### ROUTE SET UP ###
@app.route("/")
def homepage():
return (
f"You made it the SQLAlchemy Project: Climate App!<br/>"
f"Available Routes:<br/>"
f"/precipitation<br/>"
f"/stations<br/>"
f"/tobs<br/>"
f"/api/v1.0/temp/start/end"
)
@app.route("/precipitation")
def precipitation():
# Create a session
session = Session(engine)
print("Server is working. Server received request for Precipitation Page")
# Calculate the date 1 year ago from last date in database
last_data_point = (session.query(HawaiiMeasurement.date)
.order_by(HawaiiMeasurement.date.desc())
.all())[0]
# Query for the date and precipitation for the last year
last_data = date.fromisoformat('2017-08-23')
last_year = last_data - timedelta(days = 365)
# List of precipitation the past year
one_year_prcp = (session
.query(HawaiiMeasurement.date, HawaiiMeasurement.prcp)
.filter(HawaiiMeasurement.date >= last_year)
.all())
session.close()
# Use a shortcut for loop and use it to convert a list to dict by using `date` = key & `prcp` = value
convert_dict = {date: prcp for date, prcp in one_year_prcp}
# Return the jsonify() representation of the dictionary
return jsonify(convert_dict)
# SUB: return "json of dictionary using jsonify(dict_name)"
@app.route("/stations")
def stations():
# Create a session
session = Session(engine)
print("Server is working. Server received request for Stations Page")
# Query for the list of stations
total_number_of_stations = (session.query(HawaiiStation.station).all())
session.close()
# numpy.ravel() function returns contiguous flattened array
stations_list = list(np.ravel(total_number_of_stations))
return jsonify(stations_list=stations_list)
# SUB: return "json of dictionary using jsonify(dict_name)"
@app.route("/tobs")
def tobs():
# Create a session
session = Session(engine)
print("Server is working. Server received request for TOB Page")
# Calculate the date 1 year ago from last date in database
last_data = date.fromisoformat('2017-08-23')
last_year = last_data - timedelta(days = 365)
# Query the dates and temperature observations of the most active station for the last year of data.
active_stat = (session
.query(HawaiiMeasurement.station,
func.avg(HawaiiMeasurement.tobs)
, func.max(HawaiiMeasurement.tobs)
, func.min(HawaiiMeasurement.tobs))
.filter(HawaiiMeasurement.station == 'USC00519281')
.all())
# # Query the primary station for all tobs from the last year
highest_tob = (session
.query(HawaiiMeasurement.date
, HawaiiMeasurement.station
, HawaiiMeasurement.tobs)
.filter(HawaiiMeasurement.station == 'USC00519281')
.filter(HawaiiMeasurement.date >= last_year)
.all())
session.close()
tobs = list(np.ravel(highest_tob))
return jsonify(tobs=tobs)
# SUB: return "json of dictionary using jsonify(dict_name)"
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def summary(start=None, end=None):
# Create a session
session = Session(engine)
# Select statement
sel = [func.min(HawaiiMeasurement.tobs), func.avg(HawaiiMeasurement.tobs), func.max(HawaiiMeasurement.tobs)]
# Given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.
if not end:
dates_greater = (session
.query(func.min(HawaiiMeasurement.tobs)
, func.avg(HawaiiMeasurement.tobs)
, func.max(HawaiiMeasurement.tobs))
.filter(HawaiiMeasurement.date >= start)
.all())
return jsonify(summary=dates_greater)
dates_greater = (session
.query(func.min(HawaiiMeasurement.tobs)
, func.avg(HawaiiMeasurement.tobs)
, func.max(HawaiiMeasurement.tobs))
.filter(HawaiiMeasurement.date >= start)
.filter(HawaiiMeasurement.date <= end)
.all())
session.close()
return jsonify(summary=dates_greater)
if __name__ == '__main__':
app.run(debug=True)
| [
"sqlalchemy.func.min",
"flask.Flask",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"sqlalchemy.func.max",
"sqlalchemy.func.avg",
"numpy.ravel",
"datetime.timedelta",
"datetime.date.fromisoformat",
"flask.jsonify"
] | [((490, 535), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///data/hawaii.sqlite"""'], {}), "('sqlite:///data/hawaii.sqlite')\n", (503, 535), False, 'from sqlalchemy import create_engine, func\n'), ((657, 671), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (669, 671), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((770, 785), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (777, 785), False, 'from sqlalchemy.orm import Session\n'), ((946, 961), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (951, 961), False, 'from flask import Flask, jsonify\n'), ((1341, 1356), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1348, 1356), False, 'from sqlalchemy.orm import Session\n'), ((1700, 1732), 'datetime.date.fromisoformat', 'date.fromisoformat', (['"""2017-08-23"""'], {}), "('2017-08-23')\n", (1718, 1732), False, 'from datetime import date\n'), ((2237, 2258), 'flask.jsonify', 'jsonify', (['convert_dict'], {}), '(convert_dict)\n', (2244, 2258), False, 'from flask import Flask, jsonify\n'), ((2406, 2421), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (2413, 2421), False, 'from sqlalchemy.orm import Session\n'), ((2767, 2803), 'flask.jsonify', 'jsonify', ([], {'stations_list': 'stations_list'}), '(stations_list=stations_list)\n', (2774, 2803), False, 'from flask import Flask, jsonify\n'), ((2939, 2954), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (2946, 2954), False, 'from sqlalchemy.orm import Session\n'), ((3104, 3136), 'datetime.date.fromisoformat', 'date.fromisoformat', (['"""2017-08-23"""'], {}), "('2017-08-23')\n", (3122, 3136), False, 'from datetime import date\n'), ((4081, 4099), 'flask.jsonify', 'jsonify', ([], {'tobs': 'tobs'}), '(tobs=tobs)\n', (4088, 4099), False, 'from flask import Flask, jsonify\n'), ((4318, 4333), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (4325, 4333), False, 
'from sqlalchemy.orm import Session\n'), ((5303, 5333), 'flask.jsonify', 'jsonify', ([], {'summary': 'dates_greater'}), '(summary=dates_greater)\n', (5310, 5333), False, 'from flask import Flask, jsonify\n'), ((1761, 1780), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1770, 1780), False, 'from datetime import datetime, timedelta\n'), ((2720, 2754), 'numpy.ravel', 'np.ravel', (['total_number_of_stations'], {}), '(total_number_of_stations)\n', (2728, 2754), True, 'import numpy as np\n'), ((3165, 3184), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (3174, 3184), False, 'from datetime import datetime, timedelta\n'), ((4047, 4068), 'numpy.ravel', 'np.ravel', (['highest_tob'], {}), '(highest_tob)\n', (4055, 4068), True, 'import numpy as np\n'), ((4374, 4406), 'sqlalchemy.func.min', 'func.min', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4382, 4406), False, 'from sqlalchemy import create_engine, func\n'), ((4408, 4440), 'sqlalchemy.func.avg', 'func.avg', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4416, 4440), False, 'from sqlalchemy import create_engine, func\n'), ((4442, 4474), 'sqlalchemy.func.max', 'func.max', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4450, 4474), False, 'from sqlalchemy import create_engine, func\n'), ((4916, 4946), 'flask.jsonify', 'jsonify', ([], {'summary': 'dates_greater'}), '(summary=dates_greater)\n', (4923, 4946), False, 'from flask import Flask, jsonify\n'), ((3393, 3425), 'sqlalchemy.func.avg', 'func.avg', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (3401, 3425), False, 'from sqlalchemy import create_engine, func\n'), ((3448, 3480), 'sqlalchemy.func.max', 'func.max', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (3456, 3480), False, 'from sqlalchemy import create_engine, func\n'), ((3503, 3535), 'sqlalchemy.func.min', 'func.min', (['HawaiiMeasurement.tobs'], {}), 
'(HawaiiMeasurement.tobs)\n', (3511, 3535), False, 'from sqlalchemy import create_engine, func\n'), ((4684, 4716), 'sqlalchemy.func.min', 'func.min', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4692, 4716), False, 'from sqlalchemy import create_engine, func\n'), ((4735, 4767), 'sqlalchemy.func.avg', 'func.avg', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4743, 4767), False, 'from sqlalchemy import create_engine, func\n'), ((4786, 4818), 'sqlalchemy.func.max', 'func.max', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (4794, 4818), False, 'from sqlalchemy import create_engine, func\n'), ((5000, 5032), 'sqlalchemy.func.min', 'func.min', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (5008, 5032), False, 'from sqlalchemy import create_engine, func\n'), ((5051, 5083), 'sqlalchemy.func.avg', 'func.avg', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (5059, 5083), False, 'from sqlalchemy import create_engine, func\n'), ((5102, 5134), 'sqlalchemy.func.max', 'func.max', (['HawaiiMeasurement.tobs'], {}), '(HawaiiMeasurement.tobs)\n', (5110, 5134), False, 'from sqlalchemy import create_engine, func\n')] |
# We need to set this variable to shut-up TensorFlow's C++ logging messages
import csv
import os
import faulthandler
import signal
import sys
from typing import Dict
faulthandler.enable()
faulthandler.register(signal.SIGUSR1)
import numpy as np
import tensorflow as tf
import gorilla
import tqdm
from fslks.experiments import Predictions, Task
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow_datasets.core.utils import gcs_utils
from absl import flags
from absl import app
from absl import logging
from fslks import tasks
from fslks import experiments
from fslks import evaluation
FLAGS = flags.FLAGS
flags.DEFINE_spaceseplist("training_tasks", [], "One or more tasks to be used for pretraining")
flags.DEFINE_spaceseplist("validation_tasks", [], "One or more tasks to be used for validation during pretraining")
flags.DEFINE_spaceseplist("testing_tasks", [], "One or more tasks to be used for evaluating pretrained models")
flags.DEFINE_integer('num_epochs', 10, 'Number of epochs to train')
flags.DEFINE_integer('warmup_epochs', 3, 'Number of warmup epochs before normal training')
flags.DEFINE_integer('batch_size', 8, 'Batch size to use for training')
flags.DEFINE_integer('prefetch_size', -1, 'Number of batches to prefetch (default: AUTOTUNE)')
flags.DEFINE_integer('eval_batch_size', 8, 'Batch size to use when evaluating validation/test sets')
flags.DEFINE_integer('eval_batches', 10, 'Number of batches to evaluate when testing')
flags.DEFINE_boolean('use_xla', False, 'Enable XLA optimization')
flags.DEFINE_boolean('use_tpu', False, 'Use TPU ressources')
flags.DEFINE_boolean('use_amp', False, 'Enable AMP optimization')
flags.DEFINE_boolean('do_train', False, 'Train and validate the specified model')
flags.DEFINE_boolean('do_predict', False, 'Save (trained) model predictions model')
flags.DEFINE_boolean('do_test', False, 'Evaluate the performance of a (trained) model')
flags.DEFINE_integer('max_seq_len', 512, 'Maximum sequence length')
flags.DEFINE_string('init_checkpoint', 't5-base', 'Name of pretrained transformer model to load')
flags.DEFINE_string('checkpoint_dir', None, 'Path to save checkpoints')
flags.DEFINE_string('prediction_dir', None, 'Path to save/load predictions')
flags.DEFINE_string('data_dir', None, 'Path to TensorFlow DataSets home (e.g., ~/tensorflow_datasets)')
flags.DEFINE_string('cache_dir', None, 'Path to save TensorFlow DataSet cache files (e.g., /tmp)')
flags.DEFINE_string('checksum_dir', '/data/LHC_kitchensink/tensorflow_datasets/url_checksums',
help='Path to checksum directory')
flags.DEFINE_integer('steps_per_epoch', 1000, 'Number of steps considered as an epoch')
flags.DEFINE_enum('implementation', default='pytorch', enum_values=['tensorflow', 'pytorch'],
help='implementation to use for huggingface models')
flags.DEFINE_enum('evaluation', default='basic', enum_values=['basic', 'nlg'],
help='method to use for evaluating model performance')
flags.DEFINE_integer('seed', default=1337, help='Random seed used for experiments')
flags.DEFINE_float('temperature', default=2., help='Temperature used for task mixing')
flags.DEFINE_boolean('dynamic_mixing', default=False,
help='Whether to turn on dynamic task mixing based on validation losses')
flags.DEFINE_boolean('mix_from_validation', default=True,
help='If True, dynamic mixing will use validation losses; otherwise, training losses will be used.')
flags.DEFINE_float('clip_mixing_size', default=2e14, help='Maximum size to clip datasets for proprtional mixing')
flags.DEFINE_integer('test_limit', default=sys.maxsize, help='Maximum number of predictions to evaluate per task')
def save_predictions(predictions: Predictions, output_dir: str):
    """Write each task/split's predictions to <output_dir>/<task>/<split>/predictions.csv.

    ``predictions`` maps task -> split -> zero-arg callable; a callable
    returning None means the split produced nothing and is skipped.
    """
    logging.info('Saving predictions for tasks %s', set(predictions.keys()))
    for task, splits in predictions.items():
        logging.info('Saving predictions for %s splits %s', task, set(splits.keys()))
        for split, results_fn in splits.items():
            results = results_fn()
            if results is None:
                # Nothing was produced for this split; skip it.
                continue
            target_dir = os.path.join(output_dir, task, str(split))
            os.makedirs(target_dir, exist_ok=True)
            csv_path = os.path.join(target_dir, 'predictions.csv')
            logging.info('Saving %d predictions for %s[%s] at %s...',
                         len(results['prompt']), task, split, csv_path)
            rows = zip(results['prompt'], results['predictions'], results['targets'])
            with open(csv_path, 'w') as handle:
                writer = csv.writer(handle)
                writer.writerow(['prompt', 'predictions', 'targets'])
                writer.writerows(rows)
def load_predictions(output_dir: str, testing_tasks) -> Predictions:
    """Load predictions previously written by ``save_predictions``.

    :param output_dir: Directory populated by ``save_predictions``.
    :param testing_tasks: Iterable of tasks exposing ``.dataset`` and ``.split``.
    :return: Mapping of dataset -> split -> zero-arg callable returning the
             loaded arrays; tasks with no saved file are skipped with a warning.
    """
    predictions: Predictions = {}
    for task in testing_tasks:
        predictions_file = os.path.join(output_dir, task.dataset, str(task.split), 'predictions.csv')
        if not os.path.exists(predictions_file):
            logging.warning('Unable to load predictions for %s: %s not found', task, predictions_file)
            continue
        split_predictions = []
        targets = []
        prompts = []
        with open(predictions_file) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                split_predictions.append(row['predictions'])
                targets.append(row['targets'])
                prompts.append(row['prompt'])
        # BUG FIX: the dict is keyed by dataset name, but the original checked
        # membership with the Task object itself (`task not in predictions`),
        # which was never true — so the inner dict was re-created on every
        # iteration, clobbering previously loaded splits of the same dataset.
        if task.dataset not in predictions:
            predictions[task.dataset] = {}
        # Python does lazy binding so we need to store the results in an immutable variable, and then
        # use the variable as the default argument to the lambda since the default argument is actually
        # eagerly bound when the lambda is declared. Yes, this is awful.
        # NOTE(review): this dict uses the key 'prompts' while save_predictions
        # reads/writes 'prompt' — confirm against the evaluator's expected schema.
        results = {
            'prompts': np.asarray(prompts),
            'predictions': np.asarray(split_predictions),
            'targets': np.asarray(targets)
        }
        # noinspection PyDefaultArgument
        predictions[task.dataset][task.split] = lambda t=results: t
        logging.info('Loaded %d predictions for %s', len(prompts), task)
    return predictions
# noinspection PyUnusedLocal
def main(argv):
    """Run training, prediction, and/or evaluation as selected by FLAGS.

    ``argv`` is required by absl's ``app.run`` but unused; all configuration
    comes from the module-level FLAGS.
    """
    del argv  # Unused.
    logging.set_verbosity(logging.DEBUG)
    # TPU: attach to a TPU cluster when requested; fall back to the default
    # (CPU/GPU) distribution strategy if none is reachable.
    tpu = None
    if FLAGS.use_tpu:
        try:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            print('Running on TPU ', tpu.master())
        except ValueError:
            tpu = None
    if tpu:
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.get_strategy()
    print(f"{'='*80}\nREPLICAS: {strategy.num_replicas_in_sync}\n{'='*80}")
    # Disable TQDM threading (solves a weird C runtime error)
    tqdm.tqdm.monitor_interval = 0
    # Directories
    os.makedirs(FLAGS.cache_dir, exist_ok=True)
    os.makedirs(FLAGS.checkpoint_dir, exist_ok=True)
    os.makedirs(FLAGS.prediction_dir, exist_ok=True)
    # A model is only needed when training/predicting, or when testing without
    # previously saved predictions to load.
    if FLAGS.do_train or FLAGS.do_predict or (FLAGS.do_test and not FLAGS.prediction_dir):
        experiment: experiments.Experiment
        with strategy.scope():
            if FLAGS.implementation == 'tensorflow':
                # configure_tf(FLAGS.use_xla, FLAGS.use_amp)
                experiment = experiments.TFExperiment(cache_dir=FLAGS.cache_dir,
                                                      configuration_name=FLAGS.init_checkpoint,
                                                      max_seq_len=FLAGS.max_seq_len,
                                                      use_xla=FLAGS.use_xla,
                                                      use_amp=FLAGS.use_amp,
                                                      seed=FLAGS.seed)
            elif FLAGS.implementation == 'pytorch':
                experiment = experiments.PTExperiment(cache_dir=FLAGS.cache_dir,
                                                      configuration_name=FLAGS.init_checkpoint,
                                                      max_seq_len=FLAGS.max_seq_len,
                                                      use_amp=FLAGS.use_amp,
                                                      warmup_epochs=FLAGS.warmup_epochs,
                                                      seed=FLAGS.seed,
                                                      temperature=FLAGS.temperature,
                                                      dynamic_mixing=FLAGS.dynamic_mixing,
                                                      mix_from_validation=FLAGS.mix_from_validation,
                                                      clip_mixing_size=FLAGS.clip_mixing_size)
            else:
                raise NotImplementedError('Unsupported implementation \"%s\"' % FLAGS.implementation)
        # Load model
        model = experiment.load_model(model_name=FLAGS.init_checkpoint)
    # Patch tensorflow_datasets' GCS lookup so connection failures are logged
    # and treated as "no remote info" instead of raising.
    patch_settings = gorilla.Settings(allow_hit=True)
    def _patched_gcs_dataset_info_files(dataset_dir):
        # Delegate to the original implementation but swallow IOErrors.
        try:
            original = gorilla.get_original_attribute(gcs_utils, 'gcs_dataset_info_files')
            return original(dataset_dir)
        except IOError as ioe:
            logging.error('Failed to connect to GCS', exc_info=ioe)
            return None
    patch = gorilla.Patch(gcs_utils, 'gcs_dataset_info_files', _patched_gcs_dataset_info_files, settings=patch_settings)
    gorilla.apply(patch)
    # Setup tfds parameters
    Task.data_dir = FLAGS.data_dir
    # Task.add_checksum_dir(FLAGS.checksum_dir)
    # Register all our defined task mappings
    tasks.register_task_mappings()
    if FLAGS.do_train:
        # Parse dataset and split
        with strategy.scope():
            training_tasks = Task.parse_train_tasks(FLAGS.training_tasks)
            validation_tasks = Task.parse_validation_tasks(FLAGS.validation_tasks)
            if FLAGS.dynamic_mixing and FLAGS.mix_from_validation:
                # Dynamic mixing from validation needs a validation split for
                # every training dataset; synthesize one where it is missing.
                train_sets: Dict[str, Task] = {t.dataset: t for t in training_tasks}
                valid_sets: Dict[str, Task] = {t.dataset: t for t in validation_tasks}
                if train_sets.keys() != valid_sets.keys():
                    # NOTE(review): "requites" in the message below is a typo
                    # for "requires" (left as-is: runtime string).
                    logging.error('Dynamic mixing from validation requites validation data for each training task!')
                    for dataset in train_sets.keys() - valid_sets.keys():
                        if Task.split_in_dataset("validation", dataset):
                            valid_sets[dataset] = Task(dataset, 'validation')
                            logging.warning('Adding %s to validation tasks', dataset)
                        else:
                            # No validation split: carve one out of train.
                            # NOTE(review): the log message says 80%/20% but the
                            # splits below are 70%/30% — confirm which is intended.
                            train_sets[dataset] = Task(dataset, 'train[:70%]')
                            valid_sets[dataset] = Task(dataset, 'train[-30%:]')
                            logging.warning('Adjusting %s to use 80%% for training and 20%% for validation', dataset)
                # Rebuild the task lists so train/validation stay aligned per dataset.
                training_tasks = []
                validation_tasks = []
                for dataset in train_sets:
                    training_tasks.append(train_sets[dataset])
                    validation_tasks.append(valid_sets[dataset])
                for dataset in valid_sets.keys() - train_sets.keys():
                    validation_tasks.append(valid_sets[dataset])
        if FLAGS.checkpoint_dir:
            # Make directories to save best checkpoint and final checkpoint,
            # and record the flags used for this run alongside each.
            os.makedirs(FLAGS.checkpoint_dir, exist_ok=True)
            FLAGS.append_flags_into_file(os.path.join(FLAGS.checkpoint_dir, 'flags.cfg'))
            best_dir = "{0}_best".format(FLAGS.checkpoint_dir)
            os.makedirs(best_dir, exist_ok=True)
            FLAGS.append_flags_into_file(os.path.join(best_dir, 'flags.cfg'))
        # Train model
        logging.info('Training %s with %s...', FLAGS.init_checkpoint, ' '.join(FLAGS.training_tasks))
        experiment.train(model,
                         training_tasks=training_tasks,
                         validation_tasks=validation_tasks,
                         num_epochs=FLAGS.num_epochs,
                         steps_per_epoch=FLAGS.steps_per_epoch,
                         prefetch_size=FLAGS.prefetch_size,
                         batch_size=FLAGS.batch_size,
                         eval_batch_size=FLAGS.eval_batch_size,
                         eval_batches=FLAGS.eval_batches,
                         checkpoint_file=FLAGS.checkpoint_dir)
        if FLAGS.checkpoint_dir:
            # Save final checkpoint
            experiment.save_model(model, FLAGS.checkpoint_dir)
    if FLAGS.do_predict or (FLAGS.do_test and not FLAGS.prediction_dir):
        # Reload model, using best checkpoint if available.
        # Otherwise use the existing model.
        model_dir = "{0}_best".format(FLAGS.checkpoint_dir)
        if os.path.isdir(model_dir):
            logging.info("Loading best performing checkpoint: %s" % (model_dir))
            model = experiment.load_model(model_name=model_dir)
    if FLAGS.do_predict:
        # Run prediction on the test tasks and persist results to prediction_dir.
        testing_tasks = Task.parse_test_tasks(FLAGS.testing_tasks)
        logging.info('Predicting %s with %s...', ' '.join(FLAGS.testing_tasks), FLAGS.init_checkpoint)
        predictions = experiment.predict(model,
                                         tasks=testing_tasks,
                                         eval_batch_size=FLAGS.eval_batch_size)
        save_predictions(predictions, FLAGS.prediction_dir)
    if FLAGS.do_test:
        testing_tasks = Task.parse_test_tasks(FLAGS.testing_tasks)
        if FLAGS.prediction_dir:
            # Re-use previously saved predictions rather than recomputing them.
            predictions = load_predictions(FLAGS.prediction_dir, testing_tasks)
        else:
            logging.warning('--prediction_dir was not specified, generating predictions from scratch')
            predictions = experiment.predict(model,
                                             tasks=testing_tasks,
                                             eval_batch_size=FLAGS.eval_batch_size)
        evaluator = evaluation.get_evaluator(FLAGS.evaluation)
        results = evaluator.evaluate(predictions, FLAGS.test_limit)
        print('Results:')
        print(results)
    if not any([FLAGS.do_train, FLAGS.do_predict, FLAGS.do_test]):
        logging.error('Please specify at least one of --do_train, --do_predict, or --do_test')
if __name__ == '__main__':
    # This is how abseil knows to parse arguments and flags: app.run parses
    # FLAGS from sys.argv and then invokes main with the remaining arguments.
    app.run(main)
| [
"csv.DictReader",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.distribute.TPUStrategy",
"absl.flags.DEFINE_spaceseplist",
"absl.logging.info",
"fslks.experiments.Task.parse_train_tasks",
"fslks.experiments.Task",
"fslks.experiments.Task.split_in_dataset",
"gorilla.Setting... | [((167, 188), 'faulthandler.enable', 'faulthandler.enable', ([], {}), '()\n', (186, 188), False, 'import faulthandler\n'), ((189, 226), 'faulthandler.register', 'faulthandler.register', (['signal.SIGUSR1'], {}), '(signal.SIGUSR1)\n', (210, 226), False, 'import faulthandler\n'), ((619, 718), 'absl.flags.DEFINE_spaceseplist', 'flags.DEFINE_spaceseplist', (['"""training_tasks"""', '[]', '"""One or more tasks to be used for pretraining"""'], {}), "('training_tasks', [],\n 'One or more tasks to be used for pretraining')\n", (644, 718), False, 'from absl import flags\n'), ((715, 834), 'absl.flags.DEFINE_spaceseplist', 'flags.DEFINE_spaceseplist', (['"""validation_tasks"""', '[]', '"""One or more tasks to be used for validation during pretraining"""'], {}), "('validation_tasks', [],\n 'One or more tasks to be used for validation during pretraining')\n", (740, 834), False, 'from absl import flags\n'), ((831, 946), 'absl.flags.DEFINE_spaceseplist', 'flags.DEFINE_spaceseplist', (['"""testing_tasks"""', '[]', '"""One or more tasks to be used for evaluating pretrained models"""'], {}), "('testing_tasks', [],\n 'One or more tasks to be used for evaluating pretrained models')\n", (856, 946), False, 'from absl import flags\n'), ((944, 1011), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_epochs"""', '(10)', '"""Number of epochs to train"""'], {}), "('num_epochs', 10, 'Number of epochs to train')\n", (964, 1011), False, 'from absl import flags\n'), ((1012, 1106), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""warmup_epochs"""', '(3)', '"""Number of warmup epochs before normal training"""'], {}), "('warmup_epochs', 3,\n 'Number of warmup epochs before normal training')\n", (1032, 1106), False, 'from absl import flags\n'), ((1103, 1174), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(8)', '"""Batch size to use for training"""'], {}), "('batch_size', 8, 'Batch size to use for training')\n", (1123, 1174), 
False, 'from absl import flags\n'), ((1175, 1273), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""prefetch_size"""', '(-1)', '"""Number of batches to prefetch (default: AUTOTUNE)"""'], {}), "('prefetch_size', -1,\n 'Number of batches to prefetch (default: AUTOTUNE)')\n", (1195, 1273), False, 'from absl import flags\n'), ((1270, 1374), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_batch_size"""', '(8)', '"""Batch size to use when evaluating validation/test sets"""'], {}), "('eval_batch_size', 8,\n 'Batch size to use when evaluating validation/test sets')\n", (1290, 1374), False, 'from absl import flags\n'), ((1371, 1461), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_batches"""', '(10)', '"""Number of batches to evaluate when testing"""'], {}), "('eval_batches', 10,\n 'Number of batches to evaluate when testing')\n", (1391, 1461), False, 'from absl import flags\n'), ((1458, 1523), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_xla"""', '(False)', '"""Enable XLA optimization"""'], {}), "('use_xla', False, 'Enable XLA optimization')\n", (1478, 1523), False, 'from absl import flags\n'), ((1524, 1584), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_tpu"""', '(False)', '"""Use TPU ressources"""'], {}), "('use_tpu', False, 'Use TPU ressources')\n", (1544, 1584), False, 'from absl import flags\n'), ((1585, 1650), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_amp"""', '(False)', '"""Enable AMP optimization"""'], {}), "('use_amp', False, 'Enable AMP optimization')\n", (1605, 1650), False, 'from absl import flags\n'), ((1651, 1736), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""do_train"""', '(False)', '"""Train and validate the specified model"""'], {}), "('do_train', False,\n 'Train and validate the specified model')\n", (1671, 1736), False, 'from absl import flags\n'), ((1733, 1820), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""do_predict"""', 
'(False)', '"""Save (trained) model predictions model"""'], {}), "('do_predict', False,\n 'Save (trained) model predictions model')\n", (1753, 1820), False, 'from absl import flags\n'), ((1817, 1908), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""do_test"""', '(False)', '"""Evaluate the performance of a (trained) model"""'], {}), "('do_test', False,\n 'Evaluate the performance of a (trained) model')\n", (1837, 1908), False, 'from absl import flags\n'), ((1905, 1972), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_seq_len"""', '(512)', '"""Maximum sequence length"""'], {}), "('max_seq_len', 512, 'Maximum sequence length')\n", (1925, 1972), False, 'from absl import flags\n'), ((1973, 2074), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""init_checkpoint"""', '"""t5-base"""', '"""Name of pretrained transformer model to load"""'], {}), "('init_checkpoint', 't5-base',\n 'Name of pretrained transformer model to load')\n", (1992, 2074), False, 'from absl import flags\n'), ((2071, 2142), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""checkpoint_dir"""', 'None', '"""Path to save checkpoints"""'], {}), "('checkpoint_dir', None, 'Path to save checkpoints')\n", (2090, 2142), False, 'from absl import flags\n'), ((2143, 2219), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""prediction_dir"""', 'None', '"""Path to save/load predictions"""'], {}), "('prediction_dir', None, 'Path to save/load predictions')\n", (2162, 2219), False, 'from absl import flags\n'), ((2220, 2327), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_dir"""', 'None', '"""Path to TensorFlow DataSets home (e.g., ~/tensorflow_datasets)"""'], {}), "('data_dir', None,\n 'Path to TensorFlow DataSets home (e.g., ~/tensorflow_datasets)')\n", (2239, 2327), False, 'from absl import flags\n'), ((2324, 2426), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""cache_dir"""', 'None', '"""Path to save TensorFlow DataSet cache files (e.g., /tmp)"""'], 
{}), "('cache_dir', None,\n 'Path to save TensorFlow DataSet cache files (e.g., /tmp)')\n", (2343, 2426), False, 'from absl import flags\n'), ((2423, 2561), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""checksum_dir"""', '"""/data/LHC_kitchensink/tensorflow_datasets/url_checksums"""'], {'help': '"""Path to checksum directory"""'}), "('checksum_dir',\n '/data/LHC_kitchensink/tensorflow_datasets/url_checksums', help=\n 'Path to checksum directory')\n", (2442, 2561), False, 'from absl import flags\n'), ((2573, 2664), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""steps_per_epoch"""', '(1000)', '"""Number of steps considered as an epoch"""'], {}), "('steps_per_epoch', 1000,\n 'Number of steps considered as an epoch')\n", (2593, 2664), False, 'from absl import flags\n'), ((2661, 2817), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""implementation"""'], {'default': '"""pytorch"""', 'enum_values': "['tensorflow', 'pytorch']", 'help': '"""implementation to use for huggingface models"""'}), "('implementation', default='pytorch', enum_values=[\n 'tensorflow', 'pytorch'], help=\n 'implementation to use for huggingface models')\n", (2678, 2817), False, 'from absl import flags\n'), ((2826, 2963), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""evaluation"""'], {'default': '"""basic"""', 'enum_values': "['basic', 'nlg']", 'help': '"""method to use for evaluating model performance"""'}), "('evaluation', default='basic', enum_values=['basic',\n 'nlg'], help='method to use for evaluating model performance')\n", (2843, 2963), False, 'from absl import flags\n'), ((2978, 3066), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""'], {'default': '(1337)', 'help': '"""Random seed used for experiments"""'}), "('seed', default=1337, help=\n 'Random seed used for experiments')\n", (2998, 3066), False, 'from absl import flags\n'), ((3062, 3154), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""temperature"""'], {'default': '(2.0)', 
'help': '"""Temperature used for task mixing"""'}), "('temperature', default=2.0, help=\n 'Temperature used for task mixing')\n", (3080, 3154), False, 'from absl import flags\n'), ((3149, 3281), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""dynamic_mixing"""'], {'default': '(False)', 'help': '"""Whether to turn on dynamic task mixing based on validation losses"""'}), "('dynamic_mixing', default=False, help=\n 'Whether to turn on dynamic task mixing based on validation losses')\n", (3169, 3281), False, 'from absl import flags\n'), ((3298, 3466), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""mix_from_validation"""'], {'default': '(True)', 'help': '"""If True, dynamic mixing will use validation losses; otherwise, training losses will be used."""'}), "('mix_from_validation', default=True, help=\n 'If True, dynamic mixing will use validation losses; otherwise, training losses will be used.'\n )\n", (3318, 3466), False, 'from absl import flags\n'), ((3478, 3609), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""clip_mixing_size"""'], {'default': '(200000000000000.0)', 'help': '"""Maximum size to clip datasets for proprtional mixing"""'}), "('clip_mixing_size', default=200000000000000.0, help=\n 'Maximum size to clip datasets for proprtional mixing')\n", (3496, 3609), False, 'from absl import flags\n'), ((3592, 3711), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""test_limit"""'], {'default': 'sys.maxsize', 'help': '"""Maximum number of predictions to evaluate per task"""'}), "('test_limit', default=sys.maxsize, help=\n 'Maximum number of predictions to evaluate per task')\n", (3612, 3711), False, 'from absl import flags\n'), ((6675, 6711), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (6696, 6711), False, 'from absl import logging\n'), ((7372, 7415), 'os.makedirs', 'os.makedirs', (['FLAGS.cache_dir'], {'exist_ok': '(True)'}), '(FLAGS.cache_dir, exist_ok=True)\n', (7383, 
7415), False, 'import os\n'), ((7420, 7468), 'os.makedirs', 'os.makedirs', (['FLAGS.checkpoint_dir'], {'exist_ok': '(True)'}), '(FLAGS.checkpoint_dir, exist_ok=True)\n', (7431, 7468), False, 'import os\n'), ((7473, 7521), 'os.makedirs', 'os.makedirs', (['FLAGS.prediction_dir'], {'exist_ok': '(True)'}), '(FLAGS.prediction_dir, exist_ok=True)\n', (7484, 7521), False, 'import os\n'), ((9428, 9460), 'gorilla.Settings', 'gorilla.Settings', ([], {'allow_hit': '(True)'}), '(allow_hit=True)\n', (9444, 9460), False, 'import gorilla\n'), ((9797, 9909), 'gorilla.Patch', 'gorilla.Patch', (['gcs_utils', '"""gcs_dataset_info_files"""', '_patched_gcs_dataset_info_files'], {'settings': 'patch_settings'}), "(gcs_utils, 'gcs_dataset_info_files',\n _patched_gcs_dataset_info_files, settings=patch_settings)\n", (9810, 9909), False, 'import gorilla\n'), ((9910, 9930), 'gorilla.apply', 'gorilla.apply', (['patch'], {}), '(patch)\n', (9923, 9930), False, 'import gorilla\n'), ((10093, 10123), 'fslks.tasks.register_task_mappings', 'tasks.register_task_mappings', ([], {}), '()\n', (10121, 10123), False, 'from fslks import tasks\n'), ((14909, 14922), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (14916, 14922), False, 'from absl import app\n'), ((6965, 7011), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['tpu'], {}), '(tpu)\n', (7006, 7011), True, 'import tensorflow as tf\n'), ((7020, 7066), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['tpu'], {}), '(tpu)\n', (7061, 7066), True, 'import tensorflow as tf\n'), ((7086, 7116), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['tpu'], {}), '(tpu)\n', (7111, 7116), True, 'import tensorflow as tf\n'), ((7146, 7174), 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), '()\n', (7172, 7174), True, 'import tensorflow as tf\n'), ((13553, 13595), 'fslks.experiments.Task.parse_test_tasks', 
'Task.parse_test_tasks', (['FLAGS.testing_tasks'], {}), '(FLAGS.testing_tasks)\n', (13574, 13595), False, 'from fslks.experiments import Predictions, Task\n'), ((13997, 14039), 'fslks.experiments.Task.parse_test_tasks', 'Task.parse_test_tasks', (['FLAGS.testing_tasks'], {}), '(FLAGS.testing_tasks)\n', (14018, 14039), False, 'from fslks.experiments import Predictions, Task\n'), ((14493, 14535), 'fslks.evaluation.get_evaluator', 'evaluation.get_evaluator', (['FLAGS.evaluation'], {}), '(FLAGS.evaluation)\n', (14517, 14535), False, 'from fslks import evaluation\n'), ((14729, 14820), 'absl.logging.error', 'logging.error', (['"""Please specify at least one of --do_train, --do_predict, or --do_test"""'], {}), "(\n 'Please specify at least one of --do_train, --do_predict, or --do_test')\n", (14742, 14820), False, 'from absl import logging\n'), ((5346, 5378), 'os.path.exists', 'os.path.exists', (['predictions_file'], {}), '(predictions_file)\n', (5360, 5378), False, 'import os\n'), ((5392, 5486), 'absl.logging.warning', 'logging.warning', (['"""Unable to load predictions for %s: %s not found"""', 'task', 'predictions_file'], {}), "('Unable to load predictions for %s: %s not found', task,\n predictions_file)\n", (5407, 5486), False, 'from absl import logging\n'), ((5648, 5672), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (5662, 5672), False, 'import csv\n'), ((6261, 6280), 'numpy.asarray', 'np.asarray', (['prompts'], {}), '(prompts)\n', (6271, 6280), True, 'import numpy as np\n'), ((6309, 6338), 'numpy.asarray', 'np.asarray', (['split_predictions'], {}), '(split_predictions)\n', (6319, 6338), True, 'import numpy as np\n'), ((6363, 6382), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (6373, 6382), True, 'import numpy as np\n'), ((6791, 6842), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {}), '()\n', (6840, 6842), True, 'import tensorflow as tf\n'), ((9552, 
9619), 'gorilla.get_original_attribute', 'gorilla.get_original_attribute', (['gcs_utils', '"""gcs_dataset_info_files"""'], {}), "(gcs_utils, 'gcs_dataset_info_files')\n", (9582, 9619), False, 'import gorilla\n'), ((10242, 10286), 'fslks.experiments.Task.parse_train_tasks', 'Task.parse_train_tasks', (['FLAGS.training_tasks'], {}), '(FLAGS.training_tasks)\n', (10264, 10286), False, 'from fslks.experiments import Predictions, Task\n'), ((10318, 10369), 'fslks.experiments.Task.parse_validation_tasks', 'Task.parse_validation_tasks', (['FLAGS.validation_tasks'], {}), '(FLAGS.validation_tasks)\n', (10345, 10369), False, 'from fslks.experiments import Predictions, Task\n'), ((11874, 11922), 'os.makedirs', 'os.makedirs', (['FLAGS.checkpoint_dir'], {'exist_ok': '(True)'}), '(FLAGS.checkpoint_dir, exist_ok=True)\n', (11885, 11922), False, 'import os\n'), ((12088, 12124), 'os.makedirs', 'os.makedirs', (['best_dir'], {'exist_ok': '(True)'}), '(best_dir, exist_ok=True)\n', (12099, 12124), False, 'import os\n'), ((13295, 13319), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (13308, 13319), False, 'import os\n'), ((14179, 14274), 'absl.logging.warning', 'logging.warning', (['"""--prediction_dir was not specified, generating predictions from scratch"""'], {}), "(\n '--prediction_dir was not specified, generating predictions from scratch')\n", (14194, 14274), False, 'from absl import logging\n'), ((4266, 4310), 'os.makedirs', 'os.makedirs', (['split_output_dir'], {'exist_ok': '(True)'}), '(split_output_dir, exist_ok=True)\n', (4277, 4310), False, 'import os\n'), ((4341, 4390), 'os.path.join', 'os.path.join', (['split_output_dir', '"""predictions.csv"""'], {}), "(split_output_dir, 'predictions.csv')\n", (4353, 4390), False, 'import os\n'), ((7831, 8028), 'fslks.experiments.TFExperiment', 'experiments.TFExperiment', ([], {'cache_dir': 'FLAGS.cache_dir', 'configuration_name': 'FLAGS.init_checkpoint', 'max_seq_len': 'FLAGS.max_seq_len', 'use_xla': 
'FLAGS.use_xla', 'use_amp': 'FLAGS.use_amp', 'seed': 'FLAGS.seed'}), '(cache_dir=FLAGS.cache_dir, configuration_name=\n FLAGS.init_checkpoint, max_seq_len=FLAGS.max_seq_len, use_xla=FLAGS.\n use_xla, use_amp=FLAGS.use_amp, seed=FLAGS.seed)\n', (7855, 8028), False, 'from fslks import experiments\n'), ((9704, 9759), 'absl.logging.error', 'logging.error', (['"""Failed to connect to GCS"""'], {'exc_info': 'ioe'}), "('Failed to connect to GCS', exc_info=ioe)\n", (9717, 9759), False, 'from absl import logging\n'), ((11964, 12011), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', '"""flags.cfg"""'], {}), "(FLAGS.checkpoint_dir, 'flags.cfg')\n", (11976, 12011), False, 'import os\n'), ((12166, 12201), 'os.path.join', 'os.path.join', (['best_dir', '"""flags.cfg"""'], {}), "(best_dir, 'flags.cfg')\n", (12178, 12201), False, 'import os\n'), ((13337, 13403), 'absl.logging.info', 'logging.info', (["('Loading best performing checkpoint: %s' % model_dir)"], {}), "('Loading best performing checkpoint: %s' % model_dir)\n", (13349, 13403), False, 'from absl import logging\n'), ((4640, 4660), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (4650, 4660), False, 'import csv\n'), ((8360, 8738), 'fslks.experiments.PTExperiment', 'experiments.PTExperiment', ([], {'cache_dir': 'FLAGS.cache_dir', 'configuration_name': 'FLAGS.init_checkpoint', 'max_seq_len': 'FLAGS.max_seq_len', 'use_amp': 'FLAGS.use_amp', 'warmup_epochs': 'FLAGS.warmup_epochs', 'seed': 'FLAGS.seed', 'temperature': 'FLAGS.temperature', 'dynamic_mixing': 'FLAGS.dynamic_mixing', 'mix_from_validation': 'FLAGS.mix_from_validation', 'clip_mixing_size': 'FLAGS.clip_mixing_size'}), '(cache_dir=FLAGS.cache_dir, configuration_name=\n FLAGS.init_checkpoint, max_seq_len=FLAGS.max_seq_len, use_amp=FLAGS.\n use_amp, warmup_epochs=FLAGS.warmup_epochs, seed=FLAGS.seed,\n temperature=FLAGS.temperature, dynamic_mixing=FLAGS.dynamic_mixing,\n mix_from_validation=FLAGS.mix_from_validation, clip_mixing_size=FLAGS.\n 
clip_mixing_size)\n', (8384, 8738), False, 'from fslks import experiments\n'), ((10689, 10795), 'absl.logging.error', 'logging.error', (['"""Dynamic mixing from validation requites validation data for each training task!"""'], {}), "(\n 'Dynamic mixing from validation requites validation data for each training task!'\n )\n", (10702, 10795), False, 'from absl import logging\n'), ((10879, 10923), 'fslks.experiments.Task.split_in_dataset', 'Task.split_in_dataset', (['"""validation"""', 'dataset'], {}), "('validation', dataset)\n", (10900, 10923), False, 'from fslks.experiments import Predictions, Task\n'), ((10971, 10998), 'fslks.experiments.Task', 'Task', (['dataset', '"""validation"""'], {}), "(dataset, 'validation')\n", (10975, 10998), False, 'from fslks.experiments import Predictions, Task\n'), ((11023, 11080), 'absl.logging.warning', 'logging.warning', (['"""Adding %s to validation tasks"""', 'dataset'], {}), "('Adding %s to validation tasks', dataset)\n", (11038, 11080), False, 'from absl import logging\n'), ((11153, 11181), 'fslks.experiments.Task', 'Task', (['dataset', '"""train[:70%]"""'], {}), "(dataset, 'train[:70%]')\n", (11157, 11181), False, 'from fslks.experiments import Predictions, Task\n'), ((11228, 11257), 'fslks.experiments.Task', 'Task', (['dataset', '"""train[-30%:]"""'], {}), "(dataset, 'train[-30%:]')\n", (11232, 11257), False, 'from fslks.experiments import Predictions, Task\n'), ((11282, 11376), 'absl.logging.warning', 'logging.warning', (['"""Adjusting %s to use 80%% for training and 20%% for validation"""', 'dataset'], {}), "('Adjusting %s to use 80%% for training and 20%% for validation'\n , dataset)\n", (11297, 11376), False, 'from absl import logging\n')] |
from typing import Iterable, List
import numpy as np
from ortools.algorithms.pywrapknapsack_solver import KnapsackSolver
import scipy.ndimage as nd
from loguru import logger
def f1_score(pred: np.ndarray, test: np.ndarray) -> float:
    """Compute F1-score on binary classification task.

    :param pred: Predicted binary label. Sized [N].
    :param test: Ground truth binary label. Sized [N].
    :return: F1-score value (0.0 when prediction and ground truth share no
        positive frames).
    """
    assert pred.shape == test.shape
    # FIX: np.bool was a deprecated alias for the builtin bool and was removed
    # in NumPy 1.24; use the builtin directly.
    pred = np.asarray(pred, dtype=bool)
    test = np.asarray(test, dtype=bool)
    overlap = (pred & test).sum()
    # No shared positives means precision or recall is 0, hence F1 is 0;
    # returning early also avoids division by zero when pred is all-False.
    if overlap == 0:
        return 0.0
    precision = overlap / pred.sum()
    recall = overlap / test.sum()
    logger.info(f"Precision {precision}, Recall {recall}")
    f1 = 2 * precision * recall / (precision + recall)
    return float(f1)
def knapsack(values: Iterable[int],
             weights: Iterable[int],
             capacity: int
             ) -> List[int]:
    """Solve the 0/1 knapsack problem with OR-Tools' dynamic-programming solver.

    :param values: Values of each item. Sized [N].
    :param weights: Weights of each item. Sized [N].
    :param capacity: Total capacity of the knapsack.
    :return: Indices of the items packed into the knapsack.
    """
    solver = KnapsackSolver(
        KnapsackSolver.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER, 'test'
    )
    # The solver requires concrete lists (weights/capacities as 2D/1D lists).
    item_values = list(values)
    item_weights = list(weights)
    solver.Init(item_values, [item_weights], [int(capacity)])
    solver.Solve()
    # Collect the indices the solver decided to keep.
    return [idx for idx in range(len(item_weights))
            if solver.BestSolutionContains(idx)]
def downsample_summ(summ: np.ndarray, factor: int = 15) -> np.ndarray:
    """Down-sample the summary by keeping every ``factor``-th frame.

    :param summ: Frame-level summary array.
    :param factor: Down-sampling stride; defaults to 15, preserving the
        original hard-coded behavior.
    :return: The down-sampled summary (a strided view of ``summ``).
    """
    return summ[::factor]
def get_keyshot_summ(pred: np.ndarray,
                     cps: np.ndarray,
                     n_frames: int,
                     nfps: np.ndarray,
                     picks: np.ndarray,
                     proportion: float = 0.15
                     ) -> np.ndarray:
    """Generate keyshot-based video summary i.e. a binary vector.

    :param pred: Predicted importance scores.
    :param cps: Change points, 2D matrix, each row contains a segment.
    :param n_frames: Original number of frames.
    :param nfps: Number of frames per segment.
    :param picks: Positions of subsampled frames in the original video.
    :param proportion: Max length of video summary compared to original length.
    :return: Generated keyshot-based summary (boolean vector of length n_frames).
    """
    assert pred.shape == picks.shape
    picks = np.asarray(picks, dtype=np.int32)
    # Get original frame scores from downsampled sequence: each picked frame's
    # score is propagated forward until the next picked position.
    frame_scores = np.zeros(n_frames, dtype=np.float32)
    for i in range(len(picks)):
        pos_lo = picks[i]
        pos_hi = picks[i + 1] if i + 1 < len(picks) else n_frames
        frame_scores[pos_lo:pos_hi] = pred[i]
    # Assign scores to video shots as the average of the frames.
    # Means are scaled by 1000 and truncated since the knapsack solver
    # works on integer values.
    seg_scores = np.zeros(len(cps), dtype=np.int32)
    for seg_idx, (first, last) in enumerate(cps):
        scores = frame_scores[first:last + 1]
        seg_scores[seg_idx] = int(1000 * scores.mean())
    # Apply knapsack algorithm to find the best shots under the length budget.
    limits = int(n_frames * proportion)
    packed = knapsack(seg_scores, nfps, limits)
    # Get key-shot based summary.
    # FIX: np.bool was a deprecated alias for the builtin bool and was removed
    # in NumPy 1.24; use the builtin directly.
    summary = np.zeros(n_frames, dtype=bool)
    for seg_idx in packed:
        first, last = cps[seg_idx]
        summary[first:last + 1] = True
    return summary
def bbox2summary(seq_len: int,
                 pred_cls: np.ndarray,
                 pred_bboxes: np.ndarray,
                 change_points: np.ndarray,
                 n_frames: int,
                 nfps: np.ndarray,
                 picks: np.ndarray,
                 binary_closing: bool = False,
                 ) -> np.ndarray:
    """Convert predicted bounding boxes to a keyshot summary.

    Each box [lo, hi) paints its class score onto a frame-score track,
    keeping the maximum where boxes overlap; the track is then turned into
    a keyshot summary, optionally smoothed with a binary closing.
    """
    frame_scores = np.zeros(seq_len, dtype=np.float32)
    for idx, box in enumerate(pred_bboxes):
        lo, hi = box[0], box[1]
        frame_scores[lo:hi] = np.maximum(frame_scores[lo:hi], [pred_cls[idx]])
    pred_summ = get_keyshot_summ(frame_scores, change_points, n_frames, nfps, picks)
    if binary_closing:
        # Fill single-frame holes in the selected shots.
        pred_summ = nd.binary_closing(pred_summ.astype(np.int32)).astype(bool)
    return pred_summ
def get_summ_diversity(pred_summ: np.ndarray,
                       features: np.ndarray
                       ) -> float:
    """Evaluate diversity of the generated summary.

    :param pred_summ: Predicted down-sampled summary. Sized [N, F].
    :param features: Normalized down-sampled video features. Sized [N, F].
    :return: Diversity value (0.0 when fewer than two frames are selected).
    """
    assert len(pred_summ) == len(features)
    # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the documented drop-in replacement.
    pred_summ = np.asarray(pred_summ, dtype=bool)
    pos_features = features[pred_summ]
    if len(pos_features) < 2:
        return 0.0
    diversity = 0.0
    # Sum of pairwise inner products between selected frames, excluding
    # each frame's self-similarity term.
    for feat in pos_features:
        diversity += (feat * pos_features).sum() - (feat * feat).sum()
    # Average over the number of ordered pairs.
    diversity /= len(pos_features) * (len(pos_features) - 1)
    return diversity
def get_summ_f1score(pred_summ: np.ndarray,
                     test_summ: np.ndarray,
                     eval_metric: str = 'avg'
                     ) -> float:
    """Compare predicted summary with ground truth summary (keyshot-based).

    :param pred_summ: Predicted binary label of N frames. Sized [N].
    :param test_summ: Ground truth binary labels of U users. Sized [U, N].
    :param eval_metric: Evaluation method. Choose from (max, avg).
    :return: F1-score value.
    :raises ValueError: If ``eval_metric`` is neither 'avg' nor 'max'.
    """
    # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the documented drop-in replacement.
    pred_summ = np.asarray(pred_summ, dtype=bool)
    test_summ = np.asarray(test_summ, dtype=bool)
    # A single user's labels arrive as a 1D vector; lift to [1, N].
    if len(test_summ.shape) == 1:
        test_summ = test_summ.reshape((1, -1))
    _, n_frames = test_summ.shape
    # Align the prediction length to the ground truth: truncate or zero-pad.
    if pred_summ.size > n_frames:
        pred_summ = pred_summ[:n_frames]
    elif pred_summ.size < n_frames:
        pred_summ = np.pad(pred_summ, (0, n_frames - pred_summ.size))
    # One F1 score per annotator, then aggregate.
    f1s = [f1_score(user_summ, pred_summ) for user_summ in test_summ]
    if eval_metric == 'avg':
        final_f1 = np.mean(f1s)
    elif eval_metric == 'max':
        final_f1 = np.max(f1s)
    else:
        raise ValueError(f'Invalid eval metric {eval_metric}')
    return float(final_f1)
| [
"numpy.mean",
"loguru.logger.info",
"numpy.asarray",
"ortools.algorithms.pywrapknapsack_solver.KnapsackSolver",
"numpy.max",
"numpy.zeros",
"numpy.pad",
"numpy.maximum"
] | [((483, 514), 'numpy.asarray', 'np.asarray', (['pred'], {'dtype': 'np.bool'}), '(pred, dtype=np.bool)\n', (493, 514), True, 'import numpy as np\n'), ((526, 557), 'numpy.asarray', 'np.asarray', (['test'], {'dtype': 'np.bool'}), '(test, dtype=np.bool)\n', (536, 557), True, 'import numpy as np\n'), ((707, 761), 'loguru.logger.info', 'logger.info', (['f"""Precision {precision}, Recall {recall}"""'], {}), "(f'Precision {precision}, Recall {recall}')\n", (718, 761), False, 'from loguru import logger\n'), ((1262, 1336), 'ortools.algorithms.pywrapknapsack_solver.KnapsackSolver', 'KnapsackSolver', (['KnapsackSolver.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER', '"""test"""'], {}), "(KnapsackSolver.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER, 'test')\n", (1276, 1336), False, 'from ortools.algorithms.pywrapknapsack_solver import KnapsackSolver\n'), ((2598, 2631), 'numpy.asarray', 'np.asarray', (['picks'], {'dtype': 'np.int32'}), '(picks, dtype=np.int32)\n', (2608, 2631), True, 'import numpy as np\n'), ((2710, 2746), 'numpy.zeros', 'np.zeros', (['n_frames'], {'dtype': 'np.float32'}), '(n_frames, dtype=np.float32)\n', (2718, 2746), True, 'import numpy as np\n'), ((3378, 3411), 'numpy.zeros', 'np.zeros', (['n_frames'], {'dtype': 'np.bool'}), '(n_frames, dtype=np.bool)\n', (3386, 3411), True, 'import numpy as np\n'), ((3941, 3976), 'numpy.zeros', 'np.zeros', (['seq_len'], {'dtype': 'np.float32'}), '(seq_len, dtype=np.float32)\n', (3949, 3976), True, 'import numpy as np\n'), ((4781, 4817), 'numpy.asarray', 'np.asarray', (['pred_summ'], {'dtype': 'np.bool'}), '(pred_summ, dtype=np.bool)\n', (4791, 4817), True, 'import numpy as np\n'), ((5622, 5658), 'numpy.asarray', 'np.asarray', (['pred_summ'], {'dtype': 'np.bool'}), '(pred_summ, dtype=np.bool)\n', (5632, 5658), True, 'import numpy as np\n'), ((5675, 5711), 'numpy.asarray', 'np.asarray', (['test_summ'], {'dtype': 'np.bool'}), '(test_summ, dtype=np.bool)\n', (5685, 5711), True, 'import numpy as np\n'), ((4113, 4159), 'numpy.maximum', 
'np.maximum', (['score[lo:hi]', '[pred_cls[bbox_idx]]'], {}), '(score[lo:hi], [pred_cls[bbox_idx]])\n', (4123, 4159), True, 'import numpy as np\n'), ((6129, 6141), 'numpy.mean', 'np.mean', (['f1s'], {}), '(f1s)\n', (6136, 6141), True, 'import numpy as np\n'), ((5959, 6008), 'numpy.pad', 'np.pad', (['pred_summ', '(0, n_frames - pred_summ.size)'], {}), '(pred_summ, (0, n_frames - pred_summ.size))\n', (5965, 6008), True, 'import numpy as np\n'), ((6192, 6203), 'numpy.max', 'np.max', (['f1s'], {}), '(f1s)\n', (6198, 6203), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import pandas as pd
import argparse
import sys
import itertools
import re
def get_segment_range(bin_start, bin_end):
    """Return the inclusive range of bin indices from bin_start to bin_end."""
    lo = int(bin_start)
    hi = int(bin_end)
    return range(lo, hi + 1)
def get_1D_peak(segment_range, MACS2_peak_ranges_list):
    """Return True if any bin of segment_range falls inside a 1D MACS2 peak."""
    return any(segment in MACS2_peak_ranges_list for segment in segment_range)
def validate_input_data(input_data):
    """Extract run parameters from the transposed parameter table.

    Replaces a long copy-pasted chain of near-identical ``if`` blocks with
    two key lists (string-valued vs integer-valued parameters). A key is
    copied into the result only if it is present as a column; SEX_CHROMS
    always gets a value (empty string when absent).

    :param input_data: one-row DataFrame (index label 1) whose columns are
        parameter names, as produced by ``init``.
    :return: dict of parameter name -> value.
    """
    # Parameters kept verbatim as strings.
    str_keys = ['OUT_DIR', 'DATASET_NAME', 'MACS2_PATH', 'GF_PATH',
                'LONG_PATH', 'LONG_FORMAT', 'SHORT_PATH', 'SHORT_FORMAT']
    # Parameters cast to int (they arrive as text from the KEY=VALUE file).
    int_keys = ['BINNING_RANGE', 'BIN_SIZE', 'N_CHROMS']
    params = {}
    for key in str_keys:
        if key in input_data:
            params[key] = input_data[key][1]
    for key in int_keys:
        if key in input_data:
            params[key] = input_data[key].astype('int')[1]
    # SEX_CHROMS is optional and defaults to 'no sex chromosomes'.
    params['SEX_CHROMS'] = input_data['SEX_CHROMS'][1] if 'SEX_CHROMS' in input_data else ''
    return params
def load_MACS2(MACS2_PATH):
    """Load MACS2 peak calls as a chr/start/end DataFrame.

    Lines starting with '#' are skipped; an empty peak file yields an
    empty frame with the same three columns.
    """
    columns = ['chr', 'start', 'end']
    try:
        peaks = pd.read_csv(MACS2_PATH, sep='\t', skip_blank_lines=True,
                            comment='#', header=None, usecols=[0, 1, 2])
    except pd.errors.EmptyDataError:
        return pd.DataFrame(columns=columns)
    peaks.columns = columns
    return peaks.astype({'chr': str})
def load_metadata(GF_PATH, BIN_SIZE):
    """Load the genomic-features file and derive per-row bin coordinates.

    bin1_mid, bin2_mid and bin all hold start // BIN_SIZE; an empty file
    yields an empty frame carrying the full set of columns.
    """
    base_cols = ['chr', 'start', 'end', 'effective_length', 'gc', 'mappability']
    derived_cols = ['bin1_mid', 'bin2_mid', 'bin']
    try:
        meta = pd.read_csv(GF_PATH, sep='\t', header=None, low_memory=False)
    except pd.errors.EmptyDataError:
        return pd.DataFrame(columns=base_cols + derived_cols)
    meta.columns = base_cols
    meta = meta.astype({'chr': str})
    bin_idx = meta['start'] // BIN_SIZE
    meta['bin1_mid'] = bin_idx
    meta['bin2_mid'] = bin_idx
    meta['bin'] = bin_idx
    return meta
def parse_fname(chrom, type, params):
    """Build a short/long input filename by expanding [TAG] placeholders.

    Every [PARAM] tag found in the path+format template is replaced by the
    matching run parameter; [CHROMOSOME] is mandatory and expanded last.
    Exits the program when the format lacks the [CHROMOSOME] tag.
    """
    if type == 'short':
        template = params['SHORT_PATH'] + params['SHORT_FORMAT']
        fmt_key = 'SHORT_FORMAT'
    else:
        template = params['LONG_PATH'] + params['LONG_FORMAT']
        fmt_key = 'LONG_FORMAT'
    for name in params:
        tag = '[' + name + ']'
        if tag in template:
            template = template.replace(tag, params[name])
    if '[CHROMOSOME]' not in template:
        print('File format needs to contain [CHROMOSOME] tag:', params[fmt_key])
        exit()
    return template.replace('[CHROMOSOME]', chrom)
def get_chrom_from_MACS2(MACS2_full):
    """List the chromosome names present in the peaks, in first-seen order.

    Only names that are an arabic (1-99) or roman numeral, optionally
    prefixed with 'chr', are kept; chrX/chrY/chrM etc. are dropped.
    """
    if not MACS2_full.shape[0]:
        return []
    pattern = re.compile('^(chr)?((\d{1,2})|(IX|IV|V?I{0,3}))$')
    return [name for name in MACS2_full['chr'].unique().tolist() if pattern.match(name)]
def init(p):
    """Run the MAPS preprocessing pipeline.

    For every usable chromosome: bin the short reads and long read pairs,
    split bin pairs into AND/XOR sets by MACS2 peak overlap, attach
    genomic-feature covariates, and write the reg_raw.*.and/.xor files plus
    a .maps.qc summary.

    :param p: argparse namespace with attribute ``run_file`` — a KEY=VALUE
        parameter file parsed below.
    """
    ## checking that all files are available
    print('loading parameters file')
    # Parameter file is KEY=VALUE per line; '#' starts a comment.
    input_data = pd.read_csv(p.run_file,sep='=',skip_blank_lines=True, comment='#',index_col=0,header=None)
    input_data = input_data.transpose()
    params = validate_input_data(input_data)
    print('loading MACS2 peaks')
    MACS2_full = load_MACS2(params['MACS2_PATH'])
    ## setting up chromosomes, TODO: extract the chromosome from MACS2_full["chr"], make sure use the clean chromosomes
    #chroms = ['chr' + str(i) for i in range(1,params['N_CHROMS']+1,1)]
    chroms = get_chrom_from_MACS2(MACS2_full)
    print(chroms)
    # Sex chromosomes are opt-in via the SEX_CHROMS parameter.
    if params['SEX_CHROMS'] == 'X':
        chroms.append('chrX')
    elif params['SEX_CHROMS'] == 'Y':
        chroms.append('chrY')
    elif params['SEX_CHROMS'] == 'XY':
        chroms.append('chrX')
        chroms.append('chrY')
    print(chroms)
    # Maximum bin-pair distance, expressed in number of bins.
    params['BIN_RANGE'] = float(params['BINNING_RANGE'])/float(params['BIN_SIZE'])
    print('loading metadata file')
    metadata_full = load_metadata(params['GF_PATH'], params['BIN_SIZE'])
    qc_str = '' ## content of qc.maps file
    for CHR in chroms:
        print('doing chromosome ',CHR,'\n')
        #handling MACS2 peaks
        print('-- handling MACS2 peaks')
        MACS2 = MACS2_full[MACS2_full['chr'] == CHR].copy()
        # Retry without the 'chr' prefix in case the peak file uses bare names.
        if not MACS2.shape[0]:
            CHR = CHR.replace("chr", "")
            MACS2 = MACS2_full[MACS2_full['chr'] == CHR].copy()
            if not MACS2.shape[0]:
                continue
        MACS2['start_bin'] = np.floor(MACS2['start']/params['BIN_SIZE']).fillna(0)
        MACS2['end_bin'] = np.ceil(MACS2['end']/params['BIN_SIZE']).fillna(0)
        #perform this hack because apply returns wrong data type in some rare case
        specialCase = False
        if MACS2.iloc[0]['end_bin'] - MACS2.iloc[0]['start_bin'] == MACS2.shape[1] - 1:
            MACS2.iloc[0,MACS2.columns.get_loc('start_bin')] = MACS2.iloc[0]['start_bin'] - 1
            specialCase = True
        # Flatten the per-peak bin ranges into one set of peak-covered bins.
        MACS2_peak_ranges = MACS2.apply(lambda row: range(int(row['start_bin']),int(row['end_bin'])), axis=1).values.tolist()
        MACS2_peak_ranges_list = set(itertools.chain.from_iterable(MACS2_peak_ranges))
        if specialCase:
            MACS2_peak_ranges_list.remove(MACS2.iloc[0]['start_bin'])
        print('-- handling short.bed\n')
        ps_short = pd.read_csv(parse_fname(CHR, 'short', params),header=None,sep='\t')
        if ps_short.shape[0]:
            new_cols = ['chr','start','end','name']
            ps_short.rename(columns=dict(zip(ps_short.columns[0:], new_cols)),inplace=True)
            ps_short = ps_short.astype({'chr':str})
            # Assign each short read to the bin of its midpoint, then count per bin.
            ps_short['bin'] = ps_short[['start','end']].mean(axis=1)//params['BIN_SIZE']
            ps_short['short_count'] = 1
            count_data_short = ps_short[['chr','bin','short_count']].groupby(['chr','bin']).count()
            count_data_short.reset_index(inplace=True)
            print('-- handling long.bedpe\n')
            ##### getting overlap
            ## load long.bed file
            ps_long = pd.read_csv(parse_fname(CHR, 'long', params),header=None,sep='\t')
            long_cols = ['chr1','start1','end1','chr2','start2','end2', 'count']
            ps_long.rename(columns=dict(zip(ps_long.columns[0:], long_cols)),inplace=True)
            ps_long = ps_long.astype({'chr1':str, 'chr2':str})
            ## filter only reads at the same chromosome and proper orientation
            ps_long = ps_long[(ps_long['chr1'] == CHR) & (ps_long['chr2'] == CHR) ]
            if ps_long.shape[0]:
                # Bin both read ends by their midpoints and order so bin1 <= bin2.
                ps_long['read1_bin_mid'] = ((ps_long['start1'] + ps_long['end1']) / 2.0)//params['BIN_SIZE']
                ps_long['read2_bin_mid'] = ((ps_long['start2'] + ps_long['end2']) / 2.0)//params['BIN_SIZE']
                ps_long['bin1_mid'] = ps_long.loc[:,['read1_bin_mid','read2_bin_mid']].min(axis=1)
                ps_long['bin2_mid'] = ps_long.loc[:,['read1_bin_mid','read2_bin_mid']].max(axis=1)
                #ps_long['count'] = 1
                #count_data = ps_long[['bin1_mid', 'bin2_mid','count']].groupby(['bin1_mid','bin2_mid']).count()
                count_data = ps_long[['bin1_mid', 'bin2_mid', 'count']]
                count_data.reset_index(inplace=True)
                # AND set: both ends in a peak bin, within BIN_RANGE, off-diagonal.
                count_data_and = count_data[(count_data['bin1_mid'].isin(MACS2_peak_ranges_list)) & (count_data['bin2_mid'].isin(MACS2_peak_ranges_list))].copy()
                count_data_and = count_data_and[(np.abs(count_data_and['bin1_mid'] - count_data_and['bin2_mid']) <= params['BIN_RANGE'])
                                    & (np.abs(count_data_and['bin1_mid'] - count_data_and['bin2_mid']) >= 1)]
                count_data_and['1D_peak_bin1'] = 1
                count_data_and['1D_peak_bin2'] = 1
                # XOR set: exactly one end in a peak bin.
                count_data_xor = count_data[(count_data.bin1_mid.isin(MACS2_peak_ranges_list)) ^ (count_data.bin2_mid.isin(MACS2_peak_ranges_list))]
                count_data_xor = count_data_xor[(np.abs(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid']) <= params['BIN_RANGE'])
                                    & (np.abs(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid']) >= 1)]
                count_data_xor_bin1 = count_data_xor[(count_data_xor.bin1_mid.isin(MACS2_peak_ranges_list))].copy()
                count_data_xor_bin1['1D_peak_bin1'] = 1
                count_data_xor_bin1['1D_peak_bin2'] = 0
                count_data_xor_bin2 = count_data_xor[(count_data_xor.bin2_mid.isin(MACS2_peak_ranges_list))].copy()
                count_data_xor_bin2['1D_peak_bin1'] = 0
                count_data_xor_bin2['1D_peak_bin2'] = 1
                count_data_xor = pd.concat([count_data_xor_bin1, count_data_xor_bin2],ignore_index=True)
                print('-- calculating values for maps.qc file\n')
                AND_sum = count_data_and['count'].sum()
                XOR_sum = count_data_xor['count'].sum()
                NOT_sum = count_data['count'].sum() - AND_sum - XOR_sum
                qc_str = qc_str +\
                'AND_set\t' + str(AND_sum) + '\tnumber of pairs in AND set at chromsome ' + CHR + '\n' +\
                'XOR_set\t' + str(XOR_sum) + '\tnumber of pairs in XOR set at chromsome ' + CHR + '\n' +\
                'NOT_set\t' + str(NOT_sum) + '\tnumber of pairs in NOT set at chromsome ' + CHR + '\n'
                print('-- handling metadata\n')
                metadata = metadata_full[metadata_full['chr'] == CHR].copy()
                metadata = pd.merge(metadata, count_data_short, on = ['bin','chr'], how='outer')
                metadata['short_count'] = metadata['short_count'].fillna(0)
                print('-- attaching genome features atributes to AND set')
                reg_and = pd.merge(count_data_and, metadata[['bin1_mid','effective_length','gc','mappability','short_count']],
                            on='bin1_mid')
                reg_and.rename(columns={'effective_length':'effective_length1','gc':'gc1','mappability':'mappability1',
                                'short_count':'short_count1'},inplace=True)
                reg_and = pd.merge(reg_and, metadata[['bin2_mid','effective_length','gc','mappability','short_count']],
                            on='bin2_mid')
                reg_and.rename(columns={'effective_length':'effective_length2','gc':'gc2','mappability':'mappability2',
                                'short_count':'short_count2'},inplace=True)
                reg_and = reg_and[(reg_and['effective_length1'] > 0) & (reg_and['effective_length2'] > 0)]
                # Log-transformed covariates consumed by the downstream model.
                reg_and['dist'] = pd.to_numeric(np.abs(reg_and['bin1_mid'] - reg_and['bin2_mid']))
                reg_and['logl'] = np.log((reg_and['effective_length1'] + 1.0) * (reg_and['effective_length2'] + 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))
                reg_and['loggc'] = np.log(reg_and['gc1'] * reg_and['gc2'])
                reg_and['logm'] = np.log(reg_and['mappability1'] * reg_and['mappability2'])
                reg_and['logdist'] = np.log((1.0 + reg_and['dist']) / params['BIN_RANGE'])
                max_short_and = (reg_and['short_count1'].max() + 1.0) * (reg_and['short_count2'].max() + 1.0)
                reg_and['logShortCount'] = np.log(
                    (reg_and['short_count1'] + 1.0) * (reg_and['short_count2'] + 1.0) / max_short_and
                )
                # Convert bin indices back to genomic coordinates.
                reg_and['bin1_mid'] = reg_and['bin1_mid'] * params['BIN_SIZE']
                reg_and['bin2_mid'] = reg_and['bin2_mid'] * params['BIN_SIZE']
                print('-- attaching genome features atributes to XOR set')
                reg_xor = pd.merge(count_data_xor, metadata[['bin1_mid','effective_length','gc','mappability','short_count']],
                            on='bin1_mid')
                reg_xor.rename(columns={'effective_length':'effective_length1','gc':'gc1','mappability':'mappability1',
                                'short_count':'short_count1'},inplace=True)
                reg_xor = pd.merge(reg_xor, metadata[['bin2_mid','effective_length','gc','mappability','short_count']],
                            on='bin2_mid')
                reg_xor.rename(columns={'effective_length':'effective_length2','gc':'gc2','mappability':'mappability2',
                                'short_count':'short_count2'},inplace=True)
                reg_xor = reg_xor[(reg_xor['effective_length1'] > 0) & (reg_xor['effective_length2'] > 0)]
                reg_xor['dist'] = pd.to_numeric(np.abs(reg_xor['bin1_mid'] - reg_xor['bin2_mid']))
                reg_xor['logl'] = np.log((reg_xor['effective_length1'] + 1.0) * (reg_xor['effective_length2'] + 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))
                reg_xor['loggc'] = np.log(reg_xor['gc1'] * reg_xor['gc2'])
                reg_xor['logm'] = np.log(reg_xor['mappability1'] * reg_xor['mappability2'])
                reg_xor['logdist'] = np.log((1.0 + reg_xor['dist']) / params['BIN_RANGE'])
                max_short_xor = (reg_xor['short_count1'].max() + 1.0) * (reg_xor['short_count2'].max() + 1.0)
                reg_xor['logShortCount'] = np.log(
                    (reg_xor['short_count1'] + 1.0) * (reg_xor['short_count2'] + 1.0) / max_short_xor
                )
                reg_xor['bin1_mid'] = reg_xor['bin1_mid'] * params['BIN_SIZE']
                reg_xor['bin2_mid'] = reg_xor['bin2_mid'] * params['BIN_SIZE']
                print ('--saving output\n')
                fout_name = params['OUT_DIR'] + 'reg_raw.' + str(CHR) + '.' + params['DATASET_NAME'] + '.'+ str(int(params['BIN_SIZE']/1000)) + 'k.and'
                reg_and.to_csv(fout_name, sep='\t')
                fout_name = params['OUT_DIR'] + 'reg_raw.' + str(CHR) + '.' + params['DATASET_NAME'] + '.'+ str(int(params['BIN_SIZE']/1000)) + 'k.xor'
                reg_xor.to_csv(fout_name, sep='\t')
        else:
            print('no bin pairs in long or short bedpe files for chromosome ',CHR,'. Doing next chromosome')
    print('-- saving .qc.maps file\n')
    qc_fname = params['OUT_DIR'] + params['DATASET_NAME'] + '.maps.qc'
    qc_file = open(qc_fname,'w')
    qc_file.write(qc_str)
    qc_file.close()
def main():
    """Entry point: parse the run-file argument and launch the MAPS pipeline."""
    parser = argparse.ArgumentParser(
        prog='PROG',
        description="MAPS",
        epilog="This is where the command-line utility's epilog goes.")
    parser.add_argument('run_file', help='file containing run parameters')
    init(parser.parse_args(sys.argv[1:]))
if __name__ == "__main__":
    main()
| [
"numpy.abs",
"numpy.ceil",
"argparse.ArgumentParser",
"pandas.read_csv",
"re.compile",
"pandas.merge",
"numpy.log",
"numpy.floor",
"itertools.chain.from_iterable",
"pandas.DataFrame",
"pandas.concat"
] | [((3814, 3912), 'pandas.read_csv', 'pd.read_csv', (['p.run_file'], {'sep': '"""="""', 'skip_blank_lines': '(True)', 'comment': '"""#"""', 'index_col': '(0)', 'header': 'None'}), "(p.run_file, sep='=', skip_blank_lines=True, comment='#',\n index_col=0, header=None)\n", (3825, 3912), True, 'import pandas as pd\n'), ((14821, 14846), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14844, 14846), False, 'import argparse\n'), ((1760, 1865), 'pandas.read_csv', 'pd.read_csv', (['MACS2_PATH'], {'sep': '"""\t"""', 'skip_blank_lines': '(True)', 'comment': '"""#"""', 'header': 'None', 'usecols': '[0, 1, 2]'}), "(MACS2_PATH, sep='\\t', skip_blank_lines=True, comment='#',\n header=None, usecols=[0, 1, 2])\n", (1771, 1865), True, 'import pandas as pd\n'), ((2158, 2219), 'pandas.read_csv', 'pd.read_csv', (['GF_PATH'], {'sep': '"""\t"""', 'header': 'None', 'low_memory': '(False)'}), "(GF_PATH, sep='\\t', header=None, low_memory=False)\n", (2169, 2219), True, 'import pandas as pd\n'), ((3590, 3641), 're.compile', 're.compile', (['"""^(chr)?((\\\\d{1,2})|(IX|IV|V?I{0,3}))$"""'], {}), "('^(chr)?((\\\\d{1,2})|(IX|IV|V?I{0,3}))$')\n", (3600, 3641), False, 'import re\n'), ((2019, 2064), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chr', 'start', 'end']"}), "(columns=['chr', 'start', 'end'])\n", (2031, 2064), True, 'import pandas as pd\n'), ((2618, 2739), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chr', 'start', 'end', 'effective_length', 'gc', 'mappability', 'bin1_mid',\n 'bin2_mid', 'bin']"}), "(columns=['chr', 'start', 'end', 'effective_length', 'gc',\n 'mappability', 'bin1_mid', 'bin2_mid', 'bin'])\n", (2630, 2739), True, 'import pandas as pd\n'), ((5856, 5904), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['MACS2_peak_ranges'], {}), '(MACS2_peak_ranges)\n', (5885, 5904), False, 'import itertools\n'), ((5237, 5282), 'numpy.floor', 'np.floor', (["(MACS2['start'] / params['BIN_SIZE'])"], {}), "(MACS2['start'] / 
params['BIN_SIZE'])\n", (5245, 5282), True, 'import numpy as np\n'), ((5318, 5360), 'numpy.ceil', 'np.ceil', (["(MACS2['end'] / params['BIN_SIZE'])"], {}), "(MACS2['end'] / params['BIN_SIZE'])\n", (5325, 5360), True, 'import numpy as np\n'), ((9343, 9415), 'pandas.concat', 'pd.concat', (['[count_data_xor_bin1, count_data_xor_bin2]'], {'ignore_index': '(True)'}), '([count_data_xor_bin1, count_data_xor_bin2], ignore_index=True)\n', (9352, 9415), True, 'import pandas as pd\n'), ((10179, 10247), 'pandas.merge', 'pd.merge', (['metadata', 'count_data_short'], {'on': "['bin', 'chr']", 'how': '"""outer"""'}), "(metadata, count_data_short, on=['bin', 'chr'], how='outer')\n", (10187, 10247), True, 'import pandas as pd\n'), ((10426, 10549), 'pandas.merge', 'pd.merge', (['count_data_and', "metadata[['bin1_mid', 'effective_length', 'gc', 'mappability', 'short_count']]"], {'on': '"""bin1_mid"""'}), "(count_data_and, metadata[['bin1_mid', 'effective_length', 'gc',\n 'mappability', 'short_count']], on='bin1_mid')\n", (10434, 10549), True, 'import pandas as pd\n'), ((10786, 10902), 'pandas.merge', 'pd.merge', (['reg_and', "metadata[['bin2_mid', 'effective_length', 'gc', 'mappability', 'short_count']]"], {'on': '"""bin2_mid"""'}), "(reg_and, metadata[['bin2_mid', 'effective_length', 'gc',\n 'mappability', 'short_count']], on='bin2_mid')\n", (10794, 10902), True, 'import pandas as pd\n'), ((11353, 11484), 'numpy.log', 'np.log', (["((reg_and['effective_length1'] + 1.0) * (reg_and['effective_length2'] + 1.0\n ) / (params['BIN_SIZE'] * params['BIN_SIZE']))"], {}), "((reg_and['effective_length1'] + 1.0) * (reg_and['effective_length2'] +\n 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))\n", (11359, 11484), True, 'import numpy as np\n'), ((11516, 11555), 'numpy.log', 'np.log', (["(reg_and['gc1'] * reg_and['gc2'])"], {}), "(reg_and['gc1'] * reg_and['gc2'])\n", (11522, 11555), True, 'import numpy as np\n'), ((11590, 11647), 'numpy.log', 'np.log', (["(reg_and['mappability1'] * 
reg_and['mappability2'])"], {}), "(reg_and['mappability1'] * reg_and['mappability2'])\n", (11596, 11647), True, 'import numpy as np\n'), ((11685, 11738), 'numpy.log', 'np.log', (["((1.0 + reg_and['dist']) / params['BIN_RANGE'])"], {}), "((1.0 + reg_and['dist']) / params['BIN_RANGE'])\n", (11691, 11738), True, 'import numpy as np\n'), ((11892, 11985), 'numpy.log', 'np.log', (["((reg_and['short_count1'] + 1.0) * (reg_and['short_count2'] + 1.0) /\n max_short_and)"], {}), "((reg_and['short_count1'] + 1.0) * (reg_and['short_count2'] + 1.0) /\n max_short_and)\n", (11898, 11985), True, 'import numpy as np\n'), ((12279, 12402), 'pandas.merge', 'pd.merge', (['count_data_xor', "metadata[['bin1_mid', 'effective_length', 'gc', 'mappability', 'short_count']]"], {'on': '"""bin1_mid"""'}), "(count_data_xor, metadata[['bin1_mid', 'effective_length', 'gc',\n 'mappability', 'short_count']], on='bin1_mid')\n", (12287, 12402), True, 'import pandas as pd\n'), ((12639, 12755), 'pandas.merge', 'pd.merge', (['reg_xor', "metadata[['bin2_mid', 'effective_length', 'gc', 'mappability', 'short_count']]"], {'on': '"""bin2_mid"""'}), "(reg_xor, metadata[['bin2_mid', 'effective_length', 'gc',\n 'mappability', 'short_count']], on='bin2_mid')\n", (12647, 12755), True, 'import pandas as pd\n'), ((13206, 13337), 'numpy.log', 'np.log', (["((reg_xor['effective_length1'] + 1.0) * (reg_xor['effective_length2'] + 1.0\n ) / (params['BIN_SIZE'] * params['BIN_SIZE']))"], {}), "((reg_xor['effective_length1'] + 1.0) * (reg_xor['effective_length2'] +\n 1.0) / (params['BIN_SIZE'] * params['BIN_SIZE']))\n", (13212, 13337), True, 'import numpy as np\n'), ((13369, 13408), 'numpy.log', 'np.log', (["(reg_xor['gc1'] * reg_xor['gc2'])"], {}), "(reg_xor['gc1'] * reg_xor['gc2'])\n", (13375, 13408), True, 'import numpy as np\n'), ((13443, 13500), 'numpy.log', 'np.log', (["(reg_xor['mappability1'] * reg_xor['mappability2'])"], {}), "(reg_xor['mappability1'] * reg_xor['mappability2'])\n", (13449, 13500), True, 'import numpy 
as np\n'), ((13538, 13591), 'numpy.log', 'np.log', (["((1.0 + reg_xor['dist']) / params['BIN_RANGE'])"], {}), "((1.0 + reg_xor['dist']) / params['BIN_RANGE'])\n", (13544, 13591), True, 'import numpy as np\n'), ((13745, 13838), 'numpy.log', 'np.log', (["((reg_xor['short_count1'] + 1.0) * (reg_xor['short_count2'] + 1.0) /\n max_short_xor)"], {}), "((reg_xor['short_count1'] + 1.0) * (reg_xor['short_count2'] + 1.0) /\n max_short_xor)\n", (13751, 13838), True, 'import numpy as np\n'), ((11268, 11317), 'numpy.abs', 'np.abs', (["(reg_and['bin1_mid'] - reg_and['bin2_mid'])"], {}), "(reg_and['bin1_mid'] - reg_and['bin2_mid'])\n", (11274, 11317), True, 'import numpy as np\n'), ((13121, 13170), 'numpy.abs', 'np.abs', (["(reg_xor['bin1_mid'] - reg_xor['bin2_mid'])"], {}), "(reg_xor['bin1_mid'] - reg_xor['bin2_mid'])\n", (13127, 13170), True, 'import numpy as np\n'), ((8176, 8239), 'numpy.abs', 'np.abs', (["(count_data_and['bin1_mid'] - count_data_and['bin2_mid'])"], {}), "(count_data_and['bin1_mid'] - count_data_and['bin2_mid'])\n", (8182, 8239), True, 'import numpy as np\n'), ((8294, 8357), 'numpy.abs', 'np.abs', (["(count_data_and['bin1_mid'] - count_data_and['bin2_mid'])"], {}), "(count_data_and['bin1_mid'] - count_data_and['bin2_mid'])\n", (8300, 8357), True, 'import numpy as np\n'), ((8665, 8728), 'numpy.abs', 'np.abs', (["(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid'])"], {}), "(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid'])\n", (8671, 8728), True, 'import numpy as np\n'), ((8783, 8846), 'numpy.abs', 'np.abs', (["(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid'])"], {}), "(count_data_xor['bin1_mid'] - count_data_xor['bin2_mid'])\n", (8789, 8846), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SA - Multi
MDO - Assignment 4b.
MIT - Spring 2021
@author: cristianjunge
"""
import pandas as pd
import math
import random
import numpy as np
def ConstraintsCalculation(x, manure_limit):
    """Quadratic exterior penalty for the total-manure constraint.

    Returns a one-element list: 0 when total demand is within manure_limit,
    otherwise r1 * (excess)**2 (and a 'Violation!' message is printed).
    """
    r1 = 1000000  # penalty weight
    total_manure = x.Demand0.sum()
    if total_manure > manure_limit:
        print('Violation!')
        g1 = r1 * (total_manure - manure_limit) ** 2
    else:
        g1 = 0
    return [g1]
def ObjectiveFunction(x, Model, ModelParameters, Lambd, co2_price, manure_limit):
    """Penalised weighted-sum objective for the simulated-annealing search.

    Runs the plant Model, adds CO2-credit revenue to the net income, applies
    the constraint penalty and blends the scaled economic and CO2 criteria
    with weight Lambd. Returns (objective, income incl. credits, CO2 offset).
    """
    annual_net_income, _costs, _rev, _movements, net_co2_offset_kg_per_year = Model(x, *ModelParameters)
    # Total constraint penalty.
    pen = sum(ConstraintsCalculation(x, manure_limit))
    # Income including CO2-credit revenue (co2_price is per tonne, offset in kg).
    income_with_credits = annual_net_income + net_co2_offset_kg_per_year * co2_price / 1000
    # Weighted sum of the (normalised) economic and environmental criteria.
    obj_val = -(Lambd) * (income_with_credits) / 0.01 - (1 - Lambd) * net_co2_offset_kg_per_year / 1 + pen
    return obj_val, income_with_credits, net_co2_offset_kg_per_year
def PerturbationFun(x,params,N): ## Perturbs N dimensions and electricity
    """Perturb the demand of N random sites by a signed step (up to 5% of the
    manure limit), then resample the electricity fraction at N (independently
    chosen) sites; returns a new Demand0/PerElect DataFrame.

    :param x: DataFrame with columns Demand0 and PerElect (one row per site).
    :param params: unused here (kept for a uniform perturbation interface).
    :param N: number of dimensions (sites) to perturb.
    """
    manure_limit = 8870365.07
    perc_change = 0.05
    # Keep a pristine copy: the feasibility loop restarts from dem00.
    dem00 = np.array(x.Demand0)
    dem0 = np.array(x.Demand0)
    f_e0 = np.array(x.PerElect)
    dims = [random.randint(0,len(x)-1) for i in range(N)]
    for d in dims:
        # Signed step in [-1, 1) * 5% of the limit; demand is clipped at 0.
        dem0[d] = dem0[d] + (random.random()*2-1)*manure_limit*perc_change
        if dem0[d] < 0:
            dem0[d] = 0
    tot_dem = np.sum(dem0)
    feas_loop = 0
    # NOTE(review): this loop re-perturbs only `d`, the LAST index from the
    # for-loop above (the loop variable leaks) — presumably intentional as a
    # cheap repair step, but worth confirming; it may loop long when other
    # perturbed sites carry most of the excess (hence the 'Stuck' warning).
    while tot_dem > manure_limit:
        feas_loop = feas_loop + 1
        if feas_loop >10000:
            print('Stuck')
            print(dem0)
        dem0[d] = dem00[d] + (random.random()*2-1)*manure_limit*perc_change
        if dem0[d] < 0:
            dem0[d] = 0
        tot_dem = np.sum(dem0)
    # Resample the electricity fraction at N freshly drawn sites; sites with
    # zero demand get a zero fraction.
    dims = [random.randint(0,len(x)-1) for i in range(N)]
    for d in dims:
        if dem0[d] == 0:
            f_e0[d] = 0
        else:
            f_e0[d] = random.random()
    x_new = pd.DataFrame(np.transpose([dem0,f_e0]),x.index,['Demand0','PerElect'])
    return x_new
def PerturbationFun_loose(x, params, N, manure_limit):  ## Perturbs N dimensions and electricity
    """Resample the demand of N random sites uniformly in [0, manure_limit)
    (redrawing until the total stays feasible), then resample the electricity
    fraction at N independently chosen sites. Returns a new DataFrame.
    """
    demand = np.array(x.Demand0)
    elec_frac = np.array(x.PerElect)
    # Resample demand at N randomly chosen sites, redrawing until feasible.
    sites = [random.randint(0, len(x) - 1) for _ in range(N)]
    for site in sites:
        demand[site] = random.random() * manure_limit
        total = np.sum(demand)
        while total > manure_limit:
            demand[site] = random.random() * manure_limit
            total = np.sum(demand)
    # Resample the electricity fraction at N freshly drawn sites; a site with
    # zero demand keeps a zero fraction (no random draw is consumed).
    sites = [random.randint(0, len(x) - 1) for _ in range(N)]
    for site in sites:
        elec_frac[site] = 0 if demand[site] == 0 else random.random()
    return pd.DataFrame(np.transpose([demand, elec_frac]), x.index, ['Demand0', 'PerElect'])
def PerturbationFun_coupled(x, params, N):  ## Perturbs N dimensions and electricity
    """Resample demand AND electricity fraction at the SAME N random sites
    (demand uniform in [0, limit), redrawn until the total is feasible).
    """
    manure_limit = 8870365.07
    demand = np.array(x.Demand0)
    elec_frac = np.array(x.PerElect)
    sites = [random.randint(0, len(x) - 1) for _ in range(N)]
    for site in sites:
        demand[site] = random.random() * manure_limit
        total = np.sum(demand)
        # Redraw this site's demand until the total respects the limit.
        while total > manure_limit:
            demand[site] = random.random() * manure_limit
            total = np.sum(demand)
        # Couple the electricity fraction to the freshly drawn demand.
        elec_frac[site] = 0 if demand[site] == 0 else random.random()
    return pd.DataFrame(np.transpose([demand, elec_frac]), x.index, ['Demand0', 'PerElect'])
def PerturbationFunAll(x):
    """Perturb ALL sites at once: add a random increment (up to 1% of the
    manure limit) to every demand, resample every electricity fraction, and
    rescale the whole demand vector if the limit is exceeded.
    """
    manure_limit = 8870365.07
    perc_change = 0.01
    dem0 = np.array(x.Demand0)
    # NOTE(review): random.random() is in [0, 1), so every increment here is
    # non-negative — unlike PerturbationFun, which uses (random()*2-1) for a
    # signed step.  The demand[i] < 0 check below can then never fire; this
    # looks like a missing '*2-1', but confirm the intent before changing.
    incr = [random.random()*manure_limit*perc_change for i in range(len(x))]
    demand = dem0 + incr
    fract_elect = [random.random() for i in range(len(x))]
    for i in range(len(demand)):
        if demand[i]<0:
            demand[i] = 0
            fract_elect[i] = 0
    # Proportionally rescale all demands when the aggregate limit is exceeded.
    if np.sum(demand) > manure_limit:
        demand = demand*manure_limit/np.sum(demand)
    x_new = pd.DataFrame(np.transpose([demand,fract_elect]),x.index,['Demand0','PerElect'])
    return x_new
def cooling(params):
    """Expand a cooling specification into the full temperature schedule.

    params holds 'Type' (1 = exponential, 0 = linear), 'T0', 'T_end' and
    'dT'.  Exponential cooling multiplies by dT each step, linear cooling
    subtracts dT; one value past T_end is always appended.  Any other Type
    yields None (no schedule).
    """
    kind = params['Type']
    t_end = params['T_end']
    step = params['dT']
    schedule = [params['T0']]
    if kind == 1:  ## Exponential Cooling
        while schedule[-1] > t_end:
            schedule.append(step * schedule[-1])
        return schedule
    if kind == 0:  ## Linear Cooling
        while schedule[-1] > t_end:
            schedule.append(schedule[-1] - step)
        return schedule
def SA_algorithm(x0,Model,SAparams,ModelParameters,Lambd,co2_price,manure_limit):
    """Simulated annealing over site demand/electricity allocations.

    Runs Neq perturbations at each temperature of the cooling schedule,
    accepting improvements always and worse moves with the Metropolis
    probability exp(-dE/T).

    :param x0: initial Demand0/PerElect DataFrame.
    :param Model: plant model callable evaluated by ObjectiveFunction.
    :param SAparams: cooling-schedule dict (Type/T0/T_end/dT) plus 'Neq'.
    :param ModelParameters: extra positional arguments forwarded to Model.
    :param Lambd: weight between economic and CO2 criteria.
    :param co2_price: carbon price used for the credit revenue term.
    :param manure_limit: feasibility cap on total demand.
    :return: (X_hist, X_opt, E_hist, E_opt, E_bestV, N_it, x_best,
        NI_bestV, CO2_bestV) — full/accepted histories, best-so-far traces,
        iteration count and the best design found.
    """
    CoolingSchedule = cooling(SAparams)
    x = x0
    X_hist = [x0]
    X_opt = [x0]
    obj_val0,annual_net_income0,net_co2_offset_kg_per_year = ObjectiveFunction(x,Model,ModelParameters,Lambd,co2_price,manure_limit)
    E_hist = [obj_val0]
    E_opt = [obj_val0]
    E_best = obj_val0
    E_bestV = [obj_val0]
    NI_best = annual_net_income0
    NI_bestV = [annual_net_income0]
    CO2_best = net_co2_offset_kg_per_year
    CO2_bestV = [net_co2_offset_kg_per_year]
    x_best = x
    Neq = SAparams['Neq']
    N_it_max = len(CoolingSchedule)*Neq
    print('Number of iterations in cooling schedule:', N_it_max,'\n')
    #print('0 %',int(NI_best/1000),'kBRL')
    N_it = 0
    i = 0
    for T in CoolingSchedule:
        # Neq candidate moves per temperature level.
        for n in range(Neq):
            E,NI,CO2 = ObjectiveFunction(x,Model,ModelParameters,Lambd,co2_price,manure_limit)
            ## Perturbed x:
            #x_new = PerturbationFun(x,ModelParameters,1)
            x_new = PerturbationFun_loose(x,ModelParameters,2,manure_limit)
            #x_new = PerturbationFun_coupled(x,ModelParameters,2)
            #x_new = PerturbationFunAll(x)
            X_hist.append(x_new)
            E_new,NI_new,CO2_new = ObjectiveFunction(x_new,Model,ModelParameters,Lambd,co2_price,manure_limit)
            E_hist.append(E_new)
            dE = E_new - E
            # Accept strict improvements; track the global best separately.
            if E_new < E:
                x = x_new
                X_opt.append(x)
                E_opt.append(E_new)
                if E_new < E_best:
                    E_best = E_new
                    x_best = x_new
                    NI_best = NI_new
                    CO2_best = CO2_new
            # Metropolis criterion: accept a worse move with prob exp(-dE/T).
            elif math.exp(-dE/T) > random.random():
                x = x_new
                #X_opt.append(x)
                #E_opt.append(E_new)
            E_bestV.append(E_best)
            NI_bestV.append(NI_best)
            CO2_bestV.append(CO2_best)
            N_it = N_it +1
            i = print_progress(N_it/N_it_max*100,i,NI_best,CO2_best,NI_new,CO2_new)
    return X_hist,X_opt,E_hist,E_opt,E_bestV,N_it,x_best,NI_bestV,CO2_bestV
def print_progress(perc, i, NI_best, CO2_best, NI_new, CO2_new):
    """Print one status line each time perc crosses the next 5% mark.

    Returns the (possibly advanced) index of the last printed mark.
    """
    marks = np.arange(0, 105, 5)
    next_mark = marks[i + 1]
    if perc < next_mark:
        return i
    print(next_mark, '%', int(NI_best), 'BRL', int(CO2_best), 'kgCO2', int(NI_new), 'BRL', int(CO2_new), 'kgCO2')
    return i + 1
| [
"numpy.array",
"numpy.sum",
"random.random",
"numpy.transpose",
"numpy.arange",
"math.exp"
] | [((1550, 1569), 'numpy.array', 'np.array', (['x.Demand0'], {}), '(x.Demand0)\n', (1558, 1569), True, 'import numpy as np\n'), ((1583, 1602), 'numpy.array', 'np.array', (['x.Demand0'], {}), '(x.Demand0)\n', (1591, 1602), True, 'import numpy as np\n'), ((1616, 1636), 'numpy.array', 'np.array', (['x.PerElect'], {}), '(x.PerElect)\n', (1624, 1636), True, 'import numpy as np\n'), ((2773, 2792), 'numpy.array', 'np.array', (['x.Demand0'], {}), '(x.Demand0)\n', (2781, 2792), True, 'import numpy as np\n'), ((2806, 2826), 'numpy.array', 'np.array', (['x.PerElect'], {}), '(x.PerElect)\n', (2814, 2826), True, 'import numpy as np\n'), ((3614, 3633), 'numpy.array', 'np.array', (['x.Demand0'], {}), '(x.Demand0)\n', (3622, 3633), True, 'import numpy as np\n'), ((3647, 3667), 'numpy.array', 'np.array', (['x.PerElect'], {}), '(x.PerElect)\n', (3655, 3667), True, 'import numpy as np\n'), ((4342, 4361), 'numpy.array', 'np.array', (['x.Demand0'], {}), '(x.Demand0)\n', (4350, 4361), True, 'import numpy as np\n'), ((8048, 8068), 'numpy.arange', 'np.arange', (['(0)', '(105)', '(5)'], {}), '(0, 105, 5)\n', (8057, 8068), True, 'import numpy as np\n'), ((1900, 1912), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (1906, 1912), True, 'import numpy as np\n'), ((2575, 2601), 'numpy.transpose', 'np.transpose', (['[dem0, f_e0]'], {}), '([dem0, f_e0])\n', (2587, 2601), True, 'import numpy as np\n'), ((2991, 3003), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (2997, 3003), True, 'import numpy as np\n'), ((3398, 3424), 'numpy.transpose', 'np.transpose', (['[dem0, f_e0]'], {}), '([dem0, f_e0])\n', (3410, 3424), True, 'import numpy as np\n'), ((3832, 3844), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (3838, 3844), True, 'import numpy as np\n'), ((4153, 4179), 'numpy.transpose', 'np.transpose', (['[dem0, f_e0]'], {}), '([dem0, f_e0])\n', (4165, 4179), True, 'import numpy as np\n'), ((4493, 4508), 'random.random', 'random.random', ([], {}), '()\n', (4506, 4508), False, 'import 
random\n'), ((4669, 4683), 'numpy.sum', 'np.sum', (['demand'], {}), '(demand)\n', (4675, 4683), True, 'import numpy as np\n'), ((4795, 4830), 'numpy.transpose', 'np.transpose', (['[demand, fract_elect]'], {}), '([demand, fract_elect])\n', (4807, 4830), True, 'import numpy as np\n'), ((2318, 2330), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (2324, 2330), True, 'import numpy as np\n'), ((2529, 2544), 'random.random', 'random.random', ([], {}), '()\n', (2542, 2544), False, 'import random\n'), ((2939, 2954), 'random.random', 'random.random', ([], {}), '()\n', (2952, 2954), False, 'import random\n'), ((3150, 3162), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (3156, 3162), True, 'import numpy as np\n'), ((3352, 3367), 'random.random', 'random.random', ([], {}), '()\n', (3365, 3367), False, 'import random\n'), ((3780, 3795), 'random.random', 'random.random', ([], {}), '()\n', (3793, 3795), False, 'import random\n'), ((3991, 4003), 'numpy.sum', 'np.sum', (['dem0'], {}), '(dem0)\n', (3997, 4003), True, 'import numpy as np\n'), ((4107, 4122), 'random.random', 'random.random', ([], {}), '()\n', (4120, 4122), False, 'import random\n'), ((4746, 4760), 'numpy.sum', 'np.sum', (['demand'], {}), '(demand)\n', (4752, 4760), True, 'import numpy as np\n'), ((3086, 3101), 'random.random', 'random.random', ([], {}), '()\n', (3099, 3101), False, 'import random\n'), ((3927, 3942), 'random.random', 'random.random', ([], {}), '()\n', (3940, 3942), False, 'import random\n'), ((4379, 4394), 'random.random', 'random.random', ([], {}), '()\n', (4392, 4394), False, 'import random\n'), ((7444, 7461), 'math.exp', 'math.exp', (['(-dE / T)'], {}), '(-dE / T)\n', (7452, 7461), False, 'import math\n'), ((7462, 7477), 'random.random', 'random.random', ([], {}), '()\n', (7475, 7477), False, 'import random\n'), ((1760, 1775), 'random.random', 'random.random', ([], {}), '()\n', (1773, 1775), False, 'import random\n'), ((2159, 2174), 'random.random', 'random.random', ([], {}), '()\n', (2172, 
2174), False, 'import random\n')] |
'''
Function:
dqn agent
Author:
Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import cv2
import time
import torch
import random
import numpy as np
import torch.nn as nn
from collections import deque
from .network import DeepQNetwork
'''dqn agent'''
class DQNAgent():
    def __init__(self, mode, fps, checkpointspath, **kwargs):
        """Set up hyperparameters, replay memory and the Q-network.

        Args:
            mode: run mode tag stored on the instance (used by callers; not
                read inside this class — TODO confirm against caller).
            fps: frame-rate cap used to throttle the game loop in
                ``train``/``test``.
            checkpointspath: file path used by ``load``/``save`` for the
                network weights.
            **kwargs: ignored; accepted for interface compatibility.
        """
        self.mode = mode
        self.fps = fps
        self.checkpointspath = checkpointspath
        # define the necessary variables
        self.imagesize = (84, 84)          # preprocessed frame size (H, W)
        self.num_input_frames = 4          # stacked frames fed to the network
        self.num_actions = 3
        self.save_interval = 5000          # checkpoint every N iterations
        self.replay_memory_record = deque()  # experience replay buffer
        self.init_epsilon = 0.1            # exploration rate, annealed ...
        self.end_epsilon = 1e-4            # ... down to this floor in train()
        self.epsilon = self.init_epsilon
        self.batch_size = 32
        self.replay_memory_size = 1e4      # max replay entries before popleft
        self.discount_factor = 0.99        # gamma for the Bellman target
        self.pos_save_prob = 0.1           # prob. of storing a non-death frame
        self.num_observes = 3200           # warm-up iterations before learning
        self.num_explores = 1e5            # iterations over which epsilon decays
        self.input_image = None            # current stacked-frame state
        self.num_iters = 0
        self.num_games = 0
        self.score = 0
        self.max_score = 0
        self.use_cuda = torch.cuda.is_available()
        self.FloatTensor = torch.cuda.FloatTensor if self.use_cuda else torch.FloatTensor
        # define the model: move to GPU first, then apply weight init
        self.dqn_model = DeepQNetwork(self.imagesize, self.num_input_frames, self.num_actions)
        self.dqn_model = self.dqn_model.cuda() if self.use_cuda else self.dqn_model
        self.dqn_model.apply(DeepQNetwork.initWeights)
        self.optimizer = torch.optim.Adam(self.dqn_model.parameters(), lr=1e-4)
        self.loss_func = nn.MSELoss()
'''train the agent'''
    def train(self, game_cotroller):
        """Run the training loop forever: act, store transitions, learn.

        Alternates between epsilon-greedy action selection, stepping the game
        controller, recording transitions into replay memory, and (after the
        observation warm-up) one SGD step per frame on a sampled minibatch.
        Never returns; checkpoints are saved every ``save_interval`` iters.

        Args:
            game_cotroller: game wrapper whose ``run(action)`` returns
                ``(image, score, is_dead)``.
        """
        # bootstrap the state: take the no-op action and tile the first frame
        action = np.array([0] * self.num_actions)
        action[0] = 1
        image, score, is_dead = game_cotroller.run(action)
        image = self.preprocess(image, self.imagesize)
        self.input_image = np.tile(image, (self.num_input_frames, 1, 1))
        self.input_image = self.input_image.reshape(1, self.input_image.shape[0], self.input_image.shape[1], self.input_image.shape[2])
        last_time = 0
        while True:
            # randomly or use dqn_model to decide the action of T-Rex
            action = np.array([0] * self.num_actions)
            if random.random() <= self.epsilon:
                action[random.choice(list(range(self.num_actions)))] = 1
            else:
                self.dqn_model.eval()
                input_image = torch.from_numpy(self.input_image).type(self.FloatTensor)
                with torch.no_grad():
                    preds = self.dqn_model(input_image).cpu().data.numpy()
                action[np.argmax(preds)] = 1
                self.dqn_model.train()
            # perform the action
            image, score, is_dead = game_cotroller.run(action)
            image = self.preprocess(image, self.imagesize)
            image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
            input_image_prev = self.input_image.copy()
            # newest frame is prepended; the oldest stacked frame is dropped
            self.input_image = np.append(image, self.input_image[:, :self.num_input_frames-1, :, :], axis=1)
            # control the FPS (sleep if we are running faster than self.fps)
            if last_time:
                fps_now = 1 / (time.time() - last_time)
                if fps_now > self.fps:
                    time.sleep(1 / self.fps - 1 / fps_now)
            last_time = time.time()
            # get reward: -1 on death, small positive reward for surviving
            if is_dead:
                self.num_games += 1
                reward = -1
            else:
                reward = 0.1
            # record score
            self.score = score
            if score > self.max_score:
                self.max_score = score
            # save the game data for training dqn; deaths are always kept,
            # ordinary frames only with probability pos_save_prob
            if is_dead or random.random() <= self.pos_save_prob:
                self.replay_memory_record.append([input_image_prev, self.input_image, action, np.array([int(is_dead)]), np.array([reward])])
                if len(self.replay_memory_record) > self.replay_memory_size:
                    self.replay_memory_record.popleft()
            # train the model (only after the observation warm-up phase)
            loss = torch.Tensor([0]).type(self.FloatTensor)
            if self.num_iters > self.num_observes:
                self.optimizer.zero_grad()
                minibatch = random.sample(self.replay_memory_record, self.batch_size)
                states, states1, actions, is_deads, rewards = zip(*minibatch)
                states = torch.from_numpy(np.concatenate(states)).type(self.FloatTensor)
                states1 = torch.from_numpy(np.concatenate(states1)).type(self.FloatTensor)
                actions = torch.from_numpy(np.concatenate(actions)).type(self.FloatTensor).view(self.batch_size, -1)
                is_deads = torch.from_numpy(np.concatenate(is_deads)).type(self.FloatTensor)
                rewards = torch.from_numpy(np.concatenate(rewards)).type(self.FloatTensor)
                # Bellman target: r + gamma * max_a' Q(s', a'), zeroed on death
                with torch.no_grad():
                    targets = rewards + self.discount_factor * self.dqn_model(states1).max(-1)[0] * (1 - is_deads)
                    targets = targets.detach()
                preds = torch.sum(self.dqn_model(states) * actions, dim=1)
                loss = self.loss_func(preds, targets)
                loss.backward()
                self.optimizer.step()
            # update epsilon: linear anneal from init_epsilon to end_epsilon
            self.num_iters += 1
            if (self.epsilon > self.end_epsilon) and (self.num_iters > self.num_observes):
                self.epsilon -= (self.init_epsilon - self.end_epsilon) / self.num_explores
            # save the model
            if self.num_iters % self.save_interval == 0:
                self.save(self.checkpointspath)
            # print necessary info
            print('[State]: train, [Games]: %s, [Iter]: %s, [Score]: %s, [Max Score]: %s, [Epsilon]: %s, [Action]: %s, [Reward]: %s, [Loss]: %.3f' % (self.num_games, self.num_iters, self.score, self.max_score, self.epsilon, np.argmax(action), reward, loss.item()))
'''test the agent'''
    def test(self, game_cotroller):
        """Run the evaluation loop forever using the (mostly) greedy policy.

        Same game loop as ``train`` but without learning, replay storage or
        epsilon annealing; a tiny fixed exploration rate ``end_epsilon`` is
        kept. Never returns.

        Args:
            game_cotroller: game wrapper whose ``run(action)`` returns
                ``(image, score, is_dead)``.
        """
        # bootstrap the state: take the no-op action and tile the first frame
        action = np.array([0] * self.num_actions)
        action[0] = 1
        image, score, is_dead = game_cotroller.run(action)
        image = self.preprocess(image, self.imagesize)
        self.input_image = np.tile(image, (self.num_input_frames, 1, 1))
        self.input_image = self.input_image.reshape(1, self.input_image.shape[0], self.input_image.shape[1], self.input_image.shape[2])
        last_time = 0
        while True:
            # randomly or use dqn_model to decide the action of T-Rex
            action = np.array([0] * self.num_actions)
            if random.random() <= self.end_epsilon:
                action[random.choice(list(range(self.num_actions)))] = 1
            else:
                self.dqn_model.eval()
                input_image = torch.from_numpy(self.input_image).type(self.FloatTensor)
                with torch.no_grad():
                    preds = self.dqn_model(input_image).cpu().data.numpy()
                action[np.argmax(preds)] = 1
            # perform the action
            image, score, is_dead = game_cotroller.run(action)
            image = self.preprocess(image, self.imagesize)
            image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
            # newest frame is prepended; the oldest stacked frame is dropped
            self.input_image = np.append(image, self.input_image[:, :self.num_input_frames-1, :, :], axis=1)
            if is_dead: self.num_games += 1
            # control the FPS (sleep if we are running faster than self.fps)
            if last_time:
                fps_now = 1 / (time.time() - last_time)
                if fps_now > self.fps:
                    time.sleep(1 / self.fps - 1 / fps_now)
            last_time = time.time()
            # record score
            self.score = score
            if score > self.max_score:
                self.max_score = score
            # print necessary info
            print('[State]: test, [Games]: %s, [Score]: %s, [Max Score]: %s, [Epsilon]: %s, [Action]: %s' % (self.num_games, self.score, self.max_score, self.end_epsilon, np.argmax(action)))
'''load checkpoints'''
def load(self, checkpointspath):
print('Loading checkpoints from %s...' % checkpointspath)
self.dqn_model.load_state_dict(torch.load(checkpointspath))
'''save checkpoints'''
def save(self, checkpointspath):
print('Saving checkpoints into %s...' % checkpointspath)
torch.save(self.dqn_model.state_dict(), checkpointspath)
'''preprocess image'''
    def preprocess(self, image, size):
        """Resize a raw game frame, binarize it, and add a leading axis.

        Args:
            image: raw frame as a numpy array (as produced by the game
                controller — exact dtype/shape not visible here).
            size: target (width, height) passed to ``cv2.resize``.

        Returns:
            Array of shape (1, H, W) with values in {0, 255}.
        """
        image = cv2.resize(image, size)
        # binarize: any non-zero pixel becomes 255
        image[image > 0] = 255
        # add a leading axis so frames can be stacked along dim 0
        image = np.expand_dims(image, 0)
        return image | [
"numpy.tile",
"random.sample",
"collections.deque",
"torch.load",
"torch.Tensor",
"numpy.argmax",
"time.sleep",
"torch.from_numpy",
"numpy.append",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"random.random",
"numpy.expand_dims",
"numpy.concatenate",
"torch.no_grad",
... | [((565, 572), 'collections.deque', 'deque', ([], {}), '()\n', (570, 572), False, 'from collections import deque\n'), ((949, 974), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (972, 974), False, 'import torch\n'), ((1389, 1401), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1399, 1401), True, 'import torch.nn as nn\n'), ((1470, 1502), 'numpy.array', 'np.array', (['([0] * self.num_actions)'], {}), '([0] * self.num_actions)\n', (1478, 1502), True, 'import numpy as np\n'), ((1642, 1687), 'numpy.tile', 'np.tile', (['image', '(self.num_input_frames, 1, 1)'], {}), '(image, (self.num_input_frames, 1, 1))\n', (1649, 1687), True, 'import numpy as np\n'), ((5037, 5069), 'numpy.array', 'np.array', (['([0] * self.num_actions)'], {}), '([0] * self.num_actions)\n', (5045, 5069), True, 'import numpy as np\n'), ((5209, 5254), 'numpy.tile', 'np.tile', (['image', '(self.num_input_frames, 1, 1)'], {}), '(image, (self.num_input_frames, 1, 1))\n', (5216, 5254), True, 'import numpy as np\n'), ((7106, 7129), 'cv2.resize', 'cv2.resize', (['image', 'size'], {}), '(image, size)\n', (7116, 7129), False, 'import cv2\n'), ((7165, 7189), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (7179, 7189), True, 'import numpy as np\n'), ((1921, 1953), 'numpy.array', 'np.array', (['([0] * self.num_actions)'], {}), '([0] * self.num_actions)\n', (1929, 1953), True, 'import numpy as np\n'), ((2583, 2662), 'numpy.append', 'np.append', (['image', 'self.input_image[:, :self.num_input_frames - 1, :, :]'], {'axis': '(1)'}), '(image, self.input_image[:, :self.num_input_frames - 1, :, :], axis=1)\n', (2592, 2662), True, 'import numpy as np\n'), ((2829, 2840), 'time.time', 'time.time', ([], {}), '()\n', (2838, 2840), False, 'import time\n'), ((5488, 5520), 'numpy.array', 'np.array', (['([0] * self.num_actions)'], {}), '([0] * self.num_actions)\n', (5496, 5520), True, 'import numpy as np\n'), ((6081, 6160), 'numpy.append', 'np.append', (['image', 
'self.input_image[:, :self.num_input_frames - 1, :, :]'], {'axis': '(1)'}), '(image, self.input_image[:, :self.num_input_frames - 1, :, :], axis=1)\n', (6090, 6160), True, 'import numpy as np\n'), ((6362, 6373), 'time.time', 'time.time', ([], {}), '()\n', (6371, 6373), False, 'import time\n'), ((6831, 6858), 'torch.load', 'torch.load', (['checkpointspath'], {}), '(checkpointspath)\n', (6841, 6858), False, 'import torch\n'), ((1960, 1975), 'random.random', 'random.random', ([], {}), '()\n', (1973, 1975), False, 'import random\n'), ((3526, 3583), 'random.sample', 'random.sample', (['self.replay_memory_record', 'self.batch_size'], {}), '(self.replay_memory_record, self.batch_size)\n', (3539, 3583), False, 'import random\n'), ((5527, 5542), 'random.random', 'random.random', ([], {}), '()\n', (5540, 5542), False, 'import random\n'), ((2174, 2189), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2187, 2189), False, 'import torch\n'), ((2262, 2278), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (2271, 2278), True, 'import numpy as np\n'), ((2775, 2813), 'time.sleep', 'time.sleep', (['(1 / self.fps - 1 / fps_now)'], {}), '(1 / self.fps - 1 / fps_now)\n', (2785, 2813), False, 'import time\n'), ((3093, 3108), 'random.random', 'random.random', ([], {}), '()\n', (3106, 3108), False, 'import random\n'), ((3396, 3413), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (3408, 3413), False, 'import torch\n'), ((4080, 4095), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4093, 4095), False, 'import torch\n'), ((5745, 5760), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5758, 5760), False, 'import torch\n'), ((5833, 5849), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (5842, 5849), True, 'import numpy as np\n'), ((6308, 6346), 'time.sleep', 'time.sleep', (['(1 / self.fps - 1 / fps_now)'], {}), '(1 / self.fps - 1 / fps_now)\n', (6318, 6346), False, 'import time\n'), ((2107, 2141), 'torch.from_numpy', 'torch.from_numpy', 
(['self.input_image'], {}), '(self.input_image)\n', (2123, 2141), False, 'import torch\n'), ((2718, 2729), 'time.time', 'time.time', ([], {}), '()\n', (2727, 2729), False, 'import time\n'), ((3240, 3258), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (3248, 3258), True, 'import numpy as np\n'), ((4930, 4947), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (4939, 4947), True, 'import numpy as np\n'), ((5678, 5712), 'torch.from_numpy', 'torch.from_numpy', (['self.input_image'], {}), '(self.input_image)\n', (5694, 5712), False, 'import torch\n'), ((6251, 6262), 'time.time', 'time.time', ([], {}), '()\n', (6260, 6262), False, 'import time\n'), ((6660, 6677), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (6669, 6677), True, 'import numpy as np\n'), ((3680, 3702), 'numpy.concatenate', 'np.concatenate', (['states'], {}), '(states)\n', (3694, 3702), True, 'import numpy as np\n'), ((3758, 3781), 'numpy.concatenate', 'np.concatenate', (['states1'], {}), '(states1)\n', (3772, 3781), True, 'import numpy as np\n'), ((3943, 3967), 'numpy.concatenate', 'np.concatenate', (['is_deads'], {}), '(is_deads)\n', (3957, 3967), True, 'import numpy as np\n'), ((4023, 4046), 'numpy.concatenate', 'np.concatenate', (['rewards'], {}), '(rewards)\n', (4037, 4046), True, 'import numpy as np\n'), ((3837, 3860), 'numpy.concatenate', 'np.concatenate', (['actions'], {}), '(actions)\n', (3851, 3860), True, 'import numpy as np\n')] |
"""Generate traditional spatial folds"""
import os
import time
from dataclasses import dataclass, field
import pandas as pd
import numpy as np
from tqdm import tqdm
import geostatspy.geostats as geostats
from scipy.spatial.distance import cdist
from src.scv.scv import SpatialCV
ULTRACONSERVATIVE = "TraditionalSCV"
@dataclass
class TraditionalSCV(SpatialCV):
    """Represents the Traditional Spatial Cross-Validation.

    For each fold, training points falling inside a spatial buffer around the
    test set are removed; the buffer radius is derived from the empirical
    semivariogram of the target variable (the first lag whose semivariance
    exceeds the sill).

    Attributes
    ----------
    data: pd.DataFrame
        The spatial dataset to generate the folds (inherited from SpatialCV)
    fold_col: str
        The fold column name (inherited from SpatialCV)
    target_col: str
        The target attribute column name
    index_col: str
        The index column name
    meshblocks: pd.DataFrame
        The meshblocks regarding the spatial objects in the data
    index_meshblocks: str
        Name of the meshblocks index column
    sill_target: np.float64
        Cached variance of the target variable (the semivariogram sill)
    """

    target_col: str = "TARGET"
    index_col: str = "INDEX"
    meshblocks: pd.DataFrame = field(default_factory=pd.DataFrame)
    index_meshblocks: str = None
    sill_target: np.float64 = None

    def _calculate_sill(self):
        """Cache the sill: the variance of the target variable."""
        self.sill_target = self.data[self.target_col].var()

    def _calculate_buffer_size(self):
        """Return the removing-buffer radius estimated from the semivariogram.

        Computes an omnidirectional experimental semivariogram of the target
        and returns the first lag distance whose semivariance exceeds the
        sill.

        Raises
        ------
        ValueError
            If the semivariance never exceeds the sill, so no buffer size
            can be derived.
        """
        tmin = -9999.0
        tmax = 9999.0  # no trimming
        lag_dist = 0.3
        lag_tol = 0.15  # tolerance > 1/2 lag distance for smoothing
        nlag = 100
        bandh = 9999.9  # no bandwidth
        atol = 22.5  # 360-degree azimuth => effectively omnidirectional
        isill = 0  # standardize sill
        lag, gamma, _ = geostats.gamv(
            self.data,
            "x",
            "y",
            self.target_col,
            tmin,
            tmax,
            lag_dist,
            lag_tol,
            nlag,
            360,
            atol,
            bandh,
            isill,
        )
        # First lag whose semivariance rises above the sill.  (Named
        # ``candidates`` instead of the geostatistical term "range" to avoid
        # shadowing the builtin.)
        candidates = [(h, g) for h, g in zip(lag, gamma) if g > self.sill_target]
        if not candidates:
            raise ValueError(
                "Semivariance never exceeds the sill; cannot derive a buffer size."
            )
        return candidates[0][0]

    def _calculate_buffer(self, buffer_size):
        """Return train indexes closer than ``buffer_size`` to any test point."""
        test = np.reshape(self.test_data[["x", "y"]].values.tolist(), (-1, 2))
        training = np.reshape(self.train_data[["x", "y"]].values.tolist(), (-1, 2))
        dist = cdist(training, test)
        # distance from each training point to its nearest test point
        max_dist = pd.Series(
            np.amin(dist, axis=1), index=self.train_data.index, name="max_dist"
        )
        # Series.iteritems() was removed in pandas 2.0; items() is equivalent.
        return [idx for idx, value in max_dist.items() if value < buffer_size]

    def _generate_x_y(self):
        """Attach centroid coordinates (x, y) of each meshblock to the data."""
        self.meshblocks.index = self.meshblocks.index.astype(self.data.index.dtype)
        if not self.meshblocks.crs:
            self.meshblocks = self.meshblocks.set_crs(4326, allow_override=True)
        # Project to an equal-area CRS before taking centroids (centroids in a
        # geographic CRS are distorted), then convert back.
        self.meshblocks["x"] = (
            self.meshblocks.to_crs("+proj=cea")
            .centroid.to_crs(self.meshblocks.crs)
            .apply(lambda p: p.x)
        )
        self.meshblocks["y"] = (
            self.meshblocks.to_crs("+proj=cea")
            .centroid.to_crs(self.meshblocks.crs)
            .apply(lambda p: p.y)
        )
        self.data = self.data.join(self.meshblocks[["x", "y"]])

    def run(self) -> None:
        """Generate traditional (buffered) spatial folds."""
        start_time = time.time()
        name_folds = ULTRACONSERVATIVE
        self._make_folders(["folds", name_folds])
        self._generate_x_y()
        self._calculate_sill()
        buffer_size = self._calculate_buffer_size()
        for fold_name, test_data in tqdm(
            self.data.groupby(by=self.fold_col), desc="Creating folds"
        ):
            # Create fold folder
            self._mkdir(str(fold_name))
            # Initialize x, y and reduce
            self._split_data_test_train(test_data)
            # Remove training points inside the spatial buffer around the test set
            removing_buffer = self._calculate_buffer(buffer_size)
            self.train_data.drop(index=removing_buffer, inplace=True)
            # Save buffered data indexes
            self._save_buffered_indexes(removing_buffer)
            # Save fold index relation table
            self._save_fold_by_index_training()
            # Clean data
            self._clean_data(cols_drop=[self.fold_col, "x", "y"])
            # Update cur dir
            self.cur_dir = os.path.join(self._get_root_path(), "folds", name_folds)
        # Save execution time
        end_time = time.time()
        self._save_time(end_time, start_time)
        print(f"Execution time: {end_time-start_time} seconds")
| [
"numpy.reshape",
"numpy.amin",
"scipy.spatial.distance.cdist",
"geostatspy.geostats.gamv",
"time.time",
"dataclasses.field"
] | [((1020, 1055), 'dataclasses.field', 'field', ([], {'default_factory': 'pd.DataFrame'}), '(default_factory=pd.DataFrame)\n', (1025, 1055), False, 'from dataclasses import dataclass, field\n'), ((1769, 1886), 'geostatspy.geostats.gamv', 'geostats.gamv', (['self.data', '"""x"""', '"""y"""', 'self.target_col', 'tmin', 'tmax', 'lag_dist', 'lag_tol', 'nlag', '(360)', 'atol', 'bandh', 'isill'], {}), "(self.data, 'x', 'y', self.target_col, tmin, tmax, lag_dist,\n lag_tol, nlag, 360, atol, bandh, isill)\n", (1782, 1886), True, 'import geostatspy.geostats as geostats\n'), ((2274, 2299), 'numpy.reshape', 'np.reshape', (['test', '(-1, 2)'], {}), '(test, (-1, 2))\n', (2284, 2299), True, 'import numpy as np\n'), ((2382, 2411), 'numpy.reshape', 'np.reshape', (['training', '(-1, 2)'], {}), '(training, (-1, 2))\n', (2392, 2411), True, 'import numpy as np\n'), ((2427, 2448), 'scipy.spatial.distance.cdist', 'cdist', (['training', 'test'], {}), '(training, test)\n', (2432, 2448), False, 'from scipy.spatial.distance import cdist\n'), ((3434, 3445), 'time.time', 'time.time', ([], {}), '()\n', (3443, 3445), False, 'import time\n'), ((4613, 4624), 'time.time', 'time.time', ([], {}), '()\n', (4622, 4624), False, 'import time\n'), ((2491, 2512), 'numpy.amin', 'np.amin', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (2498, 2512), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import torch
import os.path as osp
import matplotlib.pyplot as plt
from mmdet import cv_core
from mmdet.cv_core import Config
from mmdet.datasets.builder import build_dataset, build_dataloader
def parse_args():
    """Parse command-line arguments for the dataset-browsing tool.

    Returns:
        argparse.Namespace with fields ``config``, ``repeat_count``,
        ``samples_per_gpu``, ``workers_per_gpu``, ``out_path`` and
        ``use_local``.
    """

    def str2bool(value):
        # ``type=bool`` is a classic argparse pitfall: any non-empty string
        # (including 'False') is truthy, so the flag could never be turned
        # off from the command line.  Parse common spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(  # passes over the datalayer; since the data is
        # augmented, repeating passes gives more accurate statistics on
        # small datasets
        '--repeat_count',
        type=int,
        default=1,
        help='datalayer repeat count')
    parser.add_argument(  # dataloader parameter
        '--samples_per_gpu',
        type=int,
        default=32,
        help='batch size')
    parser.add_argument(  # dataloader parameter
        '--workers_per_gpu',
        type=int,
        default=16,
        help='worker num')
    parser.add_argument(  # where the collected wh statistics are cached
        '--out_path',
        type=str,
        default='wh_data.npy',
        help='save wh data npy path')
    parser.add_argument(  # reuse the local cache instead of re-running the datalayer
        '--use_local',
        type=str2bool,
        default=True,
        help='is use save npy file')
    args = parser.parse_args()
    return args
def collect_wh_data(cfg, args, stop_count):
    """Iterate the (augmented) datalayer and collect GT box (w, h) pairs.

    Args:
        cfg: mmdet config object with ``cfg.data.train``.
        args: parsed CLI args providing ``repeat_count``, ``samples_per_gpu``
            and ``workers_per_gpu``.
        stop_count: cap on the number of batches per pass, so very large
            datasets do not take too long to traverse.

    Returns:
        np.ndarray of shape (N, 2) with one (width, height) row per GT box.
    """
    # Build through the dataset pipeline (not raw annotations) so that
    # aspect-ratio changes introduced by data augmentation are reflected.
    dataset = build_dataset(cfg.data.train)
    dataloader = build_dataloader(dataset, args.samples_per_gpu, args.workers_per_gpu)
    print('----开始遍历数据集----')
    wh_all = []
    for count in range(args.repeat_count):
        progress_bar = cv_core.ProgressBar(len(dataloader))
        for i, data_batch in enumerate(dataloader):
            if i > stop_count:
                break
            gt_bboxes = data_batch['gt_bboxes'].data[0]
            gt_bboxes = torch.cat(gt_bboxes, dim=0).numpy()
            if len(gt_bboxes) == 0:
                continue
            # boxes are (x1, y1, x2, y2)
            w = (gt_bboxes[:, 2] - gt_bboxes[:, 0])
            h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
            wh = np.stack((w, h), axis=1)
            wh_all.append(wh)
            progress_bar.update()
    wh_all = np.concatenate(wh_all, axis=0)
    print(wh_all.shape)
    return wh_all
def select_data(cfg, args, stop_count=100):
    """Return the (w, h) statistics, reusing the cached .npy file when allowed.

    If ``args.use_local`` is set and the cache file exists, load it;
    otherwise traverse the datalayer, cache the result and return it.
    """
    cache_path = args.out_path
    cache_usable = args.use_local and osp.isfile(cache_path)
    if cache_usable:
        print('---------从缓存文件中读取---------')
        return np.load(cache_path)
    print('--------重新获取数据---------')
    wh_all_data = collect_wh_data(cfg, args, stop_count)
    np.save(cache_path, wh_all_data)
    print('---------保存缓存文件--------')
    return wh_all_data
def statistics_hw_ratio(wh_all):
    """Plot the distribution of GT box height/width ratios.

    The h/w ratio matches the anchor ``ratio`` convention.  Ratios >= 1 and
    < 1 are binned and plotted separately because their natural resolutions
    differ (integer bins vs. 0.1-wide bins).

    Args:
        wh_all (np.ndarray): (N, 2) array of (width, height) pairs.
    """
    print('----------统计宽高分布---------')
    # partly based on: https://zhuanlan.zhihu.com/p/108885033
    hw_ratio = wh_all[:, 1] / wh_all[:, 0]  # anchor "ratio" is h/w
    # np.int was removed in NumPy 1.24; the builtin int keeps the original
    # truncating behaviour (loses some precision, as intended here).
    hw_ratio_larger = hw_ratio[hw_ratio >= 1].astype(int)
    hw_ratio_larger_uq = np.unique(hw_ratio_larger)
    box_hw_larger_count = [np.count_nonzero(hw_ratio_larger == i) for i in hw_ratio_larger_uq]
    plt.subplot(2, 1, 1)
    plt.title('hw_ratio>=1')
    plt.xlabel('hw_ratio')
    plt.ylabel('num')
    plt.bar(hw_ratio_larger_uq, box_hw_larger_count, 0.1)  # roughly 0-20
    hw_ratio_small = hw_ratio[hw_ratio < 1].round(1)
    hw_ratio_small_uq = np.unique(hw_ratio_small)
    box_hw_small_count = [np.count_nonzero(hw_ratio_small == i) for i in hw_ratio_small_uq]
    plt.subplot(2, 1, 2)
    plt.title('hw_ratio<1')
    plt.xlabel('hw_ratio')
    plt.ylabel('num')
    plt.bar(hw_ratio_small_uq, box_hw_small_count, 0.05)  # 0-1
    plt.show()
def statistics_hw_scale(wh_data):
    """Plot histograms of GT box width and height scales.

    Args:
        wh_data (np.ndarray): (N, 2) array of (width, height) pairs.
    """
    print('----------统计wh尺度分布---------')
    panels = [(1, 0, 'w_scale'), (2, 1, 'h_scale')]
    for panel_idx, column, label in panels:
        plt.subplot(2, 1, panel_idx)
        plt.xlabel(label)
        plt.ylabel('num')
        plt.hist(wh_data[:, column], bins=1000)
    plt.show()
def calc_kmean(wh_data):
    """Cluster the (w, h) pairs with K-means to derive anchor boxes.

    Args:
        wh_data (np.ndarray): (N, 2) array of (width, height) pairs.
    """
    print('----------统计anchor分布---------')
    num_anchors = 9  # number of anchors to derive
    kmeans = cv_core.Kmean(num_anchors)
    anchors_nx2 = kmeans.clusters(wh_data)
    print("K anchors:\n {}".format(anchors_nx2))
if __name__ == '__main__':
    # Entry point: parse CLI args, load the mmdet config, gather (w, h)
    # statistics (possibly from a local cache), then visualize the ratio and
    # scale distributions and run K-means anchor clustering.
    args = parse_args()
    cfg = Config.fromfile(args.config)
    wh_data = select_data(cfg, args)
    statistics_hw_ratio(wh_data)
    statistics_hw_scale(wh_data)
    calc_kmean(wh_data)
calc_kmean(wh_data)
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.count_nonzero",
"mmdet.datasets.builder.build_dataloader",
"numpy.save",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"mmdet.cv_core.Config.fromfile",
"mmdet.datasets.builder.build_dataset",
"numpy.stack",
"numpy.concatenate"... | [((262, 317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Browse a dataset"""'}), "(description='Browse a dataset')\n", (285, 317), False, 'import argparse\n'), ((1285, 1314), 'mmdet.datasets.builder.build_dataset', 'build_dataset', (['cfg.data.train'], {}), '(cfg.data.train)\n', (1298, 1314), False, 'from mmdet.datasets.builder import build_dataset, build_dataloader\n'), ((1356, 1425), 'mmdet.datasets.builder.build_dataloader', 'build_dataloader', (['dataset', 'args.samples_per_gpu', 'args.workers_per_gpu'], {}), '(dataset, args.samples_per_gpu, args.workers_per_gpu)\n', (1372, 1425), False, 'from mmdet.datasets.builder import build_dataset, build_dataloader\n'), ((2077, 2107), 'numpy.concatenate', 'np.concatenate', (['wh_all'], {'axis': '(0)'}), '(wh_all, axis=0)\n', (2091, 2107), True, 'import numpy as np\n'), ((2925, 2951), 'numpy.unique', 'np.unique', (['hw_ratio_larger'], {}), '(hw_ratio_larger)\n', (2934, 2951), True, 'import numpy as np\n'), ((3052, 3072), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3063, 3072), True, 'import matplotlib.pyplot as plt\n'), ((3077, 3101), 'matplotlib.pyplot.title', 'plt.title', (['"""hw_ratio>=1"""'], {}), "('hw_ratio>=1')\n", (3086, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hw_ratio"""'], {}), "('hw_ratio')\n", (3116, 3128), True, 'import matplotlib.pyplot as plt\n'), ((3133, 3150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num"""'], {}), "('num')\n", (3143, 3150), True, 'import matplotlib.pyplot as plt\n'), ((3155, 3208), 'matplotlib.pyplot.bar', 'plt.bar', (['hw_ratio_larger_uq', 'box_hw_larger_count', '(0.1)'], {}), '(hw_ratio_larger_uq, box_hw_larger_count, 0.1)\n', (3162, 3208), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3471), 'numpy.unique', 'np.unique', (['hw_ratio_small'], {}), '(hw_ratio_small)\n', (3455, 3471), True, 'import 
numpy as np\n'), ((3569, 3589), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3580, 3589), True, 'import matplotlib.pyplot as plt\n'), ((3594, 3617), 'matplotlib.pyplot.title', 'plt.title', (['"""hw_ratio<1"""'], {}), "('hw_ratio<1')\n", (3603, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hw_ratio"""'], {}), "('hw_ratio')\n", (3632, 3644), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num"""'], {}), "('num')\n", (3659, 3666), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3723), 'matplotlib.pyplot.bar', 'plt.bar', (['hw_ratio_small_uq', 'box_hw_small_count', '(0.05)'], {}), '(hw_ratio_small_uq, box_hw_small_count, 0.05)\n', (3678, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3746, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3875, 3895), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3886, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3921), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""w_scale"""'], {}), "('w_scale')\n", (3910, 3921), True, 'import matplotlib.pyplot as plt\n'), ((3926, 3943), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num"""'], {}), "('num')\n", (3936, 3943), True, 'import matplotlib.pyplot as plt\n'), ((3948, 3982), 'matplotlib.pyplot.hist', 'plt.hist', (['wh_data[:, 0]'], {'bins': '(1000)'}), '(wh_data[:, 0], bins=1000)\n', (3956, 3982), True, 'import matplotlib.pyplot as plt\n'), ((3987, 4007), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3998, 4007), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""h_scale"""'], {}), "('h_scale')\n", (4022, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4055), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num"""'], {}), "('num')\n", (4048, 4055), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4094), 'matplotlib.pyplot.hist', 'plt.hist', (['wh_data[:, 1]'], {'bins': '(1000)'}), '(wh_data[:, 1], bins=1000)\n', (4068, 4094), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4107, 4109), True, 'import matplotlib.pyplot as plt\n'), ((4231, 4260), 'mmdet.cv_core.Kmean', 'cv_core.Kmean', (['cluster_number'], {}), '(cluster_number)\n', (4244, 4260), False, 'from mmdet import cv_core\n'), ((4417, 4445), 'mmdet.cv_core.Config.fromfile', 'Config.fromfile', (['args.config'], {}), '(args.config)\n', (4432, 4445), False, 'from mmdet.cv_core import Config\n'), ((2416, 2446), 'numpy.save', 'np.save', (['out_path', 'wh_all_data'], {}), '(out_path, wh_all_data)\n', (2423, 2446), True, 'import numpy as np\n'), ((2579, 2596), 'numpy.load', 'np.load', (['out_path'], {}), '(out_path)\n', (2586, 2596), True, 'import numpy as np\n'), ((2979, 3017), 'numpy.count_nonzero', 'np.count_nonzero', (['(hw_ratio_larger == i)'], {}), '(hw_ratio_larger == i)\n', (2995, 3017), True, 'import numpy as np\n'), ((3498, 3535), 'numpy.count_nonzero', 'np.count_nonzero', (['(hw_ratio_small == i)'], {}), '(hw_ratio_small == i)\n', (3514, 3535), True, 'import numpy as np\n'), ((1975, 1999), 'numpy.stack', 'np.stack', (['(w, h)'], {'axis': '(1)'}), '((w, h), axis=1)\n', (1983, 1999), True, 'import numpy as np\n'), ((2284, 2304), 'os.path.isfile', 'osp.isfile', (['out_path'], {}), '(out_path)\n', (2294, 2304), True, 'import os.path as osp\n'), ((1759, 1786), 'torch.cat', 'torch.cat', (['gt_bboxes'], {'dim': '(0)'}), '(gt_bboxes, dim=0)\n', (1768, 1786), False, 'import torch\n')] |
from __future__ import absolute_import, division, print_function
import inspect
import itertools
import os
import warnings
from collections import defaultdict
from contextlib import contextmanager
from numbers import Number
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import scanpy as sc
import scipy as sp
import tensorflow as tf
from anndata._core.aligned_mapping import AxisArrays
from bigarray import MmapArrayWriter
from scipy.sparse import issparse
from scipy.stats import pearsonr, spearmanr
from six import string_types
from sklearn.cluster import MiniBatchKMeans, SpectralClustering
from sklearn.decomposition import IncrementalPCA
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import (mutual_info_classif,
mutual_info_regression)
from sklearn.mixture import GaussianMixture
from odin import visual as vs
from odin.search import diagonal_linear_assignment
from odin.stats import (describe, is_discrete, sparsity_percentage,
train_valid_test_split)
from odin.utils import (MPI, IndexedList, as_tuple, batching, cache_memory,
catch_warnings_ignore, cpu_count, is_primitive)
from odin.utils.crypto import md5_checksum
from sisua.data._single_cell_base import BATCH_SIZE, _OMICbase
from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC
from sisua.data.utils import (apply_artificial_corruption, get_library_size,
is_binary_dtype, is_categorical_dtype,
standardize_protein_name)
from sisua.label_threshold import ProbabilisticEmbedding
# ===========================================================================
# Helper
# ===========================================================================
def _threshold(x, nmin=2, nmax=5):
  """Binarize ``x`` via Gaussian-mixture model selection.

  Fits mixtures with ``nmin`` .. ``nmax - 1`` components, selects the model
  with the lowest AIC, and marks samples assigned to the component with the
  highest mean.

  Arguments:
    x : np.ndarray, 1-D or 2-D array of values.
    nmin : int, minimum number of mixture components (inclusive).
    nmax : int, maximum number of mixture components (exclusive).

  Return:
    np.ndarray of bool, True where the sample belongs to the
    highest-mean component.
  """
  if x.ndim == 1:
    x = x[:, np.newaxis]
  models = []
  aic = []
  for n_components in range(int(nmin), int(nmax)):
    gmm = GaussianMixture(n_components=n_components, random_state=1)
    gmm.fit(x)
    models.append(gmm)
    aic.append(gmm.aic(x))
  # select the best model: AIC is a penalized negative log-likelihood, so
  # LOWER is better (the original np.argmax picked the worst model).
  gmm = models[np.argmin(aic)]
  y = gmm.predict(x)
  idx = np.argmax(gmm.means_.ravel())
  # np.bool was removed in NumPy 1.24; the builtin bool is equivalent
  return (y == idx).astype(bool)
# ===========================================================================
# Main
# ===========================================================================
class _OMICanalyzer(_OMICbase):
def get_x_probs(self, omic=None):
r""" Return the probability embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[1]
def get_x_bins(self, omic=None):
r""" Return the binary embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[2]
# ******************** transformation ******************** #
  def corrupt(self,
              omic=None,
              dropout_rate=0.2,
              retain_rate=0.2,
              distribution='binomial',
              inplace=True,
              seed=1):
    r"""Apply artificial count corruption (dropout) to one or more OMICs.

    Arguments:
      omic : `sisua.data.OMIC`, which OMIC type(s) will be corrupted;
        defaults to ``self.current_omic``.
      dropout_rate : scalar (0.0 - 1.0), (default=0.2)
        how many entries (in percent) are selected for corruption.
      retain_rate : scalar (0.0 - 1.0), (default=0.2)
        how much percent of counts retain their original values.
      distribution : {'binomial', 'uniform'} (default='binomial')
        noise distribution used for the corruption.
      inplace : `bool` (default=True). Perform computation inplace or return
        a new `SingleCellOMIC` with the corrupted data.
      seed : `int` (default=1). Seed for the random state.
    """
    if omic is None:
      omic = self.current_omic
    om = self if inplace else self.copy()
    # NOTE: locals() is recorded for provenance — it captures all argument
    # values at this point, so local names here are part of the record.
    om._record('corrupt', locals())
    # no-op unless at least one of the rates is a proper fraction
    if not (0. < retain_rate < 1. or 0. < dropout_rate < 1.):
      return om
    for o in omic:
      # corruption is applied in-place on the underlying array (copy=False)
      apply_artificial_corruption(om.numpy(o),
                                  dropout=dropout_rate,
                                  retain_rate=retain_rate,
                                  distribution=distribution,
                                  copy=False,
                                  seed=seed)
      # refresh cached per-OMIC statistics after mutation
      om._calculate_statistics(o)
    return om
  def filter_highly_variable_genes(self,
                                   min_disp: float = 1.0,
                                   max_disp: float = np.inf,
                                   min_mean: float = 0.01,
                                   max_mean: float = 8,
                                   n_top_genes: int = 1000,
                                   n_bins: int = 20,
                                   flavor: str = 'seurat',
                                   inplace: bool = True):
    r""" Annotate highly variable genes [Satija15]_ [Zheng17]_.
    https://www.rdocumentation.org/packages/Seurat/versions/2.3.4/topics/FindVariableGenes

    `Expects logarithmized data`.

    Depending on `flavor`, this reproduces the R-implementations of Seurat
    [Satija15]_ and Cell Ranger [Zheng17]_.

    The normalized dispersion is obtained by scaling with the mean and standard
    deviation of the dispersions for genes falling into a given bin for mean
    expression of genes. This means that for each bin of mean expression, highly
    variable genes are selected.

    Arguments:
      min_disp : `float`, optional (default=1.0)
        If `n_top_genes` unequals `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_disp : `float`, optional (default=`np.inf`)
        If `n_top_genes` unequals `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      min_mean : `float`, optional (default=0.01)
        If `n_top_genes` unequals `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_mean : `float`, optional (default=8)
        If `n_top_genes` unequals `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      n_top_genes : {`float`, int`, `None`}, optional (default=1000)
        Number of highly-variable genes to keep., if the value is in (0, 1],
        intepret as percent of genes
      n_bins : `int`, optional (default: 20)
        Number of bins for binning the mean gene expression. Normalization is
        done with respect to each bin. If just a single gene falls into a bin,
        the normalized dispersion is artificially set to 1.
      flavor : `{'seurat', 'cell_ranger'}`, optional (default='seurat')
        Choose the flavor for computing normalized dispersion. In their default
        workflows, Seurat passes the cutoffs whereas Cell Ranger passes
        `n_top_genes`.
      inplace : `bool` (default=True)
        if False, copy the `SingleCellOMIC` and apply the vargene filter.

    Returns:
      New `SingleCellOMIC` with filtered features if `applying_filter=True`
      else assign `SingleCellOMIC.highly_variable_features` with following
      attributes.

      highly_variable : bool
        boolean indicator of highly-variable genes
      **means**
        means per gene
      **dispersions**
        dispersions per gene
      **dispersions_norm**
        normalized dispersions per gene

    Notes:
      Proxy to `scanpy.pp.highly_variable_genes`. It is recommended to do
      `log1p` normalization before if `flavor='seurat'`.
    """
    flavor = str(flavor).lower()
    # a fractional n_top_genes is interpreted as a percentage of all genes
    if n_top_genes is not None:
      if 0. < n_top_genes < 1.:
        n_top_genes = int(n_top_genes * self.n_vars)
    # prepare the data
    # this function will take the exponential of X all the time,
    # so non-logarithmized data might lead to overflow
    omics = self if inplace else self.copy()
    omics._record('filter_highly_variable_genes', locals())
    # subset=True drops the non-variable genes from `omics` directly
    sc.pp.highly_variable_genes(omics,
                                min_disp=min_disp,
                                max_disp=max_disp,
                                min_mean=min_mean,
                                max_mean=max_mean,
                                n_top_genes=n_top_genes,
                                n_bins=int(n_bins),
                                flavor=flavor,
                                subset=True,
                                inplace=False)
    omics._name += '_vargene'
    omics._n_vars = omics._X.shape[1]
    # recalculate library info
    omics._calculate_statistics()
    return omics
  def filter_genes(self,
                   min_counts=None,
                   max_counts=None,
                   min_cells=None,
                   max_cells=None,
                   inplace=True):
    r""" Filter features (columns) based on number of rows or counts.

    Keep columns that have at least ``[min_counts, max_counts]``
    or are expressed in at least ``[min_row_counts, max_row_counts]``

    Arguments:
      min_counts : {int, None} (default=None)
        Minimum number of counts required for a gene to pass filtering.
      max_counts : {int, None} (default=None)
        Maximum number of counts required for a gene to pass filtering.
      min_cells : {int, None} (default=None)
        Minimum number of cells expressed required for a feature to pass filtering.
      max_cells : {int, None} (default=None)
        Maximum number of cells expressed required for a feature to pass filtering.
      inplace : `bool` (default=True)
        if False, return new `SingleCellOMIC` with the filtered
        genes applied

    Returns:
      if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
      return new `SingleCellOMIC` with the new subset of genes

      gene_subset : `numpy.ndarray`
        Boolean index mask that does filtering. `True` means that the
        gene is kept. `False` means the gene is removed.
      number_per_gene : `numpy.ndarray`
        Depending on what was thresholded (`counts` or `cells`), the array
        stores `n_counts` or `n_cells` per gene.

    Note:
      Proxy method to Scanpy preprocessing
    """
    omics = self if inplace else self.copy()
    omics._record('filter_genes', locals())
    # scanpy removes the filtered variables (columns) from `omics` directly
    sc.pp.filter_genes(omics,
                       min_counts=min_counts,
                       max_counts=max_counts,
                       min_cells=min_cells,
                       max_cells=max_cells,
                       inplace=True)
    omics._name += '_filtergene'
    # keep the cached variable count in sync with the filtered matrix
    omics._n_vars = omics._X.shape[1]
    # recalculate library info
    omics._calculate_statistics()
    return omics
  def filter_cells(self,
                   min_counts=None,
                   max_counts=None,
                   min_genes=None,
                   max_genes=None,
                   inplace=True):
    r""" Filter examples (rows) based on number of features or counts.

    Keep rows that have at least ``[min_counts, max_counts]``
    or are expressed in at least ``[min_col_counts, max_col_counts]``

    Arguments:
      min_counts : {int, None} (default=None)
        Minimum number of counts required for a cell to pass filtering.
      max_counts : {int, None} (default=None)
        Maximum number of counts required for a cell to pass filtering.
      min_genes : {int, None} (default=None)
        Minimum number of genes expressed required for a cell to pass filtering.
      max_genes : {int, None} (default=None)
        Maximum number of genes expressed required for a cell to pass filtering.
      inplace : `bool` (default=True)
        if False, return new `SingleCellOMIC` with the filtered
        cells applied

    Returns:
      if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
      return new `SingleCellOMIC` with the new subset of cells

      cells_subset : numpy.ndarray
        Boolean index mask that does filtering. ``True`` means that the
        cell is kept. ``False`` means the cell is removed.
      number_per_cell : numpy.ndarray
        Depending on what was tresholded (``counts`` or ``genes``), the array stores
        ``n_counts`` or ``n_cells`` per gene.

    Note:
      Proxy method to Scanpy preprocessing
    """
    # scanpy messed up here, the obs was not updated with the new indices,
    # so only compute the mask here (inplace=False) and apply it ourselves
    # through `apply_indices` below
    cells_subset, number_per_cell = sc.pp.filter_cells(self,
                                                       min_counts=min_counts,
                                                       max_counts=max_counts,
                                                       min_genes=min_genes,
                                                       max_genes=max_genes,
                                                       inplace=False)
    omics = self if inplace else self.copy()
    omics._record('filter_cells', locals())
    # apply the boolean mask along the observation (row) axis
    omics.apply_indices(cells_subset, observation=True)
    omics._name += '_filtercell'
    # recalculate library info
    omics._calculate_statistics()
    return omics
  def probabilistic_embedding(self,
                              omic=None,
                              n_components_per_class=2,
                              positive_component=1,
                              log_norm=True,
                              clip_quartile=0.,
                              remove_zeros=True,
                              ci_threshold=-0.68,
                              seed=1,
                              pbe: Optional[ProbabilisticEmbedding] = None):
    r""" Fit a GMM on each feature column to get the probability or binary
    representation of the features

    Return:
      `ProbabilisticEmbedding` model
      np.ndarray : probabilities X
      np.ndarray : binary X

    Arguments:
      pbe : {`sisua.ProbabilisticEmbedding`, `None`}, optional pretrained
        instance of `ProbabilisticEmbedding`
    """
    if omic is None:
      omic = self.current_omic
    self._record('probabilistic_embedding', locals())
    # We turn-off default log_norm here since the data can be normalized
    # separately in advance.
    omic = OMIC.parse(omic)
    X = self.numpy(omic)
    # one GMM is fitted per feature column, which gets expensive
    # NOTE(review): the message reports self.shape[1] but the check is on
    # X.shape[1]; these may differ for non-current OMICs — confirm
    if X.shape[1] >= 100:
      warnings.warn("%d GMM will be trained!" % self.shape[1])
    name = omic.name
    pbe_name = '%s_pbe' % name
    prob_name = '%s_prob' % name
    bin_name = '%s_bin' % name
    label_name = self.get_labels_name(name)
    # already-binary data passes through unchanged; no model is fitted
    if is_binary_dtype(X):
      X_prob = X
      X_bin = X
      self.uns[pbe_name] = None
    else:
      if pbe is None:
        # fit (and cache) a new embedding unless one was cached earlier
        if pbe_name not in self.uns:
          pbe = ProbabilisticEmbedding(
              n_components_per_class=n_components_per_class,
              positive_component=positive_component,
              log_norm=log_norm,
              clip_quartile=clip_quartile,
              remove_zeros=remove_zeros,
              ci_threshold=ci_threshold,
              random_state=seed)
          with catch_warnings_ignore(ConvergenceWarning):
            pbe.fit(X)
          self.uns[pbe_name] = pbe
        else:
          pbe = self.uns[pbe_name]
      else:
        assert isinstance(pbe, ProbabilisticEmbedding), \
          'pbe, if given, must be instance of sisua.ProbabilisticEmbedding'
      # make prediction; clip away exact 0 and 1 to keep downstream
      # log-probabilities finite
      X_prob = np.clip(pbe.predict_proba(X), 0. + 1e-8, 1. - 1e-8)
      X_bin = pbe.predict(X)
    # store the data (only on first computation; cached entries win)
    if prob_name not in self.obsm:
      self.obsm[prob_name] = X_prob
      # derive hard labels from the most-probable variable, if possible
      if label_name not in self.obs and name + '_var' in self.uns:
        omic_id = self.get_var(name).index
        labels = [omic_id[i] for i in np.argmax(self.obsm[prob_name], axis=1)]
        self.obs[label_name] = pd.Categorical(labels)
    if bin_name not in self.obsm:
      self.obsm[bin_name] = X_bin
    return pbe, self.obsm[prob_name], self.obsm[bin_name]
def dimension_reduce(self,
omic=None,
n_components=100,
algo='pca',
random_state=1):
r""" Perform dimension reduction on given OMIC data. """
if omic is None:
omic = self.current_omic
self._record('dimension_reduce', locals())
algo = str(algo).lower().strip()
assert algo in ('pca', 'tsne', 'umap'), \
"Only support algorithm: 'pca', 'tsne', 'umap'; but given: '{algo}'"
omic = OMIC.parse(omic)
name = f"{omic.name}_{algo}"
## already transformed
if name in self.obsm:
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
X = self.numpy(omic)
n_components = min(n_components, X.shape[1])
### train new PCA model
if algo == 'pca':
X_ = np.empty(shape=(X.shape[0], n_components), dtype=X.dtype)
model = IncrementalPCA(n_components=n_components)
# fitting
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
model.partial_fit(chunk)
# transforming
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
X_[start:end] = model.transform(chunk)
### TSNE
elif algo == 'tsne':
from odin.ml import fast_tsne
X_ = fast_tsne(X, n_components=n_components, return_model=False)
model = None
## UMAP
elif algo == 'umap':
try:
import cuml
method = 'rapids'
except ImportError:
method = 'umap'
connectivities, distances, nn = self.neighbors(omic,
method='umap',
random_state=random_state)
self.uns['neighbors'] = nn
self.obsp['connectivities'] = connectivities
self.obsp['distances'] = distances
with catch_warnings_ignore(UserWarning):
sc.tl.umap(self, method=method, random_state=random_state, copy=False)
X_ = self.obsm['X_umap']
model = self.uns['umap']
del self.obsm['X_umap']
del self.uns['umap']
del self.uns['neighbors']
del self.obsp['connectivities']
del self.obsp['distances']
## store and return the result
self.obsm[name] = X_
# the model could be None, in case of t-SNE
self.uns[name] = model
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
  def expm1(self, omic=None, inplace=True):
    r""" Inverse of `log1p`: apply `exp(x) - 1` element-wise to the OMIC
    data, batch-by-batch, then refresh the cached statistics.

    Arguments:
      omic : `OMIC` type to transform; defaults to the current OMIC.
      inplace : `bool` (default=True). Modify this object or a copy.

    Returns:
      the transformed `SingleCellOMIC` (self, or a copy if inplace=False).
    """
    if omic is None:
      omic = self.current_omic
    om = self if inplace else self.copy()
    om._record('expm1', locals())
    # sparse input is transformed through its `.data` buffer so the
    # sparsity structure is untouched
    # NOTE(review): for sparse X, slicing `X[s:e]` copies, and the lambda
    # returns the `.data` ndarray rather than the matrix — confirm `numpy()`
    # returns a dense array here
    _expm1 = lambda x: (np.expm1(x.data, out=x.data)
                        if issparse(x) else np.expm1(x, out=x))
    X = om.numpy(omic)
    for s, e in batching(n=self.n_obs, batch_size=BATCH_SIZE):
      X[s:e] = _expm1(X[s:e])
    # statistics (library size etc.) are stale after the transform
    om._calculate_statistics(omic)
    return om
  def normalize(self,
                omic=None,
                total=False,
                log1p=False,
                scale=False,
                target_sum=None,
                exclude_highly_expressed=False,
                max_fraction=0.05,
                max_value=None,
                inplace=True):
    r""" If ``exclude_highly_expressed=True``, very highly expressed genes are
    excluded from the computation of the normalization factor (size factor)
    for each cell. This is meaningful as these can strongly influence
    the resulting normalized values for all other genes [1]_.

    Arguments:
      omic : `OMIC` type to normalize; defaults to the current OMIC.
      total : bool (default=False). Normalize counts per cell.
      log1p : bool (default=False). Logarithmize the data matrix.
      scale : bool (default=False). Scale data to unit variance and zero mean.
      target_sum : {float, None} (default=None)
        If None, after normalization, each observation (cell) has a
        total count equal to the median of total counts for
        observations (cells) before normalization.
      exclude_highly_expressed : bool (default=False)
        Exclude (very) highly expressed genes for the computation of the
        normalization factor (size factor) for each cell. A gene is considered
        highly expressed, if it has more than ``max_fraction`` of the total counts
        in at least one cell. The not-excluded genes will sum up to
        ``target_sum``.
      max_fraction : bool (default=0.05)
        If ``exclude_highly_expressed=True``, consider cells as highly expressed
        that have more counts than ``max_fraction`` of the original total counts
        in at least one cell.
      max_value : `float` or `None`, optional (default=`None`)
        Clip (truncate) to this value after scaling. If `None`, do not clip.
      inplace : `bool` (default=True)
        if False, return new `SingleCellOMIC` with the filtered
        cells applied

    References:
      Weinreb et al. (2016), SPRING: a kinetic interface for visualizing
        high dimensional single-cell expression data, bioRxiv.

    Note:
      Proxy to `scanpy.pp.normalize_total`,  `scanpy.pp.log1p` and
        `scanpy.pp.scale`
    """
    if omic is None:
      omic = self.current_omic
    om = self if inplace else self.copy()
    om._record('normalize', locals())
    # scanpy operates on `.X`; temporarily swap in the requested OMIC's
    # matrix and restore the original afterwards
    if omic != OMIC.transcriptomic:
      org_X = om._X
      om._X = om.numpy(omic)
    if total:
      sc.pp.normalize_total(om,
                            target_sum=target_sum,
                            exclude_highly_expressed=exclude_highly_expressed,
                            max_fraction=max_fraction,
                            inplace=True)
      # since the total counts is normalized, store the old library size
      om._name += '_total'
    if log1p:
      sc.pp.log1p(om, chunked=True, chunk_size=BATCH_SIZE, copy=False)
      om._name += '_log1p'
      # drop scanpy's bookkeeping entry; the '_log1p' name suffix records it
      del om.uns['log1p']
    # scaling may result negative total counts
    if scale:
      sc.pp.scale(om, zero_center=True, max_value=max_value, copy=False)
      om._name += '_scale'
    # write the normalized matrix back into obsm and restore the original X
    if omic != OMIC.transcriptomic:
      om.obsm[omic.name] = om.X
      om._X = org_X
    om._calculate_statistics(omic)
    return om
# ******************** metrics ******************** #
  def neighbors(self,
                omic=None,
                n_neighbors=12,
                n_pcs=100,
                knn=True,
                method='umap',
                metric='euclidean',
                random_state=1):
    r"""\
    Compute a neighborhood graph of observations [McInnes18]_.

    The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,
    which also provides a method for estimating connectivities of data points -
    the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`,
    connectivities are computed according to [Coifman05]_, in the adaption of
    [Haghverdi16]_.

    Arguments:
      n_neighbors : `int` (default=12)
        The size of local neighborhood (in terms of number of neighboring data
        points) used for manifold approximation. Larger values result in more
        global views of the manifold, while smaller values result in more local
        data being preserved. In general values should be in the range 2 to 100.
        If `knn` is `True`, number of nearest neighbors to be searched. If `knn`
        is `False`, a Gaussian kernel width is set to the distance of the
        `n_neighbors` neighbor.
      n_pcs : `int` (default=100)
        Use this many PCs. If n_pcs==0 use .X if use_rep is None.
      use_rep : {`None`, 'X'} or any key for .obsm, optional (default=None)
        Use the indicated representation. If None, the representation is
        chosen automatically: for .n_vars < 50, .X is used, otherwise
        'X_pca' is used. If 'X_pca' is not present, it's computed with
        default parameters.
      knn : `bool` (default=True)
        If `True`, use a hard threshold to restrict the number of neighbors to
        `n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian
        Kernel to assign low weights to neighbors more distant than the
        `n_neighbors` nearest neighbor.
      method : {{'umap', 'gauss', `rapids`}}  (default: `'umap'`)
        Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_
        with adaptive width [Haghverdi16]_) for computing connectivities.
        Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU
        only).
      metric : {`str`, `callable`} (default='euclidean')
        A known metric's name or a callable that returns a distance.

    Returns:
      returns neighbors object with the following:

      **[OMIC]_connectivities** : sparse matrix (dtype `float32`)
        Weighted adjacency matrix of the neighborhood graph of data
        points. Weights should be interpreted as connectivities.
      **[OMIC]_distances** : sparse matrix (dtype `float32`)
        Instead of decaying weights, this stores distances for each pair of
        neighbors.
      **[OMIC]_neighbors** : dictionary
        configuration and params of fitted k-NN.
    """
    if omic is None:
      omic = self.current_omic
    self._record('neighbors', locals())
    omic = OMIC.parse(omic)
    name = f"{omic.name}_neighbors"
    # compute the graph only once; results are cached under OMIC-scoped keys
    if name not in self.uns:
      omic_name = omic.name
      # reduce very high-dimensional OMICs with PCA before the kNN search
      if self.get_dim(omic) > 100:
        self.dimension_reduce(omic, algo='pca', random_state=random_state)
        omic_name = omic.name + '_pca'
      with catch_warnings_ignore(Warning):
        obj = sc.pp.neighbors(self,
                              n_neighbors=n_neighbors,
                              knn=knn,
                              method=method,
                              metric=metric,
                              n_pcs=int(n_pcs),
                              use_rep=omic_name,
                              random_state=random_state,
                              copy=True)
      # copy the results from the scanpy copy back under OMIC-scoped keys
      self.uns[name] = obj.uns['neighbors']
      self.obsp[f"{omic.name}_connectivities"] = obj.obsp['connectivities']
      self.obsp[f"{omic.name}_distances"] = obj.obsp['distances']
      del obj
    return (self.obsp[f"{omic.name}_connectivities"],
            self.obsp[f"{omic.name}_distances"], self.uns[name])
  def clustering(self,
                 omic=None,
                 n_clusters=None,
                 n_init='auto',
                 algo='kmeans',
                 matching_labels=True,
                 return_key=False,
                 random_state=1):
    r""" Perform clustering for given OMIC type, the cluster labels will be
    assigned to `obs` with key "{omic}_{algo}{n_clusters}"

    Arguments:
      n_clusters : {None, int, `OMIC`}. If None, use the dimension of `omic`;
        if an `OMIC` (or its name), use that OMIC's dimension.
      n_init : {'auto', int}. Number of initializations; 'auto' resolves to
        `3 * n_clusters`.
      algo : {'kmeans', 'knn', 'pca', 'tsne', 'umap'}.
        Clustering algorithm, in case algo in ('pca', 'tsne', 'umap'),
        perform dimension reduction before clustering.
      matching_labels : a Boolean. Matching OMIC var_names to appropriate
        clusters, only when `n_clusters` is string or OMIC type.
      return_key : a Boolean. If True, return the name of the labels
        stored in `.obs` instead of the labels array.
    """
    if omic is None:
      omic = self.current_omic
    self._record('clustering', locals())
    ## clustering algorithm
    algo = str(algo).strip().lower()
    ## input data
    omic = OMIC.parse(omic)
    cluster_omic = None
    # resolve n_clusters: None -> dimension of `omic`; OMIC/str -> dimension
    # of that OMIC; plain number -> used as-is
    if n_clusters is None:
      cluster_omic = omic
      n_clusters = self.get_dim(omic)
    elif isinstance(n_clusters, Number):
      n_clusters = int(n_clusters)
    else:
      cluster_omic = OMIC.parse(n_clusters)
      n_clusters = self.get_dim(cluster_omic)
    n_clusters = int(n_clusters)
    # 'auto' (or any non-number) falls through to 3 initializations per cluster
    n_init = int(n_init) if isinstance(n_init, Number) else \
      int(n_clusters) * 3
    ## check if output already extracted
    output_name = f"{omic.name}_{algo}{n_clusters}"
    if output_name in self.obs:
      return output_name if return_key else self.obs[output_name]
    ## warning
    if n_clusters > 50:
      warnings.warn(
          f"Found omic type:{cluster_omic} with {n_clusters} clusters")
    ## fit KMeans
    if algo in ('pca', 'tsne', 'umap', 'kmeans'):
      # 'pca'/'tsne'/'umap' cluster on the reduced embedding, 'kmeans' on raw
      if algo in ('pca', 'tsne', 'umap'):
        X = self.dimension_reduce(omic=omic, n_components=100, algo=algo)
      else:
        X = self.numpy(omic)
      model = MiniBatchKMeans(n_clusters=int(n_clusters),
                              max_iter=1000,
                              n_init=int(n_init),
                              compute_labels=False,
                              batch_size=BATCH_SIZE,
                              random_state=random_state)
      # better suffering the batch
      for s, e in batching(BATCH_SIZE, self.n_obs, seed=random_state):
        x = X[s:e]
        model.partial_fit(x)
      # make prediction
      labels = []
      for s, e in batching(BATCH_SIZE, self.n_obs):
        x = X[s:e]
        labels.append(model.predict(x))
      labels = np.concatenate(labels, axis=0)
    ## fit KNN
    elif algo == 'knn':
      connectivities, distances, nn = self.neighbors(omic)
      # cap n_neighbors by the sparsest row of the connectivity graph
      n_neighbors = min(nn['params']['n_neighbors'],
                        np.min(np.sum(connectivities > 0, axis=1)))
      model = SpectralClustering(n_clusters=n_clusters,
                                 random_state=random_state,
                                 n_init=n_init,
                                 affinity='precomputed_nearest_neighbors',
                                 n_neighbors=n_neighbors)
      labels = model.fit_predict(connectivities)
    else:
      raise NotImplementedError(algo)
    ## correlation matrix
    if cluster_omic is not None and matching_labels:
      _, X, _ = self.probabilistic_embedding(cluster_omic)
      # omic-cluster correlation matrix: summed probability of each variable
      # within each cluster
      corr = np.empty(shape=(X.shape[1], n_clusters), dtype=np.float32)
      for i, x in enumerate(X.T):
        for lab in range(n_clusters):
          mask = labels == lab
          corr[i, lab] = np.sum(x[mask])
      # assign each cluster index to its best-matching variable name
      ids = diagonal_linear_assignment(corr)
      varnames = self.get_var_names(cluster_omic)
      labels_to_omic = {lab: name for lab, name, in zip(ids, varnames)}
      labels = np.array([labels_to_omic[i] for i in labels])
    ## saving data and model
    self.obs[output_name] = pd.Categorical(labels)
    # self.uns[output_name] = model
    return output_name if return_key else labels
  def louvain(self,
              omic=None,
              resolution=None,
              restrict_to=None,
              adjacency=None,
              flavor='vtraag',
              directed=True,
              use_weights=False,
              partition_type=None,
              partition_kwargs={},
              random_state=1):
    r"""Cluster cells into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.

    Cluster cells using the Louvain algorithm [Blondel08]_ in the implementation
    of [Traag17]_. The Louvain algorithm has been proposed for single-cell
    analysis by [Levine15]_.

    This requires having ran :func:`~scanpy.pp.neighbors` or
    `~scanpy.external.pp.bbknn` first,
    or explicitly passing a ``adjacency`` matrix.

    Arguments:
      resolution
        For the default flavor (``'vtraag'``), you can provide a resolution
        (higher resolution means finding more and smaller clusters),
        which defaults to 1.0. See "Time as a resolution parameter" in [Lambiotte09]_.
      restrict_to
        Restrict the clustering to the categories within the key for sample
        annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
      key_added
        Key under which to add the cluster labels. (default: ``'louvain'``)
      adjacency
        Sparse adjacency matrix of the graph, defaults to
        ``adata.uns['neighbors']['connectivities']``.
      flavor : {``'vtraag'``, ``'igraph'``}
        Choose between to packages for computing the clustering.
        ``'vtraag'`` is much more powerful, and the default.
      directed
        Interpret the ``adjacency`` matrix as directed graph?
      use_weights
        Use weights from knn graph.
      partition_type
        Type of partition to use.
        Only a valid argument if ``flavor`` is ``'vtraag'``.
      partition_kwargs
        Key word arguments to pass to partitioning,
        if ``vtraag`` method is being used.
      random_state : Change the initialization of the optimization.

    Return:
      array `[n_samples]` : louvain community indices
      array `[n_samples]` : decoded louvain community labels
    """
    if omic is None:
      omic = self.current_omic
    self._record('louvain', locals())
    # the louvain package is an optional dependency
    try:
      import louvain
    except ImportError:
      raise ImportError("pip install louvain>=0.6 python-igraph")
    omic = OMIC.parse(omic)
    output_name = omic.name + '_louvain'
    if output_name not in self.obs:
      with catch_warnings_ignore(Warning):
        # scanpy reads the kNN graph from these fixed keys; stored
        # temporarily and removed again below
        connectivities, distances, nn = self.neighbors(omic)
        self.uns["neighbors"] = nn
        self.obsp["connectivities"] = connectivities
        self.obsp["distances"] = distances
        sc.tl.louvain(self,
                      resolution=resolution,
                      random_state=random_state,
                      restrict_to=restrict_to,
                      key_added=output_name,
                      adjacency=adjacency,
                      flavor=flavor,
                      directed=directed,
                      use_weights=use_weights,
                      partition_type=partition_type,
                      partition_kwargs=partition_kwargs,
                      copy=False)
        del self.uns['neighbors']
        del self.obsp["connectivities"]
        del self.obsp["distances"]
        # keep the fitted model under an OMIC-scoped key instead
        model = self.uns['louvain']
        del self.uns['louvain']
        self.uns[output_name] = model
    y = self.obs[output_name].to_numpy().astype(np.float32)
    ### decode louvain community into labels
    output_labels = f"{output_name}_labels"
    if output_labels not in self.obs:
      var_names = self.get_var_names(omic)
      # mapping community_index -> confident value for each variables
      # (float default 0.0 broadcasts against the probability vector `x`,
      # so each community accumulates a per-variable probability sum)
      confidence = defaultdict(float)
      for i, x in zip(y, self.get_x_probs(omic=omic)):
        confidence[int(i)] += x
      # thresholding the variables: name each community by joining the
      # variables whose accumulated probability passes the GMM threshold
      labels = {}
      for community, x in confidence.items():
        labels[community] = '_'.join(var_names[_threshold(x, 2, 5)])
      # store in obs
      self.obs[output_labels] = np.array([labels[i] for i in y])
    ### return
    y_labels = self.obs[output_labels].to_numpy()
    return y, y_labels
# ******************** Genes metrics and ranking ******************** #
def top_vars(self, n_vars=100, return_indices=False):
r""" The genes that are highly variated, high dispersion, less dropout
(i.e. smallest counts of zero-values), and appeared in most cells
will be returned.
Arguments:
return_indices : a Boolean. If True, return the index of top genes,
otherwise, return the genes' ID.
"""
self.calculate_quality_metrics()
fnorm = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
# prepare data
n_cells = fnorm(self.var['n_cells'].values)
zeros = fnorm(self.var['pct_dropout'].values)
dispersion = fnorm(self.var['dispersions'].values)
# higher is better TODO: check again what is the best strategy here
rating = n_cells + (1. - zeros) + dispersion
ids = np.argsort(rating)[::-1]
# indexing the genes
genes = np.arange(self.n_vars, dtype=np.int64) \
if return_indices else self.gene_id.values
genes = genes[ids][:n_vars]
return genes
  def rank_vars_groups(self,
                       n_vars=100,
                       group_by=OMIC.proteomic,
                       clustering='kmeans',
                       method='logreg',
                       corr_method='benjamini-hochberg',
                       max_iter=1000,
                       reference='rest'):
    r""" Rank genes for characterizing groups.

    Arguments:
      group_by : an `obs` key with categorical labels, or an `OMIC` type
        (which is then clustered / embedded to obtain labels).
      method : {'t-test_overestim_var', 't-test', 'wilcoxon', 'logreg'}
        - 't-test_overestim_var' overestimates variance of each group,
        - 't-test' uses t-test,
        - 'wilcoxon' uses Wilcoxon rank-sum,
        - 'logreg' uses logistic regression.
      corr_method :  p-value correction method.
        Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`.
      max_iter : an Integer.
        Only used for `method='logreg'`

    Return:
      the key to ranked groups in `.uns`
    """
    self._record('rank_vars_groups', locals())
    # group by is categorical variables in `obs`
    if str(group_by) in self.obs:
      pass
    else: # search in obsm, then clustering it
      group_by = OMIC.parse(group_by)
      if clustering is not None:
        # derive group labels by clustering the OMIC against itself
        group_by = self.clustering(group_by,
                                   n_clusters=group_by,
                                   algo=clustering,
                                   return_key=True)
      else:
        # derive group labels from the probabilistic embedding instead
        self.probabilistic_embedding(group_by)
        group_by = self.get_labels_name(group_by)
    ## check already ranked
    key = f'{self.get_current_omic().name}_{group_by}_rank'
    if key not in self.uns:
      kw = {}
      # logistic regression needs extra solver configuration
      if method == 'logreg':
        kw['max_iter'] = int(max_iter)
        kw['random_state'] = 1
        kw['solver'] = 'saga'
      sc.tl.rank_genes_groups(self,
                              groupby=group_by,
                              n_genes=int(n_vars),
                              use_raw=True,
                              method=method,
                              corr_method=corr_method,
                              reference=reference,
                              copy=False,
                              key_added=key,
                              **kw)
    return key
  def calculate_quality_metrics(self,
                                n_bins=20,
                                flavor='cell_ranger',
                                percent_top=None,
                                log1p=False):
    r"""\
    Calculate quality control metrics for both the observations and variable.
    Highly variable genes (i.e. variables) also calculated.

    Arguments:
      n_bins
        Number of bins for binning the mean gene expression. Normalization is
        done with respect to each bin. If just a single gene falls into a bin,
        the normalized dispersion is artificially set to 1. You'll be informed
        about this if you set `settings.verbosity = 4`.
      flavor
        Choose the flavor for computing normalized dispersion. In their default
        workflows, Seurat passes the cutoffs whereas Cell Ranger passes
        `n_top_genes`.
      percent_top : a list of Integer. Which proportions of top genes to cover.
        If empty or None don't calculate. Values are considered 1-indexed,
        percent_top=[50] finds cumulative proportion to the 50th most
        expressed gene.
      log1p : a Boolean. If True, perform log1p before calculating the quality
        metrics, then, expm1 after the calculation.

    Observation level metrics include:
      "n_[omic.name]". Number of genes with positive counts in a cell.
      "total_[omic.name]". Total number of counts for a cell.
      "pct_counts_in_top_50_[omic.name]". Cumulative percentage of counts for 50 most
        expressed genes in a cell.

    Variable level metrics include:
      "total". Sum of counts for a gene.
      "mean". Mean expression over all cells.
      "n_cells". Number of cells this expression is measured in.
      "pct_dropout". Percentage of cells this feature does not
        appear in.
      "highly_variable" : boolean indicator of highly-variable genes
      "dispersions" : dispersions per gene
      "dispersions_norm" : normalized dispersions per gene
    """
    self._record('calculate_quality_metrics', locals())
    cell_qc, gene_qc = sc.pp.calculate_qc_metrics(
        self,
        percent_top=as_tuple(percent_top, t=int)
        if percent_top is not None else None,
        inplace=False)
    name = self._current_omic_name
    # var quality (renamed from scanpy's *_by_counts columns)
    self.var['n_cells'] = gene_qc['n_cells_by_counts']
    self.var['mean'] = gene_qc['mean_counts']
    self.var['total'] = gene_qc['total_counts']
    self.var['pct_dropout'] = gene_qc['pct_dropout_by_counts']
    ## cell quality (keys are suffixed with the current OMIC name)
    self.obs['n_%s' % name] = cell_qc['n_genes_by_counts']
    self.obs['total_%s' % name] = cell_qc['total_counts']
    if percent_top is not None:
      for i in as_tuple(percent_top, t=int):
        self.obs['pct_counts_in_top_%d_%s' % (i, name)] = \
          cell_qc['pct_counts_in_top_%d_genes' % i]
    ## Expects logarithmized data.
    if log1p:
      sc.pp.log1p(self)
    ## highly_variable, means, dispersions, dispersions_norm
    results = sc.pp.highly_variable_genes(self,
                                          n_bins=min(int(n_bins),
                                                     self.X.shape[1]),
                                          flavor=flavor,
                                          subset=False,
                                          inplace=False)
    # NOTE(review): assumes each result row is ordered
    # (highly_variable, means, dispersions, dispersions_norm) — confirm
    # against the installed scanpy version
    self.var['highly_variable'] = [i[0] for i in results]
    self.var['dispersions'] = [i[2] for i in results]
    ## de-log (batched in-place expm1 to undo the temporary log1p)
    if log1p:
      X = self.X
      for s, e in batching(BATCH_SIZE, n=self.n_obs):
        x = X[s:e]
        if sp.sparse.issparse(x):
          np.expm1(x.data, out=x.data)
        else:
          np.expm1(x, out=x)
    return self
# ******************** other metrics ******************** #
@cache_memory
def get_marker_pairs(self,
omic1=OMIC.proteomic,
omic2=None,
var_names1=MARKER_ADTS,
var_names2=None,
threshold=None,
n=10,
most_correlated=False,
remove_duplicated=True):
r""" Return the most differentiated (or correlated) pairs within a
single OMIC (in case `omic2=None`) or between 2 different OMICs.
Arguments:
threshold : a Scalar.
The minimum correlation value to be selected (in case
`most_correlated=True`), otherwise, the maximum correlation value.
If None, set the value to infinity.
n : an Integer.
Number of pairs with smallest correlation to be selected.
If None, there is no limitation.
most_correlated : a Boolean (default: True)
if True, return most correlated pairs, otherwise, most un-correlated
pairs.
remove_duplicated : a Boolean (default: True)
if True, remove pairs with duplicated name for first OMIC (in case
`omic1 != omic2`), remove any pairs with duplicated name for both OMICs
(in case `omic1 == omic2`).
Return:
list of tuple (var1, var2) sorted in order of the most correlated or
un-correlated.
"""
is_same_omic = False
if omic2 is None:
omic2 = omic1
is_same_omic = True
ids1 = self.get_var_indices(omic1)
ids2 = self.get_var_indices(omic2)
# check var_names
if var_names1 is None:
var_names1 = self.get_var_names(omic1)
var_ids1 = set(ids1[i] for i in var_names1 if i in ids1)
if len(var_ids1) == 0:
raise ValueError(
f"No matching variables found from given var_names={var_names1}")
# for the second OMIC
if var_names2 is None:
var_names2 = self.get_var_names(omic2)
var_ids2 = set(ids2[i] for i in var_names2 if i in ids2)
if len(var_ids2) == 0:
raise ValueError(
f"No matching variables found from given var_names={var_names2}")
# filtering
var_names1 = self.get_var_names(omic1)
var_names2 = self.get_var_names(omic2)
scores = defaultdict(float)
for i1, i2, p, s in self.get_correlation(omic1=omic1, omic2=omic2):
if i1 not in var_ids1 or i2 not in var_ids2:
continue
name1 = var_names1[i1]
name2 = var_names2[i2]
key = (name1, name2)
if is_same_omic:
if name1 == name2:
continue
key = tuple(sorted(key))
x = p + s
if np.isnan(x):
x = 1.0
scores[key] += x
scores = sorted(scores.items(), key=lambda x: x[-1])
# most correlated
if most_correlated:
scores = scores[::-1]
# prepare filtering
threshold = (-np.inf if most_correlated else np.inf) \
if threshold is None else float(threshold)
n = np.inf if n is None else int(n)
fn = lambda x: (x / 2 > threshold) if most_correlated else \
(x / 2 < threshold)
# filtering
pairs = []
seen = {}
while True:
if len(scores) == 0 or len(pairs) >= n:
break
key, val = scores.pop(0)
if remove_duplicated:
if is_same_omic:
if any(k in seen for k in key):
continue
seen[key[0]] = 1
seen[key[1]] = 1
else:
if key[0] in seen:
continue
seen[key[0]] = 1
pairs.append(key)
return pairs
@cache_memory
def get_importance_matrix(self,
omic=OMIC.transcriptomic,
target_omic=OMIC.proteomic,
random_state=1):
r""" Using Tree Classifier to estimate the importance of each
`omic` for each `target_omic`.
"""
from odin.bay.vi.metrics import representative_importance_matrix
from odin.bay.vi.utils import discretizing
random_state = int(1)
omic1 = self.current_omic if omic is None else OMIC.parse(omic)
omic2 = self.current_omic if target_omic is None else OMIC.parse(
target_omic)
assert omic1 != omic2, "Mutual information only for 2 different OMIC type"
uns_key = f"importance_{omic.name}_{target_omic.name}"
if uns_key in self.uns:
return self.uns[uns_key]
# prepare data
X = self.numpy(omic1)
y = self.numpy(omic2)
if not is_discrete(y):
y = discretizing(y, n_bins=10, strategy='quantile')
# split the data 50:50 for train and test
rand = np.random.RandomState(random_state)
ids = rand.permutation(X.shape[0])
train = ids[:int(0.75 * X.shape[0])]
test = ids[int(0.75 * X.shape[0]):]
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
# calculate the importance matrix
matrix, train_acc, test_acc = representative_importance_matrix(
repr_train=X_train,
factor_train=y_train,
repr_test=X_test,
factor_test=y_test,
random_state=rand.randint(1e8))
self.uns[uns_key] = matrix
return matrix
@cache_memory
def get_mutual_information(self,
omic=OMIC.transcriptomic,
target_omic=OMIC.proteomic,
n_neighbors=3,
random_state=1):
r""" Estimate mutual information using k-NN
Return
a Matrix of shape `(n_features_omic, n_features_target_omic)`
estimation of mutual information between each feature in `omic` to
eacho feature in `target_omic`
"""
n_neighbors = int(n_neighbors)
random_state = int(1)
omic1 = self.current_omic if omic is None else OMIC.parse(omic)
omic2 = self.current_omic if target_omic is None else OMIC.parse(
target_omic)
assert omic1 != omic2, "Mutual information only for 2 different OMIC type"
uns_key = f"mutualinfo_{omic.name}_{target_omic.name}"
if uns_key in self.uns:
return self.uns[uns_key]
### prepare the data
x1 = self.numpy(omic1)
x2 = self.numpy(omic2)
n_om1 = x1.shape[1]
n_om2 = x2.shape[1]
discrete_features = np.array([is_discrete(i) for i in x1.T])
def _mi(i2):
y = x2[:, i2]
if is_discrete(y):
fn = mutual_info_classif
else:
fn = mutual_info_regression
return i2, fn(X=x1,
y=y,
discrete_features=discrete_features,
n_neighbors=n_neighbors,
random_state=random_state)
mi_mat = np.empty(shape=(n_om1, n_om2), dtype=np.float64)
for i2, mi in MPI(list(range(n_om2)),
func=_mi,
ncpu=max(1,
cpu_count() - 1),
batch=1):
mi_mat[:, i2] = mi
self.uns[uns_key] = mi_mat
return mi_mat
@cache_memory
def get_correlation(self, omic1=OMIC.transcriptomic, omic2=OMIC.proteomic):
r""" Calculate the correlation scores between two omic types
(could be different or the same OMIC).
Return:
list of tuple contained 4 scalars:
(omic1-idx, omic2-idx, pearson, spearman)
sorted in order from high to low average correlation
"""
omic1 = self.current_omic if omic1 is None else OMIC.parse(omic1)
omic2 = self.current_omic if omic2 is None else OMIC.parse(omic2)
uns_key = f"correlation_{omic1.name}_{omic2.name}"
if uns_key in self.uns:
return self.uns[uns_key]
### prepare the data
x1 = self.numpy(omic1)
x2 = self.numpy(omic2)
n_om1 = x1.shape[1]
n_om2 = x2.shape[1]
def _corr(ids):
results = []
if not isinstance(ids[0], tuple):
ids = [ids]
for i1, i2 in ids:
y1 = x1[:, i1]
y2 = x2[:, i2]
with catch_warnings_ignore(RuntimeWarning):
p = pearsonr(y1, y2)[0]
s = spearmanr(y1, y2, nan_policy='omit').correlation
# remove all NaNs
results.append((i1, i2, p, s))
yield results
### multiprocessing
jobs = list(itertools.product(range(n_om1), range(n_om2)))
ncpu = max(1, cpu_count() - 1)
results = []
for res in MPI(jobs, func=_corr, ncpu=ncpu, batch=len(jobs) // ncpu):
results += res
### sorted by decreasing order
all_correlations = sorted(
results,
key=lambda scores: (scores[-2] + scores[-1]) / 2,
)[::-1]
self.uns[uns_key] = all_correlations
return all_correlations
| [
"scanpy.pp.normalize_total",
"sklearn.cluster.SpectralClustering",
"sisua.label_threshold.ProbabilisticEmbedding",
"scanpy.pp.log1p",
"numpy.argsort",
"scanpy.pp.filter_genes",
"numpy.array",
"scanpy.tl.umap",
"odin.utils.batching",
"scipy.stats.pearsonr",
"numpy.random.RandomState",
"numpy.ar... | [((2010, 2068), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_components', 'random_state': '(1)'}), '(n_components=n_components, random_state=1)\n', (2025, 2068), False, 'from sklearn.mixture import GaussianMixture\n'), ((2175, 2189), 'numpy.argmax', 'np.argmax', (['aic'], {}), '(aic)\n', (2184, 2189), True, 'import numpy as np\n'), ((10254, 10385), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['omics'], {'min_counts': 'min_counts', 'max_counts': 'max_counts', 'min_cells': 'min_cells', 'max_cells': 'max_cells', 'inplace': '(True)'}), '(omics, min_counts=min_counts, max_counts=max_counts,\n min_cells=min_cells, max_cells=max_cells, inplace=True)\n', (10272, 10385), True, 'import scanpy as sc\n'), ((12353, 12484), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['self'], {'min_counts': 'min_counts', 'max_counts': 'max_counts', 'min_genes': 'min_genes', 'max_genes': 'max_genes', 'inplace': '(False)'}), '(self, min_counts=min_counts, max_counts=max_counts,\n min_genes=min_genes, max_genes=max_genes, inplace=False)\n', (12371, 12484), True, 'import scanpy as sc\n'), ((14094, 14110), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (14104, 14110), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((14393, 14411), 'sisua.data.utils.is_binary_dtype', 'is_binary_dtype', (['X'], {}), '(X)\n', (14408, 14411), False, 'from sisua.data.utils import apply_artificial_corruption, get_library_size, is_binary_dtype, is_categorical_dtype, standardize_protein_name\n'), ((16285, 16301), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (16295, 16301), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((18718, 18763), 'odin.utils.batching', 'batching', ([], {'n': 'self.n_obs', 'batch_size': 'BATCH_SIZE'}), '(n=self.n_obs, batch_size=BATCH_SIZE)\n', (18726, 18763), False, 'from odin.utils import MPI, 
IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((25182, 25198), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (25192, 25198), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((27265, 27281), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (27275, 27281), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((30202, 30224), 'pandas.Categorical', 'pd.Categorical', (['labels'], {}), '(labels)\n', (30216, 30224), True, 'import pandas as pd\n'), ((32704, 32720), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (32714, 32720), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((43775, 43793), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (43786, 43793), False, 'from collections import defaultdict\n'), ((46074, 46109), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (46095, 46109), True, 'import numpy as np\n'), ((48088, 48136), 'numpy.empty', 'np.empty', ([], {'shape': '(n_om1, n_om2)', 'dtype': 'np.float64'}), '(shape=(n_om1, n_om2), dtype=np.float64)\n', (48096, 48136), True, 'import numpy as np\n'), ((14168, 14224), 'warnings.warn', 'warnings.warn', (["('%d GMM will be trained!' % self.shape[1])"], {}), "('%d GMM will be trained!' 
% self.shape[1])\n", (14181, 14224), False, 'import warnings\n'), ((15627, 15649), 'pandas.Categorical', 'pd.Categorical', (['labels'], {}), '(labels)\n', (15641, 15649), True, 'import pandas as pd\n'), ((16630, 16687), 'numpy.empty', 'np.empty', ([], {'shape': '(X.shape[0], n_components)', 'dtype': 'X.dtype'}), '(shape=(X.shape[0], n_components), dtype=X.dtype)\n', (16638, 16687), True, 'import numpy as np\n'), ((16702, 16743), 'sklearn.decomposition.IncrementalPCA', 'IncrementalPCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (16716, 16743), False, 'from sklearn.decomposition import IncrementalPCA\n'), ((16784, 16818), 'odin.utils.batching', 'batching', (['BATCH_SIZE'], {'n': 'X.shape[0]'}), '(BATCH_SIZE, n=X.shape[0])\n', (16792, 16818), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((16989, 17023), 'odin.utils.batching', 'batching', (['BATCH_SIZE'], {'n': 'X.shape[0]'}), '(BATCH_SIZE, n=X.shape[0])\n', (16997, 17023), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((21281, 21426), 'scanpy.pp.normalize_total', 'sc.pp.normalize_total', (['om'], {'target_sum': 'target_sum', 'exclude_highly_expressed': 'exclude_highly_expressed', 'max_fraction': 'max_fraction', 'inplace': '(True)'}), '(om, target_sum=target_sum, exclude_highly_expressed=\n exclude_highly_expressed, max_fraction=max_fraction, inplace=True)\n', (21302, 21426), True, 'import scanpy as sc\n'), ((21655, 21719), 'scanpy.pp.log1p', 'sc.pp.log1p', (['om'], {'chunked': '(True)', 'chunk_size': 'BATCH_SIZE', 'copy': '(False)'}), '(om, chunked=True, chunk_size=BATCH_SIZE, copy=False)\n', (21666, 21719), True, 'import scanpy as sc\n'), ((21840, 21906), 'scanpy.pp.scale', 'sc.pp.scale', (['om'], {'zero_center': '(True)', 'max_value': 'max_value', 'copy': '(False)'}), '(om, zero_center=True, 
max_value=max_value, copy=False)\n', (21851, 21906), True, 'import scanpy as sc\n'), ((27930, 28005), 'warnings.warn', 'warnings.warn', (['f"""Found omic type:{cluster_omic} with {n_clusters} clusters"""'], {}), "(f'Found omic type:{cluster_omic} with {n_clusters} clusters')\n", (27943, 28005), False, 'import warnings\n'), ((28610, 28661), 'odin.utils.batching', 'batching', (['BATCH_SIZE', 'self.n_obs'], {'seed': 'random_state'}), '(BATCH_SIZE, self.n_obs, seed=random_state)\n', (28618, 28661), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((28771, 28803), 'odin.utils.batching', 'batching', (['BATCH_SIZE', 'self.n_obs'], {}), '(BATCH_SIZE, self.n_obs)\n', (28779, 28803), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((28879, 28909), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (28893, 28909), True, 'import numpy as np\n'), ((29714, 29772), 'numpy.empty', 'np.empty', ([], {'shape': '(X.shape[1], n_clusters)', 'dtype': 'np.float32'}), '(shape=(X.shape[1], n_clusters), dtype=np.float32)\n', (29722, 29772), True, 'import numpy as np\n'), ((29929, 29961), 'odin.search.diagonal_linear_assignment', 'diagonal_linear_assignment', (['corr'], {}), '(corr)\n', (29955, 29961), False, 'from odin.search import diagonal_linear_assignment\n'), ((30099, 30144), 'numpy.array', 'np.array', (['[labels_to_omic[i] for i in labels]'], {}), '([labels_to_omic[i] for i in labels])\n', (30107, 30144), True, 'import numpy as np\n'), ((34081, 34099), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (34092, 34099), False, 'from collections import defaultdict\n'), ((34408, 34440), 'numpy.array', 'np.array', (['[labels[i] for i in y]'], {}), '([labels[i] for i in y])\n', (34416, 34440), True, 'import numpy as np\n'), ((35370, 35388), 'numpy.argsort', 
'np.argsort', (['rating'], {}), '(rating)\n', (35380, 35388), True, 'import numpy as np\n'), ((35432, 35470), 'numpy.arange', 'np.arange', (['self.n_vars'], {'dtype': 'np.int64'}), '(self.n_vars, dtype=np.int64)\n', (35441, 35470), True, 'import numpy as np\n'), ((36696, 36716), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['group_by'], {}), '(group_by)\n', (36706, 36716), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((40493, 40521), 'odin.utils.as_tuple', 'as_tuple', (['percent_top'], {'t': 'int'}), '(percent_top, t=int)\n', (40501, 40521), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((40690, 40707), 'scanpy.pp.log1p', 'sc.pp.log1p', (['self'], {}), '(self)\n', (40701, 40707), True, 'import scanpy as sc\n'), ((41299, 41333), 'odin.utils.batching', 'batching', (['BATCH_SIZE'], {'n': 'self.n_obs'}), '(BATCH_SIZE, n=self.n_obs)\n', (41307, 41333), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((44146, 44157), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (44154, 44157), True, 'import numpy as np\n'), ((45556, 45572), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (45566, 45572), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((45631, 45654), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['target_omic'], {}), '(target_omic)\n', (45641, 45654), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((45943, 45957), 'odin.stats.is_discrete', 'is_discrete', (['y'], {}), '(y)\n', (45954, 45957), False, 'from odin.stats import describe, is_discrete, sparsity_percentage, train_valid_test_split\n'), ((45969, 46016), 'odin.bay.vi.utils.discretizing', 'discretizing', (['y'], {'n_bins': '(10)', 'strategy': '"""quantile"""'}), "(y, n_bins=10, 
strategy='quantile')\n", (45981, 46016), False, 'from odin.bay.vi.utils import discretizing\n'), ((47233, 47249), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic'], {}), '(omic)\n', (47243, 47249), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((47308, 47331), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['target_omic'], {}), '(target_omic)\n', (47318, 47331), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((47777, 47791), 'odin.stats.is_discrete', 'is_discrete', (['y'], {}), '(y)\n', (47788, 47791), False, 'from odin.stats import describe, is_discrete, sparsity_percentage, train_valid_test_split\n'), ((48828, 48845), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic1'], {}), '(omic1)\n', (48838, 48845), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((48898, 48915), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['omic2'], {}), '(omic2)\n', (48908, 48915), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((17248, 17307), 'odin.ml.fast_tsne', 'fast_tsne', (['X'], {'n_components': 'n_components', 'return_model': '(False)'}), '(X, n_components=n_components, return_model=False)\n', (17257, 17307), False, 'from odin.ml import fast_tsne\n'), ((18642, 18653), 'scipy.sparse.issparse', 'issparse', (['x'], {}), '(x)\n', (18650, 18653), False, 'from scipy.sparse import issparse\n'), ((18586, 18614), 'numpy.expm1', 'np.expm1', (['x.data'], {'out': 'x.data'}), '(x.data, out=x.data)\n', (18594, 18614), True, 'import numpy as np\n'), ((18659, 18677), 'numpy.expm1', 'np.expm1', (['x'], {'out': 'x'}), '(x, out=x)\n', (18667, 18677), True, 'import numpy as np\n'), ((25452, 25482), 'odin.utils.catch_warnings_ignore', 'catch_warnings_ignore', (['Warning'], {}), '(Warning)\n', (25473, 25482), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, 
cpu_count, is_primitive\n'), ((27504, 27526), 'sisua.data.const.OMIC.parse', 'OMIC.parse', (['n_clusters'], {}), '(n_clusters)\n', (27514, 27526), False, 'from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC\n'), ((29143, 29298), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'n_clusters', 'random_state': 'random_state', 'n_init': 'n_init', 'affinity': '"""precomputed_nearest_neighbors"""', 'n_neighbors': 'n_neighbors'}), "(n_clusters=n_clusters, random_state=random_state, n_init\n =n_init, affinity='precomputed_nearest_neighbors', n_neighbors=n_neighbors)\n", (29161, 29298), False, 'from sklearn.cluster import MiniBatchKMeans, SpectralClustering\n'), ((32809, 32839), 'odin.utils.catch_warnings_ignore', 'catch_warnings_ignore', (['Warning'], {}), '(Warning)\n', (32830, 32839), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((33041, 33333), 'scanpy.tl.louvain', 'sc.tl.louvain', (['self'], {'resolution': 'resolution', 'random_state': 'random_state', 'restrict_to': 'restrict_to', 'key_added': 'output_name', 'adjacency': 'adjacency', 'flavor': 'flavor', 'directed': 'directed', 'use_weights': 'use_weights', 'partition_type': 'partition_type', 'partition_kwargs': 'partition_kwargs', 'copy': '(False)'}), '(self, resolution=resolution, random_state=random_state,\n restrict_to=restrict_to, key_added=output_name, adjacency=adjacency,\n flavor=flavor, directed=directed, use_weights=use_weights,\n partition_type=partition_type, partition_kwargs=partition_kwargs, copy=\n False)\n', (33054, 33333), True, 'import scanpy as sc\n'), ((41365, 41386), 'scipy.sparse.issparse', 'sp.sparse.issparse', (['x'], {}), '(x)\n', (41383, 41386), True, 'import scipy as sp\n'), ((47699, 47713), 'odin.stats.is_discrete', 'is_discrete', (['i'], {}), '(i)\n', (47710, 47713), False, 'from odin.stats import describe, is_discrete, sparsity_percentage, 
train_valid_test_split\n'), ((49672, 49683), 'odin.utils.cpu_count', 'cpu_count', ([], {}), '()\n', (49681, 49683), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((14563, 14805), 'sisua.label_threshold.ProbabilisticEmbedding', 'ProbabilisticEmbedding', ([], {'n_components_per_class': 'n_components_per_class', 'positive_component': 'positive_component', 'log_norm': 'log_norm', 'clip_quartile': 'clip_quartile', 'remove_zeros': 'remove_zeros', 'ci_threshold': 'ci_threshold', 'random_state': 'seed'}), '(n_components_per_class=n_components_per_class,\n positive_component=positive_component, log_norm=log_norm, clip_quartile\n =clip_quartile, remove_zeros=remove_zeros, ci_threshold=ci_threshold,\n random_state=seed)\n', (14585, 14805), False, 'from sisua.label_threshold import ProbabilisticEmbedding\n'), ((15557, 15596), 'numpy.argmax', 'np.argmax', (['self.obsm[prob_name]'], {'axis': '(1)'}), '(self.obsm[prob_name], axis=1)\n', (15566, 15596), True, 'import numpy as np\n'), ((16884, 16899), 'scipy.sparse.issparse', 'issparse', (['chunk'], {}), '(chunk)\n', (16892, 16899), False, 'from scipy.sparse import issparse\n'), ((17089, 17104), 'scipy.sparse.issparse', 'issparse', (['chunk'], {}), '(chunk)\n', (17097, 17104), False, 'from scipy.sparse import issparse\n'), ((29901, 29916), 'numpy.sum', 'np.sum', (['x[mask]'], {}), '(x[mask])\n', (29907, 29916), True, 'import numpy as np\n'), ((35030, 35039), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (35036, 35039), True, 'import numpy as np\n'), ((35044, 35053), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (35050, 35053), True, 'import numpy as np\n'), ((35056, 35065), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (35062, 35065), True, 'import numpy as np\n'), ((39946, 39974), 'odin.utils.as_tuple', 'as_tuple', (['percent_top'], {'t': 'int'}), '(percent_top, t=int)\n', (39954, 39974), False, 'from odin.utils import MPI, IndexedList, as_tuple, 
batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((41398, 41426), 'numpy.expm1', 'np.expm1', (['x.data'], {'out': 'x.data'}), '(x.data, out=x.data)\n', (41406, 41426), True, 'import numpy as np\n'), ((41451, 41469), 'numpy.expm1', 'np.expm1', (['x'], {'out': 'x'}), '(x, out=x)\n', (41459, 41469), True, 'import numpy as np\n'), ((49341, 49378), 'odin.utils.catch_warnings_ignore', 'catch_warnings_ignore', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (49362, 49378), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((14907, 14948), 'odin.utils.catch_warnings_ignore', 'catch_warnings_ignore', (['ConvergenceWarning'], {}), '(ConvergenceWarning)\n', (14928, 14948), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((17814, 17848), 'odin.utils.catch_warnings_ignore', 'catch_warnings_ignore', (['UserWarning'], {}), '(UserWarning)\n', (17835, 17848), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((17858, 17928), 'scanpy.tl.umap', 'sc.tl.umap', (['self'], {'method': 'method', 'random_state': 'random_state', 'copy': '(False)'}), '(self, method=method, random_state=random_state, copy=False)\n', (17868, 17928), True, 'import scanpy as sc\n'), ((29092, 29126), 'numpy.sum', 'np.sum', (['(connectivities > 0)'], {'axis': '(1)'}), '(connectivities > 0, axis=1)\n', (29098, 29126), True, 'import numpy as np\n'), ((48276, 48287), 'odin.utils.cpu_count', 'cpu_count', ([], {}), '()\n', (48285, 48287), False, 'from odin.utils import MPI, IndexedList, as_tuple, batching, cache_memory, catch_warnings_ignore, cpu_count, is_primitive\n'), ((49394, 49410), 'scipy.stats.pearsonr', 'pearsonr', (['y1', 'y2'], {}), '(y1, y2)\n', (49402, 49410), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((49428, 
49464), 'scipy.stats.spearmanr', 'spearmanr', (['y1', 'y2'], {'nan_policy': '"""omit"""'}), "(y1, y2, nan_policy='omit')\n", (49437, 49464), False, 'from scipy.stats import pearsonr, spearmanr\n')] |
# -*- coding: utf-8 -*-
"""
Author: <NAME>, 2022
Purpose: Plot intrinsic carrier concentration n_i with different models in cm^-3
         Plot electron n and hole concentration p in cm^-3
Requires: carrier_concentrations.py
"""
import carrier_concentrations as cc
import numpy as np
import matplotlib.pyplot as mpl_p
#==============================================================================
# simulated temperature range in K
T_axis = np.arange(100., 605., 5.)
# donor / acceptor doping concentrations (used by the Kimmerle model)
N_D = 1.0e18  # 9e19
N_A = 1.0e6   # 1e20

# conversion factor m^-3 -> cm^-3
_M3_TO_CM3 = 1.0e-6

# keep named references: Misiakos provides tabulated experimental data that
# is plotted separately, Kimmerle uses a different call signature
NiMisiakosTsamakisO = cc.MisiakosTsamakis()
NiKimmerleO = cc.Kimmerle()

# (n_i callable, plot style, legend label) -- plotted in this exact order;
# the registry replaces ten copy-pasted compute/append/plot blocks
_MODELS = [
    (cc.MorinMaita().n_i, 'bs', r'$n_i$ Morin et al. (1954)'),
    (cc.PutleyMitchell().n_i, 'b*', r'$n_i$ Putley et al. (1958)'),
    (cc.Barber().n_i, 'b2', r'$n_i$ Barber (1967)'),
    (cc.Slotboom().n_i, 'b.', r'$n_i$ Slotboom (1976)'),
    (cc.Wasserab().n_i, 'b--', r'$n_i$ Wasserab (1977)'),
    (cc.Green1990().n_i, 'g.', r'$n_i$ Green (1990)'),
    (cc.SproulGreen1991().n_i, 'g--', r'$n_i$ Sproul et al. (1991)'),
    (cc.SproulGreen1993().n_i, 'g', r'$n_i$ Sproul et al. (1993)'),
    (NiMisiakosTsamakisO.n_i, 'b', r'$n_i$ Misiakos et al. (1993)'),
    # Kimmerle returns (n_i, ...) for a doped sample
    (lambda T: NiKimmerleO.np(T, N_D, N_A)[0], 'r.', r'$n_i$ Kimmerle (2011)'),
]
#-----------------------------------------------------------------------------
# plotting results:
fig1 = mpl_p.figure(1)
ax1 = fig1.add_subplot(111)
ax1.set_xlabel(r'Temperatur $T / K$')
ax1.set_ylabel(r'Intrinsische Ladungsträgerdichte $n_i / \frac{1}{cm^3}$')
# experimental reference data points
ax1.semilogy(NiMisiakosTsamakisO.T_MisiakosTsamakis,
             NiMisiakosTsamakisO.n_i_MisiakosTsamakis,
             'ko', label=r'$n_{i, exp}$ Misiakos et al. (1993)')
# model curves, converted to cm^-3
for n_i_func, style, label in _MODELS:
    n_i_values = [n_i_func(T_sim) * _M3_TO_CM3 for T_sim in T_axis]
    ax1.semilogy(T_axis, n_i_values, style, label=label)
ax1.legend()
mpl_p.ylim(1.0, 1.0e16)
mpl_p.xlim(100.0, 600.0)
mpl_p.show()
| [
"carrier_concentrations.MorinMaita",
"carrier_concentrations.MisiakosTsamakis",
"carrier_concentrations.Kimmerle",
"matplotlib.pyplot.xlim",
"carrier_concentrations.SproulGreen1993",
"carrier_concentrations.SproulGreen1991",
"carrier_concentrations.Barber",
"matplotlib.pyplot.figure",
"carrier_conce... | [((423, 451), 'numpy.arange', 'np.arange', (['(100.0)', '(605.0)', '(5.0)'], {}), '(100.0, 605.0, 5.0)\n', (432, 451), True, 'import numpy as np\n'), ((511, 526), 'carrier_concentrations.MorinMaita', 'cc.MorinMaita', ([], {}), '()\n', (524, 526), True, 'import carrier_concentrations as cc\n'), ((547, 566), 'carrier_concentrations.PutleyMitchell', 'cc.PutleyMitchell', ([], {}), '()\n', (564, 566), True, 'import carrier_concentrations as cc\n'), ((579, 590), 'carrier_concentrations.Barber', 'cc.Barber', ([], {}), '()\n', (588, 590), True, 'import carrier_concentrations as cc\n'), ((605, 618), 'carrier_concentrations.Slotboom', 'cc.Slotboom', ([], {}), '()\n', (616, 618), True, 'import carrier_concentrations as cc\n'), ((633, 646), 'carrier_concentrations.Wasserab', 'cc.Wasserab', ([], {}), '()\n', (644, 646), True, 'import carrier_concentrations as cc\n'), ((662, 676), 'carrier_concentrations.Green1990', 'cc.Green1990', ([], {}), '()\n', (674, 676), True, 'import carrier_concentrations as cc\n'), ((698, 718), 'carrier_concentrations.SproulGreen1991', 'cc.SproulGreen1991', ([], {}), '()\n', (716, 718), True, 'import carrier_concentrations as cc\n'), ((740, 760), 'carrier_concentrations.SproulGreen1993', 'cc.SproulGreen1993', ([], {}), '()\n', (758, 760), True, 'import carrier_concentrations as cc\n'), ((783, 804), 'carrier_concentrations.MisiakosTsamakis', 'cc.MisiakosTsamakis', ([], {}), '()\n', (802, 804), True, 'import carrier_concentrations as cc\n'), ((819, 832), 'carrier_concentrations.Kimmerle', 'cc.Kimmerle', ([], {}), '()\n', (830, 832), True, 'import carrier_concentrations as cc\n'), ((2291, 2306), 'matplotlib.pyplot.figure', 'mpl_p.figure', (['(1)'], {}), '(1)\n', (2303, 2306), True, 'import matplotlib.pyplot as mpl_p\n'), ((3439, 3461), 'matplotlib.pyplot.ylim', 'mpl_p.ylim', (['(1.0)', '(1e+16)'], {}), '(1.0, 1e+16)\n', (3449, 3461), True, 'import matplotlib.pyplot as mpl_p\n'), ((3463, 3487), 'matplotlib.pyplot.xlim', 'mpl_p.xlim', 
(['(100.0)', '(600.0)'], {}), '(100.0, 600.0)\n', (3473, 3487), True, 'import matplotlib.pyplot as mpl_p\n'), ((3488, 3500), 'matplotlib.pyplot.show', 'mpl_p.show', ([], {}), '()\n', (3498, 3500), True, 'import matplotlib.pyplot as mpl_p\n')] |
import numpy as np
import pandas as pd

# feature CSV locations for dataset "DB2" inside database folder "DB"
color_file_path = "features/DB/DB2_color.csv"
type_file_path = "features/DB/DB2_type.csv"
color_f = np.loadtxt(color_file_path, delimiter=",", dtype="float32")
type_f = np.loadtxt(type_file_path, delimiter=",", dtype="float32")

# per-colour slices of the raw tables: 8 samples per colour, 4 colours
black_color_f, blue_color_f, white_color_f, red_color_f = np.split(
    color_f[:32], 4)
black_type_f, blue_type_f, white_type_f, red_type_f = np.split(type_f[:32], 4)

# joint feature vector per sample: colour features followed by type features
x = np.concatenate([color_f, type_f], axis=1)
black_x_f, blue_x_f, white_x_f, red_x_f = np.split(x[:32], 4)

# rows 0-4: mean per-dimension std over camera subsets of one colour;
# the row order matches the index labels assigned below
_SUBSET_ROWS = [
    (0, None),                # all cameras
    (1, [0, 2, 3, 4, 5]),     # not_center
    (2, [1, 2, 3, 6, 7]),     # not_back
    (3, [0, 1, 3, 5, 7]),     # not_left
    (4, [0, 1, 2, 4, 6]),     # not_right
]
output = [[0] * 4 for _ in range(7)]
for i, x_ in enumerate([black_x_f, blue_x_f, white_x_f, red_x_f]):
    for row, subset in _SUBSET_ROWS:
        selected = x_ if subset is None else x_[subset]
        output[row][i] = np.average(np.std(selected, axis=0))
    # rows 5-6 do not depend on the colour: all 4 colours / whole dataset
    output[5][i] = np.average(np.std(x[:32], axis=0))
    output[6][i] = np.average(np.std(x, axis=0))

output = pd.DataFrame(np.array(output),
                      columns=["black", "blue", "white", "red"])
output.index = ["all", "not_center", "not_back", "not_left", "not_right",
                "4colors", "dataset"]
print(output)
output.to_csv("standard_deviation.csv")

# overall spread of the first 4 colour-feature samples of every colour
a = np.concatenate(
    [c[:4] for c in (black_color_f, blue_color_f, white_color_f, red_color_f)])
print(np.average(np.std(a, axis=0)))
"numpy.array",
"numpy.split",
"numpy.concatenate",
"numpy.std",
"pandas.DataFrame",
"numpy.loadtxt"
] | [((181, 240), 'numpy.loadtxt', 'np.loadtxt', (['color_file_path'], {'delimiter': '""","""', 'dtype': '"""float32"""'}), "(color_file_path, delimiter=',', dtype='float32')\n", (191, 240), True, 'import numpy as np\n'), ((250, 308), 'numpy.loadtxt', 'np.loadtxt', (['type_file_path'], {'delimiter': '""","""', 'dtype': '"""float32"""'}), "(type_file_path, delimiter=',', dtype='float32')\n", (260, 308), True, 'import numpy as np\n'), ((486, 510), 'numpy.split', 'np.split', (['type_f[:32]', '(4)'], {}), '(type_f[:32], 4)\n', (494, 510), True, 'import numpy as np\n'), ((626, 645), 'numpy.split', 'np.split', (['x[:32]', '(4)'], {}), '(x[:32], 4)\n', (634, 645), True, 'import numpy as np\n'), ((1168, 1184), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (1176, 1184), True, 'import numpy as np\n'), ((1195, 1215), 'pandas.DataFrame', 'pd.DataFrame', (['output'], {}), '(output)\n', (1207, 1215), True, 'import pandas as pd\n'), ((528, 550), 'numpy.concatenate', 'np.concatenate', (['[a, b]'], {}), '([a, b])\n', (542, 550), True, 'import numpy as np\n'), ((782, 800), 'numpy.std', 'np.std', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (788, 800), True, 'import numpy as np\n'), ((832, 867), 'numpy.std', 'np.std', (['x_[[0, 2, 3, 4, 5]]'], {'axis': '(0)'}), '(x_[[0, 2, 3, 4, 5]], axis=0)\n', (838, 867), True, 'import numpy as np\n'), ((895, 930), 'numpy.std', 'np.std', (['x_[[1, 2, 3, 6, 7]]'], {'axis': '(0)'}), '(x_[[1, 2, 3, 6, 7]], axis=0)\n', (901, 930), True, 'import numpy as np\n'), ((958, 993), 'numpy.std', 'np.std', (['x_[[0, 1, 3, 5, 7]]'], {'axis': '(0)'}), '(x_[[0, 1, 3, 5, 7]], axis=0)\n', (964, 993), True, 'import numpy as np\n'), ((1021, 1056), 'numpy.std', 'np.std', (['x_[[0, 1, 2, 4, 6]]'], {'axis': '(0)'}), '(x_[[0, 1, 2, 4, 6]], axis=0)\n', (1027, 1056), True, 'import numpy as np\n'), ((1084, 1106), 'numpy.std', 'np.std', (['x[:32]'], {'axis': '(0)'}), '(x[:32], axis=0)\n', (1090, 1106), True, 'import numpy as np\n'), ((1138, 1155), 'numpy.std', 
'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1144, 1155), True, 'import numpy as np\n'), ((1761, 1787), 'numpy.concatenate', 'np.concatenate', (['[a, color]'], {}), '([a, color])\n', (1775, 1787), True, 'import numpy as np\n'), ((1806, 1823), 'numpy.std', 'np.std', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (1812, 1823), True, 'import numpy as np\n')] |
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'shape': [(), (3, 2)],
}))
class Log1pFunctionTest(unittest.TestCase):
    """Tests chainer's elementwise F.log1p against numpy.log1p.

    Parameterized over a scalar shape () and a matrix shape (3, 2);
    covers forward (CPU/GPU), numerical backward (CPU/GPU), and the label.
    """
    def setUp(self):
        # inputs in [0.5, 1) keep 1 + x strictly positive for log1p
        self.x = numpy.random.uniform(.5, 1, self.shape).astype(numpy.float32)
        # upstream gradient for the backward check
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
    def check_forward(self, x_data):
        # forward output must match numpy.log1p elementwise
        x = chainer.Variable(x_data)
        y = F.log1p(x)
        gradient_check.assert_allclose(
            numpy.log1p(self.x), y.data, atol=1e-7, rtol=1e-7)
    @condition.retry(3)
    def test_log1p_forward_cpu(self):
        self.check_forward(self.x)
    @attr.gpu
    @condition.retry(3)
    def test_log1p_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))
    def check_backward(self, x_data, y_grad):
        # numerical-gradient check of F.log1p
        gradient_check.check_backward(F.log1p, x_data, y_grad)
    @condition.retry(3)
    def test_log1p_backward_cpu(self):
        self.check_backward(self.x, self.gy)
    @attr.gpu
    @condition.retry(3)
    def test_log1p_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
    def test_log1p(self):
        # the function object reports its operation name as the label
        self.assertEqual(F.Log1p().label, 'log1p')
# hand this module's tests to chainer's test runner
testing.run_module(__name__, __file__)
| [
"chainer.functions.Log1p",
"chainer.testing.condition.retry",
"chainer.Variable",
"chainer.testing.run_module",
"chainer.testing.product",
"numpy.random.uniform",
"chainer.functions.log1p",
"chainer.gradient_check.check_backward",
"numpy.log1p",
"chainer.cuda.to_gpu"
] | [((1406, 1444), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1424, 1444), False, 'from chainer import testing\n'), ((741, 759), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (756, 759), False, 'from chainer.testing import condition\n'), ((853, 871), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (868, 871), False, 'from chainer.testing import condition\n'), ((1074, 1092), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (1089, 1092), False, 'from chainer.testing import condition\n'), ((1197, 1215), 'chainer.testing.condition.retry', 'condition.retry', (['(3)'], {}), '(3)\n', (1212, 1215), False, 'from chainer.testing import condition\n'), ((584, 608), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (600, 608), False, 'import chainer\n'), ((621, 631), 'chainer.functions.log1p', 'F.log1p', (['x'], {}), '(x)\n', (628, 631), True, 'import chainer.functions as F\n'), ((1013, 1067), 'chainer.gradient_check.check_backward', 'gradient_check.check_backward', (['F.log1p', 'x_data', 'y_grad'], {}), '(F.log1p, x_data, y_grad)\n', (1042, 1067), False, 'from chainer import gradient_check\n'), ((260, 300), 'chainer.testing.product', 'testing.product', (["{'shape': [(), (3, 2)]}"], {}), "({'shape': [(), (3, 2)]})\n", (275, 300), False, 'from chainer import testing\n'), ((684, 703), 'numpy.log1p', 'numpy.log1p', (['self.x'], {}), '(self.x)\n', (695, 703), False, 'import numpy\n'), ((937, 956), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (948, 956), False, 'from chainer import cuda\n'), ((1283, 1302), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1294, 1302), False, 'from chainer import cuda\n'), ((1304, 1324), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (1315, 1324), False, 'from chainer import cuda\n'), ((392, 432), 
'numpy.random.uniform', 'numpy.random.uniform', (['(0.5)', '(1)', 'self.shape'], {}), '(0.5, 1, self.shape)\n', (412, 432), False, 'import numpy\n'), ((472, 511), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (492, 511), False, 'import numpy\n'), ((1378, 1387), 'chainer.functions.Log1p', 'F.Log1p', ([], {}), '()\n', (1385, 1387), True, 'import chainer.functions as F\n')] |
import sys, os, time
import numpy as np
import scipy as sci
import scipy.stats as ss
import scipy.sparse.linalg as slin
import copy
from .mytools.MinTree import MinTree
from scipy.sparse import coo_matrix, csr_matrix, lil_matrix
from .mytools.ioutil import loadedge2sm
from .edgepropertyAnalysis import MultiEedgePropBiGraph
import math
from .._model import DMmodel
from spartan.util.basicutil import param_default
from spartan.backend import STensor
def score_level_objects( objscores, p=0.90):
'''implement with Perato distribution, given significant value
'''
sortscores = sorted(objscores)
sortobjs = np.argsort(objscores)
alpha = 0.9
tail_fir_score = np.percentile(sortscores, [alpha*100])[0]
if tail_fir_score == 0:
'remove 0 if the number of percentile 90% is 0'
firindex = np.argwhere(sortscores > 0)[0]
sortscores = sortscores[firindex:]
sortobjs = sortobjs[firindex:]
'fit generalized pareto distribution using 10% upper tail data'
tailidx = int(alpha * len(sortscores))
tailscores = sortscores[tailidx:]
tailobjs = sortobjs[tailidx:]
shape, pos, scale = ss.pareto.fit(tailscores)
cdfs = ss.pareto.cdf(tailscores, shape, pos, scale)
levelidxs = np.argwhere(cdfs >= p)
levelobjs = tailobjs[levelidxs].T[0]
return levelobjs
def score_heristic_level_objects(objscores):
    '''Heuristically cut the score ranking at its largest gap.

    Objects are ranked by descending score; the cut point is the biggest
    drop between two consecutive scores, and every object at or above the
    cut is returned.
    '''
    ranked = np.argsort(objscores)[::-1]
    descending = np.sort(objscores)[::-1]
    gaps = -np.diff(descending)
    cut = np.argmax(gaps)
    return ranked[:cut + 1]
def nonzero_objects(objscores):
    '''Return the indices of objects whose score is strictly positive.'''
    positive_mask = objscores > 0
    return positive_mask.nonzero()[0]
class Ptype(object):
    '''Enumeration of the edge-property (signal) types used by HoloScope.'''
    freq = 0   # topology / frequency signal
    ts = 1     # timestamp signal
    rate = 2   # rating (e.g. # of stars) signal

    @staticmethod
    def ptype2str(p):
        '''Map a property-type constant to its short name (None if unknown).'''
        names = {Ptype.freq: 'freq', Ptype.ts: 'ts', Ptype.rate: 'rate'}
        return names.get(p)

    @staticmethod
    def ptypes2str(ptypes):
        '''Join the names of the given property types with dashes,
        always in freq-ts-rate order.'''
        ordered = (Ptype.freq, Ptype.ts, Ptype.rate)
        strs = [Ptype.ptype2str(t) for t in ordered if t in ptypes]
        return '-'.join(strs)
class HoloScopeOpt:
def __init__(self, graphmat, qfun='exp', b=32,
aggmethod='sum', sdrop=True, mbd=0.5, sdropscale='linear',
tsprop=None, tunit='s', rateprop=None):
'how many times of a user rates costumers if he get the cost balance'
self.coe = 0
'the larger expbase can give a heavy penalty to the power-law curve'
self.expbase = b
self.scale = qfun
self.b = b
self.aggmethod=aggmethod
self.suspbd = 0.0 #susp < suspbd will assign to zero
self.priordropslop=sdrop
self.graph=graphmat.tocoo()
self.graphr = self.graph.tocsr()
self.graphc = self.graph.tocsc()
self.matricizetenor=None
self.nU, self.nV=graphmat.shape
self.indegrees = graphmat.sum(0).getA1()
self.e0 = math.log(graphmat.sum(), self.nU) #logrithm of edges
print('matrix size: {} x {}\t#edges: {}'.format(self.nU, self.nV,
self.indegrees.sum()))
# tunit is only used for files input
self.tsprop, self.rateprop, self.tunit = tsprop, rateprop, tunit
self.tspim, self.ratepim = None, None
'field for multiple property graph'
if tsprop is not None or rateprop is not None:
if self.priordropslop:
self.orggraph = self.graphr.copy()
else:
self.orggraph = self.graphr
if tsprop is not None:
self.mbd = mbd #multiburst bound
self.tspim = MultiEedgePropBiGraph(self.orggraph)
"""
since the data is cut by the end of time, so we need to see
whether there is enough time twait from end of retweet to end of the
whole data to judge if it is a sudden drop or cut by the end of time.
twaits:
"""
if isinstance(tsprop, str) and os.path.isfile(tsprop):
self.tspim.load_from_edgeproperty(tsprop, mtype=coo_matrix,
dtype=np.int64)
twaits = {'s':12*3600, 'h':24, 'd':30, None:0}
twait = twaits[tunit]
elif isinstance(tsprop, STensor):
self.tspim.trans_array_to_edgeproperty(tsprop,
mtype=coo_matrix, dtype=np.int64)
twait = 12
else:
raise Exception('Error: incorrect time stamp property')
self.tspim.setup_ts4all_sinks(twait)
if self.priordropslop:
'slops weighted with max burst value'
self.weightWithDropslop(weighted=True, scale=sdropscale)
else:
self.priordropslop = False #no input of time attribute
if rateprop is not None:
self.ratepim = MultiEedgePropBiGraph(self.orggraph)
if isinstance(rateprop, str) and os.path.isfile(rateprop):
self.ratepim.load_from_edgeproperty(rateprop, mtype=coo_matrix, dtype=float)
elif isinstance(rateprop, STensor):
self.ratepim.trans_array_to_edgeproperty(rateprop,
mtype=coo_matrix, dtype=float)
else:
raise Exception('Error: incorrect rate property')
self.ratepim.setup_rate4all_sinks()
'weighed with idf prior from Fraudar'
#self.weightWithIDFprior()
'if weighted the matrix the windegrees is not equal to indegrees'
self.windegrees = self.graphc.sum(0).getA1()
self.woutdegrees = self.graphr.sum(1).getA1()
self.A = np.array([]) #binary array
self.fbs = np.zeros(graphmat.shape[1], dtype=np.int) #frequency of bs in B
'\frac_{ f_A{(bi)} }{ f_U{(bi)}}'
self.bsusps = np.array([]) # the suspicious scores of products given A
self.vx = 0 # current objective value
self.vxs = [] #record all the vxs of optimizing iterations
self.Y= np.array([])
self.yfbs = np.array([])
self.ybsusps = np.array([])
'current is the best'
self.bestvx = self.vx
self.bestA = np.array([])
self.bestfbs = np.array([])
self.bestbsusps = np.array([])
    def weightWithDropslop(self, weighted, scale):
        """Weight each column of the adjacency matrix by its sudden-drop prior.

        weighted=True multiplies drop slope by drop fall magnitude;
        scale selects how the raw weights are rescaled before being applied
        as a diagonal right-multiplication. Rebuilds graphr/graph/graphc.
        """
        # NOTE(review): when weighted=False, colWeights aliases
        # self.tspim.dropslops, so the 'plusone' branch mutates that array
        # in place — do not "simplify" to colWeights = colWeights + 1.
        if weighted:
            colWeights = np.multiply(self.tspim.dropslops, self.tspim.dropfalls)
        else:
            colWeights = self.tspim.dropslops
        if scale == 'logistic':
            from scipy.stats import logistic
            from sklearn import preprocessing
            'zero mean scale'
            colWeights = preprocessing.scale(colWeights)
            colWeights = logistic.cdf(colWeights)
        elif scale == 'linear':
            from sklearn import preprocessing
            #add a base of suspecious for each edge
            colWeights = preprocessing.minmax_scale(colWeights) +1
        elif scale == 'plusone':
            colWeights += 1
        elif scale == 'log1p':
            colWeights = np.log1p(colWeights) + 1
        else:
            print('[Warning] no scale for the prior weight')
        # apply the weights as a diagonal matrix on the right (per-column)
        n = self.nV
        colDiag = lil_matrix((n, n))
        colDiag.setdiag(colWeights)
        self.graphr = self.graphr * colDiag.tocsr()
        self.graph = self.graphr.tocoo(copy=False)
        self.graphc = self.graph.tocsc(copy=False)
        print("finished computing weight matrix")
def weightWithIDFprior(self):
print('weightd with IDF prior')
colWeights = 1.0/np.log(self.indegrees + 5)
n = self.nV
colDiag = lil_matrix((n, n))
colDiag.setdiag(colWeights)
self.graphr = self.graphr * colDiag.tocsr()
self.graph = self.graphr.tocoo(copy=False)
self.graphc = self.graph.tocsc(copy=False)
return
'new objective with no f_A(v)/|A|'
def maxobjfunc(self, A, fbs, bsusps=None):
nu = 0.0
de = 0.0
numA = np.sum(A)
de = numA + bsusps.sum() #math.sqrt(numA*bsusps.sum())#similar
if numA == 0:
return 0
if bsusps is not None:
nu = np.dot(fbs, bsusps)
else:
nu = fbs.sum()
res = nu/np.float64( de )
return res
    def aggregationMultiProp(self, mbs, method='sum'):
        """Aggregate per-object score vectors from multiple signals into one.

        mbs maps signal type -> score vector (insertion order matters for
        'sum'); method is 'sum' (elementwise addition) or 'rank'
        (reciprocal-rank fusion with constant k=60).
        """
        if method == 'rank':
            from scipy.stats import rankdata
            rankmethod = 'average'
            k=60 #for rank fusion
        values = list(mbs.values())
        if len(mbs) == 1:
            # single signal: return it directly (rank-transformed if asked)
            val = values[0]
            if method == 'rank':
                rb = rankdata(-np.array(val), method=rankmethod)
                return np.reciprocal(rb+k) * k
            else:
                return val
        if method == 'sum':
            'this is the joint probability of exp form of prob'
            # NOTE(review): accumulates in place into the first array stored
            # in mbs; callers build mbs fresh each call, so this aliasing is
            # benign here — but beware when reusing mbs.
            bsusps = values[0]
            for v in values[1:]:
                bsusps += v
        elif method == 'rank':
            'rank fusion'
            arrbsusps = []
            for val in values:
                rb = rankdata(-np.array(val), method=rankmethod)
                arrbsusps.append(np.reciprocal(rb+k))
            bsusps = np.array(arrbsusps).sum(0) * k
        else:
            # NOTE(review): falls through with bsusps unbound -> NameError
            print('[Error] Invalid method {}\n'.format(method))
        return bsusps
#@profile
def evalsusp4ts(self, suspusers, multiburstbd = 0.5, weighted=True):
'the id of suspusers consistently starts from 0 no matter the source'
incnt, inratio = self.tspim.suspburstinvolv(multiburstbd, weighted,
delta=True)
suspts=inratio
return suspts
#@profile
def evalsusp4rate(self, suspusers, neutral=False, scale='max'):
susprates = self.ratepim.suspratedivergence(neutral, delta=True)
if scale == 'max':
if self.ratepim.maxratediv > 0:
nsusprates = susprates/self.ratepim.maxratediv
else:
nsusprates = susprates
elif scale=='minmax':
#need a copy, and do not change susprates' value for delta
from sklearn import preprocessing
nsusprates = preprocessing.minmax_scale(susprates, copy=True)
else:
#no scale
nsusprates = susprates
return nsusprates
'sink suspicious with qfunc, no f_A(v)/|A|'
    def prodsuspicious(self, fbs, A=None, scale='exp', ptype=[Ptype.freq]):
        """Per-object suspiciousness combining the enabled signals.

        For freq: ratio f_A(v) / f_U(v) over objects with positive weighted
        in-degree; for ts/rate: delta evaluations against the current suspect
        set. The aggregated vector is passed through qfunc (mutated in place).
        NOTE(review): ptype has a mutable default, but it is never mutated
        here, so the shared-default hazard does not bite.
        """
        multibsusps={}
        if Ptype.freq in ptype:
            posids = self.windegrees>0
            bs = np.zeros(self.nV)
            bs[posids] = np.divide(fbs[posids], self.windegrees[posids].astype(np.float64))
            multibsusps[Ptype.freq] = bs
        if Ptype.ts in ptype:
            suspusers = A.nonzero()[0]
            bs = self.evalsusp4ts(suspusers, multiburstbd=self.mbd)
            multibsusps[Ptype.ts] = bs
        if Ptype.rate in ptype:
            suspusers = A.nonzero()[0]
            bs = self.evalsusp4rate(suspusers)
            multibsusps[Ptype.rate] = bs
        # combine the signals, then rescale with the chosen q-function
        bsusps = self.aggregationMultiProp(multibsusps, self.aggmethod)
        bsusps = self.qfunc(bsusps, fbs=fbs, scale=scale,
                            numratios=len(multibsusps))
        return bsusps
def initpimsuspects(self, suspusers, ptype):
if Ptype.ts in ptype:
self.tspim.setupsuspects(suspusers)
temp1, temp2 = self.tspim.suspburstinvolv(multiburstbd=0.5, weighted=True,
delta=False)
if Ptype.rate in ptype:
self.ratepim.setupsuspects(suspusers)
tmp = self.ratepim.suspratedivergence(neutral=False,
delta=False)
return
def start(self, A0, ptype=[Ptype.ts]):
self.A = A0
users = A0.nonzero()[0]
self.ptype=ptype # the property type that the postiorer uses
self.fbs = self.graphr[users].sum(0).getA1()
self.fbs = self.fbs.astype(np.float64, copy=False)
'initially set up currrent suspects'
self.initpimsuspects(users, ptype=ptype)
self.bsusps = self.prodsuspicious(self.fbs, self.A, ptype=ptype)
self.vx = self.maxobjfunc(self.A, self.fbs, self.bsusps)
self.vxs.append(self.vx)
"current is the best"
self.bestA = np.array(self.A)
self.bestvx = self.vx
self.bestfbs = np.array(self.fbs)
self.bestbsusps = np.array(self.bsusps)
def candidatefbs(self, z):
'increase or decrease'
coef = 1 if self.A[z] == 0 else -1
bz = self.graphr[z]
candfbs = (coef*bz + self.fbs).getA1()
return candfbs
    #@profile
    def greedyshaving(self):
        '''Greedily shave users out of self.A, one per iteration.

        A min-tree keyed on each user's contribution (row . bsusps) picks the
        cheapest user to delete; after each deletion the affected users'
        keys are updated incrementally. The best objective seen is kept in
        self.bestA/bestvx/bestfbs/bestbsusps. Returns the final |A| (0).
        '''
        # deleted users get a sentinel score so they are never picked again
        maxint = np.iinfo(np.int64).max//2
        delscores = np.array([maxint]*self.nU)
        delcands = self.A.nonzero()[0]
        deluserCredit = self.graphr[delcands,:].dot(self.bsusps)
        delscores[delcands] = deluserCredit
        print('set up the greedy min tree')
        MT = MinTree(delscores)
        i=0
        sizeA = np.sum(self.A)
        sizeA0 = sizeA
        setA = set(self.A.nonzero()[0])
        while len(setA) > 0:
            # remove the user with the smallest contribution
            z, nextdelta = MT.getMin()
            setY = setA - {z}
            Y = copy.copy(self.A) # A is X
            Y[z] = 1-Y[z]
            self.Y=Y
            self.yfbs = self.candidatefbs(z)
            Ylist = Y.nonzero()[0]
            # propagate the removal of z into the ts/rate delta state
            self.setdeltapimsusp(z, Ylist, add=False)
            self.ybsusps = self.prodsuspicious(self.yfbs, self.Y,
                                               ptype=self.ptype)
            vy = self.maxobjfunc(self.Y, self.yfbs, self.ybsusps)
            'chose next if next if the best'
            if vy > self.bestvx:
                self.bestA = np.array(self.Y)
                self.bestfbs = self.yfbs
                self.bestbsusps = self.ybsusps
                self.bestvx = vy
            MT.changeVal(z, maxint) #make the min to the largest for deletion
            # incrementally adjust only users touching objects whose
            # suspiciousness changed
            prodchange = self.ybsusps - self.bsusps
            effectprod = prodchange.nonzero()[0]
            if len(effectprod)>0:
                #this is delta for all users
                userdelta = self.graphc[:,effectprod].dot(prodchange[effectprod])
                yuserdelta = userdelta[Ylist]
                for u in yuserdelta.nonzero()[0]:
                    uidx = Ylist[u]
                    MT.changeVal(uidx,yuserdelta[u])
            'delete next user, make current to next'
            self.A = self.Y
            sizeA -= 1
            setA = setY
            self.fbs = self.yfbs
            self.bsusps = self.ybsusps
            self.vx = vy
            self.vxs.append(self.vx)
            # progress dot roughly every 1% of the initial set
            if i % (sizeA0//100 + 1) == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            i+=1
        print()
        return np.sum(self.A)
def initfastgreedy(self, ptype, numSing, rbd='avg', eps=1.6):
'''
default: ptype=[Ptype.freq], numSing=10, rbd='avg'
'''
self.ptype=ptype
self.numSing=numSing #number of singular vectors we consider
self.avgexponents=[]
if len(ptype)==1:
self.initfastgreedy2D(numSing, rbd, eps=eps)
elif len(ptype) > 1:
self.initfastgreedyMD(numSing, rbd, eps=eps)
self.bestvx = -1
self.qchop=False
#reciprocal of indegrees
self.sindegreciprocal = csr_matrix(self.windegrees).astype(np.float64)
data = self.sindegreciprocal.data
nozidx = data.nonzero()[0]
self.sindegreciprocal.data[nozidx] = data[nozidx]**(-1)
return
    def tenormatricization(self, tspim, ratepim, tbindic, rbins,
                           mtype=coo_matrix, dropweight=True, logdegree=False):
        '''Matricize the (user, object, time, rate) tensor into a 2-D matrix.

        Each distinct (object, time-bin[, rate-bin]) combination becomes one
        column; rows stay users. Returns (matrix, rindexcols) where
        rindexcols maps column index -> the space-joined key string.
        tbindic gives the time-bin width per time unit; rbins digitizes a
        rating value. dropweight reweights columns by the object's
        sudden-drop prior; logdegree applies log1p to the entries.
        '''
        if tspim is None and ratepim is None:
            return self.graph, range(self.nV)
        tscm, rtcm, dl = None, None,0
        if Ptype.ts in self.ptype and tspim is not None:
            tscm = tspim.edgeidxm.tocoo()
            dl = len(tscm.data)
        if Ptype.rate in self.ptype and ratepim is not None:
            rtcm = ratepim.edgeidxm.tocoo()
            dl = len(rtcm.data)
        # per-object drop weight; log1p tames heavy tails
        if dropweight is True and tspim is not None:
            w = np.multiply(tspim.dropfalls, tspim.dropslops)
            w = np.log1p(w) + 1
        else:
            w = np.ones(self.nV)
        xs, ys, data, colWeights = [],[],[],[] # for matricized tenor
        matcols, rindexcols={},{}
        for i in range(dl):
            if tscm is not None and rtcm is not None:
                # both signals: ts and rate matrices must index the same edges
                assert(tscm.row[i] == rtcm.row[i] and tscm.col[i] == rtcm.col[i])
                u = tscm.row[i]
                v = tscm.col[i]
                for t1, r1 in zip(tspim.eprop[tscm.data[i]],
                                  ratepim.eprop[rtcm.data[i]]):
                    t = t1//int(tbindic[self.tunit])
                    r = rbins(r1)
                    strcol = ' '.join(map(str,[v,t,r]))
                    if strcol not in matcols:
                        idx = len(matcols)
                        matcols[strcol] = idx
                        rindexcols[idx]=strcol
                    xs.append(u)
                    ys.append(matcols[strcol])
                    data.append(1.0)
            elif tscm is not None:
                # timestamps only: columns keyed by (object, time-bin)
                u = tscm.row[i]
                v = tscm.col[i]
                for t1 in tspim.eprop[tscm.data[i]]:
                    t = t1//int(tbindic[self.tunit])
                    strcol = ' '.join(map(str,[v,t]))
                    if strcol not in matcols:
                        idx = len(matcols)
                        matcols[strcol] = idx
                        rindexcols[idx]=strcol
                    xs.append(u)
                    ys.append(matcols[strcol])
                    data.append(1.0)
            elif rtcm is not None:
                # ratings only: columns keyed by (object, rate-bin)
                u = rtcm.row[i]
                v = rtcm.col[i]
                for r1 in ratepim.eprop[rtcm.data[i]]:
                    r = rbins(r1)
                    strcol = ' '.join(map(str,[v,r]))
                    if strcol not in matcols:
                        idx = len(matcols)
                        matcols[strcol] = idx
                        rindexcols[idx]=strcol
                    xs.append(u)
                    ys.append(matcols[strcol])
                    data.append(1.0)
            else:
                print('Warning: no ts and rate for matricization')
                return self.graph, range(self.nV)
        nrow, ncol = max(xs)+1, max(ys)+1
        # duplicate (u, col) entries are summed by the sparse constructor
        sm = mtype( (data, (xs, ys)), shape=(nrow, ncol), dtype=np.float64 )
        if logdegree:
            print('using log degree')
            sm.data[0:] = np.log1p(sm.data)
        if dropweight:
            # each column inherits the drop weight of its source object
            # (the object id is the key's first space-separated token)
            m1, n1 = sm.shape
            for i in range(n1):
                pos = rindexcols[i].find(' ')
                v = int(rindexcols[i][:pos])
                colWeights.append(w[v])
            colDiag = lil_matrix((n1, n1))
            colDiag.setdiag(colWeights)
            sm = sm * colDiag.tocsr()
        return sm, rindexcols
def initfastgreedyMD(self, numSing, rbd, eps = 1.6):
'''
use matricizationSVD instead of freq matrix svd
'''
#afile = self.tsprop if self.tsprop is not None else self.rateprop
#ipath = os.path.dirname(os.path.abspath(afile))
tbindic={}
if isinstance(self.tsprop, str) and os.path.isfile(self.tsprop):
tbindic={'s':24*3600, 'd':30}
print('Generate tensorfile with tunit:{}, tbins:{}'.format(self.tunit,
tbindic[self.tunit]))
elif isinstance(self.tsprop, STensor):
tbindic={'s':1, 'd':1}
print('Generate tensorfile with time rescale: ', tbindic[self.tunit] )
'edgepropertyAnalysis has already digitized the ratings'
rbins = lambda x: int(x) #lambda x: 0 if x<2.5 else 1 if x<=3.5 else 2
if self.matricizetenor is None:
matricize_start = time.clock()
sm, rindexcol = self.tenormatricization(self.tspim, self.ratepim,
tbindic, rbins, mtype=coo_matrix,
dropweight=self.priordropslop,
logdegree=False)
self.matricizetenor = sm
print('::::matricize time cost: ', time.clock() - matricize_start)
sm = self.matricizetenor
print("matricize {}x{} and svd dense... ..."\
.format(sm.shape[0], sm.shape[1]))
u, s, vt = slin.svds(sm, k=numSing, which='LM')
u = np.fliplr(u)
s = s[::-1]
CU, CV = [],[]
for i in range(self.numSing):
ui = u[:, i]
si = s[i]
if abs(max(ui)) < abs(min(ui)):
ui = -1*ui
if type(rbd) is float:
sqrtSi = math.sqrt(si)
ui *= sqrtSi
rbdrow= rbd
elif rbd == 'avg':
rbdrow = 1.0/math.sqrt(self.nU)
else:
print('unkown rbd {}'.format(rbd))
rows = np.argsort(-ui, axis=None, kind='quicksort')
for jr in range(len(rows)):
r = rows[jr]
if ui[r] <= rbdrow:
break
self.avgexponents.append(math.log(jr, self.nU))
'consider the # limit'
if self.nU > 1e6:
e0 = self.e0
ep = max(eps, 2.0/(3-e0))
nn = sm.shape[0] + sm.shape[1]
nlimit = int(math.ceil(nn**(1/ep)))
cutrows = rows[:min(jr,nlimit)]
else:
cutrows = rows[:jr]
CU.append(cutrows)
self.CU = np.array(CU)
self.CV = np.array(CV)
return
    def initfastgreedy2D(self, numSing, rbd, eps=1.6):
        '''Seed user sets from the SVD of the plain frequency matrix.

        For each of the first numSing singular-vector pairs, keeps the rows
        (users) and columns whose entries exceed the rbd threshold
        (a float, or 'avg' for 1/sqrt(n)); candidate user sets go to
        self.CU. eps bounds the candidate size on very large graphs.
        '''
        'parameters for fastgreedy'
        u, s, vt = slin.svds(self.graphr.astype(np.float64), k=numSing, which='LM')
        #revert to make the largest singular values and vectors in the front
        u = np.fliplr(u)
        vt = np.flipud(vt)
        s = s[::-1]
        self.U = []
        self.V = []
        self.CU = []
        self.CV = []
        for i in range(self.numSing):
            ui = u[:, i]
            vi = vt[i, :]
            si = s[i]
            # fix signs so the dominant entries of each vector are positive
            if abs(max(ui)) < abs(min(ui)):
                ui = -1*ui
            if abs(max(vi)) < abs(min(vi)):
                vi = -1*vi
            if type(rbd) is float:
                sqrtSi = math.sqrt(si)
                ui *= sqrtSi
                vi *= sqrtSi
                rbdrow, rbdcol = rbd, rbd
            elif rbd == 'avg':
                rbdrow = 1.0/math.sqrt(self.nU)
                rbdcol = 1.0/math.sqrt(self.nV)
            else:
                print('unkown rbd {}'.format(rbd))
            rows = np.argsort(-ui, axis=None, kind='quicksort')
            cols = np.argsort(-vi, axis=None, kind='quicksort')
            # jr = number of row entries above the threshold
            for jr in range(len(rows)):
                r = rows[jr]
                if ui[r] <= rbdrow:
                    break
            self.avgexponents.append(math.log(jr, self.nU))
            if self.nU > 5e5:
                # cap the candidate size near nn**(1/eps) for huge graphs
                e0=self.e0
                ep = max(eps, 2.0/(3-e0))
                nn = self.nU + self.nV
                nlimit = int(math.ceil(nn**(1.0/ep)))
                cutrows = rows[:min(jr,nlimit)]
            else:
                cutrows = rows[:jr]
            for jc in range(len(cols)):
                c = cols[jc]
                if vi[c] <= rbdcol:
                    break
            cutcols = cols[:jc]
            'begin debug'
            self.U.append(ui)
            self.V.append(vi)
            'end debug'
            self.CU.append(cutrows)
            # NOTE(review): appends cutrows, not cutcols — cutcols is computed
            # but unused; looks like a typo, though CV is not consumed in the
            # visible code. TODO confirm before changing.
            self.CV.append(cutrows)
        self.CU = np.array(self.CU)
        self.CV = np.array(self.CV)
        return
    def qfunc(self, ratios, fbs=None, scale='exp', numratios=1):
        """Piecewise rescaling of aggregated suspiciousness ratios.

        Mutates *ratios* in place (and also returns it): values below
        self.suspbd are zeroed; values above are rescaled by the chosen
        q-function ('exp', 'pl', or 'lin'). Skipped entirely under 'rank'
        aggregation. Exits the process on an unrecognized scale.
        """
        if self.aggmethod == 'rank':
            'do not use qfun if it is rank aggregation'
            return ratios
        if self.suspbd <= 0.0:
            greatbdidx = ratios > 0.0
        else:
            greatbdidx = ratios >= self.suspbd
            lessbdidx = ratios < self.suspbd
            'picewise q funciton if < suspbd, i.e. epsilon1'
            ratios[lessbdidx] = 0.0
        'picewise q funciton if >= suspbd, i.e. epsilon1'
        if scale == 'exp':
            # exponential penalty; numratios shifts so a full score maps to 1
            ratios[greatbdidx] = self.expbase**(ratios[greatbdidx]-numratios)
        elif scale == 'pl':
            ratios[greatbdidx] = ratios[greatbdidx]**self.b
        elif scale == 'lin':
            ratios[greatbdidx] = np.fmax(self.b*(ratios[greatbdidx]-1)+1, 0)
        else:
            print('unrecognized scale: ' + scale)
            sys.exit(1)
        return ratios
def setdeltapimsusp(self, z, ysuspusers, add):
if Ptype.ts in self.ptype:
self.tspim.deltasuspects(z, ysuspusers, add)
if Ptype.rate in self.ptype:
self.ratepim.deltasuspects(z, ysuspusers, add)
return
def removecurrentblock(self, rows):
'''it is for find second block, remove rows from
self.graph, self.matricizetenor
'''
print('removing {} rows from graph'.format(len(rows)))
lilm = self.graph.tolil()
lilm[rows,:]=0
self.graph=lilm.tocoo()
self.graphc= lilm.tocsc()
self.graphr = self.graph.tocsr()
if self.matricizetenor is not None:
print('removing {} rows from tensor'.format(len(rows)))
lilmm = self.matricizetenor.tolil()
lilmm[rows,:] = 0
self.matricizetenor = lilmm.tocoo()
return
    #@profile
    def fastgreedy(self):
        '''Run greedy shaving from each truncated-singular-vector seed set.

        For each candidate user set in self.CU, seeds start()/greedyshaving()
        and keeps the best solution over all seeds in self.best* (local
        per-seed optima are collected in self.fastlocalbest).
        '''
        'No Need: user order for r with obj fuct'
        self.fastlocalbest = []
        self.fastbestvx = 0
        self.fastbestA, self.fastbestfbs, self.fastbestbsusps = \
                np.zeros(self.nU), np.zeros(self.nV), np.zeros(self.nV)
        for k in range(self.numSing):
            print('process {}-th singular vector'.format(k+1))
            lenCU = len(self.CU[k])
            if lenCU == 0:
                continue
            print('*** *** shaving ...')
            A0 = np.zeros(self.nU, dtype=int)
            A0[self.CU[k]]=1 #shaving from sub singluar space
            self.start(A0, ptype=self.ptype)
            self.greedyshaving()
            print('*** *** shaving opt size: {}'.format(sum(self.bestA)))
            print('*** *** shaving opt value: {}'.format(self.bestvx))
            # keep the best solution over all singular-vector seeds
            if self.fastbestvx < self.bestvx:
                self.fastbestvx = self.bestvx
                self.fastbestA = np.array(self.bestA)
                self.fastbestfbs = np.array(self.bestfbs)
                self.fastbestbsusps = np.array(self.bestbsusps)
                print('=== === improved opt size: {}'.format(sum(self.fastbestA)))
                print('=== === improved opt value: {}'.format(self.fastbestvx))
            # record this seed's local optimum (users, per-object scores)
            brankscores = np.multiply(self.bestbsusps, self.bestfbs)
            A = self.bestA.nonzero()[0]
            self.fastlocalbest.append((self.bestvx, (A, brankscores)))
            'clear shaving best'
            self.bestvx = 0
        self.bestvx, self.bestA, self.bestfbs, self.bestbsusps = \
                self.fastbestvx, self.fastbestA, \
                self.fastbestfbs, self.fastbestbsusps
        return
def drawObjectiveCurve(self, outfig):
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(self.vxs, '-')
plt.title('The convergence curve of simulated anealing.')
plt.xlabel('# of iterations')
plt.ylabel('objective value')
if outfig is not None:
fig.savefig(outfig)
return fig
def holoscope_interface(wmat, alg, ptype, qfun, b, rateprop=None, tsprop=None,
                        tunit='s', numSing=10, nblock=1, eps=1.6):
    '''
    The interface of HoloScope algorithm for external use
    Parameters
    ----------
    wmat: str or sparse matrix
        If it is str, wmat is the input file name. We load the file into sparse
        matrix. If it is sparse matrix, we just use wmat.
    alg: str
        which algorithm you are going to use. You can choose 'greedy' for
        synthetic data (#rows+#cols<10000); or 'fastgreedy' for any size of data
        sets.
    ptype: list
        contains which attributes the algorithm is going to use. The hololisc
        use of all siginals is [Ptype.freq, Ptype.ts, Ptype.rate]
    qfun: str
        which kind of qfun the algorithm uses, choosing from 'exp' for
        exponential (recommended), 'pl' for power-law, 'lin' for linear
    b: float
        The base of exponetial qfun, or the exponent of power-law qfun, or
        absolute slope of linear qfun
    rateprop: str or STensor or None
        The file name with path for user-object rating sequences. The file
        format is that each line looks like 'userid-objectid:#star1 #star2
        ...\n'.
        If it is STensor, then rateprop contains (userid, objectid, #star) --> freq
    tsprop: str or None
        The file name with path for user-object timestamp sequences. The file
        format is that each line looks like 'userid-objectid:t1 t2 ...\n'
        If it is STensor, then rateprop contains (userid, objectid, tsbin) --> freq
    tunit: str (only support 's' or 'd') or None
        The time unit of input time
        e.g. in amazon and yelp data, the time is date, i.e. tunit='d'.
        We use # of days (integer) from the earlest date as input
        It does not need if tsprop is STensor
    numSing: int
        The number of first left singular vectors used in our algorithm
    nblock: int
        The number of block we need from the algorithm
    eps: float
        It gives the approximate level cut for singular vectors, which
        is a trade-off parameter for efficency and accuracy. Usually eps
        is between (1.5, 2], and the complexity reduce from the quadratic in number of
        nodes to the near linear in number of edges.
    Return
    ---------
    (gbestvx, (gsrows, gbscores)), opt
        Block (gsrows, gbscores) has the best objective values 'gbestvx' among
        *nblock* blocks.
    gbestvx: float
        the best objective value of the *nblock* blocks.
    gsrows: list
        is the list of suspicious rows.
    gbscores: list
        is the suspicoius scores for every objects. The index is object id,
        and value is the score. With the scores, you can get the suspicious rank
        of the objects.
    opt: instance of HoloScopeOpt class
        the class instance which contains all the *nblock* blocks in opt.nbests.
    opt.nbests: list
        This is the list contains *nblock* solutions in the form of
        tuple, i.e., (opt.bestvx, (srows, bscores))
    '''
    print('initial...')
    # accept either an edge-list file path or an in-memory sparse matrix
    if sci.sparse.issparse(wmat) is False and os.path.isfile(wmat):
        # sm = loadedge2sm(wmat, coo_matrix, weighted=True, idstartzero=True)
        # remove unexpected parameter 'weighted'
        sm = loadedge2sm(wmat, coo_matrix, idstartzero=True)
    else:
        sm = wmat.tocoo()
    inprop = 'Considering '
    if Ptype.freq in ptype:
        inprop += '+[topology] '
    if Ptype.ts in ptype:
        inprop += '+[timestamps] '
        #consider sdrop by default when Ptype.ts
        inprop += '+[sudden drop]'
    else:
        tsprop=None
    if Ptype.rate in ptype:
        inprop += '+[rating i.e. # of stars] '
    else:
        rateprop = None
    print(inprop)
    opt = HoloScopeOpt(sm, qfun=qfun, b=b, tsprop=tsprop, tunit=tunit,
                       rateprop=rateprop)
    opt.nbests=[]
    opt.nlocalbests=[] #mainly used for fastgreedy
    gsrows,gbscores,gbestvx = 0,0,0
    for k in range(nblock):
        # time.clock() was removed in Python 3.8; perf_counter is the
        # documented replacement for wall-clock interval timing
        start_time = time.perf_counter()
        if alg == 'greedy':
            n1, n2 = sm.shape
            if n1 + n2 > 1e4:
                print('[Warning] alg {} is slow for size {}x{}'\
                        .format(alg, n1, n2))
            A = np.ones(opt.nU,dtype=int)
            print('initial start')
            opt.start(A, ptype=ptype)
            print('greedy shaving algorithm ...')
            opt.greedyshaving()
        elif alg == 'fastgreedy':
            print("""alg: {}\n\t+ # of singlular vectors: {}\n""".format(alg, numSing))
            print('initial start')
            opt.initfastgreedy( ptype, numSing, eps=eps )
            print("::::Finish Init @ ", time.perf_counter() - start_time)
            print('fast greedy algorithm ...')
            opt.fastgreedy()
            opt.nlocalbests.append(opt.fastlocalbest)
        else:
            print('No such algorithm: '+alg)
            sys.exit(1)
        print("::::Finish Algorithm @ ", time.perf_counter() - start_time)
        srows = opt.bestA.nonzero()[0]
        bscores = np.multiply(opt.bestfbs, opt.bestbsusps)
        opt.nbests.append((opt.bestvx, (srows, bscores)))
        # keep the globally best block across the nblock runs
        gsrows, gbscores, gbestvx = (srows,bscores,opt.bestvx) \
                if gbestvx < opt.bestvx else (gsrows, gbscores, gbestvx)
        if k < nblock-1:
            opt.removecurrentblock(srows)
    #levelcols = score_level_objects( gbscores )
    nnzcols = nonzero_objects( gbscores )
    #print('global best size: nodes', len(gsrows), len(nnzcols), 'with camouflage.')
    #print('global best value ', gbestvx)
    return ((gsrows, nnzcols), gbestvx), gbscores[nnzcols], opt
class HoloScope( DMmodel ):
    '''Anomaly detection base on contrastively dense subgraphs, considering
    topological, temporal, and categorical (e.g. rating scores) signals, or
    any supported combinations.
    Parameters
    ----------
    graph: Graph
        Graph instance contains adjency matrix, and possible multiple signals.
    alg: string options ['fastgreedy' | 'greedy' ]
        The algorithm used for detect dense blocks. You can choose 'greedy' for
        synthetic data (#rows+#cols<10000); or 'fastgreedy' for any size of data
        sets.
        Default is 'fastgreedy' which
        uses main (with first several large singular values) truncated singular vectors
        to find dense blocks. alg is used with eps as truncation factor, and
        numSing as number of first large singular vectors.
    eps: float
        It gives the approximate level cut for singular vectors, which
        is a trade-off parameter for efficency and accuracy. Usually eps
        is between (1.5, 2], and the complexity reduce from the quadratic in number of
        nodes to the near linear in number of edges.
        Larger eps gives faster detection, but may miss the denser blocks.
        Default is 1.6.
    numSing: int
        The number of first large left singular vectors used in our algorithm
    qfun: string options ['exp' | 'pl' | 'lin']
        which kind of qfun the algorithm uses, choosing from 'exp' for
        exponential (recommended), 'pl' for power-law, 'lin' for linear
        Default is 'exp'.
    b: float
        The base of exponetial qfun, or the exponent of power-law qfun, or
        absolute slope of linear qfun
        Default is 32.
    '''
    def __init__(self, graph, **params):
        # Keep a handle to the input graph and read hyper-parameters from
        # **params, falling back to the documented defaults.
        self.graph = graph
        self.alg = param_default(params, 'alg', 'fastgreedy')
        self.eps = param_default(params, 'eps', 1.6)
        self.numSing = param_default(params, 'numSing', 10)
        self.qfun = param_default(params, 'qfun', 'exp')
        self.b = param_default(params, 'b', 32)

    def __str__(self):
        # Debug-friendly dump of all instance attributes.
        return str(vars(self))

    def run(self, k: int = 1, level: int = 0, eps: float = 1.6):
        '''Detect and return the top-k suspicious dense blocks.
        Parameters:
        --------
        k: int
            The number of blocks we need from the algorithm
        level: int
            The level of signals used for anomaly detection. Choose in [0 | 1 | 2 |
            3]. 0: topology only. 1: topology with time. 2: topology with category
            (e.g. rating score). 3: all three.
            Default is 0.
        eps: float
            Truncation factor for the singular-vector level cut. When left at
            the default (1.6) the instance-level ``self.eps`` is used instead.
        Returns:
        --------
        list of per-block tuples:
        ( (rows, leveled columns), objective value, nonzero columns,
          column scores of nonzero columns ).
        '''
        # BUGFIX: the original branch discarded any caller-supplied eps and
        # forced 1.6; an explicit eps must take precedence over self.eps.
        epsuse = eps if eps != 1.6 else self.eps
        self.level = level
        nprop = self.graph.nprop
        graph = self.graph
        # Property tensors for the temporal / categorical signals; stay None
        # unless the requested level needs them.
        tsprop, rateprop = None, None
        if self.level == 0:
            ptype = [Ptype.freq]
        elif self.level == 1:
            ptype = [Ptype.freq, Ptype.ts]
            if nprop < 1:
                raise Exception("Error: at least 3-mode graph tensor is needed for level 1")
            tsprop = graph.get_time_tensor()
        elif self.level == 2:
            ptype = [Ptype.freq, Ptype.rate]
            if nprop < 1:
                raise Exception("Error: at least 3-mode graph tensor is needed for level 2")
            # The categorical-bin mode in the graph tensor (if it exists)
            # counts from zero: mode 3 when a time mode is also present,
            # otherwise mode 2.
            modec = 3 if nprop > 1 else 2
            rateprop = graph.get_one_prop_tensor(modec)
        elif self.level == 3:
            ptype = [Ptype.freq, Ptype.ts, Ptype.rate]
            if nprop < 2:
                raise Exception("Error: at least 4-mode graph tensor is needed for level 3")
            tsprop = graph.get_time_tensor()
            # Rating bins live in mode 3 when both time and category exist.
            rateprop = graph.get_one_prop_tensor(3)
        else:
            print("Warning: no run level ", self.level, ", use level 0 instead!")
            ptype = [Ptype.freq]
        bdres = holoscope_interface(graph.sm.astype(float),
                                    self.alg, ptype, self.qfun, self.b,
                                    tsprop=tsprop, rateprop=rateprop,
                                    nblock=k, eps=epsuse, numSing=self.numSing)
        nres = []
        # The optimizer instance is the last element of the interface result
        # and keeps the k best blocks found.
        opt = bdres[-1]
        for nb in range(k):
            res = opt.nbests[nb]
            print('block{}: \n\tobjective value {}'.format(nb + 1, res[0]))
            levelcols = score_level_objects(res[1][1])
            nnzcols = nonzero_objects(res[1][1])
            rows = res[1][0]
            nleveledges = graph.get_subgraph_nedges( rows, levelcols )
            nedges = graph.get_subgraph_nedges( rows, nnzcols )
            print( '\tNode size: {} x {}, edge size {}'.format(len(rows),
                len(levelcols), nleveledges) )
            print( '\tRow and nonzero columns {} x {}, edge size: {} with camouflage.'.format(
                len(rows), len(nnzcols), nedges) )
            nres.append( ( (rows, levelcols), res[0], nnzcols, res[1][1][nnzcols] ) )
        self.nres = nres
        return nres

    def anomaly_detection(self, k: int = 1, eps: float = 1.6):
        # Convenience alias for run() at the default signal level.
        return self.run(k=k, eps=eps)

    def save(self, outpath):
        '''Pickle the most recent detection results (self.nres) to outpath.'''
        import pickle
        # BUGFIX: use a context manager so the file handle is always closed,
        # even if pickling raises.
        with open(outpath, 'wb') as out:
            pickle.dump(self.nres, out)
| [
"time.clock",
"matplotlib.pyplot.ylabel",
"numpy.log",
"math.sqrt",
"numpy.iinfo",
"spartan.util.basicutil.param_default",
"numpy.argsort",
"numpy.array",
"math.log",
"sys.exit",
"scipy.sparse.linalg.svds",
"copy.copy",
"numpy.multiply",
"scipy.sparse.lil_matrix",
"numpy.where",
"numpy... | [((622, 643), 'numpy.argsort', 'np.argsort', (['objscores'], {}), '(objscores)\n', (632, 643), True, 'import numpy as np\n'), ((1147, 1172), 'scipy.stats.pareto.fit', 'ss.pareto.fit', (['tailscores'], {}), '(tailscores)\n', (1160, 1172), True, 'import scipy.stats as ss\n'), ((1184, 1228), 'scipy.stats.pareto.cdf', 'ss.pareto.cdf', (['tailscores', 'shape', 'pos', 'scale'], {}), '(tailscores, shape, pos, scale)\n', (1197, 1228), True, 'import scipy.stats as ss\n'), ((1246, 1268), 'numpy.argwhere', 'np.argwhere', (['(cdfs >= p)'], {}), '(cdfs >= p)\n', (1257, 1268), True, 'import numpy as np\n'), ((1605, 1626), 'numpy.argmax', 'np.argmax', (['diffscores'], {}), '(diffscores)\n', (1614, 1626), True, 'import numpy as np\n'), ((681, 721), 'numpy.percentile', 'np.percentile', (['sortscores', '[alpha * 100]'], {}), '(sortscores, [alpha * 100])\n', (694, 721), True, 'import numpy as np\n'), ((1524, 1545), 'numpy.argsort', 'np.argsort', (['objscores'], {}), '(objscores)\n', (1534, 1545), True, 'import numpy as np\n'), ((1571, 1590), 'numpy.diff', 'np.diff', (['sortscores'], {}), '(sortscores)\n', (1578, 1590), True, 'import numpy as np\n'), ((1736, 1759), 'numpy.where', 'np.where', (['(objscores > 0)'], {}), '(objscores > 0)\n', (1744, 1759), True, 'import numpy as np\n'), ((5991, 6003), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5999, 6003), True, 'import numpy as np\n'), ((6037, 6078), 'numpy.zeros', 'np.zeros', (['graphmat.shape[1]'], {'dtype': 'np.int'}), '(graphmat.shape[1], dtype=np.int)\n', (6045, 6078), True, 'import numpy as np\n'), ((6165, 6177), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6173, 6177), True, 'import numpy as np\n'), ((6351, 6363), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6359, 6363), True, 'import numpy as np\n'), ((6384, 6396), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6392, 6396), True, 'import numpy as np\n'), ((6420, 6432), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6428, 6432), True, 
'import numpy as np\n'), ((6514, 6526), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6522, 6526), True, 'import numpy as np\n'), ((6550, 6562), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6558, 6562), True, 'import numpy as np\n'), ((6589, 6601), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6597, 6601), True, 'import numpy as np\n'), ((7608, 7626), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n, n)'], {}), '((n, n))\n', (7618, 7626), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((8032, 8050), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n, n)'], {}), '((n, n))\n', (8042, 8050), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((8392, 8401), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (8398, 8401), True, 'import numpy as np\n'), ((12732, 12748), 'numpy.array', 'np.array', (['self.A'], {}), '(self.A)\n', (12740, 12748), True, 'import numpy as np\n'), ((12802, 12820), 'numpy.array', 'np.array', (['self.fbs'], {}), '(self.fbs)\n', (12810, 12820), True, 'import numpy as np\n'), ((12847, 12868), 'numpy.array', 'np.array', (['self.bsusps'], {}), '(self.bsusps)\n', (12855, 12868), True, 'import numpy as np\n'), ((13211, 13239), 'numpy.array', 'np.array', (['([maxint] * self.nU)'], {}), '([maxint] * self.nU)\n', (13219, 13239), True, 'import numpy as np\n'), ((13490, 13504), 'numpy.sum', 'np.sum', (['self.A'], {}), '(self.A)\n', (13496, 13504), True, 'import numpy as np\n'), ((15283, 15297), 'numpy.sum', 'np.sum', (['self.A'], {}), '(self.A)\n', (15289, 15297), True, 'import numpy as np\n'), ((21043, 21079), 'scipy.sparse.linalg.svds', 'slin.svds', (['sm'], {'k': 'numSing', 'which': '"""LM"""'}), "(sm, k=numSing, which='LM')\n", (21052, 21079), True, 'import scipy.sparse.linalg as slin\n'), ((21092, 21104), 'numpy.fliplr', 'np.fliplr', (['u'], {}), '(u)\n', (21101, 21104), True, 'import numpy as np\n'), ((22226, 22238), 'numpy.array', 'np.array', (['CU'], {}), '(CU)\n', (22234, 22238), True, 'import 
numpy as np\n'), ((22257, 22269), 'numpy.array', 'np.array', (['CV'], {}), '(CV)\n', (22265, 22269), True, 'import numpy as np\n'), ((22621, 22633), 'numpy.fliplr', 'np.fliplr', (['u'], {}), '(u)\n', (22630, 22633), True, 'import numpy as np\n'), ((22647, 22660), 'numpy.flipud', 'np.flipud', (['vt'], {}), '(vt)\n', (22656, 22660), True, 'import numpy as np\n'), ((24363, 24380), 'numpy.array', 'np.array', (['self.CU'], {}), '(self.CU)\n', (24371, 24380), True, 'import numpy as np\n'), ((24399, 24416), 'numpy.array', 'np.array', (['self.CV'], {}), '(self.CV)\n', (24407, 24416), True, 'import numpy as np\n'), ((28108, 28120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28118, 28120), True, 'import matplotlib.pyplot as plt\n'), ((28129, 28152), 'matplotlib.pyplot.plot', 'plt.plot', (['self.vxs', '"""-"""'], {}), "(self.vxs, '-')\n", (28137, 28152), True, 'import matplotlib.pyplot as plt\n'), ((28161, 28218), 'matplotlib.pyplot.title', 'plt.title', (['"""The convergence curve of simulated anealing."""'], {}), "('The convergence curve of simulated anealing.')\n", (28170, 28218), True, 'import matplotlib.pyplot as plt\n'), ((28227, 28256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of iterations"""'], {}), "('# of iterations')\n", (28237, 28256), True, 'import matplotlib.pyplot as plt\n'), ((28265, 28294), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""objective value"""'], {}), "('objective value')\n", (28275, 28294), True, 'import matplotlib.pyplot as plt\n'), ((31572, 31592), 'os.path.isfile', 'os.path.isfile', (['wmat'], {}), '(wmat)\n', (31586, 31592), False, 'import sys, os, time\n'), ((32466, 32478), 'time.clock', 'time.clock', ([], {}), '()\n', (32476, 32478), False, 'import sys, os, time\n'), ((33497, 33537), 'numpy.multiply', 'np.multiply', (['opt.bestfbs', 'opt.bestbsusps'], {}), '(opt.bestfbs, opt.bestbsusps)\n', (33508, 33537), True, 'import numpy as np\n'), ((35871, 35913), 'spartan.util.basicutil.param_default', 'param_default', 
(['params', '"""alg"""', '"""fastgreedy"""'], {}), "(params, 'alg', 'fastgreedy')\n", (35884, 35913), False, 'from spartan.util.basicutil import param_default\n'), ((35933, 35966), 'spartan.util.basicutil.param_default', 'param_default', (['params', '"""eps"""', '(1.6)'], {}), "(params, 'eps', 1.6)\n", (35946, 35966), False, 'from spartan.util.basicutil import param_default\n'), ((35990, 36026), 'spartan.util.basicutil.param_default', 'param_default', (['params', '"""numSing"""', '(10)'], {}), "(params, 'numSing', 10)\n", (36003, 36026), False, 'from spartan.util.basicutil import param_default\n'), ((36047, 36083), 'spartan.util.basicutil.param_default', 'param_default', (['params', '"""qfun"""', '"""exp"""'], {}), "(params, 'qfun', 'exp')\n", (36060, 36083), False, 'from spartan.util.basicutil import param_default\n'), ((36101, 36131), 'spartan.util.basicutil.param_default', 'param_default', (['params', '"""b"""', '(32)'], {}), "(params, 'b', 32)\n", (36114, 36131), False, 'from spartan.util.basicutil import param_default\n'), ((39286, 39313), 'pickle.dump', 'pickle.dump', (['self.nres', 'out'], {}), '(self.nres, out)\n', (39297, 39313), False, 'import pickle\n'), ((826, 853), 'numpy.argwhere', 'np.argwhere', (['(sortscores > 0)'], {}), '(sortscores > 0)\n', (837, 853), True, 'import numpy as np\n'), ((6779, 6834), 'numpy.multiply', 'np.multiply', (['self.tspim.dropslops', 'self.tspim.dropfalls'], {}), '(self.tspim.dropslops, self.tspim.dropfalls)\n', (6790, 6834), True, 'import numpy as np\n'), ((7073, 7104), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['colWeights'], {}), '(colWeights)\n', (7092, 7104), False, 'from sklearn import preprocessing\n'), ((7130, 7154), 'scipy.stats.logistic.cdf', 'logistic.cdf', (['colWeights'], {}), '(colWeights)\n', (7142, 7154), False, 'from scipy.stats import logistic\n'), ((7967, 7993), 'numpy.log', 'np.log', (['(self.indegrees + 5)'], {}), '(self.indegrees + 5)\n', (7973, 7993), True, 'import numpy as np\n'), ((8564, 
8583), 'numpy.dot', 'np.dot', (['fbs', 'bsusps'], {}), '(fbs, bsusps)\n', (8570, 8583), True, 'import numpy as np\n'), ((8642, 8656), 'numpy.float64', 'np.float64', (['de'], {}), '(de)\n', (8652, 8656), True, 'import numpy as np\n'), ((10971, 10988), 'numpy.zeros', 'np.zeros', (['self.nV'], {}), '(self.nV)\n', (10979, 10988), True, 'import numpy as np\n'), ((13682, 13699), 'copy.copy', 'copy.copy', (['self.A'], {}), '(self.A)\n', (13691, 13699), False, 'import copy\n'), ((16729, 16774), 'numpy.multiply', 'np.multiply', (['tspim.dropfalls', 'tspim.dropslops'], {}), '(tspim.dropfalls, tspim.dropslops)\n', (16740, 16774), True, 'import numpy as np\n'), ((16837, 16853), 'numpy.ones', 'np.ones', (['self.nV'], {}), '(self.nV)\n', (16844, 16853), True, 'import numpy as np\n'), ((19191, 19208), 'numpy.log1p', 'np.log1p', (['sm.data'], {}), '(sm.data)\n', (19199, 19208), True, 'import numpy as np\n'), ((19447, 19467), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n1, n1)'], {}), '((n1, n1))\n', (19457, 19467), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((19914, 19941), 'os.path.isfile', 'os.path.isfile', (['self.tsprop'], {}), '(self.tsprop)\n', (19928, 19941), False, 'import sys, os, time\n'), ((20537, 20549), 'time.clock', 'time.clock', ([], {}), '()\n', (20547, 20549), False, 'import sys, os, time\n'), ((21602, 21646), 'numpy.argsort', 'np.argsort', (['(-ui)'], {'axis': 'None', 'kind': '"""quicksort"""'}), "(-ui, axis=None, kind='quicksort')\n", (21612, 21646), True, 'import numpy as np\n'), ((23405, 23449), 'numpy.argsort', 'np.argsort', (['(-ui)'], {'axis': 'None', 'kind': '"""quicksort"""'}), "(-ui, axis=None, kind='quicksort')\n", (23415, 23449), True, 'import numpy as np\n'), ((23469, 23513), 'numpy.argsort', 'np.argsort', (['(-vi)'], {'axis': 'None', 'kind': '"""quicksort"""'}), "(-vi, axis=None, kind='quicksort')\n", (23479, 23513), True, 'import numpy as np\n'), ((26525, 26542), 'numpy.zeros', 'np.zeros', (['self.nU'], {}), 
'(self.nU)\n', (26533, 26542), True, 'import numpy as np\n'), ((26544, 26561), 'numpy.zeros', 'np.zeros', (['self.nV'], {}), '(self.nV)\n', (26552, 26561), True, 'import numpy as np\n'), ((26563, 26580), 'numpy.zeros', 'np.zeros', (['self.nV'], {}), '(self.nV)\n', (26571, 26580), True, 'import numpy as np\n'), ((26828, 26856), 'numpy.zeros', 'np.zeros', (['self.nU'], {'dtype': 'int'}), '(self.nU, dtype=int)\n', (26836, 26856), True, 'import numpy as np\n'), ((27600, 27642), 'numpy.multiply', 'np.multiply', (['self.bestbsusps', 'self.bestfbs'], {}), '(self.bestbsusps, self.bestfbs)\n', (27611, 27642), True, 'import numpy as np\n'), ((31533, 31558), 'scipy.sparse.issparse', 'sci.sparse.issparse', (['wmat'], {}), '(wmat)\n', (31552, 31558), True, 'import scipy as sci\n'), ((32694, 32720), 'numpy.ones', 'np.ones', (['opt.nU'], {'dtype': 'int'}), '(opt.nU, dtype=int)\n', (32701, 32720), True, 'import numpy as np\n'), ((4330, 4352), 'os.path.isfile', 'os.path.isfile', (['tsprop'], {}), '(tsprop)\n', (4344, 4352), False, 'import sys, os, time\n'), ((5289, 5313), 'os.path.isfile', 'os.path.isfile', (['rateprop'], {}), '(rateprop)\n', (5303, 5313), False, 'import sys, os, time\n'), ((10589, 10637), 'sklearn.preprocessing.minmax_scale', 'preprocessing.minmax_scale', (['susprates'], {'copy': '(True)'}), '(susprates, copy=True)\n', (10615, 10637), False, 'from sklearn import preprocessing\n'), ((13165, 13183), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (13173, 13183), True, 'import numpy as np\n'), ((14194, 14210), 'numpy.array', 'np.array', (['self.Y'], {}), '(self.Y)\n', (14202, 14210), True, 'import numpy as np\n'), ((15178, 15199), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (15194, 15199), False, 'import sys, os, time\n'), ((15216, 15234), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15232, 15234), False, 'import sys, os, time\n'), ((15856, 15883), 'scipy.sparse.csr_matrix', 'csr_matrix', (['self.windegrees'], 
{}), '(self.windegrees)\n', (15866, 15883), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((16791, 16802), 'numpy.log1p', 'np.log1p', (['w'], {}), '(w)\n', (16799, 16802), True, 'import numpy as np\n'), ((21364, 21377), 'math.sqrt', 'math.sqrt', (['si'], {}), '(si)\n', (21373, 21377), False, 'import math\n'), ((21815, 21836), 'math.log', 'math.log', (['jr', 'self.nU'], {}), '(jr, self.nU)\n', (21823, 21836), False, 'import math\n'), ((23076, 23089), 'math.sqrt', 'math.sqrt', (['si'], {}), '(si)\n', (23085, 23089), False, 'import math\n'), ((23682, 23703), 'math.log', 'math.log', (['jr', 'self.nU'], {}), '(jr, self.nU)\n', (23690, 23703), False, 'import math\n'), ((27267, 27287), 'numpy.array', 'np.array', (['self.bestA'], {}), '(self.bestA)\n', (27275, 27287), True, 'import numpy as np\n'), ((27323, 27345), 'numpy.array', 'np.array', (['self.bestfbs'], {}), '(self.bestfbs)\n', (27331, 27345), True, 'import numpy as np\n'), ((27384, 27409), 'numpy.array', 'np.array', (['self.bestbsusps'], {}), '(self.bestbsusps)\n', (27392, 27409), True, 'import numpy as np\n'), ((33358, 33369), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (33366, 33369), False, 'import sys, os, time\n'), ((33412, 33424), 'time.clock', 'time.clock', ([], {}), '()\n', (33422, 33424), False, 'import sys, os, time\n'), ((7310, 7348), 'sklearn.preprocessing.minmax_scale', 'preprocessing.minmax_scale', (['colWeights'], {}), '(colWeights)\n', (7336, 7348), False, 'from sklearn import preprocessing\n'), ((9080, 9101), 'numpy.reciprocal', 'np.reciprocal', (['(rb + k)'], {}), '(rb + k)\n', (9093, 9101), True, 'import numpy as np\n'), ((20854, 20866), 'time.clock', 'time.clock', ([], {}), '()\n', (20864, 20866), False, 'import sys, os, time\n'), ((22050, 22075), 'math.ceil', 'math.ceil', (['(nn ** (1 / ep))'], {}), '(nn ** (1 / ep))\n', (22059, 22075), False, 'import math\n'), ((23872, 23899), 'math.ceil', 'math.ceil', (['(nn ** (1.0 / ep))'], {}), '(nn ** (1.0 / ep))\n', (23881, 
23899), False, 'import math\n'), ((25203, 25252), 'numpy.fmax', 'np.fmax', (['(self.b * (ratios[greatbdidx] - 1) + 1)', '(0)'], {}), '(self.b * (ratios[greatbdidx] - 1) + 1, 0)\n', (25210, 25252), True, 'import numpy as np\n'), ((25323, 25334), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25331, 25334), False, 'import sys, os, time\n'), ((9023, 9036), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (9031, 9036), True, 'import numpy as np\n'), ((9546, 9567), 'numpy.reciprocal', 'np.reciprocal', (['(rb + k)'], {}), '(rb + k)\n', (9559, 9567), True, 'import numpy as np\n'), ((21495, 21513), 'math.sqrt', 'math.sqrt', (['self.nU'], {}), '(self.nU)\n', (21504, 21513), False, 'import math\n'), ((23250, 23268), 'math.sqrt', 'math.sqrt', (['self.nU'], {}), '(self.nU)\n', (23259, 23268), False, 'import math\n'), ((23298, 23316), 'math.sqrt', 'math.sqrt', (['self.nV'], {}), '(self.nV)\n', (23307, 23316), False, 'import math\n'), ((33130, 33142), 'time.clock', 'time.clock', ([], {}), '()\n', (33140, 33142), False, 'import sys, os, time\n'), ((7469, 7489), 'numpy.log1p', 'np.log1p', (['colWeights'], {}), '(colWeights)\n', (7477, 7489), True, 'import numpy as np\n'), ((9479, 9492), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (9487, 9492), True, 'import numpy as np\n'), ((9588, 9607), 'numpy.array', 'np.array', (['arrbsusps'], {}), '(arrbsusps)\n', (9596, 9607), True, 'import numpy as np\n')] |
# Written by <NAME>
# <EMAIL>
# July 12, 2018
# Last Updated: Aug 31, 2018
# requirements:
# * pyYAML: https://pyyaml.org/wiki/PyYAMLDocumentation
# or
# * ruamel.yaml: https://pypi.org/project/ruamel.yaml
import os
import yaml
import sys
import math
import numpy as np
class DataBox:
    # Handles extraction of data from smr repository
    def __init__(self):
        # initializes a new DataBox object
        self.__default_tags = ("security","performance","memory","resource management","determinism")

    def _load_yaml(self, path):
        # Parse one yaml file and return its contents.
        # - path is a string of the file to parse
        # NOTE: the original code used bare yaml.load() on an open() that was
        # never closed. safe_load is used here because these repository files
        # are plain data (no custom tags), and bare yaml.load is deprecated
        # and unsafe in PyYAML >= 5.1; the context manager fixes the handle leak.
        with open(path, "r") as stream:
            return yaml.safe_load(stream)

    def _count_files(self, directory, suffix):
        # Count files under directory whose name ends with suffix.
        count = 0
        for (dirname, dirs, files) in os.walk(directory):
            for filename in files:
                if filename.endswith(suffix):
                    count += 1
        return count

    def getProjectCount(self, directory):
        # calculates the number of projects in the given directory
        # - directory is a string of the path to search (e.x., "py-data")
        # - returns an int, or None if directory is invalid
        if not self.__validate(directory):
            return
        return self._count_files(directory, 'project.yml')

    def getProblemCount(self, directory):
        # calculates the number of problems in the given directory
        # - directory is a string of the path to search (e.x., "py-data")
        # - returns an int, or None if directory is invalid
        if not self.__validate(directory):
            return
        return self._count_files(directory, 'problem.yml')

    def _getStatList(self, directory, stat):
        # get a list of all stat counts in the directory
        # - directory is a string of the path to search (e.x., "py-data")
        # - stat is a string of the stat to search for (e.x., "stars")
        # - returns a list object
        if not self.__validate(directory, string=stat):
            return
        stats = []
        for (dirname, dirs, files) in os.walk(directory):
            for filename in files:
                if filename.endswith('project.yml'):
                    data = self._load_yaml(os.path.join(dirname, filename))
                    stats.append(int(data["repository"]["stats"][stat]))
        return stats

    def getStatDistribution(self, directory, stat, ranges):
        # calculates the number of project stars, watches, or forks
        # within a range in the given directory
        # - directory is a string of the path to search (e.x., "py-data")
        # - stat is a string of the stat to search for (e.x., "stars")
        # - ranges is a tuple of bins to separate into, right boundary excluded
        # -- e.x. if ranges = (0,1,50,100), the boundaries are [0,1),[1,50),[50,100),[100, inf)
        # - returns a dictionary with fields range:frequency
        if not self.__validate(directory, string=stat, int_tup=ranges):
            return
        stat_list = self._getStatList(directory, stat)
        # append +inf so the last bin is open-ended on the right
        range_bins = np.array(list(ranges) + [math.inf])
        counts = np.histogram(stat_list, bins=range_bins)[0]
        dist = {}
        for index in range(len(ranges) - 1):
            dist["[" + str(ranges[index]) + ", " + str(ranges[index + 1]) + ")"] = counts[index]
        dist["[" + str(max(ranges)) + ", " + str(math.inf) + ")"] = counts[len(counts) - 1]
        return dist

    def getTagDistribution(self, directory, tag_requests=None):
        # calculates the number of project by tag
        # - directory is a string of the path to search (e.x., "py-data")
        # - tag_requests is a tuple of tags to get the distribution of
        # --(by default get all)
        # - returns a dictionary with fields tag:frequency
        if tag_requests is None:
            tag_requests = self.__default_tags
        if not self.__validate(directory, str_tup=tag_requests):
            return
        tag_nums = {tag.lower().strip(): 0 for tag in tag_requests}
        for (dirname, dirs, files) in os.walk(directory):
            for filename in files:
                if filename.endswith('problem.yml'):
                    code = self._load_yaml(os.path.join(dirname, filename))
                    # split because some have more than one tag
                    for tag in code["fix"]["tag"].split(","):
                        tag = tag.strip()
                        if tag in tag_nums:
                            # BUGFIX: the break used to execute
                            # unconditionally, so only the FIRST tag of each
                            # problem was ever inspected; now every tag is
                            # checked and each problem is counted once, under
                            # its first recognized tag.
                            tag_nums[tag] += 1
                            break
        return tag_nums

    def getAPIs(self, directory):
        # collects the frequency of problematic (prior to fix) APIs
        # - directory is a string of the path to search (e.x., "py-data")
        # - returns a dictionary with fields API:frequency
        if not self.__validate(directory):
            return
        apis = {}
        for (dirname, dirs, files) in os.walk(directory):
            # only problems classified as api-related carry an "api" field
            if ("api-related" not in dirname):
                continue
            for filename in files:
                if filename.endswith('problem.yml'):
                    code = self._load_yaml(os.path.join(dirname, filename))
                    for api in code["api"].split():
                        api = api.strip()
                        apis[api] = apis.get(api, 0) + 1
        return apis

    def getProblemTypes(self, directory):
        # counts the frequency of problem types
        # - directory is a string of the path to search (e.x., "py-data")
        # - returns a dictionary with fields type:frequency
        if not self.__validate(directory):
            return
        problem_types = {"api-related": 0, "general-practise": 0, "project-specific": 0}
        for (dirname, dirs, files) in os.walk(directory):
            for ptype in problem_types.keys():
                if (ptype not in dirname):
                    continue
                for filename in files:
                    if filename.endswith('problem.yml'):
                        problem_types[ptype] += 1
        return problem_types

    def getSources(self, directory):
        # counts the number of problems from each source
        # - directory is a string of the path to search (e.x., "py-data")
        # - returns a dictionary with fields source:frequency
        if not self.__validate(directory):
            return
        sources = {}
        for (dirname, dirs, files) in os.walk(directory):
            for filename in files:
                if filename.endswith('problem.yml'):
                    code = self._load_yaml(os.path.join(dirname, filename))
                    source = code["source"]["name"].strip()
                    sources[source] = sources.get(source, 0) + 1
        return sources

    def __validate(self, directory, string="stars", int_tup=(0,), str_tup=("",)):
        # Shared argument validation for the public getters; prints an error
        # to stderr and returns False on the first invalid argument.
        if not isinstance(directory, str):
            print("Error: directory should be a string. Aborting command.", file=sys.stderr)
            return False
        if not isinstance(string, str) or string.lower() not in ("stars", "watches", "forks"):
            print("Error: stat must be 'stars', 'watches', or 'forks'." + \
                  " Aborting command.", file=sys.stderr)
            return False
        if not isinstance(int_tup, tuple) or not all([isinstance(num, int) for num in int_tup]):
            print("Error: ranges must be a tuple of ints. Aborting command.", file=sys.stderr)
            return False
        if not isinstance(str_tup, tuple) or not all([isinstance(tag, str) for tag in str_tup]):
            print("Error: tags must be a tuple of strings. Aborting command.", file=sys.stderr)
            return False
        return True
| [
"numpy.array",
"numpy.histogram",
"os.walk"
] | [((814, 832), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (821, 832), False, 'import os\n'), ((1269, 1287), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1276, 1287), False, 'import os\n'), ((1816, 1834), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1823, 1834), False, 'import os\n'), ((2850, 2867), 'numpy.array', 'np.array', (['list_rg'], {}), '(list_rg)\n', (2858, 2867), True, 'import numpy as np\n'), ((3855, 3873), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (3862, 3873), False, 'import os\n'), ((4701, 4719), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (4708, 4719), False, 'import os\n'), ((5591, 5609), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (5598, 5609), False, 'import os\n'), ((6209, 6227), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (6216, 6227), False, 'import os\n'), ((2884, 2924), 'numpy.histogram', 'np.histogram', (['stat_list'], {'bins': 'range_bins'}), '(stat_list, bins=range_bins)\n', (2896, 2924), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import random
import keras as ks
import numpy as np
from Markov_elem import Markov_elem
class Markov_chain:
    """A first-order Markov chain built over the words of a training corpus."""

    def __init__(self):
        # word -> Markov_elem mapping, filled by compute_elems()
        self._elems = {}

    def compute_elems(self, words, all):
        # Split the corpus into sentences; one is used to seed generateText().
        self._sentences = list(ks.preprocessing.text.text_to_word_sequence(
            all, filters='\t"()#$%&()*+-/:;<=>@[\]^_`{|}~', lower=True, split='.!?;'))
        # Re-tokenize the whole corpus into individual words.
        words = ks.preprocessing.text.text_to_word_sequence(
            all, filters='\t!"#$%&()*+,-./:;<=>?@[\]^_`{|}~ ', lower=True, split=' ')
        self._diff_words = list(set(words))
        self._elems = {word: Markov_elem(word) for word in self._diff_words}
        # Record every adjacent word pair, then normalize the counts
        # into transition probabilities.
        for current, following in zip(words, words[1:]):
            self._elems[current].add_following(following)
        for elem in self._elems.values():
            elem.calc_following_proba()

    def generateText(self, size):
        # Start from a random seed word, emit one random sentence, then take
        # size - 1 steps through the chain.
        seed = self._diff_words[random.randint(0, len(self._elems) - 1)]
        elem = self._elems[seed]
        text = [np.random.choice(self._sentences)]
        for _ in range(size - 1):
            word = elem.pick_next()
            text.append(word)
            elem = self._elems[word]
        return text

    def isclose(self, sentence):
        # Distance between the sentence and the learned chain: sum over
        # adjacent word pairs of 1 - P(next | current), for known words.
        words = ks.preprocessing.text.text_to_word_sequence(
            sentence, filters='\t!"#$%&()*+,-./:;<=>?@[\]^_`{|}~ ', lower=True, split=' ')
        dist = 0
        for current, following in zip(words, words[1:]):
            if current in self._elems:
                dist += 1 - self._elems[current].get_following_proba(following)
        return dist
| [
"numpy.random.choice",
"Markov_elem.Markov_elem",
"keras.preprocessing.text.text_to_word_sequence"
] | [((394, 517), 'keras.preprocessing.text.text_to_word_sequence', 'ks.preprocessing.text.text_to_word_sequence', (['all'], {'filters': '"""\t!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~ """', 'lower': '(True)', 'split': '""" """'}), '(all, filters=\n \'\\t!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~ \', lower=True, split=\' \')\n', (437, 517), True, 'import keras as ks\n'), ((1231, 1359), 'keras.preprocessing.text.text_to_word_sequence', 'ks.preprocessing.text.text_to_word_sequence', (['sentence'], {'filters': '"""\t!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~ """', 'lower': '(True)', 'split': '""" """'}), '(sentence, filters=\n \'\\t!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~ \', lower=True, split=\' \')\n', (1274, 1359), True, 'import keras as ks\n'), ((986, 1019), 'numpy.random.choice', 'np.random.choice', (['self._sentences'], {}), '(self._sentences)\n', (1002, 1019), True, 'import numpy as np\n'), ((259, 382), 'keras.preprocessing.text.text_to_word_sequence', 'ks.preprocessing.text.text_to_word_sequence', (['all'], {'filters': '"""\t"()#$%&()*+-/:;<=>@[\\\\]^_`{|}~"""', 'lower': '(True)', 'split': '""".!?;"""'}), '(all, filters=\n \'\\t"()#$%&()*+-/:;<=>@[\\\\]^_`{|}~\', lower=True, split=\'.!?;\')\n', (302, 382), True, 'import keras as ks\n'), ((591, 608), 'Markov_elem.Markov_elem', 'Markov_elem', (['word'], {}), '(word)\n', (602, 608), False, 'from Markov_elem import Markov_elem\n')] |
# We need install numpy in order to import it
import numpy as np

# input two matrices
mat1 = ([1, 6, 5], [3, 4, 8], [2, 12, 3])
mat2 = ([3, 4, 6], [5, 6, 7], [6, 56, 7])

# np.dot performs matrix multiplication for 2-D inputs
res = np.dot(mat1, mat2)
# print resulted matrix
print(res)

# input two matrices of size n x m
matrix1 = [[12, 7, 3],
           [4, 5, 6],
           [7, 8, 9]]
matrix2 = [[5, 8, 1],
           [6, 7, 3],
           [4, 5, 9]]

# the same product computed in pure Python: each cell is the dot product of
# a row of matrix1 with a column of matrix2
res = [[sum(matrix1[i][k] * matrix2[k][j] for k in range(len(matrix2)))
        for j in range(len(matrix2[0]))]
       for i in range(len(matrix1))]
print(res)
| [
"numpy.dot"
] | [((217, 235), 'numpy.dot', 'np.dot', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (223, 235), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# maskbit.py
#
# @Author: <NAME> <andrews>
# @Date: 2017-10-06 10:10:00
# @Last modified by: <NAME> (<EMAIL>)
# @Last modified time: 2018-11-26 11:51:50
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import marvin
from marvin.extern.yanny import yanny
# Stores the maskbits yanny file structure so that we don't need to open it more than once.
_maskbits_from_yanny = None
def _read_maskbit_schemas():
    """Read all available SDSS maskbit schemas from yanny file.

    The parsed yanny structure is cached in the module-level
    ``_maskbits_from_yanny`` so the file is opened at most once.

    Returns:
        Record Array: all bits for all schemas.
    """
    global _maskbits_from_yanny
    if _maskbits_from_yanny is None:
        par_path = os.path.join(os.path.dirname(marvin.__file__), 'data', 'sdssMaskbits.par')
        _maskbits_from_yanny = yanny(par_path, np=True)
    return _maskbits_from_yanny['MASKBITS']
def get_available_maskbits():
    """Get names of available maskbit schemas from yanny file.

    Returns:
        list: Names of available maskbits.
    """
    # Column 0 of each record is the schema (flag) name; deduplicate and sort.
    names = {record[0] for record in _read_maskbit_schemas()}
    return sorted(names)
def get_manga_target(flag_id, bitmasks, header):
    """Get MANGA_TARGET[``flag_id``] flag.

    Parameters:
        flag_id (str):
            Flag ID number (e.g., "1" for MANGA_TARGET1).
        bitmasks (dict):
            `Maskbit` objects.
        header (`astropy.io.fits.header.Header`):
            File header.

    Returns:
        `Maskbit`
    """
    flag_id = str(int(flag_id))
    target = bitmasks['MANGA_TARGET{}'.format(flag_id)]
    # Older headers spell the keyword MNGTRG<N>; newer ones use MNGTARG<N>.
    try:
        raw = header['MNGTRG{}'.format(flag_id)]
    except KeyError:
        raw = header['MNGTARG{}'.format(flag_id)]
    target.mask = int(raw)
    return target
class Maskbit(object):
    """A class representing a maskbit.

    Parameters:
        schema (DataFrame):
            Maskbit schema with ``bit``, ``label``, and ``description`` columns.
        name (str):
            Name of maskbit.
        description (str):
            Description of maskbit.
    """

    def __init__(self, name, schema=None, description=None):
        self.name = name
        # Fall back to the SDSS maskbit yanny file when no schema is supplied.
        self.schema = schema if schema is not None else self._load_schema(name)
        self.description = description
        self.mask = None

    def __repr__(self):
        # For scalar (or unset) masks show the resolved labels; for array
        # masks showing the shape is far more readable than the full listing.
        if (isinstance(self.mask, int) or self.mask is None):
            labels = self.labels
        else:
            labels = 'shape={}'.format(self.mask.shape)
        return '<Maskbit {0!r} {1}>'.format(self.name, labels)

    def _load_schema(self, flag_name):
        """Load SDSS Maskbit schema from yanny file.

        Parameters:
            flag_name (str):
                Name of flag.

        Returns:
            DataFrame: Schema of flag.
        """
        maskbits = _read_maskbit_schemas()
        flag = maskbits[maskbits['flag'] == flag_name]
        return pd.DataFrame(flag[['bit', 'label', 'description']])

    @property
    def bits(self):
        """list or None: Bits set in ``mask``, or None if no mask is set."""
        return self.values_to_bits() if self.mask is not None else None

    @property
    def labels(self):
        """list or None: Labels of bits set in ``mask``, or None if no mask is set."""
        return self.values_to_labels() if self.mask is not None else None

    def values_to_bits(self, values=None):
        """Convert mask values to a list of bits set.

        Parameters:
            values (int or array):
                Mask values. If ``None``, apply to entire
                ``Maskbit.mask`` array. Default is ``None``.

        Returns:
            list:
                Bits that are set.

        Example:
            >>> maps = Maps(plateifu='8485-1901')
            >>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.values_to_bits()
            [[[0, 1, 4, 30],
              [0, 1, 4, 30],
              ...
              [0, 1, 4, 30]]]
        """
        bits_set = self._get_a_set(values, convert_to='bits')
        return bits_set

    def _get_uniq_bits(self, values):
        """Return a dictionary of unique bits.

        Parameters:
            values (list):
                A flattened list of mask values.

        Returns:
            dict:
                A unique dictionary of {mask value: bit list} as {key: value}.
        """
        # Decoding only the unique values avoids redundant bit arithmetic
        # when the mask array contains many repeated values.
        uniqvals = set(values)
        vdict = {v: self._value_to_bits(v, self.schema.bit.values) for v in uniqvals}
        return vdict

    def _get_uniq_labels(self, values):
        """Return a dictionary of unique labels.

        Parameters:
            values (list):
                A flattened list of mask values.

        Returns:
            dict:
                A unique dictionary of {mask value: labels list} as {key: value}.
        """
        uniqbits = self._get_uniq_bits(values)
        uniqlabels = {k: self.schema.label[self.schema.bit.isin(v)].values.tolist()
                      for k, v in uniqbits.items()}
        return uniqlabels

    def _get_a_set(self, values, convert_to='bits'):
        """Convert mask values to a list of either bit or label sets.

        Parameters:
            values (int or array):
                Mask values. If ``None``, apply to entire
                ``Maskbit.mask`` array. Default is ``None``.
            convert_to (str):
                Indicates what to convert to. Either "bits" or "labels".

        Returns:
            list:
                Bits/Labels that are set.
        """
        assert (self.mask is not None) or (values is not None), 'Must provide values.'
        values = np.array(self.mask) if values is None else np.array(values)
        ndim = values.ndim
        shape = values.shape
        assert ndim <= 3, '`value` must be int, 1-D array, 2-D array, or 3-D array.'
        # Work on the flattened array and restore the original shape at the end.
        flatmask = values.flatten()
        if convert_to == 'bits':
            uniqvals = self._get_uniq_bits(flatmask)
        elif convert_to == 'labels':
            uniqvals = self._get_uniq_labels(flatmask)
        vallist = list(map(lambda x: uniqvals[x], flatmask))
        if ndim > 0:
            vals_set = np.reshape(vallist, shape).tolist()
        else:
            # Scalar input: unwrap the single decoded entry.
            vals_set = vallist[0]
        return vals_set

    def _value_to_bits(self, value, bits_all):
        """Convert mask value to a list of bits.

        Parameters:
            value (int):
                Mask value.
            bits_all (array):
                All bits for flag.

        Returns:
            list:
                Bits that are set.
        """
        return [it for it in bits_all if int(value) & (1 << it)]

    def values_to_labels(self, values=None):
        """Convert mask values to a list of the labels of bits set.

        Parameters:
            values (int or array):
                Mask values. If ``None``, apply to entire
                ``Maskbit.mask`` array. Default is ``None``.

        Returns:
            list:
                Labels of the bits that are set.

        Example:
            >>> maps = Maps(plateifu='8485-1901')
            >>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.values_to_labels()
            [[['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
              ['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
              ...
              ['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE']]]
        """
        labels_set = self._get_a_set(values, convert_to='labels')
        return labels_set

    def _bits_to_labels(self, nested):
        """Recursively convert a nested list of bits to labels.

        Parameters:
            nested (list):
                Nested list of bits.

        Returns:
            list: Nested list of labels.
        """
        # Base condition: a single bit maps to exactly one schema label.
        if isinstance(nested, (int, np.integer)):
            return self.schema.label[self.schema.bit == nested].values[0]
        return [self._bits_to_labels(it) for it in nested]

    def labels_to_value(self, labels):
        """Convert bit labels into a bit value.

        Parameters:
            labels (str or list):
                Labels of bits to set.

        Returns:
            int: Integer bit value.

        Example:
            >>> maps = Maps(plateifu='8485-1901')
            >>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.labels_to_value('DONOTUSE')
            1073741824
            >>> ha.pixmask.labels_to_value(['NOCOV', 'LOWCOV'])
            3
        """
        if isinstance(labels, str):
            labels = [labels]
        bit_values = []
        for label in labels:
            bit = self.schema.bit[self.schema.label == label]
            # Silently skip labels absent from the schema (empty selection).
            if not bit.empty:
                bit_values.append(bit.values[0])
        return np.sum([2**value for value in bit_values])

    def labels_to_bits(self, labels):
        """Convert bit labels into bits.

        Parameters:
            labels (str or list):
                Labels of bits.

        Returns:
            list: Bits that correspond to the labels.

        Example:
            >>> maps = Maps(plateifu='8485-1901')
            >>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.labels_to_bits('DONOTUSE')
            [30]
            >>> ha.pixmask.labels_to_bits(['NOCOV', 'LOWCOV'])
            [0, 1]
        """
        return self.values_to_bits(self.labels_to_value(labels))

    def get_mask(self, labels, mask=None, dtype=int):
        """Create mask from a list of labels.

        If ``dtype`` is ``int``, then ``get_mask`` can effectively
        perform an OR or AND operation.  However, if ``dtype`` is
        ``bool``, then ``get_mask`` does an OR.

        Parameters:
            labels (str or list):
                Labels of bits.
            mask (int or array):
                User-defined mask. If ``None``, use ``self.mask``.
                Default is ``None``.
            dtype:
                Output dtype, which must be either ``int`` or ``bool``.
                Default is ``int``.

        Returns:
            array: Mask for given labels.

        Example:
            >>> maps = Maps(plateifu='8485-1901')
            >>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'])
            array([[3, 3, 3, ..., 3, 3, 3],
                   ...,
                   [3, 3, 3, ..., 3, 3, 3]])
            >>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'], dtype=bool)
            array([[ True,  True,  True, ...,  True,  True,  True],
                   ...,
                   [ True,  True,  True, ...,  True,  True,  True]], dtype=bool)
        """
        assert dtype in [int, bool], '``dtype`` must be either ``int`` or ``bool``.'
        if isinstance(labels, str):
            labels = [labels]
        schema_labels = self.schema.label.tolist()
        for label in labels:
            if label not in schema_labels:
                raise ValueError('label {0!r} not found in the maskbit schema.'.format(label))
        bits = self.labels_to_bits(labels)
        mask = mask if mask is not None else self.mask
        if len(bits) == 0:
            # NOTE: use the builtin ``int`` here; the ``np.int`` alias was
            # removed in NumPy 1.24 and would raise AttributeError.
            return np.zeros(mask.shape, dtype=int)
        return np.sum([mask & 2**bit for bit in bits], axis=0).astype(dtype)
| [
"numpy.reshape",
"numpy.sum",
"os.path.dirname",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"marvin.extern.yanny.yanny"
] | [((855, 884), 'marvin.extern.yanny.yanny', 'yanny', (['path_maskbits'], {'np': '(True)'}), '(path_maskbits, np=True)\n', (860, 884), False, 'from marvin.extern.yanny import yanny\n'), ((2965, 3016), 'pandas.DataFrame', 'pd.DataFrame', (["flag[['bit', 'label', 'description']]"], {}), "(flag[['bit', 'label', 'description']])\n", (2977, 3016), True, 'import pandas as pd\n'), ((9702, 9748), 'numpy.sum', 'np.sum', (['[(2 ** value) for value in bit_values]'], {}), '([(2 ** value) for value in bit_values])\n', (9708, 9748), True, 'import numpy as np\n'), ((762, 794), 'os.path.dirname', 'os.path.dirname', (['marvin.__file__'], {}), '(marvin.__file__)\n', (777, 794), False, 'import os\n'), ((6481, 6500), 'numpy.array', 'np.array', (['self.mask'], {}), '(self.mask)\n', (6489, 6500), True, 'import numpy as np\n'), ((6524, 6540), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (6532, 6540), True, 'import numpy as np\n'), ((12089, 12123), 'numpy.zeros', 'np.zeros', (['mask.shape'], {'dtype': 'np.int'}), '(mask.shape, dtype=np.int)\n', (12097, 12123), True, 'import numpy as np\n'), ((12140, 12191), 'numpy.sum', 'np.sum', (['[(mask & 2 ** bit) for bit in bits]'], {'axis': '(0)'}), '([(mask & 2 ** bit) for bit in bits], axis=0)\n', (12146, 12191), True, 'import numpy as np\n'), ((7005, 7031), 'numpy.reshape', 'np.reshape', (['vallist', 'shape'], {}), '(vallist, shape)\n', (7015, 7031), True, 'import numpy as np\n')] |
"""This module contains common inference methods."""
__all__ = ['Rejection', 'SMC', 'BayesianOptimization', 'BOLFI']
import logging
from math import ceil
import matplotlib.pyplot as plt
import numpy as np
import elfi.client
import elfi.methods.mcmc as mcmc
import elfi.visualization.interactive as visin
import elfi.visualization.visualization as vis
from elfi.loader import get_sub_seed
from elfi.methods.bo.acquisition import LCBSC
from elfi.methods.bo.gpy_regression import GPyRegression
from elfi.methods.bo.utils import stochastic_optimization
from elfi.methods.posteriors import BolfiPosterior
from elfi.methods.results import BolfiSample, OptimizationResult, Sample, SmcSample
from elfi.methods.utils import (GMDistribution, ModelPrior, arr2d_to_batch,
batch_to_arr2d, ceil_to_batch_size, weighted_var)
from elfi.model.elfi_model import ComputationContext, ElfiModel, NodeReference
from elfi.utils import is_array
from elfi.visualization.visualization import progress_bar
logger = logging.getLogger(__name__)
# TODO: refactor the plotting functions
class ParameterInference:
    """A base class for parameter inference methods.

    Attributes
    ----------
    model : elfi.ElfiModel
        The ELFI graph used by the algorithm
    output_names : list
        Names of the nodes whose outputs are included in the batches
    client : elfi.client.ClientBase
        The batches are computed in the client
    max_parallel_batches : int
    state : dict
        Stores any changing data related to achieving the objective. Must include a key
        ``n_batches`` for determining when the inference is finished.
    objective : dict
        Holds the data for the algorithm to internally determine how many batches are still
        needed. You must have a key ``n_batches`` here. By default the algorithm finished when
        the ``n_batches`` in the state dictionary is equal or greater to the corresponding
        objective value.
    batches : elfi.client.BatchHandler
        Helper class for submitting batches to the client and keeping track of their
        indexes.
    pool : elfi.store.OutputPool
        Pool object for storing and reusing node outputs.
    """

    def __init__(self,
                 model,
                 output_names,
                 batch_size=1,
                 seed=None,
                 pool=None,
                 max_parallel_batches=None):
        """Construct the inference algorithm object.

        If you are implementing your own algorithm do not forget to call `super`.

        Parameters
        ----------
        model : ElfiModel
            Model to perform the inference with.
        output_names : list
            Names of the nodes whose outputs will be requested from the ELFI graph.
        batch_size : int, optional
            The number of parameter evaluations in each pass through the ELFI graph.
            When using a vectorized simulator, using a suitably large batch_size can provide
            a significant performance boost.
        seed : int, optional
            Seed for the data generation from the ElfiModel
        pool : OutputPool, optional
            OutputPool both stores and provides precomputed values for batches.
        max_parallel_batches : int, optional
            Maximum number of batches allowed to be in computation at the same time.
            Defaults to number of cores in the client
        """
        model = model.model if isinstance(model, NodeReference) else model

        if not model.parameter_names:
            raise ValueError('Model {} defines no parameters'.format(model))

        # Copy so that algorithm-side modifications do not leak to the caller's model.
        self.model = model.copy()
        self.output_names = self._check_outputs(output_names)
        self.client = elfi.client.get_client()

        # Prepare the computation_context
        context = ComputationContext(batch_size=batch_size, seed=seed, pool=pool)
        self.batches = elfi.client.BatchHandler(
            self.model, context=context, output_names=output_names, client=self.client)
        self.computation_context = context
        self.max_parallel_batches = max_parallel_batches or self.client.num_cores

        if self.max_parallel_batches <= 0:
            msg = 'Value for max_parallel_batches ({}) must be at least one.'.format(
                self.max_parallel_batches)
            if self.client.num_cores == 0:
                msg += ' Client has currently no workers available. Please make sure ' \
                       'the cluster has fully started or set the max_parallel_batches ' \
                       'parameter by hand.'
            raise ValueError(msg)

        # State and objective should contain all information needed to continue the
        # inference after an iteration.
        self.state = dict(n_sim=0, n_batches=0)
        self.objective = dict()

    @property
    def pool(self):
        """Return the output pool of the inference."""
        return self.computation_context.pool

    @property
    def seed(self):
        """Return the seed of the inference."""
        return self.computation_context.seed

    @property
    def parameter_names(self):
        """Return the parameters to be inferred."""
        return self.model.parameter_names

    @property
    def batch_size(self):
        """Return the current batch_size."""
        return self.computation_context.batch_size

    def set_objective(self, *args, **kwargs):
        """Set the objective of the inference.

        This method sets the objective of the inference (values typically stored in the
        `self.objective` dict).

        Returns
        -------
        None
        """
        raise NotImplementedError

    def extract_result(self):
        """Prepare the result from the current state of the inference.

        ELFI calls this method in the end of the inference to return the result.

        Returns
        -------
        result : elfi.methods.result.Result
        """
        raise NotImplementedError

    def update(self, batch, batch_index):
        """Update the inference state with a new batch.

        ELFI calls this method when a new batch has been computed and the state of
        the inference should be updated with it. It is also possible to bypass ELFI and
        call this directly to update the inference.

        Parameters
        ----------
        batch : dict
            dict with `self.outputs` as keys and the corresponding outputs for the batch
            as values
        batch_index : int

        Returns
        -------
        None
        """
        self.state['n_batches'] += 1
        self.state['n_sim'] += self.batch_size

    def prepare_new_batch(self, batch_index):
        """Prepare values for a new batch.

        ELFI calls this method before submitting a new batch with an increasing index
        `batch_index`. This is an optional method to override. Use this if you have a need
        do do preparations, e.g. in Bayesian optimization algorithm, the next acquisition
        points would be acquired here.

        If you need provide values for certain nodes, you can do so by constructing a
        batch dictionary and returning it. See e.g. BayesianOptimization for an example.

        Parameters
        ----------
        batch_index : int
            next batch_index to be submitted

        Returns
        -------
        batch : dict or None
            Keys should match to node names in the model. These values will override any
            default values or operations in those nodes.
        """
        pass

    def plot_state(self, **kwargs):
        """Plot the current state of the algorithm.

        Parameters
        ----------
        axes : matplotlib.axes.Axes (optional)
        figure : matplotlib.figure.Figure (optional)
        xlim
            x-axis limits
        ylim
            y-axis limits
        interactive : bool (default False)
            If true, uses IPython.display to update the cell figure
        close
            Close figure in the end of plotting. Used in the end of interactive mode.

        Returns
        -------
        None
        """
        raise NotImplementedError

    def infer(self, *args, vis=None, bar=True, **kwargs):
        """Set the objective and start the iterate loop until the inference is finished.

        See the other arguments from the `set_objective` method.

        Parameters
        ----------
        vis : dict, optional
            Plotting options. More info in self.plot_state method
        bar : bool, optional
            Flag to remove (False) or keep (True) the progress bar from/in output.

        Returns
        -------
        result : Sample
        """
        # NOTE: the ``vis`` parameter shadows the module-level visualization
        # import within this method; only the parameter is used here.
        vis_opt = vis if isinstance(vis, dict) else {}

        self.set_objective(*args, **kwargs)

        if bar:
            progress_bar(0, self._objective_n_batches, prefix='Progress:',
                         suffix='Complete', length=50)
        while not self.finished:
            self.iterate()
            if vis:
                self.plot_state(interactive=True, **vis_opt)
            if bar:
                progress_bar(self.state['n_batches'], self._objective_n_batches,
                             prefix='Progress:', suffix='Complete', length=50)

        self.batches.cancel_pending()
        if vis:
            self.plot_state(close=True, **vis_opt)

        return self.extract_result()

    def iterate(self):
        """Advance the inference by one iteration.

        This is a way to manually progress the inference. One iteration consists of
        waiting and processing the result of the next batch in succession and possibly
        submitting new batches.

        Notes
        -----
        If the next batch is ready, it will be processed immediately and no new batches
        are submitted.

        New batches are submitted only while waiting for the next one to complete. There
        will never be more batches submitted in parallel than the `max_parallel_batches`
        setting allows.

        Returns
        -------
        None
        """
        # Submit new batches if allowed
        while self._allow_submit(self.batches.next_index):
            next_batch = self.prepare_new_batch(self.batches.next_index)
            logger.debug("Submitting batch %d" % self.batches.next_index)
            self.batches.submit(next_batch)

        # Handle the next ready batch in succession
        batch, batch_index = self.batches.wait_next()
        logger.debug('Received batch %d' % batch_index)
        self.update(batch, batch_index)

    @property
    def finished(self):
        """Return whether the inference has processed enough batches."""
        return self._objective_n_batches <= self.state['n_batches']

    def _allow_submit(self, batch_index):
        """Return whether a new batch may be submitted right now."""
        return (self.max_parallel_batches > self.batches.num_pending and
                self._has_batches_to_submit and (not self.batches.has_ready()))

    @property
    def _has_batches_to_submit(self):
        """Return whether the objective still requires more batches than are in flight."""
        return self._objective_n_batches > self.state['n_batches'] + self.batches.num_pending

    @property
    def _objective_n_batches(self):
        """Check that n_batches can be computed from the objective."""
        if 'n_batches' in self.objective:
            n_batches = self.objective['n_batches']
        elif 'n_sim' in self.objective:
            n_batches = ceil(self.objective['n_sim'] / self.batch_size)
        else:
            raise ValueError('Objective must define either `n_batches` or `n_sim`.')
        return n_batches

    def _extract_result_kwargs(self):
        """Extract common arguments for the ParameterInferenceResult object."""
        return {
            'method_name': self.__class__.__name__,
            'parameter_names': self.parameter_names,
            'seed': self.seed,
            'n_sim': self.state['n_sim'],
            'n_batches': self.state['n_batches']
        }

    @staticmethod
    def _resolve_model(model, target, default_reference_class=NodeReference):
        """Resolve a (model, target node name) pair from flexible user input."""
        if isinstance(model, ElfiModel) and target is None:
            raise NotImplementedError("Please specify the target node of the inference method")

        if isinstance(model, NodeReference):
            target = model
            model = target.model

        if isinstance(target, str):
            target = model[target]

        if not isinstance(target, default_reference_class):
            raise ValueError('Unknown target node class')

        return model, target.name

    def _check_outputs(self, output_names):
        """Filter out duplicates and check that corresponding nodes exist.

        Preserves the order.
        """
        output_names = output_names or []
        checked_names = []
        seen = set()
        for name in output_names:
            if isinstance(name, NodeReference):
                name = name.name

            if name in seen:
                continue
            elif not isinstance(name, str):
                raise ValueError(
                    'All output names must be strings, object {} was given'.format(name))
            elif not self.model.has_node(name):
                # BUGFIX: the placeholder was previously never filled in.
                raise ValueError(
                    'Node {} output was requested, but it is not in the model.'.format(name))

            seen.add(name)
            checked_names.append(name)

        return checked_names
class Sampler(ParameterInference):
    """Base class for inference methods that produce posterior samples."""

    def sample(self, n_samples, *args, **kwargs):
        """Sample from the approximate posterior.

        See the other arguments from the `set_objective` method.

        Parameters
        ----------
        n_samples : int
            Number of samples to generate from the (approximate) posterior
        *args
        **kwargs

        Returns
        -------
        result : Sample
        """
        show_bar = kwargs.pop('bar', True)
        return self.infer(n_samples, *args, bar=show_bar, **kwargs)

    def _extract_result_kwargs(self):
        """Extend the common result kwargs with sampler-specific state entries."""
        kwargs = super(Sampler, self)._extract_result_kwargs()
        # Carry over optional state entries only when the sampler produced them.
        kwargs.update({key: self.state[key]
                       for key in ('threshold', 'accept_rate')
                       if key in self.state})
        if hasattr(self, 'discrepancy_name'):
            kwargs['discrepancy_name'] = self.discrepancy_name
        return kwargs
class Rejection(Sampler):
    """Parallel ABC rejection sampler.

    For a description of the rejection sampler and a general introduction to ABC, see e.g.
    Lintusaari et al. 2016.

    References
    ----------
    Lintusaari J, Gutmann M U, Dutta R, Kaski S, Corander J (2016). Fundamentals and
    Recent Developments in Approximate Bayesian Computation. Systematic Biology.
    http://dx.doi.org/10.1093/sysbio/syw077.
    """

    def __init__(self, model, discrepancy_name=None, output_names=None, **kwargs):
        """Initialize the Rejection sampler.

        Parameters
        ----------
        model : ElfiModel or NodeReference
        discrepancy_name : str, NodeReference, optional
            Only needed if model is an ElfiModel
        output_names : list, optional
            Additional outputs from the model to be included in the inference result, e.g.
            corresponding summaries to the acquired samples
        kwargs:
            See InferenceMethod
        """
        model, discrepancy_name = self._resolve_model(model, discrepancy_name)
        output_names = [discrepancy_name] + model.parameter_names + (output_names or [])
        super(Rejection, self).__init__(model, output_names, **kwargs)

        self.discrepancy_name = discrepancy_name

    def set_objective(self, n_samples, threshold=None, quantile=None, n_sim=None):
        """Set objective for inference.

        Parameters
        ----------
        n_samples : int
            number of samples to generate
        threshold : float
            Acceptance threshold
        quantile : float
            In between (0,1). Define the threshold as the p-quantile of all the
            simulations. n_sim = n_samples/quantile.
        n_sim : int
            Total number of simulations. The threshold will be the n_samples smallest
            discrepancy among n_sim simulations.
        """
        if quantile is None and threshold is None and n_sim is None:
            quantile = .01
        # BUGFIX: ``np.Inf`` was removed in NumPy 2.0; use ``np.inf``.
        self.state = dict(samples=None, threshold=np.inf, n_sim=0, accept_rate=1, n_batches=0)

        if quantile:
            n_sim = ceil(n_samples / quantile)

        # Set initial n_batches estimate
        if n_sim:
            n_batches = ceil(n_sim / self.batch_size)
        else:
            n_batches = self.max_parallel_batches

        self.objective = dict(n_samples=n_samples, threshold=threshold, n_batches=n_batches)

        # Reset the inference
        self.batches.reset()

    def update(self, batch, batch_index):
        """Update the inference state with a new batch.

        Parameters
        ----------
        batch : dict
            dict with `self.outputs` as keys and the corresponding outputs for the batch
            as values
        batch_index : int
        """
        super(Rejection, self).update(batch, batch_index)
        if self.state['samples'] is None:
            # Lazy initialization of the outputs dict
            self._init_samples_lazy(batch)
        self._merge_batch(batch)
        self._update_state_meta()
        self._update_objective_n_batches()

    def extract_result(self):
        """Extract the result from the current state.

        Returns
        -------
        result : Sample
        """
        if self.state['samples'] is None:
            raise ValueError('Nothing to extract')

        # Take out the correct number of samples
        outputs = dict()
        for k, v in self.state['samples'].items():
            outputs[k] = v[:self.objective['n_samples']]

        return Sample(outputs=outputs, **self._extract_result_kwargs())

    def _init_samples_lazy(self, batch):
        """Initialize the outputs dict based on the received batch."""
        samples = {}
        e_noarr = "Node {} output must be in a numpy array of length {} (batch_size)."
        e_len = "Node {} output has array length {}. It should be equal to the batch size {}."

        for node in self.output_names:
            # Check the requested outputs
            if node not in batch:
                raise KeyError("Did not receive outputs for node {}".format(node))

            nbatch = batch[node]
            if not is_array(nbatch):
                raise ValueError(e_noarr.format(node, self.batch_size))
            elif len(nbatch) != self.batch_size:
                raise ValueError(e_len.format(node, len(nbatch), self.batch_size))

            # Prepare samples; extra batch_size rows act as scratch space for merging.
            shape = (self.objective['n_samples'] + self.batch_size, ) + nbatch.shape[1:]
            dtype = nbatch.dtype

            if node == self.discrepancy_name:
                # Initialize the distances to inf
                samples[node] = np.ones(shape, dtype=dtype) * np.inf
            else:
                samples[node] = np.empty(shape, dtype=dtype)

        self.state['samples'] = samples

    def _merge_batch(self, batch):
        """Merge a new batch into the running sorted sample store."""
        # TODO: add index vector so that you can recover the original order
        samples = self.state['samples']

        # Put the acquired samples to the end
        for node, v in samples.items():
            v[self.objective['n_samples']:] = batch[node]

        # Sort the smallest to the beginning
        sort_mask = np.argsort(samples[self.discrepancy_name], axis=0).ravel()
        for k, v in samples.items():
            v[:] = v[sort_mask]

    def _update_state_meta(self):
        """Update `n_sim`, `threshold`, and `accept_rate`."""
        o = self.objective
        s = self.state
        s['threshold'] = s['samples'][self.discrepancy_name][o['n_samples'] - 1].item()
        s['accept_rate'] = min(1, o['n_samples'] / s['n_sim'])

    def _update_objective_n_batches(self):
        """Re-estimate how many batches are needed to reach the threshold."""
        # Only in the case that the threshold is used
        if self.objective.get('threshold') is None:
            return

        s = self.state
        t, n_samples = [self.objective.get(k) for k in ('threshold', 'n_samples')]

        # noinspection PyTypeChecker
        n_acceptable = np.sum(s['samples'][self.discrepancy_name] <= t) if s['samples'] else 0
        if n_acceptable == 0:
            # No acceptable samples found yet, increase n_batches of objective by one in
            # order to keep simulating
            n_batches = self.objective['n_batches'] + 1
        else:
            accept_rate_t = n_acceptable / s['n_sim']
            # Add some margin to estimated n_batches. One could also use confidence
            # bounds here
            margin = .2 * self.batch_size * int(n_acceptable < n_samples)
            n_batches = (n_samples / accept_rate_t + margin) / self.batch_size
            n_batches = ceil(n_batches)

        self.objective['n_batches'] = n_batches
        logger.debug('Estimated objective n_batches=%d' % self.objective['n_batches'])

    def plot_state(self, **options):
        """Plot the current state of the inference algorithm.

        This feature is still experimental and only supports 1d or 2d cases.
        """
        displays = []
        if options.get('interactive'):
            from IPython import display
            displays.append(
                display.HTML('<span>Threshold: {}</span>'.format(self.state['threshold'])))

        visin.plot_sample(
            self.state['samples'],
            nodes=self.parameter_names,
            n=self.objective['n_samples'],
            displays=displays,
            **options)
class SMC(Sampler):
    """Sequential Monte Carlo ABC sampler."""
    def __init__(self, model, discrepancy_name=None, output_names=None, **kwargs):
        """Initialize the SMC-ABC sampler.
        Parameters
        ----------
        model : ElfiModel or NodeReference
        discrepancy_name : str, NodeReference, optional
            Only needed if model is an ElfiModel
        output_names : list, optional
            Additional outputs from the model to be included in the inference result, e.g.
            corresponding summaries to the acquired samples
        kwargs:
            See InferenceMethod
        """
        model, discrepancy_name = self._resolve_model(model, discrepancy_name)
        super(SMC, self).__init__(model, output_names, **kwargs)
        # Prior distribution of the model parameters; used for proposal weighting.
        self._prior = ModelPrior(self.model)
        self.discrepancy_name = discrepancy_name
        # Index of the current SMC round (population).
        self.state['round'] = 0
        # Completed populations (Sample objects) from earlier rounds.
        self._populations = []
        # Rejection sampler used for the current round; created in _init_new_round.
        self._rejection = None
        # Per-round RandomState used for drawing from the Gaussian-mixture proposal.
        self._round_random_state = None
    def set_objective(self, n_samples, thresholds):
        """Set the objective of the inference.
        Parameters
        ----------
        n_samples : int
            Number of samples per population.
        thresholds : list
            Acceptance thresholds, one per round.
        """
        self.objective.update(
            dict(
                n_samples=n_samples,
                n_batches=self.max_parallel_batches,
                round=len(thresholds) - 1,
                thresholds=thresholds))
        self._init_new_round()
    def extract_result(self):
        """Extract the result from the current state.
        Returns
        -------
        SmcSample
        """
        # Extract information from the population
        pop = self._extract_population()
        return SmcSample(
            outputs=pop.outputs,
            populations=self._populations.copy() + [pop],
            weights=pop.weights,
            threshold=pop.threshold,
            **self._extract_result_kwargs())
    def update(self, batch, batch_index):
        """Update the inference state with a new batch.
        Parameters
        ----------
        batch : dict
            dict with `self.outputs` as keys and the corresponding outputs for the batch
            as values
        batch_index : int
        """
        super(SMC, self).update(batch, batch_index)
        # Forward the batch to the rejection sampler of the current round.
        self._rejection.update(batch, batch_index)
        if self._rejection.finished:
            self.batches.cancel_pending()
            # Advance to the next round unless this was the final one.
            if self.state['round'] < self.objective['round']:
                self._populations.append(self._extract_population())
                self.state['round'] += 1
                self._init_new_round()
        self._update_objective()
    def prepare_new_batch(self, batch_index):
        """Prepare values for a new batch.
        Parameters
        ----------
        batch_index : int
            next batch_index to be submitted
        Returns
        -------
        batch : dict or None
            Keys should match to node names in the model. These values will override any
            default values or operations in those nodes.
        """
        if self.state['round'] == 0:
            # Use the actual prior
            return
        # Sample from the proposal, condition on actual prior
        params = GMDistribution.rvs(*self._gm_params, size=self.batch_size,
                                    prior_logpdf=self._prior.logpdf,
                                    random_state=self._round_random_state)
        batch = arr2d_to_batch(params, self.parameter_names)
        return batch
    def _init_new_round(self):
        """Set up a fresh Rejection sampler for the current round."""
        # NOTE: ``round`` shadows the builtin within this method.
        round = self.state['round']
        dashes = '-' * 16
        logger.info('%s Starting round %d %s' % (dashes, round, dashes))
        # Get a subseed for this round for ensuring consistent results for the round
        seed = self.seed if round == 0 else get_sub_seed(self.seed, round)
        self._round_random_state = np.random.RandomState(seed)
        self._rejection = Rejection(
            self.model,
            discrepancy_name=self.discrepancy_name,
            output_names=self.output_names,
            batch_size=self.batch_size,
            seed=seed,
            max_parallel_batches=self.max_parallel_batches)
        self._rejection.set_objective(
            self.objective['n_samples'], threshold=self.current_population_threshold)
    def _extract_population(self):
        """Finish the current round and return its weighted population sample."""
        sample = self._rejection.extract_result()
        # Append the sample object
        sample.method_name = "Rejection within SMC-ABC"
        w, cov = self._compute_weights_and_cov(sample)
        sample.weights = w
        sample.meta['cov'] = cov
        return sample
    def _compute_weights_and_cov(self, pop):
        """Compute importance weights and the proposal covariance for a population."""
        params = np.column_stack(tuple([pop.outputs[p] for p in self.parameter_names]))
        if self._populations:
            # Importance weights: prior density over proposal (mixture) density.
            q_logpdf = GMDistribution.logpdf(params, *self._gm_params)
            p_logpdf = self._prior.logpdf(params)
            w = np.exp(p_logpdf - q_logpdf)
        else:
            # First round samples directly from the prior, so weights are uniform.
            w = np.ones(pop.n_samples)
        if np.count_nonzero(w) == 0:
            raise RuntimeError("All sample weights are zero. If you are using a prior "
                               "with a bounded support, this may be caused by specifying "
                               "a too small sample size.")
        # New covariance
        cov = 2 * np.diag(weighted_var(params, w))
        if not np.all(np.isfinite(cov)):
            logger.warning("Could not estimate the sample covariance. This is often "
                           "caused by majority of the sample weights becoming zero."
                           "Falling back to using unit covariance.")
            cov = np.diag(np.ones(params.shape[1]))
        return w, cov
    def _update_objective(self):
        """Update the objective n_batches."""
        n_batches = sum([pop.n_batches for pop in self._populations])
        self.objective['n_batches'] = n_batches + self._rejection.objective['n_batches']
    @property
    def _gm_params(self):
        """Return (means, cov, weights) of the Gaussian-mixture proposal."""
        sample = self._populations[-1]
        params = sample.samples_array
        return params, sample.cov, sample.weights
    @property
    def current_population_threshold(self):
        """Return the threshold for current population."""
        return self.objective['thresholds'][self.state['round']]
class BayesianOptimization(ParameterInference):
"""Bayesian Optimization of an unknown target function."""
    def __init__(self,
                 model,
                 target_name=None,
                 bounds=None,
                 initial_evidence=None,
                 update_interval=10,
                 target_model=None,
                 acquisition_method=None,
                 acq_noise_var=0,
                 exploration_rate=10,
                 batch_size=1,
                 batches_per_acquisition=None,
                 async_acq=False,
                 **kwargs):
        """Initialize Bayesian optimization.
        Parameters
        ----------
        model : ElfiModel or NodeReference
        target_name : str or NodeReference
            Only needed if model is an ElfiModel
        bounds : dict, optional
            The region where to estimate the posterior for each parameter in
            model.parameters: dict('parameter_name':(lower, upper), ... )`. Not used if
            custom target_model is given.
        initial_evidence : int, dict, optional
            Number of initial evidence or a precomputed batch dict containing parameter
            and discrepancy values. Default value depends on the dimensionality.
        update_interval : int, optional
            How often to update the GP hyperparameters of the target_model
        target_model : GPyRegression, optional
        acquisition_method : Acquisition, optional
            Method of acquiring evidence points. Defaults to LCBSC.
        acq_noise_var : float or np.array, optional
            Variance(s) of the noise added in the default LCBSC acquisition method.
            If an array, should be 1d specifying the variance for each dimension.
        exploration_rate : float, optional
            Exploration rate of the acquisition method
        batch_size : int, optional
            Elfi batch size. Defaults to 1.
        batches_per_acquisition : int, optional
            How many batches will be requested from the acquisition function at one go.
            Defaults to max_parallel_batches.
        async_acq : bool, optional
            Allow acquisitions to be made asynchronously, i.e. do not wait for all the
            results from the previous acquisition before making the next. This can be more
            efficient with a large amount of workers (e.g. in cluster environments) but
            forgoes the guarantee for the exactly same result with the same initial
            conditions (e.g. the seed). Default False.
        **kwargs
        """
        model, target_name = self._resolve_model(model, target_name)
        output_names = [target_name] + model.parameter_names
        super(BayesianOptimization, self).__init__(
            model, output_names, batch_size=batch_size, **kwargs)
        # Build a default GP surrogate over the parameter space unless one was given.
        target_model = target_model or GPyRegression(self.model.parameter_names, bounds=bounds)
        self.target_name = target_name
        self.target_model = target_model
        n_precomputed = 0
        n_initial, precomputed = self._resolve_initial_evidence(initial_evidence)
        if precomputed is not None:
            # Seed the GP with the user-supplied evidence before any acquisition.
            params = batch_to_arr2d(precomputed, self.parameter_names)
            n_precomputed = len(params)
            self.target_model.update(params, precomputed[target_name])
        self.batches_per_acquisition = batches_per_acquisition or self.max_parallel_batches
        # Default acquisition: LCBSC with the model prior and the given noise settings.
        self.acquisition_method = acquisition_method or LCBSC(self.target_model,
                                                              prior=ModelPrior(self.model),
                                                              noise_var=acq_noise_var,
                                                              exploration_rate=exploration_rate,
                                                              seed=self.seed)
        self.n_initial_evidence = n_initial
        self.n_precomputed_evidence = n_precomputed
        self.update_interval = update_interval
        self.async_acq = async_acq
        # Evidence bookkeeping: precomputed points count as already acquired.
        self.state['n_evidence'] = self.n_precomputed_evidence
        self.state['last_GP_update'] = self.n_initial_evidence
        self.state['acquisition'] = []
def _resolve_initial_evidence(self, initial_evidence):
# Some sensibility limit for starting GP regression
precomputed = None
n_required = max(10, 2**self.target_model.input_dim + 1)
n_required = ceil_to_batch_size(n_required, self.batch_size)
if initial_evidence is None:
n_initial_evidence = n_required
elif isinstance(initial_evidence, (int, np.int, float)):
n_initial_evidence = int(initial_evidence)
else:
precomputed = initial_evidence
n_initial_evidence = len(precomputed[self.target_name])
if n_initial_evidence < 0:
raise ValueError('Number of initial evidence must be positive or zero '
'(was {})'.format(initial_evidence))
elif n_initial_evidence < n_required:
logger.warning('We recommend having at least {} initialization points for '
'the initialization (now {})'.format(n_required, n_initial_evidence))
if precomputed is None and (n_initial_evidence % self.batch_size != 0):
logger.warning('Number of initial_evidence %d is not divisible by '
'batch_size %d. Rounding it up...' % (n_initial_evidence,
self.batch_size))
n_initial_evidence = ceil_to_batch_size(n_initial_evidence, self.batch_size)
return n_initial_evidence, precomputed
@property
def n_evidence(self):
"""Return the number of acquired evidence points."""
return self.state.get('n_evidence', 0)
@property
def acq_batch_size(self):
"""Return the total number of acquisition per iteration."""
return self.batch_size * self.batches_per_acquisition
def set_objective(self, n_evidence=None):
"""Set objective for inference.
You can continue BO by giving a larger n_evidence.
Parameters
----------
n_evidence : int
Number of total evidence for the GP fitting. This includes any initial
evidence.
"""
if n_evidence is None:
n_evidence = self.objective.get('n_evidence', self.n_evidence)
if n_evidence < self.n_evidence:
logger.warning('Requesting less evidence than there already exists')
self.objective['n_evidence'] = n_evidence
self.objective['n_sim'] = n_evidence - self.n_precomputed_evidence
def extract_result(self):
"""Extract the result from the current state.
Returns
-------
OptimizationResult
"""
x_min, _ = stochastic_optimization(
self.target_model.predict_mean, self.target_model.bounds, seed=self.seed)
batch_min = arr2d_to_batch(x_min, self.parameter_names)
outputs = arr2d_to_batch(self.target_model.X, self.parameter_names)
outputs[self.target_name] = self.target_model.Y
return OptimizationResult(
x_min=batch_min, outputs=outputs, **self._extract_result_kwargs())
def update(self, batch, batch_index):
"""Update the GP regression model of the target node with a new batch.
Parameters
----------
batch : dict
dict with `self.outputs` as keys and the corresponding outputs for the batch
as values
batch_index : int
"""
super(BayesianOptimization, self).update(batch, batch_index)
self.state['n_evidence'] += self.batch_size
params = batch_to_arr2d(batch, self.parameter_names)
self._report_batch(batch_index, params, batch[self.target_name])
optimize = self._should_optimize()
self.target_model.update(params, batch[self.target_name], optimize)
if optimize:
self.state['last_GP_update'] = self.target_model.n_evidence
def prepare_new_batch(self, batch_index):
"""Prepare values for a new batch.
Parameters
----------
batch_index : int
next batch_index to be submitted
Returns
-------
batch : dict or None
Keys should match to node names in the model. These values will override any
default values or operations in those nodes.
"""
t = self._get_acquisition_index(batch_index)
# Check if we still should take initial points from the prior
if t < 0:
return
# Take the next batch from the acquisition_batch
acquisition = self.state['acquisition']
if len(acquisition) == 0:
acquisition = self.acquisition_method.acquire(self.acq_batch_size, t=t)
batch = arr2d_to_batch(acquisition[:self.batch_size], self.parameter_names)
self.state['acquisition'] = acquisition[self.batch_size:]
return batch
def _get_acquisition_index(self, batch_index):
acq_batch_size = self.batch_size * self.batches_per_acquisition
initial_offset = self.n_initial_evidence - self.n_precomputed_evidence
starting_sim_index = self.batch_size * batch_index
t = (starting_sim_index - initial_offset) // acq_batch_size
return t
# TODO: use state dict
@property
def _n_submitted_evidence(self):
return self.batches.total * self.batch_size
def _allow_submit(self, batch_index):
if not super(BayesianOptimization, self)._allow_submit(batch_index):
return False
if self.async_acq:
return True
# Allow submitting freely as long we are still submitting initial evidence
t = self._get_acquisition_index(batch_index)
if t < 0:
return True
# Do not allow acquisition until previous acquisitions are ready (as well
# as all initial acquisitions)
acquisitions_left = len(self.state['acquisition'])
if acquisitions_left == 0 and self.batches.has_pending:
return False
return True
def _should_optimize(self):
current = self.target_model.n_evidence + self.batch_size
next_update = self.state['last_GP_update'] + self.update_interval
return current >= self.n_initial_evidence and current >= next_update
def _report_batch(self, batch_index, params, distances):
str = "Received batch {}:\n".format(batch_index)
fill = 6 * ' '
for i in range(self.batch_size):
str += "{}{} at {}\n".format(fill, distances[i].item(), params[i])
logger.debug(str)
    def plot_state(self, **options):
        """Plot the GP surface.
        This feature is still experimental and currently supports only 2D cases.
        """
        f = plt.gcf()
        # Reuse the current figure if it already holds two axes; otherwise make
        # a fresh side-by-side pair (GP surface, acquisition surface).
        if len(f.axes) < 2:
            f, _ = plt.subplots(1, 2, figsize=(13, 6), sharex='row', sharey='row')
        gp = self.target_model
        # Draw the GP surface
        visin.draw_contour(
            gp.predict_mean,
            gp.bounds,
            self.parameter_names,
            title='GP target surface',
            points=gp.X,
            axes=f.axes[0],
            **options)
        # Draw the latest acquisitions
        if options.get('interactive'):
            point = gp.X[-1, :]
            if len(gp.X) > 1:
                f.axes[1].scatter(*point, color='red')
        displays = [gp._gp]
        if options.get('interactive'):
            from IPython import display
            # Prepend an HTML banner describing the latest acquisition.
            displays.insert(
                0,
                display.HTML('<span><b>Iteration {}:</b> Acquired {} at {}</span>'.format(
                    len(gp.Y), gp.Y[-1][0], point)))
        # Update
        visin._update_interactive(displays, options)
        def acq(x):
            # Acquisition value at x given the current number of evaluations.
            return self.acquisition_method.evaluate(x, len(gp.X))
        # Draw the acquisition surface
        visin.draw_contour(
            acq,
            gp.bounds,
            self.parameter_names,
            title='Acquisition surface',
            points=None,
            axes=f.axes[1],
            **options)
        if options.get('close'):
            plt.close()
def plot_discrepancy(self, axes=None, **kwargs):
"""Plot acquired parameters vs. resulting discrepancy.
Parameters
----------
axes : plt.Axes or arraylike of plt.Axes
Return
------
axes : np.array of plt.Axes
"""
return vis.plot_discrepancy(self.target_model, self.parameter_names, axes=axes, **kwargs)
def plot_gp(self, axes=None, resol=50, const=None, bounds=None, **kwargs):
"""Plot pairwise relationships as a matrix with parameters vs. discrepancy.
Parameters
----------
axes : matplotlib.axes.Axes, optional
resol : int, optional
Resolution of the plotted grid.
const : np.array, optional
Values for parameters in plots where held constant. Defaults to minimum evidence.
bounds: list of tuples, optional
List of tuples for axis boundaries.
Returns
-------
axes : np.array of plt.Axes
"""
return vis.plot_gp(self.target_model, self.parameter_names, axes,
resol, const, bounds, **kwargs)
class BOLFI(BayesianOptimization):
    """Bayesian Optimization for Likelihood-Free Inference (BOLFI).

    Approximates the discrepancy function by a stochastic regression model.
    Discrepancy model is fit by sampling the discrepancy function at points decided by
    the acquisition function.
    The method implements the framework introduced in Gutmann & Corander, 2016.

    References
    ----------
    <NAME>, <NAME> (2016). Bayesian Optimization for Likelihood-Free Inference
    of Simulator-Based Statistical Models. JMLR 17(125):1−47, 2016.
    http://jmlr.org/papers/v17/15-017.html
    """

    def fit(self, n_evidence, threshold=None, bar=True):
        """Fit the surrogate model.

        Generates a regression model for the discrepancy given the parameters.
        Currently only Gaussian processes are supported as surrogate models.

        Parameters
        ----------
        n_evidence : int, required
            Number of evidence for fitting
        threshold : float, optional
            Discrepancy threshold for creating the posterior (log with log discrepancy).
        bar : bool, optional
            Flag to remove (False) the progress bar from output.
        """
        logger.info("BOLFI: Fitting the surrogate model...")
        if n_evidence is None:
            raise ValueError(
                'You must specify the number of evidence (n_evidence) for the fitting')
        self.infer(n_evidence, bar=bar)
        return self.extract_posterior(threshold)

    def extract_posterior(self, threshold=None):
        """Return an object representing the approximate posterior.

        The approximation is based on surrogate model regression.

        Parameters
        ----------
        threshold: float, optional
            Discrepancy threshold for creating the posterior (log with log discrepancy).

        Returns
        -------
        posterior : elfi.methods.posteriors.BolfiPosterior
        """
        if self.state['n_batches'] == 0:
            raise ValueError('Model is not fitted yet, please see the `fit` method.')
        return BolfiPosterior(self.target_model, threshold=threshold, prior=ModelPrior(self.model))

    def sample(self,
               n_samples,
               warmup=None,
               n_chains=4,
               threshold=None,
               initials=None,
               algorithm='nuts',
               sigma_proposals=None,
               n_evidence=None,
               **kwargs):
        r"""Sample the posterior distribution of BOLFI.

        Here the likelihood is defined through the cumulative density function
        of the standard normal distribution:

        L(\theta) \propto F((h-\mu(\theta)) / \sigma(\theta))

        where h is the threshold, and \mu(\theta) and \sigma(\theta) are the posterior mean and
        (noisy) standard deviation of the associated Gaussian process.

        The sampling is performed with an MCMC sampler (the No-U-Turn Sampler, NUTS).

        Parameters
        ----------
        n_samples : int
            Number of requested samples from the posterior for each chain. This includes warmup,
            and note that the effective sample size is usually considerably smaller.
        warmup : int, optional
            Length of warmup sequence in MCMC sampling. Defaults to n_samples//2.
        n_chains : int, optional
            Number of independent chains.
        threshold : float, optional
            The threshold (bandwidth) for posterior (give as log if log discrepancy).
        initials : np.array of shape (n_chains, n_params), optional
            Initial values for the sampled parameters for each chain.
            Defaults to best evidence points.
        algorithm : string, optional
            Sampling algorithm to use. Currently 'nuts'(default) and 'metropolis' are supported.
        sigma_proposals : np.array
            Standard deviations for Gaussian proposals of each parameter for Metropolis
            Markov Chain sampler.
        n_evidence : int
            If the regression model is not fitted yet, specify the amount of evidence

        Returns
        -------
        BolfiSample
        """
        if self.state['n_batches'] == 0:
            self.fit(n_evidence)
        # TODO: add more MCMC algorithms
        if algorithm not in ['nuts', 'metropolis']:
            raise ValueError("Unknown posterior sampler.")
        posterior = self.extract_posterior(threshold)
        warmup = warmup or n_samples // 2

        # Unless given, select the evidence points with smallest discrepancy
        if initials is not None:
            if np.asarray(initials).shape != (n_chains, self.target_model.input_dim):
                raise ValueError("The shape of initials must be (n_chains, n_params).")
        else:
            inds = np.argsort(self.target_model.Y[:, 0])
            initials = np.asarray(self.target_model.X[inds])

        self.target_model.is_sampling = True  # enables caching for default RBF kernel

        tasks_ids = []
        ii_initial = 0
        if algorithm == 'metropolis':
            if sigma_proposals is None:
                raise ValueError("Gaussian proposal standard deviations "
                                 "have to be provided for Metropolis-sampling.")
            elif sigma_proposals.shape[0] != self.target_model.input_dim:
                raise ValueError("The length of Gaussian proposal standard "
                                 "deviations must be n_params.")

        # sampling is embarrassingly parallel, so depending on self.client this may parallelize
        for ii in range(n_chains):
            seed = get_sub_seed(self.seed, ii)
            # discard bad initialization points
            # BUGFIX: bound the search by len(initials) instead of len(inds);
            # `inds` is unbound when user-supplied initials are given, which
            # previously raised NameError instead of the intended ValueError.
            while np.isinf(posterior.logpdf(initials[ii_initial])):
                ii_initial += 1
                if ii_initial == len(initials):
                    raise ValueError(
                        "BOLFI.sample: Cannot find enough acceptable initialization points!")

            if algorithm == 'nuts':
                tasks_ids.append(
                    self.client.apply(
                        mcmc.nuts,
                        n_samples,
                        initials[ii_initial],
                        posterior.logpdf,
                        posterior.gradient_logpdf,
                        n_adapt=warmup,
                        seed=seed,
                        **kwargs))

            elif algorithm == 'metropolis':
                tasks_ids.append(
                    self.client.apply(
                        mcmc.metropolis,
                        n_samples,
                        initials[ii_initial],
                        posterior.logpdf,
                        sigma_proposals,
                        warmup,
                        seed=seed,
                        **kwargs))

            ii_initial += 1

        # get results from completed tasks or run sampling (client-specific)
        chains = []
        for task_id in tasks_ids:  # renamed from `id`, which shadowed the builtin
            chains.append(self.client.get_result(task_id))
        chains = np.asarray(chains)

        print(
            "{} chains of {} iterations acquired. Effective sample size and Rhat for each "
            "parameter:".format(n_chains, n_samples))
        for ii, node in enumerate(self.parameter_names):
            print(node, mcmc.eff_sample_size(chains[:, :, ii]),
                  mcmc.gelman_rubin(chains[:, :, ii]))
        self.target_model.is_sampling = False

        return BolfiSample(
            method_name='BOLFI',
            chains=chains,
            parameter_names=self.parameter_names,
            warmup=warmup,
            threshold=float(posterior.threshold),
            n_sim=self.state['n_sim'],
            seed=self.seed)
| [
"logging.getLogger",
"elfi.methods.utils.ceil_to_batch_size",
"numpy.count_nonzero",
"elfi.loader.get_sub_seed",
"numpy.argsort",
"elfi.utils.is_array",
"numpy.isfinite",
"elfi.model.elfi_model.ComputationContext",
"numpy.random.RandomState",
"numpy.asarray",
"numpy.exp",
"matplotlib.pyplot.cl... | [((1025, 1052), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1042, 1052), False, 'import logging\n'), ((3857, 3920), 'elfi.model.elfi_model.ComputationContext', 'ComputationContext', ([], {'batch_size': 'batch_size', 'seed': 'seed', 'pool': 'pool'}), '(batch_size=batch_size, seed=seed, pool=pool)\n', (3875, 3920), False, 'from elfi.model.elfi_model import ComputationContext, ElfiModel, NodeReference\n'), ((21414, 21548), 'elfi.visualization.interactive.plot_sample', 'visin.plot_sample', (["self.state['samples']"], {'nodes': 'self.parameter_names', 'n': "self.objective['n_samples']", 'displays': 'displays'}), "(self.state['samples'], nodes=self.parameter_names, n=self\n .objective['n_samples'], displays=displays, **options)\n", (21431, 21548), True, 'import elfi.visualization.interactive as visin\n'), ((22405, 22427), 'elfi.methods.utils.ModelPrior', 'ModelPrior', (['self.model'], {}), '(self.model)\n', (22415, 22427), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((24768, 24903), 'elfi.methods.utils.GMDistribution.rvs', 'GMDistribution.rvs', (['*self._gm_params'], {'size': 'self.batch_size', 'prior_logpdf': 'self._prior.logpdf', 'random_state': 'self._round_random_state'}), '(*self._gm_params, size=self.batch_size, prior_logpdf=\n self._prior.logpdf, random_state=self._round_random_state)\n', (24786, 24903), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((24988, 25032), 'elfi.methods.utils.arr2d_to_batch', 'arr2d_to_batch', (['params', 'self.parameter_names'], {}), '(params, self.parameter_names)\n', (25002, 25032), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((25418, 25445), 'numpy.random.RandomState', 'np.random.RandomState', 
(['seed'], {}), '(seed)\n', (25439, 25445), True, 'import numpy as np\n'), ((32304, 32351), 'elfi.methods.utils.ceil_to_batch_size', 'ceil_to_batch_size', (['n_required', 'self.batch_size'], {}), '(n_required, self.batch_size)\n', (32322, 32351), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((34747, 34849), 'elfi.methods.bo.utils.stochastic_optimization', 'stochastic_optimization', (['self.target_model.predict_mean', 'self.target_model.bounds'], {'seed': 'self.seed'}), '(self.target_model.predict_mean, self.target_model.\n bounds, seed=self.seed)\n', (34770, 34849), False, 'from elfi.methods.bo.utils import stochastic_optimization\n'), ((34879, 34922), 'elfi.methods.utils.arr2d_to_batch', 'arr2d_to_batch', (['x_min', 'self.parameter_names'], {}), '(x_min, self.parameter_names)\n', (34893, 34922), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((34941, 34998), 'elfi.methods.utils.arr2d_to_batch', 'arr2d_to_batch', (['self.target_model.X', 'self.parameter_names'], {}), '(self.target_model.X, self.parameter_names)\n', (34955, 34998), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((35641, 35684), 'elfi.methods.utils.batch_to_arr2d', 'batch_to_arr2d', (['batch', 'self.parameter_names'], {}), '(batch, self.parameter_names)\n', (35655, 35684), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((36794, 36861), 'elfi.methods.utils.arr2d_to_batch', 'arr2d_to_batch', (['acquisition[:self.batch_size]', 'self.parameter_names'], {}), '(acquisition[:self.batch_size], self.parameter_names)\n', (36808, 36861), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, 
weighted_var\n'), ((38809, 38818), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (38816, 38818), True, 'import matplotlib.pyplot as plt\n'), ((39001, 39141), 'elfi.visualization.interactive.draw_contour', 'visin.draw_contour', (['gp.predict_mean', 'gp.bounds', 'self.parameter_names'], {'title': '"""GP target surface"""', 'points': 'gp.X', 'axes': 'f.axes[0]'}), "(gp.predict_mean, gp.bounds, self.parameter_names, title=\n 'GP target surface', points=gp.X, axes=f.axes[0], **options)\n", (39019, 39141), True, 'import elfi.visualization.interactive as visin\n'), ((39745, 39789), 'elfi.visualization.interactive._update_interactive', 'visin._update_interactive', (['displays', 'options'], {}), '(displays, options)\n', (39770, 39789), True, 'import elfi.visualization.interactive as visin\n'), ((39925, 40055), 'elfi.visualization.interactive.draw_contour', 'visin.draw_contour', (['acq', 'gp.bounds', 'self.parameter_names'], {'title': '"""Acquisition surface"""', 'points': 'None', 'axes': 'f.axes[1]'}), "(acq, gp.bounds, self.parameter_names, title=\n 'Acquisition surface', points=None, axes=f.axes[1], **options)\n", (39943, 40055), True, 'import elfi.visualization.interactive as visin\n'), ((40494, 40581), 'elfi.visualization.visualization.plot_discrepancy', 'vis.plot_discrepancy', (['self.target_model', 'self.parameter_names'], {'axes': 'axes'}), '(self.target_model, self.parameter_names, axes=axes, **\n kwargs)\n', (40514, 40581), True, 'import elfi.visualization.visualization as vis\n'), ((41215, 41309), 'elfi.visualization.visualization.plot_gp', 'vis.plot_gp', (['self.target_model', 'self.parameter_names', 'axes', 'resol', 'const', 'bounds'], {}), '(self.target_model, self.parameter_names, axes, resol, const,\n bounds, **kwargs)\n', (41226, 41309), True, 'import elfi.visualization.visualization as vis\n'), ((48435, 48453), 'numpy.asarray', 'np.asarray', (['chains'], {}), '(chains)\n', (48445, 48453), True, 'import numpy as np\n'), ((8866, 8963), 
'elfi.visualization.visualization.progress_bar', 'progress_bar', (['(0)', 'self._objective_n_batches'], {'prefix': '"""Progress:"""', 'suffix': '"""Complete"""', 'length': '(50)'}), "(0, self._objective_n_batches, prefix='Progress:', suffix=\n 'Complete', length=50)\n", (8878, 8963), False, 'from elfi.visualization.visualization import progress_bar\n'), ((16367, 16393), 'math.ceil', 'ceil', (['(n_samples / quantile)'], {}), '(n_samples / quantile)\n', (16371, 16393), False, 'from math import ceil\n'), ((16478, 16507), 'math.ceil', 'ceil', (['(n_sim / self.batch_size)'], {}), '(n_sim / self.batch_size)\n', (16482, 16507), False, 'from math import ceil\n'), ((20200, 20248), 'numpy.sum', 'np.sum', (["(s['samples'][self.discrepancy_name] <= t)"], {}), "(s['samples'][self.discrepancy_name] <= t)\n", (20206, 20248), True, 'import numpy as np\n'), ((20841, 20856), 'math.ceil', 'ceil', (['n_batches'], {}), '(n_batches)\n', (20845, 20856), False, 'from math import ceil\n'), ((25352, 25382), 'elfi.loader.get_sub_seed', 'get_sub_seed', (['self.seed', 'round'], {}), '(self.seed, round)\n', (25364, 25382), False, 'from elfi.loader import get_sub_seed\n'), ((26355, 26402), 'elfi.methods.utils.GMDistribution.logpdf', 'GMDistribution.logpdf', (['params', '*self._gm_params'], {}), '(params, *self._gm_params)\n', (26376, 26402), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((26469, 26496), 'numpy.exp', 'np.exp', (['(p_logpdf - q_logpdf)'], {}), '(p_logpdf - q_logpdf)\n', (26475, 26496), True, 'import numpy as np\n'), ((26527, 26549), 'numpy.ones', 'np.ones', (['pop.n_samples'], {}), '(pop.n_samples)\n', (26534, 26549), True, 'import numpy as np\n'), ((26562, 26581), 'numpy.count_nonzero', 'np.count_nonzero', (['w'], {}), '(w)\n', (26578, 26581), True, 'import numpy as np\n'), ((30733, 30789), 'elfi.methods.bo.gpy_regression.GPyRegression', 'GPyRegression', (['self.model.parameter_names'], 
{'bounds': 'bounds'}), '(self.model.parameter_names, bounds=bounds)\n', (30746, 30789), False, 'from elfi.methods.bo.gpy_regression import GPyRegression\n'), ((31037, 31086), 'elfi.methods.utils.batch_to_arr2d', 'batch_to_arr2d', (['precomputed', 'self.parameter_names'], {}), '(precomputed, self.parameter_names)\n', (31051, 31086), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((33458, 33513), 'elfi.methods.utils.ceil_to_batch_size', 'ceil_to_batch_size', (['n_initial_evidence', 'self.batch_size'], {}), '(n_initial_evidence, self.batch_size)\n', (33476, 33513), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((38866, 38929), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(13, 6)', 'sharex': '"""row"""', 'sharey': '"""row"""'}), "(1, 2, figsize=(13, 6), sharex='row', sharey='row')\n", (38878, 38929), True, 'import matplotlib.pyplot as plt\n'), ((40182, 40193), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (40191, 40193), True, 'import matplotlib.pyplot as plt\n'), ((46167, 46204), 'numpy.argsort', 'np.argsort', (['self.target_model.Y[:, 0]'], {}), '(self.target_model.Y[:, 0])\n', (46177, 46204), True, 'import numpy as np\n'), ((46228, 46265), 'numpy.asarray', 'np.asarray', (['self.target_model.X[inds]'], {}), '(self.target_model.X[inds])\n', (46238, 46265), True, 'import numpy as np\n'), ((47001, 47028), 'elfi.loader.get_sub_seed', 'get_sub_seed', (['self.seed', 'ii'], {}), '(self.seed, ii)\n', (47013, 47028), False, 'from elfi.loader import get_sub_seed\n'), ((9163, 9282), 'elfi.visualization.visualization.progress_bar', 'progress_bar', (["self.state['n_batches']", 'self._objective_n_batches'], {'prefix': '"""Progress:"""', 'suffix': '"""Complete"""', 'length': '(50)'}), "(self.state['n_batches'], self._objective_n_batches, prefix=\n 'Progress:', 
suffix='Complete', length=50)\n", (9175, 9282), False, 'from elfi.visualization.visualization import progress_bar\n'), ((11355, 11402), 'math.ceil', 'ceil', (["(self.objective['n_sim'] / self.batch_size)"], {}), "(self.objective['n_sim'] / self.batch_size)\n", (11359, 11402), False, 'from math import ceil\n'), ((18414, 18430), 'elfi.utils.is_array', 'is_array', (['nbatch'], {}), '(nbatch)\n', (18422, 18430), False, 'from elfi.utils import is_array\n'), ((19005, 19033), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (19013, 19033), True, 'import numpy as np\n'), ((19437, 19487), 'numpy.argsort', 'np.argsort', (['samples[self.discrepancy_name]'], {'axis': '(0)'}), '(samples[self.discrepancy_name], axis=0)\n', (19447, 19487), True, 'import numpy as np\n'), ((26878, 26901), 'elfi.methods.utils.weighted_var', 'weighted_var', (['params', 'w'], {}), '(params, w)\n', (26890, 26901), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((26926, 26942), 'numpy.isfinite', 'np.isfinite', (['cov'], {}), '(cov)\n', (26937, 26942), True, 'import numpy as np\n'), ((27211, 27235), 'numpy.ones', 'np.ones', (['params.shape[1]'], {}), '(params.shape[1])\n', (27218, 27235), True, 'import numpy as np\n'), ((43502, 43524), 'elfi.methods.utils.ModelPrior', 'ModelPrior', (['self.model'], {}), '(self.model)\n', (43512, 43524), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((48696, 48734), 'elfi.methods.mcmc.eff_sample_size', 'mcmc.eff_sample_size', (['chains[:, :, ii]'], {}), '(chains[:, :, ii])\n', (48716, 48734), True, 'import elfi.methods.mcmc as mcmc\n'), ((48754, 48789), 'elfi.methods.mcmc.gelman_rubin', 'mcmc.gelman_rubin', (['chains[:, :, ii]'], {}), '(chains[:, :, ii])\n', (48771, 48789), True, 'import elfi.methods.mcmc as mcmc\n'), ((18918, 18945), 'numpy.ones', 
'np.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (18925, 18945), True, 'import numpy as np\n'), ((31440, 31462), 'elfi.methods.utils.ModelPrior', 'ModelPrior', (['self.model'], {}), '(self.model)\n', (31450, 31462), False, 'from elfi.methods.utils import GMDistribution, ModelPrior, arr2d_to_batch, batch_to_arr2d, ceil_to_batch_size, weighted_var\n'), ((45975, 45995), 'numpy.asarray', 'np.asarray', (['initials'], {}), '(initials)\n', (45985, 45995), True, 'import numpy as np\n')] |
import torch
import numpy as np
import tqdm
from tensorboardX import SummaryWriter
from abp.utils import clear_summary_path
from abp.models.trans_model_tow import TransModel
import pickle
import os
import logging
import time
import random
from abp.adaptives.common.prioritized_memory.memory import ReplayBuffer
from copy import deepcopy
logger = logging.getLogger('root')
# Tensor type aliases that transparently select GPU tensors when CUDA is
# available and fall back to CPU tensors otherwise.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
# Default tensor type used throughout the adaptive.
Tensor = FloatTensor
class TransAdaptive(object):
""" Adaptive that uses Transition Model """
    def __init__(self, name, network_config, reinforce_config):
        """Initialize the transition-model adaptive.

        Parameters
        ----------
        name : str
            Name used for the TensorBoard summary sub-directory.
        network_config : object
            Network settings; must provide `restore_network`, `network_path`
            and `summaries_path` related fields used below.
        reinforce_config : object
            RL settings; must provide `memory_size`, `summaries_path` and
            `batch_size` (the latter is read in `update`).
        """
        super(TransAdaptive, self).__init__()
        self.name = name
        self.network_config = network_config
        self.reinforce_config = reinforce_config
        # Replay buffer holding (input, _, _, ground_truth, _) tuples.
        self.memory = ReplayBuffer(self.reinforce_config.memory_size)
        # Global
        # Total number of observed transitions; persisted across restarts below.
        self.steps = 0
        # self.batch_size = 128
        reinforce_summary_path = self.reinforce_config.summaries_path + "/" + self.name
        # Optionally restore the replay memory and the step counter from disk.
        if self.network_config.restore_network:
            restore_path = self.network_config.network_path + "/adaptive.info"
            self.memory.load(self.network_config.network_path)
            print("memory length:", len(self.memory))
            if os.path.exists(restore_path):
                with open(restore_path, "rb") as file:
                    info = pickle.load(file)
                self.steps = info["steps"]
                print(self.steps)
            else:
                print("no restore steps")
        self.summary = SummaryWriter(log_dir=reinforce_summary_path)
        # Two transition models: units (30 inputs -> 24 outputs) and hp
        # (31 inputs -> 1 output); `update` slices the stored vectors to match.
        self.trans_model_units = TransModel("trans_units", 30, 24, self.network_config, use_cuda)
        self.trans_model_hp = TransModel("trans_hp", 31, 1, self.network_config, use_cuda)
        # self.memory_resotre = memory_resotre
def add_memory(self, pre_state, curr_state):
self.steps += 1
inputs = self.separate_state(pre_state, is_pre = True)
ground_truths = self.separate_state(curr_state, is_pre = False)
for input, gt in zip(inputs, ground_truths):
# print("input")
# print(input)
# print("gt")
# print(gt)
self.memory.add(input,
None,
None,
gt, None)
if self.steps % 10 == 0:
self.update()
if self.steps % 1000 == 0:
self.save()
def save(self, appendix = ""):
info = {
"steps": self.steps
}
print("*************saved*****************")
# logger.info("Saving network. Found new best reward (%.2f)" % total_reward)
self.trans_model_units.save_network(appendix = appendix)
self.trans_model_hp.save_network(appendix = appendix)
with open(self.network_config.network_path + "/adaptive.info", "wb") as file:
pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
self.memory.save(self.network_config.network_path)
print("lenght of memeory: ", len(self.memory))
def separate_state(self, state, is_pre):
state = state.copy()
self_buildings_top = state[1:4]
self_buildings_bottom = state[4:7]
enemy_buildings_top = state[8:11]
enemy_buildings_bottom = state[11:14]
self_units_top = state[15:27]
self_units_bottom = state[27:39]
# print(self_units_top)
# print(self_units_top)
self_units_top_reversed = np.concatenate((state[24:27], state[21:24], state[18:21], state[15:18]))
self_units_bottom_reversed = np.concatenate((state[36:39], state[33:36], state[30:33], state[27:30]))
# print(self_units_top_reversed)
# print(self_units_bottom_reversed)
enemy_units_top = state[39:51]
enemy_units_bottom = state[51:63]
# print(enemy_units_top)
# print(enemy_units_bottom)
enemy_units_top_reversed = np.concatenate((state[48:51], state[45:48], state[42:45], state[39:42]))
enemy_units_bottom_reversed = np.concatenate((state[60:63], state[57:60], state[54:57], state[51:54]))
# print(enemy_units_top)
# print(enemy_units_bottom)
self_hp_top = state[63].reshape(1)
self_hp_bottom = state[64].reshape(1)
enemy_hp_top = state[65].reshape(1)
enemy_hp_bottom = state[66].reshape(1)
# input()
# print(self_buildings_top.shape, enemy_buildings_top.shape, self_units_top.shape, enemy_units_top.shape, self_hp_top.shape)
# print(self_buildings_bottom.shape, enemy_buildings_bottom.shape, self_units_bottom.shape, enemy_units_bottom.shape, self_hp_bottom.shape)
if is_pre:
input_1 = np.concatenate((self_buildings_top, enemy_buildings_top, self_units_top, enemy_units_top, self_hp_top, np.array([1])))
input_2 = np.concatenate((self_buildings_bottom, enemy_buildings_bottom, self_units_bottom, enemy_units_bottom, self_hp_bottom, np.array([2])))
input_3 = np.concatenate((enemy_buildings_top, self_buildings_top, enemy_units_top_reversed, self_units_top_reversed, enemy_hp_top, np.array([3])))
input_4 = np.concatenate((enemy_buildings_bottom, self_buildings_bottom, enemy_units_bottom_reversed, self_units_bottom_reversed, enemy_hp_bottom, np.array([4])))
return [input_1, input_2, input_3, input_4]
else:
ground_truth_1 = np.concatenate((self_units_top, enemy_units_top, self_hp_top, np.array([1])))
ground_truth_2 = np.concatenate((self_units_bottom, enemy_units_bottom, self_hp_bottom, np.array([2])))
ground_truth_3 = np.concatenate((enemy_units_top_reversed, self_units_top_reversed, enemy_hp_top, np.array([3])))
ground_truth_4 = np.concatenate((enemy_units_bottom_reversed, self_units_bottom_reversed, enemy_hp_bottom, np.array([4])))
return [ground_truth_1, ground_truth_2, ground_truth_3, ground_truth_4]
    def update(self):
        """Sample a batch from replay memory and fit both transition models.

        Returns silently until the memory holds at least one full batch.
        """
        # Not enough transitions collected yet for a full training batch.
        if len(self.memory) < self.reinforce_config.batch_size:
            return
        batch = self.memory.sample(self.reinforce_config.batch_size)
        (inputs, _, _, ground_truths, _) = batch
        # Sanity check: the view tag (last element) of each input must match
        # its ground truth. NOTE(review): the assertion message expression is
        # `print(...)`, which returns None; it prints only when the check fails.
        assert np.sum(inputs[:, -1] == ground_truths[:, -1]) == len(ground_truths[:, -1]),print(inputs[:, -1], ground_truths[:, -1])
        # if self.steps == 40:
        #     print(inputs[:, -1])
        #     print(ground_truths[:, -1])
        #     input()
        # Unit model input drops the hp and tag columns (30 dims, per the
        # TransModel("trans_units", 30, 24, ...) constructor).
        inputs_units = FloatTensor(inputs[:, :-2])
        # Hp model input drops only the tag column (31 dims -> 1 hp output).
        inputs_hp = FloatTensor(inputs[:, :-1])
        gt_units = FloatTensor(ground_truths[:, : -2])
        gt_hps = FloatTensor(ground_truths[:, -2])
        outputs_unit = self.trans_model_units.predict_batch(inputs_units)
        outputs_hp = self.trans_model_hp.predict_batch(inputs_hp)
        self.trans_model_units.fit(gt_units, outputs_unit, self.steps)
self.trans_model_hp.fit(gt_hps, outputs_hp, self.steps) | [
"logging.getLogger",
"os.path.exists",
"pickle.dump",
"tensorboardX.SummaryWriter",
"abp.adaptives.common.prioritized_memory.memory.ReplayBuffer",
"pickle.load",
"numpy.sum",
"numpy.array",
"torch.cuda.is_available",
"numpy.concatenate",
"abp.models.trans_model_tow.TransModel"
] | [((349, 374), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (366, 374), False, 'import logging\n'), ((386, 411), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (409, 411), False, 'import torch\n'), ((1049, 1096), 'abp.adaptives.common.prioritized_memory.memory.ReplayBuffer', 'ReplayBuffer', (['self.reinforce_config.memory_size'], {}), '(self.reinforce_config.memory_size)\n', (1061, 1096), False, 'from abp.adaptives.common.prioritized_memory.memory import ReplayBuffer\n'), ((1822, 1867), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'reinforce_summary_path'}), '(log_dir=reinforce_summary_path)\n', (1835, 1867), False, 'from tensorboardX import SummaryWriter\n'), ((1910, 1974), 'abp.models.trans_model_tow.TransModel', 'TransModel', (['"""trans_units"""', '(30)', '(24)', 'self.network_config', 'use_cuda'], {}), "('trans_units', 30, 24, self.network_config, use_cuda)\n", (1920, 1974), False, 'from abp.models.trans_model_tow import TransModel\n'), ((2014, 2074), 'abp.models.trans_model_tow.TransModel', 'TransModel', (['"""trans_hp"""', '(31)', '(1)', 'self.network_config', 'use_cuda'], {}), "('trans_hp', 31, 1, self.network_config, use_cuda)\n", (2024, 2074), False, 'from abp.models.trans_model_tow import TransModel\n'), ((3883, 3955), 'numpy.concatenate', 'np.concatenate', (['(state[24:27], state[21:24], state[18:21], state[15:18])'], {}), '((state[24:27], state[21:24], state[18:21], state[15:18]))\n', (3897, 3955), True, 'import numpy as np\n'), ((3993, 4065), 'numpy.concatenate', 'np.concatenate', (['(state[36:39], state[33:36], state[30:33], state[27:30])'], {}), '((state[36:39], state[33:36], state[30:33], state[27:30]))\n', (4007, 4065), True, 'import numpy as np\n'), ((4354, 4426), 'numpy.concatenate', 'np.concatenate', (['(state[48:51], state[45:48], state[42:45], state[39:42])'], {}), '((state[48:51], state[45:48], state[42:45], state[39:42]))\n', (4368, 4426), True, 'import numpy as 
np\n'), ((4465, 4537), 'numpy.concatenate', 'np.concatenate', (['(state[60:63], state[57:60], state[54:57], state[51:54])'], {}), '((state[60:63], state[57:60], state[54:57], state[51:54]))\n', (4479, 4537), True, 'import numpy as np\n'), ((1531, 1559), 'os.path.exists', 'os.path.exists', (['restore_path'], {}), '(restore_path)\n', (1545, 1559), False, 'import os\n'), ((3252, 3309), 'pickle.dump', 'pickle.dump', (['info', 'file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n', (3263, 3309), False, 'import pickle\n'), ((6736, 6781), 'numpy.sum', 'np.sum', (['(inputs[:, -1] == ground_truths[:, -1])'], {}), '(inputs[:, -1] == ground_truths[:, -1])\n', (6742, 6781), True, 'import numpy as np\n'), ((1643, 1660), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1654, 1660), False, 'import pickle\n'), ((5266, 5279), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5274, 5279), True, 'import numpy as np\n'), ((5423, 5436), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (5431, 5436), True, 'import numpy as np\n'), ((5584, 5597), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (5592, 5597), True, 'import numpy as np\n'), ((5760, 5773), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (5768, 5773), True, 'import numpy as np\n'), ((5950, 5963), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5958, 5963), True, 'import numpy as np\n'), ((6079, 6092), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (6087, 6092), True, 'import numpy as np\n'), ((6218, 6231), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (6226, 6231), True, 'import numpy as np\n'), ((6366, 6379), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (6374, 6379), True, 'import numpy as np\n')] |
import GPy
import GPyOpt
import argparse
import os
import numpy as np
import time
import FireflyAlgorithm as ffa
def func(var):
hist = []
gamma = var[:,0][0]
alpha = var[:,1][0]
fireflies = int(var[:,2][0] * 100)
step = int(var[:,3][0] * 100)
if args.v == 1 or args.v == 4:
alpha = int(alpha * 16)
if args.v == 2 or args.v == 5:
alpha = int(alpha * 32)
for i in range(args.n):
best_firefly = ffa.fireflyAlgorithm(0, d=args.d, i=args.i, g=gamma, a=alpha, f=fireflies, e=args.e, v=args.v, p=args.p, s=step, sch=args.sch)
hist.append(best_firefly.luminosity)
res = np.array(hist).mean()
print('Tried [Gamma, Alpha, #Fireflies, step] = [{}, {}, {}, {}], got {}'.format(gamma, alpha, fireflies, step, res))
with open('bayesopt', 'a') as f:
f.write('{}\t{}\t{}\t{}\t{}\n'.format(gamma, alpha, fireflies, step, res))
return res
def main(args):
with open('bayesopt', 'w') as f:
print('cleaning previous results')
# bounds = [{'name': 'gamma', 'type': 'continuous', 'domain': (0, 1)},
# {'name': 'alpha', 'type': 'continuous', 'domain': (0, 1)},
# {'name': 'nbfireflies', 'type': 'continuous', 'domain': (0.02, 1)}]
bounds = [{'name': 'gamma', 'type': 'continuous', 'domain': (0.001, 1)},
{'name': 'alpha', 'type': 'continuous', 'domain': (0.0625, 1)},
{'name': 'nbfireflies', 'type': 'continuous', 'domain': (0.02, 1)},
{'name': 'step', 'type': 'continuous', 'domain': (0.01, 1)}]
myBopt = GPyOpt.methods.BayesianOptimization(f = func,
domain = bounds,
model_type = 'GP',
acquisition_type = 'EI',
normalize_Y = True,
exact_feval = False,
initial_design_numdata = 8,
evaluator_type = 'local_penalization',
batch_size = 4,
num_cores = 4)
max_iter = args.m
t_start = time.time()
myBopt.run_optimization(max_iter)
best_value = myBopt.fx_opt[0]
best_gamma = myBopt.x_opt[0]
best_alpha = myBopt.x_opt[1]
if args.v == 1 or args.v == 4:
best_alpha = int(best_alpha * 16)
if args.v == 2 or args.v == 5:
best_alpha = int(best_alpha * 32)
best_fireflies = int(myBopt.x_opt[2] * 100)
best_step = int(myBopt.x_opt[3] * 100)
print('Best value: {} at [Gamma, Alpha, #Firefly, step] = [{}, {}, {}, {}], found in {} s'.format(best_value, best_gamma, best_alpha, best_fireflies, best_step, time.time() - t_start))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-m", type = int, default = 100, help = "number of max iterations")
parser.add_argument("-d", type = int, default = 2, help = "number of drones")
parser.add_argument("-i", type = int, default = 10000, help = "number of iterations")
parser.add_argument("-e", type = float, default = 0.1, help = "distance penalization coeficient")
parser.add_argument("-v", type = int, default = 1, help = "alpha version")
parser.add_argument("-n", type = int, default = 1, help = "number of runs")
parser.add_argument("-p", type = int, default = 1, help = "enable/desable verbose")
parser.add_argument("-s", type = int, default = 1, help = "step")
parser.add_argument("-sch", type = str, default = "linear", help = "segment schedule")
args = parser.parse_args()
main(args)
| [
"argparse.ArgumentParser",
"GPyOpt.methods.BayesianOptimization",
"FireflyAlgorithm.fireflyAlgorithm",
"numpy.array",
"time.time"
] | [((1568, 1805), 'GPyOpt.methods.BayesianOptimization', 'GPyOpt.methods.BayesianOptimization', ([], {'f': 'func', 'domain': 'bounds', 'model_type': '"""GP"""', 'acquisition_type': '"""EI"""', 'normalize_Y': '(True)', 'exact_feval': '(False)', 'initial_design_numdata': '(8)', 'evaluator_type': '"""local_penalization"""', 'batch_size': '(4)', 'num_cores': '(4)'}), "(f=func, domain=bounds, model_type='GP',\n acquisition_type='EI', normalize_Y=True, exact_feval=False,\n initial_design_numdata=8, evaluator_type='local_penalization',\n batch_size=4, num_cores=4)\n", (1603, 1805), False, 'import GPyOpt\n'), ((2291, 2302), 'time.time', 'time.time', ([], {}), '()\n', (2300, 2302), False, 'import time\n'), ((2915, 2940), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2938, 2940), False, 'import argparse\n'), ((450, 580), 'FireflyAlgorithm.fireflyAlgorithm', 'ffa.fireflyAlgorithm', (['(0)'], {'d': 'args.d', 'i': 'args.i', 'g': 'gamma', 'a': 'alpha', 'f': 'fireflies', 'e': 'args.e', 'v': 'args.v', 'p': 'args.p', 's': 'step', 'sch': 'args.sch'}), '(0, d=args.d, i=args.i, g=gamma, a=alpha, f=fireflies,\n e=args.e, v=args.v, p=args.p, s=step, sch=args.sch)\n', (470, 580), True, 'import FireflyAlgorithm as ffa\n'), ((632, 646), 'numpy.array', 'np.array', (['hist'], {}), '(hist)\n', (640, 646), True, 'import numpy as np\n'), ((2851, 2862), 'time.time', 'time.time', ([], {}), '()\n', (2860, 2862), False, 'import time\n')] |
import cv2
import numpy as np
import glob
from scipy.stats import multivariate_normal
import copy
out = cv2.VideoWriter('3D_GMM.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 5, (640, 480))
DATASET = "DETECTBUOY-FRAMES/Data"
def Ellipse_Fit(mask):
processed = mask.astype(np.uint8)
processed = cv2.GaussianBlur(processed, (5, 5), cv2.BORDER_DEFAULT)
ret, thresh = cv2.threshold(processed, 60, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
ellipses = []
for cnt in contours:
if cv2.contourArea(cnt) > 300 and cv2.contourArea(cnt) < 5000:
ellipses.append(cv2.fitEllipse(cnt))
outEllipse = []
for ell in ellipses:
(x, y), (MA, ma), angle = ell
if abs(MA / ma - 1) < 0.3:
outEllipse.append(ell)
return outEllipse
def High_PDF(prob, threshold):
p = prob.reshape((prob.shape[0] * prob.shape[1], prob.shape[2]))
q = np.multiply(p, p > threshold)
b = np.multiply(q > 0, np.equal(q, np.max(q, axis=-1, keepdims=True))) * 255
c = b.reshape((prob.shape[0], prob.shape[1], prob.shape[2]))
return c
def Water_Mask(frame):
# For redBuoy1
mean = np.array([80.27603646, 141.43706643, 253.22644464])
cov = np.array([[190.60613704, 201.66921469, -5.62641894],
[201.66921469, 340.80624709, -14.2263423],
[-5.62641894, -14.2263423, 3.51000389]])
P_RB1 = multivariate_normal.pdf(frame, mean, cov)
# For redBuoy2
mean = np.array([129.75146712, 187.0247840, 232.87476706])
cov = np.array([[792.3089489, 966.06181438, -76.63443504],
[966.06181438, 1358.97343543, -15.6558208],
[-76.63443504, -15.6558208, 274.29810684]])
P_RB2 = multivariate_normal.pdf(frame, mean, cov)
# For redBuoy3
mean = np.array([117.81710669, 204.2309085, 239.41048976])
cov = np.array([[443.75427994, 518.28342899, -139.95097112],
[518.28342899, 707.05237291, -187.05091184],
[-139.95097112, -187.05091184, 64.27720605]])
P_RB3 = multivariate_normal.pdf(frame, mean, cov)
P_RB = P_RB1 + P_RB2 + P_RB3
# For Green1
mean = np.array([112.05003011, 183.18656764, 103.53271839])
cov = np.array([[98.18729895, 128.48175019, 111.23031125],
[128.48175019, 372.47086917, 237.17047113],
[111.23031125, 237.17047113, 230.78640153]])
P_GB1 = multivariate_normal.pdf(frame, mean, cov)
# For Green2
mean = np.array([125.22320558, 229.46544678, 142.17248589])
cov = np.array([[83.42004155, 109.12603316, 133.04099339],
[109.12603316, 181.75339967, 209.44426981],
[133.04099339, 209.44426981, 280.21373779]])
P_GB2 = multivariate_normal.pdf(frame, mean, cov)
# For Green3
mean = np.array([150.32076907, 239.42616469, 187.56685088])
cov = np.array([[296.42463121, 109.06686387, 351.389052],
[109.06686387, 138.29429843, 172.87515629],
[351.389052, 172.87515629, 653.94501523]])
P_GB3 = multivariate_normal.pdf(frame, mean, cov)
P_GB = P_GB1 + P_GB2 + P_GB3
# For yellowBuoy
mean = np.array([93.18674196, 204.10273852, 208.83574233])
cov = np.array([[325.95744462, 14.78707018, -304.72169773],
[14.78707018, 161.85807802, 267.4821683],
[-304.72169773, 267.4821683, 890.87026603]])
P_YB = multivariate_normal.pdf(frame, mean, cov)
# For Water1
mean = np.array([154.242466 ,228.26091272,233.45074722])
cov = np.array([[59.2038326 , 46.17327671, 5.3503438 ],
[46.17327671, 58.66903207, -7.51014766],
[ 5.3503438 , -7.51014766, 26.28058457]])
P_W1 = multivariate_normal.pdf(frame, mean, cov)
mean = np.array([141.96297332 ,204.83155696,220.47708726])
cov = np.array([[100.70632783, 148.60410607, 59.9378063 ],
[148.60410607, 320.22102525, 129.64470878],
[ 59.9378063 , 129.64470878, 121.25904618]])
P_W2 = multivariate_normal.pdf(frame, mean, cov)
mean = np.array([178.2135104 ,238.03114502 ,180.63696875])
cov = np.array([[ 44.16861721, 46.21022285, 68.88757629],
[ 46.21022285, 58.90147946, 78.51143783],
[ 68.88757629, 78.51143783, 203.85445566]])
P_W3 = multivariate_normal.pdf(frame, mean, cov)
P_W = P_W1 + P_W2 + P_W3
prob = np.zeros((frame.shape[0], frame.shape[1], 4))
prob[:, :, 0] = P_RB
prob[:, :, 1] = P_GB
prob[:, :, 2] = P_YB
prob[:, :, 3] = P_W * 0.99
# best results with Multiply
RGY_Buoy = High_PDF(prob, 1e-15) # -15
return RGY_Buoy
def Buoy_data(waterRemoved):
# For redBuoy1
mean = np.array([129.75151074, 187.02495822, 232.87487513])
cov = np.array([[792.30842907, 966.0620035, -76.63515958],
[966.0620035, 1358.97477086, -15.65802897],
[-76.63515958, -15.65802897, 274.29390402]])
P_RB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For redBuoy2
mean = np.array([117.81699529, 204.23082796, 239.41051339])
cov = np.array([[443.75320996, 518.2835338, -139.95105276],
[518.2835338, 707.05318175, -187.05121695],
[-139.95105276, -187.05121695, 64.27726249]])
P_RB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For redBuoy3
mean = np.array([81.53413865, 141.57207486, 253.14210245])
cov = np.array([[228.92875888, 224.1567059, -7.02999134],
[224.1567059, 339.10305449, -13.59245238],
[-7.02999134, -13.59245238, 3.91363665]])
P_RB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiRb = np.array([0.15838274, 0.38113269, 0.44139788])
P_RB = PiRb[0] * P_RB1 + PiRb[1] * P_RB2 + PiRb[2] * P_RB3
# For Green1
mean = np.array([110.15586103, 177.988079, 97.8360865])
cov = np.array([[82.84302567, 106.35540435, 74.22384909],
[106.35540435, 306.33086617, 154.3897207],
[74.22384909, 154.3897207, 118.64202382]])
P_GB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For Green2
mean = np.array([124.00448114, 217.39861905, 136.44552769])
cov = np.array([[135.27527716, 132.43005772, 186.54968698],
[132.43005772, 361.10595221, 281.7120668],
[186.54968698, 281.7120668, 375.55342302]])
P_GB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For Green3
mean = np.array([152.97075593, 244.63284543, 194.2491698])
cov = np.array([[269.37418864, 37.51788466, 286.85356749],
[37.51788466, 38.57928137, 14.06820397],
[286.85356749, 14.06820397, 491.56890665]])
P_GB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiGb = np.array([0.39978126, 0.38033716, 0.19886462])
P_GB = PiGb[0] * P_GB1 + PiGb[1] * P_GB2 + PiGb[2] * P_GB3
# For yellowBuoy1
mean = np.array([124.48956165, 235.49979435, 232.22955126])
cov = np.array([[1165.98834055, 180.00433825, -59.25367115],
[180.00433825, 78.85588687, 20.33064827],
[-59.25367115, 20.33064827, 81.66227936]])
P_YB1 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For yellowBuoy
mean = np.array([93.18674196, 204.10273852, 208.83574233])
cov = np.array([[325.95744462, 14.78707018, -304.72169773],
[14.78707018, 161.85807802, 267.4821683],
[-304.72169773, 267.4821683, 890.87026603]])
P_YB2 = multivariate_normal.pdf(waterRemoved, mean, cov)
# For yellowBuoy
mean = np.array([138.56180468, 240.07565167, 229.07810767])
cov = np.array([[775.88598663, -42.21694591, -40.46084514],
[-42.21694591, 4.60254418, 2.08209706],
[-40.46084514, 2.08209706, 6.96561565]])
P_YB3 = multivariate_normal.pdf(waterRemoved, mean, cov)
PiYb = np.array([0.26255614, 0.2175131, 0.50246477])
P_YB = PiYb[0] * P_YB1 + PiYb[1] * P_YB2 + PiYb[2] * P_YB3
prob = np.zeros((frame.shape[0], frame.shape[1], 3))
prob[:, :, 0] = P_RB
prob[:, :, 1] = P_GB
prob[:, :, 2] = P_YB
RGY_Buoy_2 = High_PDF(prob, 1e-6) # -20
return RGY_Buoy_2
def Draw_Ellipse(RGY_Buoy_2,Image_Input):
ellipseR = Ellipse_Fit(RGY_Buoy_2[:, :, 0].astype(np.uint8))
Image_Input_1 = copy.deepcopy(Image_Input)
for ell in ellipseR:
cv2.ellipse(Image_Input_1, ell, (0, 0, 255), 5)
ellipseG = Ellipse_Fit(RGY_Buoy_2[:, :, 1].astype(np.uint8))
for ell in ellipseG:
cv2.ellipse(Image_Input_1, ell, (0, 255, 0), 5)
ellipseY = Ellipse_Fit(RGY_Buoy_2[:, :, 2].astype(np.uint8))
for ell in ellipseY:
cv2.ellipse(Image_Input_1, ell, (0, 255, 255), 5)
return Image_Input_1
for file in glob.glob(f"{DATASET}/*.jpg"):
Image_Input = cv2.imread(file)
frame = np.zeros((Image_Input.shape[0], Image_Input.shape[1], 3))
frame[:, :, 0] = Image_Input[:, :, 0]
frame[:, :, 1] = Image_Input[:, :, 1]
frame[:, :, 2] = Image_Input[:, :, 2]
## Order of Probabilities - green, red
RGY_Buoy = Water_Mask(frame)
Water_Remove = RGY_Buoy[:, :, 3].astype(np.int8)
Water_Remove = cv2.bitwise_not(Water_Remove)
waterRemoved = cv2.bitwise_and(Image_Input, Image_Input, mask=Water_Remove)
# cv2.imshow("WATERMASK",waterRemoved)
RGY_Buoy_2 = Buoy_data(waterRemoved)
redBuoySegement = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 0].astype(np.int8))
greenBuoySegment = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 1].astype(np.int8))
yellowBuoySegment = cv2.bitwise_and(Image_Input, Image_Input, mask=RGY_Buoy_2[:, :, 2].astype(np.int8))
# cv2.imshow("R-BUOY",redBuoySegement)
# cv2.imshow("G-BUOY",greenBuoySegment)
# cv2.imshow("Y-BUOY",yellowBuoySegment)
Image_Input_1 = Draw_Ellipse(RGY_Buoy_2, Image_Input)
out.write(Image_Input_1)
cv2.imshow("ALL-BUOY-DETECT", Image_Input_1)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.release()
cv2.destroyAllWindows() | [
"cv2.imshow",
"numpy.array",
"cv2.ellipse",
"cv2.destroyAllWindows",
"cv2.fitEllipse",
"copy.deepcopy",
"numpy.multiply",
"cv2.threshold",
"cv2.contourArea",
"numpy.max",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"glob.glob",
"cv2.GaussianBlur",
"cv2.imread",
"scipy.stats.multivariate_n... | [((9163, 9192), 'glob.glob', 'glob.glob', (['f"""{DATASET}/*.jpg"""'], {}), "(f'{DATASET}/*.jpg')\n", (9172, 9192), False, 'import glob\n'), ((10455, 10478), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10476, 10478), False, 'import cv2\n'), ((139, 181), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (161, 181), False, 'import cv2\n'), ((316, 371), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['processed', '(5, 5)', 'cv2.BORDER_DEFAULT'], {}), '(processed, (5, 5), cv2.BORDER_DEFAULT)\n', (332, 371), False, 'import cv2\n'), ((391, 443), 'cv2.threshold', 'cv2.threshold', (['processed', '(60)', '(255)', 'cv2.THRESH_BINARY'], {}), '(processed, 60, 255, cv2.THRESH_BINARY)\n', (404, 443), False, 'import cv2\n'), ((471, 535), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (487, 535), False, 'import cv2\n'), ((999, 1028), 'numpy.multiply', 'np.multiply', (['p', '(p > threshold)'], {}), '(p, p > threshold)\n', (1010, 1028), True, 'import numpy as np\n'), ((1249, 1300), 'numpy.array', 'np.array', (['[80.27603646, 141.43706643, 253.22644464]'], {}), '([80.27603646, 141.43706643, 253.22644464])\n', (1257, 1300), True, 'import numpy as np\n'), ((1312, 1453), 'numpy.array', 'np.array', (['[[190.60613704, 201.66921469, -5.62641894], [201.66921469, 340.80624709, -\n 14.2263423], [-5.62641894, -14.2263423, 3.51000389]]'], {}), '([[190.60613704, 201.66921469, -5.62641894], [201.66921469, \n 340.80624709, -14.2263423], [-5.62641894, -14.2263423, 3.51000389]])\n', (1320, 1453), True, 'import numpy as np\n'), ((1504, 1545), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (1527, 1545), False, 'from scipy.stats import multivariate_normal\n'), ((1580, 1630), 'numpy.array', 
'np.array', (['[129.75146712, 187.024784, 232.87476706]'], {}), '([129.75146712, 187.024784, 232.87476706])\n', (1588, 1630), True, 'import numpy as np\n'), ((1643, 1788), 'numpy.array', 'np.array', (['[[792.3089489, 966.06181438, -76.63443504], [966.06181438, 1358.97343543, -\n 15.6558208], [-76.63443504, -15.6558208, 274.29810684]]'], {}), '([[792.3089489, 966.06181438, -76.63443504], [966.06181438, \n 1358.97343543, -15.6558208], [-76.63443504, -15.6558208, 274.29810684]])\n', (1651, 1788), True, 'import numpy as np\n'), ((1839, 1880), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (1862, 1880), False, 'from scipy.stats import multivariate_normal\n'), ((1915, 1966), 'numpy.array', 'np.array', (['[117.81710669, 204.2309085, 239.41048976]'], {}), '([117.81710669, 204.2309085, 239.41048976])\n', (1923, 1966), True, 'import numpy as np\n'), ((1978, 2128), 'numpy.array', 'np.array', (['[[443.75427994, 518.28342899, -139.95097112], [518.28342899, 707.05237291, \n -187.05091184], [-139.95097112, -187.05091184, 64.27720605]]'], {}), '([[443.75427994, 518.28342899, -139.95097112], [518.28342899, \n 707.05237291, -187.05091184], [-139.95097112, -187.05091184, 64.27720605]])\n', (1986, 2128), True, 'import numpy as np\n'), ((2179, 2220), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (2202, 2220), False, 'from scipy.stats import multivariate_normal\n'), ((2289, 2341), 'numpy.array', 'np.array', (['[112.05003011, 183.18656764, 103.53271839]'], {}), '([112.05003011, 183.18656764, 103.53271839])\n', (2297, 2341), True, 'import numpy as np\n'), ((2353, 2499), 'numpy.array', 'np.array', (['[[98.18729895, 128.48175019, 111.23031125], [128.48175019, 372.47086917, \n 237.17047113], [111.23031125, 237.17047113, 230.78640153]]'], {}), '([[98.18729895, 128.48175019, 111.23031125], [128.48175019, \n 372.47086917, 237.17047113], 
[111.23031125, 237.17047113, 230.78640153]])\n', (2361, 2499), True, 'import numpy as np\n'), ((2550, 2591), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (2573, 2591), False, 'from scipy.stats import multivariate_normal\n'), ((2624, 2676), 'numpy.array', 'np.array', (['[125.22320558, 229.46544678, 142.17248589]'], {}), '([125.22320558, 229.46544678, 142.17248589])\n', (2632, 2676), True, 'import numpy as np\n'), ((2688, 2834), 'numpy.array', 'np.array', (['[[83.42004155, 109.12603316, 133.04099339], [109.12603316, 181.75339967, \n 209.44426981], [133.04099339, 209.44426981, 280.21373779]]'], {}), '([[83.42004155, 109.12603316, 133.04099339], [109.12603316, \n 181.75339967, 209.44426981], [133.04099339, 209.44426981, 280.21373779]])\n', (2696, 2834), True, 'import numpy as np\n'), ((2885, 2926), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (2908, 2926), False, 'from scipy.stats import multivariate_normal\n'), ((2959, 3011), 'numpy.array', 'np.array', (['[150.32076907, 239.42616469, 187.56685088]'], {}), '([150.32076907, 239.42616469, 187.56685088])\n', (2967, 3011), True, 'import numpy as np\n'), ((3023, 3166), 'numpy.array', 'np.array', (['[[296.42463121, 109.06686387, 351.389052], [109.06686387, 138.29429843, \n 172.87515629], [351.389052, 172.87515629, 653.94501523]]'], {}), '([[296.42463121, 109.06686387, 351.389052], [109.06686387, \n 138.29429843, 172.87515629], [351.389052, 172.87515629, 653.94501523]])\n', (3031, 3166), True, 'import numpy as np\n'), ((3217, 3258), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (3240, 3258), False, 'from scipy.stats import multivariate_normal\n'), ((3331, 3382), 'numpy.array', 'np.array', (['[93.18674196, 204.10273852, 208.83574233]'], {}), '([93.18674196, 204.10273852, 208.83574233])\n', (3339, 
3382), True, 'import numpy as np\n'), ((3394, 3539), 'numpy.array', 'np.array', (['[[325.95744462, 14.78707018, -304.72169773], [14.78707018, 161.85807802, \n 267.4821683], [-304.72169773, 267.4821683, 890.87026603]]'], {}), '([[325.95744462, 14.78707018, -304.72169773], [14.78707018, \n 161.85807802, 267.4821683], [-304.72169773, 267.4821683, 890.87026603]])\n', (3402, 3539), True, 'import numpy as np\n'), ((3589, 3630), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (3612, 3630), False, 'from scipy.stats import multivariate_normal\n'), ((3663, 3713), 'numpy.array', 'np.array', (['[154.242466, 228.26091272, 233.45074722]'], {}), '([154.242466, 228.26091272, 233.45074722])\n', (3671, 3713), True, 'import numpy as np\n'), ((3724, 3857), 'numpy.array', 'np.array', (['[[59.2038326, 46.17327671, 5.3503438], [46.17327671, 58.66903207, -\n 7.51014766], [5.3503438, -7.51014766, 26.28058457]]'], {}), '([[59.2038326, 46.17327671, 5.3503438], [46.17327671, 58.66903207, \n -7.51014766], [5.3503438, -7.51014766, 26.28058457]])\n', (3732, 3857), True, 'import numpy as np\n'), ((3910, 3951), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (3933, 3951), False, 'from scipy.stats import multivariate_normal\n'), ((3966, 4018), 'numpy.array', 'np.array', (['[141.96297332, 204.83155696, 220.47708726]'], {}), '([141.96297332, 204.83155696, 220.47708726])\n', (3974, 4018), True, 'import numpy as np\n'), ((4029, 4172), 'numpy.array', 'np.array', (['[[100.70632783, 148.60410607, 59.9378063], [148.60410607, 320.22102525, \n 129.64470878], [59.9378063, 129.64470878, 121.25904618]]'], {}), '([[100.70632783, 148.60410607, 59.9378063], [148.60410607, \n 320.22102525, 129.64470878], [59.9378063, 129.64470878, 121.25904618]])\n', (4037, 4172), True, 'import numpy as np\n'), ((4224, 4265), 'scipy.stats.multivariate_normal.pdf', 
'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (4247, 4265), False, 'from scipy.stats import multivariate_normal\n'), ((4280, 4331), 'numpy.array', 'np.array', (['[178.2135104, 238.03114502, 180.63696875]'], {}), '([178.2135104, 238.03114502, 180.63696875])\n', (4288, 4331), True, 'import numpy as np\n'), ((4344, 4483), 'numpy.array', 'np.array', (['[[44.16861721, 46.21022285, 68.88757629], [46.21022285, 58.90147946, \n 78.51143783], [68.88757629, 78.51143783, 203.85445566]]'], {}), '([[44.16861721, 46.21022285, 68.88757629], [46.21022285, \n 58.90147946, 78.51143783], [68.88757629, 78.51143783, 203.85445566]])\n', (4352, 4483), True, 'import numpy as np\n'), ((4539, 4580), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['frame', 'mean', 'cov'], {}), '(frame, mean, cov)\n', (4562, 4580), False, 'from scipy.stats import multivariate_normal\n'), ((4627, 4672), 'numpy.zeros', 'np.zeros', (['(frame.shape[0], frame.shape[1], 4)'], {}), '((frame.shape[0], frame.shape[1], 4))\n', (4635, 4672), True, 'import numpy as np\n'), ((4951, 5003), 'numpy.array', 'np.array', (['[129.75151074, 187.02495822, 232.87487513]'], {}), '([129.75151074, 187.02495822, 232.87487513])\n', (4959, 5003), True, 'import numpy as np\n'), ((5015, 5161), 'numpy.array', 'np.array', (['[[792.30842907, 966.0620035, -76.63515958], [966.0620035, 1358.97477086, -\n 15.65802897], [-76.63515958, -15.65802897, 274.29390402]]'], {}), '([[792.30842907, 966.0620035, -76.63515958], [966.0620035, \n 1358.97477086, -15.65802897], [-76.63515958, -15.65802897, 274.29390402]])\n', (5023, 5161), True, 'import numpy as np\n'), ((5212, 5260), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (5235, 5260), False, 'from scipy.stats import multivariate_normal\n'), ((5295, 5347), 'numpy.array', 'np.array', (['[117.81699529, 204.23082796, 239.41051339]'], {}), '([117.81699529, 
204.23082796, 239.41051339])\n', (5303, 5347), True, 'import numpy as np\n'), ((5359, 5507), 'numpy.array', 'np.array', (['[[443.75320996, 518.2835338, -139.95105276], [518.2835338, 707.05318175, -\n 187.05121695], [-139.95105276, -187.05121695, 64.27726249]]'], {}), '([[443.75320996, 518.2835338, -139.95105276], [518.2835338, \n 707.05318175, -187.05121695], [-139.95105276, -187.05121695, 64.27726249]])\n', (5367, 5507), True, 'import numpy as np\n'), ((5558, 5606), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (5581, 5606), False, 'from scipy.stats import multivariate_normal\n'), ((5641, 5692), 'numpy.array', 'np.array', (['[81.53413865, 141.57207486, 253.14210245]'], {}), '([81.53413865, 141.57207486, 253.14210245])\n', (5649, 5692), True, 'import numpy as np\n'), ((5704, 5845), 'numpy.array', 'np.array', (['[[228.92875888, 224.1567059, -7.02999134], [224.1567059, 339.10305449, -\n 13.59245238], [-7.02999134, -13.59245238, 3.91363665]]'], {}), '([[228.92875888, 224.1567059, -7.02999134], [224.1567059, \n 339.10305449, -13.59245238], [-7.02999134, -13.59245238, 3.91363665]])\n', (5712, 5845), True, 'import numpy as np\n'), ((5896, 5944), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (5919, 5944), False, 'from scipy.stats import multivariate_normal\n'), ((5959, 6005), 'numpy.array', 'np.array', (['[0.15838274, 0.38113269, 0.44139788]'], {}), '([0.15838274, 0.38113269, 0.44139788])\n', (5967, 6005), True, 'import numpy as np\n'), ((6102, 6150), 'numpy.array', 'np.array', (['[110.15586103, 177.988079, 97.8360865]'], {}), '([110.15586103, 177.988079, 97.8360865])\n', (6110, 6150), True, 'import numpy as np\n'), ((6162, 6304), 'numpy.array', 'np.array', (['[[82.84302567, 106.35540435, 74.22384909], [106.35540435, 306.33086617, \n 154.3897207], [74.22384909, 154.3897207, 118.64202382]]'], 
{}), '([[82.84302567, 106.35540435, 74.22384909], [106.35540435, \n 306.33086617, 154.3897207], [74.22384909, 154.3897207, 118.64202382]])\n', (6170, 6304), True, 'import numpy as np\n'), ((6355, 6403), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (6378, 6403), False, 'from scipy.stats import multivariate_normal\n'), ((6436, 6488), 'numpy.array', 'np.array', (['[124.00448114, 217.39861905, 136.44552769]'], {}), '([124.00448114, 217.39861905, 136.44552769])\n', (6444, 6488), True, 'import numpy as np\n'), ((6500, 6645), 'numpy.array', 'np.array', (['[[135.27527716, 132.43005772, 186.54968698], [132.43005772, 361.10595221, \n 281.7120668], [186.54968698, 281.7120668, 375.55342302]]'], {}), '([[135.27527716, 132.43005772, 186.54968698], [132.43005772, \n 361.10595221, 281.7120668], [186.54968698, 281.7120668, 375.55342302]])\n', (6508, 6645), True, 'import numpy as np\n'), ((6696, 6744), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (6719, 6744), False, 'from scipy.stats import multivariate_normal\n'), ((6777, 6828), 'numpy.array', 'np.array', (['[152.97075593, 244.63284543, 194.2491698]'], {}), '([152.97075593, 244.63284543, 194.2491698])\n', (6785, 6828), True, 'import numpy as np\n'), ((6840, 6982), 'numpy.array', 'np.array', (['[[269.37418864, 37.51788466, 286.85356749], [37.51788466, 38.57928137, \n 14.06820397], [286.85356749, 14.06820397, 491.56890665]]'], {}), '([[269.37418864, 37.51788466, 286.85356749], [37.51788466, \n 38.57928137, 14.06820397], [286.85356749, 14.06820397, 491.56890665]])\n', (6848, 6982), True, 'import numpy as np\n'), ((7033, 7081), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (7056, 7081), False, 'from scipy.stats import multivariate_normal\n'), ((7096, 7142), 
'numpy.array', 'np.array', (['[0.39978126, 0.38033716, 0.19886462]'], {}), '([0.39978126, 0.38033716, 0.19886462])\n', (7104, 7142), True, 'import numpy as np\n'), ((7246, 7298), 'numpy.array', 'np.array', (['[124.48956165, 235.49979435, 232.22955126]'], {}), '([124.48956165, 235.49979435, 232.22955126])\n', (7254, 7298), True, 'import numpy as np\n'), ((7310, 7454), 'numpy.array', 'np.array', (['[[1165.98834055, 180.00433825, -59.25367115], [180.00433825, 78.85588687, \n 20.33064827], [-59.25367115, 20.33064827, 81.66227936]]'], {}), '([[1165.98834055, 180.00433825, -59.25367115], [180.00433825, \n 78.85588687, 20.33064827], [-59.25367115, 20.33064827, 81.66227936]])\n', (7318, 7454), True, 'import numpy as np\n'), ((7505, 7553), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (7528, 7553), False, 'from scipy.stats import multivariate_normal\n'), ((7590, 7641), 'numpy.array', 'np.array', (['[93.18674196, 204.10273852, 208.83574233]'], {}), '([93.18674196, 204.10273852, 208.83574233])\n', (7598, 7641), True, 'import numpy as np\n'), ((7653, 7798), 'numpy.array', 'np.array', (['[[325.95744462, 14.78707018, -304.72169773], [14.78707018, 161.85807802, \n 267.4821683], [-304.72169773, 267.4821683, 890.87026603]]'], {}), '([[325.95744462, 14.78707018, -304.72169773], [14.78707018, \n 161.85807802, 267.4821683], [-304.72169773, 267.4821683, 890.87026603]])\n', (7661, 7798), True, 'import numpy as np\n'), ((7849, 7897), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (7872, 7897), False, 'from scipy.stats import multivariate_normal\n'), ((7934, 7986), 'numpy.array', 'np.array', (['[138.56180468, 240.07565167, 229.07810767]'], {}), '([138.56180468, 240.07565167, 229.07810767])\n', (7942, 7986), True, 'import numpy as np\n'), ((7998, 8137), 'numpy.array', 'np.array', (['[[775.88598663, -42.21694591, 
-40.46084514], [-42.21694591, 4.60254418, \n 2.08209706], [-40.46084514, 2.08209706, 6.96561565]]'], {}), '([[775.88598663, -42.21694591, -40.46084514], [-42.21694591, \n 4.60254418, 2.08209706], [-40.46084514, 2.08209706, 6.96561565]])\n', (8006, 8137), True, 'import numpy as np\n'), ((8188, 8236), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['waterRemoved', 'mean', 'cov'], {}), '(waterRemoved, mean, cov)\n', (8211, 8236), False, 'from scipy.stats import multivariate_normal\n'), ((8251, 8296), 'numpy.array', 'np.array', (['[0.26255614, 0.2175131, 0.50246477]'], {}), '([0.26255614, 0.2175131, 0.50246477])\n', (8259, 8296), True, 'import numpy as np\n'), ((8377, 8422), 'numpy.zeros', 'np.zeros', (['(frame.shape[0], frame.shape[1], 3)'], {}), '((frame.shape[0], frame.shape[1], 3))\n', (8385, 8422), True, 'import numpy as np\n'), ((8706, 8732), 'copy.deepcopy', 'copy.deepcopy', (['Image_Input'], {}), '(Image_Input)\n', (8719, 8732), False, 'import copy\n'), ((9213, 9229), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (9223, 9229), False, 'import cv2\n'), ((9243, 9300), 'numpy.zeros', 'np.zeros', (['(Image_Input.shape[0], Image_Input.shape[1], 3)'], {}), '((Image_Input.shape[0], Image_Input.shape[1], 3))\n', (9251, 9300), True, 'import numpy as np\n'), ((9582, 9611), 'cv2.bitwise_not', 'cv2.bitwise_not', (['Water_Remove'], {}), '(Water_Remove)\n', (9597, 9611), False, 'import cv2\n'), ((9632, 9692), 'cv2.bitwise_and', 'cv2.bitwise_and', (['Image_Input', 'Image_Input'], {'mask': 'Water_Remove'}), '(Image_Input, Image_Input, mask=Water_Remove)\n', (9647, 9692), False, 'import cv2\n'), ((10332, 10376), 'cv2.imshow', 'cv2.imshow', (['"""ALL-BUOY-DETECT"""', 'Image_Input_1'], {}), "('ALL-BUOY-DETECT', Image_Input_1)\n", (10342, 10376), False, 'import cv2\n'), ((8768, 8815), 'cv2.ellipse', 'cv2.ellipse', (['Image_Input_1', 'ell', '(0, 0, 255)', '(5)'], {}), '(Image_Input_1, ell, (0, 0, 255), 5)\n', (8779, 8815), False, 'import cv2\n'), 
((8919, 8966), 'cv2.ellipse', 'cv2.ellipse', (['Image_Input_1', 'ell', '(0, 255, 0)', '(5)'], {}), '(Image_Input_1, ell, (0, 255, 0), 5)\n', (8930, 8966), False, 'import cv2\n'), ((9070, 9119), 'cv2.ellipse', 'cv2.ellipse', (['Image_Input_1', 'ell', '(0, 255, 255)', '(5)'], {}), '(Image_Input_1, ell, (0, 255, 255), 5)\n', (9081, 9119), False, 'import cv2\n'), ((10387, 10401), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10398, 10401), False, 'import cv2\n'), ((593, 613), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (608, 613), False, 'import cv2\n'), ((624, 644), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (639, 644), False, 'import cv2\n'), ((682, 701), 'cv2.fitEllipse', 'cv2.fitEllipse', (['cnt'], {}), '(cnt)\n', (696, 701), False, 'import cv2\n'), ((1069, 1102), 'numpy.max', 'np.max', (['q'], {'axis': '(-1)', 'keepdims': '(True)'}), '(q, axis=-1, keepdims=True)\n', (1075, 1102), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# - In this notebook -
# - generate random edits of type (s',r',o') and (s',r',o')
# - i.e. generate totally random edits, not restricted to the neighboourhood of target triple
# - this random baseline is needed for comparison with decoy composition where edits are not restricted to neighbourhood of target
# In[1]:
import pickle
from typing import Dict, Tuple, List
import os
import numpy as np
import json
import torch
import logging
import argparse
import math
from pprint import pprint
import errno
import torch
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from dataset import TrainDataset, BidirectionalOneShotIterator
from evaluation import evaluation
from model import Distmult, Complex, Conve, Transe
# In[2]:
def add_arguments():
    """Build the base CLI parser shared by the link-prediction scripts.

    Returns:
        argparse.ArgumentParser: parser pre-loaded with dataset, model and
        training hyperparameter options; callers may add further arguments
        before calling ``parse_args``.
    """
    parser = argparse.ArgumentParser(description='Link prediction for knowledge graphs')
    parser.add_argument('--data', type=str, default='FB15k-237', help='Dataset to use: {FB15k-237, YAGO3-10, WN18RR, umls, nations, kinship}, default: FB15k-237')
    parser.add_argument('--model', type=str, default='conve', help='Choose from: {conve, distmult, complex}')
    parser.add_argument('--add-reciprocals', action='store_true', help='Option to add reciprocal relations')
    parser.add_argument('--transe-margin', type=float, default=12.0, help='Margin value for TransE scoring function. Default:12.0')
    parser.add_argument('--transe-norm', type=int, default=2, help='P-norm value for TransE scoring function. Default:2')
    parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train (default: 400)')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate (default: 0.001)')  # maybe 0.1
    parser.add_argument('--lr-decay', type=float, default=0.0, help='Weight decay value to use in the optimizer. Default: 0.0')
    parser.add_argument('--num-batches', type=int, default=400, help='Number of batches for training (default: 400)')  # maybe 200?
    parser.add_argument('--test-batch-size', type=int, default=128, help='Batch size for test split (default: 128)')
    parser.add_argument('--valid-batch-size', type=int, default=128, help='Batch size for valid split (default: 128)')
    parser.add_argument('--num-workers', type=int, default=4, help='Number of workers to use for the batch loaders on GPU. Default: 4')
    parser.add_argument('--embedding-dim', type=int, default=200, help='The embedding dimension (1D). Default: 200')
    parser.add_argument('--stack_width', type=int, default=20, help='The first dimension of the reshaped/stacked 2D embedding. Second dimension is inferred. Default: 20')
    #parser.add_argument('--stack_height', type=int, default=10, help='The second dimension of the reshaped/stacked 2D embedding. Default: 10')
    parser.add_argument('--hidden-drop', type=float, default=0.3, help='Dropout for the hidden layer. Default: 0.3.')
    parser.add_argument('--input-drop', type=float, default=0.2, help='Dropout for the input embeddings. Default: 0.2.')
    # Fix: help text previously claimed "Default: 0.2" but the default is 0.3.
    parser.add_argument('--feat-drop', type=float, default=0.3, help='Dropout for the convolutional features. Default: 0.3.')
    # Fix: register the conventional double-dash spellings; the original
    # single-dash spellings are kept for CLI backward compatibility.
    parser.add_argument('-num-filters', '--num-filters', default=32, type=int, help='Number of filters for convolution')
    parser.add_argument('-kernel-size', '--kernel-size', default=3, type=int, help='Kernel Size for convolution')
    # Fix: a store_true flag defaults to False, so the help now says so.
    parser.add_argument('--use-bias', action='store_true', help='Use a bias in the convolutional layer. Default: False')
    parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing value to use. Default: 0.1')
    parser.add_argument('--reg-weight', type=float, default=5e-12, help='Weight for regularization. Default: 5e-12')  # maybe 5e-2?
    # Fix: help text previously claimed "Default: 3" but the default is 2.
    parser.add_argument('--reg-norm', type=int, default=2, help='Norm for regularization. Default: 2')
    parser.add_argument('--resume', action='store_true', help='Restore a saved model.')
    parser.add_argument('--resume-split', type=str, default='test', help='Split to evaluate a restored model')
    parser.add_argument('--seed', type=int, default=17, metavar='S', help='Random seed (default: 17)')
    return parser
def generate_dicts(data_path):
    """Load entity/relation id mappings from JSON files under ``data_path``.

    Returns:
        tuple: (num_entities, num_relations, entity_to_id, relation_to_id).
    """
    with open(os.path.join(data_path, 'entities_dict.json'), 'r') as fh:
        ent_to_id = json.load(fh)
    with open(os.path.join(data_path, 'relations_dict.json'), 'r') as fh:
        rel_to_id = json.load(fh)
    return len(ent_to_id), len(rel_to_id), ent_to_id, rel_to_id
import pandas as pd
def load_data(data_path):
    """Read the train/valid/test triple files into integer numpy arrays.

    Each ``<split>.txt`` is a tab-separated file of integer triples;
    duplicate rows are dropped.

    Returns:
        dict: split name -> ndarray of shape (n_triples, n_columns).
    """
    splits = {}
    for split_name in ('train', 'valid', 'test'):
        frame = pd.read_csv(
            os.path.join(data_path, split_name + '.txt'),
            sep='\t', header=None, names=None, dtype=int,
        ).drop_duplicates()
        splits[split_name] = frame.values
    return splits
def add_model(args, n_ent, n_rel):
    """Instantiate the embedding model selected by ``args.model``.

    Args:
        args: parsed CLI namespace; uses ``args.model`` (None or one of
            'conve', 'distmult', 'complex', 'transe') and
            ``args.add_reciprocals``.
        n_ent: number of entities in the vocabulary.
        n_rel: number of relations (before reciprocal doubling).

    Returns:
        The constructed model; defaults to Conve when ``args.model`` is None.

    Raises:
        Exception: if ``args.model`` names an unknown model.
    """
    model_classes = {
        None: Conve,
        'conve': Conve,
        'distmult': Distmult,
        'complex': Complex,
        'transe': Transe,
    }
    # Reciprocal relations (r and r^-1) double the relation vocabulary.
    num_rel = 2 * n_rel if args.add_reciprocals else n_rel
    if args.model not in model_classes:
        # Fix: the original passed a '{0}' placeholder with logging's lazy
        # %-style arguments, so the model name was never interpolated.
        logger.info('Unknown model: %s', args.model)
        raise Exception("Unknown model!")
    #model.to(self.device)
    return model_classes[args.model](args, n_ent, num_rel)
if __name__ == '__main__':
    # ---- CLI: shared link-prediction options plus attack-specific options ----
    parser = add_arguments()
    parser.add_argument('--target-split', type=int, default=1, help='Ranks to use for target set. Values are 1 for ranks <=10; 2 for ranks>10 and ranks<=100. Default: 1')
    parser.add_argument('--budget', type=int, default=1, help='Budget for each target triple for each corruption side')
    parser.add_argument('--rand-run', type=int, default=1, help='A number assigned to the random run of experiment')

    # In[5]:
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # In[6]:
    #args.target_split = 1 # which target split to use
    #Values are 1 for ranks <=10; 2 for ranks>10 and ranks<=100.
    #args.budget = 1 #indicates the num of adversarial edits for each target triple for each corruption side
    #args.rand_run = 1 # a number assigned to the random run of the experiment
    # Offset the seed per random run so repeated runs differ deterministically.
    args.seed = args.seed + (args.rand_run - 1) # default seed is 17
    #args.model = 'distmult'
    #args.data = 'FB15k-237'

    # Below is based on hyperparams for original model
    if args.data == 'WN18RR':
        if args.model == 'distmult':
            args.lr = 0.01
            args.num_batches = 50
        elif args.model == 'complex':
            args.lr = 0.01
        elif args.model == 'conve':
            args.lr = 0.001
        elif args.model == 'transe':
            args.lr = 0.005
            args.input_drop = 0.0
            args.transe_margin = 9.0
            args.num_batches = 1000
            args.epochs = 100
            args.reg_weight = 1e-12
        else:
            print("New model:{0},{1}. Set hyperparams".format(args.data, args.model))
    elif args.data == 'FB15k-237':
        if args.model == 'distmult':
            args.lr = 0.005
            args.input_drop = 0.5
        elif args.model == 'complex':
            args.lr = 0.005
            args.input_drop = 0.5
        elif args.model == 'conve':
            args.lr = 0.001
            args.hidden_drop = 0.5
        elif args.model == 'transe':
            args.lr = 0.001
            args.input_drop = 0.0
            args.transe_margin = 9.0
            args.num_batches = 800
            args.epochs = 100
            args.reg_weight = 1e-10
        else:
            print("New model:{0},{1}. Set hyperparams".format(args.data, args.model))
    else:
        print("New dataset:{0}. Set hyperparams".format(args.data))

    # In[7]:
    # Fixing random seeds for reproducibility -https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(args.seed)
    cudnn.deterministic = True
    cudnn.benchmark = False
    np.random.seed(args.seed)
    rng = np.random.default_rng(seed=args.seed)

    args.epochs = -1 #no training here

    model_name = '{0}_{1}_{2}_{3}_{4}'.format(args.model, args.embedding_dim, args.input_drop, args.hidden_drop, args.feat_drop)
    model_path = 'saved_models/{0}_{1}.model'.format(args.data, model_name)
    #log_path = 'logs/inv_add_1_{0}_{1}_{2}_{3}.log'.format(args.data, model_name, args.num_batches, args.epochs)

    # Fix: the format string previously contained a stray non-breaking space
    # (\xa0) before %(message)s; replaced with a plain ASCII separator.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)

    data_path = 'data/target_{0}_{1}_{2}'.format(args.model, args.data, args.target_split)

    n_ent, n_rel, ent_to_id, rel_to_id = generate_dicts(data_path)

    ##### load data####
    data = load_data(data_path)
    train_data, valid_data, test_data = data['train'], data['valid'], data['test']

    inp_f = open(os.path.join(data_path, 'to_skip_eval.pickle'), 'rb')
    to_skip_eval: Dict[str, Dict[Tuple[int, int], List[int]]] = pickle.load(inp_f)
    inp_f.close()
    # Normalize skip-list keys to (int, int) tuples.
    to_skip_eval['lhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['lhs'].items()}
    to_skip_eval['rhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['rhs'].items()}

    # In[ ]:
    # #### Pseudocode
    # - Sample an entity from list of entities and relation from list of relations (the list should exclude target entity and relation)
    # - If the random triple already exists in the data, sample again

    # In[7]:
    #np.where(np.asarray(list(rel_to_id.values()))!=2), n_rel

    # In[8]:
    ents = np.asarray(list(ent_to_id.values()))
    rels = np.asarray(list(rel_to_id.values()))

    # In[9]:
    train_data.shape

    # In[11]:
    # Finding corruptions of type 1; checking existence in train set and adding them
    # per_tr is a growing copy of the training set used to test whether a
    # candidate random triple already exists (original rows + accepted edits).
    per_tr = np.empty_like(train_data)
    per_tr[:] = train_data
    summary_dict = {}
    logger.info('------------ Generating random corruptions -----------------')
    trip_to_add = [] # of type s`r`o`
    test_trip_idx = 0
    while test_trip_idx < len(test_data):
        if test_trip_idx%500 == 0:
            logger.info('Processing triple ' + str(test_trip_idx))
        test_trip = test_data[test_trip_idx]
        s = test_trip[0]
        r = test_trip[1]
        o = test_trip[2]
        #print(s,r,o)
        summary_list = []
        summary_list.append(list(map(int, [s,r,o])))
        budget_idx = 0
        # depending on how many triples already exist in data and how many triples are added, this might turn into an infinite loop
        # 2*budget because there are two sides to corrupt
        while budget_idx < 2*args.budget:
            rel_choices = rels[np.where(rels!=r)]
            ent_choices = ents
            #ent_choices = ents[np.where((ents!=o) & (ents!=s))]
            rand_r1 = rng.choice(a=rel_choices, size = 1, replace=True)[0]
            ch = rng.choice(a=ent_choices, size = 2, replace=False)
            rand_s, rand_o = ch[0], ch[1]
            adv = [rand_s, rand_r1, rand_o]
            # mask for triple in training set
            m1 = (np.isin(per_tr[:,0], [adv[0]])
                  & np.isin(per_tr[:,1], [adv[1]])
                  & np.isin(per_tr[:,2], [adv[2]]))
            if np.any(m1):
                logger.info('Random triple already exists...generating another random triple')
            else:
                trip_to_add.append(adv)
                per_tr = np.append(per_tr, np.asarray(adv).reshape(-1,3), axis=0)
                #summary_dict[(s,r,o)] = tuple(adv)
                summary_list.append(list(map(int, adv)))
                budget_idx += 1
        summary_dict[test_trip_idx] = summary_list
        test_trip_idx += 1
    del per_tr

    # In[11]:
    #summary_dict = {'rhs':summary_dict_o, 'lhs':summary_dict_s}

    # In[12]:
    logger.info(len(trip_to_add))
    logger.info(test_data.shape[0])

    # In[13]:
    trips_to_add = np.asarray(trip_to_add)

    # In[14]:
    new_train = np.concatenate((trips_to_add, train_data))

    # In[15]:
    logger.info ('Length of original training set: ' + str(train_data.shape[0]))
    logger.info ('Length of new poisoned training set: ' + str(new_train.shape[0]))

    # In[16]:
    num_en_or = np.unique(np.concatenate((train_data[:,0], train_data[:,2]))).shape[0]
    num_en_pos = np.unique(np.concatenate((new_train[:,0], new_train[:,2]))).shape[0]

    # In[17]:
    #decoy_trips = np.concatenate((np.asarray(decoy_trip_o), np.asarray(decoy_trip_s)))

    # In[17]:
    logger.info ('Length of original test set: ' + str(test_data.shape[0]))
    logger.info ('Number of edits generated : ' + str(trips_to_add.shape[0]))
    #print ('Number of triples in decoy test set: ' + str(decoy_trips.shape[0]))

    # In[19]:
    save_path = 'data/rand_add_g_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data, args.target_split, args.budget, args.rand_run)

    # In[20]:
    try :
        os.makedirs(save_path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            logger.info(e)
            logger.info('Using the existing folder {0} for processed data'.format(save_path))
        else:
            raise

    # In[21]:
    # Persist the poisoned training split plus the unchanged valid/test
    # splits, both as TSV text and as pickled uint64 arrays.
    with open(os.path.join(save_path, 'train.txt'), 'w') as out:
        for item in new_train:
            out.write("%s\n" % "\t".join(map(str, item)))
    out = open(os.path.join(save_path, 'train.pickle'), 'wb')
    pickle.dump(new_train.astype('uint64'), out)
    out.close()

    # In[22]:
    with open(os.path.join(save_path, 'entities_dict.json'), 'w') as f:
        f.write(json.dumps(ent_to_id) + '\n')
    with open(os.path.join(save_path, 'relations_dict.json'), 'w') as f:
        f.write(json.dumps(rel_to_id) + '\n')

    # In[23]:
    with open(os.path.join(save_path, 'valid.txt'), 'w') as out:
        for item in valid_data:
            out.write("%s\n" % "\t".join(map(str, item)))
    out = open(os.path.join(save_path, 'valid.pickle'), 'wb')
    pickle.dump(valid_data.astype('uint64'), out)
    out.close()

    # In[24]:
    with open(os.path.join(save_path, 'test.txt'), 'w') as out:
        for item in test_data:
            out.write("%s\n" % "\t".join(map(str, item)))
    out = open(os.path.join(save_path, 'test.pickle'), 'wb')
    pickle.dump(test_data.astype('uint64'), out)
    out.close()

    # In[25]:
    # In[26]:
    with open(os.path.join(save_path, 'summary_edits.json'), 'w') as out:
        out.write(json.dumps(summary_dict) + '\n')

    # In[27]:
    # In[28]:
    with open(os.path.join(save_path, 'stats.txt'), 'w') as f:
        f.write('Length of original training set: {0} \n'. format(train_data.shape[0]))
        f.write('Length of new poisoned training set: {0} \n'. format(new_train.shape[0]))
        f.write('Number of entities in original training set: {0} \n'. format(num_en_or))
        f.write('Number of entities in poisoned training set: {0} \n'. format(num_en_pos))
        f.write('Length of original test set: {0} \n'. format(test_data.shape[0]))
        f.write('Number of triples added from corrupting both sides: {0}\n'. format(trips_to_add.shape[0]))
        f.write('This attack version is generated from global random edits \n')
        f.write('---------------------------------------------------------------------- \n')

    # In[ ]:
| [
"logging.getLogger",
"numpy.random.default_rng",
"numpy.isin",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"model.Distmult",
"numpy.where",
"json.dumps",
"numpy.asarray",
"numpy.random.seed",
"numpy.concatenate",
"model.Transe",
"model.Conve",
"pickle.load",
"numpy.any",
"mod... | [((848, 923), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Link prediction for knowledge graphs"""'}), "(description='Link prediction for knowledge graphs')\n", (871, 923), False, 'import argparse\n'), ((8736, 8764), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8753, 8764), False, 'import torch\n'), ((8828, 8853), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (8842, 8853), True, 'import numpy as np\n'), ((8864, 8901), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'args.seed'}), '(seed=args.seed)\n', (8885, 8901), True, 'import numpy as np\n'), ((9268, 9414), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - \xa0 %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - \\xa0 %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (9287, 9414), False, 'import logging\n'), ((9505, 9532), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9522, 9532), False, 'import logging\n'), ((9971, 9989), 'pickle.load', 'pickle.load', (['inp_f'], {}), '(inp_f)\n', (9982, 9989), False, 'import pickle\n'), ((10790, 10815), 'numpy.empty_like', 'np.empty_like', (['train_data'], {}), '(train_data)\n', (10803, 10815), True, 'import numpy as np\n'), ((12915, 12938), 'numpy.asarray', 'np.asarray', (['trip_to_add'], {}), '(trip_to_add)\n', (12925, 12938), True, 'import numpy as np\n'), ((12973, 13015), 'numpy.concatenate', 'np.concatenate', (['(trips_to_add, train_data)'], {}), '((trips_to_add, train_data))\n', (12987, 13015), True, 'import numpy as np\n'), ((4389, 4401), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4398, 4401), False, 'import json\n'), ((4496, 4508), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4505, 4508), False, 'import json\n'), ((9853, 9899), 
'os.path.join', 'os.path.join', (['data_path', '"""to_skip_eval.pickle"""'], {}), "(data_path, 'to_skip_eval.pickle')\n", (9865, 9899), False, 'import os\n'), ((13935, 13957), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (13946, 13957), False, 'import os\n'), ((14360, 14399), 'os.path.join', 'os.path.join', (['save_path', '"""train.pickle"""'], {}), "(save_path, 'train.pickle')\n", (14372, 14399), False, 'import os\n'), ((14919, 14958), 'os.path.join', 'os.path.join', (['save_path', '"""valid.pickle"""'], {}), "(save_path, 'valid.pickle')\n", (14931, 14958), False, 'import os\n'), ((15219, 15257), 'os.path.join', 'os.path.join', (['save_path', '"""test.pickle"""'], {}), "(save_path, 'test.pickle')\n", (15231, 15257), False, 'import os\n'), ((4311, 4356), 'os.path.join', 'os.path.join', (['data_path', '"""entities_dict.json"""'], {}), "(data_path, 'entities_dict.json')\n", (4323, 4356), False, 'import os\n'), ((4417, 4463), 'os.path.join', 'os.path.join', (['data_path', '"""relations_dict.json"""'], {}), "(data_path, 'relations_dict.json')\n", (4429, 4463), False, 'import os\n'), ((4771, 4810), 'os.path.join', 'os.path.join', (['data_path', "(split + '.txt')"], {}), "(data_path, split + '.txt')\n", (4783, 4810), False, 'import os\n'), ((5067, 5096), 'model.Conve', 'Conve', (['args', 'n_ent', '(2 * n_rel)'], {}), '(args, n_ent, 2 * n_rel)\n', (5072, 5096), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5622, 5647), 'model.Conve', 'Conve', (['args', 'n_ent', 'n_rel'], {}), '(args, n_ent, n_rel)\n', (5627, 5647), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((6708, 6733), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6731, 6733), False, 'import torch\n'), ((12224, 12234), 'numpy.any', 'np.any', (['m1'], {}), '(m1)\n', (12230, 12234), True, 'import numpy as np\n'), ((14204, 14240), 'os.path.join', 'os.path.join', (['save_path', '"""train.txt"""'], {}), "(save_path, 'train.txt')\n", 
(14216, 14240), False, 'import os\n'), ((14504, 14549), 'os.path.join', 'os.path.join', (['save_path', '"""entities_dict.json"""'], {}), "(save_path, 'entities_dict.json')\n", (14516, 14549), False, 'import os\n'), ((14624, 14670), 'os.path.join', 'os.path.join', (['save_path', '"""relations_dict.json"""'], {}), "(save_path, 'relations_dict.json')\n", (14636, 14670), False, 'import os\n'), ((14762, 14798), 'os.path.join', 'os.path.join', (['save_path', '"""valid.txt"""'], {}), "(save_path, 'valid.txt')\n", (14774, 14798), False, 'import os\n'), ((15064, 15099), 'os.path.join', 'os.path.join', (['save_path', '"""test.txt"""'], {}), "(save_path, 'test.txt')\n", (15076, 15099), False, 'import os\n'), ((15378, 15423), 'os.path.join', 'os.path.join', (['save_path', '"""summary_edits.json"""'], {}), "(save_path, 'summary_edits.json')\n", (15390, 15423), False, 'import os\n'), ((15540, 15576), 'os.path.join', 'os.path.join', (['save_path', '"""stats.txt"""'], {}), "(save_path, 'stats.txt')\n", (15552, 15576), False, 'import os\n'), ((5151, 5180), 'model.Conve', 'Conve', (['args', 'n_ent', '(2 * n_rel)'], {}), '(args, n_ent, 2 * n_rel)\n', (5156, 5180), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5704, 5729), 'model.Conve', 'Conve', (['args', 'n_ent', 'n_rel'], {}), '(args, n_ent, n_rel)\n', (5709, 5729), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((11662, 11681), 'numpy.where', 'np.where', (['(rels != r)'], {}), '(rels != r)\n', (11670, 11681), True, 'import numpy as np\n'), ((12177, 12208), 'numpy.isin', 'np.isin', (['per_tr[:, 2]', '[adv[2]]'], {}), '(per_tr[:, 2], [adv[2]])\n', (12184, 12208), True, 'import numpy as np\n'), ((13243, 13295), 'numpy.concatenate', 'np.concatenate', (['(train_data[:, 0], train_data[:, 2])'], {}), '((train_data[:, 0], train_data[:, 2]))\n', (13257, 13295), True, 'import numpy as np\n'), ((13331, 13381), 'numpy.concatenate', 'np.concatenate', (['(new_train[:, 0], new_train[:, 2])'], {}), 
'((new_train[:, 0], new_train[:, 2]))\n', (13345, 13381), True, 'import numpy as np\n'), ((14578, 14599), 'json.dumps', 'json.dumps', (['ent_to_id'], {}), '(ent_to_id)\n', (14588, 14599), False, 'import json\n'), ((14699, 14720), 'json.dumps', 'json.dumps', (['rel_to_id'], {}), '(rel_to_id)\n', (14709, 14720), False, 'import json\n'), ((15456, 15480), 'json.dumps', 'json.dumps', (['summary_dict'], {}), '(summary_dict)\n', (15466, 15480), False, 'import json\n'), ((5238, 5270), 'model.Distmult', 'Distmult', (['args', 'n_ent', '(2 * n_rel)'], {}), '(args, n_ent, 2 * n_rel)\n', (5246, 5270), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5789, 5817), 'model.Distmult', 'Distmult', (['args', 'n_ent', 'n_rel'], {}), '(args, n_ent, n_rel)\n', (5797, 5817), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((12073, 12104), 'numpy.isin', 'np.isin', (['per_tr[:, 0]', '[adv[0]]'], {}), '(per_tr[:, 0], [adv[0]])\n', (12080, 12104), True, 'import numpy as np\n'), ((12125, 12156), 'numpy.isin', 'np.isin', (['per_tr[:, 1]', '[adv[1]]'], {}), '(per_tr[:, 1], [adv[1]])\n', (12132, 12156), True, 'import numpy as np\n'), ((5327, 5358), 'model.Complex', 'Complex', (['args', 'n_ent', '(2 * n_rel)'], {}), '(args, n_ent, 2 * n_rel)\n', (5334, 5358), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5876, 5903), 'model.Complex', 'Complex', (['args', 'n_ent', 'n_rel'], {}), '(args, n_ent, n_rel)\n', (5883, 5903), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5414, 5444), 'model.Transe', 'Transe', (['args', 'n_ent', '(2 * n_rel)'], {}), '(args, n_ent, 2 * n_rel)\n', (5420, 5444), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((5961, 5987), 'model.Transe', 'Transe', (['args', 'n_ent', 'n_rel'], {}), '(args, n_ent, n_rel)\n', (5967, 5987), False, 'from model import Distmult, Complex, Conve, Transe\n'), ((12432, 12447), 'numpy.asarray', 'np.asarray', (['adv'], {}), '(adv)\n', (12442, 12447), True, 'import 
numpy as np\n')] |
import abc
import inspect
from pathlib import Path
from typing import Any, Callable, Mapping, Optional
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.backends.base_sqlalchemy.compiler as comp
import ibis.expr.types as ir
# TODO: Merge into BackendTest, #2564
class RoundingConvention:
    """Abstract rounding policy applied when comparing backend results."""

    @staticmethod
    @abc.abstractmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        """Round a series to `decimals` number of decimal values."""
# TODO: Merge into BackendTest, #2564
class RoundAwayFromZero(RoundingConvention):
    """Round halves away from zero; integer (int64) results at 0 decimals."""

    @staticmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        if decimals:
            return series.round(decimals=decimals)
        # sign(x) * floor(|x| + 0.5) is the away-from-zero rounding of x and
        # is exactly equal (in IEEE arithmetic) to -sign(x) * ceil(-|x| - 0.5).
        return (np.sign(series) * np.floor(series.abs() + 0.5)).astype(np.int64)
# TODO: Merge into BackendTest, #2564
class RoundHalfToEven(RoundingConvention):
    """Banker's rounding; results narrowed to int64 at 0 decimals."""

    @staticmethod
    def round(series: pd.Series, decimals: int = 0) -> pd.Series:
        rounded = series.round(decimals=decimals)
        if decimals:
            return rounded
        return rounded.astype(np.int64)
# TODO: Merge into BackendTest, #2564
class UnorderedComparator:
    """Mixin that makes series/frame comparisons insensitive to row order.

    Both operands are sorted before delegating to the next comparator in
    the MRO via ``super()``.
    """

    @classmethod
    def assert_series_equal(
        cls, left: pd.Series, right: pd.Series, *args: Any, **kwargs: Any
    ) -> None:
        sorted_left = left.sort_values().reset_index(drop=True)
        sorted_right = right.sort_values().reset_index(drop=True)
        return super().assert_series_equal(
            sorted_left, sorted_right, *args, **kwargs
        )

    @classmethod
    def assert_frame_equal(
        cls, left: pd.DataFrame, right: pd.DataFrame, *args: Any, **kwargs: Any
    ) -> None:
        # Sort both frames by the columns they have in common.
        shared = list(set(left.columns) & set(right.columns))
        return super().assert_frame_equal(
            left.sort_values(by=shared),
            right.sort_values(by=shared),
            *args,
            **kwargs,
        )
class BackendTest(abc.ABC):
    """Per-backend test harness: capability flags plus data-access helpers.

    Subclasses implement :meth:`connect`; the directory layout of the
    defining module (see :meth:`name`) selects the matching entry in
    ``ibis.backends``.
    """

    # Capability flags — backend subclasses override these to opt out of
    # features their engine does not support.
    check_dtype = True
    check_names = True
    supports_arrays = True
    supports_arrays_outside_of_select = supports_arrays
    supports_window_operations = True
    additional_skipped_operations = frozenset()
    supports_divide_by_zero = False
    returned_timestamp_unit = 'us'
    supported_to_timestamp_units = {'s', 'ms', 'us'}
    supports_floating_modulus = True

    def __init__(self, data_directory: Path) -> None:
        """Connect to the backend using test data from ``data_directory``."""
        self.api  # skips if we can't access the backend
        self.connection = self.connect(data_directory)

    def __str__(self):
        return f'<BackendTest {self.name()}>'

    @classmethod
    def name(cls) -> str:
        """Return the backend name derived from the defining module's path."""
        # The backend name is the grandparent directory of the module that
        # defines the subclass (e.g. ".../<backend>/tests/conftest.py").
        backend_tests_path = inspect.getmodule(cls).__file__
        return Path(backend_tests_path).resolve().parent.parent.name

    @staticmethod
    @abc.abstractmethod
    def connect(data_directory: Path) -> ibis.client.Client:
        """Return a connection with data loaded from `data_directory`."""

    @classmethod
    def assert_series_equal(
        cls, left: pd.Series, right: pd.Series, *args: Any, **kwargs: Any
    ) -> None:
        """Compare series, honoring the backend's dtype/name capability flags."""
        kwargs.setdefault('check_dtype', cls.check_dtype)
        kwargs.setdefault('check_names', cls.check_names)
        tm.assert_series_equal(left, right, *args, **kwargs)

    @classmethod
    def assert_frame_equal(
        cls, left: pd.DataFrame, right: pd.DataFrame, *args: Any, **kwargs: Any
    ) -> None:
        """Compare frames after discarding (possibly backend-specific) indexes."""
        left = left.reset_index(drop=True)
        right = right.reset_index(drop=True)
        tm.assert_frame_equal(left, right, *args, **kwargs)

    @staticmethod
    def default_series_rename(
        series: pd.Series, name: str = 'tmp'
    ) -> pd.Series:
        """Rename a series to the backend's default result column name."""
        return series.rename(name)

    @staticmethod
    def greatest(
        f: Callable[..., ir.ValueExpr], *args: ir.ValueExpr
    ) -> ir.ValueExpr:
        # Default: apply the expression builder unchanged; backends with
        # different greatest() semantics override this hook.
        return f(*args)

    @staticmethod
    def least(
        f: Callable[..., ir.ValueExpr], *args: ir.ValueExpr
    ) -> ir.ValueExpr:
        # See `greatest` — same override hook for least().
        return f(*args)

    @property
    def db(self) -> ibis.client.Database:
        """Database handle of the active connection."""
        return self.connection.database()

    @property
    def functional_alltypes(self) -> ir.TableExpr:
        return self.db.functional_alltypes

    @property
    def batting(self) -> ir.TableExpr:
        return self.db.batting

    @property
    def awards_players(self) -> ir.TableExpr:
        return self.db.awards_players

    @property
    def geo(self) -> Optional[ir.TableExpr]:
        # Backends with geospatial support override this to return a table.
        return None

    @property
    def api(self):
        """The ``ibis.backends`` module entry matching this backend's name."""
        return getattr(ibis.backends, self.name()).Backend()

    def make_context(
        self, params: Optional[Mapping[ir.ValueExpr, Any]] = None
    ) -> comp.QueryContext:
        """Build a compilation context, optionally with bound parameters."""
        return self.api.dialect.make_context(params=params)
# TODO move to the spark/pyspark backends, #2565
# Module-level singletons, lazily created once per test session by the
# get_*_testing_client helpers below.
_spark_testing_client = None
_pyspark_testing_client = None
# TODO move to the sparn/pyspark backends, #2565
def get_spark_testing_client(data_directory):
    """Return the session-wide Spark testing client, creating it on first use."""
    global _spark_testing_client
    if _spark_testing_client is not None:
        return _spark_testing_client

    def _connect(session):
        return ibis.backends.spark.Backend().connect(session)

    _spark_testing_client = get_common_spark_testing_client(
        data_directory, _connect
    )
    return _spark_testing_client
# TODO move to the spark/pyspark backends, #2565
def get_pyspark_testing_client(data_directory):
    """Return the session-wide PySpark testing client, creating it on first use."""
    global _pyspark_testing_client
    if _pyspark_testing_client is not None:
        return _pyspark_testing_client

    def _connect(session):
        return ibis.backends.pyspark.Backend().connect(session)

    _pyspark_testing_client = get_common_spark_testing_client(
        data_directory, _connect
    )
    return _pyspark_testing_client
# TODO move to the spark/pyspark backends, #2565
def get_common_spark_testing_client(data_directory, connect):
    """Create a (py)spark testing client with all fixture tables registered.

    Reads the CSV fixtures under ``data_directory`` with explicit schemas,
    registers them (plus several in-memory frames) as temp views on the
    underlying SparkSession, and returns the connected client.

    Args:
        data_directory: Path-like directory containing the CSV fixtures.
        connect: callable mapping a SparkSession to a backend client.
    """
    pytest.importorskip('pyspark')
    import pyspark.sql.types as pt
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.config(
        'spark.default.parallelism', 4
    ).getOrCreate()
    # Renamed from `_spark_testing_client`, which confusingly shadowed the
    # module-level singleton of the same name without ever assigning it.
    client = connect(spark)
    s = client._session
    num_partitions = 4

    df_functional_alltypes = (
        s.read.csv(
            path=str(data_directory / 'functional_alltypes.csv'),
            schema=pt.StructType(
                [
                    pt.StructField('index', pt.IntegerType(), True),
                    pt.StructField('Unnamed: 0', pt.IntegerType(), True),
                    pt.StructField('id', pt.IntegerType(), True),
                    # cast below, Spark can't read 0/1 as bool
                    pt.StructField('bool_col', pt.ByteType(), True),
                    pt.StructField('tinyint_col', pt.ByteType(), True),
                    pt.StructField('smallint_col', pt.ShortType(), True),
                    pt.StructField('int_col', pt.IntegerType(), True),
                    pt.StructField('bigint_col', pt.LongType(), True),
                    pt.StructField('float_col', pt.FloatType(), True),
                    pt.StructField('double_col', pt.DoubleType(), True),
                    pt.StructField('date_string_col', pt.StringType(), True),
                    pt.StructField('string_col', pt.StringType(), True),
                    pt.StructField('timestamp_col', pt.TimestampType(), True),
                    pt.StructField('year', pt.IntegerType(), True),
                    pt.StructField('month', pt.IntegerType(), True),
                ]
            ),
            mode='FAILFAST',
            header=True,
        )
        .repartition(num_partitions)
        .sort('index')
    )
    df_functional_alltypes = df_functional_alltypes.withColumn(
        "bool_col", df_functional_alltypes["bool_col"].cast("boolean")
    )
    df_functional_alltypes.createOrReplaceTempView('functional_alltypes')

    df_batting = (
        s.read.csv(
            path=str(data_directory / 'batting.csv'),
            schema=pt.StructType(
                [
                    pt.StructField('playerID', pt.StringType(), True),
                    pt.StructField('yearID', pt.IntegerType(), True),
                    pt.StructField('stint', pt.IntegerType(), True),
                    pt.StructField('teamID', pt.StringType(), True),
                    pt.StructField('lgID', pt.StringType(), True),
                    pt.StructField('G', pt.IntegerType(), True),
                    pt.StructField('AB', pt.DoubleType(), True),
                    pt.StructField('R', pt.DoubleType(), True),
                    pt.StructField('H', pt.DoubleType(), True),
                    pt.StructField('X2B', pt.DoubleType(), True),
                    pt.StructField('X3B', pt.DoubleType(), True),
                    pt.StructField('HR', pt.DoubleType(), True),
                    pt.StructField('RBI', pt.DoubleType(), True),
                    pt.StructField('SB', pt.DoubleType(), True),
                    pt.StructField('CS', pt.DoubleType(), True),
                    pt.StructField('BB', pt.DoubleType(), True),
                    pt.StructField('SO', pt.DoubleType(), True),
                    pt.StructField('IBB', pt.DoubleType(), True),
                    pt.StructField('HBP', pt.DoubleType(), True),
                    pt.StructField('SH', pt.DoubleType(), True),
                    pt.StructField('SF', pt.DoubleType(), True),
                    pt.StructField('GIDP', pt.DoubleType(), True),
                ]
            ),
            header=True,
        )
        .repartition(num_partitions)
        .sort('playerID')
    )
    df_batting.createOrReplaceTempView('batting')

    df_awards_players = (
        s.read.csv(
            path=str(data_directory / 'awards_players.csv'),
            schema=pt.StructType(
                [
                    pt.StructField('playerID', pt.StringType(), True),
                    pt.StructField('awardID', pt.StringType(), True),
                    pt.StructField('yearID', pt.IntegerType(), True),
                    pt.StructField('lgID', pt.StringType(), True),
                    pt.StructField('tie', pt.StringType(), True),
                    pt.StructField('notes', pt.StringType(), True),
                ]
            ),
            header=True,
        )
        .repartition(num_partitions)
        .sort('playerID')
    )
    df_awards_players.createOrReplaceTempView('awards_players')

    # In-memory fixtures for UDF and nested-type tests.
    df_simple = s.createDataFrame([(1, 'a')], ['foo', 'bar'])
    df_simple.createOrReplaceTempView('simple')

    df_struct = s.createDataFrame([((1, 2, 'a'),)], ['struct_col'])
    df_struct.createOrReplaceTempView('struct')

    df_nested_types = s.createDataFrame(
        [([1, 2], [[3, 4], [5, 6]], {'a': [[2, 4], [3, 5]]})],
        [
            'list_of_ints',
            'list_of_list_of_ints',
            'map_string_list_of_list_of_ints',
        ],
    )
    df_nested_types.createOrReplaceTempView('nested_types')

    df_complicated = s.createDataFrame(
        [({(1, 3): [[2, 4], [3, 5]]},)], ['map_tuple_list_of_list_of_ints']
    )
    df_complicated.createOrReplaceTempView('complicated')

    df_udf = s.createDataFrame(
        [('a', 1, 4.0, 'a'), ('b', 2, 5.0, 'a'), ('c', 3, 6.0, 'b')],
        ['a', 'b', 'c', 'key'],
    )
    df_udf.createOrReplaceTempView('udf')

    df_udf_nan = s.createDataFrame(
        pd.DataFrame(
            {
                'a': np.arange(10, dtype=float),
                # Fix: np.NaN was removed in NumPy 2.0; np.nan is the
                # canonical (and long-supported) spelling.
                'b': [3.0, np.nan] * 5,
                'key': list('ddeefffggh'),
            }
        )
    )
    df_udf_nan.createOrReplaceTempView('udf_nan')

    df_udf_null = s.createDataFrame(
        [
            (float(i), None if i % 2 else 3.0, 'ddeefffggh'[i])
            for i in range(10)
        ],
        ['a', 'b', 'key'],
    )
    df_udf_null.createOrReplaceTempView('udf_null')

    df_udf_random = s.createDataFrame(
        pd.DataFrame(
            {
                'a': np.arange(4, dtype=float).tolist()
                + np.random.rand(3).tolist(),
                'b': np.arange(4, dtype=float).tolist()
                + np.random.rand(3).tolist(),
                'key': list('ddeefff'),
            }
        )
    )
    df_udf_random.createOrReplaceTempView('udf_random')

    return client
| [
"numpy.random.rand",
"pyspark.sql.types.ShortType",
"pyspark.sql.SparkSession.builder.config",
"pandas.testing.assert_frame_equal",
"pyspark.sql.types.ByteType",
"numpy.arange",
"pathlib.Path",
"pyspark.sql.types.IntegerType",
"ibis.backends.pyspark.Backend",
"ibis.backends.spark.Backend",
"insp... | [((5697, 5727), 'pytest.importorskip', 'pytest.importorskip', (['"""pyspark"""'], {}), "('pyspark')\n", (5716, 5727), False, 'import pytest\n'), ((3176, 3228), 'pandas.testing.assert_series_equal', 'tm.assert_series_equal', (['left', 'right', '*args'], {}), '(left, right, *args, **kwargs)\n', (3198, 3228), True, 'import pandas.testing as tm\n'), ((3466, 3517), 'pandas.testing.assert_frame_equal', 'tm.assert_frame_equal', (['left', 'right', '*args'], {}), '(left, right, *args, **kwargs)\n', (3487, 3517), True, 'import pandas.testing as tm\n'), ((2637, 2659), 'inspect.getmodule', 'inspect.getmodule', (['cls'], {}), '(cls)\n', (2654, 2659), False, 'import inspect\n'), ((5817, 5876), 'pyspark.sql.SparkSession.builder.config', 'SparkSession.builder.config', (['"""spark.default.parallelism"""', '(4)'], {}), "('spark.default.parallelism', 4)\n", (5844, 5876), False, 'from pyspark.sql import SparkSession\n'), ((11250, 11276), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (11259, 11276), True, 'import numpy as np\n'), ((5097, 5126), 'ibis.backends.spark.Backend', 'ibis.backends.spark.Backend', ([], {}), '()\n', (5124, 5126), False, 'import ibis\n'), ((5485, 5516), 'ibis.backends.pyspark.Backend', 'ibis.backends.pyspark.Backend', ([], {}), '()\n', (5514, 5516), False, 'import ibis\n'), ((757, 772), 'numpy.sign', 'np.sign', (['series'], {}), '(series)\n', (764, 772), True, 'import numpy as np\n'), ((2684, 2708), 'pathlib.Path', 'Path', (['backend_tests_path'], {}), '(backend_tests_path)\n', (2688, 2708), False, 'from pathlib import Path\n'), ((11777, 11802), 'numpy.arange', 'np.arange', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (11786, 11802), True, 'import numpy as np\n'), ((11830, 11847), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (11844, 11847), True, 'import numpy as np\n'), ((11879, 11904), 'numpy.arange', 'np.arange', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (11888, 11904), True, 
'import numpy as np\n'), ((11932, 11949), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (11946, 11949), True, 'import numpy as np\n'), ((6224, 6240), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (6238, 6240), True, 'import pyspark.sql.types as pt\n'), ((6298, 6314), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (6312, 6314), True, 'import pyspark.sql.types as pt\n'), ((6364, 6380), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (6378, 6380), True, 'import pyspark.sql.types as pt\n'), ((6499, 6512), 'pyspark.sql.types.ByteType', 'pt.ByteType', ([], {}), '()\n', (6510, 6512), True, 'import pyspark.sql.types as pt\n'), ((6571, 6584), 'pyspark.sql.types.ByteType', 'pt.ByteType', ([], {}), '()\n', (6582, 6584), True, 'import pyspark.sql.types as pt\n'), ((6644, 6658), 'pyspark.sql.types.ShortType', 'pt.ShortType', ([], {}), '()\n', (6656, 6658), True, 'import pyspark.sql.types as pt\n'), ((6713, 6729), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (6727, 6729), True, 'import pyspark.sql.types as pt\n'), ((6787, 6800), 'pyspark.sql.types.LongType', 'pt.LongType', ([], {}), '()\n', (6798, 6800), True, 'import pyspark.sql.types as pt\n'), ((6857, 6871), 'pyspark.sql.types.FloatType', 'pt.FloatType', ([], {}), '()\n', (6869, 6871), True, 'import pyspark.sql.types as pt\n'), ((6929, 6944), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (6942, 6944), True, 'import pyspark.sql.types as pt\n'), ((7007, 7022), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (7020, 7022), True, 'import pyspark.sql.types as pt\n'), ((7080, 7095), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (7093, 7095), True, 'import pyspark.sql.types as pt\n'), ((7156, 7174), 'pyspark.sql.types.TimestampType', 'pt.TimestampType', ([], {}), '()\n', (7172, 7174), True, 'import pyspark.sql.types as pt\n'), ((7226, 7242), 
'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (7240, 7242), True, 'import pyspark.sql.types as pt\n'), ((7295, 7311), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (7309, 7311), True, 'import pyspark.sql.types as pt\n'), ((7892, 7907), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (7905, 7907), True, 'import pyspark.sql.types as pt\n'), ((7961, 7977), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (7975, 7977), True, 'import pyspark.sql.types as pt\n'), ((8030, 8046), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (8044, 8046), True, 'import pyspark.sql.types as pt\n'), ((8100, 8115), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (8113, 8115), True, 'import pyspark.sql.types as pt\n'), ((8167, 8182), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (8180, 8182), True, 'import pyspark.sql.types as pt\n'), ((8231, 8247), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (8245, 8247), True, 'import pyspark.sql.types as pt\n'), ((8297, 8312), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8310, 8312), True, 'import pyspark.sql.types as pt\n'), ((8361, 8376), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8374, 8376), True, 'import pyspark.sql.types as pt\n'), ((8425, 8440), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8438, 8440), True, 'import pyspark.sql.types as pt\n'), ((8491, 8506), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8504, 8506), True, 'import pyspark.sql.types as pt\n'), ((8557, 8572), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8570, 8572), True, 'import pyspark.sql.types as pt\n'), ((8622, 8637), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8635, 8637), True, 'import pyspark.sql.types as pt\n'), ((8688, 8703), 'pyspark.sql.types.DoubleType', 
'pt.DoubleType', ([], {}), '()\n', (8701, 8703), True, 'import pyspark.sql.types as pt\n'), ((8753, 8768), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8766, 8768), True, 'import pyspark.sql.types as pt\n'), ((8818, 8833), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8831, 8833), True, 'import pyspark.sql.types as pt\n'), ((8883, 8898), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8896, 8898), True, 'import pyspark.sql.types as pt\n'), ((8948, 8963), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (8961, 8963), True, 'import pyspark.sql.types as pt\n'), ((9014, 9029), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (9027, 9029), True, 'import pyspark.sql.types as pt\n'), ((9080, 9095), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (9093, 9095), True, 'import pyspark.sql.types as pt\n'), ((9145, 9160), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (9158, 9160), True, 'import pyspark.sql.types as pt\n'), ((9210, 9225), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (9223, 9225), True, 'import pyspark.sql.types as pt\n'), ((9277, 9292), 'pyspark.sql.types.DoubleType', 'pt.DoubleType', ([], {}), '()\n', (9290, 9292), True, 'import pyspark.sql.types as pt\n'), ((9695, 9710), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (9708, 9710), True, 'import pyspark.sql.types as pt\n'), ((9765, 9780), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (9778, 9780), True, 'import pyspark.sql.types as pt\n'), ((9834, 9850), 'pyspark.sql.types.IntegerType', 'pt.IntegerType', ([], {}), '()\n', (9848, 9850), True, 'import pyspark.sql.types as pt\n'), ((9902, 9917), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (9915, 9917), True, 'import pyspark.sql.types as pt\n'), ((9968, 9983), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (9981, 9983), 
True, 'import pyspark.sql.types as pt\n'), ((10036, 10051), 'pyspark.sql.types.StringType', 'pt.StringType', ([], {}), '()\n', (10049, 10051), True, 'import pyspark.sql.types as pt\n')] |
"""
Sweep one tone across its filter bank bin.
"""
import time
import numpy as np
from kid_readout.roach import instantiate, analog
from kid_readout.measurement import acquire
from kid_readout.equipment import hardware
acquire.show_settings()
acquire.show_git_status()
logger = acquire.get_script_logger(__file__)
# Parameters
suffix = 'filterbank_bin_wideband'
fft_gain = 2
lo_MHz = 3000
baseband_MHz = 100
lo_round_to_MHz = 0.1
dac_attenuation = 0
tones_per_bin_exponent = 3
stream_length_blocks = 1
wait = 5
# Hardware
conditioner = analog.HeterodyneMarkII()
magnet = hardware.Thing(name='magnet_array', state={'orientation': 'up',
'distance_from_base_mm': 276})
hw = hardware.Hardware(conditioner, magnet)
ri = instantiate.r1h11_with_mk2(initialize=True, use_config=False)
ri.set_fft_gain(fft_gain)
# External is 1 and internal is 0
ri.adc_valon.set_ref_select(0)
ri.lo_valon.set_ref_select(1)
# Calculate tone bin integers
f_filterbank_MHz = ri.fs / ri.nfft
n_filterbank = int(np.round(baseband_MHz / f_filterbank_MHz))
tone_sample_exponent = int(np.log2(ri.nfft) + tones_per_bin_exponent)
center_integer = 2**tones_per_bin_exponent * n_filterbank
tone_integers = center_integer + np.arange(-4 * 2**tones_per_bin_exponent, 4 * 2**tones_per_bin_exponent + 1)
# Acquire
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
ri.set_lo(lomhz=lo_MHz, chan_spacing=lo_round_to_MHz)
ri.set_dac_attenuator(dac_attenuation)
for tone_integer in tone_integers:
ri.set_tone_bins(bins=np.array([tone_integer]), nsamp=2**tone_sample_exponent)
ri.fft_bins = np.atleast_2d(np.array([n_filterbank]))
ri.select_bank(0)
ri.select_fft_bins(np.array([0]))
time.sleep(wait)
npd.write(ri.get_measurement_blocks(num_blocks=stream_length_blocks, demod=False, state=hw.state()))
npd.write(ri.get_adc_measurement())
finally:
npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
| [
"kid_readout.roach.analog.HeterodyneMarkII",
"kid_readout.measurement.acquire.get_script_logger",
"numpy.arange",
"kid_readout.roach.instantiate.r1h11_with_mk2",
"kid_readout.measurement.acquire.show_settings",
"time.sleep",
"kid_readout.equipment.hardware.Hardware",
"numpy.array",
"kid_readout.meas... | [((222, 245), 'kid_readout.measurement.acquire.show_settings', 'acquire.show_settings', ([], {}), '()\n', (243, 245), False, 'from kid_readout.measurement import acquire\n'), ((246, 271), 'kid_readout.measurement.acquire.show_git_status', 'acquire.show_git_status', ([], {}), '()\n', (269, 271), False, 'from kid_readout.measurement import acquire\n'), ((281, 316), 'kid_readout.measurement.acquire.get_script_logger', 'acquire.get_script_logger', (['__file__'], {}), '(__file__)\n', (306, 316), False, 'from kid_readout.measurement import acquire\n'), ((541, 566), 'kid_readout.roach.analog.HeterodyneMarkII', 'analog.HeterodyneMarkII', ([], {}), '()\n', (564, 566), False, 'from kid_readout.roach import instantiate, analog\n'), ((576, 674), 'kid_readout.equipment.hardware.Thing', 'hardware.Thing', ([], {'name': '"""magnet_array"""', 'state': "{'orientation': 'up', 'distance_from_base_mm': 276}"}), "(name='magnet_array', state={'orientation': 'up',\n 'distance_from_base_mm': 276})\n", (590, 674), False, 'from kid_readout.equipment import hardware\n'), ((728, 766), 'kid_readout.equipment.hardware.Hardware', 'hardware.Hardware', (['conditioner', 'magnet'], {}), '(conditioner, magnet)\n', (745, 766), False, 'from kid_readout.equipment import hardware\n'), ((772, 833), 'kid_readout.roach.instantiate.r1h11_with_mk2', 'instantiate.r1h11_with_mk2', ([], {'initialize': '(True)', 'use_config': '(False)'}), '(initialize=True, use_config=False)\n', (798, 833), False, 'from kid_readout.roach import instantiate, analog\n'), ((1338, 1378), 'kid_readout.measurement.acquire.new_npy_directory', 'acquire.new_npy_directory', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (1363, 1378), False, 'from kid_readout.measurement import acquire\n'), ((1385, 1396), 'time.time', 'time.time', ([], {}), '()\n', (1394, 1396), False, 'import time\n'), ((1040, 1081), 'numpy.round', 'np.round', (['(baseband_MHz / f_filterbank_MHz)'], {}), '(baseband_MHz / f_filterbank_MHz)\n', (1048, 
1081), True, 'import numpy as np\n'), ((1244, 1329), 'numpy.arange', 'np.arange', (['(-4 * 2 ** tones_per_bin_exponent)', '(4 * 2 ** tones_per_bin_exponent + 1)'], {}), '(-4 * 2 ** tones_per_bin_exponent, 4 * 2 ** tones_per_bin_exponent + 1\n )\n', (1253, 1329), True, 'import numpy as np\n'), ((1110, 1126), 'numpy.log2', 'np.log2', (['ri.nfft'], {}), '(ri.nfft)\n', (1117, 1126), True, 'import numpy as np\n'), ((1767, 1783), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (1777, 1783), False, 'import time\n'), ((1665, 1689), 'numpy.array', 'np.array', (['[n_filterbank]'], {}), '([n_filterbank])\n', (1673, 1689), True, 'import numpy as np\n'), ((1744, 1757), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1752, 1757), True, 'import numpy as np\n'), ((1572, 1596), 'numpy.array', 'np.array', (['[tone_integer]'], {}), '([tone_integer])\n', (1580, 1596), True, 'import numpy as np\n'), ((2055, 2066), 'time.time', 'time.time', ([], {}), '()\n', (2064, 2066), False, 'import time\n')] |
import tensorflow as tf
import numpy as np
def max_pool_2d_nxn_regions(inputs, output_size: int, mode: str):
    """Pool `inputs` (B, H, W, C) into an output_size x output_size bin grid.

    Each of the output_size**2 spatial bins is reduced over its region with
    either max or mean pooling, producing one (B, C) tensor per bin.
    Used by spatial_pyramid_pool; refer to appendix A in [1].

    Args:
        inputs: A 4D Tensor (B, H, W, C); H and W may be dynamic.
        output_size: The output size of the pooling operation.
        mode: The pooling mode {max, avg}.

    Returns:
        A list of output_size * output_size tensors, each of shape (B, C).

    Raises:
        ValueError: If `mode` is neither 'max' nor 'avg'.

    References:
        [1] He et al (2015):
        Spatial Pyramid Pooling in Deep Convolutional Networks
        for Visual Recognition.
        https://arxiv.org/pdf/1406.4729.pdf.
        Ported from: https://github.com/luizgh/Lasagne/commit/c01e3d922a5712ca4c54617a15a794c23746ac8c
    """
    shape = tf.shape(inputs)
    height = tf.cast(tf.gather(shape, 1), tf.int32)
    width = tf.cast(tf.gather(shape, 2), tf.int32)

    if mode == 'max':
        reduce_fn = tf.reduce_max
    elif mode == 'avg':
        reduce_fn = tf.reduce_mean
    else:
        msg = "Mode must be either 'max' or 'avg'. Got '{0}'"
        raise ValueError(msg.format(mode))

    def _lower(index, extent):
        # floor(index / output_size * extent), computed as graph ops.
        return tf.cast(tf.floor(tf.multiply(tf.divide(index, output_size),
                                                tf.cast(extent, tf.float32))), tf.int32)

    def _upper(index, extent):
        # ceil((index + 1) / output_size * extent), computed as graph ops.
        return tf.cast(tf.ceil(tf.multiply(tf.divide(index + 1, output_size),
                                               tf.cast(extent, tf.float32))), tf.int32)

    pooled_bins = []
    for row in range(output_size):
        for col in range(output_size):
            region = inputs[:,
                            _lower(row, height):_upper(row, height),
                            _lower(col, width):_upper(col, width),
                            :]
            pooled_bins.append(reduce_fn(region, axis=(1, 2)))
    return pooled_bins
def spatial_pyramid_pool(inputs, dimensions=[2,1], mode='max', implementation='kaiming'):
    """
    Performs spatial pyramid pooling (SPP) over the input.
    It will turn a 2D input of arbitrary size into an output of fixed
    dimension.
    Hence, the convolutional part of a DNN can be connected to a dense part
    with a fixed number of nodes even if the dimensions of the input
    image are unknown.
    The pooling is performed over :math:`l` pooling levels.
    Each pooling level :math:`i` will create :math:`M_i` output features.
    :math:`M_i` is given by :math:`n_i * n_i`, with :math:`n_i` as the number
    of pooling operations per dimension level :math:`i`.
    The length of the parameter dimensions is the level of the spatial pyramid.
    Args:
        inputs: A 4D Tensor (B, H, W, C).
        dimensions: The list of :math:`n_i`'s that define the output dimension
        of each pooling level :math:`i`. The length of dimensions is the level of
        the spatial pyramid.
        mode: Pooling mode 'max' or 'avg'.
        implementation: The implementation to use, either 'kaiming' or 'fast'.
        kaiming is the original implementation from the paper, and supports variable
        sizes of input vectors, which fast does not support.
    Returns:
        A fixed length vector representing the inputs.
    Notes:
        SPP should be inserted between the convolutional part of a DNN and its
        dense part. Convolutions can be used for arbitrary input dimensions, but
        the size of their output will depend on their input dimensions.
        Connecting the output of the convolutional to the dense part then
        usually demands us to fix the dimensions of the network's input.
        The spatial pyramid pooling layer, however, allows us to leave
        the network input dimensions arbitrary.
        The advantage over a global pooling layer is the added robustness
        against object deformations due to the pooling on different scales.
    References:
        [1] <NAME> et al (2015):
        Spatial Pyramid Pooling in Deep Convolutional Networks
        for Visual Recognition.
        https://arxiv.org/pdf/1406.4729.pdf.
    Ported from: https://github.com/luizgh/Lasagne/commit/c01e3d922a5712ca4c54617a15a794c23746ac8c
    """
    pool_list = []
    if implementation == 'kaiming':
        # Dynamic-shape path: slices each bin individually, so H and W may be
        # unknown at graph-construction time.
        for pool_dim in dimensions:
            pool_list += max_pool_2d_nxn_regions(inputs, pool_dim, mode)
    else:
        # 'fast' path: derives kernel/stride sizes from the static shape, so it
        # requires H and W to be known at graph-construction time.
        # NOTE(review): this branch always uses tf.nn.max_pool and ignores
        # `mode` — 'avg' is silently treated as max here; confirm intended.
        shape = inputs.get_shape().as_list()
        for d in dimensions:
            h = shape[1]
            w = shape[2]
            # Kernel covers ceil(h/d) x ceil(w/d); stride floor(h/d + 1) with
            # SAME padding yields d x d output bins per level.
            ph = np.ceil(h * 1.0 / d).astype(np.int32)
            pw = np.ceil(w * 1.0 / d).astype(np.int32)
            sh = np.floor(h * 1.0 / d + 1).astype(np.int32)
            sw = np.floor(w * 1.0 / d + 1).astype(np.int32)
            pool_result = tf.nn.max_pool(inputs,
                                           ksize=[1, ph, pw, 1],
                                           strides=[1, sh, sw, 1],
                                           padding='SAME')
            # Flatten each level to (B, d*d*C) before collecting.
            pool_list.append(tf.reshape(pool_result, [tf.shape(inputs)[0], -1]))
    # NOTE(review): concatenation along axis 0 stacks the pooled pieces along
    # the batch dimension rather than producing one fixed-length vector per
    # sample (axis 1) as the docstring suggests — confirm downstream usage.
    return tf.concat(pool_list, 0)
| [
"tensorflow.nn.max_pool",
"numpy.ceil",
"tensorflow.shape",
"numpy.floor",
"tensorflow.concat",
"tensorflow.gather",
"tensorflow.divide",
"tensorflow.cast"
] | [((932, 948), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (940, 948), True, 'import tensorflow as tf\n'), ((5368, 5391), 'tensorflow.concat', 'tf.concat', (['pool_list', '(0)'], {}), '(pool_list, 0)\n', (5377, 5391), True, 'import tensorflow as tf\n'), ((965, 991), 'tensorflow.gather', 'tf.gather', (['inputs_shape', '(1)'], {}), '(inputs_shape, 1)\n', (974, 991), True, 'import tensorflow as tf\n'), ((1019, 1045), 'tensorflow.gather', 'tf.gather', (['inputs_shape', '(2)'], {}), '(inputs_shape, 2)\n', (1028, 1045), True, 'import tensorflow as tf\n'), ((5068, 5156), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inputs'], {'ksize': '[1, ph, pw, 1]', 'strides': '[1, sh, sw, 1]', 'padding': '"""SAME"""'}), "(inputs, ksize=[1, ph, pw, 1], strides=[1, sh, sw, 1],\n padding='SAME')\n", (5082, 5156), True, 'import tensorflow as tf\n'), ((4829, 4849), 'numpy.ceil', 'np.ceil', (['(h * 1.0 / d)'], {}), '(h * 1.0 / d)\n', (4836, 4849), True, 'import numpy as np\n'), ((4884, 4904), 'numpy.ceil', 'np.ceil', (['(w * 1.0 / d)'], {}), '(w * 1.0 / d)\n', (4891, 4904), True, 'import numpy as np\n'), ((4939, 4964), 'numpy.floor', 'np.floor', (['(h * 1.0 / d + 1)'], {}), '(h * 1.0 / d + 1)\n', (4947, 4964), True, 'import numpy as np\n'), ((4999, 5024), 'numpy.floor', 'np.floor', (['(w * 1.0 / d + 1)'], {}), '(w * 1.0 / d + 1)\n', (5007, 5024), True, 'import numpy as np\n'), ((1495, 1512), 'tensorflow.divide', 'tf.divide', (['row', 'n'], {}), '(row, n)\n', (1504, 1512), True, 'import tensorflow as tf\n'), ((1514, 1536), 'tensorflow.cast', 'tf.cast', (['h', 'tf.float32'], {}), '(h, tf.float32)\n', (1521, 1536), True, 'import tensorflow as tf\n'), ((1644, 1665), 'tensorflow.divide', 'tf.divide', (['(row + 1)', 'n'], {}), '(row + 1, n)\n', (1653, 1665), True, 'import tensorflow as tf\n'), ((1669, 1691), 'tensorflow.cast', 'tf.cast', (['h', 'tf.float32'], {}), '(h, tf.float32)\n', (1676, 1691), True, 'import tensorflow as tf\n'), ((1799, 1816), 'tensorflow.divide', 
'tf.divide', (['col', 'n'], {}), '(col, n)\n', (1808, 1816), True, 'import tensorflow as tf\n'), ((1818, 1840), 'tensorflow.cast', 'tf.cast', (['w', 'tf.float32'], {}), '(w, tf.float32)\n', (1825, 1840), True, 'import tensorflow as tf\n'), ((1948, 1969), 'tensorflow.divide', 'tf.divide', (['(col + 1)', 'n'], {}), '(col + 1, n)\n', (1957, 1969), True, 'import tensorflow as tf\n'), ((1973, 1995), 'tensorflow.cast', 'tf.cast', (['w', 'tf.float32'], {}), '(w, tf.float32)\n', (1980, 1995), True, 'import tensorflow as tf\n'), ((5330, 5346), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (5338, 5346), True, 'import tensorflow as tf\n')] |
import os
import torch
import torch.utils.data
import torchvision
import numpy as np
from data.apple_dataset import AppleDataset
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import utility.utils as utils
import utility.transforms as T
######################################################
# Predict with either a Faster-RCNN or Mask-RCNN predictor
# using the MinneApple dataset
######################################################
def get_transform(train):
    """Build the image transform pipeline; training adds random flips."""
    pipeline = [T.ToTensor()]
    if train:
        pipeline.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(pipeline)
def get_maskrcnn_model_instance(num_classes):
    """Build a Mask-RCNN (ResNet-50 FPN) whose heads predict num_classes.

    The randomly initialized torchvision model ships with COCO-sized heads;
    both the box classifier and the mask predictor are replaced so their
    output size matches our class count.
    """
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
    # Swap the box classification head for one sized to num_classes.
    box_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)
    # Swap the mask head as well; 256 is the hidden layer width.
    mask_in_channels = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(mask_in_channels, 256, num_classes)
    return model
def get_frcnn_model_instance(num_classes):
    """Build a Faster-RCNN (ResNet-50 FPN) whose box head predicts num_classes."""
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
    # Replace the COCO-sized box classification head with one for our classes.
    box_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)
    return model
def main(args):
    """Run detection over the dataset and append predictions to a CSV file.

    For every image, writes one row per detected box to args.output_file:
    image_name, x1, y1, x2, y2, score.

    Fixes over the original:
    - the model is moved to args.device (previously it stayed on CPU, so
      running with --device cuda crashed on a device mismatch);
    - tensors are moved to CPU before .numpy() (required when on CUDA);
    - inference runs under torch.no_grad() to avoid building autograd graphs;
    - the output file is closed via a context manager instead of leaking.
    """
    num_classes = 2  # background + object
    device = args.device

    # Load the model from
    print("Loading model")
    # Create the correct model type
    if args.mrcnn:
        model = get_maskrcnn_model_instance(num_classes)
    else:
        model = get_frcnn_model_instance(num_classes)

    # Load model parameters and move the model to the requested device.
    checkpoint = torch.load(args.weight_file, map_location=device)
    model.load_state_dict(checkpoint['model'], strict=False)
    model.to(device)
    model.eval()

    print("Creating data loaders")
    dataset_test = AppleDataset(args.data_path, get_transform(train=False))
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
                                                   shuffle=False, num_workers=1,
                                                   collate_fn=utils.collate_fn)

    # Create output directory (skip when the path has no directory component).
    base_path = os.path.dirname(args.output_file)
    if base_path:
        os.makedirs(base_path, exist_ok=True)

    # Predict bboxes on each image.
    with open(args.output_file, 'a') as f, torch.no_grad():
        for image, targets in data_loader_test:
            image = list(img.to(device) for img in image)
            outputs = model(image)

            for ii, output in enumerate(outputs):
                img_id = targets[ii]['image_id']
                img_name = data_loader_test.dataset.get_img_name(img_id)
                print("Predicting on image: {}".format(img_name))
                # .cpu() is required before .numpy() when running on CUDA.
                boxes = output['boxes'].detach().cpu().numpy()
                scores = output['scores'].detach().cpu().numpy()

                im_names = np.repeat(img_name, len(boxes), axis=0)
                stacked = np.hstack((im_names.reshape(len(scores), 1), boxes.astype(int), scores.reshape(len(scores), 1)))
                # One CSV row per detection.
                np.savetxt(f, stacked, fmt='%s', delimiter=',', newline='\n')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='PyTorch Detection')
parser.add_argument('--data_path', required=True, help='path to the data to predict on')
parser.add_argument('--output_file', required=True, help='path where to write the prediction outputs')
parser.add_argument('--weight_file', required=True, help='path to the weight file')
parser.add_argument('--device', default='cpu', help='device to use. Either cpu or cuda')
model = parser.add_mutually_exclusive_group(required=True)
model.add_argument('--frcnn', action='store_true', help='use a Faster-RCNN model')
model.add_argument('--mrcnn', action='store_true', help='use a Mask-RCNN model')
args = parser.parse_args()
main(args)
| [
"os.path.exists",
"torchvision.models.detection.maskrcnn_resnet50_fpn",
"argparse.ArgumentParser",
"os.makedirs",
"utility.transforms.RandomHorizontalFlip",
"torch.load",
"torchvision.models.detection.faster_rcnn.FastRCNNPredictor",
"torchvision.models.detection.mask_rcnn.MaskRCNNPredictor",
"os.pat... | [((696, 717), 'utility.transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (705, 717), True, 'import utility.transforms as T\n'), ((852, 920), 'torchvision.models.detection.maskrcnn_resnet50_fpn', 'torchvision.models.detection.maskrcnn_resnet50_fpn', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (902, 920), False, 'import torchvision\n'), ((1132, 1175), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (1149, 1175), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((1433, 1495), 'torchvision.models.detection.mask_rcnn.MaskRCNNPredictor', 'MaskRCNNPredictor', (['in_features_mask', 'hidden_layer', 'num_classes'], {}), '(in_features_mask, hidden_layer, num_classes)\n', (1450, 1495), False, 'from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n'), ((1644, 1714), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (1696, 1714), False, 'import torchvision\n'), ((1926, 1969), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (1943, 1969), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((2342, 2391), 'torch.load', 'torch.load', (['args.weight_file'], {'map_location': 'device'}), '(args.weight_file, map_location=device)\n', (2352, 2391), False, 'import torch\n'), ((2605, 2723), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'collate_fn': 'utils.collate_fn'}), '(dataset_test, batch_size=1, shuffle=False,\n num_workers=1, collate_fn=utils.collate_fn)\n', (2632, 2723), False, 'import torch\n'), ((2869, 2902), 'os.path.dirname', 
'os.path.dirname', (['args.output_file'], {}), '(args.output_file)\n', (2884, 2902), False, 'import os\n'), ((3869, 3925), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Detection"""'}), "(description='PyTorch Detection')\n", (3892, 3925), False, 'import argparse\n'), ((602, 614), 'utility.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (612, 614), True, 'import utility.transforms as T\n'), ((2914, 2939), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (2928, 2939), False, 'import os\n'), ((2949, 2971), 'os.makedirs', 'os.makedirs', (['base_path'], {}), '(base_path)\n', (2960, 2971), False, 'import os\n'), ((656, 683), 'utility.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (678, 683), True, 'import utility.transforms as T\n'), ((3745, 3806), 'numpy.savetxt', 'np.savetxt', (['f', 'stacked'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "(f, stacked, fmt='%s', delimiter=',', newline='\\n')\n", (3755, 3806), True, 'import numpy as np\n')] |
import pickle
import tensorflow as tf
import os
import numpy as np
import random
from PIL import Image
from tqdm import tqdm
from common import load_pickle_file
from constants import INPUT_HEIGHT, INPUT_WIDTH, CLASS_NO, DATASET_DIRECTORY_PATH, TFRECORDS_SAVE_PATH, \
TFRECORDS_FORMAT_PATTERN
def generate_tf_records_for_dataset(dataset_trainval_files, test_set_files, dataset_segmentation_image_files_path,
                                    dataset_segmentation_annotation_files_path, output_path, split_ratio=0.9):
    """Split trainval into train/valid sets and write TFRecords for all sets.

    output_path is recreated from scratch. The trainval file list is shuffled
    in place with a fixed seed, then split split_ratio / (1 - split_ratio)
    into train and validation; the test set is written as-is.

    Fixes over the original:
    - the shuffle uses the `random` module, so `random.seed(0)` is required
      for reproducibility (np.random.seed(0) alone did not seed it);
    - the validation slice starts at train_examples_no, so split_ratio == 1.0
      yields an empty validation set (the previous [-0:] slice returned the
      whole list).
    """
    if tf.io.gfile.exists(output_path):
        tf.io.gfile.rmtree(output_path)
    tf.io.gfile.mkdir(output_path)

    # Seed both RNGs so the train/valid split is reproducible across runs.
    np.random.seed(0)
    random.seed(0)
    random.shuffle(dataset_trainval_files)

    samples_no = len(dataset_trainval_files)
    train_examples_no = int(split_ratio * samples_no)
    train_set_files = dataset_trainval_files[:train_examples_no]
    val_set_files = dataset_trainval_files[train_examples_no:]

    sets_files = [(train_set_files, 'train'), (val_set_files, 'valid'), (test_set_files, 'test')]
    for set_files, set_name in tqdm(sets_files, position=0):
        generate_tf_records_for_set(set_files, set_name, dataset_segmentation_image_files_path,
                                    dataset_segmentation_annotation_files_path, output_path)
def generate_tf_records_for_set(set_files, set_name, dataset_segmentation_image_files_path,
                                dataset_segmentation_annotation_files_path, output_path):
    """Write one set ('train'/'valid'/'test') as TFRecord shards of at most
    ~2000 examples each, then record the set's example count."""
    total_files = len(set_files)
    # Partition the file list into roughly equal chunks of <= 2000 examples.
    chunks = np.array_split(set_files, np.ceil(total_files / 2000))
    for chunk_no, chunk in tqdm(enumerate(chunks), position=1, total=len(chunks)):
        generate_part_of_tf_records_for_set(part_no=chunk_no,
                                            part_files=chunk,
                                            set_name=set_name,
                                            no_of_parts=len(chunks),
                                            dataset_segmentation_image_files_path=dataset_segmentation_image_files_path,
                                            dataset_segmentation_annotation_files_path=dataset_segmentation_annotation_files_path,
                                            output_path=output_path)
    write_sets_length(os.path.join(TFRECORDS_SAVE_PATH, '../'), set_name, total_files)
def generate_part_of_tf_records_for_set(part_no, part_files, set_name, no_of_parts,
                                        dataset_segmentation_image_files_path,
                                        dataset_segmentation_annotation_files_path, output_path):
    """Write one TFRecord shard containing the images/annotations in part_files.

    Each image (.jpg) and its annotation mask (.png) are resized to
    (INPUT_HEIGHT, INPUT_WIDTH) and serialized as raw bytes. Test-set shards
    get an all-zero placeholder annotation since no ground truth exists.
    """
    with tf.io.TFRecordWriter(
            os.path.join(output_path, TFRECORDS_FORMAT_PATTERN.format(set_name, part_no + 1, no_of_parts))) as writer:
        for file in tqdm(part_files, position=2):
            segmentation_image_path = os.path.join(dataset_segmentation_image_files_path, file + ".jpg")
            image = Image.open(segmentation_image_path)
            image = image.resize((INPUT_HEIGHT, INPUT_WIDTH))
            image = np.array(image)
            if 'test' not in set_name:
                segmentation_annotation_path = os.path.join(dataset_segmentation_annotation_files_path, file + ".png")
                annotation = Image.open(segmentation_annotation_path)
                annotation = annotation.resize((INPUT_HEIGHT, INPUT_WIDTH))
                annotation = np.array(annotation, dtype=np.uint8)
                # Remap label 255 (presumably the ignore/void label of the
                # source masks — confirm) to CLASS_NO.
                annotation[annotation == 255] = CLASS_NO
            else:
                # No ground truth for the test split: all-zero placeholder.
                # NOTE(review): this branch yields float64, unlike the uint8
                # branch above — confirm the record readers tolerate it.
                annotation = np.zeros(image.shape[:2])
            example = convert_image_and_annotation_to_tf_example(file, image, annotation)
            writer.write(example.SerializeToString())
def convert_image_and_annotation_to_tf_example(filename, x, y):
    """Serialize an image `x` and its annotation mask `y` into a tf.train.Example.

    All three features ('filename', 'image', 'annotation') are stored as raw
    bytes; readers must know the original dtypes and shapes to decode them.

    NOTE(review): .tostring() is a deprecated numpy alias for .tobytes(), and
    it requires `filename` to be a numpy value rather than a plain str —
    confirm callers always pass numpy array elements here.
    """
    # The image and the mask must agree in their spatial dimensions.
    assert (x.shape[0] == y.shape[0] and x.shape[1] == y.shape[
        1]), "Dimensions of image and annotation image must be equal."
    return tf.train.Example(features=tf.train.Features(
        feature={'filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename.tostring()])),
                 'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[x.tostring()])),
                 'annotation': tf.train.Feature(bytes_list=tf.train.BytesList(value=[y.tostring()]))}))
def write_sets_length(path, set_name, set_files_no):
    """Record the example count of `set_name` in <path>/sets_count.pickle.

    If the pickle already exists, its other sets' counts are preserved and
    only this set's entry is updated.
    """
    local_sum_sets_split = {set_name: set_files_no}
    if tf.io.gfile.exists(os.path.join(path, "sets_count.pickle")):
        # NOTE(review): the existence check uses os.path.join(path, ...) but
        # the load passes only the bare filename — confirm load_pickle_file
        # resolves to the same location, otherwise this reads from the
        # current working directory instead.
        local_sum_sets_split = load_pickle_file("sets_count.pickle")
    local_sum_sets_split[set_name] = set_files_no
    with tf.io.gfile.GFile(os.path.join(path, "sets_count.pickle"), mode='wb') as f:
        pickle.dump(local_sum_sets_split, f, protocol=pickle.HIGHEST_PROTOCOL)
def main():
    """Build TFRecords for the segmentation dataset's trainval/test splits.

    Reads the split listings from the dataset's ImageSets/Segmentation
    directory, then delegates shard generation to
    ``generate_tf_records_for_dataset``.
    """
    annotations_dir = os.path.join(DATASET_DIRECTORY_PATH, "SegmentationClass")
    images_dir = os.path.join(DATASET_DIRECTORY_PATH, "JPEGImages")
    splits_dir = os.path.join(DATASET_DIRECTORY_PATH, "ImageSets", "Segmentation")

    def read_split(split_path):
        # Each line is one image stem; strip the trailing newline.
        with open(split_path, "r") as f:
            return [line.replace("\n", "") for line in f.readlines()]

    trainval_files = read_split(os.path.join(splits_dir, "trainval.txt"))
    test_files = read_split(os.path.join(splits_dir, "test.txt"))
    generate_tf_records_for_dataset(trainval_files, test_files, images_dir,
                                    annotations_dir, TFRECORDS_SAVE_PATH)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"numpy.ceil",
"PIL.Image.open",
"pickle.dump",
"random.shuffle",
"tensorflow.io.gfile.rmtree",
"tqdm.tqdm",
"os.path.join",
"common.load_pickle_file",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"constants.TFRECORDS_FORMAT_PATTERN.format",
"tensorflow.io.gfile.exists",
"tensorflow.i... | [((533, 564), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['output_path'], {}), '(output_path)\n', (551, 564), True, 'import tensorflow as tf\n'), ((610, 640), 'tensorflow.io.gfile.mkdir', 'tf.io.gfile.mkdir', (['output_path'], {}), '(output_path)\n', (627, 640), True, 'import tensorflow as tf\n'), ((646, 663), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (660, 663), True, 'import numpy as np\n'), ((668, 706), 'random.shuffle', 'random.shuffle', (['dataset_trainval_files'], {}), '(dataset_trainval_files)\n', (682, 706), False, 'import random\n'), ((1118, 1146), 'tqdm.tqdm', 'tqdm', (['sets_files'], {'position': '(0)'}), '(sets_files, position=0)\n', (1122, 1146), False, 'from tqdm import tqdm\n'), ((4874, 4931), 'os.path.join', 'os.path.join', (['DATASET_DIRECTORY_PATH', '"""SegmentationClass"""'], {}), "(DATASET_DIRECTORY_PATH, 'SegmentationClass')\n", (4886, 4931), False, 'import os\n'), ((4976, 5026), 'os.path.join', 'os.path.join', (['DATASET_DIRECTORY_PATH', '"""JPEGImages"""'], {}), "(DATASET_DIRECTORY_PATH, 'JPEGImages')\n", (4988, 5026), False, 'import os\n'), ((5061, 5146), 'os.path.join', 'os.path.join', (['DATASET_DIRECTORY_PATH', '"""ImageSets"""', '"""Segmentation"""', '"""trainval.txt"""'], {}), "(DATASET_DIRECTORY_PATH, 'ImageSets', 'Segmentation',\n 'trainval.txt')\n", (5073, 5146), False, 'import os\n'), ((5173, 5250), 'os.path.join', 'os.path.join', (['DATASET_DIRECTORY_PATH', '"""ImageSets"""', '"""Segmentation"""', '"""test.txt"""'], {}), "(DATASET_DIRECTORY_PATH, 'ImageSets', 'Segmentation', 'test.txt')\n", (5185, 5250), False, 'import os\n'), ((574, 605), 'tensorflow.io.gfile.rmtree', 'tf.io.gfile.rmtree', (['output_path'], {}), '(output_path)\n', (592, 605), True, 'import tensorflow as tf\n'), ((1603, 1631), 'numpy.ceil', 'np.ceil', (['(set_files_no / 2000)'], {}), '(set_files_no / 2000)\n', (1610, 1631), True, 'import numpy as np\n'), ((2345, 2385), 'os.path.join', 'os.path.join', 
(['TFRECORDS_SAVE_PATH', '"""../"""'], {}), "(TFRECORDS_SAVE_PATH, '../')\n", (2357, 2385), False, 'import os\n'), ((2844, 2872), 'tqdm.tqdm', 'tqdm', (['part_files'], {'position': '(2)'}), '(part_files, position=2)\n', (2848, 2872), False, 'from tqdm import tqdm\n'), ((4480, 4519), 'os.path.join', 'os.path.join', (['path', '"""sets_count.pickle"""'], {}), "(path, 'sets_count.pickle')\n", (4492, 4519), False, 'import os\n'), ((4553, 4590), 'common.load_pickle_file', 'load_pickle_file', (['"""sets_count.pickle"""'], {}), "('sets_count.pickle')\n", (4569, 4590), False, 'from common import load_pickle_file\n'), ((4740, 4810), 'pickle.dump', 'pickle.dump', (['local_sum_sets_split', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(local_sum_sets_split, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (4751, 4810), False, 'import pickle\n'), ((2912, 2978), 'os.path.join', 'os.path.join', (['dataset_segmentation_image_files_path', "(file + '.jpg')"], {}), "(dataset_segmentation_image_files_path, file + '.jpg')\n", (2924, 2978), False, 'import os\n'), ((3000, 3035), 'PIL.Image.open', 'Image.open', (['segmentation_image_path'], {}), '(segmentation_image_path)\n', (3010, 3035), False, 'from PIL import Image\n'), ((3118, 3133), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3126, 3133), True, 'import numpy as np\n'), ((4674, 4713), 'os.path.join', 'os.path.join', (['path', '"""sets_count.pickle"""'], {}), "(path, 'sets_count.pickle')\n", (4686, 4713), False, 'import os\n'), ((2743, 2810), 'constants.TFRECORDS_FORMAT_PATTERN.format', 'TFRECORDS_FORMAT_PATTERN.format', (['set_name', '(part_no + 1)', 'no_of_parts'], {}), '(set_name, part_no + 1, no_of_parts)\n', (2774, 2810), False, 'from constants import INPUT_HEIGHT, INPUT_WIDTH, CLASS_NO, DATASET_DIRECTORY_PATH, TFRECORDS_SAVE_PATH, TFRECORDS_FORMAT_PATTERN\n'), ((3221, 3292), 'os.path.join', 'os.path.join', (['dataset_segmentation_annotation_files_path', "(file + '.png')"], {}), 
"(dataset_segmentation_annotation_files_path, file + '.png')\n", (3233, 3292), False, 'import os\n'), ((3322, 3362), 'PIL.Image.open', 'Image.open', (['segmentation_annotation_path'], {}), '(segmentation_annotation_path)\n', (3332, 3362), False, 'from PIL import Image\n'), ((3468, 3504), 'numpy.array', 'np.array', (['annotation'], {'dtype': 'np.uint8'}), '(annotation, dtype=np.uint8)\n', (3476, 3504), True, 'import numpy as np\n'), ((3609, 3634), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (3617, 3634), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.